]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-2.6.32.57-201202252119.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.57-201202252119.patch
CommitLineData
2ae4ee49
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..47f0daf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103+mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110+mkpiggy
111 mkprep
112+mkregtable
113 mktables
114 mktree
115 modpost
116@@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120+piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124@@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128+regdb.c
129 relocs
130+rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152+vmlinux.bin.all
153+vmlinux.bin.bz2
154 vmlinux.lds
155+vmlinux.relocs
156+voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zoffset.h
169diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170index c840e7d..f4c451c 100644
171--- a/Documentation/kernel-parameters.txt
172+++ b/Documentation/kernel-parameters.txt
173@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178+ virtualization environments that don't cope well with the
179+ expand down segment used by UDEREF on X86-32 or the frequent
180+ page table updates on X86-64.
181+
182+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183+
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187diff --git a/Makefile b/Makefile
188index 3377650..8384f97 100644
189--- a/Makefile
190+++ b/Makefile
191@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196-HOSTCXXFLAGS = -O2
197+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207-PHONY += scripts_basic
208-scripts_basic:
209+PHONY += scripts_basic gcc-plugins
210+scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214@@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218- cscope TAGS tags help %docs check% \
219+ cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223@@ -526,6 +527,46 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227+ifndef DISABLE_PAX_PLUGINS
228+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231+endif
232+ifdef CONFIG_PAX_MEMORY_STACKLEAK
233+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235+endif
236+ifdef CONFIG_KALLOCSTAT_PLUGIN
237+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238+endif
239+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
242+endif
243+ifdef CONFIG_CHECKER_PLUGIN
244+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246+endif
247+endif
248+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250+ifeq ($(KBUILD_EXTMOD),)
251+gcc-plugins:
252+ $(Q)$(MAKE) $(build)=tools/gcc
253+else
254+gcc-plugins: ;
255+endif
256+else
257+gcc-plugins:
258+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
259+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
260+else
261+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
262+endif
263+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
264+endif
265+endif
266+
267 include $(srctree)/arch/$(SRCARCH)/Makefile
268
269 ifneq ($(CONFIG_FRAME_WARN),0)
270@@ -647,7 +688,7 @@ export mod_strip_cmd
271
272
273 ifeq ($(KBUILD_EXTMOD),)
274-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
275+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
276
277 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
278 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
279@@ -868,6 +909,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
280
281 # The actual objects are generated when descending,
282 # make sure no implicit rule kicks in
283+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
284 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
285
286 # Handle descending into subdirectories listed in $(vmlinux-dirs)
287@@ -877,7 +919,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288 # Error messages still appears in the original language
289
290 PHONY += $(vmlinux-dirs)
291-$(vmlinux-dirs): prepare scripts
292+$(vmlinux-dirs): gcc-plugins prepare scripts
293 $(Q)$(MAKE) $(build)=$@
294
295 # Build the kernel release string
296@@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
297 $(Q)$(MAKE) $(build)=. missing-syscalls
298
299 # All the preparing..
300+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
301 prepare: prepare0
302
303 # The asm symlink changes when $(ARCH) changes.
304@@ -1127,6 +1170,7 @@ all: modules
305 # using awk while concatenating to the final file.
306
307 PHONY += modules
308+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
309 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
310 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
311 @$(kecho) ' Building modules, stage 2.';
312@@ -1136,7 +1180,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
313
314 # Target to prepare building external modules
315 PHONY += modules_prepare
316-modules_prepare: prepare scripts
317+modules_prepare: gcc-plugins prepare scripts
318
319 # Target to install modules
320 PHONY += modules_install
321@@ -1201,7 +1245,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
322 include/linux/autoconf.h include/linux/version.h \
323 include/linux/utsrelease.h \
324 include/linux/bounds.h include/asm*/asm-offsets.h \
325- Module.symvers Module.markers tags TAGS cscope*
326+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
327
328 # clean - Delete most, but leave enough to build external modules
329 #
330@@ -1245,7 +1289,7 @@ distclean: mrproper
331 @find $(srctree) $(RCS_FIND_IGNORE) \
332 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
333 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
334- -o -name '.*.rej' -o -size 0 \
335+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
336 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
337 -type f -print | xargs rm -f
338
339@@ -1292,6 +1336,7 @@ help:
340 @echo ' modules_prepare - Set up for building external modules'
341 @echo ' tags/TAGS - Generate tags file for editors'
342 @echo ' cscope - Generate cscope index'
343+ @echo ' gtags - Generate GNU GLOBAL index'
344 @echo ' kernelrelease - Output the release version string'
345 @echo ' kernelversion - Output the version stored in Makefile'
346 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
347@@ -1393,6 +1438,7 @@ PHONY += $(module-dirs) modules
348 $(module-dirs): crmodverdir $(objtree)/Module.symvers
349 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
350
351+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
352 modules: $(module-dirs)
353 @$(kecho) ' Building modules, stage 2.';
354 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
355@@ -1448,7 +1494,7 @@ endif # KBUILD_EXTMOD
356 quiet_cmd_tags = GEN $@
357 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
358
359-tags TAGS cscope: FORCE
360+tags TAGS cscope gtags: FORCE
361 $(call cmd,tags)
362
363 # Scripts to check various things for consistency
364@@ -1513,17 +1559,19 @@ else
365 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
366 endif
367
368-%.s: %.c prepare scripts FORCE
369+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
370+%.s: %.c gcc-plugins prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 %.i: %.c prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374-%.o: %.c prepare scripts FORCE
375+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
376+%.o: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.lst: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380-%.s: %.S prepare scripts FORCE
381+%.s: %.S gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383-%.o: %.S prepare scripts FORCE
384+%.o: %.S gcc-plugins prepare scripts FORCE
385 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
386 %.symtypes: %.c prepare scripts FORCE
387 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
388@@ -1533,11 +1581,13 @@ endif
389 $(cmd_crmodverdir)
390 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
391 $(build)=$(build-dir)
392-%/: prepare scripts FORCE
393+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
394+%/: gcc-plugins prepare scripts FORCE
395 $(cmd_crmodverdir)
396 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
397 $(build)=$(build-dir)
398-%.ko: prepare scripts FORCE
399+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
400+%.ko: gcc-plugins prepare scripts FORCE
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir) $(@:.ko=.o)
404diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
405index 610dff4..f396854 100644
406--- a/arch/alpha/include/asm/atomic.h
407+++ b/arch/alpha/include/asm/atomic.h
408@@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
409 #define atomic_dec(v) atomic_sub(1,(v))
410 #define atomic64_dec(v) atomic64_sub(1,(v))
411
412+#define atomic64_read_unchecked(v) atomic64_read(v)
413+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
414+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
415+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
416+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
417+#define atomic64_inc_unchecked(v) atomic64_inc(v)
418+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
419+#define atomic64_dec_unchecked(v) atomic64_dec(v)
420+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
421+
422 #define smp_mb__before_atomic_dec() smp_mb()
423 #define smp_mb__after_atomic_dec() smp_mb()
424 #define smp_mb__before_atomic_inc() smp_mb()
425diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
426index 5c75c1b..c82f878 100644
427--- a/arch/alpha/include/asm/elf.h
428+++ b/arch/alpha/include/asm/elf.h
429@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
430
431 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
432
433+#ifdef CONFIG_PAX_ASLR
434+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
435+
436+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
437+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
438+#endif
439+
440 /* $0 is set by ld.so to a pointer to a function which might be
441 registered using atexit. This provides a mean for the dynamic
442 linker to call DT_FINI functions for shared libraries that have
443diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
444index 3f0c59f..cf1e100 100644
445--- a/arch/alpha/include/asm/pgtable.h
446+++ b/arch/alpha/include/asm/pgtable.h
447@@ -101,6 +101,17 @@ struct vm_area_struct;
448 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
449 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
450 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
451+
452+#ifdef CONFIG_PAX_PAGEEXEC
453+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
454+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
455+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
456+#else
457+# define PAGE_SHARED_NOEXEC PAGE_SHARED
458+# define PAGE_COPY_NOEXEC PAGE_COPY
459+# define PAGE_READONLY_NOEXEC PAGE_READONLY
460+#endif
461+
462 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
463
464 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
465diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
466index ebc3c89..20cfa63 100644
467--- a/arch/alpha/kernel/module.c
468+++ b/arch/alpha/kernel/module.c
469@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
470
471 /* The small sections were sorted to the end of the segment.
472 The following should definitely cover them. */
473- gp = (u64)me->module_core + me->core_size - 0x8000;
474+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
475 got = sechdrs[me->arch.gotsecindex].sh_addr;
476
477 for (i = 0; i < n; i++) {
478diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
479index a94e49c..d71dd44 100644
480--- a/arch/alpha/kernel/osf_sys.c
481+++ b/arch/alpha/kernel/osf_sys.c
482@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
483 /* At this point: (!vma || addr < vma->vm_end). */
484 if (limit - len < addr)
485 return -ENOMEM;
486- if (!vma || addr + len <= vma->vm_start)
487+ if (check_heap_stack_gap(vma, addr, len))
488 return addr;
489 addr = vma->vm_end;
490 vma = vma->vm_next;
491@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
492 merely specific addresses, but regions of memory -- perhaps
493 this feature should be incorporated into all ports? */
494
495+#ifdef CONFIG_PAX_RANDMMAP
496+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
497+#endif
498+
499 if (addr) {
500 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
501 if (addr != (unsigned long) -ENOMEM)
502@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
503 }
504
505 /* Next, try allocating at TASK_UNMAPPED_BASE. */
506- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
507- len, limit);
508+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
509+
510 if (addr != (unsigned long) -ENOMEM)
511 return addr;
512
513diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
514index 00a31de..2ded0f2 100644
515--- a/arch/alpha/mm/fault.c
516+++ b/arch/alpha/mm/fault.c
517@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
518 __reload_thread(pcb);
519 }
520
521+#ifdef CONFIG_PAX_PAGEEXEC
522+/*
523+ * PaX: decide what to do with offenders (regs->pc = fault address)
524+ *
525+ * returns 1 when task should be killed
526+ * 2 when patched PLT trampoline was detected
527+ * 3 when unpatched PLT trampoline was detected
528+ */
529+static int pax_handle_fetch_fault(struct pt_regs *regs)
530+{
531+
532+#ifdef CONFIG_PAX_EMUPLT
533+ int err;
534+
535+ do { /* PaX: patched PLT emulation #1 */
536+ unsigned int ldah, ldq, jmp;
537+
538+ err = get_user(ldah, (unsigned int *)regs->pc);
539+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
540+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
541+
542+ if (err)
543+ break;
544+
545+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
546+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
547+ jmp == 0x6BFB0000U)
548+ {
549+ unsigned long r27, addr;
550+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
551+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
552+
553+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
554+ err = get_user(r27, (unsigned long *)addr);
555+ if (err)
556+ break;
557+
558+ regs->r27 = r27;
559+ regs->pc = r27;
560+ return 2;
561+ }
562+ } while (0);
563+
564+ do { /* PaX: patched PLT emulation #2 */
565+ unsigned int ldah, lda, br;
566+
567+ err = get_user(ldah, (unsigned int *)regs->pc);
568+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
569+ err |= get_user(br, (unsigned int *)(regs->pc+8));
570+
571+ if (err)
572+ break;
573+
574+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
575+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
576+ (br & 0xFFE00000U) == 0xC3E00000U)
577+ {
578+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
579+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
580+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
581+
582+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
583+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
584+ return 2;
585+ }
586+ } while (0);
587+
588+ do { /* PaX: unpatched PLT emulation */
589+ unsigned int br;
590+
591+ err = get_user(br, (unsigned int *)regs->pc);
592+
593+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
594+ unsigned int br2, ldq, nop, jmp;
595+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
596+
597+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
598+ err = get_user(br2, (unsigned int *)addr);
599+ err |= get_user(ldq, (unsigned int *)(addr+4));
600+ err |= get_user(nop, (unsigned int *)(addr+8));
601+ err |= get_user(jmp, (unsigned int *)(addr+12));
602+ err |= get_user(resolver, (unsigned long *)(addr+16));
603+
604+ if (err)
605+ break;
606+
607+ if (br2 == 0xC3600000U &&
608+ ldq == 0xA77B000CU &&
609+ nop == 0x47FF041FU &&
610+ jmp == 0x6B7B0000U)
611+ {
612+ regs->r28 = regs->pc+4;
613+ regs->r27 = addr+16;
614+ regs->pc = resolver;
615+ return 3;
616+ }
617+ }
618+ } while (0);
619+#endif
620+
621+ return 1;
622+}
623+
624+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
625+{
626+ unsigned long i;
627+
628+ printk(KERN_ERR "PAX: bytes at PC: ");
629+ for (i = 0; i < 5; i++) {
630+ unsigned int c;
631+ if (get_user(c, (unsigned int *)pc+i))
632+ printk(KERN_CONT "???????? ");
633+ else
634+ printk(KERN_CONT "%08x ", c);
635+ }
636+ printk("\n");
637+}
638+#endif
639
640 /*
641 * This routine handles page faults. It determines the address,
642@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
643 good_area:
644 si_code = SEGV_ACCERR;
645 if (cause < 0) {
646- if (!(vma->vm_flags & VM_EXEC))
647+ if (!(vma->vm_flags & VM_EXEC)) {
648+
649+#ifdef CONFIG_PAX_PAGEEXEC
650+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
651+ goto bad_area;
652+
653+ up_read(&mm->mmap_sem);
654+ switch (pax_handle_fetch_fault(regs)) {
655+
656+#ifdef CONFIG_PAX_EMUPLT
657+ case 2:
658+ case 3:
659+ return;
660+#endif
661+
662+ }
663+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
664+ do_group_exit(SIGKILL);
665+#else
666 goto bad_area;
667+#endif
668+
669+ }
670 } else if (!cause) {
671 /* Allow reads even for write-only mappings */
672 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
673diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
674index b68faef..6dd1496 100644
675--- a/arch/arm/Kconfig
676+++ b/arch/arm/Kconfig
677@@ -14,6 +14,7 @@ config ARM
678 select SYS_SUPPORTS_APM_EMULATION
679 select HAVE_OPROFILE
680 select HAVE_ARCH_KGDB
681+ select GENERIC_ATOMIC64
682 select HAVE_KPROBES if (!XIP_KERNEL)
683 select HAVE_KRETPROBES if (HAVE_KPROBES)
684 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
685diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
686index d0daeab..ff286a8 100644
687--- a/arch/arm/include/asm/atomic.h
688+++ b/arch/arm/include/asm/atomic.h
689@@ -15,6 +15,10 @@
690 #include <linux/types.h>
691 #include <asm/system.h>
692
693+#ifdef CONFIG_GENERIC_ATOMIC64
694+#include <asm-generic/atomic64.h>
695+#endif
696+
697 #define ATOMIC_INIT(i) { (i) }
698
699 #ifdef __KERNEL__
700diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
701index 6aac3f5..265536b 100644
702--- a/arch/arm/include/asm/elf.h
703+++ b/arch/arm/include/asm/elf.h
704@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
705 the loader. We need to make sure that it is out of the way of the program
706 that it will "exec", and that there is sufficient room for the brk. */
707
708-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
709+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
710+
711+#ifdef CONFIG_PAX_ASLR
712+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
713+
714+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
715+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
716+#endif
717
718 /* When the program starts, a1 contains a pointer to a function to be
719 registered with atexit, as per the SVR4 ABI. A value of 0 means we
720diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
721index c019949..388fdd1 100644
722--- a/arch/arm/include/asm/kmap_types.h
723+++ b/arch/arm/include/asm/kmap_types.h
724@@ -19,6 +19,7 @@ enum km_type {
725 KM_SOFTIRQ0,
726 KM_SOFTIRQ1,
727 KM_L2_CACHE,
728+ KM_CLEARPAGE,
729 KM_TYPE_NR
730 };
731
732diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
733index 1d6bd40..fba0cb9 100644
734--- a/arch/arm/include/asm/uaccess.h
735+++ b/arch/arm/include/asm/uaccess.h
736@@ -22,6 +22,8 @@
737 #define VERIFY_READ 0
738 #define VERIFY_WRITE 1
739
740+extern void check_object_size(const void *ptr, unsigned long n, bool to);
741+
742 /*
743 * The exception table consists of pairs of addresses: the first is the
744 * address of an instruction that is allowed to fault, and the second is
745@@ -387,8 +389,23 @@ do { \
746
747
748 #ifdef CONFIG_MMU
749-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
750-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
751+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
752+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
753+
754+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
755+{
756+ if (!__builtin_constant_p(n))
757+ check_object_size(to, n, false);
758+ return ___copy_from_user(to, from, n);
759+}
760+
761+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
762+{
763+ if (!__builtin_constant_p(n))
764+ check_object_size(from, n, true);
765+ return ___copy_to_user(to, from, n);
766+}
767+
768 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
769 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
770 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
771@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
772
773 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
774 {
775+ if ((long)n < 0)
776+ return n;
777+
778 if (access_ok(VERIFY_READ, from, n))
779 n = __copy_from_user(to, from, n);
780 else /* security hole - plug it */
781@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
782
783 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
784 {
785+ if ((long)n < 0)
786+ return n;
787+
788 if (access_ok(VERIFY_WRITE, to, n))
789 n = __copy_to_user(to, from, n);
790 return n;
791diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
792index 0e62770..e2c2cd6 100644
793--- a/arch/arm/kernel/armksyms.c
794+++ b/arch/arm/kernel/armksyms.c
795@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
796 #ifdef CONFIG_MMU
797 EXPORT_SYMBOL(copy_page);
798
799-EXPORT_SYMBOL(__copy_from_user);
800-EXPORT_SYMBOL(__copy_to_user);
801+EXPORT_SYMBOL(___copy_from_user);
802+EXPORT_SYMBOL(___copy_to_user);
803 EXPORT_SYMBOL(__clear_user);
804
805 EXPORT_SYMBOL(__get_user_1);
806diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
807index ba8ccfe..2dc34dc 100644
808--- a/arch/arm/kernel/kgdb.c
809+++ b/arch/arm/kernel/kgdb.c
810@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
811 * and we handle the normal undef case within the do_undefinstr
812 * handler.
813 */
814-struct kgdb_arch arch_kgdb_ops = {
815+const struct kgdb_arch arch_kgdb_ops = {
816 #ifndef __ARMEB__
817 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
818 #else /* ! __ARMEB__ */
819diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
820index 3f361a7..6e806e1 100644
821--- a/arch/arm/kernel/traps.c
822+++ b/arch/arm/kernel/traps.c
823@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
824
825 DEFINE_SPINLOCK(die_lock);
826
827+extern void gr_handle_kernel_exploit(void);
828+
829 /*
830 * This function is protected against re-entrancy.
831 */
832@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
833 if (panic_on_oops)
834 panic("Fatal exception");
835
836+ gr_handle_kernel_exploit();
837+
838 do_exit(SIGSEGV);
839 }
840
841diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
842index e4fe124..0fc246b 100644
843--- a/arch/arm/lib/copy_from_user.S
844+++ b/arch/arm/lib/copy_from_user.S
845@@ -16,7 +16,7 @@
846 /*
847 * Prototype:
848 *
849- * size_t __copy_from_user(void *to, const void *from, size_t n)
850+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
851 *
852 * Purpose:
853 *
854@@ -84,11 +84,11 @@
855
856 .text
857
858-ENTRY(__copy_from_user)
859+ENTRY(___copy_from_user)
860
861 #include "copy_template.S"
862
863-ENDPROC(__copy_from_user)
864+ENDPROC(___copy_from_user)
865
866 .section .fixup,"ax"
867 .align 0
868diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
869index 1a71e15..ac7b258 100644
870--- a/arch/arm/lib/copy_to_user.S
871+++ b/arch/arm/lib/copy_to_user.S
872@@ -16,7 +16,7 @@
873 /*
874 * Prototype:
875 *
876- * size_t __copy_to_user(void *to, const void *from, size_t n)
877+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
878 *
879 * Purpose:
880 *
881@@ -88,11 +88,11 @@
882 .text
883
884 ENTRY(__copy_to_user_std)
885-WEAK(__copy_to_user)
886+WEAK(___copy_to_user)
887
888 #include "copy_template.S"
889
890-ENDPROC(__copy_to_user)
891+ENDPROC(___copy_to_user)
892
893 .section .fixup,"ax"
894 .align 0
895diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
896index ffdd274..91017b6 100644
897--- a/arch/arm/lib/uaccess.S
898+++ b/arch/arm/lib/uaccess.S
899@@ -19,7 +19,7 @@
900
901 #define PAGE_SHIFT 12
902
903-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
904+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
905 * Purpose : copy a block to user memory from kernel memory
906 * Params : to - user memory
907 * : from - kernel memory
908@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
909 sub r2, r2, ip
910 b .Lc2u_dest_aligned
911
912-ENTRY(__copy_to_user)
913+ENTRY(___copy_to_user)
914 stmfd sp!, {r2, r4 - r7, lr}
915 cmp r2, #4
916 blt .Lc2u_not_enough
917@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
918 ldrgtb r3, [r1], #0
919 USER( strgtbt r3, [r0], #1) @ May fault
920 b .Lc2u_finished
921-ENDPROC(__copy_to_user)
922+ENDPROC(___copy_to_user)
923
924 .section .fixup,"ax"
925 .align 0
926 9001: ldmfd sp!, {r0, r4 - r7, pc}
927 .previous
928
929-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
930+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
931 * Purpose : copy a block from user memory to kernel memory
932 * Params : to - kernel memory
933 * : from - user memory
934@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
935 sub r2, r2, ip
936 b .Lcfu_dest_aligned
937
938-ENTRY(__copy_from_user)
939+ENTRY(___copy_from_user)
940 stmfd sp!, {r0, r2, r4 - r7, lr}
941 cmp r2, #4
942 blt .Lcfu_not_enough
943@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
944 USER( ldrgtbt r3, [r1], #1) @ May fault
945 strgtb r3, [r0], #1
946 b .Lcfu_finished
947-ENDPROC(__copy_from_user)
948+ENDPROC(___copy_from_user)
949
950 .section .fixup,"ax"
951 .align 0
952diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
953index 6b967ff..67d5b2b 100644
954--- a/arch/arm/lib/uaccess_with_memcpy.c
955+++ b/arch/arm/lib/uaccess_with_memcpy.c
956@@ -97,7 +97,7 @@ out:
957 }
958
959 unsigned long
960-__copy_to_user(void __user *to, const void *from, unsigned long n)
961+___copy_to_user(void __user *to, const void *from, unsigned long n)
962 {
963 /*
964 * This test is stubbed out of the main function above to keep
965diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
966index 4028724..beec230 100644
967--- a/arch/arm/mach-at91/pm.c
968+++ b/arch/arm/mach-at91/pm.c
969@@ -348,7 +348,7 @@ static void at91_pm_end(void)
970 }
971
972
973-static struct platform_suspend_ops at91_pm_ops ={
974+static const struct platform_suspend_ops at91_pm_ops ={
975 .valid = at91_pm_valid_state,
976 .begin = at91_pm_begin,
977 .enter = at91_pm_enter,
978diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
979index 5218943..0a34552 100644
980--- a/arch/arm/mach-omap1/pm.c
981+++ b/arch/arm/mach-omap1/pm.c
982@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
983
984
985
986-static struct platform_suspend_ops omap_pm_ops ={
987+static const struct platform_suspend_ops omap_pm_ops ={
988 .prepare = omap_pm_prepare,
989 .enter = omap_pm_enter,
990 .finish = omap_pm_finish,
991diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
992index bff5c4e..d4c649b 100644
993--- a/arch/arm/mach-omap2/pm24xx.c
994+++ b/arch/arm/mach-omap2/pm24xx.c
995@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
996 enable_hlt();
997 }
998
999-static struct platform_suspend_ops omap_pm_ops = {
1000+static const struct platform_suspend_ops omap_pm_ops = {
1001 .prepare = omap2_pm_prepare,
1002 .enter = omap2_pm_enter,
1003 .finish = omap2_pm_finish,
1004diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1005index 8946319..7d3e661 100644
1006--- a/arch/arm/mach-omap2/pm34xx.c
1007+++ b/arch/arm/mach-omap2/pm34xx.c
1008@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1009 return;
1010 }
1011
1012-static struct platform_suspend_ops omap_pm_ops = {
1013+static const struct platform_suspend_ops omap_pm_ops = {
1014 .begin = omap3_pm_begin,
1015 .end = omap3_pm_end,
1016 .prepare = omap3_pm_prepare,
1017diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1018index b3d8d53..6e68ebc 100644
1019--- a/arch/arm/mach-pnx4008/pm.c
1020+++ b/arch/arm/mach-pnx4008/pm.c
1021@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1022 (state == PM_SUSPEND_MEM);
1023 }
1024
1025-static struct platform_suspend_ops pnx4008_pm_ops = {
1026+static const struct platform_suspend_ops pnx4008_pm_ops = {
1027 .enter = pnx4008_pm_enter,
1028 .valid = pnx4008_pm_valid,
1029 };
1030diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1031index 7693355..9beb00a 100644
1032--- a/arch/arm/mach-pxa/pm.c
1033+++ b/arch/arm/mach-pxa/pm.c
1034@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1035 pxa_cpu_pm_fns->finish();
1036 }
1037
1038-static struct platform_suspend_ops pxa_pm_ops = {
1039+static const struct platform_suspend_ops pxa_pm_ops = {
1040 .valid = pxa_pm_valid,
1041 .enter = pxa_pm_enter,
1042 .prepare = pxa_pm_prepare,
1043diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1044index 629e05d..06be589 100644
1045--- a/arch/arm/mach-pxa/sharpsl_pm.c
1046+++ b/arch/arm/mach-pxa/sharpsl_pm.c
1047@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1048 }
1049
1050 #ifdef CONFIG_PM
1051-static struct platform_suspend_ops sharpsl_pm_ops = {
1052+static const struct platform_suspend_ops sharpsl_pm_ops = {
1053 .prepare = pxa_pm_prepare,
1054 .finish = pxa_pm_finish,
1055 .enter = corgi_pxa_pm_enter,
1056diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1057index c83fdc8..ab9fc44 100644
1058--- a/arch/arm/mach-sa1100/pm.c
1059+++ b/arch/arm/mach-sa1100/pm.c
1060@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1061 return virt_to_phys(sp);
1062 }
1063
1064-static struct platform_suspend_ops sa11x0_pm_ops = {
1065+static const struct platform_suspend_ops sa11x0_pm_ops = {
1066 .enter = sa11x0_pm_enter,
1067 .valid = suspend_valid_only_mem,
1068 };
1069diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1070index 3191cd6..c0739db 100644
1071--- a/arch/arm/mm/fault.c
1072+++ b/arch/arm/mm/fault.c
1073@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1074 }
1075 #endif
1076
1077+#ifdef CONFIG_PAX_PAGEEXEC
1078+ if (fsr & FSR_LNX_PF) {
1079+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1080+ do_group_exit(SIGKILL);
1081+ }
1082+#endif
1083+
1084 tsk->thread.address = addr;
1085 tsk->thread.error_code = fsr;
1086 tsk->thread.trap_no = 14;
1087@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1088 }
1089 #endif /* CONFIG_MMU */
1090
1091+#ifdef CONFIG_PAX_PAGEEXEC
1092+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1093+{
1094+ long i;
1095+
1096+ printk(KERN_ERR "PAX: bytes at PC: ");
1097+ for (i = 0; i < 20; i++) {
1098+ unsigned char c;
1099+ if (get_user(c, (__force unsigned char __user *)pc+i))
1100+ printk(KERN_CONT "?? ");
1101+ else
1102+ printk(KERN_CONT "%02x ", c);
1103+ }
1104+ printk("\n");
1105+
1106+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1107+ for (i = -1; i < 20; i++) {
1108+ unsigned long c;
1109+ if (get_user(c, (__force unsigned long __user *)sp+i))
1110+ printk(KERN_CONT "???????? ");
1111+ else
1112+ printk(KERN_CONT "%08lx ", c);
1113+ }
1114+ printk("\n");
1115+}
1116+#endif
1117+
1118 /*
1119 * First Level Translation Fault Handler
1120 *
1121diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1122index f5abc51..7ec524c 100644
1123--- a/arch/arm/mm/mmap.c
1124+++ b/arch/arm/mm/mmap.c
1125@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1126 if (len > TASK_SIZE)
1127 return -ENOMEM;
1128
1129+#ifdef CONFIG_PAX_RANDMMAP
1130+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1131+#endif
1132+
1133 if (addr) {
1134 if (do_align)
1135 addr = COLOUR_ALIGN(addr, pgoff);
1136@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1137 addr = PAGE_ALIGN(addr);
1138
1139 vma = find_vma(mm, addr);
1140- if (TASK_SIZE - len >= addr &&
1141- (!vma || addr + len <= vma->vm_start))
1142+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1143 return addr;
1144 }
1145 if (len > mm->cached_hole_size) {
1146- start_addr = addr = mm->free_area_cache;
1147+ start_addr = addr = mm->free_area_cache;
1148 } else {
1149- start_addr = addr = TASK_UNMAPPED_BASE;
1150- mm->cached_hole_size = 0;
1151+ start_addr = addr = mm->mmap_base;
1152+ mm->cached_hole_size = 0;
1153 }
1154
1155 full_search:
1156@@ -94,14 +97,14 @@ full_search:
1157 * Start a new search - just in case we missed
1158 * some holes.
1159 */
1160- if (start_addr != TASK_UNMAPPED_BASE) {
1161- start_addr = addr = TASK_UNMAPPED_BASE;
1162+ if (start_addr != mm->mmap_base) {
1163+ start_addr = addr = mm->mmap_base;
1164 mm->cached_hole_size = 0;
1165 goto full_search;
1166 }
1167 return -ENOMEM;
1168 }
1169- if (!vma || addr + len <= vma->vm_start) {
1170+ if (check_heap_stack_gap(vma, addr, len)) {
1171 /*
1172 * Remember the place where we stopped the search:
1173 */
1174diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1175index 8d97db2..b66cfa5 100644
1176--- a/arch/arm/plat-s3c/pm.c
1177+++ b/arch/arm/plat-s3c/pm.c
1178@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1179 s3c_pm_check_cleanup();
1180 }
1181
1182-static struct platform_suspend_ops s3c_pm_ops = {
1183+static const struct platform_suspend_ops s3c_pm_ops = {
1184 .enter = s3c_pm_enter,
1185 .prepare = s3c_pm_prepare,
1186 .finish = s3c_pm_finish,
1187diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1188index d5d1d41..856e2ed 100644
1189--- a/arch/avr32/include/asm/elf.h
1190+++ b/arch/avr32/include/asm/elf.h
1191@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1192 the loader. We need to make sure that it is out of the way of the program
1193 that it will "exec", and that there is sufficient room for the brk. */
1194
1195-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1196+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1197
1198+#ifdef CONFIG_PAX_ASLR
1199+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1200+
1201+#define PAX_DELTA_MMAP_LEN 15
1202+#define PAX_DELTA_STACK_LEN 15
1203+#endif
1204
1205 /* This yields a mask that user programs can use to figure out what
1206 instruction set this CPU supports. This could be done in user space,
1207diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1208index b7f5c68..556135c 100644
1209--- a/arch/avr32/include/asm/kmap_types.h
1210+++ b/arch/avr32/include/asm/kmap_types.h
1211@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1212 D(11) KM_IRQ1,
1213 D(12) KM_SOFTIRQ0,
1214 D(13) KM_SOFTIRQ1,
1215-D(14) KM_TYPE_NR
1216+D(14) KM_CLEARPAGE,
1217+D(15) KM_TYPE_NR
1218 };
1219
1220 #undef D
1221diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1222index f021edf..32d680e 100644
1223--- a/arch/avr32/mach-at32ap/pm.c
1224+++ b/arch/avr32/mach-at32ap/pm.c
1225@@ -176,7 +176,7 @@ out:
1226 return 0;
1227 }
1228
1229-static struct platform_suspend_ops avr32_pm_ops = {
1230+static const struct platform_suspend_ops avr32_pm_ops = {
1231 .valid = avr32_pm_valid_state,
1232 .enter = avr32_pm_enter,
1233 };
1234diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1235index b61d86d..e292c7f 100644
1236--- a/arch/avr32/mm/fault.c
1237+++ b/arch/avr32/mm/fault.c
1238@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1239
1240 int exception_trace = 1;
1241
1242+#ifdef CONFIG_PAX_PAGEEXEC
1243+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1244+{
1245+ unsigned long i;
1246+
1247+ printk(KERN_ERR "PAX: bytes at PC: ");
1248+ for (i = 0; i < 20; i++) {
1249+ unsigned char c;
1250+ if (get_user(c, (unsigned char *)pc+i))
1251+ printk(KERN_CONT "???????? ");
1252+ else
1253+ printk(KERN_CONT "%02x ", c);
1254+ }
1255+ printk("\n");
1256+}
1257+#endif
1258+
1259 /*
1260 * This routine handles page faults. It determines the address and the
1261 * problem, and then passes it off to one of the appropriate routines.
1262@@ -157,6 +174,16 @@ bad_area:
1263 up_read(&mm->mmap_sem);
1264
1265 if (user_mode(regs)) {
1266+
1267+#ifdef CONFIG_PAX_PAGEEXEC
1268+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1269+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1270+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1271+ do_group_exit(SIGKILL);
1272+ }
1273+ }
1274+#endif
1275+
1276 if (exception_trace && printk_ratelimit())
1277 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1278 "sp %08lx ecr %lu\n",
1279diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1280index cce79d0..c406c85 100644
1281--- a/arch/blackfin/kernel/kgdb.c
1282+++ b/arch/blackfin/kernel/kgdb.c
1283@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1284 return -1; /* this means that we do not want to exit from the handler */
1285 }
1286
1287-struct kgdb_arch arch_kgdb_ops = {
1288+const struct kgdb_arch arch_kgdb_ops = {
1289 .gdb_bpt_instr = {0xa1},
1290 #ifdef CONFIG_SMP
1291 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1292diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1293index 8837be4..b2fb413 100644
1294--- a/arch/blackfin/mach-common/pm.c
1295+++ b/arch/blackfin/mach-common/pm.c
1296@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1297 return 0;
1298 }
1299
1300-struct platform_suspend_ops bfin_pm_ops = {
1301+const struct platform_suspend_ops bfin_pm_ops = {
1302 .enter = bfin_pm_enter,
1303 .valid = bfin_pm_valid,
1304 };
1305diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1306index 00a57af..c3ef0cd 100644
1307--- a/arch/frv/include/asm/atomic.h
1308+++ b/arch/frv/include/asm/atomic.h
1309@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1310 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1311 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1312
1313+#define atomic64_read_unchecked(v) atomic64_read(v)
1314+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1315+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1316+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1317+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1318+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1319+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1320+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1321+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1322+
1323 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1324 {
1325 int c, old;
1326diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1327index f8e16b2..c73ff79 100644
1328--- a/arch/frv/include/asm/kmap_types.h
1329+++ b/arch/frv/include/asm/kmap_types.h
1330@@ -23,6 +23,7 @@ enum km_type {
1331 KM_IRQ1,
1332 KM_SOFTIRQ0,
1333 KM_SOFTIRQ1,
1334+ KM_CLEARPAGE,
1335 KM_TYPE_NR
1336 };
1337
1338diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1339index 385fd30..6c3d97e 100644
1340--- a/arch/frv/mm/elf-fdpic.c
1341+++ b/arch/frv/mm/elf-fdpic.c
1342@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1343 if (addr) {
1344 addr = PAGE_ALIGN(addr);
1345 vma = find_vma(current->mm, addr);
1346- if (TASK_SIZE - len >= addr &&
1347- (!vma || addr + len <= vma->vm_start))
1348+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1349 goto success;
1350 }
1351
1352@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1353 for (; vma; vma = vma->vm_next) {
1354 if (addr > limit)
1355 break;
1356- if (addr + len <= vma->vm_start)
1357+ if (check_heap_stack_gap(vma, addr, len))
1358 goto success;
1359 addr = vma->vm_end;
1360 }
1361@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1362 for (; vma; vma = vma->vm_next) {
1363 if (addr > limit)
1364 break;
1365- if (addr + len <= vma->vm_start)
1366+ if (check_heap_stack_gap(vma, addr, len))
1367 goto success;
1368 addr = vma->vm_end;
1369 }
1370diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1371index e4a80d8..11a7ea1 100644
1372--- a/arch/ia64/hp/common/hwsw_iommu.c
1373+++ b/arch/ia64/hp/common/hwsw_iommu.c
1374@@ -17,7 +17,7 @@
1375 #include <linux/swiotlb.h>
1376 #include <asm/machvec.h>
1377
1378-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1379+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1380
1381 /* swiotlb declarations & definitions: */
1382 extern int swiotlb_late_init_with_default_size (size_t size);
1383@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1384 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1385 }
1386
1387-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1388+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1389 {
1390 if (use_swiotlb(dev))
1391 return &swiotlb_dma_ops;
1392diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1393index 01ae69b..35752fd 100644
1394--- a/arch/ia64/hp/common/sba_iommu.c
1395+++ b/arch/ia64/hp/common/sba_iommu.c
1396@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1397 },
1398 };
1399
1400-extern struct dma_map_ops swiotlb_dma_ops;
1401+extern const struct dma_map_ops swiotlb_dma_ops;
1402
1403 static int __init
1404 sba_init(void)
1405@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1406
1407 __setup("sbapagesize=",sba_page_override);
1408
1409-struct dma_map_ops sba_dma_ops = {
1410+const struct dma_map_ops sba_dma_ops = {
1411 .alloc_coherent = sba_alloc_coherent,
1412 .free_coherent = sba_free_coherent,
1413 .map_page = sba_map_page,
1414diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1415index c69552b..c7122f4 100644
1416--- a/arch/ia64/ia32/binfmt_elf32.c
1417+++ b/arch/ia64/ia32/binfmt_elf32.c
1418@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1419
1420 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1421
1422+#ifdef CONFIG_PAX_ASLR
1423+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1424+
1425+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1426+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1427+#endif
1428+
1429 /* Ugly but avoids duplication */
1430 #include "../../../fs/binfmt_elf.c"
1431
1432diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1433index 0f15349..26b3429 100644
1434--- a/arch/ia64/ia32/ia32priv.h
1435+++ b/arch/ia64/ia32/ia32priv.h
1436@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1437 #define ELF_DATA ELFDATA2LSB
1438 #define ELF_ARCH EM_386
1439
1440-#define IA32_STACK_TOP IA32_PAGE_OFFSET
1441+#ifdef CONFIG_PAX_RANDUSTACK
1442+#define __IA32_DELTA_STACK (current->mm->delta_stack)
1443+#else
1444+#define __IA32_DELTA_STACK 0UL
1445+#endif
1446+
1447+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1448+
1449 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1450 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1451
1452diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1453index 88405cb..de5ca5d 100644
1454--- a/arch/ia64/include/asm/atomic.h
1455+++ b/arch/ia64/include/asm/atomic.h
1456@@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1457 #define atomic64_inc(v) atomic64_add(1, (v))
1458 #define atomic64_dec(v) atomic64_sub(1, (v))
1459
1460+#define atomic64_read_unchecked(v) atomic64_read(v)
1461+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1462+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1463+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1464+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1465+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1466+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1467+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1468+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1469+
1470 /* Atomic operations are already serializing */
1471 #define smp_mb__before_atomic_dec() barrier()
1472 #define smp_mb__after_atomic_dec() barrier()
1473diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1474index 8d3c79c..71b3af6 100644
1475--- a/arch/ia64/include/asm/dma-mapping.h
1476+++ b/arch/ia64/include/asm/dma-mapping.h
1477@@ -12,7 +12,7 @@
1478
1479 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1480
1481-extern struct dma_map_ops *dma_ops;
1482+extern const struct dma_map_ops *dma_ops;
1483 extern struct ia64_machine_vector ia64_mv;
1484 extern void set_iommu_machvec(void);
1485
1486@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1487 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1488 dma_addr_t *daddr, gfp_t gfp)
1489 {
1490- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1491+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1492 void *caddr;
1493
1494 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1495@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1496 static inline void dma_free_coherent(struct device *dev, size_t size,
1497 void *caddr, dma_addr_t daddr)
1498 {
1499- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1500+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1501 debug_dma_free_coherent(dev, size, caddr, daddr);
1502 ops->free_coherent(dev, size, caddr, daddr);
1503 }
1504@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1505
1506 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1507 {
1508- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1509+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1510 return ops->mapping_error(dev, daddr);
1511 }
1512
1513 static inline int dma_supported(struct device *dev, u64 mask)
1514 {
1515- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1516+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1517 return ops->dma_supported(dev, mask);
1518 }
1519
1520diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1521index 86eddee..b116bb4 100644
1522--- a/arch/ia64/include/asm/elf.h
1523+++ b/arch/ia64/include/asm/elf.h
1524@@ -43,6 +43,13 @@
1525 */
1526 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1527
1528+#ifdef CONFIG_PAX_ASLR
1529+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1530+
1531+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1532+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1533+#endif
1534+
1535 #define PT_IA_64_UNWIND 0x70000001
1536
1537 /* IA-64 relocations: */
1538diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1539index 367d299..9ad4279 100644
1540--- a/arch/ia64/include/asm/machvec.h
1541+++ b/arch/ia64/include/asm/machvec.h
1542@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1543 /* DMA-mapping interface: */
1544 typedef void ia64_mv_dma_init (void);
1545 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1546-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1547+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1548
1549 /*
1550 * WARNING: The legacy I/O space is _architected_. Platforms are
1551@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1552 # endif /* CONFIG_IA64_GENERIC */
1553
1554 extern void swiotlb_dma_init(void);
1555-extern struct dma_map_ops *dma_get_ops(struct device *);
1556+extern const struct dma_map_ops *dma_get_ops(struct device *);
1557
1558 /*
1559 * Define default versions so we can extend machvec for new platforms without having
1560diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1561index 8840a69..cdb63d9 100644
1562--- a/arch/ia64/include/asm/pgtable.h
1563+++ b/arch/ia64/include/asm/pgtable.h
1564@@ -12,7 +12,7 @@
1565 * David Mosberger-Tang <davidm@hpl.hp.com>
1566 */
1567
1568-
1569+#include <linux/const.h>
1570 #include <asm/mman.h>
1571 #include <asm/page.h>
1572 #include <asm/processor.h>
1573@@ -143,6 +143,17 @@
1574 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1575 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1576 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1577+
1578+#ifdef CONFIG_PAX_PAGEEXEC
1579+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1580+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1581+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1582+#else
1583+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1584+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1585+# define PAGE_COPY_NOEXEC PAGE_COPY
1586+#endif
1587+
1588 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1589 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1590 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1591diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1592index 239ecdc..f94170e 100644
1593--- a/arch/ia64/include/asm/spinlock.h
1594+++ b/arch/ia64/include/asm/spinlock.h
1595@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1596 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1597
1598 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1599- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1600+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1601 }
1602
1603 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1604diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1605index 449c8c0..432a3d2 100644
1606--- a/arch/ia64/include/asm/uaccess.h
1607+++ b/arch/ia64/include/asm/uaccess.h
1608@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1609 const void *__cu_from = (from); \
1610 long __cu_len = (n); \
1611 \
1612- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1613+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1614 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1615 __cu_len; \
1616 })
1617@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1618 long __cu_len = (n); \
1619 \
1620 __chk_user_ptr(__cu_from); \
1621- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1622+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1623 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1624 __cu_len; \
1625 })
1626diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1627index f2c1600..969398a 100644
1628--- a/arch/ia64/kernel/dma-mapping.c
1629+++ b/arch/ia64/kernel/dma-mapping.c
1630@@ -3,7 +3,7 @@
1631 /* Set this to 1 if there is a HW IOMMU in the system */
1632 int iommu_detected __read_mostly;
1633
1634-struct dma_map_ops *dma_ops;
1635+const struct dma_map_ops *dma_ops;
1636 EXPORT_SYMBOL(dma_ops);
1637
1638 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1639@@ -16,7 +16,7 @@ static int __init dma_init(void)
1640 }
1641 fs_initcall(dma_init);
1642
1643-struct dma_map_ops *dma_get_ops(struct device *dev)
1644+const struct dma_map_ops *dma_get_ops(struct device *dev)
1645 {
1646 return dma_ops;
1647 }
1648diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1649index 1481b0a..e7d38ff 100644
1650--- a/arch/ia64/kernel/module.c
1651+++ b/arch/ia64/kernel/module.c
1652@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1653 void
1654 module_free (struct module *mod, void *module_region)
1655 {
1656- if (mod && mod->arch.init_unw_table &&
1657- module_region == mod->module_init) {
1658+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1659 unw_remove_unwind_table(mod->arch.init_unw_table);
1660 mod->arch.init_unw_table = NULL;
1661 }
1662@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1663 }
1664
1665 static inline int
1666+in_init_rx (const struct module *mod, uint64_t addr)
1667+{
1668+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1669+}
1670+
1671+static inline int
1672+in_init_rw (const struct module *mod, uint64_t addr)
1673+{
1674+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1675+}
1676+
1677+static inline int
1678 in_init (const struct module *mod, uint64_t addr)
1679 {
1680- return addr - (uint64_t) mod->module_init < mod->init_size;
1681+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1682+}
1683+
1684+static inline int
1685+in_core_rx (const struct module *mod, uint64_t addr)
1686+{
1687+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1688+}
1689+
1690+static inline int
1691+in_core_rw (const struct module *mod, uint64_t addr)
1692+{
1693+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1694 }
1695
1696 static inline int
1697 in_core (const struct module *mod, uint64_t addr)
1698 {
1699- return addr - (uint64_t) mod->module_core < mod->core_size;
1700+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1701 }
1702
1703 static inline int
1704@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1705 break;
1706
1707 case RV_BDREL:
1708- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1709+ if (in_init_rx(mod, val))
1710+ val -= (uint64_t) mod->module_init_rx;
1711+ else if (in_init_rw(mod, val))
1712+ val -= (uint64_t) mod->module_init_rw;
1713+ else if (in_core_rx(mod, val))
1714+ val -= (uint64_t) mod->module_core_rx;
1715+ else if (in_core_rw(mod, val))
1716+ val -= (uint64_t) mod->module_core_rw;
1717 break;
1718
1719 case RV_LTV:
1720@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1721 * addresses have been selected...
1722 */
1723 uint64_t gp;
1724- if (mod->core_size > MAX_LTOFF)
1725+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1726 /*
1727 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1728 * at the end of the module.
1729 */
1730- gp = mod->core_size - MAX_LTOFF / 2;
1731+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1732 else
1733- gp = mod->core_size / 2;
1734- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1735+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1736+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1737 mod->arch.gp = gp;
1738 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1739 }
1740diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1741index f6b1ff0..de773fb 100644
1742--- a/arch/ia64/kernel/pci-dma.c
1743+++ b/arch/ia64/kernel/pci-dma.c
1744@@ -43,7 +43,7 @@ struct device fallback_dev = {
1745 .dma_mask = &fallback_dev.coherent_dma_mask,
1746 };
1747
1748-extern struct dma_map_ops intel_dma_ops;
1749+extern const struct dma_map_ops intel_dma_ops;
1750
1751 static int __init pci_iommu_init(void)
1752 {
1753@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1754 }
1755 EXPORT_SYMBOL(iommu_dma_supported);
1756
1757+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1758+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1759+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1760+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1761+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1762+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1763+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1764+
1765+static const struct dma_map_ops intel_iommu_dma_ops = {
1766+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1767+ .alloc_coherent = intel_alloc_coherent,
1768+ .free_coherent = intel_free_coherent,
1769+ .map_sg = intel_map_sg,
1770+ .unmap_sg = intel_unmap_sg,
1771+ .map_page = intel_map_page,
1772+ .unmap_page = intel_unmap_page,
1773+ .mapping_error = intel_mapping_error,
1774+
1775+ .sync_single_for_cpu = machvec_dma_sync_single,
1776+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1777+ .sync_single_for_device = machvec_dma_sync_single,
1778+ .sync_sg_for_device = machvec_dma_sync_sg,
1779+ .dma_supported = iommu_dma_supported,
1780+};
1781+
1782 void __init pci_iommu_alloc(void)
1783 {
1784- dma_ops = &intel_dma_ops;
1785-
1786- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1787- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1788- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1789- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1790- dma_ops->dma_supported = iommu_dma_supported;
1791+ dma_ops = &intel_iommu_dma_ops;
1792
1793 /*
1794 * The order of these functions is important for
1795diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1796index 285aae8..61dbab6 100644
1797--- a/arch/ia64/kernel/pci-swiotlb.c
1798+++ b/arch/ia64/kernel/pci-swiotlb.c
1799@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1800 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1801 }
1802
1803-struct dma_map_ops swiotlb_dma_ops = {
1804+const struct dma_map_ops swiotlb_dma_ops = {
1805 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1806 .free_coherent = swiotlb_free_coherent,
1807 .map_page = swiotlb_map_page,
1808diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1809index 609d500..7dde2a8 100644
1810--- a/arch/ia64/kernel/sys_ia64.c
1811+++ b/arch/ia64/kernel/sys_ia64.c
1812@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1813 if (REGION_NUMBER(addr) == RGN_HPAGE)
1814 addr = 0;
1815 #endif
1816+
1817+#ifdef CONFIG_PAX_RANDMMAP
1818+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1819+ addr = mm->free_area_cache;
1820+ else
1821+#endif
1822+
1823 if (!addr)
1824 addr = mm->free_area_cache;
1825
1826@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1827 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1828 /* At this point: (!vma || addr < vma->vm_end). */
1829 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1830- if (start_addr != TASK_UNMAPPED_BASE) {
1831+ if (start_addr != mm->mmap_base) {
1832 /* Start a new search --- just in case we missed some holes. */
1833- addr = TASK_UNMAPPED_BASE;
1834+ addr = mm->mmap_base;
1835 goto full_search;
1836 }
1837 return -ENOMEM;
1838 }
1839- if (!vma || addr + len <= vma->vm_start) {
1840+ if (check_heap_stack_gap(vma, addr, len)) {
1841 /* Remember the address where we stopped this search: */
1842 mm->free_area_cache = addr + len;
1843 return addr;
1844diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1845index 8f06035..b3a5818 100644
1846--- a/arch/ia64/kernel/topology.c
1847+++ b/arch/ia64/kernel/topology.c
1848@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1849 return ret;
1850 }
1851
1852-static struct sysfs_ops cache_sysfs_ops = {
1853+static const struct sysfs_ops cache_sysfs_ops = {
1854 .show = cache_show
1855 };
1856
1857diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1858index 0a0c77b..8e55a81 100644
1859--- a/arch/ia64/kernel/vmlinux.lds.S
1860+++ b/arch/ia64/kernel/vmlinux.lds.S
1861@@ -190,7 +190,7 @@ SECTIONS
1862 /* Per-cpu data: */
1863 . = ALIGN(PERCPU_PAGE_SIZE);
1864 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1865- __phys_per_cpu_start = __per_cpu_load;
1866+ __phys_per_cpu_start = per_cpu_load;
1867 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1868 * into percpu page size
1869 */
1870diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1871index 19261a9..1611b7a 100644
1872--- a/arch/ia64/mm/fault.c
1873+++ b/arch/ia64/mm/fault.c
1874@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1875 return pte_present(pte);
1876 }
1877
1878+#ifdef CONFIG_PAX_PAGEEXEC
1879+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1880+{
1881+ unsigned long i;
1882+
1883+ printk(KERN_ERR "PAX: bytes at PC: ");
1884+ for (i = 0; i < 8; i++) {
1885+ unsigned int c;
1886+ if (get_user(c, (unsigned int *)pc+i))
1887+ printk(KERN_CONT "???????? ");
1888+ else
1889+ printk(KERN_CONT "%08x ", c);
1890+ }
1891+ printk("\n");
1892+}
1893+#endif
1894+
1895 void __kprobes
1896 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1897 {
1898@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1899 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1900 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1901
1902- if ((vma->vm_flags & mask) != mask)
1903+ if ((vma->vm_flags & mask) != mask) {
1904+
1905+#ifdef CONFIG_PAX_PAGEEXEC
1906+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1907+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1908+ goto bad_area;
1909+
1910+ up_read(&mm->mmap_sem);
1911+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1912+ do_group_exit(SIGKILL);
1913+ }
1914+#endif
1915+
1916 goto bad_area;
1917
1918+ }
1919+
1920 survive:
1921 /*
1922 * If for any reason at all we couldn't handle the fault, make
1923diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1924index b0f6157..a082bbc 100644
1925--- a/arch/ia64/mm/hugetlbpage.c
1926+++ b/arch/ia64/mm/hugetlbpage.c
1927@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1928 /* At this point: (!vmm || addr < vmm->vm_end). */
1929 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1930 return -ENOMEM;
1931- if (!vmm || (addr + len) <= vmm->vm_start)
1932+ if (check_heap_stack_gap(vmm, addr, len))
1933 return addr;
1934 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1935 }
1936diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1937index 1857766..05cc6a3 100644
1938--- a/arch/ia64/mm/init.c
1939+++ b/arch/ia64/mm/init.c
1940@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1941 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1942 vma->vm_end = vma->vm_start + PAGE_SIZE;
1943 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1944+
1945+#ifdef CONFIG_PAX_PAGEEXEC
1946+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1947+ vma->vm_flags &= ~VM_EXEC;
1948+
1949+#ifdef CONFIG_PAX_MPROTECT
1950+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1951+ vma->vm_flags &= ~VM_MAYEXEC;
1952+#endif
1953+
1954+ }
1955+#endif
1956+
1957 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1958 down_write(&current->mm->mmap_sem);
1959 if (insert_vm_struct(current->mm, vma)) {
1960diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1961index 98b6849..8046766 100644
1962--- a/arch/ia64/sn/pci/pci_dma.c
1963+++ b/arch/ia64/sn/pci/pci_dma.c
1964@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1965 return ret;
1966 }
1967
1968-static struct dma_map_ops sn_dma_ops = {
1969+static const struct dma_map_ops sn_dma_ops = {
1970 .alloc_coherent = sn_dma_alloc_coherent,
1971 .free_coherent = sn_dma_free_coherent,
1972 .map_page = sn_dma_map_page,
1973diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1974index 82abd15..d95ae5d 100644
1975--- a/arch/m32r/lib/usercopy.c
1976+++ b/arch/m32r/lib/usercopy.c
1977@@ -14,6 +14,9 @@
1978 unsigned long
1979 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1980 {
1981+ if ((long)n < 0)
1982+ return n;
1983+
1984 prefetch(from);
1985 if (access_ok(VERIFY_WRITE, to, n))
1986 __copy_user(to,from,n);
1987@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1988 unsigned long
1989 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1990 {
1991+ if ((long)n < 0)
1992+ return n;
1993+
1994 prefetchw(to);
1995 if (access_ok(VERIFY_READ, from, n))
1996 __copy_user_zeroing(to,from,n);
1997diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
1998index fd7620f..63d73a6 100644
1999--- a/arch/mips/Kconfig
2000+++ b/arch/mips/Kconfig
2001@@ -5,6 +5,7 @@ config MIPS
2002 select HAVE_IDE
2003 select HAVE_OPROFILE
2004 select HAVE_ARCH_KGDB
2005+ select GENERIC_ATOMIC64 if !64BIT
2006 # Horrible source of confusion. Die, die, die ...
2007 select EMBEDDED
2008 select RTC_LIB if !LEMOTE_FULOONG2E
2009diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2010index 77f5021..2b1db8a 100644
2011--- a/arch/mips/Makefile
2012+++ b/arch/mips/Makefile
2013@@ -51,6 +51,8 @@ endif
2014 cflags-y := -ffunction-sections
2015 cflags-y += $(call cc-option, -mno-check-zero-division)
2016
2017+cflags-y += -Wno-sign-compare -Wno-extra
2018+
2019 ifdef CONFIG_32BIT
2020 ld-emul = $(32bit-emul)
2021 vmlinux-32 = vmlinux
2022diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2023index 632f986..fd0378d 100644
2024--- a/arch/mips/alchemy/devboards/pm.c
2025+++ b/arch/mips/alchemy/devboards/pm.c
2026@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2027
2028 }
2029
2030-static struct platform_suspend_ops db1x_pm_ops = {
2031+static const struct platform_suspend_ops db1x_pm_ops = {
2032 .valid = suspend_valid_only_mem,
2033 .begin = db1x_pm_begin,
2034 .enter = db1x_pm_enter,
2035diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2036index 09e7128..111035b 100644
2037--- a/arch/mips/include/asm/atomic.h
2038+++ b/arch/mips/include/asm/atomic.h
2039@@ -21,6 +21,10 @@
2040 #include <asm/war.h>
2041 #include <asm/system.h>
2042
2043+#ifdef CONFIG_GENERIC_ATOMIC64
2044+#include <asm-generic/atomic64.h>
2045+#endif
2046+
2047 #define ATOMIC_INIT(i) { (i) }
2048
2049 /*
2050@@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2051 */
2052 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2053
2054+#define atomic64_read_unchecked(v) atomic64_read(v)
2055+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2056+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2057+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2058+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2059+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2060+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2061+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2062+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2063+
2064 #endif /* CONFIG_64BIT */
2065
2066 /*
2067diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2068index 7990694..4e93acf 100644
2069--- a/arch/mips/include/asm/elf.h
2070+++ b/arch/mips/include/asm/elf.h
2071@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2072 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2073 #endif
2074
2075+#ifdef CONFIG_PAX_ASLR
2076+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2077+
2078+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2079+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2080+#endif
2081+
2082 #endif /* _ASM_ELF_H */
2083diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2084index f266295..627cfff 100644
2085--- a/arch/mips/include/asm/page.h
2086+++ b/arch/mips/include/asm/page.h
2087@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2088 #ifdef CONFIG_CPU_MIPS32
2089 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2090 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2091- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2092+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2093 #else
2094 typedef struct { unsigned long long pte; } pte_t;
2095 #define pte_val(x) ((x).pte)
2096diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2097index e48c0bf..f3acf65 100644
2098--- a/arch/mips/include/asm/reboot.h
2099+++ b/arch/mips/include/asm/reboot.h
2100@@ -9,7 +9,7 @@
2101 #ifndef _ASM_REBOOT_H
2102 #define _ASM_REBOOT_H
2103
2104-extern void (*_machine_restart)(char *command);
2105-extern void (*_machine_halt)(void);
2106+extern void (*__noreturn _machine_restart)(char *command);
2107+extern void (*__noreturn _machine_halt)(void);
2108
2109 #endif /* _ASM_REBOOT_H */
2110diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2111index 83b5509..9fa24a23 100644
2112--- a/arch/mips/include/asm/system.h
2113+++ b/arch/mips/include/asm/system.h
2114@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2115 */
2116 #define __ARCH_WANT_UNLOCKED_CTXSW
2117
2118-extern unsigned long arch_align_stack(unsigned long sp);
2119+#define arch_align_stack(x) ((x) & ~0xfUL)
2120
2121 #endif /* _ASM_SYSTEM_H */
2122diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2123index 9fdd8bc..fcf9d68 100644
2124--- a/arch/mips/kernel/binfmt_elfn32.c
2125+++ b/arch/mips/kernel/binfmt_elfn32.c
2126@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2127 #undef ELF_ET_DYN_BASE
2128 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2129
2130+#ifdef CONFIG_PAX_ASLR
2131+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2132+
2133+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2134+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2135+#endif
2136+
2137 #include <asm/processor.h>
2138 #include <linux/module.h>
2139 #include <linux/elfcore.h>
2140diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2141index ff44823..cf0b48a 100644
2142--- a/arch/mips/kernel/binfmt_elfo32.c
2143+++ b/arch/mips/kernel/binfmt_elfo32.c
2144@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2145 #undef ELF_ET_DYN_BASE
2146 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2147
2148+#ifdef CONFIG_PAX_ASLR
2149+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2150+
2151+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2152+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2153+#endif
2154+
2155 #include <asm/processor.h>
2156
2157 /*
2158diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2159index 50c9bb8..efdd5f8 100644
2160--- a/arch/mips/kernel/kgdb.c
2161+++ b/arch/mips/kernel/kgdb.c
2162@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2163 return -1;
2164 }
2165
2166+/* cannot be const */
2167 struct kgdb_arch arch_kgdb_ops;
2168
2169 /*
2170diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2171index f3d73e1..bb3f57a 100644
2172--- a/arch/mips/kernel/process.c
2173+++ b/arch/mips/kernel/process.c
2174@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2175 out:
2176 return pc;
2177 }
2178-
2179-/*
2180- * Don't forget that the stack pointer must be aligned on a 8 bytes
2181- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2182- */
2183-unsigned long arch_align_stack(unsigned long sp)
2184-{
2185- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2186- sp -= get_random_int() & ~PAGE_MASK;
2187-
2188- return sp & ALMASK;
2189-}
2190diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2191index 060563a..7fbf310 100644
2192--- a/arch/mips/kernel/reset.c
2193+++ b/arch/mips/kernel/reset.c
2194@@ -19,8 +19,8 @@
2195 * So handle all using function pointers to machine specific
2196 * functions.
2197 */
2198-void (*_machine_restart)(char *command);
2199-void (*_machine_halt)(void);
2200+void (*__noreturn _machine_restart)(char *command);
2201+void (*__noreturn _machine_halt)(void);
2202 void (*pm_power_off)(void);
2203
2204 EXPORT_SYMBOL(pm_power_off);
2205@@ -29,16 +29,19 @@ void machine_restart(char *command)
2206 {
2207 if (_machine_restart)
2208 _machine_restart(command);
2209+ BUG();
2210 }
2211
2212 void machine_halt(void)
2213 {
2214 if (_machine_halt)
2215 _machine_halt();
2216+ BUG();
2217 }
2218
2219 void machine_power_off(void)
2220 {
2221 if (pm_power_off)
2222 pm_power_off();
2223+ BUG();
2224 }
2225diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2226index 3f7f466..3abe0b5 100644
2227--- a/arch/mips/kernel/syscall.c
2228+++ b/arch/mips/kernel/syscall.c
2229@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2230 do_color_align = 0;
2231 if (filp || (flags & MAP_SHARED))
2232 do_color_align = 1;
2233+
2234+#ifdef CONFIG_PAX_RANDMMAP
2235+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2236+#endif
2237+
2238 if (addr) {
2239 if (do_color_align)
2240 addr = COLOUR_ALIGN(addr, pgoff);
2241 else
2242 addr = PAGE_ALIGN(addr);
2243 vmm = find_vma(current->mm, addr);
2244- if (task_size - len >= addr &&
2245- (!vmm || addr + len <= vmm->vm_start))
2246+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2247 return addr;
2248 }
2249- addr = TASK_UNMAPPED_BASE;
2250+ addr = current->mm->mmap_base;
2251 if (do_color_align)
2252 addr = COLOUR_ALIGN(addr, pgoff);
2253 else
2254@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2255 /* At this point: (!vmm || addr < vmm->vm_end). */
2256 if (task_size - len < addr)
2257 return -ENOMEM;
2258- if (!vmm || addr + len <= vmm->vm_start)
2259+ if (check_heap_stack_gap(vmm, addr, len))
2260 return addr;
2261 addr = vmm->vm_end;
2262 if (do_color_align)
2263diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2264index e97a7a2..f18f5b0 100644
2265--- a/arch/mips/mm/fault.c
2266+++ b/arch/mips/mm/fault.c
2267@@ -26,6 +26,23 @@
2268 #include <asm/ptrace.h>
2269 #include <asm/highmem.h> /* For VMALLOC_END */
2270
2271+#ifdef CONFIG_PAX_PAGEEXEC
2272+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2273+{
2274+ unsigned long i;
2275+
2276+ printk(KERN_ERR "PAX: bytes at PC: ");
2277+ for (i = 0; i < 5; i++) {
2278+ unsigned int c;
2279+ if (get_user(c, (unsigned int *)pc+i))
2280+ printk(KERN_CONT "???????? ");
2281+ else
2282+ printk(KERN_CONT "%08x ", c);
2283+ }
2284+ printk("\n");
2285+}
2286+#endif
2287+
2288 /*
2289 * This routine handles page faults. It determines the address,
2290 * and the problem, and then passes it off to one of the appropriate
2291diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2292index 8bc9e96..26554f8 100644
2293--- a/arch/parisc/include/asm/atomic.h
2294+++ b/arch/parisc/include/asm/atomic.h
2295@@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2296
2297 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2298
2299+#define atomic64_read_unchecked(v) atomic64_read(v)
2300+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2301+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2302+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2303+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2304+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2305+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2306+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2307+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2308+
2309 #else /* CONFIG_64BIT */
2310
2311 #include <asm-generic/atomic64.h>
2312diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2313index 9c802eb..0592e41 100644
2314--- a/arch/parisc/include/asm/elf.h
2315+++ b/arch/parisc/include/asm/elf.h
2316@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2317
2318 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2319
2320+#ifdef CONFIG_PAX_ASLR
2321+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2322+
2323+#define PAX_DELTA_MMAP_LEN 16
2324+#define PAX_DELTA_STACK_LEN 16
2325+#endif
2326+
2327 /* This yields a mask that user programs can use to figure out what
2328 instruction set this CPU supports. This could be done in user space,
2329 but it's not easy, and we've already done it here. */
2330diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2331index a27d2e2..18fd845 100644
2332--- a/arch/parisc/include/asm/pgtable.h
2333+++ b/arch/parisc/include/asm/pgtable.h
2334@@ -207,6 +207,17 @@
2335 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2336 #define PAGE_COPY PAGE_EXECREAD
2337 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2338+
2339+#ifdef CONFIG_PAX_PAGEEXEC
2340+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2341+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2342+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2343+#else
2344+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2345+# define PAGE_COPY_NOEXEC PAGE_COPY
2346+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2347+#endif
2348+
2349 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2350 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2351 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2352diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2353index 2120746..8d70a5e 100644
2354--- a/arch/parisc/kernel/module.c
2355+++ b/arch/parisc/kernel/module.c
2356@@ -95,16 +95,38 @@
2357
2358 /* three functions to determine where in the module core
2359 * or init pieces the location is */
2360+static inline int in_init_rx(struct module *me, void *loc)
2361+{
2362+ return (loc >= me->module_init_rx &&
2363+ loc < (me->module_init_rx + me->init_size_rx));
2364+}
2365+
2366+static inline int in_init_rw(struct module *me, void *loc)
2367+{
2368+ return (loc >= me->module_init_rw &&
2369+ loc < (me->module_init_rw + me->init_size_rw));
2370+}
2371+
2372 static inline int in_init(struct module *me, void *loc)
2373 {
2374- return (loc >= me->module_init &&
2375- loc <= (me->module_init + me->init_size));
2376+ return in_init_rx(me, loc) || in_init_rw(me, loc);
2377+}
2378+
2379+static inline int in_core_rx(struct module *me, void *loc)
2380+{
2381+ return (loc >= me->module_core_rx &&
2382+ loc < (me->module_core_rx + me->core_size_rx));
2383+}
2384+
2385+static inline int in_core_rw(struct module *me, void *loc)
2386+{
2387+ return (loc >= me->module_core_rw &&
2388+ loc < (me->module_core_rw + me->core_size_rw));
2389 }
2390
2391 static inline int in_core(struct module *me, void *loc)
2392 {
2393- return (loc >= me->module_core &&
2394- loc <= (me->module_core + me->core_size));
2395+ return in_core_rx(me, loc) || in_core_rw(me, loc);
2396 }
2397
2398 static inline int in_local(struct module *me, void *loc)
2399@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2400 }
2401
2402 /* align things a bit */
2403- me->core_size = ALIGN(me->core_size, 16);
2404- me->arch.got_offset = me->core_size;
2405- me->core_size += gots * sizeof(struct got_entry);
2406+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2407+ me->arch.got_offset = me->core_size_rw;
2408+ me->core_size_rw += gots * sizeof(struct got_entry);
2409
2410- me->core_size = ALIGN(me->core_size, 16);
2411- me->arch.fdesc_offset = me->core_size;
2412- me->core_size += fdescs * sizeof(Elf_Fdesc);
2413+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2414+ me->arch.fdesc_offset = me->core_size_rw;
2415+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2416
2417 me->arch.got_max = gots;
2418 me->arch.fdesc_max = fdescs;
2419@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2420
2421 BUG_ON(value == 0);
2422
2423- got = me->module_core + me->arch.got_offset;
2424+ got = me->module_core_rw + me->arch.got_offset;
2425 for (i = 0; got[i].addr; i++)
2426 if (got[i].addr == value)
2427 goto out;
2428@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2429 #ifdef CONFIG_64BIT
2430 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2431 {
2432- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2433+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2434
2435 if (!value) {
2436 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2437@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2438
2439 /* Create new one */
2440 fdesc->addr = value;
2441- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2442+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2443 return (Elf_Addr)fdesc;
2444 }
2445 #endif /* CONFIG_64BIT */
2446@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2447
2448 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2449 end = table + sechdrs[me->arch.unwind_section].sh_size;
2450- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2451+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2452
2453 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2454 me->arch.unwind_section, table, end, gp);
2455diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2456index 9147391..f3d949a 100644
2457--- a/arch/parisc/kernel/sys_parisc.c
2458+++ b/arch/parisc/kernel/sys_parisc.c
2459@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2460 /* At this point: (!vma || addr < vma->vm_end). */
2461 if (TASK_SIZE - len < addr)
2462 return -ENOMEM;
2463- if (!vma || addr + len <= vma->vm_start)
2464+ if (check_heap_stack_gap(vma, addr, len))
2465 return addr;
2466 addr = vma->vm_end;
2467 }
2468@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2469 /* At this point: (!vma || addr < vma->vm_end). */
2470 if (TASK_SIZE - len < addr)
2471 return -ENOMEM;
2472- if (!vma || addr + len <= vma->vm_start)
2473+ if (check_heap_stack_gap(vma, addr, len))
2474 return addr;
2475 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2476 if (addr < vma->vm_end) /* handle wraparound */
2477@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2478 if (flags & MAP_FIXED)
2479 return addr;
2480 if (!addr)
2481- addr = TASK_UNMAPPED_BASE;
2482+ addr = current->mm->mmap_base;
2483
2484 if (filp) {
2485 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2486diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2487index 8b58bf0..7afff03 100644
2488--- a/arch/parisc/kernel/traps.c
2489+++ b/arch/parisc/kernel/traps.c
2490@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2491
2492 down_read(&current->mm->mmap_sem);
2493 vma = find_vma(current->mm,regs->iaoq[0]);
2494- if (vma && (regs->iaoq[0] >= vma->vm_start)
2495- && (vma->vm_flags & VM_EXEC)) {
2496-
2497+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2498 fault_address = regs->iaoq[0];
2499 fault_space = regs->iasq[0];
2500
2501diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2502index c6afbfc..c5839f6 100644
2503--- a/arch/parisc/mm/fault.c
2504+++ b/arch/parisc/mm/fault.c
2505@@ -15,6 +15,7 @@
2506 #include <linux/sched.h>
2507 #include <linux/interrupt.h>
2508 #include <linux/module.h>
2509+#include <linux/unistd.h>
2510
2511 #include <asm/uaccess.h>
2512 #include <asm/traps.h>
2513@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2514 static unsigned long
2515 parisc_acctyp(unsigned long code, unsigned int inst)
2516 {
2517- if (code == 6 || code == 16)
2518+ if (code == 6 || code == 7 || code == 16)
2519 return VM_EXEC;
2520
2521 switch (inst & 0xf0000000) {
2522@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2523 }
2524 #endif
2525
2526+#ifdef CONFIG_PAX_PAGEEXEC
2527+/*
2528+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2529+ *
2530+ * returns 1 when task should be killed
2531+ * 2 when rt_sigreturn trampoline was detected
2532+ * 3 when unpatched PLT trampoline was detected
2533+ */
2534+static int pax_handle_fetch_fault(struct pt_regs *regs)
2535+{
2536+
2537+#ifdef CONFIG_PAX_EMUPLT
2538+ int err;
2539+
2540+ do { /* PaX: unpatched PLT emulation */
2541+ unsigned int bl, depwi;
2542+
2543+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2544+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2545+
2546+ if (err)
2547+ break;
2548+
2549+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2550+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2551+
2552+ err = get_user(ldw, (unsigned int *)addr);
2553+ err |= get_user(bv, (unsigned int *)(addr+4));
2554+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2555+
2556+ if (err)
2557+ break;
2558+
2559+ if (ldw == 0x0E801096U &&
2560+ bv == 0xEAC0C000U &&
2561+ ldw2 == 0x0E881095U)
2562+ {
2563+ unsigned int resolver, map;
2564+
2565+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2566+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2567+ if (err)
2568+ break;
2569+
2570+ regs->gr[20] = instruction_pointer(regs)+8;
2571+ regs->gr[21] = map;
2572+ regs->gr[22] = resolver;
2573+ regs->iaoq[0] = resolver | 3UL;
2574+ regs->iaoq[1] = regs->iaoq[0] + 4;
2575+ return 3;
2576+ }
2577+ }
2578+ } while (0);
2579+#endif
2580+
2581+#ifdef CONFIG_PAX_EMUTRAMP
2582+
2583+#ifndef CONFIG_PAX_EMUSIGRT
2584+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2585+ return 1;
2586+#endif
2587+
2588+ do { /* PaX: rt_sigreturn emulation */
2589+ unsigned int ldi1, ldi2, bel, nop;
2590+
2591+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2592+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2593+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2594+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2595+
2596+ if (err)
2597+ break;
2598+
2599+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2600+ ldi2 == 0x3414015AU &&
2601+ bel == 0xE4008200U &&
2602+ nop == 0x08000240U)
2603+ {
2604+ regs->gr[25] = (ldi1 & 2) >> 1;
2605+ regs->gr[20] = __NR_rt_sigreturn;
2606+ regs->gr[31] = regs->iaoq[1] + 16;
2607+ regs->sr[0] = regs->iasq[1];
2608+ regs->iaoq[0] = 0x100UL;
2609+ regs->iaoq[1] = regs->iaoq[0] + 4;
2610+ regs->iasq[0] = regs->sr[2];
2611+ regs->iasq[1] = regs->sr[2];
2612+ return 2;
2613+ }
2614+ } while (0);
2615+#endif
2616+
2617+ return 1;
2618+}
2619+
2620+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2621+{
2622+ unsigned long i;
2623+
2624+ printk(KERN_ERR "PAX: bytes at PC: ");
2625+ for (i = 0; i < 5; i++) {
2626+ unsigned int c;
2627+ if (get_user(c, (unsigned int *)pc+i))
2628+ printk(KERN_CONT "???????? ");
2629+ else
2630+ printk(KERN_CONT "%08x ", c);
2631+ }
2632+ printk("\n");
2633+}
2634+#endif
2635+
2636 int fixup_exception(struct pt_regs *regs)
2637 {
2638 const struct exception_table_entry *fix;
2639@@ -192,8 +303,33 @@ good_area:
2640
2641 acc_type = parisc_acctyp(code,regs->iir);
2642
2643- if ((vma->vm_flags & acc_type) != acc_type)
2644+ if ((vma->vm_flags & acc_type) != acc_type) {
2645+
2646+#ifdef CONFIG_PAX_PAGEEXEC
2647+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2648+ (address & ~3UL) == instruction_pointer(regs))
2649+ {
2650+ up_read(&mm->mmap_sem);
2651+ switch (pax_handle_fetch_fault(regs)) {
2652+
2653+#ifdef CONFIG_PAX_EMUPLT
2654+ case 3:
2655+ return;
2656+#endif
2657+
2658+#ifdef CONFIG_PAX_EMUTRAMP
2659+ case 2:
2660+ return;
2661+#endif
2662+
2663+ }
2664+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2665+ do_group_exit(SIGKILL);
2666+ }
2667+#endif
2668+
2669 goto bad_area;
2670+ }
2671
2672 /*
2673 * If for any reason at all we couldn't handle the fault, make
2674diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2675index c107b74..409dc0f 100644
2676--- a/arch/powerpc/Makefile
2677+++ b/arch/powerpc/Makefile
2678@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2679 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2680 CPP = $(CC) -E $(KBUILD_CFLAGS)
2681
2682+cflags-y += -Wno-sign-compare -Wno-extra
2683+
2684 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2685
2686 ifeq ($(CONFIG_PPC64),y)
2687diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2688index 6d94d27..50d4cad 100644
2689--- a/arch/powerpc/include/asm/device.h
2690+++ b/arch/powerpc/include/asm/device.h
2691@@ -14,7 +14,7 @@ struct dev_archdata {
2692 struct device_node *of_node;
2693
2694 /* DMA operations on that device */
2695- struct dma_map_ops *dma_ops;
2696+ const struct dma_map_ops *dma_ops;
2697
2698 /*
2699 * When an iommu is in use, dma_data is used as a ptr to the base of the
2700diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2701index e281dae..2b8a784 100644
2702--- a/arch/powerpc/include/asm/dma-mapping.h
2703+++ b/arch/powerpc/include/asm/dma-mapping.h
2704@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2705 #ifdef CONFIG_PPC64
2706 extern struct dma_map_ops dma_iommu_ops;
2707 #endif
2708-extern struct dma_map_ops dma_direct_ops;
2709+extern const struct dma_map_ops dma_direct_ops;
2710
2711-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2712+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2713 {
2714 /* We don't handle the NULL dev case for ISA for now. We could
2715 * do it via an out of line call but it is not needed for now. The
2716@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2717 return dev->archdata.dma_ops;
2718 }
2719
2720-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2721+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2722 {
2723 dev->archdata.dma_ops = ops;
2724 }
2725@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2726
2727 static inline int dma_supported(struct device *dev, u64 mask)
2728 {
2729- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2730+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2731
2732 if (unlikely(dma_ops == NULL))
2733 return 0;
2734@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2735
2736 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2737 {
2738- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2739+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2740
2741 if (unlikely(dma_ops == NULL))
2742 return -EIO;
2743@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2744 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2745 dma_addr_t *dma_handle, gfp_t flag)
2746 {
2747- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2748+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2749 void *cpu_addr;
2750
2751 BUG_ON(!dma_ops);
2752@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2753 static inline void dma_free_coherent(struct device *dev, size_t size,
2754 void *cpu_addr, dma_addr_t dma_handle)
2755 {
2756- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2757+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2758
2759 BUG_ON(!dma_ops);
2760
2761@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2762
2763 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2764 {
2765- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2766+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2767
2768 if (dma_ops->mapping_error)
2769 return dma_ops->mapping_error(dev, dma_addr);
2770diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2771index 5698502..5db093c 100644
2772--- a/arch/powerpc/include/asm/elf.h
2773+++ b/arch/powerpc/include/asm/elf.h
2774@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2775 the loader. We need to make sure that it is out of the way of the program
2776 that it will "exec", and that there is sufficient room for the brk. */
2777
2778-extern unsigned long randomize_et_dyn(unsigned long base);
2779-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2780+#define ELF_ET_DYN_BASE (0x20000000)
2781+
2782+#ifdef CONFIG_PAX_ASLR
2783+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2784+
2785+#ifdef __powerpc64__
2786+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2787+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2788+#else
2789+#define PAX_DELTA_MMAP_LEN 15
2790+#define PAX_DELTA_STACK_LEN 15
2791+#endif
2792+#endif
2793
2794 /*
2795 * Our registers are always unsigned longs, whether we're a 32 bit
2796@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2797 (0x7ff >> (PAGE_SHIFT - 12)) : \
2798 (0x3ffff >> (PAGE_SHIFT - 12)))
2799
2800-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2801-#define arch_randomize_brk arch_randomize_brk
2802-
2803 #endif /* __KERNEL__ */
2804
2805 /*
2806diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2807index edfc980..1766f59 100644
2808--- a/arch/powerpc/include/asm/iommu.h
2809+++ b/arch/powerpc/include/asm/iommu.h
2810@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2811 extern void iommu_init_early_dart(void);
2812 extern void iommu_init_early_pasemi(void);
2813
2814+/* dma-iommu.c */
2815+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2816+
2817 #ifdef CONFIG_PCI
2818 extern void pci_iommu_init(void);
2819 extern void pci_direct_iommu_init(void);
2820diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2821index 9163695..5a00112 100644
2822--- a/arch/powerpc/include/asm/kmap_types.h
2823+++ b/arch/powerpc/include/asm/kmap_types.h
2824@@ -26,6 +26,7 @@ enum km_type {
2825 KM_SOFTIRQ1,
2826 KM_PPC_SYNC_PAGE,
2827 KM_PPC_SYNC_ICACHE,
2828+ KM_CLEARPAGE,
2829 KM_TYPE_NR
2830 };
2831
2832diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2833index ff24254..fe45b21 100644
2834--- a/arch/powerpc/include/asm/page.h
2835+++ b/arch/powerpc/include/asm/page.h
2836@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2837 * and needs to be executable. This means the whole heap ends
2838 * up being executable.
2839 */
2840-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2841- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2842+#define VM_DATA_DEFAULT_FLAGS32 \
2843+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2844+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2845
2846 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2847 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2848@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2849 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2850 #endif
2851
2852+#define ktla_ktva(addr) (addr)
2853+#define ktva_ktla(addr) (addr)
2854+
2855 #ifndef __ASSEMBLY__
2856
2857 #undef STRICT_MM_TYPECHECKS
2858diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2859index 3f17b83..1f9e766 100644
2860--- a/arch/powerpc/include/asm/page_64.h
2861+++ b/arch/powerpc/include/asm/page_64.h
2862@@ -180,15 +180,18 @@ do { \
2863 * stack by default, so in the absense of a PT_GNU_STACK program header
2864 * we turn execute permission off.
2865 */
2866-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2867- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2868+#define VM_STACK_DEFAULT_FLAGS32 \
2869+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2870+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2871
2872 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2873 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2874
2875+#ifndef CONFIG_PAX_PAGEEXEC
2876 #define VM_STACK_DEFAULT_FLAGS \
2877 (test_thread_flag(TIF_32BIT) ? \
2878 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2879+#endif
2880
2881 #include <asm-generic/getorder.h>
2882
2883diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2884index b5ea626..4030822 100644
2885--- a/arch/powerpc/include/asm/pci.h
2886+++ b/arch/powerpc/include/asm/pci.h
2887@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2888 }
2889
2890 #ifdef CONFIG_PCI
2891-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2892-extern struct dma_map_ops *get_pci_dma_ops(void);
2893+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2894+extern const struct dma_map_ops *get_pci_dma_ops(void);
2895 #else /* CONFIG_PCI */
2896 #define set_pci_dma_ops(d)
2897 #define get_pci_dma_ops() NULL
2898diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2899index 2a5da06..d65bea2 100644
2900--- a/arch/powerpc/include/asm/pgtable.h
2901+++ b/arch/powerpc/include/asm/pgtable.h
2902@@ -2,6 +2,7 @@
2903 #define _ASM_POWERPC_PGTABLE_H
2904 #ifdef __KERNEL__
2905
2906+#include <linux/const.h>
2907 #ifndef __ASSEMBLY__
2908 #include <asm/processor.h> /* For TASK_SIZE */
2909 #include <asm/mmu.h>
2910diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2911index 4aad413..85d86bf 100644
2912--- a/arch/powerpc/include/asm/pte-hash32.h
2913+++ b/arch/powerpc/include/asm/pte-hash32.h
2914@@ -21,6 +21,7 @@
2915 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2916 #define _PAGE_USER 0x004 /* usermode access allowed */
2917 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2918+#define _PAGE_EXEC _PAGE_GUARDED
2919 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2920 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2921 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2922diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2923index 8c34149..78f425a 100644
2924--- a/arch/powerpc/include/asm/ptrace.h
2925+++ b/arch/powerpc/include/asm/ptrace.h
2926@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2927 } while(0)
2928
2929 struct task_struct;
2930-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2931+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2932 extern int ptrace_put_reg(struct task_struct *task, int regno,
2933 unsigned long data);
2934
2935diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2936index 32a7c30..be3a8bb 100644
2937--- a/arch/powerpc/include/asm/reg.h
2938+++ b/arch/powerpc/include/asm/reg.h
2939@@ -191,6 +191,7 @@
2940 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2941 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2942 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2943+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2944 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2945 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2946 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2947diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2948index 8979d4c..d2fd0d3 100644
2949--- a/arch/powerpc/include/asm/swiotlb.h
2950+++ b/arch/powerpc/include/asm/swiotlb.h
2951@@ -13,7 +13,7 @@
2952
2953 #include <linux/swiotlb.h>
2954
2955-extern struct dma_map_ops swiotlb_dma_ops;
2956+extern const struct dma_map_ops swiotlb_dma_ops;
2957
2958 static inline void dma_mark_clean(void *addr, size_t size) {}
2959
2960diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2961index 094a12a..877a60a 100644
2962--- a/arch/powerpc/include/asm/system.h
2963+++ b/arch/powerpc/include/asm/system.h
2964@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2965 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2966 #endif
2967
2968-extern unsigned long arch_align_stack(unsigned long sp);
2969+#define arch_align_stack(x) ((x) & ~0xfUL)
2970
2971 /* Used in very early kernel initialization. */
2972 extern unsigned long reloc_offset(void);
2973diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2974index bd0fb84..a42a14b 100644
2975--- a/arch/powerpc/include/asm/uaccess.h
2976+++ b/arch/powerpc/include/asm/uaccess.h
2977@@ -13,6 +13,8 @@
2978 #define VERIFY_READ 0
2979 #define VERIFY_WRITE 1
2980
2981+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2982+
2983 /*
2984 * The fs value determines whether argument validity checking should be
2985 * performed or not. If get_fs() == USER_DS, checking is performed, with
2986@@ -327,52 +329,6 @@ do { \
2987 extern unsigned long __copy_tofrom_user(void __user *to,
2988 const void __user *from, unsigned long size);
2989
2990-#ifndef __powerpc64__
2991-
2992-static inline unsigned long copy_from_user(void *to,
2993- const void __user *from, unsigned long n)
2994-{
2995- unsigned long over;
2996-
2997- if (access_ok(VERIFY_READ, from, n))
2998- return __copy_tofrom_user((__force void __user *)to, from, n);
2999- if ((unsigned long)from < TASK_SIZE) {
3000- over = (unsigned long)from + n - TASK_SIZE;
3001- return __copy_tofrom_user((__force void __user *)to, from,
3002- n - over) + over;
3003- }
3004- return n;
3005-}
3006-
3007-static inline unsigned long copy_to_user(void __user *to,
3008- const void *from, unsigned long n)
3009-{
3010- unsigned long over;
3011-
3012- if (access_ok(VERIFY_WRITE, to, n))
3013- return __copy_tofrom_user(to, (__force void __user *)from, n);
3014- if ((unsigned long)to < TASK_SIZE) {
3015- over = (unsigned long)to + n - TASK_SIZE;
3016- return __copy_tofrom_user(to, (__force void __user *)from,
3017- n - over) + over;
3018- }
3019- return n;
3020-}
3021-
3022-#else /* __powerpc64__ */
3023-
3024-#define __copy_in_user(to, from, size) \
3025- __copy_tofrom_user((to), (from), (size))
3026-
3027-extern unsigned long copy_from_user(void *to, const void __user *from,
3028- unsigned long n);
3029-extern unsigned long copy_to_user(void __user *to, const void *from,
3030- unsigned long n);
3031-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3032- unsigned long n);
3033-
3034-#endif /* __powerpc64__ */
3035-
3036 static inline unsigned long __copy_from_user_inatomic(void *to,
3037 const void __user *from, unsigned long n)
3038 {
3039@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3040 if (ret == 0)
3041 return 0;
3042 }
3043+
3044+ if (!__builtin_constant_p(n))
3045+ check_object_size(to, n, false);
3046+
3047 return __copy_tofrom_user((__force void __user *)to, from, n);
3048 }
3049
3050@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3051 if (ret == 0)
3052 return 0;
3053 }
3054+
3055+ if (!__builtin_constant_p(n))
3056+ check_object_size(from, n, true);
3057+
3058 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3059 }
3060
3061@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3062 return __copy_to_user_inatomic(to, from, size);
3063 }
3064
3065+#ifndef __powerpc64__
3066+
3067+static inline unsigned long __must_check copy_from_user(void *to,
3068+ const void __user *from, unsigned long n)
3069+{
3070+ unsigned long over;
3071+
3072+ if ((long)n < 0)
3073+ return n;
3074+
3075+ if (access_ok(VERIFY_READ, from, n)) {
3076+ if (!__builtin_constant_p(n))
3077+ check_object_size(to, n, false);
3078+ return __copy_tofrom_user((__force void __user *)to, from, n);
3079+ }
3080+ if ((unsigned long)from < TASK_SIZE) {
3081+ over = (unsigned long)from + n - TASK_SIZE;
3082+ if (!__builtin_constant_p(n - over))
3083+ check_object_size(to, n - over, false);
3084+ return __copy_tofrom_user((__force void __user *)to, from,
3085+ n - over) + over;
3086+ }
3087+ return n;
3088+}
3089+
3090+static inline unsigned long __must_check copy_to_user(void __user *to,
3091+ const void *from, unsigned long n)
3092+{
3093+ unsigned long over;
3094+
3095+ if ((long)n < 0)
3096+ return n;
3097+
3098+ if (access_ok(VERIFY_WRITE, to, n)) {
3099+ if (!__builtin_constant_p(n))
3100+ check_object_size(from, n, true);
3101+ return __copy_tofrom_user(to, (__force void __user *)from, n);
3102+ }
3103+ if ((unsigned long)to < TASK_SIZE) {
3104+ over = (unsigned long)to + n - TASK_SIZE;
3105+ if (!__builtin_constant_p(n))
3106+ check_object_size(from, n - over, true);
3107+ return __copy_tofrom_user(to, (__force void __user *)from,
3108+ n - over) + over;
3109+ }
3110+ return n;
3111+}
3112+
3113+#else /* __powerpc64__ */
3114+
3115+#define __copy_in_user(to, from, size) \
3116+ __copy_tofrom_user((to), (from), (size))
3117+
3118+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3119+{
3120+ if ((long)n < 0 || n > INT_MAX)
3121+ return n;
3122+
3123+ if (!__builtin_constant_p(n))
3124+ check_object_size(to, n, false);
3125+
3126+ if (likely(access_ok(VERIFY_READ, from, n)))
3127+ n = __copy_from_user(to, from, n);
3128+ else
3129+ memset(to, 0, n);
3130+ return n;
3131+}
3132+
3133+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3134+{
3135+ if ((long)n < 0 || n > INT_MAX)
3136+ return n;
3137+
3138+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
3139+ if (!__builtin_constant_p(n))
3140+ check_object_size(from, n, true);
3141+ n = __copy_to_user(to, from, n);
3142+ }
3143+ return n;
3144+}
3145+
3146+extern unsigned long copy_in_user(void __user *to, const void __user *from,
3147+ unsigned long n);
3148+
3149+#endif /* __powerpc64__ */
3150+
3151 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3152
3153 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3154diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3155index bb37b1d..01fe9ce 100644
3156--- a/arch/powerpc/kernel/cacheinfo.c
3157+++ b/arch/powerpc/kernel/cacheinfo.c
3158@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3159 &cache_assoc_attr,
3160 };
3161
3162-static struct sysfs_ops cache_index_ops = {
3163+static const struct sysfs_ops cache_index_ops = {
3164 .show = cache_index_show,
3165 };
3166
3167diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3168index 37771a5..648530c 100644
3169--- a/arch/powerpc/kernel/dma-iommu.c
3170+++ b/arch/powerpc/kernel/dma-iommu.c
3171@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3172 }
3173
3174 /* We support DMA to/from any memory page via the iommu */
3175-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3176+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3177 {
3178 struct iommu_table *tbl = get_iommu_table_base(dev);
3179
3180diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3181index e96cbbd..bdd6d41 100644
3182--- a/arch/powerpc/kernel/dma-swiotlb.c
3183+++ b/arch/powerpc/kernel/dma-swiotlb.c
3184@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3185 * map_page, and unmap_page on highmem, use normal dma_ops
3186 * for everything else.
3187 */
3188-struct dma_map_ops swiotlb_dma_ops = {
3189+const struct dma_map_ops swiotlb_dma_ops = {
3190 .alloc_coherent = dma_direct_alloc_coherent,
3191 .free_coherent = dma_direct_free_coherent,
3192 .map_sg = swiotlb_map_sg_attrs,
3193diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3194index 6215062..ebea59c 100644
3195--- a/arch/powerpc/kernel/dma.c
3196+++ b/arch/powerpc/kernel/dma.c
3197@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3198 }
3199 #endif
3200
3201-struct dma_map_ops dma_direct_ops = {
3202+const struct dma_map_ops dma_direct_ops = {
3203 .alloc_coherent = dma_direct_alloc_coherent,
3204 .free_coherent = dma_direct_free_coherent,
3205 .map_sg = dma_direct_map_sg,
3206diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3207index 24dcc0e..a300455 100644
3208--- a/arch/powerpc/kernel/exceptions-64e.S
3209+++ b/arch/powerpc/kernel/exceptions-64e.S
3210@@ -455,6 +455,7 @@ storage_fault_common:
3211 std r14,_DAR(r1)
3212 std r15,_DSISR(r1)
3213 addi r3,r1,STACK_FRAME_OVERHEAD
3214+ bl .save_nvgprs
3215 mr r4,r14
3216 mr r5,r15
3217 ld r14,PACA_EXGEN+EX_R14(r13)
3218@@ -464,8 +465,7 @@ storage_fault_common:
3219 cmpdi r3,0
3220 bne- 1f
3221 b .ret_from_except_lite
3222-1: bl .save_nvgprs
3223- mr r5,r3
3224+1: mr r5,r3
3225 addi r3,r1,STACK_FRAME_OVERHEAD
3226 ld r4,_DAR(r1)
3227 bl .bad_page_fault
3228diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3229index 1808876..9fd206a 100644
3230--- a/arch/powerpc/kernel/exceptions-64s.S
3231+++ b/arch/powerpc/kernel/exceptions-64s.S
3232@@ -818,10 +818,10 @@ handle_page_fault:
3233 11: ld r4,_DAR(r1)
3234 ld r5,_DSISR(r1)
3235 addi r3,r1,STACK_FRAME_OVERHEAD
3236+ bl .save_nvgprs
3237 bl .do_page_fault
3238 cmpdi r3,0
3239 beq+ 13f
3240- bl .save_nvgprs
3241 mr r5,r3
3242 addi r3,r1,STACK_FRAME_OVERHEAD
3243 lwz r4,_DAR(r1)
3244diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3245index a4c8b38..1b09ad9 100644
3246--- a/arch/powerpc/kernel/ibmebus.c
3247+++ b/arch/powerpc/kernel/ibmebus.c
3248@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3249 return 1;
3250 }
3251
3252-static struct dma_map_ops ibmebus_dma_ops = {
3253+static const struct dma_map_ops ibmebus_dma_ops = {
3254 .alloc_coherent = ibmebus_alloc_coherent,
3255 .free_coherent = ibmebus_free_coherent,
3256 .map_sg = ibmebus_map_sg,
3257diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3258index 641c74b..8339ad7 100644
3259--- a/arch/powerpc/kernel/kgdb.c
3260+++ b/arch/powerpc/kernel/kgdb.c
3261@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3262 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3263 return 0;
3264
3265- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3266+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3267 regs->nip += 4;
3268
3269 return 1;
3270@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3271 /*
3272 * Global data
3273 */
3274-struct kgdb_arch arch_kgdb_ops = {
3275+const struct kgdb_arch arch_kgdb_ops = {
3276 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3277 };
3278
3279diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3280index 477c663..4f50234 100644
3281--- a/arch/powerpc/kernel/module.c
3282+++ b/arch/powerpc/kernel/module.c
3283@@ -31,11 +31,24 @@
3284
3285 LIST_HEAD(module_bug_list);
3286
3287+#ifdef CONFIG_PAX_KERNEXEC
3288 void *module_alloc(unsigned long size)
3289 {
3290 if (size == 0)
3291 return NULL;
3292
3293+ return vmalloc(size);
3294+}
3295+
3296+void *module_alloc_exec(unsigned long size)
3297+#else
3298+void *module_alloc(unsigned long size)
3299+#endif
3300+
3301+{
3302+ if (size == 0)
3303+ return NULL;
3304+
3305 return vmalloc_exec(size);
3306 }
3307
3308@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3309 vfree(module_region);
3310 }
3311
3312+#ifdef CONFIG_PAX_KERNEXEC
3313+void module_free_exec(struct module *mod, void *module_region)
3314+{
3315+ module_free(mod, module_region);
3316+}
3317+#endif
3318+
3319 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3320 const Elf_Shdr *sechdrs,
3321 const char *name)
3322diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3323index f832773..0507238 100644
3324--- a/arch/powerpc/kernel/module_32.c
3325+++ b/arch/powerpc/kernel/module_32.c
3326@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3327 me->arch.core_plt_section = i;
3328 }
3329 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3330- printk("Module doesn't contain .plt or .init.plt sections.\n");
3331+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3332 return -ENOEXEC;
3333 }
3334
3335@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3336
3337 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3338 /* Init, or core PLT? */
3339- if (location >= mod->module_core
3340- && location < mod->module_core + mod->core_size)
3341+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3342+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3343 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3344- else
3345+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3346+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3347 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3348+ else {
3349+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3350+ return ~0UL;
3351+ }
3352
3353 /* Find this entry, or if that fails, the next avail. entry */
3354 while (entry->jump[0]) {
3355diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3356index cadbed6..b9bbb00 100644
3357--- a/arch/powerpc/kernel/pci-common.c
3358+++ b/arch/powerpc/kernel/pci-common.c
3359@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3360 unsigned int ppc_pci_flags = 0;
3361
3362
3363-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3364+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3365
3366-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3367+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3368 {
3369 pci_dma_ops = dma_ops;
3370 }
3371
3372-struct dma_map_ops *get_pci_dma_ops(void)
3373+const struct dma_map_ops *get_pci_dma_ops(void)
3374 {
3375 return pci_dma_ops;
3376 }
3377diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3378index 7b816da..8d5c277 100644
3379--- a/arch/powerpc/kernel/process.c
3380+++ b/arch/powerpc/kernel/process.c
3381@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3382 * Lookup NIP late so we have the best change of getting the
3383 * above info out without failing
3384 */
3385- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3386- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3387+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3388+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3389 #endif
3390 show_stack(current, (unsigned long *) regs->gpr[1]);
3391 if (!user_mode(regs))
3392@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3393 newsp = stack[0];
3394 ip = stack[STACK_FRAME_LR_SAVE];
3395 if (!firstframe || ip != lr) {
3396- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3397+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3398 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3399 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3400- printk(" (%pS)",
3401+ printk(" (%pA)",
3402 (void *)current->ret_stack[curr_frame].ret);
3403 curr_frame--;
3404 }
3405@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3406 struct pt_regs *regs = (struct pt_regs *)
3407 (sp + STACK_FRAME_OVERHEAD);
3408 lr = regs->link;
3409- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3410+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3411 regs->trap, (void *)regs->nip, (void *)lr);
3412 firstframe = 1;
3413 }
3414@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3415 }
3416
3417 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3418-
3419-unsigned long arch_align_stack(unsigned long sp)
3420-{
3421- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3422- sp -= get_random_int() & ~PAGE_MASK;
3423- return sp & ~0xf;
3424-}
3425-
3426-static inline unsigned long brk_rnd(void)
3427-{
3428- unsigned long rnd = 0;
3429-
3430- /* 8MB for 32bit, 1GB for 64bit */
3431- if (is_32bit_task())
3432- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3433- else
3434- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3435-
3436- return rnd << PAGE_SHIFT;
3437-}
3438-
3439-unsigned long arch_randomize_brk(struct mm_struct *mm)
3440-{
3441- unsigned long base = mm->brk;
3442- unsigned long ret;
3443-
3444-#ifdef CONFIG_PPC_STD_MMU_64
3445- /*
3446- * If we are using 1TB segments and we are allowed to randomise
3447- * the heap, we can put it above 1TB so it is backed by a 1TB
3448- * segment. Otherwise the heap will be in the bottom 1TB
3449- * which always uses 256MB segments and this may result in a
3450- * performance penalty.
3451- */
3452- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3453- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3454-#endif
3455-
3456- ret = PAGE_ALIGN(base + brk_rnd());
3457-
3458- if (ret < mm->brk)
3459- return mm->brk;
3460-
3461- return ret;
3462-}
3463-
3464-unsigned long randomize_et_dyn(unsigned long base)
3465-{
3466- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3467-
3468- if (ret < base)
3469- return base;
3470-
3471- return ret;
3472-}
3473diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3474index ef14988..856c4bc 100644
3475--- a/arch/powerpc/kernel/ptrace.c
3476+++ b/arch/powerpc/kernel/ptrace.c
3477@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3478 /*
3479 * Get contents of register REGNO in task TASK.
3480 */
3481-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3482+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3483 {
3484 if (task->thread.regs == NULL)
3485 return -EIO;
3486@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3487
3488 CHECK_FULL_REGS(child->thread.regs);
3489 if (index < PT_FPR0) {
3490- tmp = ptrace_get_reg(child, (int) index);
3491+ tmp = ptrace_get_reg(child, index);
3492 } else {
3493 flush_fp_to_thread(child);
3494 tmp = ((unsigned long *)child->thread.fpr)
3495diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3496index d670429..2bc59b2 100644
3497--- a/arch/powerpc/kernel/signal_32.c
3498+++ b/arch/powerpc/kernel/signal_32.c
3499@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3500 /* Save user registers on the stack */
3501 frame = &rt_sf->uc.uc_mcontext;
3502 addr = frame;
3503- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3504+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3505 if (save_user_regs(regs, frame, 0, 1))
3506 goto badframe;
3507 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3508diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3509index 2fe6fc6..ada0d96 100644
3510--- a/arch/powerpc/kernel/signal_64.c
3511+++ b/arch/powerpc/kernel/signal_64.c
3512@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3513 current->thread.fpscr.val = 0;
3514
3515 /* Set up to return from userspace. */
3516- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3517+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3518 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3519 } else {
3520 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3521diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3522index b97c2d6..dd01a6a 100644
3523--- a/arch/powerpc/kernel/sys_ppc32.c
3524+++ b/arch/powerpc/kernel/sys_ppc32.c
3525@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3526 if (oldlenp) {
3527 if (!error) {
3528 if (get_user(oldlen, oldlenp) ||
3529- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3530+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3531+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3532 error = -EFAULT;
3533 }
3534- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3535 }
3536 return error;
3537 }
3538diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3539index 6f0ae1a..e4b6a56 100644
3540--- a/arch/powerpc/kernel/traps.c
3541+++ b/arch/powerpc/kernel/traps.c
3542@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3543 static inline void pmac_backlight_unblank(void) { }
3544 #endif
3545
3546+extern void gr_handle_kernel_exploit(void);
3547+
3548 int die(const char *str, struct pt_regs *regs, long err)
3549 {
3550 static struct {
3551@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3552 if (panic_on_oops)
3553 panic("Fatal exception");
3554
3555+ gr_handle_kernel_exploit();
3556+
3557 oops_exit();
3558 do_exit(err);
3559
3560diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3561index 137dc22..fe57a79 100644
3562--- a/arch/powerpc/kernel/vdso.c
3563+++ b/arch/powerpc/kernel/vdso.c
3564@@ -36,6 +36,7 @@
3565 #include <asm/firmware.h>
3566 #include <asm/vdso.h>
3567 #include <asm/vdso_datapage.h>
3568+#include <asm/mman.h>
3569
3570 #include "setup.h"
3571
3572@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3573 vdso_base = VDSO32_MBASE;
3574 #endif
3575
3576- current->mm->context.vdso_base = 0;
3577+ current->mm->context.vdso_base = ~0UL;
3578
3579 /* vDSO has a problem and was disabled, just don't "enable" it for the
3580 * process
3581@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3582 vdso_base = get_unmapped_area(NULL, vdso_base,
3583 (vdso_pages << PAGE_SHIFT) +
3584 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3585- 0, 0);
3586+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3587 if (IS_ERR_VALUE(vdso_base)) {
3588 rc = vdso_base;
3589 goto fail_mmapsem;
3590diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3591index 77f6421..829564a 100644
3592--- a/arch/powerpc/kernel/vio.c
3593+++ b/arch/powerpc/kernel/vio.c
3594@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3595 vio_cmo_dealloc(viodev, alloc_size);
3596 }
3597
3598-struct dma_map_ops vio_dma_mapping_ops = {
3599+static const struct dma_map_ops vio_dma_mapping_ops = {
3600 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3601 .free_coherent = vio_dma_iommu_free_coherent,
3602 .map_sg = vio_dma_iommu_map_sg,
3603 .unmap_sg = vio_dma_iommu_unmap_sg,
3604+ .dma_supported = dma_iommu_dma_supported,
3605 .map_page = vio_dma_iommu_map_page,
3606 .unmap_page = vio_dma_iommu_unmap_page,
3607
3608@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3609
3610 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3611 {
3612- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3613 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3614 }
3615
3616diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3617index 5eea6f3..5d10396 100644
3618--- a/arch/powerpc/lib/usercopy_64.c
3619+++ b/arch/powerpc/lib/usercopy_64.c
3620@@ -9,22 +9,6 @@
3621 #include <linux/module.h>
3622 #include <asm/uaccess.h>
3623
3624-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3625-{
3626- if (likely(access_ok(VERIFY_READ, from, n)))
3627- n = __copy_from_user(to, from, n);
3628- else
3629- memset(to, 0, n);
3630- return n;
3631-}
3632-
3633-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3634-{
3635- if (likely(access_ok(VERIFY_WRITE, to, n)))
3636- n = __copy_to_user(to, from, n);
3637- return n;
3638-}
3639-
3640 unsigned long copy_in_user(void __user *to, const void __user *from,
3641 unsigned long n)
3642 {
3643@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3644 return n;
3645 }
3646
3647-EXPORT_SYMBOL(copy_from_user);
3648-EXPORT_SYMBOL(copy_to_user);
3649 EXPORT_SYMBOL(copy_in_user);
3650
3651diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3652index e7dae82..877ce0d 100644
3653--- a/arch/powerpc/mm/fault.c
3654+++ b/arch/powerpc/mm/fault.c
3655@@ -30,6 +30,10 @@
3656 #include <linux/kprobes.h>
3657 #include <linux/kdebug.h>
3658 #include <linux/perf_event.h>
3659+#include <linux/slab.h>
3660+#include <linux/pagemap.h>
3661+#include <linux/compiler.h>
3662+#include <linux/unistd.h>
3663
3664 #include <asm/firmware.h>
3665 #include <asm/page.h>
3666@@ -40,6 +44,7 @@
3667 #include <asm/uaccess.h>
3668 #include <asm/tlbflush.h>
3669 #include <asm/siginfo.h>
3670+#include <asm/ptrace.h>
3671
3672
3673 #ifdef CONFIG_KPROBES
3674@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3675 }
3676 #endif
3677
3678+#ifdef CONFIG_PAX_PAGEEXEC
3679+/*
3680+ * PaX: decide what to do with offenders (regs->nip = fault address)
3681+ *
3682+ * returns 1 when task should be killed
3683+ */
3684+static int pax_handle_fetch_fault(struct pt_regs *regs)
3685+{
3686+ return 1;
3687+}
3688+
3689+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3690+{
3691+ unsigned long i;
3692+
3693+ printk(KERN_ERR "PAX: bytes at PC: ");
3694+ for (i = 0; i < 5; i++) {
3695+ unsigned int c;
3696+ if (get_user(c, (unsigned int __user *)pc+i))
3697+ printk(KERN_CONT "???????? ");
3698+ else
3699+ printk(KERN_CONT "%08x ", c);
3700+ }
3701+ printk("\n");
3702+}
3703+#endif
3704+
3705 /*
3706 * Check whether the instruction at regs->nip is a store using
3707 * an update addressing form which will update r1.
3708@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3709 * indicate errors in DSISR but can validly be set in SRR1.
3710 */
3711 if (trap == 0x400)
3712- error_code &= 0x48200000;
3713+ error_code &= 0x58200000;
3714 else
3715 is_write = error_code & DSISR_ISSTORE;
3716 #else
3717@@ -250,7 +282,7 @@ good_area:
3718 * "undefined". Of those that can be set, this is the only
3719 * one which seems bad.
3720 */
3721- if (error_code & 0x10000000)
3722+ if (error_code & DSISR_GUARDED)
3723 /* Guarded storage error. */
3724 goto bad_area;
3725 #endif /* CONFIG_8xx */
3726@@ -265,7 +297,7 @@ good_area:
3727 * processors use the same I/D cache coherency mechanism
3728 * as embedded.
3729 */
3730- if (error_code & DSISR_PROTFAULT)
3731+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3732 goto bad_area;
3733 #endif /* CONFIG_PPC_STD_MMU */
3734
3735@@ -335,6 +367,23 @@ bad_area:
3736 bad_area_nosemaphore:
3737 /* User mode accesses cause a SIGSEGV */
3738 if (user_mode(regs)) {
3739+
3740+#ifdef CONFIG_PAX_PAGEEXEC
3741+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3742+#ifdef CONFIG_PPC_STD_MMU
3743+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3744+#else
3745+ if (is_exec && regs->nip == address) {
3746+#endif
3747+ switch (pax_handle_fetch_fault(regs)) {
3748+ }
3749+
3750+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3751+ do_group_exit(SIGKILL);
3752+ }
3753+ }
3754+#endif
3755+
3756 _exception(SIGSEGV, regs, code, address);
3757 return 0;
3758 }
3759diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3760index 5973631..ad617af 100644
3761--- a/arch/powerpc/mm/mem.c
3762+++ b/arch/powerpc/mm/mem.c
3763@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3764 {
3765 unsigned long lmb_next_region_start_pfn,
3766 lmb_region_max_pfn;
3767- int i;
3768+ unsigned int i;
3769
3770 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3771 lmb_region_max_pfn =
3772diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3773index 0d957a4..26d968f 100644
3774--- a/arch/powerpc/mm/mmap_64.c
3775+++ b/arch/powerpc/mm/mmap_64.c
3776@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3777 */
3778 if (mmap_is_legacy()) {
3779 mm->mmap_base = TASK_UNMAPPED_BASE;
3780+
3781+#ifdef CONFIG_PAX_RANDMMAP
3782+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3783+ mm->mmap_base += mm->delta_mmap;
3784+#endif
3785+
3786 mm->get_unmapped_area = arch_get_unmapped_area;
3787 mm->unmap_area = arch_unmap_area;
3788 } else {
3789 mm->mmap_base = mmap_base();
3790+
3791+#ifdef CONFIG_PAX_RANDMMAP
3792+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3793+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3794+#endif
3795+
3796 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3797 mm->unmap_area = arch_unmap_area_topdown;
3798 }
3799diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3800index ba51948..23009d9 100644
3801--- a/arch/powerpc/mm/slice.c
3802+++ b/arch/powerpc/mm/slice.c
3803@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3804 if ((mm->task_size - len) < addr)
3805 return 0;
3806 vma = find_vma(mm, addr);
3807- return (!vma || (addr + len) <= vma->vm_start);
3808+ return check_heap_stack_gap(vma, addr, len);
3809 }
3810
3811 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3812@@ -256,7 +256,7 @@ full_search:
3813 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3814 continue;
3815 }
3816- if (!vma || addr + len <= vma->vm_start) {
3817+ if (check_heap_stack_gap(vma, addr, len)) {
3818 /*
3819 * Remember the place where we stopped the search:
3820 */
3821@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3822 }
3823 }
3824
3825- addr = mm->mmap_base;
3826- while (addr > len) {
3827+ if (mm->mmap_base < len)
3828+ addr = -ENOMEM;
3829+ else
3830+ addr = mm->mmap_base - len;
3831+
3832+ while (!IS_ERR_VALUE(addr)) {
3833 /* Go down by chunk size */
3834- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3835+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3836
3837 /* Check for hit with different page size */
3838 mask = slice_range_to_mask(addr, len);
3839@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3840 * return with success:
3841 */
3842 vma = find_vma(mm, addr);
3843- if (!vma || (addr + len) <= vma->vm_start) {
3844+ if (check_heap_stack_gap(vma, addr, len)) {
3845 /* remember the address as a hint for next time */
3846 if (use_cache)
3847 mm->free_area_cache = addr;
3848@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3849 mm->cached_hole_size = vma->vm_start - addr;
3850
3851 /* try just below the current vma->vm_start */
3852- addr = vma->vm_start;
3853+ addr = skip_heap_stack_gap(vma, len);
3854 }
3855
3856 /*
3857@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3858 if (fixed && addr > (mm->task_size - len))
3859 return -EINVAL;
3860
3861+#ifdef CONFIG_PAX_RANDMMAP
3862+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3863+ addr = 0;
3864+#endif
3865+
3866 /* If hint, make sure it matches our alignment restrictions */
3867 if (!fixed && addr) {
3868 addr = _ALIGN_UP(addr, 1ul << pshift);
3869diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3870index b5c753d..8f01abe 100644
3871--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3872+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3873@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3874 lite5200_pm_target_state = PM_SUSPEND_ON;
3875 }
3876
3877-static struct platform_suspend_ops lite5200_pm_ops = {
3878+static const struct platform_suspend_ops lite5200_pm_ops = {
3879 .valid = lite5200_pm_valid,
3880 .begin = lite5200_pm_begin,
3881 .prepare = lite5200_pm_prepare,
3882diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3883index a55b0b6..478c18e 100644
3884--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3885+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3886@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3887 iounmap(mbar);
3888 }
3889
3890-static struct platform_suspend_ops mpc52xx_pm_ops = {
3891+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3892 .valid = mpc52xx_pm_valid,
3893 .prepare = mpc52xx_pm_prepare,
3894 .enter = mpc52xx_pm_enter,
3895diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3896index 08e65fc..643d3ac 100644
3897--- a/arch/powerpc/platforms/83xx/suspend.c
3898+++ b/arch/powerpc/platforms/83xx/suspend.c
3899@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3900 return ret;
3901 }
3902
3903-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3904+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3905 .valid = mpc83xx_suspend_valid,
3906 .begin = mpc83xx_suspend_begin,
3907 .enter = mpc83xx_suspend_enter,
3908diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3909index ca5bfdf..1602e09 100644
3910--- a/arch/powerpc/platforms/cell/iommu.c
3911+++ b/arch/powerpc/platforms/cell/iommu.c
3912@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3913
3914 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3915
3916-struct dma_map_ops dma_iommu_fixed_ops = {
3917+const struct dma_map_ops dma_iommu_fixed_ops = {
3918 .alloc_coherent = dma_fixed_alloc_coherent,
3919 .free_coherent = dma_fixed_free_coherent,
3920 .map_sg = dma_fixed_map_sg,
3921diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3922index e34b305..20e48ec 100644
3923--- a/arch/powerpc/platforms/ps3/system-bus.c
3924+++ b/arch/powerpc/platforms/ps3/system-bus.c
3925@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3926 return mask >= DMA_BIT_MASK(32);
3927 }
3928
3929-static struct dma_map_ops ps3_sb_dma_ops = {
3930+static const struct dma_map_ops ps3_sb_dma_ops = {
3931 .alloc_coherent = ps3_alloc_coherent,
3932 .free_coherent = ps3_free_coherent,
3933 .map_sg = ps3_sb_map_sg,
3934@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3935 .unmap_page = ps3_unmap_page,
3936 };
3937
3938-static struct dma_map_ops ps3_ioc0_dma_ops = {
3939+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3940 .alloc_coherent = ps3_alloc_coherent,
3941 .free_coherent = ps3_free_coherent,
3942 .map_sg = ps3_ioc0_map_sg,
3943diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3944index f0e6f28..60d53ed 100644
3945--- a/arch/powerpc/platforms/pseries/Kconfig
3946+++ b/arch/powerpc/platforms/pseries/Kconfig
3947@@ -2,6 +2,8 @@ config PPC_PSERIES
3948 depends on PPC64 && PPC_BOOK3S
3949 bool "IBM pSeries & new (POWER5-based) iSeries"
3950 select MPIC
3951+ select PCI_MSI
3952+ select XICS
3953 select PPC_I8259
3954 select PPC_RTAS
3955 select RTAS_ERROR_LOGGING
3956diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3957index 43c0aca..42c045b 100644
3958--- a/arch/s390/Kconfig
3959+++ b/arch/s390/Kconfig
3960@@ -194,28 +194,26 @@ config AUDIT_ARCH
3961
3962 config S390_SWITCH_AMODE
3963 bool "Switch kernel/user addressing modes"
3964+ default y
3965 help
3966 This option allows to switch the addressing modes of kernel and user
3967- space. The kernel parameter switch_amode=on will enable this feature,
3968- default is disabled. Enabling this (via kernel parameter) on machines
3969- earlier than IBM System z9-109 EC/BC will reduce system performance.
3970+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3971+ will reduce system performance.
3972
3973 Note that this option will also be selected by selecting the execute
3974- protection option below. Enabling the execute protection via the
3975- noexec kernel parameter will also switch the addressing modes,
3976- independent of the switch_amode kernel parameter.
3977+ protection option below. Enabling the execute protection will also
3978+ switch the addressing modes, independent of this option.
3979
3980
3981 config S390_EXEC_PROTECT
3982 bool "Data execute protection"
3983+ default y
3984 select S390_SWITCH_AMODE
3985 help
3986 This option allows to enable a buffer overflow protection for user
3987 space programs and it also selects the addressing mode option above.
3988- The kernel parameter noexec=on will enable this feature and also
3989- switch the addressing modes, default is disabled. Enabling this (via
3990- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3991- will reduce system performance.
3992+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3993+ reduce system performance.
3994
3995 comment "Code generation options"
3996
3997diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
3998index ae7c8f9..3f01a0c 100644
3999--- a/arch/s390/include/asm/atomic.h
4000+++ b/arch/s390/include/asm/atomic.h
4001@@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4002 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4003 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4004
4005+#define atomic64_read_unchecked(v) atomic64_read(v)
4006+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4007+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4008+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4009+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4010+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4011+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4012+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4013+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4014+
4015 #define smp_mb__before_atomic_dec() smp_mb()
4016 #define smp_mb__after_atomic_dec() smp_mb()
4017 #define smp_mb__before_atomic_inc() smp_mb()
4018diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4019index e885442..e3a2817 100644
4020--- a/arch/s390/include/asm/elf.h
4021+++ b/arch/s390/include/asm/elf.h
4022@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4023 that it will "exec", and that there is sufficient room for the brk. */
4024 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4025
4026+#ifdef CONFIG_PAX_ASLR
4027+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4028+
4029+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4030+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4031+#endif
4032+
4033 /* This yields a mask that user programs can use to figure out what
4034 instruction set this CPU supports. */
4035
4036diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4037index e37478e..9ce0e9f 100644
4038--- a/arch/s390/include/asm/setup.h
4039+++ b/arch/s390/include/asm/setup.h
4040@@ -50,13 +50,13 @@ extern unsigned long memory_end;
4041 void detect_memory_layout(struct mem_chunk chunk[]);
4042
4043 #ifdef CONFIG_S390_SWITCH_AMODE
4044-extern unsigned int switch_amode;
4045+#define switch_amode (1)
4046 #else
4047 #define switch_amode (0)
4048 #endif
4049
4050 #ifdef CONFIG_S390_EXEC_PROTECT
4051-extern unsigned int s390_noexec;
4052+#define s390_noexec (1)
4053 #else
4054 #define s390_noexec (0)
4055 #endif
4056diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4057index 8377e91..e28e6f1 100644
4058--- a/arch/s390/include/asm/uaccess.h
4059+++ b/arch/s390/include/asm/uaccess.h
4060@@ -232,6 +232,10 @@ static inline unsigned long __must_check
4061 copy_to_user(void __user *to, const void *from, unsigned long n)
4062 {
4063 might_fault();
4064+
4065+ if ((long)n < 0)
4066+ return n;
4067+
4068 if (access_ok(VERIFY_WRITE, to, n))
4069 n = __copy_to_user(to, from, n);
4070 return n;
4071@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4072 static inline unsigned long __must_check
4073 __copy_from_user(void *to, const void __user *from, unsigned long n)
4074 {
4075+ if ((long)n < 0)
4076+ return n;
4077+
4078 if (__builtin_constant_p(n) && (n <= 256))
4079 return uaccess.copy_from_user_small(n, from, to);
4080 else
4081@@ -283,6 +290,10 @@ static inline unsigned long __must_check
4082 copy_from_user(void *to, const void __user *from, unsigned long n)
4083 {
4084 might_fault();
4085+
4086+ if ((long)n < 0)
4087+ return n;
4088+
4089 if (access_ok(VERIFY_READ, from, n))
4090 n = __copy_from_user(to, from, n);
4091 else
4092diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4093index 639380a..72e3c02 100644
4094--- a/arch/s390/kernel/module.c
4095+++ b/arch/s390/kernel/module.c
4096@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4097
4098 /* Increase core size by size of got & plt and set start
4099 offsets for got and plt. */
4100- me->core_size = ALIGN(me->core_size, 4);
4101- me->arch.got_offset = me->core_size;
4102- me->core_size += me->arch.got_size;
4103- me->arch.plt_offset = me->core_size;
4104- me->core_size += me->arch.plt_size;
4105+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4106+ me->arch.got_offset = me->core_size_rw;
4107+ me->core_size_rw += me->arch.got_size;
4108+ me->arch.plt_offset = me->core_size_rx;
4109+ me->core_size_rx += me->arch.plt_size;
4110 return 0;
4111 }
4112
4113@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4114 if (info->got_initialized == 0) {
4115 Elf_Addr *gotent;
4116
4117- gotent = me->module_core + me->arch.got_offset +
4118+ gotent = me->module_core_rw + me->arch.got_offset +
4119 info->got_offset;
4120 *gotent = val;
4121 info->got_initialized = 1;
4122@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4123 else if (r_type == R_390_GOTENT ||
4124 r_type == R_390_GOTPLTENT)
4125 *(unsigned int *) loc =
4126- (val + (Elf_Addr) me->module_core - loc) >> 1;
4127+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4128 else if (r_type == R_390_GOT64 ||
4129 r_type == R_390_GOTPLT64)
4130 *(unsigned long *) loc = val;
4131@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4132 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4133 if (info->plt_initialized == 0) {
4134 unsigned int *ip;
4135- ip = me->module_core + me->arch.plt_offset +
4136+ ip = me->module_core_rx + me->arch.plt_offset +
4137 info->plt_offset;
4138 #ifndef CONFIG_64BIT
4139 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4140@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4141 val - loc + 0xffffUL < 0x1ffffeUL) ||
4142 (r_type == R_390_PLT32DBL &&
4143 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4144- val = (Elf_Addr) me->module_core +
4145+ val = (Elf_Addr) me->module_core_rx +
4146 me->arch.plt_offset +
4147 info->plt_offset;
4148 val += rela->r_addend - loc;
4149@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4150 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4151 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4152 val = val + rela->r_addend -
4153- ((Elf_Addr) me->module_core + me->arch.got_offset);
4154+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4155 if (r_type == R_390_GOTOFF16)
4156 *(unsigned short *) loc = val;
4157 else if (r_type == R_390_GOTOFF32)
4158@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4159 break;
4160 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4161 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4162- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4163+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4164 rela->r_addend - loc;
4165 if (r_type == R_390_GOTPC)
4166 *(unsigned int *) loc = val;
4167diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4168index 061479f..dbfb08c 100644
4169--- a/arch/s390/kernel/setup.c
4170+++ b/arch/s390/kernel/setup.c
4171@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4172 early_param("mem", early_parse_mem);
4173
4174 #ifdef CONFIG_S390_SWITCH_AMODE
4175-unsigned int switch_amode = 0;
4176-EXPORT_SYMBOL_GPL(switch_amode);
4177-
4178 static int set_amode_and_uaccess(unsigned long user_amode,
4179 unsigned long user32_amode)
4180 {
4181@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4182 return 0;
4183 }
4184 }
4185-
4186-/*
4187- * Switch kernel/user addressing modes?
4188- */
4189-static int __init early_parse_switch_amode(char *p)
4190-{
4191- switch_amode = 1;
4192- return 0;
4193-}
4194-early_param("switch_amode", early_parse_switch_amode);
4195-
4196 #else /* CONFIG_S390_SWITCH_AMODE */
4197 static inline int set_amode_and_uaccess(unsigned long user_amode,
4198 unsigned long user32_amode)
4199@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4200 }
4201 #endif /* CONFIG_S390_SWITCH_AMODE */
4202
4203-#ifdef CONFIG_S390_EXEC_PROTECT
4204-unsigned int s390_noexec = 0;
4205-EXPORT_SYMBOL_GPL(s390_noexec);
4206-
4207-/*
4208- * Enable execute protection?
4209- */
4210-static int __init early_parse_noexec(char *p)
4211-{
4212- if (!strncmp(p, "off", 3))
4213- return 0;
4214- switch_amode = 1;
4215- s390_noexec = 1;
4216- return 0;
4217-}
4218-early_param("noexec", early_parse_noexec);
4219-#endif /* CONFIG_S390_EXEC_PROTECT */
4220-
4221 static void setup_addressing_mode(void)
4222 {
4223 if (s390_noexec) {
4224diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4225index f4558cc..e461f37 100644
4226--- a/arch/s390/mm/mmap.c
4227+++ b/arch/s390/mm/mmap.c
4228@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4229 */
4230 if (mmap_is_legacy()) {
4231 mm->mmap_base = TASK_UNMAPPED_BASE;
4232+
4233+#ifdef CONFIG_PAX_RANDMMAP
4234+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4235+ mm->mmap_base += mm->delta_mmap;
4236+#endif
4237+
4238 mm->get_unmapped_area = arch_get_unmapped_area;
4239 mm->unmap_area = arch_unmap_area;
4240 } else {
4241 mm->mmap_base = mmap_base();
4242+
4243+#ifdef CONFIG_PAX_RANDMMAP
4244+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4245+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4246+#endif
4247+
4248 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4249 mm->unmap_area = arch_unmap_area_topdown;
4250 }
4251@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4252 */
4253 if (mmap_is_legacy()) {
4254 mm->mmap_base = TASK_UNMAPPED_BASE;
4255+
4256+#ifdef CONFIG_PAX_RANDMMAP
4257+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4258+ mm->mmap_base += mm->delta_mmap;
4259+#endif
4260+
4261 mm->get_unmapped_area = s390_get_unmapped_area;
4262 mm->unmap_area = arch_unmap_area;
4263 } else {
4264 mm->mmap_base = mmap_base();
4265+
4266+#ifdef CONFIG_PAX_RANDMMAP
4267+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4268+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4269+#endif
4270+
4271 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4272 mm->unmap_area = arch_unmap_area_topdown;
4273 }
4274diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4275index 589d5c7..669e274 100644
4276--- a/arch/score/include/asm/system.h
4277+++ b/arch/score/include/asm/system.h
4278@@ -17,7 +17,7 @@ do { \
4279 #define finish_arch_switch(prev) do {} while (0)
4280
4281 typedef void (*vi_handler_t)(void);
4282-extern unsigned long arch_align_stack(unsigned long sp);
4283+#define arch_align_stack(x) (x)
4284
4285 #define mb() barrier()
4286 #define rmb() barrier()
4287diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4288index 25d0803..d6c8e36 100644
4289--- a/arch/score/kernel/process.c
4290+++ b/arch/score/kernel/process.c
4291@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4292
4293 return task_pt_regs(task)->cp0_epc;
4294 }
4295-
4296-unsigned long arch_align_stack(unsigned long sp)
4297-{
4298- return sp;
4299-}
4300diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4301index d936c1a..304a252 100644
4302--- a/arch/sh/boards/mach-hp6xx/pm.c
4303+++ b/arch/sh/boards/mach-hp6xx/pm.c
4304@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4305 return 0;
4306 }
4307
4308-static struct platform_suspend_ops hp6x0_pm_ops = {
4309+static const struct platform_suspend_ops hp6x0_pm_ops = {
4310 .enter = hp6x0_pm_enter,
4311 .valid = suspend_valid_only_mem,
4312 };
4313diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4314index 8a8a993..7b3079b 100644
4315--- a/arch/sh/kernel/cpu/sh4/sq.c
4316+++ b/arch/sh/kernel/cpu/sh4/sq.c
4317@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4318 NULL,
4319 };
4320
4321-static struct sysfs_ops sq_sysfs_ops = {
4322+static const struct sysfs_ops sq_sysfs_ops = {
4323 .show = sq_sysfs_show,
4324 .store = sq_sysfs_store,
4325 };
4326diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4327index ee3c2aa..c49cee6 100644
4328--- a/arch/sh/kernel/cpu/shmobile/pm.c
4329+++ b/arch/sh/kernel/cpu/shmobile/pm.c
4330@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4331 return 0;
4332 }
4333
4334-static struct platform_suspend_ops sh_pm_ops = {
4335+static const struct platform_suspend_ops sh_pm_ops = {
4336 .enter = sh_pm_enter,
4337 .valid = suspend_valid_only_mem,
4338 };
4339diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4340index 3e532d0..9faa306 100644
4341--- a/arch/sh/kernel/kgdb.c
4342+++ b/arch/sh/kernel/kgdb.c
4343@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4344 {
4345 }
4346
4347-struct kgdb_arch arch_kgdb_ops = {
4348+const struct kgdb_arch arch_kgdb_ops = {
4349 /* Breakpoint instruction: trapa #0x3c */
4350 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4351 .gdb_bpt_instr = { 0x3c, 0xc3 },
4352diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4353index afeb710..d1d1289 100644
4354--- a/arch/sh/mm/mmap.c
4355+++ b/arch/sh/mm/mmap.c
4356@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4357 addr = PAGE_ALIGN(addr);
4358
4359 vma = find_vma(mm, addr);
4360- if (TASK_SIZE - len >= addr &&
4361- (!vma || addr + len <= vma->vm_start))
4362+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4363 return addr;
4364 }
4365
4366@@ -106,7 +105,7 @@ full_search:
4367 }
4368 return -ENOMEM;
4369 }
4370- if (likely(!vma || addr + len <= vma->vm_start)) {
4371+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4372 /*
4373 * Remember the place where we stopped the search:
4374 */
4375@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4376 addr = PAGE_ALIGN(addr);
4377
4378 vma = find_vma(mm, addr);
4379- if (TASK_SIZE - len >= addr &&
4380- (!vma || addr + len <= vma->vm_start))
4381+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4382 return addr;
4383 }
4384
4385@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4386 /* make sure it can fit in the remaining address space */
4387 if (likely(addr > len)) {
4388 vma = find_vma(mm, addr-len);
4389- if (!vma || addr <= vma->vm_start) {
4390+ if (check_heap_stack_gap(vma, addr - len, len)) {
4391 /* remember the address as a hint for next time */
4392 return (mm->free_area_cache = addr-len);
4393 }
4394@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4395 if (unlikely(mm->mmap_base < len))
4396 goto bottomup;
4397
4398- addr = mm->mmap_base-len;
4399- if (do_colour_align)
4400- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4401+ addr = mm->mmap_base - len;
4402
4403 do {
4404+ if (do_colour_align)
4405+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4406 /*
4407 * Lookup failure means no vma is above this address,
4408 * else if new region fits below vma->vm_start,
4409 * return with success:
4410 */
4411 vma = find_vma(mm, addr);
4412- if (likely(!vma || addr+len <= vma->vm_start)) {
4413+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4414 /* remember the address as a hint for next time */
4415 return (mm->free_area_cache = addr);
4416 }
4417@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4418 mm->cached_hole_size = vma->vm_start - addr;
4419
4420 /* try just below the current vma->vm_start */
4421- addr = vma->vm_start-len;
4422- if (do_colour_align)
4423- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4424- } while (likely(len < vma->vm_start));
4425+ addr = skip_heap_stack_gap(vma, len);
4426+ } while (!IS_ERR_VALUE(addr));
4427
4428 bottomup:
4429 /*
4430diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4431index 05ef538..dc9c857 100644
4432--- a/arch/sparc/Kconfig
4433+++ b/arch/sparc/Kconfig
4434@@ -32,6 +32,7 @@ config SPARC
4435
4436 config SPARC32
4437 def_bool !64BIT
4438+ select GENERIC_ATOMIC64
4439
4440 config SPARC64
4441 def_bool 64BIT
4442diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4443index 113225b..7fd04e7 100644
4444--- a/arch/sparc/Makefile
4445+++ b/arch/sparc/Makefile
4446@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4447 # Export what is needed by arch/sparc/boot/Makefile
4448 export VMLINUX_INIT VMLINUX_MAIN
4449 VMLINUX_INIT := $(head-y) $(init-y)
4450-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4451+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4452 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4453 VMLINUX_MAIN += $(drivers-y) $(net-y)
4454
4455diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
4456index 8ff83d8..4a459c2 100644
4457--- a/arch/sparc/include/asm/atomic.h
4458+++ b/arch/sparc/include/asm/atomic.h
4459@@ -4,5 +4,6 @@
4460 #include <asm/atomic_64.h>
4461 #else
4462 #include <asm/atomic_32.h>
4463+#include <asm-generic/atomic64.h>
4464 #endif
4465 #endif
4466diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4467index f5cc06f..f858d47 100644
4468--- a/arch/sparc/include/asm/atomic_64.h
4469+++ b/arch/sparc/include/asm/atomic_64.h
4470@@ -14,18 +14,40 @@
4471 #define ATOMIC64_INIT(i) { (i) }
4472
4473 #define atomic_read(v) ((v)->counter)
4474+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4475+{
4476+ return v->counter;
4477+}
4478 #define atomic64_read(v) ((v)->counter)
4479+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4480+{
4481+ return v->counter;
4482+}
4483
4484 #define atomic_set(v, i) (((v)->counter) = i)
4485+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4486+{
4487+ v->counter = i;
4488+}
4489 #define atomic64_set(v, i) (((v)->counter) = i)
4490+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4491+{
4492+ v->counter = i;
4493+}
4494
4495 extern void atomic_add(int, atomic_t *);
4496+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4497 extern void atomic64_add(long, atomic64_t *);
4498+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4499 extern void atomic_sub(int, atomic_t *);
4500+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4501 extern void atomic64_sub(long, atomic64_t *);
4502+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4503
4504 extern int atomic_add_ret(int, atomic_t *);
4505+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4506 extern long atomic64_add_ret(long, atomic64_t *);
4507+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4508 extern int atomic_sub_ret(int, atomic_t *);
4509 extern long atomic64_sub_ret(long, atomic64_t *);
4510
4511@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4512 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4513
4514 #define atomic_inc_return(v) atomic_add_ret(1, v)
4515+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4516+{
4517+ return atomic_add_ret_unchecked(1, v);
4518+}
4519 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4520+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4521+{
4522+ return atomic64_add_ret_unchecked(1, v);
4523+}
4524
4525 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4526 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4527
4528 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4529+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4530+{
4531+ return atomic_add_ret_unchecked(i, v);
4532+}
4533 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4534+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4535+{
4536+ return atomic64_add_ret_unchecked(i, v);
4537+}
4538
4539 /*
4540 * atomic_inc_and_test - increment and test
4541@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4542 * other cases.
4543 */
4544 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4545+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4546+{
4547+ return atomic_inc_return_unchecked(v) == 0;
4548+}
4549 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4550
4551 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4552@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4553 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4554
4555 #define atomic_inc(v) atomic_add(1, v)
4556+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4557+{
4558+ atomic_add_unchecked(1, v);
4559+}
4560 #define atomic64_inc(v) atomic64_add(1, v)
4561+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4562+{
4563+ atomic64_add_unchecked(1, v);
4564+}
4565
4566 #define atomic_dec(v) atomic_sub(1, v)
4567+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4568+{
4569+ atomic_sub_unchecked(1, v);
4570+}
4571 #define atomic64_dec(v) atomic64_sub(1, v)
4572+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4573+{
4574+ atomic64_sub_unchecked(1, v);
4575+}
4576
4577 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4578 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4579
4580 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4581+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4582+{
4583+ return cmpxchg(&v->counter, old, new);
4584+}
4585 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4586+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4587+{
4588+ return xchg(&v->counter, new);
4589+}
4590
4591 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4592 {
4593- int c, old;
4594+ int c, old, new;
4595 c = atomic_read(v);
4596 for (;;) {
4597- if (unlikely(c == (u)))
4598+ if (unlikely(c == u))
4599 break;
4600- old = atomic_cmpxchg((v), c, c + (a));
4601+
4602+ asm volatile("addcc %2, %0, %0\n"
4603+
4604+#ifdef CONFIG_PAX_REFCOUNT
4605+ "tvs %%icc, 6\n"
4606+#endif
4607+
4608+ : "=r" (new)
4609+ : "0" (c), "ir" (a)
4610+ : "cc");
4611+
4612+ old = atomic_cmpxchg(v, c, new);
4613 if (likely(old == c))
4614 break;
4615 c = old;
4616 }
4617- return c != (u);
4618+ return c != u;
4619 }
4620
4621 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4622@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4623 #define atomic64_cmpxchg(v, o, n) \
4624 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4625 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4626+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4627+{
4628+ return xchg(&v->counter, new);
4629+}
4630
4631 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4632 {
4633- long c, old;
4634+ long c, old, new;
4635 c = atomic64_read(v);
4636 for (;;) {
4637- if (unlikely(c == (u)))
4638+ if (unlikely(c == u))
4639 break;
4640- old = atomic64_cmpxchg((v), c, c + (a));
4641+
4642+ asm volatile("addcc %2, %0, %0\n"
4643+
4644+#ifdef CONFIG_PAX_REFCOUNT
4645+ "tvs %%xcc, 6\n"
4646+#endif
4647+
4648+ : "=r" (new)
4649+ : "0" (c), "ir" (a)
4650+ : "cc");
4651+
4652+ old = atomic64_cmpxchg(v, c, new);
4653 if (likely(old == c))
4654 break;
4655 c = old;
4656 }
4657- return c != (u);
4658+ return c != u;
4659 }
4660
4661 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4662diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4663index 41f85ae..fb54d5e 100644
4664--- a/arch/sparc/include/asm/cache.h
4665+++ b/arch/sparc/include/asm/cache.h
4666@@ -8,7 +8,7 @@
4667 #define _SPARC_CACHE_H
4668
4669 #define L1_CACHE_SHIFT 5
4670-#define L1_CACHE_BYTES 32
4671+#define L1_CACHE_BYTES 32UL
4672 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4673
4674 #ifdef CONFIG_SPARC32
4675diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4676index 5a8c308..38def92 100644
4677--- a/arch/sparc/include/asm/dma-mapping.h
4678+++ b/arch/sparc/include/asm/dma-mapping.h
4679@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4680 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4681 #define dma_is_consistent(d, h) (1)
4682
4683-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4684+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4685 extern struct bus_type pci_bus_type;
4686
4687-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4688+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4689 {
4690 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4691 if (dev->bus == &pci_bus_type)
4692@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4693 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4694 dma_addr_t *dma_handle, gfp_t flag)
4695 {
4696- struct dma_map_ops *ops = get_dma_ops(dev);
4697+ const struct dma_map_ops *ops = get_dma_ops(dev);
4698 void *cpu_addr;
4699
4700 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4701@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4702 static inline void dma_free_coherent(struct device *dev, size_t size,
4703 void *cpu_addr, dma_addr_t dma_handle)
4704 {
4705- struct dma_map_ops *ops = get_dma_ops(dev);
4706+ const struct dma_map_ops *ops = get_dma_ops(dev);
4707
4708 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4709 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4710diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4711index 381a1b5..b97e3ff 100644
4712--- a/arch/sparc/include/asm/elf_32.h
4713+++ b/arch/sparc/include/asm/elf_32.h
4714@@ -116,6 +116,13 @@ typedef struct {
4715
4716 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4717
4718+#ifdef CONFIG_PAX_ASLR
4719+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4720+
4721+#define PAX_DELTA_MMAP_LEN 16
4722+#define PAX_DELTA_STACK_LEN 16
4723+#endif
4724+
4725 /* This yields a mask that user programs can use to figure out what
4726 instruction set this cpu supports. This can NOT be done in userspace
4727 on Sparc. */
4728diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4729index 9968085..c2106ef 100644
4730--- a/arch/sparc/include/asm/elf_64.h
4731+++ b/arch/sparc/include/asm/elf_64.h
4732@@ -163,6 +163,12 @@ typedef struct {
4733 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4734 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4735
4736+#ifdef CONFIG_PAX_ASLR
4737+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4738+
4739+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4740+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4741+#endif
4742
4743 /* This yields a mask that user programs can use to figure out what
4744 instruction set this cpu supports. */
4745diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
4746index 156707b..aefa786 100644
4747--- a/arch/sparc/include/asm/page_32.h
4748+++ b/arch/sparc/include/asm/page_32.h
4749@@ -8,6 +8,8 @@
4750 #ifndef _SPARC_PAGE_H
4751 #define _SPARC_PAGE_H
4752
4753+#include <linux/const.h>
4754+
4755 #define PAGE_SHIFT 12
4756
4757 #ifndef __ASSEMBLY__
4758diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4759index e0cabe7..efd60f1 100644
4760--- a/arch/sparc/include/asm/pgtable_32.h
4761+++ b/arch/sparc/include/asm/pgtable_32.h
4762@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4763 BTFIXUPDEF_INT(page_none)
4764 BTFIXUPDEF_INT(page_copy)
4765 BTFIXUPDEF_INT(page_readonly)
4766+
4767+#ifdef CONFIG_PAX_PAGEEXEC
4768+BTFIXUPDEF_INT(page_shared_noexec)
4769+BTFIXUPDEF_INT(page_copy_noexec)
4770+BTFIXUPDEF_INT(page_readonly_noexec)
4771+#endif
4772+
4773 BTFIXUPDEF_INT(page_kernel)
4774
4775 #define PMD_SHIFT SUN4C_PMD_SHIFT
4776@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4777 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4778 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4779
4780+#ifdef CONFIG_PAX_PAGEEXEC
4781+extern pgprot_t PAGE_SHARED_NOEXEC;
4782+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4783+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4784+#else
4785+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4786+# define PAGE_COPY_NOEXEC PAGE_COPY
4787+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4788+#endif
4789+
4790 extern unsigned long page_kernel;
4791
4792 #ifdef MODULE
4793diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4794index 1407c07..7e10231 100644
4795--- a/arch/sparc/include/asm/pgtsrmmu.h
4796+++ b/arch/sparc/include/asm/pgtsrmmu.h
4797@@ -115,6 +115,13 @@
4798 SRMMU_EXEC | SRMMU_REF)
4799 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4800 SRMMU_EXEC | SRMMU_REF)
4801+
4802+#ifdef CONFIG_PAX_PAGEEXEC
4803+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4804+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4805+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4806+#endif
4807+
4808 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4809 SRMMU_DIRTY | SRMMU_REF)
4810
4811diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4812index 43e5147..47622a1 100644
4813--- a/arch/sparc/include/asm/spinlock_64.h
4814+++ b/arch/sparc/include/asm/spinlock_64.h
4815@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4816
4817 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4818
4819-static void inline arch_read_lock(raw_rwlock_t *lock)
4820+static inline void arch_read_lock(raw_rwlock_t *lock)
4821 {
4822 unsigned long tmp1, tmp2;
4823
4824 __asm__ __volatile__ (
4825 "1: ldsw [%2], %0\n"
4826 " brlz,pn %0, 2f\n"
4827-"4: add %0, 1, %1\n"
4828+"4: addcc %0, 1, %1\n"
4829+
4830+#ifdef CONFIG_PAX_REFCOUNT
4831+" tvs %%icc, 6\n"
4832+#endif
4833+
4834 " cas [%2], %0, %1\n"
4835 " cmp %0, %1\n"
4836 " bne,pn %%icc, 1b\n"
4837@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4838 " .previous"
4839 : "=&r" (tmp1), "=&r" (tmp2)
4840 : "r" (lock)
4841- : "memory");
4842+ : "memory", "cc");
4843 }
4844
4845-static int inline arch_read_trylock(raw_rwlock_t *lock)
4846+static inline int arch_read_trylock(raw_rwlock_t *lock)
4847 {
4848 int tmp1, tmp2;
4849
4850@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4851 "1: ldsw [%2], %0\n"
4852 " brlz,a,pn %0, 2f\n"
4853 " mov 0, %0\n"
4854-" add %0, 1, %1\n"
4855+" addcc %0, 1, %1\n"
4856+
4857+#ifdef CONFIG_PAX_REFCOUNT
4858+" tvs %%icc, 6\n"
4859+#endif
4860+
4861 " cas [%2], %0, %1\n"
4862 " cmp %0, %1\n"
4863 " bne,pn %%icc, 1b\n"
4864@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4865 return tmp1;
4866 }
4867
4868-static void inline arch_read_unlock(raw_rwlock_t *lock)
4869+static inline void arch_read_unlock(raw_rwlock_t *lock)
4870 {
4871 unsigned long tmp1, tmp2;
4872
4873 __asm__ __volatile__(
4874 "1: lduw [%2], %0\n"
4875-" sub %0, 1, %1\n"
4876+" subcc %0, 1, %1\n"
4877+
4878+#ifdef CONFIG_PAX_REFCOUNT
4879+" tvs %%icc, 6\n"
4880+#endif
4881+
4882 " cas [%2], %0, %1\n"
4883 " cmp %0, %1\n"
4884 " bne,pn %%xcc, 1b\n"
4885@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4886 : "memory");
4887 }
4888
4889-static void inline arch_write_lock(raw_rwlock_t *lock)
4890+static inline void arch_write_lock(raw_rwlock_t *lock)
4891 {
4892 unsigned long mask, tmp1, tmp2;
4893
4894@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4895 : "memory");
4896 }
4897
4898-static void inline arch_write_unlock(raw_rwlock_t *lock)
4899+static inline void arch_write_unlock(raw_rwlock_t *lock)
4900 {
4901 __asm__ __volatile__(
4902 " stw %%g0, [%0]"
4903@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4904 : "memory");
4905 }
4906
4907-static int inline arch_write_trylock(raw_rwlock_t *lock)
4908+static inline int arch_write_trylock(raw_rwlock_t *lock)
4909 {
4910 unsigned long mask, tmp1, tmp2, result;
4911
4912diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4913index 844d73a..f787fb9 100644
4914--- a/arch/sparc/include/asm/thread_info_32.h
4915+++ b/arch/sparc/include/asm/thread_info_32.h
4916@@ -50,6 +50,8 @@ struct thread_info {
4917 unsigned long w_saved;
4918
4919 struct restart_block restart_block;
4920+
4921+ unsigned long lowest_stack;
4922 };
4923
4924 /*
4925diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4926index f78ad9a..9f55fc7 100644
4927--- a/arch/sparc/include/asm/thread_info_64.h
4928+++ b/arch/sparc/include/asm/thread_info_64.h
4929@@ -68,6 +68,8 @@ struct thread_info {
4930 struct pt_regs *kern_una_regs;
4931 unsigned int kern_una_insn;
4932
4933+ unsigned long lowest_stack;
4934+
4935 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4936 };
4937
4938diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4939index e88fbe5..96b0ce5 100644
4940--- a/arch/sparc/include/asm/uaccess.h
4941+++ b/arch/sparc/include/asm/uaccess.h
4942@@ -1,5 +1,13 @@
4943 #ifndef ___ASM_SPARC_UACCESS_H
4944 #define ___ASM_SPARC_UACCESS_H
4945+
4946+#ifdef __KERNEL__
4947+#ifndef __ASSEMBLY__
4948+#include <linux/types.h>
4949+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4950+#endif
4951+#endif
4952+
4953 #if defined(__sparc__) && defined(__arch64__)
4954 #include <asm/uaccess_64.h>
4955 #else
4956diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4957index 8303ac4..07f333d 100644
4958--- a/arch/sparc/include/asm/uaccess_32.h
4959+++ b/arch/sparc/include/asm/uaccess_32.h
4960@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4961
4962 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4963 {
4964- if (n && __access_ok((unsigned long) to, n))
4965+ if ((long)n < 0)
4966+ return n;
4967+
4968+ if (n && __access_ok((unsigned long) to, n)) {
4969+ if (!__builtin_constant_p(n))
4970+ check_object_size(from, n, true);
4971 return __copy_user(to, (__force void __user *) from, n);
4972- else
4973+ } else
4974 return n;
4975 }
4976
4977 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4978 {
4979+ if ((long)n < 0)
4980+ return n;
4981+
4982+ if (!__builtin_constant_p(n))
4983+ check_object_size(from, n, true);
4984+
4985 return __copy_user(to, (__force void __user *) from, n);
4986 }
4987
4988 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4989 {
4990- if (n && __access_ok((unsigned long) from, n))
4991+ if ((long)n < 0)
4992+ return n;
4993+
4994+ if (n && __access_ok((unsigned long) from, n)) {
4995+ if (!__builtin_constant_p(n))
4996+ check_object_size(to, n, false);
4997 return __copy_user((__force void __user *) to, from, n);
4998- else
4999+ } else
5000 return n;
5001 }
5002
5003 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5004 {
5005+ if ((long)n < 0)
5006+ return n;
5007+
5008 return __copy_user((__force void __user *) to, from, n);
5009 }
5010
5011diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5012index 9ea271e..7b8a271 100644
5013--- a/arch/sparc/include/asm/uaccess_64.h
5014+++ b/arch/sparc/include/asm/uaccess_64.h
5015@@ -9,6 +9,7 @@
5016 #include <linux/compiler.h>
5017 #include <linux/string.h>
5018 #include <linux/thread_info.h>
5019+#include <linux/kernel.h>
5020 #include <asm/asi.h>
5021 #include <asm/system.h>
5022 #include <asm/spitfire.h>
5023@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5024 static inline unsigned long __must_check
5025 copy_from_user(void *to, const void __user *from, unsigned long size)
5026 {
5027- unsigned long ret = ___copy_from_user(to, from, size);
5028+ unsigned long ret;
5029
5030+ if ((long)size < 0 || size > INT_MAX)
5031+ return size;
5032+
5033+ if (!__builtin_constant_p(size))
5034+ check_object_size(to, size, false);
5035+
5036+ ret = ___copy_from_user(to, from, size);
5037 if (unlikely(ret))
5038 ret = copy_from_user_fixup(to, from, size);
5039 return ret;
5040@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5041 static inline unsigned long __must_check
5042 copy_to_user(void __user *to, const void *from, unsigned long size)
5043 {
5044- unsigned long ret = ___copy_to_user(to, from, size);
5045+ unsigned long ret;
5046
5047+ if ((long)size < 0 || size > INT_MAX)
5048+ return size;
5049+
5050+ if (!__builtin_constant_p(size))
5051+ check_object_size(from, size, true);
5052+
5053+ ret = ___copy_to_user(to, from, size);
5054 if (unlikely(ret))
5055 ret = copy_to_user_fixup(to, from, size);
5056 return ret;
5057diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5058index 2782681..77ded84 100644
5059--- a/arch/sparc/kernel/Makefile
5060+++ b/arch/sparc/kernel/Makefile
5061@@ -3,7 +3,7 @@
5062 #
5063
5064 asflags-y := -ansi
5065-ccflags-y := -Werror
5066+#ccflags-y := -Werror
5067
5068 extra-y := head_$(BITS).o
5069 extra-y += init_task.o
5070diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5071index 7690cc2..ece64c9 100644
5072--- a/arch/sparc/kernel/iommu.c
5073+++ b/arch/sparc/kernel/iommu.c
5074@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5075 spin_unlock_irqrestore(&iommu->lock, flags);
5076 }
5077
5078-static struct dma_map_ops sun4u_dma_ops = {
5079+static const struct dma_map_ops sun4u_dma_ops = {
5080 .alloc_coherent = dma_4u_alloc_coherent,
5081 .free_coherent = dma_4u_free_coherent,
5082 .map_page = dma_4u_map_page,
5083@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5084 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5085 };
5086
5087-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5088+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5089 EXPORT_SYMBOL(dma_ops);
5090
5091 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5092diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5093index 9f61fd8..bd048db 100644
5094--- a/arch/sparc/kernel/ioport.c
5095+++ b/arch/sparc/kernel/ioport.c
5096@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5097 BUG();
5098 }
5099
5100-struct dma_map_ops sbus_dma_ops = {
5101+const struct dma_map_ops sbus_dma_ops = {
5102 .alloc_coherent = sbus_alloc_coherent,
5103 .free_coherent = sbus_free_coherent,
5104 .map_page = sbus_map_page,
5105@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5106 .sync_sg_for_device = sbus_sync_sg_for_device,
5107 };
5108
5109-struct dma_map_ops *dma_ops = &sbus_dma_ops;
5110+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5111 EXPORT_SYMBOL(dma_ops);
5112
5113 static int __init sparc_register_ioport(void)
5114@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5115 }
5116 }
5117
5118-struct dma_map_ops pci32_dma_ops = {
5119+const struct dma_map_ops pci32_dma_ops = {
5120 .alloc_coherent = pci32_alloc_coherent,
5121 .free_coherent = pci32_free_coherent,
5122 .map_page = pci32_map_page,
5123diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5124index 04df4ed..55c4b6e 100644
5125--- a/arch/sparc/kernel/kgdb_32.c
5126+++ b/arch/sparc/kernel/kgdb_32.c
5127@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5128 {
5129 }
5130
5131-struct kgdb_arch arch_kgdb_ops = {
5132+const struct kgdb_arch arch_kgdb_ops = {
5133 /* Breakpoint instruction: ta 0x7d */
5134 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5135 };
5136diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5137index f5a0fd4..d886f71 100644
5138--- a/arch/sparc/kernel/kgdb_64.c
5139+++ b/arch/sparc/kernel/kgdb_64.c
5140@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5141 {
5142 }
5143
5144-struct kgdb_arch arch_kgdb_ops = {
5145+const struct kgdb_arch arch_kgdb_ops = {
5146 /* Breakpoint instruction: ta 0x72 */
5147 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5148 };
5149diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5150index 23c33ff..d137fbd 100644
5151--- a/arch/sparc/kernel/pci_sun4v.c
5152+++ b/arch/sparc/kernel/pci_sun4v.c
5153@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5154 spin_unlock_irqrestore(&iommu->lock, flags);
5155 }
5156
5157-static struct dma_map_ops sun4v_dma_ops = {
5158+static const struct dma_map_ops sun4v_dma_ops = {
5159 .alloc_coherent = dma_4v_alloc_coherent,
5160 .free_coherent = dma_4v_free_coherent,
5161 .map_page = dma_4v_map_page,
5162diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5163index c49865b..b41a81b 100644
5164--- a/arch/sparc/kernel/process_32.c
5165+++ b/arch/sparc/kernel/process_32.c
5166@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
5167 rw->ins[4], rw->ins[5],
5168 rw->ins[6],
5169 rw->ins[7]);
5170- printk("%pS\n", (void *) rw->ins[7]);
5171+ printk("%pA\n", (void *) rw->ins[7]);
5172 rw = (struct reg_window32 *) rw->ins[6];
5173 }
5174 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5175@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
5176
5177 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5178 r->psr, r->pc, r->npc, r->y, print_tainted());
5179- printk("PC: <%pS>\n", (void *) r->pc);
5180+ printk("PC: <%pA>\n", (void *) r->pc);
5181 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5182 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5183 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5184 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5185 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5186 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5187- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5188+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5189
5190 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5191 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5192@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5193 rw = (struct reg_window32 *) fp;
5194 pc = rw->ins[7];
5195 printk("[%08lx : ", pc);
5196- printk("%pS ] ", (void *) pc);
5197+ printk("%pA ] ", (void *) pc);
5198 fp = rw->ins[6];
5199 } while (++count < 16);
5200 printk("\n");
5201diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5202index cb70476..3d0c191 100644
5203--- a/arch/sparc/kernel/process_64.c
5204+++ b/arch/sparc/kernel/process_64.c
5205@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5206 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5207 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5208 if (regs->tstate & TSTATE_PRIV)
5209- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5210+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5211 }
5212
5213 void show_regs(struct pt_regs *regs)
5214 {
5215 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5216 regs->tpc, regs->tnpc, regs->y, print_tainted());
5217- printk("TPC: <%pS>\n", (void *) regs->tpc);
5218+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5219 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5220 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5221 regs->u_regs[3]);
5222@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5223 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5224 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5225 regs->u_regs[15]);
5226- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5227+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5228 show_regwindow(regs);
5229 }
5230
5231@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5232 ((tp && tp->task) ? tp->task->pid : -1));
5233
5234 if (gp->tstate & TSTATE_PRIV) {
5235- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5236+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5237 (void *) gp->tpc,
5238 (void *) gp->o7,
5239 (void *) gp->i7,
5240diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5241index 6edc4e5..06a69b4 100644
5242--- a/arch/sparc/kernel/sigutil_64.c
5243+++ b/arch/sparc/kernel/sigutil_64.c
5244@@ -2,6 +2,7 @@
5245 #include <linux/types.h>
5246 #include <linux/thread_info.h>
5247 #include <linux/uaccess.h>
5248+#include <linux/errno.h>
5249
5250 #include <asm/sigcontext.h>
5251 #include <asm/fpumacro.h>
5252diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5253index 3a82e65..ce0a53a 100644
5254--- a/arch/sparc/kernel/sys_sparc_32.c
5255+++ b/arch/sparc/kernel/sys_sparc_32.c
5256@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5257 if (ARCH_SUN4C && len > 0x20000000)
5258 return -ENOMEM;
5259 if (!addr)
5260- addr = TASK_UNMAPPED_BASE;
5261+ addr = current->mm->mmap_base;
5262
5263 if (flags & MAP_SHARED)
5264 addr = COLOUR_ALIGN(addr);
5265@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5266 }
5267 if (TASK_SIZE - PAGE_SIZE - len < addr)
5268 return -ENOMEM;
5269- if (!vmm || addr + len <= vmm->vm_start)
5270+ if (check_heap_stack_gap(vmm, addr, len))
5271 return addr;
5272 addr = vmm->vm_end;
5273 if (flags & MAP_SHARED)
5274diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5275index cfa0e19..98972ac 100644
5276--- a/arch/sparc/kernel/sys_sparc_64.c
5277+++ b/arch/sparc/kernel/sys_sparc_64.c
5278@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5279 /* We do not accept a shared mapping if it would violate
5280 * cache aliasing constraints.
5281 */
5282- if ((flags & MAP_SHARED) &&
5283+ if ((filp || (flags & MAP_SHARED)) &&
5284 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5285 return -EINVAL;
5286 return addr;
5287@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5288 if (filp || (flags & MAP_SHARED))
5289 do_color_align = 1;
5290
5291+#ifdef CONFIG_PAX_RANDMMAP
5292+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5293+#endif
5294+
5295 if (addr) {
5296 if (do_color_align)
5297 addr = COLOUR_ALIGN(addr, pgoff);
5298@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5299 addr = PAGE_ALIGN(addr);
5300
5301 vma = find_vma(mm, addr);
5302- if (task_size - len >= addr &&
5303- (!vma || addr + len <= vma->vm_start))
5304+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5305 return addr;
5306 }
5307
5308 if (len > mm->cached_hole_size) {
5309- start_addr = addr = mm->free_area_cache;
5310+ start_addr = addr = mm->free_area_cache;
5311 } else {
5312- start_addr = addr = TASK_UNMAPPED_BASE;
5313+ start_addr = addr = mm->mmap_base;
5314 mm->cached_hole_size = 0;
5315 }
5316
5317@@ -175,14 +178,14 @@ full_search:
5318 vma = find_vma(mm, VA_EXCLUDE_END);
5319 }
5320 if (unlikely(task_size < addr)) {
5321- if (start_addr != TASK_UNMAPPED_BASE) {
5322- start_addr = addr = TASK_UNMAPPED_BASE;
5323+ if (start_addr != mm->mmap_base) {
5324+ start_addr = addr = mm->mmap_base;
5325 mm->cached_hole_size = 0;
5326 goto full_search;
5327 }
5328 return -ENOMEM;
5329 }
5330- if (likely(!vma || addr + len <= vma->vm_start)) {
5331+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5332 /*
5333 * Remember the place where we stopped the search:
5334 */
5335@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5336 /* We do not accept a shared mapping if it would violate
5337 * cache aliasing constraints.
5338 */
5339- if ((flags & MAP_SHARED) &&
5340+ if ((filp || (flags & MAP_SHARED)) &&
5341 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5342 return -EINVAL;
5343 return addr;
5344@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5345 addr = PAGE_ALIGN(addr);
5346
5347 vma = find_vma(mm, addr);
5348- if (task_size - len >= addr &&
5349- (!vma || addr + len <= vma->vm_start))
5350+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5351 return addr;
5352 }
5353
5354@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5355 /* make sure it can fit in the remaining address space */
5356 if (likely(addr > len)) {
5357 vma = find_vma(mm, addr-len);
5358- if (!vma || addr <= vma->vm_start) {
5359+ if (check_heap_stack_gap(vma, addr - len, len)) {
5360 /* remember the address as a hint for next time */
5361 return (mm->free_area_cache = addr-len);
5362 }
5363@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5364 if (unlikely(mm->mmap_base < len))
5365 goto bottomup;
5366
5367- addr = mm->mmap_base-len;
5368- if (do_color_align)
5369- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5370+ addr = mm->mmap_base - len;
5371
5372 do {
5373+ if (do_color_align)
5374+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5375 /*
5376 * Lookup failure means no vma is above this address,
5377 * else if new region fits below vma->vm_start,
5378 * return with success:
5379 */
5380 vma = find_vma(mm, addr);
5381- if (likely(!vma || addr+len <= vma->vm_start)) {
5382+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5383 /* remember the address as a hint for next time */
5384 return (mm->free_area_cache = addr);
5385 }
5386@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5387 mm->cached_hole_size = vma->vm_start - addr;
5388
5389 /* try just below the current vma->vm_start */
5390- addr = vma->vm_start-len;
5391- if (do_color_align)
5392- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5393- } while (likely(len < vma->vm_start));
5394+ addr = skip_heap_stack_gap(vma, len);
5395+ } while (!IS_ERR_VALUE(addr));
5396
5397 bottomup:
5398 /*
5399@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5400 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5401 sysctl_legacy_va_layout) {
5402 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5403+
5404+#ifdef CONFIG_PAX_RANDMMAP
5405+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5406+ mm->mmap_base += mm->delta_mmap;
5407+#endif
5408+
5409 mm->get_unmapped_area = arch_get_unmapped_area;
5410 mm->unmap_area = arch_unmap_area;
5411 } else {
5412@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5413 gap = (task_size / 6 * 5);
5414
5415 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5416+
5417+#ifdef CONFIG_PAX_RANDMMAP
5418+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5419+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5420+#endif
5421+
5422 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5423 mm->unmap_area = arch_unmap_area_topdown;
5424 }
5425diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5426index c0490c7..84959d1 100644
5427--- a/arch/sparc/kernel/traps_32.c
5428+++ b/arch/sparc/kernel/traps_32.c
5429@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5430 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5431 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5432
5433+extern void gr_handle_kernel_exploit(void);
5434+
5435 void die_if_kernel(char *str, struct pt_regs *regs)
5436 {
5437 static int die_counter;
5438@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5439 count++ < 30 &&
5440 (((unsigned long) rw) >= PAGE_OFFSET) &&
5441 !(((unsigned long) rw) & 0x7)) {
5442- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5443+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5444 (void *) rw->ins[7]);
5445 rw = (struct reg_window32 *)rw->ins[6];
5446 }
5447 }
5448 printk("Instruction DUMP:");
5449 instruction_dump ((unsigned long *) regs->pc);
5450- if(regs->psr & PSR_PS)
5451+ if(regs->psr & PSR_PS) {
5452+ gr_handle_kernel_exploit();
5453 do_exit(SIGKILL);
5454+ }
5455 do_exit(SIGSEGV);
5456 }
5457
5458diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5459index 10f7bb9..cdb6793 100644
5460--- a/arch/sparc/kernel/traps_64.c
5461+++ b/arch/sparc/kernel/traps_64.c
5462@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5463 i + 1,
5464 p->trapstack[i].tstate, p->trapstack[i].tpc,
5465 p->trapstack[i].tnpc, p->trapstack[i].tt);
5466- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5467+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5468 }
5469 }
5470
5471@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5472
5473 lvl -= 0x100;
5474 if (regs->tstate & TSTATE_PRIV) {
5475+
5476+#ifdef CONFIG_PAX_REFCOUNT
5477+ if (lvl == 6)
5478+ pax_report_refcount_overflow(regs);
5479+#endif
5480+
5481 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5482 die_if_kernel(buffer, regs);
5483 }
5484@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5485 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5486 {
5487 char buffer[32];
5488-
5489+
5490 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5491 0, lvl, SIGTRAP) == NOTIFY_STOP)
5492 return;
5493
5494+#ifdef CONFIG_PAX_REFCOUNT
5495+ if (lvl == 6)
5496+ pax_report_refcount_overflow(regs);
5497+#endif
5498+
5499 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5500
5501 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5502@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5503 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5504 printk("%s" "ERROR(%d): ",
5505 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5506- printk("TPC<%pS>\n", (void *) regs->tpc);
5507+ printk("TPC<%pA>\n", (void *) regs->tpc);
5508 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5509 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5510 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5511@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5512 smp_processor_id(),
5513 (type & 0x1) ? 'I' : 'D',
5514 regs->tpc);
5515- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5516+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5517 panic("Irrecoverable Cheetah+ parity error.");
5518 }
5519
5520@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5521 smp_processor_id(),
5522 (type & 0x1) ? 'I' : 'D',
5523 regs->tpc);
5524- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5525+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5526 }
5527
5528 struct sun4v_error_entry {
5529@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5530
5531 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5532 regs->tpc, tl);
5533- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5534+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5535 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5536- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5537+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5538 (void *) regs->u_regs[UREG_I7]);
5539 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5540 "pte[%lx] error[%lx]\n",
5541@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5542
5543 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5544 regs->tpc, tl);
5545- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5546+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5547 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5548- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5549+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5550 (void *) regs->u_regs[UREG_I7]);
5551 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5552 "pte[%lx] error[%lx]\n",
5553@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5554 fp = (unsigned long)sf->fp + STACK_BIAS;
5555 }
5556
5557- printk(" [%016lx] %pS\n", pc, (void *) pc);
5558+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5559 } while (++count < 16);
5560 }
5561
5562@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5563 return (struct reg_window *) (fp + STACK_BIAS);
5564 }
5565
5566+extern void gr_handle_kernel_exploit(void);
5567+
5568 void die_if_kernel(char *str, struct pt_regs *regs)
5569 {
5570 static int die_counter;
5571@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5572 while (rw &&
5573 count++ < 30&&
5574 is_kernel_stack(current, rw)) {
5575- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5576+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5577 (void *) rw->ins[7]);
5578
5579 rw = kernel_stack_up(rw);
5580@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5581 }
5582 user_instruction_dump ((unsigned int __user *) regs->tpc);
5583 }
5584- if (regs->tstate & TSTATE_PRIV)
5585+ if (regs->tstate & TSTATE_PRIV) {
5586+ gr_handle_kernel_exploit();
5587 do_exit(SIGKILL);
5588+ }
5589+
5590 do_exit(SIGSEGV);
5591 }
5592 EXPORT_SYMBOL(die_if_kernel);
5593diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5594index be183fe..1c8d332 100644
5595--- a/arch/sparc/kernel/una_asm_64.S
5596+++ b/arch/sparc/kernel/una_asm_64.S
5597@@ -127,7 +127,7 @@ do_int_load:
5598 wr %o5, 0x0, %asi
5599 retl
5600 mov 0, %o0
5601- .size __do_int_load, .-__do_int_load
5602+ .size do_int_load, .-do_int_load
5603
5604 .section __ex_table,"a"
5605 .word 4b, __retl_efault
5606diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5607index 3792099..2af17d8 100644
5608--- a/arch/sparc/kernel/unaligned_64.c
5609+++ b/arch/sparc/kernel/unaligned_64.c
5610@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5611 if (count < 5) {
5612 last_time = jiffies;
5613 count++;
5614- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5615+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5616 regs->tpc, (void *) regs->tpc);
5617 }
5618 }
5619diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5620index e75faf0..24f12f9 100644
5621--- a/arch/sparc/lib/Makefile
5622+++ b/arch/sparc/lib/Makefile
5623@@ -2,7 +2,7 @@
5624 #
5625
5626 asflags-y := -ansi -DST_DIV0=0x02
5627-ccflags-y := -Werror
5628+#ccflags-y := -Werror
5629
5630 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5631 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5632diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5633index 0268210..f0291ca 100644
5634--- a/arch/sparc/lib/atomic_64.S
5635+++ b/arch/sparc/lib/atomic_64.S
5636@@ -18,7 +18,12 @@
5637 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5638 BACKOFF_SETUP(%o2)
5639 1: lduw [%o1], %g1
5640- add %g1, %o0, %g7
5641+ addcc %g1, %o0, %g7
5642+
5643+#ifdef CONFIG_PAX_REFCOUNT
5644+ tvs %icc, 6
5645+#endif
5646+
5647 cas [%o1], %g1, %g7
5648 cmp %g1, %g7
5649 bne,pn %icc, 2f
5650@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5651 2: BACKOFF_SPIN(%o2, %o3, 1b)
5652 .size atomic_add, .-atomic_add
5653
5654+ .globl atomic_add_unchecked
5655+ .type atomic_add_unchecked,#function
5656+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5657+ BACKOFF_SETUP(%o2)
5658+1: lduw [%o1], %g1
5659+ add %g1, %o0, %g7
5660+ cas [%o1], %g1, %g7
5661+ cmp %g1, %g7
5662+ bne,pn %icc, 2f
5663+ nop
5664+ retl
5665+ nop
5666+2: BACKOFF_SPIN(%o2, %o3, 1b)
5667+ .size atomic_add_unchecked, .-atomic_add_unchecked
5668+
5669 .globl atomic_sub
5670 .type atomic_sub,#function
5671 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5672 BACKOFF_SETUP(%o2)
5673 1: lduw [%o1], %g1
5674- sub %g1, %o0, %g7
5675+ subcc %g1, %o0, %g7
5676+
5677+#ifdef CONFIG_PAX_REFCOUNT
5678+ tvs %icc, 6
5679+#endif
5680+
5681 cas [%o1], %g1, %g7
5682 cmp %g1, %g7
5683 bne,pn %icc, 2f
5684@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5685 2: BACKOFF_SPIN(%o2, %o3, 1b)
5686 .size atomic_sub, .-atomic_sub
5687
5688+ .globl atomic_sub_unchecked
5689+ .type atomic_sub_unchecked,#function
5690+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5691+ BACKOFF_SETUP(%o2)
5692+1: lduw [%o1], %g1
5693+ sub %g1, %o0, %g7
5694+ cas [%o1], %g1, %g7
5695+ cmp %g1, %g7
5696+ bne,pn %icc, 2f
5697+ nop
5698+ retl
5699+ nop
5700+2: BACKOFF_SPIN(%o2, %o3, 1b)
5701+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5702+
5703 .globl atomic_add_ret
5704 .type atomic_add_ret,#function
5705 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5706 BACKOFF_SETUP(%o2)
5707 1: lduw [%o1], %g1
5708- add %g1, %o0, %g7
5709+ addcc %g1, %o0, %g7
5710+
5711+#ifdef CONFIG_PAX_REFCOUNT
5712+ tvs %icc, 6
5713+#endif
5714+
5715 cas [%o1], %g1, %g7
5716 cmp %g1, %g7
5717 bne,pn %icc, 2f
5718@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5719 2: BACKOFF_SPIN(%o2, %o3, 1b)
5720 .size atomic_add_ret, .-atomic_add_ret
5721
5722+ .globl atomic_add_ret_unchecked
5723+ .type atomic_add_ret_unchecked,#function
5724+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5725+ BACKOFF_SETUP(%o2)
5726+1: lduw [%o1], %g1
5727+ addcc %g1, %o0, %g7
5728+ cas [%o1], %g1, %g7
5729+ cmp %g1, %g7
5730+ bne,pn %icc, 2f
5731+ add %g7, %o0, %g7
5732+ sra %g7, 0, %o0
5733+ retl
5734+ nop
5735+2: BACKOFF_SPIN(%o2, %o3, 1b)
5736+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5737+
5738 .globl atomic_sub_ret
5739 .type atomic_sub_ret,#function
5740 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5741 BACKOFF_SETUP(%o2)
5742 1: lduw [%o1], %g1
5743- sub %g1, %o0, %g7
5744+ subcc %g1, %o0, %g7
5745+
5746+#ifdef CONFIG_PAX_REFCOUNT
5747+ tvs %icc, 6
5748+#endif
5749+
5750 cas [%o1], %g1, %g7
5751 cmp %g1, %g7
5752 bne,pn %icc, 2f
5753@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5754 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5755 BACKOFF_SETUP(%o2)
5756 1: ldx [%o1], %g1
5757- add %g1, %o0, %g7
5758+ addcc %g1, %o0, %g7
5759+
5760+#ifdef CONFIG_PAX_REFCOUNT
5761+ tvs %xcc, 6
5762+#endif
5763+
5764 casx [%o1], %g1, %g7
5765 cmp %g1, %g7
5766 bne,pn %xcc, 2f
5767@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5768 2: BACKOFF_SPIN(%o2, %o3, 1b)
5769 .size atomic64_add, .-atomic64_add
5770
5771+ .globl atomic64_add_unchecked
5772+ .type atomic64_add_unchecked,#function
5773+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5774+ BACKOFF_SETUP(%o2)
5775+1: ldx [%o1], %g1
5776+ addcc %g1, %o0, %g7
5777+ casx [%o1], %g1, %g7
5778+ cmp %g1, %g7
5779+ bne,pn %xcc, 2f
5780+ nop
5781+ retl
5782+ nop
5783+2: BACKOFF_SPIN(%o2, %o3, 1b)
5784+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5785+
5786 .globl atomic64_sub
5787 .type atomic64_sub,#function
5788 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5789 BACKOFF_SETUP(%o2)
5790 1: ldx [%o1], %g1
5791- sub %g1, %o0, %g7
5792+ subcc %g1, %o0, %g7
5793+
5794+#ifdef CONFIG_PAX_REFCOUNT
5795+ tvs %xcc, 6
5796+#endif
5797+
5798 casx [%o1], %g1, %g7
5799 cmp %g1, %g7
5800 bne,pn %xcc, 2f
5801@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5802 2: BACKOFF_SPIN(%o2, %o3, 1b)
5803 .size atomic64_sub, .-atomic64_sub
5804
5805+ .globl atomic64_sub_unchecked
5806+ .type atomic64_sub_unchecked,#function
5807+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5808+ BACKOFF_SETUP(%o2)
5809+1: ldx [%o1], %g1
5810+ subcc %g1, %o0, %g7
5811+ casx [%o1], %g1, %g7
5812+ cmp %g1, %g7
5813+ bne,pn %xcc, 2f
5814+ nop
5815+ retl
5816+ nop
5817+2: BACKOFF_SPIN(%o2, %o3, 1b)
5818+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5819+
5820 .globl atomic64_add_ret
5821 .type atomic64_add_ret,#function
5822 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5823 BACKOFF_SETUP(%o2)
5824 1: ldx [%o1], %g1
5825- add %g1, %o0, %g7
5826+ addcc %g1, %o0, %g7
5827+
5828+#ifdef CONFIG_PAX_REFCOUNT
5829+ tvs %xcc, 6
5830+#endif
5831+
5832 casx [%o1], %g1, %g7
5833 cmp %g1, %g7
5834 bne,pn %xcc, 2f
5835@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5836 2: BACKOFF_SPIN(%o2, %o3, 1b)
5837 .size atomic64_add_ret, .-atomic64_add_ret
5838
5839+ .globl atomic64_add_ret_unchecked
5840+ .type atomic64_add_ret_unchecked,#function
5841+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5842+ BACKOFF_SETUP(%o2)
5843+1: ldx [%o1], %g1
5844+ addcc %g1, %o0, %g7
5845+ casx [%o1], %g1, %g7
5846+ cmp %g1, %g7
5847+ bne,pn %xcc, 2f
5848+ add %g7, %o0, %g7
5849+ mov %g7, %o0
5850+ retl
5851+ nop
5852+2: BACKOFF_SPIN(%o2, %o3, 1b)
5853+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5854+
5855 .globl atomic64_sub_ret
5856 .type atomic64_sub_ret,#function
5857 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5858 BACKOFF_SETUP(%o2)
5859 1: ldx [%o1], %g1
5860- sub %g1, %o0, %g7
5861+ subcc %g1, %o0, %g7
5862+
5863+#ifdef CONFIG_PAX_REFCOUNT
5864+ tvs %xcc, 6
5865+#endif
5866+
5867 casx [%o1], %g1, %g7
5868 cmp %g1, %g7
5869 bne,pn %xcc, 2f
5870diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5871index 704b126..2e79d76 100644
5872--- a/arch/sparc/lib/ksyms.c
5873+++ b/arch/sparc/lib/ksyms.c
5874@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5875
5876 /* Atomic counter implementation. */
5877 EXPORT_SYMBOL(atomic_add);
5878+EXPORT_SYMBOL(atomic_add_unchecked);
5879 EXPORT_SYMBOL(atomic_add_ret);
5880+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5881 EXPORT_SYMBOL(atomic_sub);
5882+EXPORT_SYMBOL(atomic_sub_unchecked);
5883 EXPORT_SYMBOL(atomic_sub_ret);
5884 EXPORT_SYMBOL(atomic64_add);
5885+EXPORT_SYMBOL(atomic64_add_unchecked);
5886 EXPORT_SYMBOL(atomic64_add_ret);
5887+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5888 EXPORT_SYMBOL(atomic64_sub);
5889+EXPORT_SYMBOL(atomic64_sub_unchecked);
5890 EXPORT_SYMBOL(atomic64_sub_ret);
5891
5892 /* Atomic bit operations. */
5893diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5894index 91a7d29..ce75c29 100644
5895--- a/arch/sparc/lib/rwsem_64.S
5896+++ b/arch/sparc/lib/rwsem_64.S
5897@@ -11,7 +11,12 @@
5898 .globl __down_read
5899 __down_read:
5900 1: lduw [%o0], %g1
5901- add %g1, 1, %g7
5902+ addcc %g1, 1, %g7
5903+
5904+#ifdef CONFIG_PAX_REFCOUNT
5905+ tvs %icc, 6
5906+#endif
5907+
5908 cas [%o0], %g1, %g7
5909 cmp %g1, %g7
5910 bne,pn %icc, 1b
5911@@ -33,7 +38,12 @@ __down_read:
5912 .globl __down_read_trylock
5913 __down_read_trylock:
5914 1: lduw [%o0], %g1
5915- add %g1, 1, %g7
5916+ addcc %g1, 1, %g7
5917+
5918+#ifdef CONFIG_PAX_REFCOUNT
5919+ tvs %icc, 6
5920+#endif
5921+
5922 cmp %g7, 0
5923 bl,pn %icc, 2f
5924 mov 0, %o1
5925@@ -51,7 +61,12 @@ __down_write:
5926 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5927 1:
5928 lduw [%o0], %g3
5929- add %g3, %g1, %g7
5930+ addcc %g3, %g1, %g7
5931+
5932+#ifdef CONFIG_PAX_REFCOUNT
5933+ tvs %icc, 6
5934+#endif
5935+
5936 cas [%o0], %g3, %g7
5937 cmp %g3, %g7
5938 bne,pn %icc, 1b
5939@@ -77,7 +92,12 @@ __down_write_trylock:
5940 cmp %g3, 0
5941 bne,pn %icc, 2f
5942 mov 0, %o1
5943- add %g3, %g1, %g7
5944+ addcc %g3, %g1, %g7
5945+
5946+#ifdef CONFIG_PAX_REFCOUNT
5947+ tvs %icc, 6
5948+#endif
5949+
5950 cas [%o0], %g3, %g7
5951 cmp %g3, %g7
5952 bne,pn %icc, 1b
5953@@ -90,7 +110,12 @@ __down_write_trylock:
5954 __up_read:
5955 1:
5956 lduw [%o0], %g1
5957- sub %g1, 1, %g7
5958+ subcc %g1, 1, %g7
5959+
5960+#ifdef CONFIG_PAX_REFCOUNT
5961+ tvs %icc, 6
5962+#endif
5963+
5964 cas [%o0], %g1, %g7
5965 cmp %g1, %g7
5966 bne,pn %icc, 1b
5967@@ -118,7 +143,12 @@ __up_write:
5968 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5969 1:
5970 lduw [%o0], %g3
5971- sub %g3, %g1, %g7
5972+ subcc %g3, %g1, %g7
5973+
5974+#ifdef CONFIG_PAX_REFCOUNT
5975+ tvs %icc, 6
5976+#endif
5977+
5978 cas [%o0], %g3, %g7
5979 cmp %g3, %g7
5980 bne,pn %icc, 1b
5981@@ -143,7 +173,12 @@ __downgrade_write:
5982 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5983 1:
5984 lduw [%o0], %g3
5985- sub %g3, %g1, %g7
5986+ subcc %g3, %g1, %g7
5987+
5988+#ifdef CONFIG_PAX_REFCOUNT
5989+ tvs %icc, 6
5990+#endif
5991+
5992 cas [%o0], %g3, %g7
5993 cmp %g3, %g7
5994 bne,pn %icc, 1b
5995diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5996index 79836a7..62f47a2 100644
5997--- a/arch/sparc/mm/Makefile
5998+++ b/arch/sparc/mm/Makefile
5999@@ -2,7 +2,7 @@
6000 #
6001
6002 asflags-y := -ansi
6003-ccflags-y := -Werror
6004+#ccflags-y := -Werror
6005
6006 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6007 obj-y += fault_$(BITS).o
6008diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6009index b99f81c..3453e93 100644
6010--- a/arch/sparc/mm/fault_32.c
6011+++ b/arch/sparc/mm/fault_32.c
6012@@ -21,6 +21,9 @@
6013 #include <linux/interrupt.h>
6014 #include <linux/module.h>
6015 #include <linux/kdebug.h>
6016+#include <linux/slab.h>
6017+#include <linux/pagemap.h>
6018+#include <linux/compiler.h>
6019
6020 #include <asm/system.h>
6021 #include <asm/page.h>
6022@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6023 return safe_compute_effective_address(regs, insn);
6024 }
6025
6026+#ifdef CONFIG_PAX_PAGEEXEC
6027+#ifdef CONFIG_PAX_DLRESOLVE
6028+static void pax_emuplt_close(struct vm_area_struct *vma)
6029+{
6030+ vma->vm_mm->call_dl_resolve = 0UL;
6031+}
6032+
6033+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6034+{
6035+ unsigned int *kaddr;
6036+
6037+ vmf->page = alloc_page(GFP_HIGHUSER);
6038+ if (!vmf->page)
6039+ return VM_FAULT_OOM;
6040+
6041+ kaddr = kmap(vmf->page);
6042+ memset(kaddr, 0, PAGE_SIZE);
6043+ kaddr[0] = 0x9DE3BFA8U; /* save */
6044+ flush_dcache_page(vmf->page);
6045+ kunmap(vmf->page);
6046+ return VM_FAULT_MAJOR;
6047+}
6048+
6049+static const struct vm_operations_struct pax_vm_ops = {
6050+ .close = pax_emuplt_close,
6051+ .fault = pax_emuplt_fault
6052+};
6053+
6054+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6055+{
6056+ int ret;
6057+
6058+ vma->vm_mm = current->mm;
6059+ vma->vm_start = addr;
6060+ vma->vm_end = addr + PAGE_SIZE;
6061+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6062+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6063+ vma->vm_ops = &pax_vm_ops;
6064+
6065+ ret = insert_vm_struct(current->mm, vma);
6066+ if (ret)
6067+ return ret;
6068+
6069+ ++current->mm->total_vm;
6070+ return 0;
6071+}
6072+#endif
6073+
6074+/*
6075+ * PaX: decide what to do with offenders (regs->pc = fault address)
6076+ *
6077+ * returns 1 when task should be killed
6078+ * 2 when patched PLT trampoline was detected
6079+ * 3 when unpatched PLT trampoline was detected
6080+ */
6081+static int pax_handle_fetch_fault(struct pt_regs *regs)
6082+{
6083+
6084+#ifdef CONFIG_PAX_EMUPLT
6085+ int err;
6086+
6087+ do { /* PaX: patched PLT emulation #1 */
6088+ unsigned int sethi1, sethi2, jmpl;
6089+
6090+ err = get_user(sethi1, (unsigned int *)regs->pc);
6091+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6092+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6093+
6094+ if (err)
6095+ break;
6096+
6097+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6098+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6099+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6100+ {
6101+ unsigned int addr;
6102+
6103+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6104+ addr = regs->u_regs[UREG_G1];
6105+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6106+ regs->pc = addr;
6107+ regs->npc = addr+4;
6108+ return 2;
6109+ }
6110+ } while (0);
6111+
6112+ { /* PaX: patched PLT emulation #2 */
6113+ unsigned int ba;
6114+
6115+ err = get_user(ba, (unsigned int *)regs->pc);
6116+
6117+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6118+ unsigned int addr;
6119+
6120+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6121+ regs->pc = addr;
6122+ regs->npc = addr+4;
6123+ return 2;
6124+ }
6125+ }
6126+
6127+ do { /* PaX: patched PLT emulation #3 */
6128+ unsigned int sethi, jmpl, nop;
6129+
6130+ err = get_user(sethi, (unsigned int *)regs->pc);
6131+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6132+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6133+
6134+ if (err)
6135+ break;
6136+
6137+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6138+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6139+ nop == 0x01000000U)
6140+ {
6141+ unsigned int addr;
6142+
6143+ addr = (sethi & 0x003FFFFFU) << 10;
6144+ regs->u_regs[UREG_G1] = addr;
6145+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6146+ regs->pc = addr;
6147+ regs->npc = addr+4;
6148+ return 2;
6149+ }
6150+ } while (0);
6151+
6152+ do { /* PaX: unpatched PLT emulation step 1 */
6153+ unsigned int sethi, ba, nop;
6154+
6155+ err = get_user(sethi, (unsigned int *)regs->pc);
6156+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6157+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6158+
6159+ if (err)
6160+ break;
6161+
6162+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6163+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6164+ nop == 0x01000000U)
6165+ {
6166+ unsigned int addr, save, call;
6167+
6168+ if ((ba & 0xFFC00000U) == 0x30800000U)
6169+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6170+ else
6171+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6172+
6173+ err = get_user(save, (unsigned int *)addr);
6174+ err |= get_user(call, (unsigned int *)(addr+4));
6175+ err |= get_user(nop, (unsigned int *)(addr+8));
6176+ if (err)
6177+ break;
6178+
6179+#ifdef CONFIG_PAX_DLRESOLVE
6180+ if (save == 0x9DE3BFA8U &&
6181+ (call & 0xC0000000U) == 0x40000000U &&
6182+ nop == 0x01000000U)
6183+ {
6184+ struct vm_area_struct *vma;
6185+ unsigned long call_dl_resolve;
6186+
6187+ down_read(&current->mm->mmap_sem);
6188+ call_dl_resolve = current->mm->call_dl_resolve;
6189+ up_read(&current->mm->mmap_sem);
6190+ if (likely(call_dl_resolve))
6191+ goto emulate;
6192+
6193+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6194+
6195+ down_write(&current->mm->mmap_sem);
6196+ if (current->mm->call_dl_resolve) {
6197+ call_dl_resolve = current->mm->call_dl_resolve;
6198+ up_write(&current->mm->mmap_sem);
6199+ if (vma)
6200+ kmem_cache_free(vm_area_cachep, vma);
6201+ goto emulate;
6202+ }
6203+
6204+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6205+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6206+ up_write(&current->mm->mmap_sem);
6207+ if (vma)
6208+ kmem_cache_free(vm_area_cachep, vma);
6209+ return 1;
6210+ }
6211+
6212+ if (pax_insert_vma(vma, call_dl_resolve)) {
6213+ up_write(&current->mm->mmap_sem);
6214+ kmem_cache_free(vm_area_cachep, vma);
6215+ return 1;
6216+ }
6217+
6218+ current->mm->call_dl_resolve = call_dl_resolve;
6219+ up_write(&current->mm->mmap_sem);
6220+
6221+emulate:
6222+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6223+ regs->pc = call_dl_resolve;
6224+ regs->npc = addr+4;
6225+ return 3;
6226+ }
6227+#endif
6228+
6229+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6230+ if ((save & 0xFFC00000U) == 0x05000000U &&
6231+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6232+ nop == 0x01000000U)
6233+ {
6234+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6235+ regs->u_regs[UREG_G2] = addr + 4;
6236+ addr = (save & 0x003FFFFFU) << 10;
6237+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6238+ regs->pc = addr;
6239+ regs->npc = addr+4;
6240+ return 3;
6241+ }
6242+ }
6243+ } while (0);
6244+
6245+ do { /* PaX: unpatched PLT emulation step 2 */
6246+ unsigned int save, call, nop;
6247+
6248+ err = get_user(save, (unsigned int *)(regs->pc-4));
6249+ err |= get_user(call, (unsigned int *)regs->pc);
6250+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6251+ if (err)
6252+ break;
6253+
6254+ if (save == 0x9DE3BFA8U &&
6255+ (call & 0xC0000000U) == 0x40000000U &&
6256+ nop == 0x01000000U)
6257+ {
6258+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6259+
6260+ regs->u_regs[UREG_RETPC] = regs->pc;
6261+ regs->pc = dl_resolve;
6262+ regs->npc = dl_resolve+4;
6263+ return 3;
6264+ }
6265+ } while (0);
6266+#endif
6267+
6268+ return 1;
6269+}
6270+
6271+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6272+{
6273+ unsigned long i;
6274+
6275+ printk(KERN_ERR "PAX: bytes at PC: ");
6276+ for (i = 0; i < 8; i++) {
6277+ unsigned int c;
6278+ if (get_user(c, (unsigned int *)pc+i))
6279+ printk(KERN_CONT "???????? ");
6280+ else
6281+ printk(KERN_CONT "%08x ", c);
6282+ }
6283+ printk("\n");
6284+}
6285+#endif
6286+
6287 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6288 unsigned long address)
6289 {
6290@@ -231,6 +495,24 @@ good_area:
6291 if(!(vma->vm_flags & VM_WRITE))
6292 goto bad_area;
6293 } else {
6294+
6295+#ifdef CONFIG_PAX_PAGEEXEC
6296+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6297+ up_read(&mm->mmap_sem);
6298+ switch (pax_handle_fetch_fault(regs)) {
6299+
6300+#ifdef CONFIG_PAX_EMUPLT
6301+ case 2:
6302+ case 3:
6303+ return;
6304+#endif
6305+
6306+ }
6307+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6308+ do_group_exit(SIGKILL);
6309+ }
6310+#endif
6311+
6312 /* Allow reads even for write-only mappings */
6313 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6314 goto bad_area;
6315diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6316index 43b0da9..a0b78f9 100644
6317--- a/arch/sparc/mm/fault_64.c
6318+++ b/arch/sparc/mm/fault_64.c
6319@@ -20,6 +20,9 @@
6320 #include <linux/kprobes.h>
6321 #include <linux/kdebug.h>
6322 #include <linux/percpu.h>
6323+#include <linux/slab.h>
6324+#include <linux/pagemap.h>
6325+#include <linux/compiler.h>
6326
6327 #include <asm/page.h>
6328 #include <asm/pgtable.h>
6329@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6330 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6331 regs->tpc);
6332 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6333- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6334+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6335 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6336 dump_stack();
6337 unhandled_fault(regs->tpc, current, regs);
6338@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6339 show_regs(regs);
6340 }
6341
6342+#ifdef CONFIG_PAX_PAGEEXEC
6343+#ifdef CONFIG_PAX_DLRESOLVE
6344+static void pax_emuplt_close(struct vm_area_struct *vma)
6345+{
6346+ vma->vm_mm->call_dl_resolve = 0UL;
6347+}
6348+
6349+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6350+{
6351+ unsigned int *kaddr;
6352+
6353+ vmf->page = alloc_page(GFP_HIGHUSER);
6354+ if (!vmf->page)
6355+ return VM_FAULT_OOM;
6356+
6357+ kaddr = kmap(vmf->page);
6358+ memset(kaddr, 0, PAGE_SIZE);
6359+ kaddr[0] = 0x9DE3BFA8U; /* save */
6360+ flush_dcache_page(vmf->page);
6361+ kunmap(vmf->page);
6362+ return VM_FAULT_MAJOR;
6363+}
6364+
6365+static const struct vm_operations_struct pax_vm_ops = {
6366+ .close = pax_emuplt_close,
6367+ .fault = pax_emuplt_fault
6368+};
6369+
6370+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6371+{
6372+ int ret;
6373+
6374+ vma->vm_mm = current->mm;
6375+ vma->vm_start = addr;
6376+ vma->vm_end = addr + PAGE_SIZE;
6377+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6378+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6379+ vma->vm_ops = &pax_vm_ops;
6380+
6381+ ret = insert_vm_struct(current->mm, vma);
6382+ if (ret)
6383+ return ret;
6384+
6385+ ++current->mm->total_vm;
6386+ return 0;
6387+}
6388+#endif
6389+
6390+/*
6391+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6392+ *
6393+ * returns 1 when task should be killed
6394+ * 2 when patched PLT trampoline was detected
6395+ * 3 when unpatched PLT trampoline was detected
6396+ */
6397+static int pax_handle_fetch_fault(struct pt_regs *regs)
6398+{
6399+
6400+#ifdef CONFIG_PAX_EMUPLT
6401+ int err;
6402+
6403+ do { /* PaX: patched PLT emulation #1 */
6404+ unsigned int sethi1, sethi2, jmpl;
6405+
6406+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6407+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6408+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6409+
6410+ if (err)
6411+ break;
6412+
6413+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6414+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6415+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6416+ {
6417+ unsigned long addr;
6418+
6419+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6420+ addr = regs->u_regs[UREG_G1];
6421+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6422+
6423+ if (test_thread_flag(TIF_32BIT))
6424+ addr &= 0xFFFFFFFFUL;
6425+
6426+ regs->tpc = addr;
6427+ regs->tnpc = addr+4;
6428+ return 2;
6429+ }
6430+ } while (0);
6431+
6432+ { /* PaX: patched PLT emulation #2 */
6433+ unsigned int ba;
6434+
6435+ err = get_user(ba, (unsigned int *)regs->tpc);
6436+
6437+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6438+ unsigned long addr;
6439+
6440+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6441+
6442+ if (test_thread_flag(TIF_32BIT))
6443+ addr &= 0xFFFFFFFFUL;
6444+
6445+ regs->tpc = addr;
6446+ regs->tnpc = addr+4;
6447+ return 2;
6448+ }
6449+ }
6450+
6451+ do { /* PaX: patched PLT emulation #3 */
6452+ unsigned int sethi, jmpl, nop;
6453+
6454+ err = get_user(sethi, (unsigned int *)regs->tpc);
6455+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6456+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6457+
6458+ if (err)
6459+ break;
6460+
6461+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6462+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6463+ nop == 0x01000000U)
6464+ {
6465+ unsigned long addr;
6466+
6467+ addr = (sethi & 0x003FFFFFU) << 10;
6468+ regs->u_regs[UREG_G1] = addr;
6469+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6470+
6471+ if (test_thread_flag(TIF_32BIT))
6472+ addr &= 0xFFFFFFFFUL;
6473+
6474+ regs->tpc = addr;
6475+ regs->tnpc = addr+4;
6476+ return 2;
6477+ }
6478+ } while (0);
6479+
6480+ do { /* PaX: patched PLT emulation #4 */
6481+ unsigned int sethi, mov1, call, mov2;
6482+
6483+ err = get_user(sethi, (unsigned int *)regs->tpc);
6484+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6485+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6486+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6487+
6488+ if (err)
6489+ break;
6490+
6491+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6492+ mov1 == 0x8210000FU &&
6493+ (call & 0xC0000000U) == 0x40000000U &&
6494+ mov2 == 0x9E100001U)
6495+ {
6496+ unsigned long addr;
6497+
6498+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6499+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6500+
6501+ if (test_thread_flag(TIF_32BIT))
6502+ addr &= 0xFFFFFFFFUL;
6503+
6504+ regs->tpc = addr;
6505+ regs->tnpc = addr+4;
6506+ return 2;
6507+ }
6508+ } while (0);
6509+
6510+ do { /* PaX: patched PLT emulation #5 */
6511+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6512+
6513+ err = get_user(sethi, (unsigned int *)regs->tpc);
6514+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6515+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6516+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6517+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6518+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6519+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6520+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6521+
6522+ if (err)
6523+ break;
6524+
6525+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6526+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6527+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6528+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6529+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6530+ sllx == 0x83287020U &&
6531+ jmpl == 0x81C04005U &&
6532+ nop == 0x01000000U)
6533+ {
6534+ unsigned long addr;
6535+
6536+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6537+ regs->u_regs[UREG_G1] <<= 32;
6538+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6539+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6540+ regs->tpc = addr;
6541+ regs->tnpc = addr+4;
6542+ return 2;
6543+ }
6544+ } while (0);
6545+
6546+ do { /* PaX: patched PLT emulation #6 */
6547+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6548+
6549+ err = get_user(sethi, (unsigned int *)regs->tpc);
6550+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6551+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6552+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6553+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6554+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6555+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6556+
6557+ if (err)
6558+ break;
6559+
6560+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6561+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6562+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6563+ sllx == 0x83287020U &&
6564+ (or & 0xFFFFE000U) == 0x8A116000U &&
6565+ jmpl == 0x81C04005U &&
6566+ nop == 0x01000000U)
6567+ {
6568+ unsigned long addr;
6569+
6570+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6571+ regs->u_regs[UREG_G1] <<= 32;
6572+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6573+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6574+ regs->tpc = addr;
6575+ regs->tnpc = addr+4;
6576+ return 2;
6577+ }
6578+ } while (0);
6579+
6580+ do { /* PaX: unpatched PLT emulation step 1 */
6581+ unsigned int sethi, ba, nop;
6582+
6583+ err = get_user(sethi, (unsigned int *)regs->tpc);
6584+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6585+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6586+
6587+ if (err)
6588+ break;
6589+
6590+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6591+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6592+ nop == 0x01000000U)
6593+ {
6594+ unsigned long addr;
6595+ unsigned int save, call;
6596+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6597+
6598+ if ((ba & 0xFFC00000U) == 0x30800000U)
6599+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6600+ else
6601+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6602+
6603+ if (test_thread_flag(TIF_32BIT))
6604+ addr &= 0xFFFFFFFFUL;
6605+
6606+ err = get_user(save, (unsigned int *)addr);
6607+ err |= get_user(call, (unsigned int *)(addr+4));
6608+ err |= get_user(nop, (unsigned int *)(addr+8));
6609+ if (err)
6610+ break;
6611+
6612+#ifdef CONFIG_PAX_DLRESOLVE
6613+ if (save == 0x9DE3BFA8U &&
6614+ (call & 0xC0000000U) == 0x40000000U &&
6615+ nop == 0x01000000U)
6616+ {
6617+ struct vm_area_struct *vma;
6618+ unsigned long call_dl_resolve;
6619+
6620+ down_read(&current->mm->mmap_sem);
6621+ call_dl_resolve = current->mm->call_dl_resolve;
6622+ up_read(&current->mm->mmap_sem);
6623+ if (likely(call_dl_resolve))
6624+ goto emulate;
6625+
6626+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6627+
6628+ down_write(&current->mm->mmap_sem);
6629+ if (current->mm->call_dl_resolve) {
6630+ call_dl_resolve = current->mm->call_dl_resolve;
6631+ up_write(&current->mm->mmap_sem);
6632+ if (vma)
6633+ kmem_cache_free(vm_area_cachep, vma);
6634+ goto emulate;
6635+ }
6636+
6637+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6638+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6639+ up_write(&current->mm->mmap_sem);
6640+ if (vma)
6641+ kmem_cache_free(vm_area_cachep, vma);
6642+ return 1;
6643+ }
6644+
6645+ if (pax_insert_vma(vma, call_dl_resolve)) {
6646+ up_write(&current->mm->mmap_sem);
6647+ kmem_cache_free(vm_area_cachep, vma);
6648+ return 1;
6649+ }
6650+
6651+ current->mm->call_dl_resolve = call_dl_resolve;
6652+ up_write(&current->mm->mmap_sem);
6653+
6654+emulate:
6655+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6656+ regs->tpc = call_dl_resolve;
6657+ regs->tnpc = addr+4;
6658+ return 3;
6659+ }
6660+#endif
6661+
6662+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6663+ if ((save & 0xFFC00000U) == 0x05000000U &&
6664+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6665+ nop == 0x01000000U)
6666+ {
6667+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6668+ regs->u_regs[UREG_G2] = addr + 4;
6669+ addr = (save & 0x003FFFFFU) << 10;
6670+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6671+
6672+ if (test_thread_flag(TIF_32BIT))
6673+ addr &= 0xFFFFFFFFUL;
6674+
6675+ regs->tpc = addr;
6676+ regs->tnpc = addr+4;
6677+ return 3;
6678+ }
6679+
6680+ /* PaX: 64-bit PLT stub */
6681+ err = get_user(sethi1, (unsigned int *)addr);
6682+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6683+ err |= get_user(or1, (unsigned int *)(addr+8));
6684+ err |= get_user(or2, (unsigned int *)(addr+12));
6685+ err |= get_user(sllx, (unsigned int *)(addr+16));
6686+ err |= get_user(add, (unsigned int *)(addr+20));
6687+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6688+ err |= get_user(nop, (unsigned int *)(addr+28));
6689+ if (err)
6690+ break;
6691+
6692+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6693+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6694+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6695+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6696+ sllx == 0x89293020U &&
6697+ add == 0x8A010005U &&
6698+ jmpl == 0x89C14000U &&
6699+ nop == 0x01000000U)
6700+ {
6701+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6702+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6703+ regs->u_regs[UREG_G4] <<= 32;
6704+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6705+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6706+ regs->u_regs[UREG_G4] = addr + 24;
6707+ addr = regs->u_regs[UREG_G5];
6708+ regs->tpc = addr;
6709+ regs->tnpc = addr+4;
6710+ return 3;
6711+ }
6712+ }
6713+ } while (0);
6714+
6715+#ifdef CONFIG_PAX_DLRESOLVE
6716+ do { /* PaX: unpatched PLT emulation step 2 */
6717+ unsigned int save, call, nop;
6718+
6719+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6720+ err |= get_user(call, (unsigned int *)regs->tpc);
6721+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6722+ if (err)
6723+ break;
6724+
6725+ if (save == 0x9DE3BFA8U &&
6726+ (call & 0xC0000000U) == 0x40000000U &&
6727+ nop == 0x01000000U)
6728+ {
6729+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6730+
6731+ if (test_thread_flag(TIF_32BIT))
6732+ dl_resolve &= 0xFFFFFFFFUL;
6733+
6734+ regs->u_regs[UREG_RETPC] = regs->tpc;
6735+ regs->tpc = dl_resolve;
6736+ regs->tnpc = dl_resolve+4;
6737+ return 3;
6738+ }
6739+ } while (0);
6740+#endif
6741+
6742+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6743+ unsigned int sethi, ba, nop;
6744+
6745+ err = get_user(sethi, (unsigned int *)regs->tpc);
6746+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6747+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6748+
6749+ if (err)
6750+ break;
6751+
6752+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6753+ (ba & 0xFFF00000U) == 0x30600000U &&
6754+ nop == 0x01000000U)
6755+ {
6756+ unsigned long addr;
6757+
6758+ addr = (sethi & 0x003FFFFFU) << 10;
6759+ regs->u_regs[UREG_G1] = addr;
6760+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6761+
6762+ if (test_thread_flag(TIF_32BIT))
6763+ addr &= 0xFFFFFFFFUL;
6764+
6765+ regs->tpc = addr;
6766+ regs->tnpc = addr+4;
6767+ return 2;
6768+ }
6769+ } while (0);
6770+
6771+#endif
6772+
6773+ return 1;
6774+}
6775+
6776+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6777+{
6778+ unsigned long i;
6779+
6780+ printk(KERN_ERR "PAX: bytes at PC: ");
6781+ for (i = 0; i < 8; i++) {
6782+ unsigned int c;
6783+ if (get_user(c, (unsigned int *)pc+i))
6784+ printk(KERN_CONT "???????? ");
6785+ else
6786+ printk(KERN_CONT "%08x ", c);
6787+ }
6788+ printk("\n");
6789+}
6790+#endif
6791+
6792 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6793 {
6794 struct mm_struct *mm = current->mm;
6795@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6796 if (!vma)
6797 goto bad_area;
6798
6799+#ifdef CONFIG_PAX_PAGEEXEC
6800+ /* PaX: detect ITLB misses on non-exec pages */
6801+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6802+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6803+ {
6804+ if (address != regs->tpc)
6805+ goto good_area;
6806+
6807+ up_read(&mm->mmap_sem);
6808+ switch (pax_handle_fetch_fault(regs)) {
6809+
6810+#ifdef CONFIG_PAX_EMUPLT
6811+ case 2:
6812+ case 3:
6813+ return;
6814+#endif
6815+
6816+ }
6817+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6818+ do_group_exit(SIGKILL);
6819+ }
6820+#endif
6821+
6822 /* Pure DTLB misses do not tell us whether the fault causing
6823 * load/store/atomic was a write or not, it only says that there
6824 * was no match. So in such a case we (carefully) read the
6825diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6826index f27d103..1b06377 100644
6827--- a/arch/sparc/mm/hugetlbpage.c
6828+++ b/arch/sparc/mm/hugetlbpage.c
6829@@ -69,7 +69,7 @@ full_search:
6830 }
6831 return -ENOMEM;
6832 }
6833- if (likely(!vma || addr + len <= vma->vm_start)) {
6834+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6835 /*
6836 * Remember the place where we stopped the search:
6837 */
6838@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6839 /* make sure it can fit in the remaining address space */
6840 if (likely(addr > len)) {
6841 vma = find_vma(mm, addr-len);
6842- if (!vma || addr <= vma->vm_start) {
6843+ if (check_heap_stack_gap(vma, addr - len, len)) {
6844 /* remember the address as a hint for next time */
6845 return (mm->free_area_cache = addr-len);
6846 }
6847@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6848 if (unlikely(mm->mmap_base < len))
6849 goto bottomup;
6850
6851- addr = (mm->mmap_base-len) & HPAGE_MASK;
6852+ addr = mm->mmap_base - len;
6853
6854 do {
6855+ addr &= HPAGE_MASK;
6856 /*
6857 * Lookup failure means no vma is above this address,
6858 * else if new region fits below vma->vm_start,
6859 * return with success:
6860 */
6861 vma = find_vma(mm, addr);
6862- if (likely(!vma || addr+len <= vma->vm_start)) {
6863+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6864 /* remember the address as a hint for next time */
6865 return (mm->free_area_cache = addr);
6866 }
6867@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6868 mm->cached_hole_size = vma->vm_start - addr;
6869
6870 /* try just below the current vma->vm_start */
6871- addr = (vma->vm_start-len) & HPAGE_MASK;
6872- } while (likely(len < vma->vm_start));
6873+ addr = skip_heap_stack_gap(vma, len);
6874+ } while (!IS_ERR_VALUE(addr));
6875
6876 bottomup:
6877 /*
6878@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6879 if (addr) {
6880 addr = ALIGN(addr, HPAGE_SIZE);
6881 vma = find_vma(mm, addr);
6882- if (task_size - len >= addr &&
6883- (!vma || addr + len <= vma->vm_start))
6884+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6885 return addr;
6886 }
6887 if (mm->get_unmapped_area == arch_get_unmapped_area)
6888diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6889index dc7c3b1..34c0070 100644
6890--- a/arch/sparc/mm/init_32.c
6891+++ b/arch/sparc/mm/init_32.c
6892@@ -317,6 +317,9 @@ extern void device_scan(void);
6893 pgprot_t PAGE_SHARED __read_mostly;
6894 EXPORT_SYMBOL(PAGE_SHARED);
6895
6896+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6897+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6898+
6899 void __init paging_init(void)
6900 {
6901 switch(sparc_cpu_model) {
6902@@ -345,17 +348,17 @@ void __init paging_init(void)
6903
6904 /* Initialize the protection map with non-constant, MMU dependent values. */
6905 protection_map[0] = PAGE_NONE;
6906- protection_map[1] = PAGE_READONLY;
6907- protection_map[2] = PAGE_COPY;
6908- protection_map[3] = PAGE_COPY;
6909+ protection_map[1] = PAGE_READONLY_NOEXEC;
6910+ protection_map[2] = PAGE_COPY_NOEXEC;
6911+ protection_map[3] = PAGE_COPY_NOEXEC;
6912 protection_map[4] = PAGE_READONLY;
6913 protection_map[5] = PAGE_READONLY;
6914 protection_map[6] = PAGE_COPY;
6915 protection_map[7] = PAGE_COPY;
6916 protection_map[8] = PAGE_NONE;
6917- protection_map[9] = PAGE_READONLY;
6918- protection_map[10] = PAGE_SHARED;
6919- protection_map[11] = PAGE_SHARED;
6920+ protection_map[9] = PAGE_READONLY_NOEXEC;
6921+ protection_map[10] = PAGE_SHARED_NOEXEC;
6922+ protection_map[11] = PAGE_SHARED_NOEXEC;
6923 protection_map[12] = PAGE_READONLY;
6924 protection_map[13] = PAGE_READONLY;
6925 protection_map[14] = PAGE_SHARED;
6926diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6927index 509b1ff..bfd7118 100644
6928--- a/arch/sparc/mm/srmmu.c
6929+++ b/arch/sparc/mm/srmmu.c
6930@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6931 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6932 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6933 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6934+
6935+#ifdef CONFIG_PAX_PAGEEXEC
6936+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6937+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6938+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6939+#endif
6940+
6941 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6942 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6943
6944diff --git a/arch/um/Makefile b/arch/um/Makefile
6945index fc633db..5e1a1c2 100644
6946--- a/arch/um/Makefile
6947+++ b/arch/um/Makefile
6948@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6949 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6950 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6951
6952+ifdef CONSTIFY_PLUGIN
6953+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6954+endif
6955+
6956 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6957
6958 #This will adjust *FLAGS accordingly to the platform.
6959diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6960index 6c03acd..a5e0215 100644
6961--- a/arch/um/include/asm/kmap_types.h
6962+++ b/arch/um/include/asm/kmap_types.h
6963@@ -23,6 +23,7 @@ enum km_type {
6964 KM_IRQ1,
6965 KM_SOFTIRQ0,
6966 KM_SOFTIRQ1,
6967+ KM_CLEARPAGE,
6968 KM_TYPE_NR
6969 };
6970
6971diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6972index 4cc9b6c..02e5029 100644
6973--- a/arch/um/include/asm/page.h
6974+++ b/arch/um/include/asm/page.h
6975@@ -14,6 +14,9 @@
6976 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6977 #define PAGE_MASK (~(PAGE_SIZE-1))
6978
6979+#define ktla_ktva(addr) (addr)
6980+#define ktva_ktla(addr) (addr)
6981+
6982 #ifndef __ASSEMBLY__
6983
6984 struct page;
6985diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6986index 4a28a15..654dc2a 100644
6987--- a/arch/um/kernel/process.c
6988+++ b/arch/um/kernel/process.c
6989@@ -393,22 +393,6 @@ int singlestepping(void * t)
6990 return 2;
6991 }
6992
6993-/*
6994- * Only x86 and x86_64 have an arch_align_stack().
6995- * All other arches have "#define arch_align_stack(x) (x)"
6996- * in their asm/system.h
6997- * As this is included in UML from asm-um/system-generic.h,
6998- * we can use it to behave as the subarch does.
6999- */
7000-#ifndef arch_align_stack
7001-unsigned long arch_align_stack(unsigned long sp)
7002-{
7003- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7004- sp -= get_random_int() % 8192;
7005- return sp & ~0xf;
7006-}
7007-#endif
7008-
7009 unsigned long get_wchan(struct task_struct *p)
7010 {
7011 unsigned long stack_page, sp, ip;
7012diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7013index d1b93c4..ae1b7fd 100644
7014--- a/arch/um/sys-i386/shared/sysdep/system.h
7015+++ b/arch/um/sys-i386/shared/sysdep/system.h
7016@@ -17,7 +17,7 @@
7017 # define AT_VECTOR_SIZE_ARCH 1
7018 #endif
7019
7020-extern unsigned long arch_align_stack(unsigned long sp);
7021+#define arch_align_stack(x) ((x) & ~0xfUL)
7022
7023 void default_idle(void);
7024
7025diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7026index 857ca0b..9a2669d 100644
7027--- a/arch/um/sys-i386/syscalls.c
7028+++ b/arch/um/sys-i386/syscalls.c
7029@@ -11,6 +11,21 @@
7030 #include "asm/uaccess.h"
7031 #include "asm/unistd.h"
7032
7033+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7034+{
7035+ unsigned long pax_task_size = TASK_SIZE;
7036+
7037+#ifdef CONFIG_PAX_SEGMEXEC
7038+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7039+ pax_task_size = SEGMEXEC_TASK_SIZE;
7040+#endif
7041+
7042+ if (len > pax_task_size || addr > pax_task_size - len)
7043+ return -EINVAL;
7044+
7045+ return 0;
7046+}
7047+
7048 /*
7049 * Perform the select(nd, in, out, ex, tv) and mmap() system
7050 * calls. Linux/i386 didn't use to be able to handle more than
7051diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7052index d1b93c4..ae1b7fd 100644
7053--- a/arch/um/sys-x86_64/shared/sysdep/system.h
7054+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7055@@ -17,7 +17,7 @@
7056 # define AT_VECTOR_SIZE_ARCH 1
7057 #endif
7058
7059-extern unsigned long arch_align_stack(unsigned long sp);
7060+#define arch_align_stack(x) ((x) & ~0xfUL)
7061
7062 void default_idle(void);
7063
7064diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7065index 73ae02a..f932de5 100644
7066--- a/arch/x86/Kconfig
7067+++ b/arch/x86/Kconfig
7068@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7069
7070 config X86_32_LAZY_GS
7071 def_bool y
7072- depends on X86_32 && !CC_STACKPROTECTOR
7073+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7074
7075 config KTIME_SCALAR
7076 def_bool X86_32
7077@@ -1008,7 +1008,7 @@ choice
7078
7079 config NOHIGHMEM
7080 bool "off"
7081- depends on !X86_NUMAQ
7082+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7083 ---help---
7084 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7085 However, the address space of 32-bit x86 processors is only 4
7086@@ -1045,7 +1045,7 @@ config NOHIGHMEM
7087
7088 config HIGHMEM4G
7089 bool "4GB"
7090- depends on !X86_NUMAQ
7091+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7092 ---help---
7093 Select this if you have a 32-bit processor and between 1 and 4
7094 gigabytes of physical RAM.
7095@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7096 hex
7097 default 0xB0000000 if VMSPLIT_3G_OPT
7098 default 0x80000000 if VMSPLIT_2G
7099- default 0x78000000 if VMSPLIT_2G_OPT
7100+ default 0x70000000 if VMSPLIT_2G_OPT
7101 default 0x40000000 if VMSPLIT_1G
7102 default 0xC0000000
7103 depends on X86_32
7104@@ -1460,6 +1460,7 @@ config SECCOMP
7105
7106 config CC_STACKPROTECTOR
7107 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7108+ depends on X86_64 || !PAX_MEMORY_UDEREF
7109 ---help---
7110 This option turns on the -fstack-protector GCC feature. This
7111 feature puts, at the beginning of functions, a canary value on
7112@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7113 config PHYSICAL_START
7114 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7115 default "0x1000000"
7116+ range 0x400000 0x40000000
7117 ---help---
7118 This gives the physical address where the kernel is loaded.
7119
7120@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7121 hex
7122 prompt "Alignment value to which kernel should be aligned" if X86_32
7123 default "0x1000000"
7124+ range 0x400000 0x1000000 if PAX_KERNEXEC
7125 range 0x2000 0x1000000
7126 ---help---
7127 This value puts the alignment restrictions on physical address
7128@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7129 Say N if you want to disable CPU hotplug.
7130
7131 config COMPAT_VDSO
7132- def_bool y
7133+ def_bool n
7134 prompt "Compat VDSO support"
7135 depends on X86_32 || IA32_EMULATION
7136+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7137 ---help---
7138 Map the 32-bit VDSO to the predictable old-style address too.
7139 ---help---
7140diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7141index 0e566103..1a6b57e 100644
7142--- a/arch/x86/Kconfig.cpu
7143+++ b/arch/x86/Kconfig.cpu
7144@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
7145
7146 config X86_F00F_BUG
7147 def_bool y
7148- depends on M586MMX || M586TSC || M586 || M486 || M386
7149+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7150
7151 config X86_WP_WORKS_OK
7152 def_bool y
7153@@ -360,7 +360,7 @@ config X86_POPAD_OK
7154
7155 config X86_ALIGNMENT_16
7156 def_bool y
7157- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7158+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7159
7160 config X86_INTEL_USERCOPY
7161 def_bool y
7162@@ -406,7 +406,7 @@ config X86_CMPXCHG64
7163 # generates cmov.
7164 config X86_CMOV
7165 def_bool y
7166- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7167+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7168
7169 config X86_MINIMUM_CPU_FAMILY
7170 int
7171diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7172index d105f29..c928727 100644
7173--- a/arch/x86/Kconfig.debug
7174+++ b/arch/x86/Kconfig.debug
7175@@ -99,7 +99,7 @@ config X86_PTDUMP
7176 config DEBUG_RODATA
7177 bool "Write protect kernel read-only data structures"
7178 default y
7179- depends on DEBUG_KERNEL
7180+ depends on DEBUG_KERNEL && BROKEN
7181 ---help---
7182 Mark the kernel read-only data as write-protected in the pagetables,
7183 in order to catch accidental (and incorrect) writes to such const
7184diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7185index d2d24c9..0f21f8d 100644
7186--- a/arch/x86/Makefile
7187+++ b/arch/x86/Makefile
7188@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
7189 else
7190 BITS := 64
7191 UTS_MACHINE := x86_64
7192+ biarch := $(call cc-option,-m64)
7193 CHECKFLAGS += -D__x86_64__ -m64
7194
7195 KBUILD_AFLAGS += -m64
7196@@ -189,3 +190,12 @@ define archhelp
7197 echo ' FDARGS="..." arguments for the booted kernel'
7198 echo ' FDINITRD=file initrd for the booted kernel'
7199 endef
7200+
7201+define OLD_LD
7202+
7203+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7204+*** Please upgrade your binutils to 2.18 or newer
7205+endef
7206+
7207+archprepare:
7208+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7209diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7210index ec749c2..bbb5319 100644
7211--- a/arch/x86/boot/Makefile
7212+++ b/arch/x86/boot/Makefile
7213@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7214 $(call cc-option, -fno-stack-protector) \
7215 $(call cc-option, -mpreferred-stack-boundary=2)
7216 KBUILD_CFLAGS += $(call cc-option, -m32)
7217+ifdef CONSTIFY_PLUGIN
7218+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7219+endif
7220 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7221 GCOV_PROFILE := n
7222
7223diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7224index 878e4b9..20537ab 100644
7225--- a/arch/x86/boot/bitops.h
7226+++ b/arch/x86/boot/bitops.h
7227@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7228 u8 v;
7229 const u32 *p = (const u32 *)addr;
7230
7231- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7232+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7233 return v;
7234 }
7235
7236@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7237
7238 static inline void set_bit(int nr, void *addr)
7239 {
7240- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7241+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7242 }
7243
7244 #endif /* BOOT_BITOPS_H */
7245diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7246index 98239d2..f40214c 100644
7247--- a/arch/x86/boot/boot.h
7248+++ b/arch/x86/boot/boot.h
7249@@ -82,7 +82,7 @@ static inline void io_delay(void)
7250 static inline u16 ds(void)
7251 {
7252 u16 seg;
7253- asm("movw %%ds,%0" : "=rm" (seg));
7254+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7255 return seg;
7256 }
7257
7258@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7259 static inline int memcmp(const void *s1, const void *s2, size_t len)
7260 {
7261 u8 diff;
7262- asm("repe; cmpsb; setnz %0"
7263+ asm volatile("repe; cmpsb; setnz %0"
7264 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7265 return diff;
7266 }
7267diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7268index f8ed065..5bf5ff3 100644
7269--- a/arch/x86/boot/compressed/Makefile
7270+++ b/arch/x86/boot/compressed/Makefile
7271@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7272 KBUILD_CFLAGS += $(cflags-y)
7273 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7274 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7275+ifdef CONSTIFY_PLUGIN
7276+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7277+endif
7278
7279 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7280 GCOV_PROFILE := n
7281diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7282index f543b70..b60fba8 100644
7283--- a/arch/x86/boot/compressed/head_32.S
7284+++ b/arch/x86/boot/compressed/head_32.S
7285@@ -76,7 +76,7 @@ ENTRY(startup_32)
7286 notl %eax
7287 andl %eax, %ebx
7288 #else
7289- movl $LOAD_PHYSICAL_ADDR, %ebx
7290+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7291 #endif
7292
7293 /* Target address to relocate to for decompression */
7294@@ -149,7 +149,7 @@ relocated:
7295 * and where it was actually loaded.
7296 */
7297 movl %ebp, %ebx
7298- subl $LOAD_PHYSICAL_ADDR, %ebx
7299+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7300 jz 2f /* Nothing to be done if loaded at compiled addr. */
7301 /*
7302 * Process relocations.
7303@@ -157,8 +157,7 @@ relocated:
7304
7305 1: subl $4, %edi
7306 movl (%edi), %ecx
7307- testl %ecx, %ecx
7308- jz 2f
7309+ jecxz 2f
7310 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7311 jmp 1b
7312 2:
7313diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7314index 077e1b6..2c6b13b 100644
7315--- a/arch/x86/boot/compressed/head_64.S
7316+++ b/arch/x86/boot/compressed/head_64.S
7317@@ -91,7 +91,7 @@ ENTRY(startup_32)
7318 notl %eax
7319 andl %eax, %ebx
7320 #else
7321- movl $LOAD_PHYSICAL_ADDR, %ebx
7322+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7323 #endif
7324
7325 /* Target address to relocate to for decompression */
7326@@ -183,7 +183,7 @@ no_longmode:
7327 hlt
7328 jmp 1b
7329
7330-#include "../../kernel/verify_cpu_64.S"
7331+#include "../../kernel/verify_cpu.S"
7332
7333 /*
7334 * Be careful here startup_64 needs to be at a predictable
7335@@ -234,7 +234,7 @@ ENTRY(startup_64)
7336 notq %rax
7337 andq %rax, %rbp
7338 #else
7339- movq $LOAD_PHYSICAL_ADDR, %rbp
7340+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7341 #endif
7342
7343 /* Target address to relocate to for decompression */
7344diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7345index 842b2a3..f00178b 100644
7346--- a/arch/x86/boot/compressed/misc.c
7347+++ b/arch/x86/boot/compressed/misc.c
7348@@ -288,7 +288,7 @@ static void parse_elf(void *output)
7349 case PT_LOAD:
7350 #ifdef CONFIG_RELOCATABLE
7351 dest = output;
7352- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7353+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7354 #else
7355 dest = (void *)(phdr->p_paddr);
7356 #endif
7357@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7358 error("Destination address too large");
7359 #endif
7360 #ifndef CONFIG_RELOCATABLE
7361- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7362+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7363 error("Wrong destination address");
7364 #endif
7365
7366diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7367index bcbd36c..b1754af 100644
7368--- a/arch/x86/boot/compressed/mkpiggy.c
7369+++ b/arch/x86/boot/compressed/mkpiggy.c
7370@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7371
7372 offs = (olen > ilen) ? olen - ilen : 0;
7373 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7374- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7375+ offs += 64*1024; /* Add 64K bytes slack */
7376 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7377
7378 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7379diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7380index bbeb0c3..f5167ab 100644
7381--- a/arch/x86/boot/compressed/relocs.c
7382+++ b/arch/x86/boot/compressed/relocs.c
7383@@ -10,8 +10,11 @@
7384 #define USE_BSD
7385 #include <endian.h>
7386
7387+#include "../../../../include/linux/autoconf.h"
7388+
7389 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7390 static Elf32_Ehdr ehdr;
7391+static Elf32_Phdr *phdr;
7392 static unsigned long reloc_count, reloc_idx;
7393 static unsigned long *relocs;
7394
7395@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7396
7397 static int is_safe_abs_reloc(const char* sym_name)
7398 {
7399- int i;
7400+ unsigned int i;
7401
7402 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7403 if (!strcmp(sym_name, safe_abs_relocs[i]))
7404@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7405 }
7406 }
7407
7408+static void read_phdrs(FILE *fp)
7409+{
7410+ unsigned int i;
7411+
7412+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7413+ if (!phdr) {
7414+ die("Unable to allocate %d program headers\n",
7415+ ehdr.e_phnum);
7416+ }
7417+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7418+ die("Seek to %d failed: %s\n",
7419+ ehdr.e_phoff, strerror(errno));
7420+ }
7421+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7422+ die("Cannot read ELF program headers: %s\n",
7423+ strerror(errno));
7424+ }
7425+ for(i = 0; i < ehdr.e_phnum; i++) {
7426+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7427+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7428+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7429+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7430+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7431+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7432+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7433+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7434+ }
7435+
7436+}
7437+
7438 static void read_shdrs(FILE *fp)
7439 {
7440- int i;
7441+ unsigned int i;
7442 Elf32_Shdr shdr;
7443
7444 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7445@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7446
7447 static void read_strtabs(FILE *fp)
7448 {
7449- int i;
7450+ unsigned int i;
7451 for (i = 0; i < ehdr.e_shnum; i++) {
7452 struct section *sec = &secs[i];
7453 if (sec->shdr.sh_type != SHT_STRTAB) {
7454@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7455
7456 static void read_symtabs(FILE *fp)
7457 {
7458- int i,j;
7459+ unsigned int i,j;
7460 for (i = 0; i < ehdr.e_shnum; i++) {
7461 struct section *sec = &secs[i];
7462 if (sec->shdr.sh_type != SHT_SYMTAB) {
7463@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7464
7465 static void read_relocs(FILE *fp)
7466 {
7467- int i,j;
7468+ unsigned int i,j;
7469+ uint32_t base;
7470+
7471 for (i = 0; i < ehdr.e_shnum; i++) {
7472 struct section *sec = &secs[i];
7473 if (sec->shdr.sh_type != SHT_REL) {
7474@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7475 die("Cannot read symbol table: %s\n",
7476 strerror(errno));
7477 }
7478+ base = 0;
7479+ for (j = 0; j < ehdr.e_phnum; j++) {
7480+ if (phdr[j].p_type != PT_LOAD )
7481+ continue;
7482+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7483+ continue;
7484+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7485+ break;
7486+ }
7487 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7488 Elf32_Rel *rel = &sec->reltab[j];
7489- rel->r_offset = elf32_to_cpu(rel->r_offset);
7490+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7491 rel->r_info = elf32_to_cpu(rel->r_info);
7492 }
7493 }
7494@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7495
7496 static void print_absolute_symbols(void)
7497 {
7498- int i;
7499+ unsigned int i;
7500 printf("Absolute symbols\n");
7501 printf(" Num: Value Size Type Bind Visibility Name\n");
7502 for (i = 0; i < ehdr.e_shnum; i++) {
7503 struct section *sec = &secs[i];
7504 char *sym_strtab;
7505 Elf32_Sym *sh_symtab;
7506- int j;
7507+ unsigned int j;
7508
7509 if (sec->shdr.sh_type != SHT_SYMTAB) {
7510 continue;
7511@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7512
7513 static void print_absolute_relocs(void)
7514 {
7515- int i, printed = 0;
7516+ unsigned int i, printed = 0;
7517
7518 for (i = 0; i < ehdr.e_shnum; i++) {
7519 struct section *sec = &secs[i];
7520 struct section *sec_applies, *sec_symtab;
7521 char *sym_strtab;
7522 Elf32_Sym *sh_symtab;
7523- int j;
7524+ unsigned int j;
7525 if (sec->shdr.sh_type != SHT_REL) {
7526 continue;
7527 }
7528@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7529
7530 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7531 {
7532- int i;
7533+ unsigned int i;
7534 /* Walk through the relocations */
7535 for (i = 0; i < ehdr.e_shnum; i++) {
7536 char *sym_strtab;
7537 Elf32_Sym *sh_symtab;
7538 struct section *sec_applies, *sec_symtab;
7539- int j;
7540+ unsigned int j;
7541 struct section *sec = &secs[i];
7542
7543 if (sec->shdr.sh_type != SHT_REL) {
7544@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7545 if (sym->st_shndx == SHN_ABS) {
7546 continue;
7547 }
7548+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7549+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7550+ continue;
7551+
7552+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7553+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7554+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7555+ continue;
7556+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7557+ continue;
7558+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7559+ continue;
7560+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7561+ continue;
7562+#endif
7563 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7564 /*
7565 * NONE can be ignored and and PC relative
7566@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7567
7568 static void emit_relocs(int as_text)
7569 {
7570- int i;
7571+ unsigned int i;
7572 /* Count how many relocations I have and allocate space for them. */
7573 reloc_count = 0;
7574 walk_relocs(count_reloc);
7575@@ -634,6 +693,7 @@ int main(int argc, char **argv)
7576 fname, strerror(errno));
7577 }
7578 read_ehdr(fp);
7579+ read_phdrs(fp);
7580 read_shdrs(fp);
7581 read_strtabs(fp);
7582 read_symtabs(fp);
7583diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7584index 4d3ff03..e4972ff 100644
7585--- a/arch/x86/boot/cpucheck.c
7586+++ b/arch/x86/boot/cpucheck.c
7587@@ -74,7 +74,7 @@ static int has_fpu(void)
7588 u16 fcw = -1, fsw = -1;
7589 u32 cr0;
7590
7591- asm("movl %%cr0,%0" : "=r" (cr0));
7592+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7593 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7594 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7595 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7596@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7597 {
7598 u32 f0, f1;
7599
7600- asm("pushfl ; "
7601+ asm volatile("pushfl ; "
7602 "pushfl ; "
7603 "popl %0 ; "
7604 "movl %0,%1 ; "
7605@@ -115,7 +115,7 @@ static void get_flags(void)
7606 set_bit(X86_FEATURE_FPU, cpu.flags);
7607
7608 if (has_eflag(X86_EFLAGS_ID)) {
7609- asm("cpuid"
7610+ asm volatile("cpuid"
7611 : "=a" (max_intel_level),
7612 "=b" (cpu_vendor[0]),
7613 "=d" (cpu_vendor[1]),
7614@@ -124,7 +124,7 @@ static void get_flags(void)
7615
7616 if (max_intel_level >= 0x00000001 &&
7617 max_intel_level <= 0x0000ffff) {
7618- asm("cpuid"
7619+ asm volatile("cpuid"
7620 : "=a" (tfms),
7621 "=c" (cpu.flags[4]),
7622 "=d" (cpu.flags[0])
7623@@ -136,7 +136,7 @@ static void get_flags(void)
7624 cpu.model += ((tfms >> 16) & 0xf) << 4;
7625 }
7626
7627- asm("cpuid"
7628+ asm volatile("cpuid"
7629 : "=a" (max_amd_level)
7630 : "a" (0x80000000)
7631 : "ebx", "ecx", "edx");
7632@@ -144,7 +144,7 @@ static void get_flags(void)
7633 if (max_amd_level >= 0x80000001 &&
7634 max_amd_level <= 0x8000ffff) {
7635 u32 eax = 0x80000001;
7636- asm("cpuid"
7637+ asm volatile("cpuid"
7638 : "+a" (eax),
7639 "=c" (cpu.flags[6]),
7640 "=d" (cpu.flags[1])
7641@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7642 u32 ecx = MSR_K7_HWCR;
7643 u32 eax, edx;
7644
7645- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7646+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7647 eax &= ~(1 << 15);
7648- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7649+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7650
7651 get_flags(); /* Make sure it really did something */
7652 err = check_flags();
7653@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7654 u32 ecx = MSR_VIA_FCR;
7655 u32 eax, edx;
7656
7657- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7658+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7659 eax |= (1<<1)|(1<<7);
7660- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7661+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7662
7663 set_bit(X86_FEATURE_CX8, cpu.flags);
7664 err = check_flags();
7665@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7666 u32 eax, edx;
7667 u32 level = 1;
7668
7669- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7670- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7671- asm("cpuid"
7672+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7673+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7674+ asm volatile("cpuid"
7675 : "+a" (level), "=d" (cpu.flags[0])
7676 : : "ecx", "ebx");
7677- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7678+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7679
7680 err = check_flags();
7681 }
7682diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7683index b31cc54..8d69237 100644
7684--- a/arch/x86/boot/header.S
7685+++ b/arch/x86/boot/header.S
7686@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7687 # single linked list of
7688 # struct setup_data
7689
7690-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7691+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7692
7693 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7694 #define VO_INIT_SIZE (VO__end - VO__text)
7695diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7696index cae3feb..ff8ff2a 100644
7697--- a/arch/x86/boot/memory.c
7698+++ b/arch/x86/boot/memory.c
7699@@ -19,7 +19,7 @@
7700
7701 static int detect_memory_e820(void)
7702 {
7703- int count = 0;
7704+ unsigned int count = 0;
7705 struct biosregs ireg, oreg;
7706 struct e820entry *desc = boot_params.e820_map;
7707 static struct e820entry buf; /* static so it is zeroed */
7708diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7709index 11e8c6e..fdbb1ed 100644
7710--- a/arch/x86/boot/video-vesa.c
7711+++ b/arch/x86/boot/video-vesa.c
7712@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7713
7714 boot_params.screen_info.vesapm_seg = oreg.es;
7715 boot_params.screen_info.vesapm_off = oreg.di;
7716+ boot_params.screen_info.vesapm_size = oreg.cx;
7717 }
7718
7719 /*
7720diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7721index d42da38..787cdf3 100644
7722--- a/arch/x86/boot/video.c
7723+++ b/arch/x86/boot/video.c
7724@@ -90,7 +90,7 @@ static void store_mode_params(void)
7725 static unsigned int get_entry(void)
7726 {
7727 char entry_buf[4];
7728- int i, len = 0;
7729+ unsigned int i, len = 0;
7730 int key;
7731 unsigned int v;
7732
7733diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7734index 5b577d5..3c1fed4 100644
7735--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7736+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7737@@ -8,6 +8,8 @@
7738 * including this sentence is retained in full.
7739 */
7740
7741+#include <asm/alternative-asm.h>
7742+
7743 .extern crypto_ft_tab
7744 .extern crypto_it_tab
7745 .extern crypto_fl_tab
7746@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7747 je B192; \
7748 leaq 32(r9),r9;
7749
7750+#define ret pax_force_retaddr 0, 1; ret
7751+
7752 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7753 movq r1,r2; \
7754 movq r3,r4; \
7755diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7756index eb0566e..e3ebad8 100644
7757--- a/arch/x86/crypto/aesni-intel_asm.S
7758+++ b/arch/x86/crypto/aesni-intel_asm.S
7759@@ -16,6 +16,7 @@
7760 */
7761
7762 #include <linux/linkage.h>
7763+#include <asm/alternative-asm.h>
7764
7765 .text
7766
7767@@ -52,6 +53,7 @@ _key_expansion_256a:
7768 pxor %xmm1, %xmm0
7769 movaps %xmm0, (%rcx)
7770 add $0x10, %rcx
7771+ pax_force_retaddr_bts
7772 ret
7773
7774 _key_expansion_192a:
7775@@ -75,6 +77,7 @@ _key_expansion_192a:
7776 shufps $0b01001110, %xmm2, %xmm1
7777 movaps %xmm1, 16(%rcx)
7778 add $0x20, %rcx
7779+ pax_force_retaddr_bts
7780 ret
7781
7782 _key_expansion_192b:
7783@@ -93,6 +96,7 @@ _key_expansion_192b:
7784
7785 movaps %xmm0, (%rcx)
7786 add $0x10, %rcx
7787+ pax_force_retaddr_bts
7788 ret
7789
7790 _key_expansion_256b:
7791@@ -104,6 +108,7 @@ _key_expansion_256b:
7792 pxor %xmm1, %xmm2
7793 movaps %xmm2, (%rcx)
7794 add $0x10, %rcx
7795+ pax_force_retaddr_bts
7796 ret
7797
7798 /*
7799@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7800 cmp %rcx, %rdi
7801 jb .Ldec_key_loop
7802 xor %rax, %rax
7803+ pax_force_retaddr 0, 1
7804 ret
7805+ENDPROC(aesni_set_key)
7806
7807 /*
7808 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7809@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7810 movups (INP), STATE # input
7811 call _aesni_enc1
7812 movups STATE, (OUTP) # output
7813+ pax_force_retaddr 0, 1
7814 ret
7815+ENDPROC(aesni_enc)
7816
7817 /*
7818 * _aesni_enc1: internal ABI
7819@@ -319,6 +328,7 @@ _aesni_enc1:
7820 movaps 0x70(TKEYP), KEY
7821 # aesenclast KEY, STATE # last round
7822 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7823+ pax_force_retaddr_bts
7824 ret
7825
7826 /*
7827@@ -482,6 +492,7 @@ _aesni_enc4:
7828 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7829 # aesenclast KEY, STATE4
7830 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7831+ pax_force_retaddr_bts
7832 ret
7833
7834 /*
7835@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7836 movups (INP), STATE # input
7837 call _aesni_dec1
7838 movups STATE, (OUTP) #output
7839+ pax_force_retaddr 0, 1
7840 ret
7841+ENDPROC(aesni_dec)
7842
7843 /*
7844 * _aesni_dec1: internal ABI
7845@@ -563,6 +576,7 @@ _aesni_dec1:
7846 movaps 0x70(TKEYP), KEY
7847 # aesdeclast KEY, STATE # last round
7848 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7849+ pax_force_retaddr_bts
7850 ret
7851
7852 /*
7853@@ -726,6 +740,7 @@ _aesni_dec4:
7854 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7855 # aesdeclast KEY, STATE4
7856 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7857+ pax_force_retaddr_bts
7858 ret
7859
7860 /*
7861@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7862 cmp $16, LEN
7863 jge .Lecb_enc_loop1
7864 .Lecb_enc_ret:
7865+ pax_force_retaddr 0, 1
7866 ret
7867+ENDPROC(aesni_ecb_enc)
7868
7869 /*
7870 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7871@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7872 cmp $16, LEN
7873 jge .Lecb_dec_loop1
7874 .Lecb_dec_ret:
7875+ pax_force_retaddr 0, 1
7876 ret
7877+ENDPROC(aesni_ecb_dec)
7878
7879 /*
7880 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7881@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7882 jge .Lcbc_enc_loop
7883 movups STATE, (IVP)
7884 .Lcbc_enc_ret:
7885+ pax_force_retaddr 0, 1
7886 ret
7887+ENDPROC(aesni_cbc_enc)
7888
7889 /*
7890 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7891@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7892 .Lcbc_dec_ret:
7893 movups IV, (IVP)
7894 .Lcbc_dec_just_ret:
7895+ pax_force_retaddr 0, 1
7896 ret
7897+ENDPROC(aesni_cbc_dec)
7898diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7899index 6214a9b..1f4fc9a 100644
7900--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7901+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7902@@ -1,3 +1,5 @@
7903+#include <asm/alternative-asm.h>
7904+
7905 # enter ECRYPT_encrypt_bytes
7906 .text
7907 .p2align 5
7908@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7909 add %r11,%rsp
7910 mov %rdi,%rax
7911 mov %rsi,%rdx
7912+ pax_force_retaddr 0, 1
7913 ret
7914 # bytesatleast65:
7915 ._bytesatleast65:
7916@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7917 add %r11,%rsp
7918 mov %rdi,%rax
7919 mov %rsi,%rdx
7920+ pax_force_retaddr
7921 ret
7922 # enter ECRYPT_ivsetup
7923 .text
7924@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7925 add %r11,%rsp
7926 mov %rdi,%rax
7927 mov %rsi,%rdx
7928+ pax_force_retaddr
7929 ret
7930diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7931index 35974a5..5662ae2 100644
7932--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7933+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7934@@ -21,6 +21,7 @@
7935 .text
7936
7937 #include <asm/asm-offsets.h>
7938+#include <asm/alternative-asm.h>
7939
7940 #define a_offset 0
7941 #define b_offset 4
7942@@ -269,6 +270,7 @@ twofish_enc_blk:
7943
7944 popq R1
7945 movq $1,%rax
7946+ pax_force_retaddr 0, 1
7947 ret
7948
7949 twofish_dec_blk:
7950@@ -321,4 +323,5 @@ twofish_dec_blk:
7951
7952 popq R1
7953 movq $1,%rax
7954+ pax_force_retaddr 0, 1
7955 ret
7956diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7957index 14531ab..a89a0c0 100644
7958--- a/arch/x86/ia32/ia32_aout.c
7959+++ b/arch/x86/ia32/ia32_aout.c
7960@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7961 unsigned long dump_start, dump_size;
7962 struct user32 dump;
7963
7964+ memset(&dump, 0, sizeof(dump));
7965+
7966 fs = get_fs();
7967 set_fs(KERNEL_DS);
7968 has_dumped = 1;
7969@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7970 dump_size = dump.u_ssize << PAGE_SHIFT;
7971 DUMP_WRITE(dump_start, dump_size);
7972 }
7973- /*
7974- * Finally dump the task struct. Not be used by gdb, but
7975- * could be useful
7976- */
7977- set_fs(KERNEL_DS);
7978- DUMP_WRITE(current, sizeof(*current));
7979 end_coredump:
7980 set_fs(fs);
7981 return has_dumped;
7982diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7983index 588a7aa..a3468b0 100644
7984--- a/arch/x86/ia32/ia32_signal.c
7985+++ b/arch/x86/ia32/ia32_signal.c
7986@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7987 }
7988 seg = get_fs();
7989 set_fs(KERNEL_DS);
7990- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7991+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7992 set_fs(seg);
7993 if (ret >= 0 && uoss_ptr) {
7994 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7995@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7996 */
7997 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7998 size_t frame_size,
7999- void **fpstate)
8000+ void __user **fpstate)
8001 {
8002 unsigned long sp;
8003
8004@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8005
8006 if (used_math()) {
8007 sp = sp - sig_xstate_ia32_size;
8008- *fpstate = (struct _fpstate_ia32 *) sp;
8009+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8010 if (save_i387_xstate_ia32(*fpstate) < 0)
8011 return (void __user *) -1L;
8012 }
8013@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8014 sp -= frame_size;
8015 /* Align the stack pointer according to the i386 ABI,
8016 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8017- sp = ((sp + 4) & -16ul) - 4;
8018+ sp = ((sp - 12) & -16ul) - 4;
8019 return (void __user *) sp;
8020 }
8021
8022@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8023 * These are actually not used anymore, but left because some
8024 * gdb versions depend on them as a marker.
8025 */
8026- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8027+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8028 } put_user_catch(err);
8029
8030 if (err)
8031@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8032 0xb8,
8033 __NR_ia32_rt_sigreturn,
8034 0x80cd,
8035- 0,
8036+ 0
8037 };
8038
8039 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8040@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8041
8042 if (ka->sa.sa_flags & SA_RESTORER)
8043 restorer = ka->sa.sa_restorer;
8044+ else if (current->mm->context.vdso)
8045+ /* Return stub is in 32bit vsyscall page */
8046+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8047 else
8048- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8049- rt_sigreturn);
8050+ restorer = &frame->retcode;
8051 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8052
8053 /*
8054 * Not actually used anymore, but left because some gdb
8055 * versions need it.
8056 */
8057- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8058+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8059 } put_user_catch(err);
8060
8061 if (err)
8062diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8063index 4edd8eb..29124b4 100644
8064--- a/arch/x86/ia32/ia32entry.S
8065+++ b/arch/x86/ia32/ia32entry.S
8066@@ -13,7 +13,9 @@
8067 #include <asm/thread_info.h>
8068 #include <asm/segment.h>
8069 #include <asm/irqflags.h>
8070+#include <asm/pgtable.h>
8071 #include <linux/linkage.h>
8072+#include <asm/alternative-asm.h>
8073
8074 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8075 #include <linux/elf-em.h>
8076@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8077 ENDPROC(native_irq_enable_sysexit)
8078 #endif
8079
8080+ .macro pax_enter_kernel_user
8081+ pax_set_fptr_mask
8082+#ifdef CONFIG_PAX_MEMORY_UDEREF
8083+ call pax_enter_kernel_user
8084+#endif
8085+ .endm
8086+
8087+ .macro pax_exit_kernel_user
8088+#ifdef CONFIG_PAX_MEMORY_UDEREF
8089+ call pax_exit_kernel_user
8090+#endif
8091+#ifdef CONFIG_PAX_RANDKSTACK
8092+ pushq %rax
8093+ pushq %r11
8094+ call pax_randomize_kstack
8095+ popq %r11
8096+ popq %rax
8097+#endif
8098+ .endm
8099+
8100+.macro pax_erase_kstack
8101+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8102+ call pax_erase_kstack
8103+#endif
8104+.endm
8105+
8106 /*
8107 * 32bit SYSENTER instruction entry.
8108 *
8109@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
8110 CFI_REGISTER rsp,rbp
8111 SWAPGS_UNSAFE_STACK
8112 movq PER_CPU_VAR(kernel_stack), %rsp
8113- addq $(KERNEL_STACK_OFFSET),%rsp
8114- /*
8115- * No need to follow this irqs on/off section: the syscall
8116- * disabled irqs, here we enable it straight after entry:
8117- */
8118- ENABLE_INTERRUPTS(CLBR_NONE)
8119 movl %ebp,%ebp /* zero extension */
8120 pushq $__USER32_DS
8121 CFI_ADJUST_CFA_OFFSET 8
8122@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
8123 pushfq
8124 CFI_ADJUST_CFA_OFFSET 8
8125 /*CFI_REL_OFFSET rflags,0*/
8126- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8127- CFI_REGISTER rip,r10
8128+ orl $X86_EFLAGS_IF,(%rsp)
8129+ GET_THREAD_INFO(%r11)
8130+ movl TI_sysenter_return(%r11), %r11d
8131+ CFI_REGISTER rip,r11
8132 pushq $__USER32_CS
8133 CFI_ADJUST_CFA_OFFSET 8
8134 /*CFI_REL_OFFSET cs,0*/
8135 movl %eax, %eax
8136- pushq %r10
8137+ pushq %r11
8138 CFI_ADJUST_CFA_OFFSET 8
8139 CFI_REL_OFFSET rip,0
8140 pushq %rax
8141 CFI_ADJUST_CFA_OFFSET 8
8142 cld
8143 SAVE_ARGS 0,0,1
8144+ pax_enter_kernel_user
8145+ /*
8146+ * No need to follow this irqs on/off section: the syscall
8147+ * disabled irqs, here we enable it straight after entry:
8148+ */
8149+ ENABLE_INTERRUPTS(CLBR_NONE)
8150 /* no need to do an access_ok check here because rbp has been
8151 32bit zero extended */
8152+
8153+#ifdef CONFIG_PAX_MEMORY_UDEREF
8154+ mov $PAX_USER_SHADOW_BASE,%r11
8155+ add %r11,%rbp
8156+#endif
8157+
8158 1: movl (%rbp),%ebp
8159 .section __ex_table,"a"
8160 .quad 1b,ia32_badarg
8161 .previous
8162- GET_THREAD_INFO(%r10)
8163- orl $TS_COMPAT,TI_status(%r10)
8164- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8165+ GET_THREAD_INFO(%r11)
8166+ orl $TS_COMPAT,TI_status(%r11)
8167+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8168 CFI_REMEMBER_STATE
8169 jnz sysenter_tracesys
8170 cmpq $(IA32_NR_syscalls-1),%rax
8171@@ -166,13 +202,15 @@ sysenter_do_call:
8172 sysenter_dispatch:
8173 call *ia32_sys_call_table(,%rax,8)
8174 movq %rax,RAX-ARGOFFSET(%rsp)
8175- GET_THREAD_INFO(%r10)
8176+ GET_THREAD_INFO(%r11)
8177 DISABLE_INTERRUPTS(CLBR_NONE)
8178 TRACE_IRQS_OFF
8179- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8180+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8181 jnz sysexit_audit
8182 sysexit_from_sys_call:
8183- andl $~TS_COMPAT,TI_status(%r10)
8184+ pax_exit_kernel_user
8185+ pax_erase_kstack
8186+ andl $~TS_COMPAT,TI_status(%r11)
8187 /* clear IF, that popfq doesn't enable interrupts early */
8188 andl $~0x200,EFLAGS-R11(%rsp)
8189 movl RIP-R11(%rsp),%edx /* User %eip */
8190@@ -200,6 +238,9 @@ sysexit_from_sys_call:
8191 movl %eax,%esi /* 2nd arg: syscall number */
8192 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8193 call audit_syscall_entry
8194+
8195+ pax_erase_kstack
8196+
8197 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8198 cmpq $(IA32_NR_syscalls-1),%rax
8199 ja ia32_badsys
8200@@ -211,7 +252,7 @@ sysexit_from_sys_call:
8201 .endm
8202
8203 .macro auditsys_exit exit
8204- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8205+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8206 jnz ia32_ret_from_sys_call
8207 TRACE_IRQS_ON
8208 sti
8209@@ -221,12 +262,12 @@ sysexit_from_sys_call:
8210 movzbl %al,%edi /* zero-extend that into %edi */
8211 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8212 call audit_syscall_exit
8213- GET_THREAD_INFO(%r10)
8214+ GET_THREAD_INFO(%r11)
8215 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8216 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8217 cli
8218 TRACE_IRQS_OFF
8219- testl %edi,TI_flags(%r10)
8220+ testl %edi,TI_flags(%r11)
8221 jz \exit
8222 CLEAR_RREGS -ARGOFFSET
8223 jmp int_with_check
8224@@ -244,7 +285,7 @@ sysexit_audit:
8225
8226 sysenter_tracesys:
8227 #ifdef CONFIG_AUDITSYSCALL
8228- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8229+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8230 jz sysenter_auditsys
8231 #endif
8232 SAVE_REST
8233@@ -252,6 +293,9 @@ sysenter_tracesys:
8234 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8235 movq %rsp,%rdi /* &pt_regs -> arg1 */
8236 call syscall_trace_enter
8237+
8238+ pax_erase_kstack
8239+
8240 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8241 RESTORE_REST
8242 cmpq $(IA32_NR_syscalls-1),%rax
8243@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8244 ENTRY(ia32_cstar_target)
8245 CFI_STARTPROC32 simple
8246 CFI_SIGNAL_FRAME
8247- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8248+ CFI_DEF_CFA rsp,0
8249 CFI_REGISTER rip,rcx
8250 /*CFI_REGISTER rflags,r11*/
8251 SWAPGS_UNSAFE_STACK
8252 movl %esp,%r8d
8253 CFI_REGISTER rsp,r8
8254 movq PER_CPU_VAR(kernel_stack),%rsp
8255+ SAVE_ARGS 8*6,1,1
8256+ pax_enter_kernel_user
8257 /*
8258 * No need to follow this irqs on/off section: the syscall
8259 * disabled irqs and here we enable it straight after entry:
8260 */
8261 ENABLE_INTERRUPTS(CLBR_NONE)
8262- SAVE_ARGS 8,1,1
8263 movl %eax,%eax /* zero extension */
8264 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8265 movq %rcx,RIP-ARGOFFSET(%rsp)
8266@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8267 /* no need to do an access_ok check here because r8 has been
8268 32bit zero extended */
8269 /* hardware stack frame is complete now */
8270+
8271+#ifdef CONFIG_PAX_MEMORY_UDEREF
8272+ mov $PAX_USER_SHADOW_BASE,%r11
8273+ add %r11,%r8
8274+#endif
8275+
8276 1: movl (%r8),%r9d
8277 .section __ex_table,"a"
8278 .quad 1b,ia32_badarg
8279 .previous
8280- GET_THREAD_INFO(%r10)
8281- orl $TS_COMPAT,TI_status(%r10)
8282- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8283+ GET_THREAD_INFO(%r11)
8284+ orl $TS_COMPAT,TI_status(%r11)
8285+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8286 CFI_REMEMBER_STATE
8287 jnz cstar_tracesys
8288 cmpq $IA32_NR_syscalls-1,%rax
8289@@ -327,13 +378,15 @@ cstar_do_call:
8290 cstar_dispatch:
8291 call *ia32_sys_call_table(,%rax,8)
8292 movq %rax,RAX-ARGOFFSET(%rsp)
8293- GET_THREAD_INFO(%r10)
8294+ GET_THREAD_INFO(%r11)
8295 DISABLE_INTERRUPTS(CLBR_NONE)
8296 TRACE_IRQS_OFF
8297- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8298+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8299 jnz sysretl_audit
8300 sysretl_from_sys_call:
8301- andl $~TS_COMPAT,TI_status(%r10)
8302+ pax_exit_kernel_user
8303+ pax_erase_kstack
8304+ andl $~TS_COMPAT,TI_status(%r11)
8305 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8306 movl RIP-ARGOFFSET(%rsp),%ecx
8307 CFI_REGISTER rip,rcx
8308@@ -361,7 +414,7 @@ sysretl_audit:
8309
8310 cstar_tracesys:
8311 #ifdef CONFIG_AUDITSYSCALL
8312- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8313+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8314 jz cstar_auditsys
8315 #endif
8316 xchgl %r9d,%ebp
8317@@ -370,6 +423,9 @@ cstar_tracesys:
8318 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8319 movq %rsp,%rdi /* &pt_regs -> arg1 */
8320 call syscall_trace_enter
8321+
8322+ pax_erase_kstack
8323+
8324 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8325 RESTORE_REST
8326 xchgl %ebp,%r9d
8327@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8328 CFI_REL_OFFSET rip,RIP-RIP
8329 PARAVIRT_ADJUST_EXCEPTION_FRAME
8330 SWAPGS
8331- /*
8332- * No need to follow this irqs on/off section: the syscall
8333- * disabled irqs and here we enable it straight after entry:
8334- */
8335- ENABLE_INTERRUPTS(CLBR_NONE)
8336 movl %eax,%eax
8337 pushq %rax
8338 CFI_ADJUST_CFA_OFFSET 8
8339@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8340 /* note the registers are not zero extended to the sf.
8341 this could be a problem. */
8342 SAVE_ARGS 0,0,1
8343- GET_THREAD_INFO(%r10)
8344- orl $TS_COMPAT,TI_status(%r10)
8345- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8346+ pax_enter_kernel_user
8347+ /*
8348+ * No need to follow this irqs on/off section: the syscall
8349+ * disabled irqs and here we enable it straight after entry:
8350+ */
8351+ ENABLE_INTERRUPTS(CLBR_NONE)
8352+ GET_THREAD_INFO(%r11)
8353+ orl $TS_COMPAT,TI_status(%r11)
8354+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8355 jnz ia32_tracesys
8356 cmpq $(IA32_NR_syscalls-1),%rax
8357 ja ia32_badsys
8358@@ -448,6 +505,9 @@ ia32_tracesys:
8359 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8360 movq %rsp,%rdi /* &pt_regs -> arg1 */
8361 call syscall_trace_enter
8362+
8363+ pax_erase_kstack
8364+
8365 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8366 RESTORE_REST
8367 cmpq $(IA32_NR_syscalls-1),%rax
8368@@ -462,6 +522,7 @@ ia32_badsys:
8369
8370 quiet_ni_syscall:
8371 movq $-ENOSYS,%rax
8372+ pax_force_retaddr
8373 ret
8374 CFI_ENDPROC
8375
8376diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8377index 016218c..47ccbdd 100644
8378--- a/arch/x86/ia32/sys_ia32.c
8379+++ b/arch/x86/ia32/sys_ia32.c
8380@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8381 */
8382 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8383 {
8384- typeof(ubuf->st_uid) uid = 0;
8385- typeof(ubuf->st_gid) gid = 0;
8386+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8387+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8388 SET_UID(uid, stat->uid);
8389 SET_GID(gid, stat->gid);
8390 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8391@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8392 }
8393 set_fs(KERNEL_DS);
8394 ret = sys_rt_sigprocmask(how,
8395- set ? (sigset_t __user *)&s : NULL,
8396- oset ? (sigset_t __user *)&s : NULL,
8397+ set ? (sigset_t __force_user *)&s : NULL,
8398+ oset ? (sigset_t __force_user *)&s : NULL,
8399 sigsetsize);
8400 set_fs(old_fs);
8401 if (ret)
8402@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8403 mm_segment_t old_fs = get_fs();
8404
8405 set_fs(KERNEL_DS);
8406- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8407+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8408 set_fs(old_fs);
8409 if (put_compat_timespec(&t, interval))
8410 return -EFAULT;
8411@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8412 mm_segment_t old_fs = get_fs();
8413
8414 set_fs(KERNEL_DS);
8415- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8416+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8417 set_fs(old_fs);
8418 if (!ret) {
8419 switch (_NSIG_WORDS) {
8420@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8421 if (copy_siginfo_from_user32(&info, uinfo))
8422 return -EFAULT;
8423 set_fs(KERNEL_DS);
8424- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8425+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8426 set_fs(old_fs);
8427 return ret;
8428 }
8429@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8430 return -EFAULT;
8431
8432 set_fs(KERNEL_DS);
8433- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8434+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8435 count);
8436 set_fs(old_fs);
8437
8438diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8439index e2077d3..17d07ad 100644
8440--- a/arch/x86/include/asm/alternative-asm.h
8441+++ b/arch/x86/include/asm/alternative-asm.h
8442@@ -8,10 +8,10 @@
8443
8444 #ifdef CONFIG_SMP
8445 .macro LOCK_PREFIX
8446-1: lock
8447+672: lock
8448 .section .smp_locks,"a"
8449 .align 4
8450- X86_ALIGN 1b
8451+ X86_ALIGN 672b
8452 .previous
8453 .endm
8454 #else
8455@@ -19,4 +19,43 @@
8456 .endm
8457 #endif
8458
8459+#ifdef KERNEXEC_PLUGIN
8460+ .macro pax_force_retaddr_bts rip=0
8461+ btsq $63,\rip(%rsp)
8462+ .endm
8463+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8464+ .macro pax_force_retaddr rip=0, reload=0
8465+ btsq $63,\rip(%rsp)
8466+ .endm
8467+ .macro pax_force_fptr ptr
8468+ btsq $63,\ptr
8469+ .endm
8470+ .macro pax_set_fptr_mask
8471+ .endm
8472+#endif
8473+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8474+ .macro pax_force_retaddr rip=0, reload=0
8475+ .if \reload
8476+ pax_set_fptr_mask
8477+ .endif
8478+ orq %r10,\rip(%rsp)
8479+ .endm
8480+ .macro pax_force_fptr ptr
8481+ orq %r10,\ptr
8482+ .endm
8483+ .macro pax_set_fptr_mask
8484+ movabs $0x8000000000000000,%r10
8485+ .endm
8486+#endif
8487+#else
8488+ .macro pax_force_retaddr rip=0, reload=0
8489+ .endm
8490+ .macro pax_force_fptr ptr
8491+ .endm
8492+ .macro pax_force_retaddr_bts rip=0
8493+ .endm
8494+ .macro pax_set_fptr_mask
8495+ .endm
8496+#endif
8497+
8498 #endif /* __ASSEMBLY__ */
8499diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8500index c240efc..fdfadf3 100644
8501--- a/arch/x86/include/asm/alternative.h
8502+++ b/arch/x86/include/asm/alternative.h
8503@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8504 " .byte 662b-661b\n" /* sourcelen */ \
8505 " .byte 664f-663f\n" /* replacementlen */ \
8506 ".previous\n" \
8507- ".section .altinstr_replacement, \"ax\"\n" \
8508+ ".section .altinstr_replacement, \"a\"\n" \
8509 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8510 ".previous"
8511
8512diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8513index 474d80d..1f97d58 100644
8514--- a/arch/x86/include/asm/apic.h
8515+++ b/arch/x86/include/asm/apic.h
8516@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8517
8518 #ifdef CONFIG_X86_LOCAL_APIC
8519
8520-extern unsigned int apic_verbosity;
8521+extern int apic_verbosity;
8522 extern int local_apic_timer_c2_ok;
8523
8524 extern int disable_apic;
8525diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8526index 20370c6..a2eb9b0 100644
8527--- a/arch/x86/include/asm/apm.h
8528+++ b/arch/x86/include/asm/apm.h
8529@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8530 __asm__ __volatile__(APM_DO_ZERO_SEGS
8531 "pushl %%edi\n\t"
8532 "pushl %%ebp\n\t"
8533- "lcall *%%cs:apm_bios_entry\n\t"
8534+ "lcall *%%ss:apm_bios_entry\n\t"
8535 "setc %%al\n\t"
8536 "popl %%ebp\n\t"
8537 "popl %%edi\n\t"
8538@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8539 __asm__ __volatile__(APM_DO_ZERO_SEGS
8540 "pushl %%edi\n\t"
8541 "pushl %%ebp\n\t"
8542- "lcall *%%cs:apm_bios_entry\n\t"
8543+ "lcall *%%ss:apm_bios_entry\n\t"
8544 "setc %%bl\n\t"
8545 "popl %%ebp\n\t"
8546 "popl %%edi\n\t"
8547diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8548index dc5a667..939040c 100644
8549--- a/arch/x86/include/asm/atomic_32.h
8550+++ b/arch/x86/include/asm/atomic_32.h
8551@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8552 }
8553
8554 /**
8555+ * atomic_read_unchecked - read atomic variable
8556+ * @v: pointer of type atomic_unchecked_t
8557+ *
8558+ * Atomically reads the value of @v.
8559+ */
8560+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8561+{
8562+ return v->counter;
8563+}
8564+
8565+/**
8566 * atomic_set - set atomic variable
8567 * @v: pointer of type atomic_t
8568 * @i: required value
8569@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8570 }
8571
8572 /**
8573+ * atomic_set_unchecked - set atomic variable
8574+ * @v: pointer of type atomic_unchecked_t
8575+ * @i: required value
8576+ *
8577+ * Atomically sets the value of @v to @i.
8578+ */
8579+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8580+{
8581+ v->counter = i;
8582+}
8583+
8584+/**
8585 * atomic_add - add integer to atomic variable
8586 * @i: integer value to add
8587 * @v: pointer of type atomic_t
8588@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8589 */
8590 static inline void atomic_add(int i, atomic_t *v)
8591 {
8592- asm volatile(LOCK_PREFIX "addl %1,%0"
8593+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8594+
8595+#ifdef CONFIG_PAX_REFCOUNT
8596+ "jno 0f\n"
8597+ LOCK_PREFIX "subl %1,%0\n"
8598+ "int $4\n0:\n"
8599+ _ASM_EXTABLE(0b, 0b)
8600+#endif
8601+
8602+ : "+m" (v->counter)
8603+ : "ir" (i));
8604+}
8605+
8606+/**
8607+ * atomic_add_unchecked - add integer to atomic variable
8608+ * @i: integer value to add
8609+ * @v: pointer of type atomic_unchecked_t
8610+ *
8611+ * Atomically adds @i to @v.
8612+ */
8613+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8614+{
8615+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8616 : "+m" (v->counter)
8617 : "ir" (i));
8618 }
8619@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8620 */
8621 static inline void atomic_sub(int i, atomic_t *v)
8622 {
8623- asm volatile(LOCK_PREFIX "subl %1,%0"
8624+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8625+
8626+#ifdef CONFIG_PAX_REFCOUNT
8627+ "jno 0f\n"
8628+ LOCK_PREFIX "addl %1,%0\n"
8629+ "int $4\n0:\n"
8630+ _ASM_EXTABLE(0b, 0b)
8631+#endif
8632+
8633+ : "+m" (v->counter)
8634+ : "ir" (i));
8635+}
8636+
8637+/**
8638+ * atomic_sub_unchecked - subtract integer from atomic variable
8639+ * @i: integer value to subtract
8640+ * @v: pointer of type atomic_unchecked_t
8641+ *
8642+ * Atomically subtracts @i from @v.
8643+ */
8644+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8645+{
8646+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8647 : "+m" (v->counter)
8648 : "ir" (i));
8649 }
8650@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8651 {
8652 unsigned char c;
8653
8654- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8655+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8656+
8657+#ifdef CONFIG_PAX_REFCOUNT
8658+ "jno 0f\n"
8659+ LOCK_PREFIX "addl %2,%0\n"
8660+ "int $4\n0:\n"
8661+ _ASM_EXTABLE(0b, 0b)
8662+#endif
8663+
8664+ "sete %1\n"
8665 : "+m" (v->counter), "=qm" (c)
8666 : "ir" (i) : "memory");
8667 return c;
8668@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8669 */
8670 static inline void atomic_inc(atomic_t *v)
8671 {
8672- asm volatile(LOCK_PREFIX "incl %0"
8673+ asm volatile(LOCK_PREFIX "incl %0\n"
8674+
8675+#ifdef CONFIG_PAX_REFCOUNT
8676+ "jno 0f\n"
8677+ LOCK_PREFIX "decl %0\n"
8678+ "int $4\n0:\n"
8679+ _ASM_EXTABLE(0b, 0b)
8680+#endif
8681+
8682+ : "+m" (v->counter));
8683+}
8684+
8685+/**
8686+ * atomic_inc_unchecked - increment atomic variable
8687+ * @v: pointer of type atomic_unchecked_t
8688+ *
8689+ * Atomically increments @v by 1.
8690+ */
8691+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8692+{
8693+ asm volatile(LOCK_PREFIX "incl %0\n"
8694 : "+m" (v->counter));
8695 }
8696
8697@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8698 */
8699 static inline void atomic_dec(atomic_t *v)
8700 {
8701- asm volatile(LOCK_PREFIX "decl %0"
8702+ asm volatile(LOCK_PREFIX "decl %0\n"
8703+
8704+#ifdef CONFIG_PAX_REFCOUNT
8705+ "jno 0f\n"
8706+ LOCK_PREFIX "incl %0\n"
8707+ "int $4\n0:\n"
8708+ _ASM_EXTABLE(0b, 0b)
8709+#endif
8710+
8711+ : "+m" (v->counter));
8712+}
8713+
8714+/**
8715+ * atomic_dec_unchecked - decrement atomic variable
8716+ * @v: pointer of type atomic_unchecked_t
8717+ *
8718+ * Atomically decrements @v by 1.
8719+ */
8720+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8721+{
8722+ asm volatile(LOCK_PREFIX "decl %0\n"
8723 : "+m" (v->counter));
8724 }
8725
8726@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8727 {
8728 unsigned char c;
8729
8730- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8731+ asm volatile(LOCK_PREFIX "decl %0\n"
8732+
8733+#ifdef CONFIG_PAX_REFCOUNT
8734+ "jno 0f\n"
8735+ LOCK_PREFIX "incl %0\n"
8736+ "int $4\n0:\n"
8737+ _ASM_EXTABLE(0b, 0b)
8738+#endif
8739+
8740+ "sete %1\n"
8741 : "+m" (v->counter), "=qm" (c)
8742 : : "memory");
8743 return c != 0;
8744@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8745 {
8746 unsigned char c;
8747
8748- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8749+ asm volatile(LOCK_PREFIX "incl %0\n"
8750+
8751+#ifdef CONFIG_PAX_REFCOUNT
8752+ "jno 0f\n"
8753+ LOCK_PREFIX "decl %0\n"
8754+ "into\n0:\n"
8755+ _ASM_EXTABLE(0b, 0b)
8756+#endif
8757+
8758+ "sete %1\n"
8759+ : "+m" (v->counter), "=qm" (c)
8760+ : : "memory");
8761+ return c != 0;
8762+}
8763+
8764+/**
8765+ * atomic_inc_and_test_unchecked - increment and test
8766+ * @v: pointer of type atomic_unchecked_t
8767+ *
8768+ * Atomically increments @v by 1
8769+ * and returns true if the result is zero, or false for all
8770+ * other cases.
8771+ */
8772+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8773+{
8774+ unsigned char c;
8775+
8776+ asm volatile(LOCK_PREFIX "incl %0\n"
8777+ "sete %1\n"
8778 : "+m" (v->counter), "=qm" (c)
8779 : : "memory");
8780 return c != 0;
8781@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8782 {
8783 unsigned char c;
8784
8785- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8786+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8787+
8788+#ifdef CONFIG_PAX_REFCOUNT
8789+ "jno 0f\n"
8790+ LOCK_PREFIX "subl %2,%0\n"
8791+ "int $4\n0:\n"
8792+ _ASM_EXTABLE(0b, 0b)
8793+#endif
8794+
8795+ "sets %1\n"
8796 : "+m" (v->counter), "=qm" (c)
8797 : "ir" (i) : "memory");
8798 return c;
8799@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8800 #endif
8801 /* Modern 486+ processor */
8802 __i = i;
8803- asm volatile(LOCK_PREFIX "xaddl %0, %1"
8804+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8805+
8806+#ifdef CONFIG_PAX_REFCOUNT
8807+ "jno 0f\n"
8808+ "movl %0, %1\n"
8809+ "int $4\n0:\n"
8810+ _ASM_EXTABLE(0b, 0b)
8811+#endif
8812+
8813 : "+r" (i), "+m" (v->counter)
8814 : : "memory");
8815 return i + __i;
8816@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8817 }
8818
8819 /**
8820+ * atomic_add_return_unchecked - add integer and return
8821+ * @v: pointer of type atomic_unchecked_t
8822+ * @i: integer value to add
8823+ *
8824+ * Atomically adds @i to @v and returns @i + @v
8825+ */
8826+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8827+{
8828+ int __i;
8829+#ifdef CONFIG_M386
8830+ unsigned long flags;
8831+ if (unlikely(boot_cpu_data.x86 <= 3))
8832+ goto no_xadd;
8833+#endif
8834+ /* Modern 486+ processor */
8835+ __i = i;
8836+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
8837+ : "+r" (i), "+m" (v->counter)
8838+ : : "memory");
8839+ return i + __i;
8840+
8841+#ifdef CONFIG_M386
8842+no_xadd: /* Legacy 386 processor */
8843+ local_irq_save(flags);
8844+ __i = atomic_read_unchecked(v);
8845+ atomic_set_unchecked(v, i + __i);
8846+ local_irq_restore(flags);
8847+ return i + __i;
8848+#endif
8849+}
8850+
8851+/**
8852 * atomic_sub_return - subtract integer and return
8853 * @v: pointer of type atomic_t
8854 * @i: integer value to subtract
8855@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8856 return cmpxchg(&v->counter, old, new);
8857 }
8858
8859+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8860+{
8861+ return cmpxchg(&v->counter, old, new);
8862+}
8863+
8864 static inline int atomic_xchg(atomic_t *v, int new)
8865 {
8866 return xchg(&v->counter, new);
8867 }
8868
8869+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8870+{
8871+ return xchg(&v->counter, new);
8872+}
8873+
8874 /**
8875 * atomic_add_unless - add unless the number is already a given value
8876 * @v: pointer of type atomic_t
8877@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8878 */
8879 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8880 {
8881- int c, old;
8882+ int c, old, new;
8883 c = atomic_read(v);
8884 for (;;) {
8885- if (unlikely(c == (u)))
8886+ if (unlikely(c == u))
8887 break;
8888- old = atomic_cmpxchg((v), c, c + (a));
8889+
8890+ asm volatile("addl %2,%0\n"
8891+
8892+#ifdef CONFIG_PAX_REFCOUNT
8893+ "jno 0f\n"
8894+ "subl %2,%0\n"
8895+ "int $4\n0:\n"
8896+ _ASM_EXTABLE(0b, 0b)
8897+#endif
8898+
8899+ : "=r" (new)
8900+ : "0" (c), "ir" (a));
8901+
8902+ old = atomic_cmpxchg(v, c, new);
8903 if (likely(old == c))
8904 break;
8905 c = old;
8906 }
8907- return c != (u);
8908+ return c != u;
8909 }
8910
8911 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8912
8913 #define atomic_inc_return(v) (atomic_add_return(1, v))
8914+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8915+{
8916+ return atomic_add_return_unchecked(1, v);
8917+}
8918 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8919
8920 /* These are x86-specific, used by some header files */
8921@@ -266,9 +495,18 @@ typedef struct {
8922 u64 __aligned(8) counter;
8923 } atomic64_t;
8924
8925+#ifdef CONFIG_PAX_REFCOUNT
8926+typedef struct {
8927+ u64 __aligned(8) counter;
8928+} atomic64_unchecked_t;
8929+#else
8930+typedef atomic64_t atomic64_unchecked_t;
8931+#endif
8932+
8933 #define ATOMIC64_INIT(val) { (val) }
8934
8935 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8936+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8937
8938 /**
8939 * atomic64_xchg - xchg atomic64 variable
8940@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8941 * the old value.
8942 */
8943 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8944+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8945
8946 /**
8947 * atomic64_set - set atomic64 variable
8948@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8949 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8950
8951 /**
8952+ * atomic64_unchecked_set - set atomic64 variable
8953+ * @ptr: pointer to type atomic64_unchecked_t
8954+ * @new_val: value to assign
8955+ *
8956+ * Atomically sets the value of @ptr to @new_val.
8957+ */
8958+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8959+
8960+/**
8961 * atomic64_read - read atomic64 variable
8962 * @ptr: pointer to type atomic64_t
8963 *
8964@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8965 return res;
8966 }
8967
8968-extern u64 atomic64_read(atomic64_t *ptr);
8969+/**
8970+ * atomic64_read_unchecked - read atomic64 variable
8971+ * @ptr: pointer to type atomic64_unchecked_t
8972+ *
8973+ * Atomically reads the value of @ptr and returns it.
8974+ */
8975+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8976+{
8977+ u64 res;
8978+
8979+ /*
8980+ * Note, we inline this atomic64_unchecked_t primitive because
8981+ * it only clobbers EAX/EDX and leaves the others
8982+ * untouched. We also (somewhat subtly) rely on the
8983+ * fact that cmpxchg8b returns the current 64-bit value
8984+ * of the memory location we are touching:
8985+ */
8986+ asm volatile(
8987+ "mov %%ebx, %%eax\n\t"
8988+ "mov %%ecx, %%edx\n\t"
8989+ LOCK_PREFIX "cmpxchg8b %1\n"
8990+ : "=&A" (res)
8991+ : "m" (*ptr)
8992+ );
8993+
8994+ return res;
8995+}
8996
8997 /**
8998 * atomic64_add_return - add and return
8999@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9000 * Other variants with different arithmetic operators:
9001 */
9002 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9003+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9004 extern u64 atomic64_inc_return(atomic64_t *ptr);
9005+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9006 extern u64 atomic64_dec_return(atomic64_t *ptr);
9007+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9008
9009 /**
9010 * atomic64_add - add integer to atomic64 variable
9011@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9012 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9013
9014 /**
9015+ * atomic64_add_unchecked - add integer to atomic64 variable
9016+ * @delta: integer value to add
9017+ * @ptr: pointer to type atomic64_unchecked_t
9018+ *
9019+ * Atomically adds @delta to @ptr.
9020+ */
9021+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9022+
9023+/**
9024 * atomic64_sub - subtract the atomic64 variable
9025 * @delta: integer value to subtract
9026 * @ptr: pointer to type atomic64_t
9027@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9028 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9029
9030 /**
9031+ * atomic64_sub_unchecked - subtract the atomic64 variable
9032+ * @delta: integer value to subtract
9033+ * @ptr: pointer to type atomic64_unchecked_t
9034+ *
9035+ * Atomically subtracts @delta from @ptr.
9036+ */
9037+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9038+
9039+/**
9040 * atomic64_sub_and_test - subtract value from variable and test result
9041 * @delta: integer value to subtract
9042 * @ptr: pointer to type atomic64_t
9043@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9044 extern void atomic64_inc(atomic64_t *ptr);
9045
9046 /**
9047+ * atomic64_inc_unchecked - increment atomic64 variable
9048+ * @ptr: pointer to type atomic64_unchecked_t
9049+ *
9050+ * Atomically increments @ptr by 1.
9051+ */
9052+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9053+
9054+/**
9055 * atomic64_dec - decrement atomic64 variable
9056 * @ptr: pointer to type atomic64_t
9057 *
9058@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9059 extern void atomic64_dec(atomic64_t *ptr);
9060
9061 /**
9062+ * atomic64_dec_unchecked - decrement atomic64 variable
9063+ * @ptr: pointer to type atomic64_unchecked_t
9064+ *
9065+ * Atomically decrements @ptr by 1.
9066+ */
9067+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9068+
9069+/**
9070 * atomic64_dec_and_test - decrement and test
9071 * @ptr: pointer to type atomic64_t
9072 *
9073diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9074index d605dc2..fafd7bd 100644
9075--- a/arch/x86/include/asm/atomic_64.h
9076+++ b/arch/x86/include/asm/atomic_64.h
9077@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9078 }
9079
9080 /**
9081+ * atomic_read_unchecked - read atomic variable
9082+ * @v: pointer of type atomic_unchecked_t
9083+ *
9084+ * Atomically reads the value of @v.
9085+ */
9086+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9087+{
9088+ return v->counter;
9089+}
9090+
9091+/**
9092 * atomic_set - set atomic variable
9093 * @v: pointer of type atomic_t
9094 * @i: required value
9095@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9096 }
9097
9098 /**
9099+ * atomic_set_unchecked - set atomic variable
9100+ * @v: pointer of type atomic_unchecked_t
9101+ * @i: required value
9102+ *
9103+ * Atomically sets the value of @v to @i.
9104+ */
9105+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9106+{
9107+ v->counter = i;
9108+}
9109+
9110+/**
9111 * atomic_add - add integer to atomic variable
9112 * @i: integer value to add
9113 * @v: pointer of type atomic_t
9114@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
9115 */
9116 static inline void atomic_add(int i, atomic_t *v)
9117 {
9118- asm volatile(LOCK_PREFIX "addl %1,%0"
9119+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9120+
9121+#ifdef CONFIG_PAX_REFCOUNT
9122+ "jno 0f\n"
9123+ LOCK_PREFIX "subl %1,%0\n"
9124+ "int $4\n0:\n"
9125+ _ASM_EXTABLE(0b, 0b)
9126+#endif
9127+
9128+ : "=m" (v->counter)
9129+ : "ir" (i), "m" (v->counter));
9130+}
9131+
9132+/**
9133+ * atomic_add_unchecked - add integer to atomic variable
9134+ * @i: integer value to add
9135+ * @v: pointer of type atomic_unchecked_t
9136+ *
9137+ * Atomically adds @i to @v.
9138+ */
9139+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9140+{
9141+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9142 : "=m" (v->counter)
9143 : "ir" (i), "m" (v->counter));
9144 }
9145@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
9146 */
9147 static inline void atomic_sub(int i, atomic_t *v)
9148 {
9149- asm volatile(LOCK_PREFIX "subl %1,%0"
9150+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9151+
9152+#ifdef CONFIG_PAX_REFCOUNT
9153+ "jno 0f\n"
9154+ LOCK_PREFIX "addl %1,%0\n"
9155+ "int $4\n0:\n"
9156+ _ASM_EXTABLE(0b, 0b)
9157+#endif
9158+
9159+ : "=m" (v->counter)
9160+ : "ir" (i), "m" (v->counter));
9161+}
9162+
9163+/**
9164+ * atomic_sub_unchecked - subtract the atomic variable
9165+ * @i: integer value to subtract
9166+ * @v: pointer of type atomic_unchecked_t
9167+ *
9168+ * Atomically subtracts @i from @v.
9169+ */
9170+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9171+{
9172+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9173 : "=m" (v->counter)
9174 : "ir" (i), "m" (v->counter));
9175 }
9176@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9177 {
9178 unsigned char c;
9179
9180- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9181+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9182+
9183+#ifdef CONFIG_PAX_REFCOUNT
9184+ "jno 0f\n"
9185+ LOCK_PREFIX "addl %2,%0\n"
9186+ "int $4\n0:\n"
9187+ _ASM_EXTABLE(0b, 0b)
9188+#endif
9189+
9190+ "sete %1\n"
9191 : "=m" (v->counter), "=qm" (c)
9192 : "ir" (i), "m" (v->counter) : "memory");
9193 return c;
9194@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9195 */
9196 static inline void atomic_inc(atomic_t *v)
9197 {
9198- asm volatile(LOCK_PREFIX "incl %0"
9199+ asm volatile(LOCK_PREFIX "incl %0\n"
9200+
9201+#ifdef CONFIG_PAX_REFCOUNT
9202+ "jno 0f\n"
9203+ LOCK_PREFIX "decl %0\n"
9204+ "int $4\n0:\n"
9205+ _ASM_EXTABLE(0b, 0b)
9206+#endif
9207+
9208+ : "=m" (v->counter)
9209+ : "m" (v->counter));
9210+}
9211+
9212+/**
9213+ * atomic_inc_unchecked - increment atomic variable
9214+ * @v: pointer of type atomic_unchecked_t
9215+ *
9216+ * Atomically increments @v by 1.
9217+ */
9218+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9219+{
9220+ asm volatile(LOCK_PREFIX "incl %0\n"
9221 : "=m" (v->counter)
9222 : "m" (v->counter));
9223 }
9224@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9225 */
9226 static inline void atomic_dec(atomic_t *v)
9227 {
9228- asm volatile(LOCK_PREFIX "decl %0"
9229+ asm volatile(LOCK_PREFIX "decl %0\n"
9230+
9231+#ifdef CONFIG_PAX_REFCOUNT
9232+ "jno 0f\n"
9233+ LOCK_PREFIX "incl %0\n"
9234+ "int $4\n0:\n"
9235+ _ASM_EXTABLE(0b, 0b)
9236+#endif
9237+
9238+ : "=m" (v->counter)
9239+ : "m" (v->counter));
9240+}
9241+
9242+/**
9243+ * atomic_dec_unchecked - decrement atomic variable
9244+ * @v: pointer of type atomic_unchecked_t
9245+ *
9246+ * Atomically decrements @v by 1.
9247+ */
9248+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9249+{
9250+ asm volatile(LOCK_PREFIX "decl %0\n"
9251 : "=m" (v->counter)
9252 : "m" (v->counter));
9253 }
9254@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9255 {
9256 unsigned char c;
9257
9258- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9259+ asm volatile(LOCK_PREFIX "decl %0\n"
9260+
9261+#ifdef CONFIG_PAX_REFCOUNT
9262+ "jno 0f\n"
9263+ LOCK_PREFIX "incl %0\n"
9264+ "int $4\n0:\n"
9265+ _ASM_EXTABLE(0b, 0b)
9266+#endif
9267+
9268+ "sete %1\n"
9269 : "=m" (v->counter), "=qm" (c)
9270 : "m" (v->counter) : "memory");
9271 return c != 0;
9272@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9273 {
9274 unsigned char c;
9275
9276- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9277+ asm volatile(LOCK_PREFIX "incl %0\n"
9278+
9279+#ifdef CONFIG_PAX_REFCOUNT
9280+ "jno 0f\n"
9281+ LOCK_PREFIX "decl %0\n"
9282+ "int $4\n0:\n"
9283+ _ASM_EXTABLE(0b, 0b)
9284+#endif
9285+
9286+ "sete %1\n"
9287+ : "=m" (v->counter), "=qm" (c)
9288+ : "m" (v->counter) : "memory");
9289+ return c != 0;
9290+}
9291+
9292+/**
9293+ * atomic_inc_and_test_unchecked - increment and test
9294+ * @v: pointer of type atomic_unchecked_t
9295+ *
9296+ * Atomically increments @v by 1
9297+ * and returns true if the result is zero, or false for all
9298+ * other cases.
9299+ */
9300+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9301+{
9302+ unsigned char c;
9303+
9304+ asm volatile(LOCK_PREFIX "incl %0\n"
9305+ "sete %1\n"
9306 : "=m" (v->counter), "=qm" (c)
9307 : "m" (v->counter) : "memory");
9308 return c != 0;
9309@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9310 {
9311 unsigned char c;
9312
9313- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9314+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9315+
9316+#ifdef CONFIG_PAX_REFCOUNT
9317+ "jno 0f\n"
9318+ LOCK_PREFIX "subl %2,%0\n"
9319+ "int $4\n0:\n"
9320+ _ASM_EXTABLE(0b, 0b)
9321+#endif
9322+
9323+ "sets %1\n"
9324 : "=m" (v->counter), "=qm" (c)
9325 : "ir" (i), "m" (v->counter) : "memory");
9326 return c;
9327@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9328 static inline int atomic_add_return(int i, atomic_t *v)
9329 {
9330 int __i = i;
9331- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9332+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9333+
9334+#ifdef CONFIG_PAX_REFCOUNT
9335+ "jno 0f\n"
9336+ "movl %0, %1\n"
9337+ "int $4\n0:\n"
9338+ _ASM_EXTABLE(0b, 0b)
9339+#endif
9340+
9341+ : "+r" (i), "+m" (v->counter)
9342+ : : "memory");
9343+ return i + __i;
9344+}
9345+
9346+/**
9347+ * atomic_add_return_unchecked - add and return
9348+ * @i: integer value to add
9349+ * @v: pointer of type atomic_unchecked_t
9350+ *
9351+ * Atomically adds @i to @v and returns @i + @v
9352+ */
9353+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9354+{
9355+ int __i = i;
9356+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9357 : "+r" (i), "+m" (v->counter)
9358 : : "memory");
9359 return i + __i;
9360@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9361 }
9362
9363 #define atomic_inc_return(v) (atomic_add_return(1, v))
9364+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9365+{
9366+ return atomic_add_return_unchecked(1, v);
9367+}
9368 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9369
9370 /* The 64-bit atomic type */
9371@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9372 }
9373
9374 /**
9375+ * atomic64_read_unchecked - read atomic64 variable
9376+ * @v: pointer of type atomic64_unchecked_t
9377+ *
9378+ * Atomically reads the value of @v.
9379+ * Doesn't imply a read memory barrier.
9380+ */
9381+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9382+{
9383+ return v->counter;
9384+}
9385+
9386+/**
9387 * atomic64_set - set atomic64 variable
9388 * @v: pointer to type atomic64_t
9389 * @i: required value
9390@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9391 }
9392
9393 /**
9394+ * atomic64_set_unchecked - set atomic64 variable
9395+ * @v: pointer to type atomic64_unchecked_t
9396+ * @i: required value
9397+ *
9398+ * Atomically sets the value of @v to @i.
9399+ */
9400+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9401+{
9402+ v->counter = i;
9403+}
9404+
9405+/**
9406 * atomic64_add - add integer to atomic64 variable
9407 * @i: integer value to add
9408 * @v: pointer to type atomic64_t
9409@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9410 */
9411 static inline void atomic64_add(long i, atomic64_t *v)
9412 {
9413+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9414+
9415+#ifdef CONFIG_PAX_REFCOUNT
9416+ "jno 0f\n"
9417+ LOCK_PREFIX "subq %1,%0\n"
9418+ "int $4\n0:\n"
9419+ _ASM_EXTABLE(0b, 0b)
9420+#endif
9421+
9422+ : "=m" (v->counter)
9423+ : "er" (i), "m" (v->counter));
9424+}
9425+
9426+/**
9427+ * atomic64_add_unchecked - add integer to atomic64 variable
9428+ * @i: integer value to add
9429+ * @v: pointer to type atomic64_unchecked_t
9430+ *
9431+ * Atomically adds @i to @v.
9432+ */
9433+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9434+{
9435 asm volatile(LOCK_PREFIX "addq %1,%0"
9436 : "=m" (v->counter)
9437 : "er" (i), "m" (v->counter));
9438@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9439 */
9440 static inline void atomic64_sub(long i, atomic64_t *v)
9441 {
9442- asm volatile(LOCK_PREFIX "subq %1,%0"
9443+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9444+
9445+#ifdef CONFIG_PAX_REFCOUNT
9446+ "jno 0f\n"
9447+ LOCK_PREFIX "addq %1,%0\n"
9448+ "int $4\n0:\n"
9449+ _ASM_EXTABLE(0b, 0b)
9450+#endif
9451+
9452 : "=m" (v->counter)
9453 : "er" (i), "m" (v->counter));
9454 }
9455@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9456 {
9457 unsigned char c;
9458
9459- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9460+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9461+
9462+#ifdef CONFIG_PAX_REFCOUNT
9463+ "jno 0f\n"
9464+ LOCK_PREFIX "addq %2,%0\n"
9465+ "int $4\n0:\n"
9466+ _ASM_EXTABLE(0b, 0b)
9467+#endif
9468+
9469+ "sete %1\n"
9470 : "=m" (v->counter), "=qm" (c)
9471 : "er" (i), "m" (v->counter) : "memory");
9472 return c;
9473@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9474 */
9475 static inline void atomic64_inc(atomic64_t *v)
9476 {
9477+ asm volatile(LOCK_PREFIX "incq %0\n"
9478+
9479+#ifdef CONFIG_PAX_REFCOUNT
9480+ "jno 0f\n"
9481+ LOCK_PREFIX "decq %0\n"
9482+ "int $4\n0:\n"
9483+ _ASM_EXTABLE(0b, 0b)
9484+#endif
9485+
9486+ : "=m" (v->counter)
9487+ : "m" (v->counter));
9488+}
9489+
9490+/**
9491+ * atomic64_inc_unchecked - increment atomic64 variable
9492+ * @v: pointer to type atomic64_unchecked_t
9493+ *
9494+ * Atomically increments @v by 1.
9495+ */
9496+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9497+{
9498 asm volatile(LOCK_PREFIX "incq %0"
9499 : "=m" (v->counter)
9500 : "m" (v->counter));
9501@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9502 */
9503 static inline void atomic64_dec(atomic64_t *v)
9504 {
9505- asm volatile(LOCK_PREFIX "decq %0"
9506+ asm volatile(LOCK_PREFIX "decq %0\n"
9507+
9508+#ifdef CONFIG_PAX_REFCOUNT
9509+ "jno 0f\n"
9510+ LOCK_PREFIX "incq %0\n"
9511+ "int $4\n0:\n"
9512+ _ASM_EXTABLE(0b, 0b)
9513+#endif
9514+
9515+ : "=m" (v->counter)
9516+ : "m" (v->counter));
9517+}
9518+
9519+/**
9520+ * atomic64_dec_unchecked - decrement atomic64 variable
9521+ * @v: pointer to type atomic64_t
9522+ *
9523+ * Atomically decrements @v by 1.
9524+ */
9525+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9526+{
9527+ asm volatile(LOCK_PREFIX "decq %0\n"
9528 : "=m" (v->counter)
9529 : "m" (v->counter));
9530 }
9531@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9532 {
9533 unsigned char c;
9534
9535- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9536+ asm volatile(LOCK_PREFIX "decq %0\n"
9537+
9538+#ifdef CONFIG_PAX_REFCOUNT
9539+ "jno 0f\n"
9540+ LOCK_PREFIX "incq %0\n"
9541+ "int $4\n0:\n"
9542+ _ASM_EXTABLE(0b, 0b)
9543+#endif
9544+
9545+ "sete %1\n"
9546 : "=m" (v->counter), "=qm" (c)
9547 : "m" (v->counter) : "memory");
9548 return c != 0;
9549@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9550 {
9551 unsigned char c;
9552
9553- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9554+ asm volatile(LOCK_PREFIX "incq %0\n"
9555+
9556+#ifdef CONFIG_PAX_REFCOUNT
9557+ "jno 0f\n"
9558+ LOCK_PREFIX "decq %0\n"
9559+ "int $4\n0:\n"
9560+ _ASM_EXTABLE(0b, 0b)
9561+#endif
9562+
9563+ "sete %1\n"
9564 : "=m" (v->counter), "=qm" (c)
9565 : "m" (v->counter) : "memory");
9566 return c != 0;
9567@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9568 {
9569 unsigned char c;
9570
9571- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9572+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9573+
9574+#ifdef CONFIG_PAX_REFCOUNT
9575+ "jno 0f\n"
9576+ LOCK_PREFIX "subq %2,%0\n"
9577+ "int $4\n0:\n"
9578+ _ASM_EXTABLE(0b, 0b)
9579+#endif
9580+
9581+ "sets %1\n"
9582 : "=m" (v->counter), "=qm" (c)
9583 : "er" (i), "m" (v->counter) : "memory");
9584 return c;
9585@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9586 static inline long atomic64_add_return(long i, atomic64_t *v)
9587 {
9588 long __i = i;
9589- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9590+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9591+
9592+#ifdef CONFIG_PAX_REFCOUNT
9593+ "jno 0f\n"
9594+ "movq %0, %1\n"
9595+ "int $4\n0:\n"
9596+ _ASM_EXTABLE(0b, 0b)
9597+#endif
9598+
9599+ : "+r" (i), "+m" (v->counter)
9600+ : : "memory");
9601+ return i + __i;
9602+}
9603+
9604+/**
9605+ * atomic64_add_return_unchecked - add and return
9606+ * @i: integer value to add
9607+ * @v: pointer to type atomic64_unchecked_t
9608+ *
9609+ * Atomically adds @i to @v and returns @i + @v
9610+ */
9611+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9612+{
9613+ long __i = i;
9614+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
9615 : "+r" (i), "+m" (v->counter)
9616 : : "memory");
9617 return i + __i;
9618@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9619 }
9620
9621 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9622+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9623+{
9624+ return atomic64_add_return_unchecked(1, v);
9625+}
9626 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9627
9628 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9629@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9630 return cmpxchg(&v->counter, old, new);
9631 }
9632
9633+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9634+{
9635+ return cmpxchg(&v->counter, old, new);
9636+}
9637+
9638 static inline long atomic64_xchg(atomic64_t *v, long new)
9639 {
9640 return xchg(&v->counter, new);
9641 }
9642
9643+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9644+{
9645+ return xchg(&v->counter, new);
9646+}
9647+
9648 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9649 {
9650 return cmpxchg(&v->counter, old, new);
9651 }
9652
9653+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9654+{
9655+ return cmpxchg(&v->counter, old, new);
9656+}
9657+
9658 static inline long atomic_xchg(atomic_t *v, int new)
9659 {
9660 return xchg(&v->counter, new);
9661 }
9662
9663+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9664+{
9665+ return xchg(&v->counter, new);
9666+}
9667+
9668 /**
9669 * atomic_add_unless - add unless the number is a given value
9670 * @v: pointer of type atomic_t
9671@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9672 */
9673 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9674 {
9675- int c, old;
9676+ int c, old, new;
9677 c = atomic_read(v);
9678 for (;;) {
9679- if (unlikely(c == (u)))
9680+ if (unlikely(c == u))
9681 break;
9682- old = atomic_cmpxchg((v), c, c + (a));
9683+
9684+ asm volatile("addl %2,%0\n"
9685+
9686+#ifdef CONFIG_PAX_REFCOUNT
9687+ "jno 0f\n"
9688+ "subl %2,%0\n"
9689+ "int $4\n0:\n"
9690+ _ASM_EXTABLE(0b, 0b)
9691+#endif
9692+
9693+ : "=r" (new)
9694+ : "0" (c), "ir" (a));
9695+
9696+ old = atomic_cmpxchg(v, c, new);
9697 if (likely(old == c))
9698 break;
9699 c = old;
9700 }
9701- return c != (u);
9702+ return c != u;
9703 }
9704
9705 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9706@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9707 */
9708 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9709 {
9710- long c, old;
9711+ long c, old, new;
9712 c = atomic64_read(v);
9713 for (;;) {
9714- if (unlikely(c == (u)))
9715+ if (unlikely(c == u))
9716 break;
9717- old = atomic64_cmpxchg((v), c, c + (a));
9718+
9719+ asm volatile("addq %2,%0\n"
9720+
9721+#ifdef CONFIG_PAX_REFCOUNT
9722+ "jno 0f\n"
9723+ "subq %2,%0\n"
9724+ "int $4\n0:\n"
9725+ _ASM_EXTABLE(0b, 0b)
9726+#endif
9727+
9728+ : "=r" (new)
9729+ : "0" (c), "er" (a));
9730+
9731+ old = atomic64_cmpxchg(v, c, new);
9732 if (likely(old == c))
9733 break;
9734 c = old;
9735 }
9736- return c != (u);
9737+ return c != u;
9738 }
9739
9740 /**
9741diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9742index 02b47a6..d5c4b15 100644
9743--- a/arch/x86/include/asm/bitops.h
9744+++ b/arch/x86/include/asm/bitops.h
9745@@ -38,7 +38,7 @@
9746 * a mask operation on a byte.
9747 */
9748 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9749-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9750+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9751 #define CONST_MASK(nr) (1 << ((nr) & 7))
9752
9753 /**
9754diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9755index 7a10659..8bbf355 100644
9756--- a/arch/x86/include/asm/boot.h
9757+++ b/arch/x86/include/asm/boot.h
9758@@ -11,10 +11,15 @@
9759 #include <asm/pgtable_types.h>
9760
9761 /* Physical address where kernel should be loaded. */
9762-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9763+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9764 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9765 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9766
9767+#ifndef __ASSEMBLY__
9768+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9769+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9770+#endif
9771+
9772 /* Minimum kernel alignment, as a power of two */
9773 #ifdef CONFIG_X86_64
9774 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9775diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9776index 549860d..7d45f68 100644
9777--- a/arch/x86/include/asm/cache.h
9778+++ b/arch/x86/include/asm/cache.h
9779@@ -5,9 +5,10 @@
9780
9781 /* L1 cache line size */
9782 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9783-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9784+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9785
9786 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9787+#define __read_only __attribute__((__section__(".data.read_only")))
9788
9789 #ifdef CONFIG_X86_VSMP
9790 /* vSMP Internode cacheline shift */
9791diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9792index b54f6af..5b376a6 100644
9793--- a/arch/x86/include/asm/cacheflush.h
9794+++ b/arch/x86/include/asm/cacheflush.h
9795@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9796 static inline unsigned long get_page_memtype(struct page *pg)
9797 {
9798 if (!PageUncached(pg) && !PageWC(pg))
9799- return -1;
9800+ return ~0UL;
9801 else if (!PageUncached(pg) && PageWC(pg))
9802 return _PAGE_CACHE_WC;
9803 else if (PageUncached(pg) && !PageWC(pg))
9804@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9805 SetPageWC(pg);
9806 break;
9807 default:
9808- case -1:
9809+ case ~0UL:
9810 ClearPageUncached(pg);
9811 ClearPageWC(pg);
9812 break;
9813diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9814index 0e63c9a..ab8d972 100644
9815--- a/arch/x86/include/asm/calling.h
9816+++ b/arch/x86/include/asm/calling.h
9817@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9818 * for assembly code:
9819 */
9820
9821-#define R15 0
9822-#define R14 8
9823-#define R13 16
9824-#define R12 24
9825-#define RBP 32
9826-#define RBX 40
9827+#define R15 (0)
9828+#define R14 (8)
9829+#define R13 (16)
9830+#define R12 (24)
9831+#define RBP (32)
9832+#define RBX (40)
9833
9834 /* arguments: interrupts/non tracing syscalls only save up to here: */
9835-#define R11 48
9836-#define R10 56
9837-#define R9 64
9838-#define R8 72
9839-#define RAX 80
9840-#define RCX 88
9841-#define RDX 96
9842-#define RSI 104
9843-#define RDI 112
9844-#define ORIG_RAX 120 /* + error_code */
9845+#define R11 (48)
9846+#define R10 (56)
9847+#define R9 (64)
9848+#define R8 (72)
9849+#define RAX (80)
9850+#define RCX (88)
9851+#define RDX (96)
9852+#define RSI (104)
9853+#define RDI (112)
9854+#define ORIG_RAX (120) /* + error_code */
9855 /* end of arguments */
9856
9857 /* cpu exception frame or undefined in case of fast syscall: */
9858-#define RIP 128
9859-#define CS 136
9860-#define EFLAGS 144
9861-#define RSP 152
9862-#define SS 160
9863+#define RIP (128)
9864+#define CS (136)
9865+#define EFLAGS (144)
9866+#define RSP (152)
9867+#define SS (160)
9868
9869 #define ARGOFFSET R11
9870 #define SWFRAME ORIG_RAX
9871diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9872index 46fc474..b02b0f9 100644
9873--- a/arch/x86/include/asm/checksum_32.h
9874+++ b/arch/x86/include/asm/checksum_32.h
9875@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9876 int len, __wsum sum,
9877 int *src_err_ptr, int *dst_err_ptr);
9878
9879+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9880+ int len, __wsum sum,
9881+ int *src_err_ptr, int *dst_err_ptr);
9882+
9883+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9884+ int len, __wsum sum,
9885+ int *src_err_ptr, int *dst_err_ptr);
9886+
9887 /*
9888 * Note: when you get a NULL pointer exception here this means someone
9889 * passed in an incorrect kernel address to one of these functions.
9890@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9891 int *err_ptr)
9892 {
9893 might_sleep();
9894- return csum_partial_copy_generic((__force void *)src, dst,
9895+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9896 len, sum, err_ptr, NULL);
9897 }
9898
9899@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9900 {
9901 might_sleep();
9902 if (access_ok(VERIFY_WRITE, dst, len))
9903- return csum_partial_copy_generic(src, (__force void *)dst,
9904+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9905 len, sum, NULL, err_ptr);
9906
9907 if (len)
9908diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9909index 617bd56..7b047a1 100644
9910--- a/arch/x86/include/asm/desc.h
9911+++ b/arch/x86/include/asm/desc.h
9912@@ -4,6 +4,7 @@
9913 #include <asm/desc_defs.h>
9914 #include <asm/ldt.h>
9915 #include <asm/mmu.h>
9916+#include <asm/pgtable.h>
9917 #include <linux/smp.h>
9918
9919 static inline void fill_ldt(struct desc_struct *desc,
9920@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9921 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9922 desc->type = (info->read_exec_only ^ 1) << 1;
9923 desc->type |= info->contents << 2;
9924+ desc->type |= info->seg_not_present ^ 1;
9925 desc->s = 1;
9926 desc->dpl = 0x3;
9927 desc->p = info->seg_not_present ^ 1;
9928@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9929 }
9930
9931 extern struct desc_ptr idt_descr;
9932-extern gate_desc idt_table[];
9933-
9934-struct gdt_page {
9935- struct desc_struct gdt[GDT_ENTRIES];
9936-} __attribute__((aligned(PAGE_SIZE)));
9937-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9938+extern gate_desc idt_table[256];
9939
9940+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9941 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9942 {
9943- return per_cpu(gdt_page, cpu).gdt;
9944+ return cpu_gdt_table[cpu];
9945 }
9946
9947 #ifdef CONFIG_X86_64
9948@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9949 unsigned long base, unsigned dpl, unsigned flags,
9950 unsigned short seg)
9951 {
9952- gate->a = (seg << 16) | (base & 0xffff);
9953- gate->b = (base & 0xffff0000) |
9954- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9955+ gate->gate.offset_low = base;
9956+ gate->gate.seg = seg;
9957+ gate->gate.reserved = 0;
9958+ gate->gate.type = type;
9959+ gate->gate.s = 0;
9960+ gate->gate.dpl = dpl;
9961+ gate->gate.p = 1;
9962+ gate->gate.offset_high = base >> 16;
9963 }
9964
9965 #endif
9966@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9967 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9968 const gate_desc *gate)
9969 {
9970+ pax_open_kernel();
9971 memcpy(&idt[entry], gate, sizeof(*gate));
9972+ pax_close_kernel();
9973 }
9974
9975 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9976 const void *desc)
9977 {
9978+ pax_open_kernel();
9979 memcpy(&ldt[entry], desc, 8);
9980+ pax_close_kernel();
9981 }
9982
9983 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9984@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9985 size = sizeof(struct desc_struct);
9986 break;
9987 }
9988+
9989+ pax_open_kernel();
9990 memcpy(&gdt[entry], desc, size);
9991+ pax_close_kernel();
9992 }
9993
9994 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9995@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9996
9997 static inline void native_load_tr_desc(void)
9998 {
9999+ pax_open_kernel();
10000 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10001+ pax_close_kernel();
10002 }
10003
10004 static inline void native_load_gdt(const struct desc_ptr *dtr)
10005@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10006 unsigned int i;
10007 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10008
10009+ pax_open_kernel();
10010 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10011 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10012+ pax_close_kernel();
10013 }
10014
10015 #define _LDT_empty(info) \
10016@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10017 desc->limit = (limit >> 16) & 0xf;
10018 }
10019
10020-static inline void _set_gate(int gate, unsigned type, void *addr,
10021+static inline void _set_gate(int gate, unsigned type, const void *addr,
10022 unsigned dpl, unsigned ist, unsigned seg)
10023 {
10024 gate_desc s;
10025@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10026 * Pentium F0 0F bugfix can have resulted in the mapped
10027 * IDT being write-protected.
10028 */
10029-static inline void set_intr_gate(unsigned int n, void *addr)
10030+static inline void set_intr_gate(unsigned int n, const void *addr)
10031 {
10032 BUG_ON((unsigned)n > 0xFF);
10033 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10034@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10035 /*
10036 * This routine sets up an interrupt gate at directory privilege level 3.
10037 */
10038-static inline void set_system_intr_gate(unsigned int n, void *addr)
10039+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10040 {
10041 BUG_ON((unsigned)n > 0xFF);
10042 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10043 }
10044
10045-static inline void set_system_trap_gate(unsigned int n, void *addr)
10046+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10047 {
10048 BUG_ON((unsigned)n > 0xFF);
10049 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10050 }
10051
10052-static inline void set_trap_gate(unsigned int n, void *addr)
10053+static inline void set_trap_gate(unsigned int n, const void *addr)
10054 {
10055 BUG_ON((unsigned)n > 0xFF);
10056 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10057@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10058 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10059 {
10060 BUG_ON((unsigned)n > 0xFF);
10061- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10062+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10063 }
10064
10065-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10066+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10067 {
10068 BUG_ON((unsigned)n > 0xFF);
10069 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10070 }
10071
10072-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10073+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10074 {
10075 BUG_ON((unsigned)n > 0xFF);
10076 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10077 }
10078
10079+#ifdef CONFIG_X86_32
10080+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10081+{
10082+ struct desc_struct d;
10083+
10084+ if (likely(limit))
10085+ limit = (limit - 1UL) >> PAGE_SHIFT;
10086+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10087+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10088+}
10089+#endif
10090+
10091 #endif /* _ASM_X86_DESC_H */
10092diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10093index 9d66848..6b4a691 100644
10094--- a/arch/x86/include/asm/desc_defs.h
10095+++ b/arch/x86/include/asm/desc_defs.h
10096@@ -31,6 +31,12 @@ struct desc_struct {
10097 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10098 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10099 };
10100+ struct {
10101+ u16 offset_low;
10102+ u16 seg;
10103+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10104+ unsigned offset_high: 16;
10105+ } gate;
10106 };
10107 } __attribute__((packed));
10108
10109diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
10110index cee34e9..a7c3fa2 100644
10111--- a/arch/x86/include/asm/device.h
10112+++ b/arch/x86/include/asm/device.h
10113@@ -6,7 +6,7 @@ struct dev_archdata {
10114 void *acpi_handle;
10115 #endif
10116 #ifdef CONFIG_X86_64
10117-struct dma_map_ops *dma_ops;
10118+ const struct dma_map_ops *dma_ops;
10119 #endif
10120 #ifdef CONFIG_DMAR
10121 void *iommu; /* hook for IOMMU specific extension */
10122diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
10123index 6a25d5d..786b202 100644
10124--- a/arch/x86/include/asm/dma-mapping.h
10125+++ b/arch/x86/include/asm/dma-mapping.h
10126@@ -25,9 +25,9 @@ extern int iommu_merge;
10127 extern struct device x86_dma_fallback_dev;
10128 extern int panic_on_overflow;
10129
10130-extern struct dma_map_ops *dma_ops;
10131+extern const struct dma_map_ops *dma_ops;
10132
10133-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10134+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
10135 {
10136 #ifdef CONFIG_X86_32
10137 return dma_ops;
10138@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10139 /* Make sure we keep the same behaviour */
10140 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
10141 {
10142- struct dma_map_ops *ops = get_dma_ops(dev);
10143+ const struct dma_map_ops *ops = get_dma_ops(dev);
10144 if (ops->mapping_error)
10145 return ops->mapping_error(dev, dma_addr);
10146
10147@@ -122,7 +122,7 @@ static inline void *
10148 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10149 gfp_t gfp)
10150 {
10151- struct dma_map_ops *ops = get_dma_ops(dev);
10152+ const struct dma_map_ops *ops = get_dma_ops(dev);
10153 void *memory;
10154
10155 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
10156@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10157 static inline void dma_free_coherent(struct device *dev, size_t size,
10158 void *vaddr, dma_addr_t bus)
10159 {
10160- struct dma_map_ops *ops = get_dma_ops(dev);
10161+ const struct dma_map_ops *ops = get_dma_ops(dev);
10162
10163 WARN_ON(irqs_disabled()); /* for portability */
10164
10165diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10166index 40b4e61..40d8133 100644
10167--- a/arch/x86/include/asm/e820.h
10168+++ b/arch/x86/include/asm/e820.h
10169@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
10170 #define ISA_END_ADDRESS 0x100000
10171 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
10172
10173-#define BIOS_BEGIN 0x000a0000
10174+#define BIOS_BEGIN 0x000c0000
10175 #define BIOS_END 0x00100000
10176
10177 #ifdef __KERNEL__
10178diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10179index 8ac9d9a..0a6c96e 100644
10180--- a/arch/x86/include/asm/elf.h
10181+++ b/arch/x86/include/asm/elf.h
10182@@ -257,7 +257,25 @@ extern int force_personality32;
10183 the loader. We need to make sure that it is out of the way of the program
10184 that it will "exec", and that there is sufficient room for the brk. */
10185
10186+#ifdef CONFIG_PAX_SEGMEXEC
10187+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10188+#else
10189 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10190+#endif
10191+
10192+#ifdef CONFIG_PAX_ASLR
10193+#ifdef CONFIG_X86_32
10194+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10195+
10196+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10197+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10198+#else
10199+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10200+
10201+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10202+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10203+#endif
10204+#endif
10205
10206 /* This yields a mask that user programs can use to figure out what
10207 instruction set this CPU supports. This could be done in user space,
10208@@ -310,9 +328,7 @@ do { \
10209
10210 #define ARCH_DLINFO \
10211 do { \
10212- if (vdso_enabled) \
10213- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10214- (unsigned long)current->mm->context.vdso); \
10215+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10216 } while (0)
10217
10218 #define AT_SYSINFO 32
10219@@ -323,7 +339,7 @@ do { \
10220
10221 #endif /* !CONFIG_X86_32 */
10222
10223-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10224+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10225
10226 #define VDSO_ENTRY \
10227 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10228@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10229 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10230 #define compat_arch_setup_additional_pages syscall32_setup_pages
10231
10232-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10233-#define arch_randomize_brk arch_randomize_brk
10234-
10235 #endif /* _ASM_X86_ELF_H */
10236diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10237index cc70c1c..d96d011 100644
10238--- a/arch/x86/include/asm/emergency-restart.h
10239+++ b/arch/x86/include/asm/emergency-restart.h
10240@@ -15,6 +15,6 @@ enum reboot_type {
10241
10242 extern enum reboot_type reboot_type;
10243
10244-extern void machine_emergency_restart(void);
10245+extern void machine_emergency_restart(void) __noreturn;
10246
10247 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10248diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10249index 1f11ce4..7caabd1 100644
10250--- a/arch/x86/include/asm/futex.h
10251+++ b/arch/x86/include/asm/futex.h
10252@@ -12,16 +12,18 @@
10253 #include <asm/system.h>
10254
10255 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10256+ typecheck(u32 __user *, uaddr); \
10257 asm volatile("1:\t" insn "\n" \
10258 "2:\t.section .fixup,\"ax\"\n" \
10259 "3:\tmov\t%3, %1\n" \
10260 "\tjmp\t2b\n" \
10261 "\t.previous\n" \
10262 _ASM_EXTABLE(1b, 3b) \
10263- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10264+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10265 : "i" (-EFAULT), "0" (oparg), "1" (0))
10266
10267 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10268+ typecheck(u32 __user *, uaddr); \
10269 asm volatile("1:\tmovl %2, %0\n" \
10270 "\tmovl\t%0, %3\n" \
10271 "\t" insn "\n" \
10272@@ -34,10 +36,10 @@
10273 _ASM_EXTABLE(1b, 4b) \
10274 _ASM_EXTABLE(2b, 4b) \
10275 : "=&a" (oldval), "=&r" (ret), \
10276- "+m" (*uaddr), "=&r" (tem) \
10277+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10278 : "r" (oparg), "i" (-EFAULT), "1" (0))
10279
10280-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10281+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10282 {
10283 int op = (encoded_op >> 28) & 7;
10284 int cmp = (encoded_op >> 24) & 15;
10285@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10286
10287 switch (op) {
10288 case FUTEX_OP_SET:
10289- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10290+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10291 break;
10292 case FUTEX_OP_ADD:
10293- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10294+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10295 uaddr, oparg);
10296 break;
10297 case FUTEX_OP_OR:
10298@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10299 return ret;
10300 }
10301
10302-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10303+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10304 int newval)
10305 {
10306
10307@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10308 return -ENOSYS;
10309 #endif
10310
10311- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10312+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10313 return -EFAULT;
10314
10315- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10316+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10317 "2:\t.section .fixup, \"ax\"\n"
10318 "3:\tmov %2, %0\n"
10319 "\tjmp 2b\n"
10320 "\t.previous\n"
10321 _ASM_EXTABLE(1b, 3b)
10322- : "=a" (oldval), "+m" (*uaddr)
10323+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10324 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10325 : "memory"
10326 );
10327diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10328index ba180d9..3bad351 100644
10329--- a/arch/x86/include/asm/hw_irq.h
10330+++ b/arch/x86/include/asm/hw_irq.h
10331@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10332 extern void enable_IO_APIC(void);
10333
10334 /* Statistics */
10335-extern atomic_t irq_err_count;
10336-extern atomic_t irq_mis_count;
10337+extern atomic_unchecked_t irq_err_count;
10338+extern atomic_unchecked_t irq_mis_count;
10339
10340 /* EISA */
10341 extern void eisa_set_level_irq(unsigned int irq);
10342diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10343index 0b20bbb..4cb1396 100644
10344--- a/arch/x86/include/asm/i387.h
10345+++ b/arch/x86/include/asm/i387.h
10346@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10347 {
10348 int err;
10349
10350+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10351+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10352+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10353+#endif
10354+
10355 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10356 "2:\n"
10357 ".section .fixup,\"ax\"\n"
10358@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10359 {
10360 int err;
10361
10362+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10363+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10364+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10365+#endif
10366+
10367 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10368 "2:\n"
10369 ".section .fixup,\"ax\"\n"
10370@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10371 }
10372
10373 /* We need a safe address that is cheap to find and that is already
10374- in L1 during context switch. The best choices are unfortunately
10375- different for UP and SMP */
10376-#ifdef CONFIG_SMP
10377-#define safe_address (__per_cpu_offset[0])
10378-#else
10379-#define safe_address (kstat_cpu(0).cpustat.user)
10380-#endif
10381+ in L1 during context switch. */
10382+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10383
10384 /*
10385 * These must be called with preempt disabled
10386@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10387 struct thread_info *me = current_thread_info();
10388 preempt_disable();
10389 if (me->status & TS_USEDFPU)
10390- __save_init_fpu(me->task);
10391+ __save_init_fpu(current);
10392 else
10393 clts();
10394 }
10395diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10396index a299900..15c5410 100644
10397--- a/arch/x86/include/asm/io_32.h
10398+++ b/arch/x86/include/asm/io_32.h
10399@@ -3,6 +3,7 @@
10400
10401 #include <linux/string.h>
10402 #include <linux/compiler.h>
10403+#include <asm/processor.h>
10404
10405 /*
10406 * This file contains the definitions for the x86 IO instructions
10407@@ -42,6 +43,17 @@
10408
10409 #ifdef __KERNEL__
10410
10411+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10412+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10413+{
10414+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10415+}
10416+
10417+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10418+{
10419+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10420+}
10421+
10422 #include <asm-generic/iomap.h>
10423
10424 #include <linux/vmalloc.h>
10425diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10426index 2440678..c158b88 100644
10427--- a/arch/x86/include/asm/io_64.h
10428+++ b/arch/x86/include/asm/io_64.h
10429@@ -140,6 +140,17 @@ __OUTS(l)
10430
10431 #include <linux/vmalloc.h>
10432
10433+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10434+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10435+{
10436+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10437+}
10438+
10439+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10440+{
10441+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10442+}
10443+
10444 #include <asm-generic/iomap.h>
10445
10446 void __memcpy_fromio(void *, unsigned long, unsigned);
10447diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10448index fd6d21b..8b13915 100644
10449--- a/arch/x86/include/asm/iommu.h
10450+++ b/arch/x86/include/asm/iommu.h
10451@@ -3,7 +3,7 @@
10452
10453 extern void pci_iommu_shutdown(void);
10454 extern void no_iommu_init(void);
10455-extern struct dma_map_ops nommu_dma_ops;
10456+extern const struct dma_map_ops nommu_dma_ops;
10457 extern int force_iommu, no_iommu;
10458 extern int iommu_detected;
10459 extern int iommu_pass_through;
10460diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10461index 9e2b952..557206e 100644
10462--- a/arch/x86/include/asm/irqflags.h
10463+++ b/arch/x86/include/asm/irqflags.h
10464@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10465 sti; \
10466 sysexit
10467
10468+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10469+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10470+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10471+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10472+
10473 #else
10474 #define INTERRUPT_RETURN iret
10475 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10476diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10477index 4fe681d..bb6d40c 100644
10478--- a/arch/x86/include/asm/kprobes.h
10479+++ b/arch/x86/include/asm/kprobes.h
10480@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10481 #define BREAKPOINT_INSTRUCTION 0xcc
10482 #define RELATIVEJUMP_INSTRUCTION 0xe9
10483 #define MAX_INSN_SIZE 16
10484-#define MAX_STACK_SIZE 64
10485-#define MIN_STACK_SIZE(ADDR) \
10486- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10487- THREAD_SIZE - (unsigned long)(ADDR))) \
10488- ? (MAX_STACK_SIZE) \
10489- : (((unsigned long)current_thread_info()) + \
10490- THREAD_SIZE - (unsigned long)(ADDR)))
10491+#define MAX_STACK_SIZE 64UL
10492+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10493
10494 #define flush_insn_slot(p) do { } while (0)
10495
10496diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10497index 08bc2ff..2e88d1f 100644
10498--- a/arch/x86/include/asm/kvm_host.h
10499+++ b/arch/x86/include/asm/kvm_host.h
10500@@ -534,9 +534,9 @@ struct kvm_x86_ops {
10501 bool (*gb_page_enable)(void);
10502
10503 const struct trace_print_flags *exit_reasons_str;
10504-};
10505+} __do_const;
10506
10507-extern struct kvm_x86_ops *kvm_x86_ops;
10508+extern const struct kvm_x86_ops *kvm_x86_ops;
10509
10510 int kvm_mmu_module_init(void);
10511 void kvm_mmu_module_exit(void);
10512diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10513index 47b9b6f..815aaa1 100644
10514--- a/arch/x86/include/asm/local.h
10515+++ b/arch/x86/include/asm/local.h
10516@@ -18,26 +18,58 @@ typedef struct {
10517
10518 static inline void local_inc(local_t *l)
10519 {
10520- asm volatile(_ASM_INC "%0"
10521+ asm volatile(_ASM_INC "%0\n"
10522+
10523+#ifdef CONFIG_PAX_REFCOUNT
10524+ "jno 0f\n"
10525+ _ASM_DEC "%0\n"
10526+ "int $4\n0:\n"
10527+ _ASM_EXTABLE(0b, 0b)
10528+#endif
10529+
10530 : "+m" (l->a.counter));
10531 }
10532
10533 static inline void local_dec(local_t *l)
10534 {
10535- asm volatile(_ASM_DEC "%0"
10536+ asm volatile(_ASM_DEC "%0\n"
10537+
10538+#ifdef CONFIG_PAX_REFCOUNT
10539+ "jno 0f\n"
10540+ _ASM_INC "%0\n"
10541+ "int $4\n0:\n"
10542+ _ASM_EXTABLE(0b, 0b)
10543+#endif
10544+
10545 : "+m" (l->a.counter));
10546 }
10547
10548 static inline void local_add(long i, local_t *l)
10549 {
10550- asm volatile(_ASM_ADD "%1,%0"
10551+ asm volatile(_ASM_ADD "%1,%0\n"
10552+
10553+#ifdef CONFIG_PAX_REFCOUNT
10554+ "jno 0f\n"
10555+ _ASM_SUB "%1,%0\n"
10556+ "int $4\n0:\n"
10557+ _ASM_EXTABLE(0b, 0b)
10558+#endif
10559+
10560 : "+m" (l->a.counter)
10561 : "ir" (i));
10562 }
10563
10564 static inline void local_sub(long i, local_t *l)
10565 {
10566- asm volatile(_ASM_SUB "%1,%0"
10567+ asm volatile(_ASM_SUB "%1,%0\n"
10568+
10569+#ifdef CONFIG_PAX_REFCOUNT
10570+ "jno 0f\n"
10571+ _ASM_ADD "%1,%0\n"
10572+ "int $4\n0:\n"
10573+ _ASM_EXTABLE(0b, 0b)
10574+#endif
10575+
10576 : "+m" (l->a.counter)
10577 : "ir" (i));
10578 }
10579@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10580 {
10581 unsigned char c;
10582
10583- asm volatile(_ASM_SUB "%2,%0; sete %1"
10584+ asm volatile(_ASM_SUB "%2,%0\n"
10585+
10586+#ifdef CONFIG_PAX_REFCOUNT
10587+ "jno 0f\n"
10588+ _ASM_ADD "%2,%0\n"
10589+ "int $4\n0:\n"
10590+ _ASM_EXTABLE(0b, 0b)
10591+#endif
10592+
10593+ "sete %1\n"
10594 : "+m" (l->a.counter), "=qm" (c)
10595 : "ir" (i) : "memory");
10596 return c;
10597@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10598 {
10599 unsigned char c;
10600
10601- asm volatile(_ASM_DEC "%0; sete %1"
10602+ asm volatile(_ASM_DEC "%0\n"
10603+
10604+#ifdef CONFIG_PAX_REFCOUNT
10605+ "jno 0f\n"
10606+ _ASM_INC "%0\n"
10607+ "int $4\n0:\n"
10608+ _ASM_EXTABLE(0b, 0b)
10609+#endif
10610+
10611+ "sete %1\n"
10612 : "+m" (l->a.counter), "=qm" (c)
10613 : : "memory");
10614 return c != 0;
10615@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10616 {
10617 unsigned char c;
10618
10619- asm volatile(_ASM_INC "%0; sete %1"
10620+ asm volatile(_ASM_INC "%0\n"
10621+
10622+#ifdef CONFIG_PAX_REFCOUNT
10623+ "jno 0f\n"
10624+ _ASM_DEC "%0\n"
10625+ "int $4\n0:\n"
10626+ _ASM_EXTABLE(0b, 0b)
10627+#endif
10628+
10629+ "sete %1\n"
10630 : "+m" (l->a.counter), "=qm" (c)
10631 : : "memory");
10632 return c != 0;
10633@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10634 {
10635 unsigned char c;
10636
10637- asm volatile(_ASM_ADD "%2,%0; sets %1"
10638+ asm volatile(_ASM_ADD "%2,%0\n"
10639+
10640+#ifdef CONFIG_PAX_REFCOUNT
10641+ "jno 0f\n"
10642+ _ASM_SUB "%2,%0\n"
10643+ "int $4\n0:\n"
10644+ _ASM_EXTABLE(0b, 0b)
10645+#endif
10646+
10647+ "sets %1\n"
10648 : "+m" (l->a.counter), "=qm" (c)
10649 : "ir" (i) : "memory");
10650 return c;
10651@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10652 #endif
10653 /* Modern 486+ processor */
10654 __i = i;
10655- asm volatile(_ASM_XADD "%0, %1;"
10656+ asm volatile(_ASM_XADD "%0, %1\n"
10657+
10658+#ifdef CONFIG_PAX_REFCOUNT
10659+ "jno 0f\n"
10660+ _ASM_MOV "%0,%1\n"
10661+ "int $4\n0:\n"
10662+ _ASM_EXTABLE(0b, 0b)
10663+#endif
10664+
10665 : "+r" (i), "+m" (l->a.counter)
10666 : : "memory");
10667 return i + __i;
10668diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10669index ef51b50..514ba37 100644
10670--- a/arch/x86/include/asm/microcode.h
10671+++ b/arch/x86/include/asm/microcode.h
10672@@ -12,13 +12,13 @@ struct device;
10673 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10674
10675 struct microcode_ops {
10676- enum ucode_state (*request_microcode_user) (int cpu,
10677+ enum ucode_state (* const request_microcode_user) (int cpu,
10678 const void __user *buf, size_t size);
10679
10680- enum ucode_state (*request_microcode_fw) (int cpu,
10681+ enum ucode_state (* const request_microcode_fw) (int cpu,
10682 struct device *device);
10683
10684- void (*microcode_fini_cpu) (int cpu);
10685+ void (* const microcode_fini_cpu) (int cpu);
10686
10687 /*
10688 * The generic 'microcode_core' part guarantees that
10689@@ -38,18 +38,18 @@ struct ucode_cpu_info {
10690 extern struct ucode_cpu_info ucode_cpu_info[];
10691
10692 #ifdef CONFIG_MICROCODE_INTEL
10693-extern struct microcode_ops * __init init_intel_microcode(void);
10694+extern const struct microcode_ops * __init init_intel_microcode(void);
10695 #else
10696-static inline struct microcode_ops * __init init_intel_microcode(void)
10697+static inline const struct microcode_ops * __init init_intel_microcode(void)
10698 {
10699 return NULL;
10700 }
10701 #endif /* CONFIG_MICROCODE_INTEL */
10702
10703 #ifdef CONFIG_MICROCODE_AMD
10704-extern struct microcode_ops * __init init_amd_microcode(void);
10705+extern const struct microcode_ops * __init init_amd_microcode(void);
10706 #else
10707-static inline struct microcode_ops * __init init_amd_microcode(void)
10708+static inline const struct microcode_ops * __init init_amd_microcode(void)
10709 {
10710 return NULL;
10711 }
10712diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10713index 593e51d..fa69c9a 100644
10714--- a/arch/x86/include/asm/mman.h
10715+++ b/arch/x86/include/asm/mman.h
10716@@ -5,4 +5,14 @@
10717
10718 #include <asm-generic/mman.h>
10719
10720+#ifdef __KERNEL__
10721+#ifndef __ASSEMBLY__
10722+#ifdef CONFIG_X86_32
10723+#define arch_mmap_check i386_mmap_check
10724+int i386_mmap_check(unsigned long addr, unsigned long len,
10725+ unsigned long flags);
10726+#endif
10727+#endif
10728+#endif
10729+
10730 #endif /* _ASM_X86_MMAN_H */
10731diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10732index 80a1dee..239c67d 100644
10733--- a/arch/x86/include/asm/mmu.h
10734+++ b/arch/x86/include/asm/mmu.h
10735@@ -9,10 +9,23 @@
10736 * we put the segment information here.
10737 */
10738 typedef struct {
10739- void *ldt;
10740+ struct desc_struct *ldt;
10741 int size;
10742 struct mutex lock;
10743- void *vdso;
10744+ unsigned long vdso;
10745+
10746+#ifdef CONFIG_X86_32
10747+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10748+ unsigned long user_cs_base;
10749+ unsigned long user_cs_limit;
10750+
10751+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10752+ cpumask_t cpu_user_cs_mask;
10753+#endif
10754+
10755+#endif
10756+#endif
10757+
10758 } mm_context_t;
10759
10760 #ifdef CONFIG_SMP
10761diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10762index 8b5393e..8143173 100644
10763--- a/arch/x86/include/asm/mmu_context.h
10764+++ b/arch/x86/include/asm/mmu_context.h
10765@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10766
10767 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10768 {
10769+
10770+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10771+ unsigned int i;
10772+ pgd_t *pgd;
10773+
10774+ pax_open_kernel();
10775+ pgd = get_cpu_pgd(smp_processor_id());
10776+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10777+ set_pgd_batched(pgd+i, native_make_pgd(0));
10778+ pax_close_kernel();
10779+#endif
10780+
10781 #ifdef CONFIG_SMP
10782 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10783 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10784@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10785 struct task_struct *tsk)
10786 {
10787 unsigned cpu = smp_processor_id();
10788+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10789+ int tlbstate = TLBSTATE_OK;
10790+#endif
10791
10792 if (likely(prev != next)) {
10793 #ifdef CONFIG_SMP
10794+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10795+ tlbstate = percpu_read(cpu_tlbstate.state);
10796+#endif
10797 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10798 percpu_write(cpu_tlbstate.active_mm, next);
10799 #endif
10800 cpumask_set_cpu(cpu, mm_cpumask(next));
10801
10802 /* Re-load page tables */
10803+#ifdef CONFIG_PAX_PER_CPU_PGD
10804+ pax_open_kernel();
10805+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10806+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10807+ pax_close_kernel();
10808+ load_cr3(get_cpu_pgd(cpu));
10809+#else
10810 load_cr3(next->pgd);
10811+#endif
10812
10813 /* stop flush ipis for the previous mm */
10814 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10815@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10816 */
10817 if (unlikely(prev->context.ldt != next->context.ldt))
10818 load_LDT_nolock(&next->context);
10819- }
10820+
10821+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10822+ if (!nx_enabled) {
10823+ smp_mb__before_clear_bit();
10824+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10825+ smp_mb__after_clear_bit();
10826+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10827+ }
10828+#endif
10829+
10830+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10831+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10832+ prev->context.user_cs_limit != next->context.user_cs_limit))
10833+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10834 #ifdef CONFIG_SMP
10835+ else if (unlikely(tlbstate != TLBSTATE_OK))
10836+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10837+#endif
10838+#endif
10839+
10840+ }
10841 else {
10842+
10843+#ifdef CONFIG_PAX_PER_CPU_PGD
10844+ pax_open_kernel();
10845+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10846+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10847+ pax_close_kernel();
10848+ load_cr3(get_cpu_pgd(cpu));
10849+#endif
10850+
10851+#ifdef CONFIG_SMP
10852 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10853 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10854
10855@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10856 * tlb flush IPI delivery. We must reload CR3
10857 * to make sure to use no freed page tables.
10858 */
10859+
10860+#ifndef CONFIG_PAX_PER_CPU_PGD
10861 load_cr3(next->pgd);
10862+#endif
10863+
10864 load_LDT_nolock(&next->context);
10865+
10866+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10867+ if (!nx_enabled)
10868+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10869+#endif
10870+
10871+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10872+#ifdef CONFIG_PAX_PAGEEXEC
10873+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10874+#endif
10875+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10876+#endif
10877+
10878 }
10879+#endif
10880 }
10881-#endif
10882 }
10883
10884 #define activate_mm(prev, next) \
10885diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10886index 3e2ce58..caaf478 100644
10887--- a/arch/x86/include/asm/module.h
10888+++ b/arch/x86/include/asm/module.h
10889@@ -5,6 +5,7 @@
10890
10891 #ifdef CONFIG_X86_64
10892 /* X86_64 does not define MODULE_PROC_FAMILY */
10893+#define MODULE_PROC_FAMILY ""
10894 #elif defined CONFIG_M386
10895 #define MODULE_PROC_FAMILY "386 "
10896 #elif defined CONFIG_M486
10897@@ -59,13 +60,26 @@
10898 #error unknown processor family
10899 #endif
10900
10901-#ifdef CONFIG_X86_32
10902-# ifdef CONFIG_4KSTACKS
10903-# define MODULE_STACKSIZE "4KSTACKS "
10904-# else
10905-# define MODULE_STACKSIZE ""
10906-# endif
10907-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10908+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10909+#define MODULE_STACKSIZE "4KSTACKS "
10910+#else
10911+#define MODULE_STACKSIZE ""
10912 #endif
10913
10914+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10915+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10916+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10917+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10918+#else
10919+#define MODULE_PAX_KERNEXEC ""
10920+#endif
10921+
10922+#ifdef CONFIG_PAX_MEMORY_UDEREF
10923+#define MODULE_PAX_UDEREF "UDEREF "
10924+#else
10925+#define MODULE_PAX_UDEREF ""
10926+#endif
10927+
10928+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10929+
10930 #endif /* _ASM_X86_MODULE_H */
10931diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10932index 7639dbf..e08a58c 100644
10933--- a/arch/x86/include/asm/page_64_types.h
10934+++ b/arch/x86/include/asm/page_64_types.h
10935@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10936
10937 /* duplicated to the one in bootmem.h */
10938 extern unsigned long max_pfn;
10939-extern unsigned long phys_base;
10940+extern const unsigned long phys_base;
10941
10942 extern unsigned long __phys_addr(unsigned long);
10943 #define __phys_reloc_hide(x) (x)
10944diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10945index efb3899..ef30687 100644
10946--- a/arch/x86/include/asm/paravirt.h
10947+++ b/arch/x86/include/asm/paravirt.h
10948@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10949 val);
10950 }
10951
10952+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10953+{
10954+ pgdval_t val = native_pgd_val(pgd);
10955+
10956+ if (sizeof(pgdval_t) > sizeof(long))
10957+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10958+ val, (u64)val >> 32);
10959+ else
10960+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10961+ val);
10962+}
10963+
10964 static inline void pgd_clear(pgd_t *pgdp)
10965 {
10966 set_pgd(pgdp, __pgd(0));
10967@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10968 pv_mmu_ops.set_fixmap(idx, phys, flags);
10969 }
10970
10971+#ifdef CONFIG_PAX_KERNEXEC
10972+static inline unsigned long pax_open_kernel(void)
10973+{
10974+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10975+}
10976+
10977+static inline unsigned long pax_close_kernel(void)
10978+{
10979+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10980+}
10981+#else
10982+static inline unsigned long pax_open_kernel(void) { return 0; }
10983+static inline unsigned long pax_close_kernel(void) { return 0; }
10984+#endif
10985+
10986 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10987
10988 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10989@@ -945,7 +972,7 @@ extern void default_banner(void);
10990
10991 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10992 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10993-#define PARA_INDIRECT(addr) *%cs:addr
10994+#define PARA_INDIRECT(addr) *%ss:addr
10995 #endif
10996
10997 #define INTERRUPT_RETURN \
10998@@ -1022,6 +1049,21 @@ extern void default_banner(void);
10999 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11000 CLBR_NONE, \
11001 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11002+
11003+#define GET_CR0_INTO_RDI \
11004+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11005+ mov %rax,%rdi
11006+
11007+#define SET_RDI_INTO_CR0 \
11008+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11009+
11010+#define GET_CR3_INTO_RDI \
11011+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11012+ mov %rax,%rdi
11013+
11014+#define SET_RDI_INTO_CR3 \
11015+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11016+
11017 #endif /* CONFIG_X86_32 */
11018
11019 #endif /* __ASSEMBLY__ */
11020diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11021index 9357473..aeb2de5 100644
11022--- a/arch/x86/include/asm/paravirt_types.h
11023+++ b/arch/x86/include/asm/paravirt_types.h
11024@@ -78,19 +78,19 @@ struct pv_init_ops {
11025 */
11026 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11027 unsigned long addr, unsigned len);
11028-};
11029+} __no_const;
11030
11031
11032 struct pv_lazy_ops {
11033 /* Set deferred update mode, used for batching operations. */
11034 void (*enter)(void);
11035 void (*leave)(void);
11036-};
11037+} __no_const;
11038
11039 struct pv_time_ops {
11040 unsigned long long (*sched_clock)(void);
11041 unsigned long (*get_tsc_khz)(void);
11042-};
11043+} __no_const;
11044
11045 struct pv_cpu_ops {
11046 /* hooks for various privileged instructions */
11047@@ -186,7 +186,7 @@ struct pv_cpu_ops {
11048
11049 void (*start_context_switch)(struct task_struct *prev);
11050 void (*end_context_switch)(struct task_struct *next);
11051-};
11052+} __no_const;
11053
11054 struct pv_irq_ops {
11055 /*
11056@@ -217,7 +217,7 @@ struct pv_apic_ops {
11057 unsigned long start_eip,
11058 unsigned long start_esp);
11059 #endif
11060-};
11061+} __no_const;
11062
11063 struct pv_mmu_ops {
11064 unsigned long (*read_cr2)(void);
11065@@ -301,6 +301,7 @@ struct pv_mmu_ops {
11066 struct paravirt_callee_save make_pud;
11067
11068 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11069+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11070 #endif /* PAGETABLE_LEVELS == 4 */
11071 #endif /* PAGETABLE_LEVELS >= 3 */
11072
11073@@ -316,6 +317,12 @@ struct pv_mmu_ops {
11074 an mfn. We can tell which is which from the index. */
11075 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11076 phys_addr_t phys, pgprot_t flags);
11077+
11078+#ifdef CONFIG_PAX_KERNEXEC
11079+ unsigned long (*pax_open_kernel)(void);
11080+ unsigned long (*pax_close_kernel)(void);
11081+#endif
11082+
11083 };
11084
11085 struct raw_spinlock;
11086@@ -326,7 +333,7 @@ struct pv_lock_ops {
11087 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
11088 int (*spin_trylock)(struct raw_spinlock *lock);
11089 void (*spin_unlock)(struct raw_spinlock *lock);
11090-};
11091+} __no_const;
11092
11093 /* This contains all the paravirt structures: we get a convenient
11094 * number for each function using the offset which we use to indicate
11095diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
11096index b399988..3f47c38 100644
11097--- a/arch/x86/include/asm/pci_x86.h
11098+++ b/arch/x86/include/asm/pci_x86.h
11099@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
11100 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
11101
11102 struct pci_raw_ops {
11103- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11104+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11105 int reg, int len, u32 *val);
11106- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11107+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11108 int reg, int len, u32 val);
11109 };
11110
11111-extern struct pci_raw_ops *raw_pci_ops;
11112-extern struct pci_raw_ops *raw_pci_ext_ops;
11113+extern const struct pci_raw_ops *raw_pci_ops;
11114+extern const struct pci_raw_ops *raw_pci_ext_ops;
11115
11116-extern struct pci_raw_ops pci_direct_conf1;
11117+extern const struct pci_raw_ops pci_direct_conf1;
11118 extern bool port_cf9_safe;
11119
11120 /* arch_initcall level */
11121diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
11122index b65a36d..50345a4 100644
11123--- a/arch/x86/include/asm/percpu.h
11124+++ b/arch/x86/include/asm/percpu.h
11125@@ -78,6 +78,7 @@ do { \
11126 if (0) { \
11127 T__ tmp__; \
11128 tmp__ = (val); \
11129+ (void)tmp__; \
11130 } \
11131 switch (sizeof(var)) { \
11132 case 1: \
11133diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11134index 271de94..ef944d6 100644
11135--- a/arch/x86/include/asm/pgalloc.h
11136+++ b/arch/x86/include/asm/pgalloc.h
11137@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11138 pmd_t *pmd, pte_t *pte)
11139 {
11140 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11141+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11142+}
11143+
11144+static inline void pmd_populate_user(struct mm_struct *mm,
11145+ pmd_t *pmd, pte_t *pte)
11146+{
11147+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11148 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11149 }
11150
11151diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11152index 2334982..70bc412 100644
11153--- a/arch/x86/include/asm/pgtable-2level.h
11154+++ b/arch/x86/include/asm/pgtable-2level.h
11155@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11156
11157 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11158 {
11159+ pax_open_kernel();
11160 *pmdp = pmd;
11161+ pax_close_kernel();
11162 }
11163
11164 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11165diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11166index 33927d2..ccde329 100644
11167--- a/arch/x86/include/asm/pgtable-3level.h
11168+++ b/arch/x86/include/asm/pgtable-3level.h
11169@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11170
11171 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11172 {
11173+ pax_open_kernel();
11174 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11175+ pax_close_kernel();
11176 }
11177
11178 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11179 {
11180+ pax_open_kernel();
11181 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11182+ pax_close_kernel();
11183 }
11184
11185 /*
11186diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11187index af6fd36..867ff74 100644
11188--- a/arch/x86/include/asm/pgtable.h
11189+++ b/arch/x86/include/asm/pgtable.h
11190@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
11191
11192 #ifndef __PAGETABLE_PUD_FOLDED
11193 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11194+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11195 #define pgd_clear(pgd) native_pgd_clear(pgd)
11196 #endif
11197
11198@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
11199
11200 #define arch_end_context_switch(prev) do {} while(0)
11201
11202+#define pax_open_kernel() native_pax_open_kernel()
11203+#define pax_close_kernel() native_pax_close_kernel()
11204 #endif /* CONFIG_PARAVIRT */
11205
11206+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11207+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11208+
11209+#ifdef CONFIG_PAX_KERNEXEC
11210+static inline unsigned long native_pax_open_kernel(void)
11211+{
11212+ unsigned long cr0;
11213+
11214+ preempt_disable();
11215+ barrier();
11216+ cr0 = read_cr0() ^ X86_CR0_WP;
11217+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11218+ write_cr0(cr0);
11219+ return cr0 ^ X86_CR0_WP;
11220+}
11221+
11222+static inline unsigned long native_pax_close_kernel(void)
11223+{
11224+ unsigned long cr0;
11225+
11226+ cr0 = read_cr0() ^ X86_CR0_WP;
11227+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11228+ write_cr0(cr0);
11229+ barrier();
11230+ preempt_enable_no_resched();
11231+ return cr0 ^ X86_CR0_WP;
11232+}
11233+#else
11234+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11235+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11236+#endif
11237+
11238 /*
11239 * The following only work if pte_present() is true.
11240 * Undefined behaviour if not..
11241 */
11242+static inline int pte_user(pte_t pte)
11243+{
11244+ return pte_val(pte) & _PAGE_USER;
11245+}
11246+
11247 static inline int pte_dirty(pte_t pte)
11248 {
11249 return pte_flags(pte) & _PAGE_DIRTY;
11250@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11251 return pte_clear_flags(pte, _PAGE_RW);
11252 }
11253
11254+static inline pte_t pte_mkread(pte_t pte)
11255+{
11256+ return __pte(pte_val(pte) | _PAGE_USER);
11257+}
11258+
11259 static inline pte_t pte_mkexec(pte_t pte)
11260 {
11261- return pte_clear_flags(pte, _PAGE_NX);
11262+#ifdef CONFIG_X86_PAE
11263+ if (__supported_pte_mask & _PAGE_NX)
11264+ return pte_clear_flags(pte, _PAGE_NX);
11265+ else
11266+#endif
11267+ return pte_set_flags(pte, _PAGE_USER);
11268+}
11269+
11270+static inline pte_t pte_exprotect(pte_t pte)
11271+{
11272+#ifdef CONFIG_X86_PAE
11273+ if (__supported_pte_mask & _PAGE_NX)
11274+ return pte_set_flags(pte, _PAGE_NX);
11275+ else
11276+#endif
11277+ return pte_clear_flags(pte, _PAGE_USER);
11278 }
11279
11280 static inline pte_t pte_mkdirty(pte_t pte)
11281@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11282 #endif
11283
11284 #ifndef __ASSEMBLY__
11285+
11286+#ifdef CONFIG_PAX_PER_CPU_PGD
11287+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11288+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11289+{
11290+ return cpu_pgd[cpu];
11291+}
11292+#endif
11293+
11294 #include <linux/mm_types.h>
11295
11296 static inline int pte_none(pte_t pte)
11297@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11298
11299 static inline int pgd_bad(pgd_t pgd)
11300 {
11301- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11302+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11303 }
11304
11305 static inline int pgd_none(pgd_t pgd)
11306@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11307 * pgd_offset() returns a (pgd_t *)
11308 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11309 */
11310-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11311+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11312+
11313+#ifdef CONFIG_PAX_PER_CPU_PGD
11314+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11315+#endif
11316+
11317 /*
11318 * a shortcut which implies the use of the kernel's pgd, instead
11319 * of a process's
11320@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11321 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11322 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11323
11324+#ifdef CONFIG_X86_32
11325+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11326+#else
11327+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11328+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11329+
11330+#ifdef CONFIG_PAX_MEMORY_UDEREF
11331+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11332+#else
11333+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11334+#endif
11335+
11336+#endif
11337+
11338 #ifndef __ASSEMBLY__
11339
11340 extern int direct_gbpages;
11341@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11342 * dst and src can be on the same page, but the range must not overlap,
11343 * and must not cross a page boundary.
11344 */
11345-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11346+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11347 {
11348- memcpy(dst, src, count * sizeof(pgd_t));
11349+ pax_open_kernel();
11350+ while (count--)
11351+ *dst++ = *src++;
11352+ pax_close_kernel();
11353 }
11354
11355+#ifdef CONFIG_PAX_PER_CPU_PGD
11356+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11357+#endif
11358+
11359+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11360+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11361+#else
11362+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11363+#endif
11364
11365 #include <asm-generic/pgtable.h>
11366 #endif /* __ASSEMBLY__ */
11367diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11368index 750f1bf..971e839 100644
11369--- a/arch/x86/include/asm/pgtable_32.h
11370+++ b/arch/x86/include/asm/pgtable_32.h
11371@@ -26,9 +26,6 @@
11372 struct mm_struct;
11373 struct vm_area_struct;
11374
11375-extern pgd_t swapper_pg_dir[1024];
11376-extern pgd_t trampoline_pg_dir[1024];
11377-
11378 static inline void pgtable_cache_init(void) { }
11379 static inline void check_pgt_cache(void) { }
11380 void paging_init(void);
11381@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11382 # include <asm/pgtable-2level.h>
11383 #endif
11384
11385+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11386+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11387+#ifdef CONFIG_X86_PAE
11388+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11389+#endif
11390+
11391 #if defined(CONFIG_HIGHPTE)
11392 #define __KM_PTE \
11393 (in_nmi() ? KM_NMI_PTE : \
11394@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11395 /* Clear a kernel PTE and flush it from the TLB */
11396 #define kpte_clear_flush(ptep, vaddr) \
11397 do { \
11398+ pax_open_kernel(); \
11399 pte_clear(&init_mm, (vaddr), (ptep)); \
11400+ pax_close_kernel(); \
11401 __flush_tlb_one((vaddr)); \
11402 } while (0)
11403
11404@@ -85,6 +90,9 @@ do { \
11405
11406 #endif /* !__ASSEMBLY__ */
11407
11408+#define HAVE_ARCH_UNMAPPED_AREA
11409+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11410+
11411 /*
11412 * kern_addr_valid() is (1) for FLATMEM and (0) for
11413 * SPARSEMEM and DISCONTIGMEM
11414diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11415index 5e67c15..12d5c47 100644
11416--- a/arch/x86/include/asm/pgtable_32_types.h
11417+++ b/arch/x86/include/asm/pgtable_32_types.h
11418@@ -8,7 +8,7 @@
11419 */
11420 #ifdef CONFIG_X86_PAE
11421 # include <asm/pgtable-3level_types.h>
11422-# define PMD_SIZE (1UL << PMD_SHIFT)
11423+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11424 # define PMD_MASK (~(PMD_SIZE - 1))
11425 #else
11426 # include <asm/pgtable-2level_types.h>
11427@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11428 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11429 #endif
11430
11431+#ifdef CONFIG_PAX_KERNEXEC
11432+#ifndef __ASSEMBLY__
11433+extern unsigned char MODULES_EXEC_VADDR[];
11434+extern unsigned char MODULES_EXEC_END[];
11435+#endif
11436+#include <asm/boot.h>
11437+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11438+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11439+#else
11440+#define ktla_ktva(addr) (addr)
11441+#define ktva_ktla(addr) (addr)
11442+#endif
11443+
11444 #define MODULES_VADDR VMALLOC_START
11445 #define MODULES_END VMALLOC_END
11446 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11447diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11448index c57a301..6b414ff 100644
11449--- a/arch/x86/include/asm/pgtable_64.h
11450+++ b/arch/x86/include/asm/pgtable_64.h
11451@@ -16,10 +16,14 @@
11452
11453 extern pud_t level3_kernel_pgt[512];
11454 extern pud_t level3_ident_pgt[512];
11455+extern pud_t level3_vmalloc_start_pgt[512];
11456+extern pud_t level3_vmalloc_end_pgt[512];
11457+extern pud_t level3_vmemmap_pgt[512];
11458+extern pud_t level2_vmemmap_pgt[512];
11459 extern pmd_t level2_kernel_pgt[512];
11460 extern pmd_t level2_fixmap_pgt[512];
11461-extern pmd_t level2_ident_pgt[512];
11462-extern pgd_t init_level4_pgt[];
11463+extern pmd_t level2_ident_pgt[512*2];
11464+extern pgd_t init_level4_pgt[512];
11465
11466 #define swapper_pg_dir init_level4_pgt
11467
11468@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11469
11470 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11471 {
11472+ pax_open_kernel();
11473 *pmdp = pmd;
11474+ pax_close_kernel();
11475 }
11476
11477 static inline void native_pmd_clear(pmd_t *pmd)
11478@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11479
11480 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11481 {
11482+ pax_open_kernel();
11483+ *pgdp = pgd;
11484+ pax_close_kernel();
11485+}
11486+
11487+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11488+{
11489 *pgdp = pgd;
11490 }
11491
11492diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11493index 766ea16..5b96cb3 100644
11494--- a/arch/x86/include/asm/pgtable_64_types.h
11495+++ b/arch/x86/include/asm/pgtable_64_types.h
11496@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11497 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11498 #define MODULES_END _AC(0xffffffffff000000, UL)
11499 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11500+#define MODULES_EXEC_VADDR MODULES_VADDR
11501+#define MODULES_EXEC_END MODULES_END
11502+
11503+#define ktla_ktva(addr) (addr)
11504+#define ktva_ktla(addr) (addr)
11505
11506 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11507diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11508index d1f4a76..2f46ba1 100644
11509--- a/arch/x86/include/asm/pgtable_types.h
11510+++ b/arch/x86/include/asm/pgtable_types.h
11511@@ -16,12 +16,11 @@
11512 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11513 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11514 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11515-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11516+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11517 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11518 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11519 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11520-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11521-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11522+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11523 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11524
11525 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11526@@ -39,7 +38,6 @@
11527 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11528 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11529 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11530-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11531 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11532 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11533 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11534@@ -55,8 +53,10 @@
11535
11536 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11537 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11538-#else
11539+#elif defined(CONFIG_KMEMCHECK)
11540 #define _PAGE_NX (_AT(pteval_t, 0))
11541+#else
11542+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11543 #endif
11544
11545 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11546@@ -93,6 +93,9 @@
11547 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11548 _PAGE_ACCESSED)
11549
11550+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11551+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11552+
11553 #define __PAGE_KERNEL_EXEC \
11554 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11555 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11556@@ -103,8 +106,8 @@
11557 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11558 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11559 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11560-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11561-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11562+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11563+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11564 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11565 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11566 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11567@@ -163,8 +166,8 @@
11568 * bits are combined, this will alow user to access the high address mapped
11569 * VDSO in the presence of CONFIG_COMPAT_VDSO
11570 */
11571-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11572-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11573+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11574+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11575 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11576 #endif
11577
11578@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11579 {
11580 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11581 }
11582+#endif
11583
11584+#if PAGETABLE_LEVELS == 3
11585+#include <asm-generic/pgtable-nopud.h>
11586+#endif
11587+
11588+#if PAGETABLE_LEVELS == 2
11589+#include <asm-generic/pgtable-nopmd.h>
11590+#endif
11591+
11592+#ifndef __ASSEMBLY__
11593 #if PAGETABLE_LEVELS > 3
11594 typedef struct { pudval_t pud; } pud_t;
11595
11596@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11597 return pud.pud;
11598 }
11599 #else
11600-#include <asm-generic/pgtable-nopud.h>
11601-
11602 static inline pudval_t native_pud_val(pud_t pud)
11603 {
11604 return native_pgd_val(pud.pgd);
11605@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11606 return pmd.pmd;
11607 }
11608 #else
11609-#include <asm-generic/pgtable-nopmd.h>
11610-
11611 static inline pmdval_t native_pmd_val(pmd_t pmd)
11612 {
11613 return native_pgd_val(pmd.pud.pgd);
11614@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11615
11616 extern pteval_t __supported_pte_mask;
11617 extern void set_nx(void);
11618+
11619+#ifdef CONFIG_X86_32
11620+#ifdef CONFIG_X86_PAE
11621 extern int nx_enabled;
11622+#else
11623+#define nx_enabled (0)
11624+#endif
11625+#else
11626+#define nx_enabled (1)
11627+#endif
11628
11629 #define pgprot_writecombine pgprot_writecombine
11630 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11631diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11632index fa04dea..5f823fc 100644
11633--- a/arch/x86/include/asm/processor.h
11634+++ b/arch/x86/include/asm/processor.h
11635@@ -272,7 +272,7 @@ struct tss_struct {
11636
11637 } ____cacheline_aligned;
11638
11639-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11640+extern struct tss_struct init_tss[NR_CPUS];
11641
11642 /*
11643 * Save the original ist values for checking stack pointers during debugging
11644@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11645 */
11646 #define TASK_SIZE PAGE_OFFSET
11647 #define TASK_SIZE_MAX TASK_SIZE
11648+
11649+#ifdef CONFIG_PAX_SEGMEXEC
11650+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11651+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11652+#else
11653 #define STACK_TOP TASK_SIZE
11654-#define STACK_TOP_MAX STACK_TOP
11655+#endif
11656+
11657+#define STACK_TOP_MAX TASK_SIZE
11658
11659 #define INIT_THREAD { \
11660- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11661+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11662 .vm86_info = NULL, \
11663 .sysenter_cs = __KERNEL_CS, \
11664 .io_bitmap_ptr = NULL, \
11665@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11666 */
11667 #define INIT_TSS { \
11668 .x86_tss = { \
11669- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11670+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11671 .ss0 = __KERNEL_DS, \
11672 .ss1 = __KERNEL_CS, \
11673 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11674@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11675 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11676
11677 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11678-#define KSTK_TOP(info) \
11679-({ \
11680- unsigned long *__ptr = (unsigned long *)(info); \
11681- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11682-})
11683+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11684
11685 /*
11686 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11687@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11688 #define task_pt_regs(task) \
11689 ({ \
11690 struct pt_regs *__regs__; \
11691- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11692+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11693 __regs__ - 1; \
11694 })
11695
11696@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11697 /*
11698 * User space process size. 47bits minus one guard page.
11699 */
11700-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11701+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11702
11703 /* This decides where the kernel will search for a free chunk of vm
11704 * space during mmap's.
11705 */
11706 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11707- 0xc0000000 : 0xFFFFe000)
11708+ 0xc0000000 : 0xFFFFf000)
11709
11710 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11711 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11712@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11713 #define STACK_TOP_MAX TASK_SIZE_MAX
11714
11715 #define INIT_THREAD { \
11716- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11717+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11718 }
11719
11720 #define INIT_TSS { \
11721- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11722+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11723 }
11724
11725 /*
11726@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11727 */
11728 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11729
11730+#ifdef CONFIG_PAX_SEGMEXEC
11731+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11732+#endif
11733+
11734 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11735
11736 /* Get/set a process' ability to use the timestamp counter instruction */
11737diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11738index 0f0d908..f2e3da2 100644
11739--- a/arch/x86/include/asm/ptrace.h
11740+++ b/arch/x86/include/asm/ptrace.h
11741@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11742 }
11743
11744 /*
11745- * user_mode_vm(regs) determines whether a register set came from user mode.
11746+ * user_mode(regs) determines whether a register set came from user mode.
11747 * This is true if V8086 mode was enabled OR if the register set was from
11748 * protected mode with RPL-3 CS value. This tricky test checks that with
11749 * one comparison. Many places in the kernel can bypass this full check
11750- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11751+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11752+ * be used.
11753 */
11754-static inline int user_mode(struct pt_regs *regs)
11755+static inline int user_mode_novm(struct pt_regs *regs)
11756 {
11757 #ifdef CONFIG_X86_32
11758 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11759 #else
11760- return !!(regs->cs & 3);
11761+ return !!(regs->cs & SEGMENT_RPL_MASK);
11762 #endif
11763 }
11764
11765-static inline int user_mode_vm(struct pt_regs *regs)
11766+static inline int user_mode(struct pt_regs *regs)
11767 {
11768 #ifdef CONFIG_X86_32
11769 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11770 USER_RPL;
11771 #else
11772- return user_mode(regs);
11773+ return user_mode_novm(regs);
11774 #endif
11775 }
11776
11777diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11778index 562d4fd..6e39df1 100644
11779--- a/arch/x86/include/asm/reboot.h
11780+++ b/arch/x86/include/asm/reboot.h
11781@@ -6,19 +6,19 @@
11782 struct pt_regs;
11783
11784 struct machine_ops {
11785- void (*restart)(char *cmd);
11786- void (*halt)(void);
11787- void (*power_off)(void);
11788+ void (* __noreturn restart)(char *cmd);
11789+ void (* __noreturn halt)(void);
11790+ void (* __noreturn power_off)(void);
11791 void (*shutdown)(void);
11792 void (*crash_shutdown)(struct pt_regs *);
11793- void (*emergency_restart)(void);
11794-};
11795+ void (* __noreturn emergency_restart)(void);
11796+} __no_const;
11797
11798 extern struct machine_ops machine_ops;
11799
11800 void native_machine_crash_shutdown(struct pt_regs *regs);
11801 void native_machine_shutdown(void);
11802-void machine_real_restart(const unsigned char *code, int length);
11803+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11804
11805 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11806 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11807diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11808index 606ede1..dbfff37 100644
11809--- a/arch/x86/include/asm/rwsem.h
11810+++ b/arch/x86/include/asm/rwsem.h
11811@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11812 {
11813 asm volatile("# beginning down_read\n\t"
11814 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11815+
11816+#ifdef CONFIG_PAX_REFCOUNT
11817+ "jno 0f\n"
11818+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11819+ "int $4\n0:\n"
11820+ _ASM_EXTABLE(0b, 0b)
11821+#endif
11822+
11823 /* adds 0x00000001, returns the old value */
11824 " jns 1f\n"
11825 " call call_rwsem_down_read_failed\n"
11826@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11827 "1:\n\t"
11828 " mov %1,%2\n\t"
11829 " add %3,%2\n\t"
11830+
11831+#ifdef CONFIG_PAX_REFCOUNT
11832+ "jno 0f\n"
11833+ "sub %3,%2\n"
11834+ "int $4\n0:\n"
11835+ _ASM_EXTABLE(0b, 0b)
11836+#endif
11837+
11838 " jle 2f\n\t"
11839 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11840 " jnz 1b\n\t"
11841@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11842 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11843 asm volatile("# beginning down_write\n\t"
11844 LOCK_PREFIX " xadd %1,(%2)\n\t"
11845+
11846+#ifdef CONFIG_PAX_REFCOUNT
11847+ "jno 0f\n"
11848+ "mov %1,(%2)\n"
11849+ "int $4\n0:\n"
11850+ _ASM_EXTABLE(0b, 0b)
11851+#endif
11852+
11853 /* subtract 0x0000ffff, returns the old value */
11854 " test %1,%1\n\t"
11855 /* was the count 0 before? */
11856@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11857 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11858 asm volatile("# beginning __up_read\n\t"
11859 LOCK_PREFIX " xadd %1,(%2)\n\t"
11860+
11861+#ifdef CONFIG_PAX_REFCOUNT
11862+ "jno 0f\n"
11863+ "mov %1,(%2)\n"
11864+ "int $4\n0:\n"
11865+ _ASM_EXTABLE(0b, 0b)
11866+#endif
11867+
11868 /* subtracts 1, returns the old value */
11869 " jns 1f\n\t"
11870 " call call_rwsem_wake\n"
11871@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11872 rwsem_count_t tmp;
11873 asm volatile("# beginning __up_write\n\t"
11874 LOCK_PREFIX " xadd %1,(%2)\n\t"
11875+
11876+#ifdef CONFIG_PAX_REFCOUNT
11877+ "jno 0f\n"
11878+ "mov %1,(%2)\n"
11879+ "int $4\n0:\n"
11880+ _ASM_EXTABLE(0b, 0b)
11881+#endif
11882+
11883 /* tries to transition
11884 0xffff0001 -> 0x00000000 */
11885 " jz 1f\n"
11886@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11887 {
11888 asm volatile("# beginning __downgrade_write\n\t"
11889 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11890+
11891+#ifdef CONFIG_PAX_REFCOUNT
11892+ "jno 0f\n"
11893+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11894+ "int $4\n0:\n"
11895+ _ASM_EXTABLE(0b, 0b)
11896+#endif
11897+
11898 /*
11899 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11900 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11901@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11902 static inline void rwsem_atomic_add(rwsem_count_t delta,
11903 struct rw_semaphore *sem)
11904 {
11905- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11906+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11907+
11908+#ifdef CONFIG_PAX_REFCOUNT
11909+ "jno 0f\n"
11910+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11911+ "int $4\n0:\n"
11912+ _ASM_EXTABLE(0b, 0b)
11913+#endif
11914+
11915 : "+m" (sem->count)
11916 : "er" (delta));
11917 }
11918@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11919 {
11920 rwsem_count_t tmp = delta;
11921
11922- asm volatile(LOCK_PREFIX "xadd %0,%1"
11923+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11924+
11925+#ifdef CONFIG_PAX_REFCOUNT
11926+ "jno 0f\n"
11927+ "mov %0,%1\n"
11928+ "int $4\n0:\n"
11929+ _ASM_EXTABLE(0b, 0b)
11930+#endif
11931+
11932 : "+r" (tmp), "+m" (sem->count)
11933 : : "memory");
11934
11935diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11936index 14e0ed8..7f7dd5e 100644
11937--- a/arch/x86/include/asm/segment.h
11938+++ b/arch/x86/include/asm/segment.h
11939@@ -62,10 +62,15 @@
11940 * 26 - ESPFIX small SS
11941 * 27 - per-cpu [ offset to per-cpu data area ]
11942 * 28 - stack_canary-20 [ for stack protector ]
11943- * 29 - unused
11944- * 30 - unused
11945+ * 29 - PCI BIOS CS
11946+ * 30 - PCI BIOS DS
11947 * 31 - TSS for double fault handler
11948 */
11949+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11950+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11951+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11952+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11953+
11954 #define GDT_ENTRY_TLS_MIN 6
11955 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11956
11957@@ -77,6 +82,8 @@
11958
11959 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11960
11961+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11962+
11963 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11964
11965 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11966@@ -88,7 +95,7 @@
11967 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11968 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11969
11970-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11971+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11972 #ifdef CONFIG_SMP
11973 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11974 #else
11975@@ -102,6 +109,12 @@
11976 #define __KERNEL_STACK_CANARY 0
11977 #endif
11978
11979+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11980+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11981+
11982+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11983+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11984+
11985 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11986
11987 /*
11988@@ -139,7 +152,7 @@
11989 */
11990
11991 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11992-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11993+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11994
11995
11996 #else
11997@@ -163,6 +176,8 @@
11998 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11999 #define __USER32_DS __USER_DS
12000
12001+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12002+
12003 #define GDT_ENTRY_TSS 8 /* needs two entries */
12004 #define GDT_ENTRY_LDT 10 /* needs two entries */
12005 #define GDT_ENTRY_TLS_MIN 12
12006@@ -183,6 +198,7 @@
12007 #endif
12008
12009 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12010+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12011 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12012 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12013 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12014diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12015index 4c2f63c..5685db2 100644
12016--- a/arch/x86/include/asm/smp.h
12017+++ b/arch/x86/include/asm/smp.h
12018@@ -24,7 +24,7 @@ extern unsigned int num_processors;
12019 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12020 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12021 DECLARE_PER_CPU(u16, cpu_llc_id);
12022-DECLARE_PER_CPU(int, cpu_number);
12023+DECLARE_PER_CPU(unsigned int, cpu_number);
12024
12025 static inline struct cpumask *cpu_sibling_mask(int cpu)
12026 {
12027@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12028 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12029
12030 /* Static state in head.S used to set up a CPU */
12031-extern struct {
12032- void *sp;
12033- unsigned short ss;
12034-} stack_start;
12035+extern unsigned long stack_start; /* Initial stack pointer address */
12036
12037 struct smp_ops {
12038 void (*smp_prepare_boot_cpu)(void);
12039@@ -60,7 +57,7 @@ struct smp_ops {
12040
12041 void (*send_call_func_ipi)(const struct cpumask *mask);
12042 void (*send_call_func_single_ipi)(int cpu);
12043-};
12044+} __no_const;
12045
12046 /* Globals due to paravirt */
12047 extern void set_cpu_sibling_map(int cpu);
12048@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12049 extern int safe_smp_processor_id(void);
12050
12051 #elif defined(CONFIG_X86_64_SMP)
12052-#define raw_smp_processor_id() (percpu_read(cpu_number))
12053-
12054-#define stack_smp_processor_id() \
12055-({ \
12056- struct thread_info *ti; \
12057- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12058- ti->cpu; \
12059-})
12060+#define raw_smp_processor_id() (percpu_read(cpu_number))
12061+#define stack_smp_processor_id() raw_smp_processor_id()
12062 #define safe_smp_processor_id() smp_processor_id()
12063
12064 #endif
12065diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12066index 4e77853..4359783 100644
12067--- a/arch/x86/include/asm/spinlock.h
12068+++ b/arch/x86/include/asm/spinlock.h
12069@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
12070 static inline void __raw_read_lock(raw_rwlock_t *rw)
12071 {
12072 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
12073+
12074+#ifdef CONFIG_PAX_REFCOUNT
12075+ "jno 0f\n"
12076+ LOCK_PREFIX " addl $1,(%0)\n"
12077+ "int $4\n0:\n"
12078+ _ASM_EXTABLE(0b, 0b)
12079+#endif
12080+
12081 "jns 1f\n"
12082 "call __read_lock_failed\n\t"
12083 "1:\n"
12084@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
12085 static inline void __raw_write_lock(raw_rwlock_t *rw)
12086 {
12087 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
12088+
12089+#ifdef CONFIG_PAX_REFCOUNT
12090+ "jno 0f\n"
12091+ LOCK_PREFIX " addl %1,(%0)\n"
12092+ "int $4\n0:\n"
12093+ _ASM_EXTABLE(0b, 0b)
12094+#endif
12095+
12096 "jz 1f\n"
12097 "call __write_lock_failed\n\t"
12098 "1:\n"
12099@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
12100
12101 static inline void __raw_read_unlock(raw_rwlock_t *rw)
12102 {
12103- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
12104+ asm volatile(LOCK_PREFIX "incl %0\n"
12105+
12106+#ifdef CONFIG_PAX_REFCOUNT
12107+ "jno 0f\n"
12108+ LOCK_PREFIX "decl %0\n"
12109+ "int $4\n0:\n"
12110+ _ASM_EXTABLE(0b, 0b)
12111+#endif
12112+
12113+ :"+m" (rw->lock) : : "memory");
12114 }
12115
12116 static inline void __raw_write_unlock(raw_rwlock_t *rw)
12117 {
12118- asm volatile(LOCK_PREFIX "addl %1, %0"
12119+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
12120+
12121+#ifdef CONFIG_PAX_REFCOUNT
12122+ "jno 0f\n"
12123+ LOCK_PREFIX "subl %1, %0\n"
12124+ "int $4\n0:\n"
12125+ _ASM_EXTABLE(0b, 0b)
12126+#endif
12127+
12128 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
12129 }
12130
12131diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12132index 1575177..cb23f52 100644
12133--- a/arch/x86/include/asm/stackprotector.h
12134+++ b/arch/x86/include/asm/stackprotector.h
12135@@ -48,7 +48,7 @@
12136 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12137 */
12138 #define GDT_STACK_CANARY_INIT \
12139- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12140+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12141
12142 /*
12143 * Initialize the stackprotector canary value.
12144@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12145
12146 static inline void load_stack_canary_segment(void)
12147 {
12148-#ifdef CONFIG_X86_32
12149+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12150 asm volatile ("mov %0, %%gs" : : "r" (0));
12151 #endif
12152 }
12153diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12154index e0fbf29..858ef4a 100644
12155--- a/arch/x86/include/asm/system.h
12156+++ b/arch/x86/include/asm/system.h
12157@@ -132,7 +132,7 @@ do { \
12158 "thread_return:\n\t" \
12159 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12160 __switch_canary \
12161- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12162+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12163 "movq %%rax,%%rdi\n\t" \
12164 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12165 "jnz ret_from_fork\n\t" \
12166@@ -143,7 +143,7 @@ do { \
12167 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12168 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12169 [_tif_fork] "i" (_TIF_FORK), \
12170- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12171+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
12172 [current_task] "m" (per_cpu_var(current_task)) \
12173 __switch_canary_iparam \
12174 : "memory", "cc" __EXTRA_CLOBBER)
12175@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12176 {
12177 unsigned long __limit;
12178 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12179- return __limit + 1;
12180+ return __limit;
12181 }
12182
12183 static inline void native_clts(void)
12184@@ -340,12 +340,12 @@ void enable_hlt(void);
12185
12186 void cpu_idle_wait(void);
12187
12188-extern unsigned long arch_align_stack(unsigned long sp);
12189+#define arch_align_stack(x) ((x) & ~0xfUL)
12190 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12191
12192 void default_idle(void);
12193
12194-void stop_this_cpu(void *dummy);
12195+void stop_this_cpu(void *dummy) __noreturn;
12196
12197 /*
12198 * Force strict CPU ordering.
12199diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12200index 19c3ce4..8962535 100644
12201--- a/arch/x86/include/asm/thread_info.h
12202+++ b/arch/x86/include/asm/thread_info.h
12203@@ -10,6 +10,7 @@
12204 #include <linux/compiler.h>
12205 #include <asm/page.h>
12206 #include <asm/types.h>
12207+#include <asm/percpu.h>
12208
12209 /*
12210 * low level task data that entry.S needs immediate access to
12211@@ -24,7 +25,6 @@ struct exec_domain;
12212 #include <asm/atomic.h>
12213
12214 struct thread_info {
12215- struct task_struct *task; /* main task structure */
12216 struct exec_domain *exec_domain; /* execution domain */
12217 __u32 flags; /* low level flags */
12218 __u32 status; /* thread synchronous flags */
12219@@ -34,18 +34,12 @@ struct thread_info {
12220 mm_segment_t addr_limit;
12221 struct restart_block restart_block;
12222 void __user *sysenter_return;
12223-#ifdef CONFIG_X86_32
12224- unsigned long previous_esp; /* ESP of the previous stack in
12225- case of nested (IRQ) stacks
12226- */
12227- __u8 supervisor_stack[0];
12228-#endif
12229+ unsigned long lowest_stack;
12230 int uaccess_err;
12231 };
12232
12233-#define INIT_THREAD_INFO(tsk) \
12234+#define INIT_THREAD_INFO \
12235 { \
12236- .task = &tsk, \
12237 .exec_domain = &default_exec_domain, \
12238 .flags = 0, \
12239 .cpu = 0, \
12240@@ -56,7 +50,7 @@ struct thread_info {
12241 }, \
12242 }
12243
12244-#define init_thread_info (init_thread_union.thread_info)
12245+#define init_thread_info (init_thread_union.stack)
12246 #define init_stack (init_thread_union.stack)
12247
12248 #else /* !__ASSEMBLY__ */
12249@@ -163,45 +157,40 @@ struct thread_info {
12250 #define alloc_thread_info(tsk) \
12251 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12252
12253-#ifdef CONFIG_X86_32
12254-
12255-#define STACK_WARN (THREAD_SIZE/8)
12256-/*
12257- * macros/functions for gaining access to the thread information structure
12258- *
12259- * preempt_count needs to be 1 initially, until the scheduler is functional.
12260- */
12261-#ifndef __ASSEMBLY__
12262-
12263-
12264-/* how to get the current stack pointer from C */
12265-register unsigned long current_stack_pointer asm("esp") __used;
12266-
12267-/* how to get the thread information struct from C */
12268-static inline struct thread_info *current_thread_info(void)
12269-{
12270- return (struct thread_info *)
12271- (current_stack_pointer & ~(THREAD_SIZE - 1));
12272-}
12273-
12274-#else /* !__ASSEMBLY__ */
12275-
12276+#ifdef __ASSEMBLY__
12277 /* how to get the thread information struct from ASM */
12278 #define GET_THREAD_INFO(reg) \
12279- movl $-THREAD_SIZE, reg; \
12280- andl %esp, reg
12281+ mov PER_CPU_VAR(current_tinfo), reg
12282
12283 /* use this one if reg already contains %esp */
12284-#define GET_THREAD_INFO_WITH_ESP(reg) \
12285- andl $-THREAD_SIZE, reg
12286+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12287+#else
12288+/* how to get the thread information struct from C */
12289+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12290+
12291+static __always_inline struct thread_info *current_thread_info(void)
12292+{
12293+ return percpu_read_stable(current_tinfo);
12294+}
12295+#endif
12296+
12297+#ifdef CONFIG_X86_32
12298+
12299+#define STACK_WARN (THREAD_SIZE/8)
12300+/*
12301+ * macros/functions for gaining access to the thread information structure
12302+ *
12303+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12304+ */
12305+#ifndef __ASSEMBLY__
12306+
12307+/* how to get the current stack pointer from C */
12308+register unsigned long current_stack_pointer asm("esp") __used;
12309
12310 #endif
12311
12312 #else /* X86_32 */
12313
12314-#include <asm/percpu.h>
12315-#define KERNEL_STACK_OFFSET (5*8)
12316-
12317 /*
12318 * macros/functions for gaining access to the thread information structure
12319 * preempt_count needs to be 1 initially, until the scheduler is functional.
12320@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12321 #ifndef __ASSEMBLY__
12322 DECLARE_PER_CPU(unsigned long, kernel_stack);
12323
12324-static inline struct thread_info *current_thread_info(void)
12325-{
12326- struct thread_info *ti;
12327- ti = (void *)(percpu_read_stable(kernel_stack) +
12328- KERNEL_STACK_OFFSET - THREAD_SIZE);
12329- return ti;
12330-}
12331-
12332-#else /* !__ASSEMBLY__ */
12333-
12334-/* how to get the thread information struct from ASM */
12335-#define GET_THREAD_INFO(reg) \
12336- movq PER_CPU_VAR(kernel_stack),reg ; \
12337- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12338-
12339+/* how to get the current stack pointer from C */
12340+register unsigned long current_stack_pointer asm("rsp") __used;
12341 #endif
12342
12343 #endif /* !X86_32 */
12344@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12345 extern void free_thread_info(struct thread_info *ti);
12346 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12347 #define arch_task_cache_init arch_task_cache_init
12348+
12349+#define __HAVE_THREAD_FUNCTIONS
12350+#define task_thread_info(task) (&(task)->tinfo)
12351+#define task_stack_page(task) ((task)->stack)
12352+#define setup_thread_stack(p, org) do {} while (0)
12353+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12354+
12355+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12356+extern struct task_struct *alloc_task_struct(void);
12357+extern void free_task_struct(struct task_struct *);
12358+
12359 #endif
12360 #endif /* _ASM_X86_THREAD_INFO_H */
12361diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12362index 61c5874..8a046e9 100644
12363--- a/arch/x86/include/asm/uaccess.h
12364+++ b/arch/x86/include/asm/uaccess.h
12365@@ -8,12 +8,15 @@
12366 #include <linux/thread_info.h>
12367 #include <linux/prefetch.h>
12368 #include <linux/string.h>
12369+#include <linux/sched.h>
12370 #include <asm/asm.h>
12371 #include <asm/page.h>
12372
12373 #define VERIFY_READ 0
12374 #define VERIFY_WRITE 1
12375
12376+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12377+
12378 /*
12379 * The fs value determines whether argument validity checking should be
12380 * performed or not. If get_fs() == USER_DS, checking is performed, with
12381@@ -29,7 +32,12 @@
12382
12383 #define get_ds() (KERNEL_DS)
12384 #define get_fs() (current_thread_info()->addr_limit)
12385+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12386+void __set_fs(mm_segment_t x);
12387+void set_fs(mm_segment_t x);
12388+#else
12389 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12390+#endif
12391
12392 #define segment_eq(a, b) ((a).seg == (b).seg)
12393
12394@@ -77,7 +85,33 @@
12395 * checks that the pointer is in the user space range - after calling
12396 * this function, memory access functions may still return -EFAULT.
12397 */
12398-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12399+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12400+#define access_ok(type, addr, size) \
12401+({ \
12402+ long __size = size; \
12403+ unsigned long __addr = (unsigned long)addr; \
12404+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12405+ unsigned long __end_ao = __addr + __size - 1; \
12406+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12407+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12408+ while(__addr_ao <= __end_ao) { \
12409+ char __c_ao; \
12410+ __addr_ao += PAGE_SIZE; \
12411+ if (__size > PAGE_SIZE) \
12412+ cond_resched(); \
12413+ if (__get_user(__c_ao, (char __user *)__addr)) \
12414+ break; \
12415+ if (type != VERIFY_WRITE) { \
12416+ __addr = __addr_ao; \
12417+ continue; \
12418+ } \
12419+ if (__put_user(__c_ao, (char __user *)__addr)) \
12420+ break; \
12421+ __addr = __addr_ao; \
12422+ } \
12423+ } \
12424+ __ret_ao; \
12425+})
12426
12427 /*
12428 * The exception table consists of pairs of addresses: the first is the
12429@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12430 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12431 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12432
12433-
12434+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12435+#define __copyuser_seg "gs;"
12436+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12437+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12438+#else
12439+#define __copyuser_seg
12440+#define __COPYUSER_SET_ES
12441+#define __COPYUSER_RESTORE_ES
12442+#endif
12443
12444 #ifdef CONFIG_X86_32
12445 #define __put_user_asm_u64(x, addr, err, errret) \
12446- asm volatile("1: movl %%eax,0(%2)\n" \
12447- "2: movl %%edx,4(%2)\n" \
12448+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12449+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12450 "3:\n" \
12451 ".section .fixup,\"ax\"\n" \
12452 "4: movl %3,%0\n" \
12453@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12454 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12455
12456 #define __put_user_asm_ex_u64(x, addr) \
12457- asm volatile("1: movl %%eax,0(%1)\n" \
12458- "2: movl %%edx,4(%1)\n" \
12459+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12460+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12461 "3:\n" \
12462 _ASM_EXTABLE(1b, 2b - 1b) \
12463 _ASM_EXTABLE(2b, 3b - 2b) \
12464@@ -253,7 +295,7 @@ extern void __put_user_8(void);
12465 __typeof__(*(ptr)) __pu_val; \
12466 __chk_user_ptr(ptr); \
12467 might_fault(); \
12468- __pu_val = x; \
12469+ __pu_val = (x); \
12470 switch (sizeof(*(ptr))) { \
12471 case 1: \
12472 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12473@@ -374,7 +416,7 @@ do { \
12474 } while (0)
12475
12476 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12477- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12478+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12479 "2:\n" \
12480 ".section .fixup,\"ax\"\n" \
12481 "3: mov %3,%0\n" \
12482@@ -382,7 +424,7 @@ do { \
12483 " jmp 2b\n" \
12484 ".previous\n" \
12485 _ASM_EXTABLE(1b, 3b) \
12486- : "=r" (err), ltype(x) \
12487+ : "=r" (err), ltype (x) \
12488 : "m" (__m(addr)), "i" (errret), "0" (err))
12489
12490 #define __get_user_size_ex(x, ptr, size) \
12491@@ -407,7 +449,7 @@ do { \
12492 } while (0)
12493
12494 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12495- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12496+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12497 "2:\n" \
12498 _ASM_EXTABLE(1b, 2b - 1b) \
12499 : ltype(x) : "m" (__m(addr)))
12500@@ -424,13 +466,24 @@ do { \
12501 int __gu_err; \
12502 unsigned long __gu_val; \
12503 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12504- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12505+ (x) = (__typeof__(*(ptr)))__gu_val; \
12506 __gu_err; \
12507 })
12508
12509 /* FIXME: this hack is definitely wrong -AK */
12510 struct __large_struct { unsigned long buf[100]; };
12511-#define __m(x) (*(struct __large_struct __user *)(x))
12512+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12513+#define ____m(x) \
12514+({ \
12515+ unsigned long ____x = (unsigned long)(x); \
12516+ if (____x < PAX_USER_SHADOW_BASE) \
12517+ ____x += PAX_USER_SHADOW_BASE; \
12518+ (void __user *)____x; \
12519+})
12520+#else
12521+#define ____m(x) (x)
12522+#endif
12523+#define __m(x) (*(struct __large_struct __user *)____m(x))
12524
12525 /*
12526 * Tell gcc we read from memory instead of writing: this is because
12527@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12528 * aliasing issues.
12529 */
12530 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12531- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12532+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12533 "2:\n" \
12534 ".section .fixup,\"ax\"\n" \
12535 "3: mov %3,%0\n" \
12536@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12537 ".previous\n" \
12538 _ASM_EXTABLE(1b, 3b) \
12539 : "=r"(err) \
12540- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12541+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12542
12543 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12544- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12545+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12546 "2:\n" \
12547 _ASM_EXTABLE(1b, 2b - 1b) \
12548 : : ltype(x), "m" (__m(addr)))
12549@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12550 * On error, the variable @x is set to zero.
12551 */
12552
12553+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12554+#define __get_user(x, ptr) get_user((x), (ptr))
12555+#else
12556 #define __get_user(x, ptr) \
12557 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12558+#endif
12559
12560 /**
12561 * __put_user: - Write a simple value into user space, with less checking.
12562@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12563 * Returns zero on success, or -EFAULT on error.
12564 */
12565
12566+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12567+#define __put_user(x, ptr) put_user((x), (ptr))
12568+#else
12569 #define __put_user(x, ptr) \
12570 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12571+#endif
12572
12573 #define __get_user_unaligned __get_user
12574 #define __put_user_unaligned __put_user
12575@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12576 #define get_user_ex(x, ptr) do { \
12577 unsigned long __gue_val; \
12578 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12579- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12580+ (x) = (__typeof__(*(ptr)))__gue_val; \
12581 } while (0)
12582
12583 #ifdef CONFIG_X86_WP_WORKS_OK
12584@@ -567,6 +628,7 @@ extern struct movsl_mask {
12585
12586 #define ARCH_HAS_NOCACHE_UACCESS 1
12587
12588+#define ARCH_HAS_SORT_EXTABLE
12589 #ifdef CONFIG_X86_32
12590 # include "uaccess_32.h"
12591 #else
12592diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12593index 632fb44..e30e334 100644
12594--- a/arch/x86/include/asm/uaccess_32.h
12595+++ b/arch/x86/include/asm/uaccess_32.h
12596@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12597 static __always_inline unsigned long __must_check
12598 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12599 {
12600+ pax_track_stack();
12601+
12602+ if ((long)n < 0)
12603+ return n;
12604+
12605 if (__builtin_constant_p(n)) {
12606 unsigned long ret;
12607
12608@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12609 return ret;
12610 }
12611 }
12612+ if (!__builtin_constant_p(n))
12613+ check_object_size(from, n, true);
12614 return __copy_to_user_ll(to, from, n);
12615 }
12616
12617@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12618 __copy_to_user(void __user *to, const void *from, unsigned long n)
12619 {
12620 might_fault();
12621+
12622 return __copy_to_user_inatomic(to, from, n);
12623 }
12624
12625 static __always_inline unsigned long
12626 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12627 {
12628+ if ((long)n < 0)
12629+ return n;
12630+
12631 /* Avoid zeroing the tail if the copy fails..
12632 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12633 * but as the zeroing behaviour is only significant when n is not
12634@@ -138,6 +149,12 @@ static __always_inline unsigned long
12635 __copy_from_user(void *to, const void __user *from, unsigned long n)
12636 {
12637 might_fault();
12638+
12639+ pax_track_stack();
12640+
12641+ if ((long)n < 0)
12642+ return n;
12643+
12644 if (__builtin_constant_p(n)) {
12645 unsigned long ret;
12646
12647@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12648 return ret;
12649 }
12650 }
12651+ if (!__builtin_constant_p(n))
12652+ check_object_size(to, n, false);
12653 return __copy_from_user_ll(to, from, n);
12654 }
12655
12656@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12657 const void __user *from, unsigned long n)
12658 {
12659 might_fault();
12660+
12661+ if ((long)n < 0)
12662+ return n;
12663+
12664 if (__builtin_constant_p(n)) {
12665 unsigned long ret;
12666
12667@@ -182,14 +205,62 @@ static __always_inline unsigned long
12668 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12669 unsigned long n)
12670 {
12671- return __copy_from_user_ll_nocache_nozero(to, from, n);
12672+ if ((long)n < 0)
12673+ return n;
12674+
12675+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12676+}
12677+
12678+/**
12679+ * copy_to_user: - Copy a block of data into user space.
12680+ * @to: Destination address, in user space.
12681+ * @from: Source address, in kernel space.
12682+ * @n: Number of bytes to copy.
12683+ *
12684+ * Context: User context only. This function may sleep.
12685+ *
12686+ * Copy data from kernel space to user space.
12687+ *
12688+ * Returns number of bytes that could not be copied.
12689+ * On success, this will be zero.
12690+ */
12691+static __always_inline unsigned long __must_check
12692+copy_to_user(void __user *to, const void *from, unsigned long n)
12693+{
12694+ if (access_ok(VERIFY_WRITE, to, n))
12695+ n = __copy_to_user(to, from, n);
12696+ return n;
12697+}
12698+
12699+/**
12700+ * copy_from_user: - Copy a block of data from user space.
12701+ * @to: Destination address, in kernel space.
12702+ * @from: Source address, in user space.
12703+ * @n: Number of bytes to copy.
12704+ *
12705+ * Context: User context only. This function may sleep.
12706+ *
12707+ * Copy data from user space to kernel space.
12708+ *
12709+ * Returns number of bytes that could not be copied.
12710+ * On success, this will be zero.
12711+ *
12712+ * If some data could not be copied, this function will pad the copied
12713+ * data to the requested size using zero bytes.
12714+ */
12715+static __always_inline unsigned long __must_check
12716+copy_from_user(void *to, const void __user *from, unsigned long n)
12717+{
12718+ if (access_ok(VERIFY_READ, from, n))
12719+ n = __copy_from_user(to, from, n);
12720+ else if ((long)n > 0) {
12721+ if (!__builtin_constant_p(n))
12722+ check_object_size(to, n, false);
12723+ memset(to, 0, n);
12724+ }
12725+ return n;
12726 }
12727
12728-unsigned long __must_check copy_to_user(void __user *to,
12729- const void *from, unsigned long n);
12730-unsigned long __must_check copy_from_user(void *to,
12731- const void __user *from,
12732- unsigned long n);
12733 long __must_check strncpy_from_user(char *dst, const char __user *src,
12734 long count);
12735 long __must_check __strncpy_from_user(char *dst,
12736diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12737index db24b21..f595ae7 100644
12738--- a/arch/x86/include/asm/uaccess_64.h
12739+++ b/arch/x86/include/asm/uaccess_64.h
12740@@ -9,6 +9,9 @@
12741 #include <linux/prefetch.h>
12742 #include <linux/lockdep.h>
12743 #include <asm/page.h>
12744+#include <asm/pgtable.h>
12745+
12746+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12747
12748 /*
12749 * Copy To/From Userspace
12750@@ -16,116 +19,205 @@
12751
12752 /* Handles exceptions in both to and from, but doesn't do access_ok */
12753 __must_check unsigned long
12754-copy_user_generic(void *to, const void *from, unsigned len);
12755+copy_user_generic(void *to, const void *from, unsigned long len);
12756
12757 __must_check unsigned long
12758-copy_to_user(void __user *to, const void *from, unsigned len);
12759-__must_check unsigned long
12760-copy_from_user(void *to, const void __user *from, unsigned len);
12761-__must_check unsigned long
12762-copy_in_user(void __user *to, const void __user *from, unsigned len);
12763+copy_in_user(void __user *to, const void __user *from, unsigned long len);
12764
12765 static __always_inline __must_check
12766-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12767+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12768 {
12769- int ret = 0;
12770+ unsigned ret = 0;
12771
12772 might_fault();
12773- if (!__builtin_constant_p(size))
12774- return copy_user_generic(dst, (__force void *)src, size);
12775+
12776+ if (size > INT_MAX)
12777+ return size;
12778+
12779+#ifdef CONFIG_PAX_MEMORY_UDEREF
12780+ if (!__access_ok(VERIFY_READ, src, size))
12781+ return size;
12782+#endif
12783+
12784+ if (!__builtin_constant_p(size)) {
12785+ check_object_size(dst, size, false);
12786+
12787+#ifdef CONFIG_PAX_MEMORY_UDEREF
12788+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12789+ src += PAX_USER_SHADOW_BASE;
12790+#endif
12791+
12792+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12793+ }
12794 switch (size) {
12795- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12796+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12797 ret, "b", "b", "=q", 1);
12798 return ret;
12799- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12800+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12801 ret, "w", "w", "=r", 2);
12802 return ret;
12803- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12804+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12805 ret, "l", "k", "=r", 4);
12806 return ret;
12807- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12808+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12809 ret, "q", "", "=r", 8);
12810 return ret;
12811 case 10:
12812- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12813+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12814 ret, "q", "", "=r", 10);
12815 if (unlikely(ret))
12816 return ret;
12817 __get_user_asm(*(u16 *)(8 + (char *)dst),
12818- (u16 __user *)(8 + (char __user *)src),
12819+ (const u16 __user *)(8 + (const char __user *)src),
12820 ret, "w", "w", "=r", 2);
12821 return ret;
12822 case 16:
12823- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12824+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12825 ret, "q", "", "=r", 16);
12826 if (unlikely(ret))
12827 return ret;
12828 __get_user_asm(*(u64 *)(8 + (char *)dst),
12829- (u64 __user *)(8 + (char __user *)src),
12830+ (const u64 __user *)(8 + (const char __user *)src),
12831 ret, "q", "", "=r", 8);
12832 return ret;
12833 default:
12834- return copy_user_generic(dst, (__force void *)src, size);
12835+
12836+#ifdef CONFIG_PAX_MEMORY_UDEREF
12837+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12838+ src += PAX_USER_SHADOW_BASE;
12839+#endif
12840+
12841+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12842 }
12843 }
12844
12845 static __always_inline __must_check
12846-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12847+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12848 {
12849- int ret = 0;
12850+ unsigned ret = 0;
12851
12852 might_fault();
12853- if (!__builtin_constant_p(size))
12854- return copy_user_generic((__force void *)dst, src, size);
12855+
12856+ pax_track_stack();
12857+
12858+ if (size > INT_MAX)
12859+ return size;
12860+
12861+#ifdef CONFIG_PAX_MEMORY_UDEREF
12862+ if (!__access_ok(VERIFY_WRITE, dst, size))
12863+ return size;
12864+#endif
12865+
12866+ if (!__builtin_constant_p(size)) {
12867+ check_object_size(src, size, true);
12868+
12869+#ifdef CONFIG_PAX_MEMORY_UDEREF
12870+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12871+ dst += PAX_USER_SHADOW_BASE;
12872+#endif
12873+
12874+ return copy_user_generic((__force_kernel void *)dst, src, size);
12875+ }
12876 switch (size) {
12877- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12878+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12879 ret, "b", "b", "iq", 1);
12880 return ret;
12881- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12882+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12883 ret, "w", "w", "ir", 2);
12884 return ret;
12885- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12886+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12887 ret, "l", "k", "ir", 4);
12888 return ret;
12889- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12890+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12891 ret, "q", "", "er", 8);
12892 return ret;
12893 case 10:
12894- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12895+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12896 ret, "q", "", "er", 10);
12897 if (unlikely(ret))
12898 return ret;
12899 asm("":::"memory");
12900- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12901+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12902 ret, "w", "w", "ir", 2);
12903 return ret;
12904 case 16:
12905- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12906+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12907 ret, "q", "", "er", 16);
12908 if (unlikely(ret))
12909 return ret;
12910 asm("":::"memory");
12911- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12912+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12913 ret, "q", "", "er", 8);
12914 return ret;
12915 default:
12916- return copy_user_generic((__force void *)dst, src, size);
12917+
12918+#ifdef CONFIG_PAX_MEMORY_UDEREF
12919+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12920+ dst += PAX_USER_SHADOW_BASE;
12921+#endif
12922+
12923+ return copy_user_generic((__force_kernel void *)dst, src, size);
12924+ }
12925+}
12926+
12927+static __always_inline __must_check
12928+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12929+{
12930+ if (access_ok(VERIFY_WRITE, to, len))
12931+ len = __copy_to_user(to, from, len);
12932+ return len;
12933+}
12934+
12935+static __always_inline __must_check
12936+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12937+{
12938+ might_fault();
12939+
12940+ if (access_ok(VERIFY_READ, from, len))
12941+ len = __copy_from_user(to, from, len);
12942+ else if (len < INT_MAX) {
12943+ if (!__builtin_constant_p(len))
12944+ check_object_size(to, len, false);
12945+ memset(to, 0, len);
12946 }
12947+ return len;
12948 }
12949
12950 static __always_inline __must_check
12951-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12952+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12953 {
12954- int ret = 0;
12955+ unsigned ret = 0;
12956
12957 might_fault();
12958- if (!__builtin_constant_p(size))
12959- return copy_user_generic((__force void *)dst,
12960- (__force void *)src, size);
12961+
12962+ pax_track_stack();
12963+
12964+ if (size > INT_MAX)
12965+ return size;
12966+
12967+#ifdef CONFIG_PAX_MEMORY_UDEREF
12968+ if (!__access_ok(VERIFY_READ, src, size))
12969+ return size;
12970+ if (!__access_ok(VERIFY_WRITE, dst, size))
12971+ return size;
12972+#endif
12973+
12974+ if (!__builtin_constant_p(size)) {
12975+
12976+#ifdef CONFIG_PAX_MEMORY_UDEREF
12977+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12978+ src += PAX_USER_SHADOW_BASE;
12979+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12980+ dst += PAX_USER_SHADOW_BASE;
12981+#endif
12982+
12983+ return copy_user_generic((__force_kernel void *)dst,
12984+ (__force_kernel const void *)src, size);
12985+ }
12986 switch (size) {
12987 case 1: {
12988 u8 tmp;
12989- __get_user_asm(tmp, (u8 __user *)src,
12990+ __get_user_asm(tmp, (const u8 __user *)src,
12991 ret, "b", "b", "=q", 1);
12992 if (likely(!ret))
12993 __put_user_asm(tmp, (u8 __user *)dst,
12994@@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12995 }
12996 case 2: {
12997 u16 tmp;
12998- __get_user_asm(tmp, (u16 __user *)src,
12999+ __get_user_asm(tmp, (const u16 __user *)src,
13000 ret, "w", "w", "=r", 2);
13001 if (likely(!ret))
13002 __put_user_asm(tmp, (u16 __user *)dst,
13003@@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13004
13005 case 4: {
13006 u32 tmp;
13007- __get_user_asm(tmp, (u32 __user *)src,
13008+ __get_user_asm(tmp, (const u32 __user *)src,
13009 ret, "l", "k", "=r", 4);
13010 if (likely(!ret))
13011 __put_user_asm(tmp, (u32 __user *)dst,
13012@@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13013 }
13014 case 8: {
13015 u64 tmp;
13016- __get_user_asm(tmp, (u64 __user *)src,
13017+ __get_user_asm(tmp, (const u64 __user *)src,
13018 ret, "q", "", "=r", 8);
13019 if (likely(!ret))
13020 __put_user_asm(tmp, (u64 __user *)dst,
13021@@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13022 return ret;
13023 }
13024 default:
13025- return copy_user_generic((__force void *)dst,
13026- (__force void *)src, size);
13027+
13028+#ifdef CONFIG_PAX_MEMORY_UDEREF
13029+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13030+ src += PAX_USER_SHADOW_BASE;
13031+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13032+ dst += PAX_USER_SHADOW_BASE;
13033+#endif
13034+
13035+ return copy_user_generic((__force_kernel void *)dst,
13036+ (__force_kernel const void *)src, size);
13037 }
13038 }
13039
13040@@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
13041 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
13042 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13043
13044-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
13045- unsigned size);
13046+static __must_check __always_inline unsigned long
13047+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13048+{
13049+ pax_track_stack();
13050+
13051+ if (size > INT_MAX)
13052+ return size;
13053+
13054+#ifdef CONFIG_PAX_MEMORY_UDEREF
13055+ if (!__access_ok(VERIFY_READ, src, size))
13056+ return size;
13057
13058-static __must_check __always_inline int
13059-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13060+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13061+ src += PAX_USER_SHADOW_BASE;
13062+#endif
13063+
13064+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13065+}
13066+
13067+static __must_check __always_inline unsigned long
13068+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13069 {
13070- return copy_user_generic((__force void *)dst, src, size);
13071+ if (size > INT_MAX)
13072+ return size;
13073+
13074+#ifdef CONFIG_PAX_MEMORY_UDEREF
13075+ if (!__access_ok(VERIFY_WRITE, dst, size))
13076+ return size;
13077+
13078+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13079+ dst += PAX_USER_SHADOW_BASE;
13080+#endif
13081+
13082+ return copy_user_generic((__force_kernel void *)dst, src, size);
13083 }
13084
13085-extern long __copy_user_nocache(void *dst, const void __user *src,
13086- unsigned size, int zerorest);
13087+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13088+ unsigned long size, int zerorest);
13089
13090-static inline int
13091-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13092+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13093 {
13094 might_sleep();
13095+
13096+ if (size > INT_MAX)
13097+ return size;
13098+
13099+#ifdef CONFIG_PAX_MEMORY_UDEREF
13100+ if (!__access_ok(VERIFY_READ, src, size))
13101+ return size;
13102+#endif
13103+
13104 return __copy_user_nocache(dst, src, size, 1);
13105 }
13106
13107-static inline int
13108-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13109- unsigned size)
13110+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13111+ unsigned long size)
13112 {
13113+ if (size > INT_MAX)
13114+ return size;
13115+
13116+#ifdef CONFIG_PAX_MEMORY_UDEREF
13117+ if (!__access_ok(VERIFY_READ, src, size))
13118+ return size;
13119+#endif
13120+
13121 return __copy_user_nocache(dst, src, size, 0);
13122 }
13123
13124-unsigned long
13125-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13126+extern unsigned long
13127+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
13128
13129 #endif /* _ASM_X86_UACCESS_64_H */
13130diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13131index 9064052..786cfbc 100644
13132--- a/arch/x86/include/asm/vdso.h
13133+++ b/arch/x86/include/asm/vdso.h
13134@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
13135 #define VDSO32_SYMBOL(base, name) \
13136 ({ \
13137 extern const char VDSO32_##name[]; \
13138- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13139+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13140 })
13141 #endif
13142
13143diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
13144index 3d61e20..9507180 100644
13145--- a/arch/x86/include/asm/vgtod.h
13146+++ b/arch/x86/include/asm/vgtod.h
13147@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
13148 int sysctl_enabled;
13149 struct timezone sys_tz;
13150 struct { /* extract of a clocksource struct */
13151+ char name[8];
13152 cycle_t (*vread)(void);
13153 cycle_t cycle_last;
13154 cycle_t mask;
13155diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
13156index 61e08c0..b0da582 100644
13157--- a/arch/x86/include/asm/vmi.h
13158+++ b/arch/x86/include/asm/vmi.h
13159@@ -191,6 +191,7 @@ struct vrom_header {
13160 u8 reserved[96]; /* Reserved for headers */
13161 char vmi_init[8]; /* VMI_Init jump point */
13162 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
13163+ char rom_data[8048]; /* rest of the option ROM */
13164 } __attribute__((packed));
13165
13166 struct pnp_header {
13167diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
13168index c6e0bee..fcb9f74 100644
13169--- a/arch/x86/include/asm/vmi_time.h
13170+++ b/arch/x86/include/asm/vmi_time.h
13171@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
13172 int (*wallclock_updated)(void);
13173 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
13174 void (*cancel_alarm)(u32 flags);
13175-} vmi_timer_ops;
13176+} __no_const vmi_timer_ops;
13177
13178 /* Prototypes */
13179 extern void __init vmi_time_init(void);
13180diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
13181index d0983d2..1f7c9e9 100644
13182--- a/arch/x86/include/asm/vsyscall.h
13183+++ b/arch/x86/include/asm/vsyscall.h
13184@@ -15,9 +15,10 @@ enum vsyscall_num {
13185
13186 #ifdef __KERNEL__
13187 #include <linux/seqlock.h>
13188+#include <linux/getcpu.h>
13189+#include <linux/time.h>
13190
13191 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
13192-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
13193
13194 /* Definitions for CONFIG_GENERIC_TIME definitions */
13195 #define __section_vsyscall_gtod_data __attribute__ \
13196@@ -31,7 +32,6 @@ enum vsyscall_num {
13197 #define VGETCPU_LSL 2
13198
13199 extern int __vgetcpu_mode;
13200-extern volatile unsigned long __jiffies;
13201
13202 /* kernel space (writeable) */
13203 extern int vgetcpu_mode;
13204@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
13205
13206 extern void map_vsyscall(void);
13207
13208+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
13209+extern time_t vtime(time_t *t);
13210+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
13211 #endif /* __KERNEL__ */
13212
13213 #endif /* _ASM_X86_VSYSCALL_H */
13214diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13215index 2c756fd..3377e37 100644
13216--- a/arch/x86/include/asm/x86_init.h
13217+++ b/arch/x86/include/asm/x86_init.h
13218@@ -28,7 +28,7 @@ struct x86_init_mpparse {
13219 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13220 void (*find_smp_config)(unsigned int reserve);
13221 void (*get_smp_config)(unsigned int early);
13222-};
13223+} __no_const;
13224
13225 /**
13226 * struct x86_init_resources - platform specific resource related ops
13227@@ -42,7 +42,7 @@ struct x86_init_resources {
13228 void (*probe_roms)(void);
13229 void (*reserve_resources)(void);
13230 char *(*memory_setup)(void);
13231-};
13232+} __no_const;
13233
13234 /**
13235 * struct x86_init_irqs - platform specific interrupt setup
13236@@ -55,7 +55,7 @@ struct x86_init_irqs {
13237 void (*pre_vector_init)(void);
13238 void (*intr_init)(void);
13239 void (*trap_init)(void);
13240-};
13241+} __no_const;
13242
13243 /**
13244 * struct x86_init_oem - oem platform specific customizing functions
13245@@ -65,7 +65,7 @@ struct x86_init_irqs {
13246 struct x86_init_oem {
13247 void (*arch_setup)(void);
13248 void (*banner)(void);
13249-};
13250+} __no_const;
13251
13252 /**
13253 * struct x86_init_paging - platform specific paging functions
13254@@ -75,7 +75,7 @@ struct x86_init_oem {
13255 struct x86_init_paging {
13256 void (*pagetable_setup_start)(pgd_t *base);
13257 void (*pagetable_setup_done)(pgd_t *base);
13258-};
13259+} __no_const;
13260
13261 /**
13262 * struct x86_init_timers - platform specific timer setup
13263@@ -88,7 +88,7 @@ struct x86_init_timers {
13264 void (*setup_percpu_clockev)(void);
13265 void (*tsc_pre_init)(void);
13266 void (*timer_init)(void);
13267-};
13268+} __no_const;
13269
13270 /**
13271 * struct x86_init_ops - functions for platform specific setup
13272@@ -101,7 +101,7 @@ struct x86_init_ops {
13273 struct x86_init_oem oem;
13274 struct x86_init_paging paging;
13275 struct x86_init_timers timers;
13276-};
13277+} __no_const;
13278
13279 /**
13280 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13281@@ -109,7 +109,7 @@ struct x86_init_ops {
13282 */
13283 struct x86_cpuinit_ops {
13284 void (*setup_percpu_clockev)(void);
13285-};
13286+} __no_const;
13287
13288 /**
13289 * struct x86_platform_ops - platform specific runtime functions
13290@@ -121,7 +121,7 @@ struct x86_platform_ops {
13291 unsigned long (*calibrate_tsc)(void);
13292 unsigned long (*get_wallclock)(void);
13293 int (*set_wallclock)(unsigned long nowtime);
13294-};
13295+} __no_const;
13296
13297 extern struct x86_init_ops x86_init;
13298 extern struct x86_cpuinit_ops x86_cpuinit;
13299diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13300index 727acc1..554f3eb 100644
13301--- a/arch/x86/include/asm/xsave.h
13302+++ b/arch/x86/include/asm/xsave.h
13303@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13304 static inline int xsave_user(struct xsave_struct __user *buf)
13305 {
13306 int err;
13307+
13308+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13309+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13310+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13311+#endif
13312+
13313 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13314 "2:\n"
13315 ".section .fixup,\"ax\"\n"
13316@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13317 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13318 {
13319 int err;
13320- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13321+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13322 u32 lmask = mask;
13323 u32 hmask = mask >> 32;
13324
13325+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13326+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13327+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13328+#endif
13329+
13330 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13331 "2:\n"
13332 ".section .fixup,\"ax\"\n"
13333diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13334index 6a564ac..9b1340c 100644
13335--- a/arch/x86/kernel/acpi/realmode/Makefile
13336+++ b/arch/x86/kernel/acpi/realmode/Makefile
13337@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13338 $(call cc-option, -fno-stack-protector) \
13339 $(call cc-option, -mpreferred-stack-boundary=2)
13340 KBUILD_CFLAGS += $(call cc-option, -m32)
13341+ifdef CONSTIFY_PLUGIN
13342+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13343+endif
13344 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13345 GCOV_PROFILE := n
13346
13347diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13348index 580b4e2..d4129e4 100644
13349--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13350+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13351@@ -91,6 +91,9 @@ _start:
13352 /* Do any other stuff... */
13353
13354 #ifndef CONFIG_64BIT
13355+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
13356+ call verify_cpu
13357+
13358 /* This could also be done in C code... */
13359 movl pmode_cr3, %eax
13360 movl %eax, %cr3
13361@@ -104,7 +107,7 @@ _start:
13362 movl %eax, %ecx
13363 orl %edx, %ecx
13364 jz 1f
13365- movl $0xc0000080, %ecx
13366+ mov $MSR_EFER, %ecx
13367 wrmsr
13368 1:
13369
13370@@ -114,6 +117,7 @@ _start:
13371 movl pmode_cr0, %eax
13372 movl %eax, %cr0
13373 jmp pmode_return
13374+# include "../../verify_cpu.S"
13375 #else
13376 pushw $0
13377 pushw trampoline_segment
13378diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13379index ca93638..7042f24 100644
13380--- a/arch/x86/kernel/acpi/sleep.c
13381+++ b/arch/x86/kernel/acpi/sleep.c
13382@@ -11,11 +11,12 @@
13383 #include <linux/cpumask.h>
13384 #include <asm/segment.h>
13385 #include <asm/desc.h>
13386+#include <asm/e820.h>
13387
13388 #include "realmode/wakeup.h"
13389 #include "sleep.h"
13390
13391-unsigned long acpi_wakeup_address;
13392+unsigned long acpi_wakeup_address = 0x2000;
13393 unsigned long acpi_realmode_flags;
13394
13395 /* address in low memory of the wakeup routine. */
13396@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13397 #else /* CONFIG_64BIT */
13398 header->trampoline_segment = setup_trampoline() >> 4;
13399 #ifdef CONFIG_SMP
13400- stack_start.sp = temp_stack + sizeof(temp_stack);
13401+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13402+
13403+ pax_open_kernel();
13404 early_gdt_descr.address =
13405 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13406+ pax_close_kernel();
13407+
13408 initial_gs = per_cpu_offset(smp_processor_id());
13409 #endif
13410 initial_code = (unsigned long)wakeup_long64;
13411@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13412 return;
13413 }
13414
13415- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13416-
13417- if (!acpi_realmode) {
13418- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13419- return;
13420- }
13421-
13422- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13423+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13424+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13425 }
13426
13427
13428diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13429index 8ded418..079961e 100644
13430--- a/arch/x86/kernel/acpi/wakeup_32.S
13431+++ b/arch/x86/kernel/acpi/wakeup_32.S
13432@@ -30,13 +30,11 @@ wakeup_pmode_return:
13433 # and restore the stack ... but you need gdt for this to work
13434 movl saved_context_esp, %esp
13435
13436- movl %cs:saved_magic, %eax
13437- cmpl $0x12345678, %eax
13438+ cmpl $0x12345678, saved_magic
13439 jne bogus_magic
13440
13441 # jump to place where we left off
13442- movl saved_eip, %eax
13443- jmp *%eax
13444+ jmp *(saved_eip)
13445
13446 bogus_magic:
13447 jmp bogus_magic
13448diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13449index de7353c..075da5f 100644
13450--- a/arch/x86/kernel/alternative.c
13451+++ b/arch/x86/kernel/alternative.c
13452@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13453
13454 BUG_ON(p->len > MAX_PATCH_LEN);
13455 /* prep the buffer with the original instructions */
13456- memcpy(insnbuf, p->instr, p->len);
13457+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13458 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13459 (unsigned long)p->instr, p->len);
13460
13461@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13462 if (smp_alt_once)
13463 free_init_pages("SMP alternatives",
13464 (unsigned long)__smp_locks,
13465- (unsigned long)__smp_locks_end);
13466+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13467
13468 restart_nmi();
13469 }
13470@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13471 * instructions. And on the local CPU you need to be protected again NMI or MCE
13472 * handlers seeing an inconsistent instruction while you patch.
13473 */
13474-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13475+static void *__kprobes text_poke_early(void *addr, const void *opcode,
13476 size_t len)
13477 {
13478 unsigned long flags;
13479 local_irq_save(flags);
13480- memcpy(addr, opcode, len);
13481+
13482+ pax_open_kernel();
13483+ memcpy(ktla_ktva(addr), opcode, len);
13484 sync_core();
13485+ pax_close_kernel();
13486+
13487 local_irq_restore(flags);
13488 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13489 that causes hangs on some VIA CPUs. */
13490@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13491 */
13492 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13493 {
13494- unsigned long flags;
13495- char *vaddr;
13496+ unsigned char *vaddr = ktla_ktva(addr);
13497 struct page *pages[2];
13498- int i;
13499+ size_t i;
13500
13501 if (!core_kernel_text((unsigned long)addr)) {
13502- pages[0] = vmalloc_to_page(addr);
13503- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13504+ pages[0] = vmalloc_to_page(vaddr);
13505+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13506 } else {
13507- pages[0] = virt_to_page(addr);
13508+ pages[0] = virt_to_page(vaddr);
13509 WARN_ON(!PageReserved(pages[0]));
13510- pages[1] = virt_to_page(addr + PAGE_SIZE);
13511+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13512 }
13513 BUG_ON(!pages[0]);
13514- local_irq_save(flags);
13515- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13516- if (pages[1])
13517- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13518- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13519- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13520- clear_fixmap(FIX_TEXT_POKE0);
13521- if (pages[1])
13522- clear_fixmap(FIX_TEXT_POKE1);
13523- local_flush_tlb();
13524- sync_core();
13525- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13526- that causes hangs on some VIA CPUs. */
13527+ text_poke_early(addr, opcode, len);
13528 for (i = 0; i < len; i++)
13529- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13530- local_irq_restore(flags);
13531+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13532 return addr;
13533 }
13534diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13535index 3a44b75..1601800 100644
13536--- a/arch/x86/kernel/amd_iommu.c
13537+++ b/arch/x86/kernel/amd_iommu.c
13538@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13539 }
13540 }
13541
13542-static struct dma_map_ops amd_iommu_dma_ops = {
13543+static const struct dma_map_ops amd_iommu_dma_ops = {
13544 .alloc_coherent = alloc_coherent,
13545 .free_coherent = free_coherent,
13546 .map_page = map_page,
13547diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13548index 1d2d670..8e3f477 100644
13549--- a/arch/x86/kernel/apic/apic.c
13550+++ b/arch/x86/kernel/apic/apic.c
13551@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13552 /*
13553 * Debug level, exported for io_apic.c
13554 */
13555-unsigned int apic_verbosity;
13556+int apic_verbosity;
13557
13558 int pic_mode;
13559
13560@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13561 apic_write(APIC_ESR, 0);
13562 v1 = apic_read(APIC_ESR);
13563 ack_APIC_irq();
13564- atomic_inc(&irq_err_count);
13565+ atomic_inc_unchecked(&irq_err_count);
13566
13567 /*
13568 * Here is what the APIC error bits mean:
13569@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13570 u16 *bios_cpu_apicid;
13571 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13572
13573+ pax_track_stack();
13574+
13575 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13576 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13577
13578diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13579index 8928d97..f799cea 100644
13580--- a/arch/x86/kernel/apic/io_apic.c
13581+++ b/arch/x86/kernel/apic/io_apic.c
13582@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13583 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13584 GFP_ATOMIC);
13585 if (!ioapic_entries)
13586- return 0;
13587+ return NULL;
13588
13589 for (apic = 0; apic < nr_ioapics; apic++) {
13590 ioapic_entries[apic] =
13591@@ -733,7 +733,7 @@ nomem:
13592 kfree(ioapic_entries[apic]);
13593 kfree(ioapic_entries);
13594
13595- return 0;
13596+ return NULL;
13597 }
13598
13599 /*
13600@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13601 }
13602 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13603
13604-void lock_vector_lock(void)
13605+void lock_vector_lock(void) __acquires(vector_lock)
13606 {
13607 /* Used to the online set of cpus does not change
13608 * during assign_irq_vector.
13609@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13610 spin_lock(&vector_lock);
13611 }
13612
13613-void unlock_vector_lock(void)
13614+void unlock_vector_lock(void) __releases(vector_lock)
13615 {
13616 spin_unlock(&vector_lock);
13617 }
13618@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13619 ack_APIC_irq();
13620 }
13621
13622-atomic_t irq_mis_count;
13623+atomic_unchecked_t irq_mis_count;
13624
13625 static void ack_apic_level(unsigned int irq)
13626 {
13627@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13628
13629 /* Tail end of version 0x11 I/O APIC bug workaround */
13630 if (!(v & (1 << (i & 0x1f)))) {
13631- atomic_inc(&irq_mis_count);
13632+ atomic_inc_unchecked(&irq_mis_count);
13633 spin_lock(&ioapic_lock);
13634 __mask_and_edge_IO_APIC_irq(cfg);
13635 __unmask_and_level_IO_APIC_irq(cfg);
13636diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13637index 151ace6..f317474 100644
13638--- a/arch/x86/kernel/apm_32.c
13639+++ b/arch/x86/kernel/apm_32.c
13640@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13641 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13642 * even though they are called in protected mode.
13643 */
13644-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13645+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13646 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13647
13648 static const char driver_version[] = "1.16ac"; /* no spaces */
13649@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13650 BUG_ON(cpu != 0);
13651 gdt = get_cpu_gdt_table(cpu);
13652 save_desc_40 = gdt[0x40 / 8];
13653+
13654+ pax_open_kernel();
13655 gdt[0x40 / 8] = bad_bios_desc;
13656+ pax_close_kernel();
13657
13658 apm_irq_save(flags);
13659 APM_DO_SAVE_SEGS;
13660@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13661 &call->esi);
13662 APM_DO_RESTORE_SEGS;
13663 apm_irq_restore(flags);
13664+
13665+ pax_open_kernel();
13666 gdt[0x40 / 8] = save_desc_40;
13667+ pax_close_kernel();
13668+
13669 put_cpu();
13670
13671 return call->eax & 0xff;
13672@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13673 BUG_ON(cpu != 0);
13674 gdt = get_cpu_gdt_table(cpu);
13675 save_desc_40 = gdt[0x40 / 8];
13676+
13677+ pax_open_kernel();
13678 gdt[0x40 / 8] = bad_bios_desc;
13679+ pax_close_kernel();
13680
13681 apm_irq_save(flags);
13682 APM_DO_SAVE_SEGS;
13683@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13684 &call->eax);
13685 APM_DO_RESTORE_SEGS;
13686 apm_irq_restore(flags);
13687+
13688+ pax_open_kernel();
13689 gdt[0x40 / 8] = save_desc_40;
13690+ pax_close_kernel();
13691+
13692 put_cpu();
13693 return error;
13694 }
13695@@ -975,7 +989,7 @@ recalc:
13696
13697 static void apm_power_off(void)
13698 {
13699- unsigned char po_bios_call[] = {
13700+ const unsigned char po_bios_call[] = {
13701 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13702 0x8e, 0xd0, /* movw ax,ss */
13703 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13704@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13705 * code to that CPU.
13706 */
13707 gdt = get_cpu_gdt_table(0);
13708+
13709+ pax_open_kernel();
13710 set_desc_base(&gdt[APM_CS >> 3],
13711 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13712 set_desc_base(&gdt[APM_CS_16 >> 3],
13713 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13714 set_desc_base(&gdt[APM_DS >> 3],
13715 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13716+ pax_close_kernel();
13717
13718 proc_create("apm", 0, NULL, &apm_file_ops);
13719
13720diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13721index dfdbf64..9b2b6ce 100644
13722--- a/arch/x86/kernel/asm-offsets_32.c
13723+++ b/arch/x86/kernel/asm-offsets_32.c
13724@@ -51,7 +51,6 @@ void foo(void)
13725 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13726 BLANK();
13727
13728- OFFSET(TI_task, thread_info, task);
13729 OFFSET(TI_exec_domain, thread_info, exec_domain);
13730 OFFSET(TI_flags, thread_info, flags);
13731 OFFSET(TI_status, thread_info, status);
13732@@ -60,6 +59,8 @@ void foo(void)
13733 OFFSET(TI_restart_block, thread_info, restart_block);
13734 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13735 OFFSET(TI_cpu, thread_info, cpu);
13736+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13737+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13738 BLANK();
13739
13740 OFFSET(GDS_size, desc_ptr, size);
13741@@ -99,6 +100,7 @@ void foo(void)
13742
13743 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13744 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13745+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13746 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13747 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13748 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13749@@ -115,6 +117,11 @@ void foo(void)
13750 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13751 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13752 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13753+
13754+#ifdef CONFIG_PAX_KERNEXEC
13755+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13756+#endif
13757+
13758 #endif
13759
13760 #ifdef CONFIG_XEN
13761diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13762index 4a6aeed..371de20 100644
13763--- a/arch/x86/kernel/asm-offsets_64.c
13764+++ b/arch/x86/kernel/asm-offsets_64.c
13765@@ -44,6 +44,8 @@ int main(void)
13766 ENTRY(addr_limit);
13767 ENTRY(preempt_count);
13768 ENTRY(status);
13769+ ENTRY(lowest_stack);
13770+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13771 #ifdef CONFIG_IA32_EMULATION
13772 ENTRY(sysenter_return);
13773 #endif
13774@@ -63,6 +65,18 @@ int main(void)
13775 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13776 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13777 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13778+
13779+#ifdef CONFIG_PAX_KERNEXEC
13780+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13781+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13782+#endif
13783+
13784+#ifdef CONFIG_PAX_MEMORY_UDEREF
13785+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13786+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13787+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13788+#endif
13789+
13790 #endif
13791
13792
13793@@ -115,6 +129,7 @@ int main(void)
13794 ENTRY(cr8);
13795 BLANK();
13796 #undef ENTRY
13797+ DEFINE(TSS_size, sizeof(struct tss_struct));
13798 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13799 BLANK();
13800 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13801@@ -130,6 +145,7 @@ int main(void)
13802
13803 BLANK();
13804 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13805+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13806 #ifdef CONFIG_XEN
13807 BLANK();
13808 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13809diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13810index ff502cc..dc5133e 100644
13811--- a/arch/x86/kernel/cpu/Makefile
13812+++ b/arch/x86/kernel/cpu/Makefile
13813@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13814 CFLAGS_REMOVE_common.o = -pg
13815 endif
13816
13817-# Make sure load_percpu_segment has no stackprotector
13818-nostackp := $(call cc-option, -fno-stack-protector)
13819-CFLAGS_common.o := $(nostackp)
13820-
13821 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13822 obj-y += proc.o capflags.o powerflags.o common.o
13823 obj-y += vmware.o hypervisor.o sched.o
13824diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13825index 6e082dc..a0b5f36 100644
13826--- a/arch/x86/kernel/cpu/amd.c
13827+++ b/arch/x86/kernel/cpu/amd.c
13828@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13829 unsigned int size)
13830 {
13831 /* AMD errata T13 (order #21922) */
13832- if ((c->x86 == 6)) {
13833+ if (c->x86 == 6) {
13834 /* Duron Rev A0 */
13835 if (c->x86_model == 3 && c->x86_mask == 0)
13836 size = 64;
13837diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13838index 4e34d10..ba6bc97 100644
13839--- a/arch/x86/kernel/cpu/common.c
13840+++ b/arch/x86/kernel/cpu/common.c
13841@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13842
13843 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13844
13845-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13846-#ifdef CONFIG_X86_64
13847- /*
13848- * We need valid kernel segments for data and code in long mode too
13849- * IRET will check the segment types kkeil 2000/10/28
13850- * Also sysret mandates a special GDT layout
13851- *
13852- * TLS descriptors are currently at a different place compared to i386.
13853- * Hopefully nobody expects them at a fixed place (Wine?)
13854- */
13855- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13856- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13857- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13858- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13859- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13860- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13861-#else
13862- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13863- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13864- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13865- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13866- /*
13867- * Segments used for calling PnP BIOS have byte granularity.
13868- * They code segments and data segments have fixed 64k limits,
13869- * the transfer segment sizes are set at run time.
13870- */
13871- /* 32-bit code */
13872- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13873- /* 16-bit code */
13874- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13875- /* 16-bit data */
13876- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13877- /* 16-bit data */
13878- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13879- /* 16-bit data */
13880- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13881- /*
13882- * The APM segments have byte granularity and their bases
13883- * are set at run time. All have 64k limits.
13884- */
13885- /* 32-bit code */
13886- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13887- /* 16-bit code */
13888- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13889- /* data */
13890- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13891-
13892- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13893- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13894- GDT_STACK_CANARY_INIT
13895-#endif
13896-} };
13897-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13898-
13899 static int __init x86_xsave_setup(char *s)
13900 {
13901 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13902@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13903 {
13904 struct desc_ptr gdt_descr;
13905
13906- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13907+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13908 gdt_descr.size = GDT_SIZE - 1;
13909 load_gdt(&gdt_descr);
13910 /* Reload the per-cpu base */
13911@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13912 /* Filter out anything that depends on CPUID levels we don't have */
13913 filter_cpuid_features(c, true);
13914
13915+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13916+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13917+#endif
13918+
13919 /* If the model name is still unset, do table lookup. */
13920 if (!c->x86_model_id[0]) {
13921 const char *p;
13922@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13923 }
13924 __setup("clearcpuid=", setup_disablecpuid);
13925
13926+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13927+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13928+
13929 #ifdef CONFIG_X86_64
13930 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13931
13932@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13933 EXPORT_PER_CPU_SYMBOL(current_task);
13934
13935 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13936- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13937+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13938 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13939
13940 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13941@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13942 {
13943 memset(regs, 0, sizeof(struct pt_regs));
13944 regs->fs = __KERNEL_PERCPU;
13945- regs->gs = __KERNEL_STACK_CANARY;
13946+ savesegment(gs, regs->gs);
13947
13948 return regs;
13949 }
13950@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13951 int i;
13952
13953 cpu = stack_smp_processor_id();
13954- t = &per_cpu(init_tss, cpu);
13955+ t = init_tss + cpu;
13956 orig_ist = &per_cpu(orig_ist, cpu);
13957
13958 #ifdef CONFIG_NUMA
13959@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13960 switch_to_new_gdt(cpu);
13961 loadsegment(fs, 0);
13962
13963- load_idt((const struct desc_ptr *)&idt_descr);
13964+ load_idt(&idt_descr);
13965
13966 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13967 syscall_init();
13968@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13969 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13970 barrier();
13971
13972- check_efer();
13973 if (cpu != 0)
13974 enable_x2apic();
13975
13976@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13977 {
13978 int cpu = smp_processor_id();
13979 struct task_struct *curr = current;
13980- struct tss_struct *t = &per_cpu(init_tss, cpu);
13981+ struct tss_struct *t = init_tss + cpu;
13982 struct thread_struct *thread = &curr->thread;
13983
13984 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13985diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13986index 6a77cca..4f4fca0 100644
13987--- a/arch/x86/kernel/cpu/intel.c
13988+++ b/arch/x86/kernel/cpu/intel.c
13989@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13990 * Update the IDT descriptor and reload the IDT so that
13991 * it uses the read-only mapped virtual address.
13992 */
13993- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13994+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13995 load_idt(&idt_descr);
13996 }
13997 #endif
13998diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13999index 417990f..96dc36b 100644
14000--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
14001+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
14002@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14003 return ret;
14004 }
14005
14006-static struct sysfs_ops sysfs_ops = {
14007+static const struct sysfs_ops sysfs_ops = {
14008 .show = show,
14009 .store = store,
14010 };
14011diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14012index 472763d..9831e11 100644
14013--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14014+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14015@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14016 static int inject_init(void)
14017 {
14018 printk(KERN_INFO "Machine check injector initialized\n");
14019- mce_chrdev_ops.write = mce_write;
14020+ pax_open_kernel();
14021+ *(void **)&mce_chrdev_ops.write = mce_write;
14022+ pax_close_kernel();
14023 register_die_notifier(&mce_raise_nb);
14024 return 0;
14025 }
14026diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14027index 0f16a2b..21740f5 100644
14028--- a/arch/x86/kernel/cpu/mcheck/mce.c
14029+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14030@@ -43,6 +43,7 @@
14031 #include <asm/ipi.h>
14032 #include <asm/mce.h>
14033 #include <asm/msr.h>
14034+#include <asm/local.h>
14035
14036 #include "mce-internal.h"
14037
14038@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
14039 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14040 m->cs, m->ip);
14041
14042- if (m->cs == __KERNEL_CS)
14043+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14044 print_symbol("{%s}", m->ip);
14045 pr_cont("\n");
14046 }
14047@@ -221,10 +222,10 @@ static void print_mce_tail(void)
14048
14049 #define PANIC_TIMEOUT 5 /* 5 seconds */
14050
14051-static atomic_t mce_paniced;
14052+static atomic_unchecked_t mce_paniced;
14053
14054 static int fake_panic;
14055-static atomic_t mce_fake_paniced;
14056+static atomic_unchecked_t mce_fake_paniced;
14057
14058 /* Panic in progress. Enable interrupts and wait for final IPI */
14059 static void wait_for_panic(void)
14060@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14061 /*
14062 * Make sure only one CPU runs in machine check panic
14063 */
14064- if (atomic_inc_return(&mce_paniced) > 1)
14065+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14066 wait_for_panic();
14067 barrier();
14068
14069@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14070 console_verbose();
14071 } else {
14072 /* Don't log too much for fake panic */
14073- if (atomic_inc_return(&mce_fake_paniced) > 1)
14074+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14075 return;
14076 }
14077 print_mce_head();
14078@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
14079 * might have been modified by someone else.
14080 */
14081 rmb();
14082- if (atomic_read(&mce_paniced))
14083+ if (atomic_read_unchecked(&mce_paniced))
14084 wait_for_panic();
14085 if (!monarch_timeout)
14086 goto out;
14087@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14088 }
14089
14090 /* Call the installed machine check handler for this CPU setup. */
14091-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14092+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14093 unexpected_machine_check;
14094
14095 /*
14096@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14097 return;
14098 }
14099
14100+ pax_open_kernel();
14101 machine_check_vector = do_machine_check;
14102+ pax_close_kernel();
14103
14104 mce_init();
14105 mce_cpu_features(c);
14106@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14107 */
14108
14109 static DEFINE_SPINLOCK(mce_state_lock);
14110-static int open_count; /* #times opened */
14111+static local_t open_count; /* #times opened */
14112 static int open_exclu; /* already open exclusive? */
14113
14114 static int mce_open(struct inode *inode, struct file *file)
14115 {
14116 spin_lock(&mce_state_lock);
14117
14118- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
14119+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
14120 spin_unlock(&mce_state_lock);
14121
14122 return -EBUSY;
14123@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
14124
14125 if (file->f_flags & O_EXCL)
14126 open_exclu = 1;
14127- open_count++;
14128+ local_inc(&open_count);
14129
14130 spin_unlock(&mce_state_lock);
14131
14132@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
14133 {
14134 spin_lock(&mce_state_lock);
14135
14136- open_count--;
14137+ local_dec(&open_count);
14138 open_exclu = 0;
14139
14140 spin_unlock(&mce_state_lock);
14141@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
14142 static void mce_reset(void)
14143 {
14144 cpu_missing = 0;
14145- atomic_set(&mce_fake_paniced, 0);
14146+ atomic_set_unchecked(&mce_fake_paniced, 0);
14147 atomic_set(&mce_executing, 0);
14148 atomic_set(&mce_callin, 0);
14149 atomic_set(&global_nwo, 0);
14150diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14151index ef3cd31..9d2f6ab 100644
14152--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
14153+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14154@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14155 return ret;
14156 }
14157
14158-static struct sysfs_ops threshold_ops = {
14159+static const struct sysfs_ops threshold_ops = {
14160 .show = show,
14161 .store = store,
14162 };
14163diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14164index 5c0e653..0882b0a 100644
14165--- a/arch/x86/kernel/cpu/mcheck/p5.c
14166+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14167@@ -12,6 +12,7 @@
14168 #include <asm/system.h>
14169 #include <asm/mce.h>
14170 #include <asm/msr.h>
14171+#include <asm/pgtable.h>
14172
14173 /* By default disabled */
14174 int mce_p5_enabled __read_mostly;
14175@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14176 if (!cpu_has(c, X86_FEATURE_MCE))
14177 return;
14178
14179+ pax_open_kernel();
14180 machine_check_vector = pentium_machine_check;
14181+ pax_close_kernel();
14182 /* Make sure the vector pointer is visible before we enable MCEs: */
14183 wmb();
14184
14185diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14186index 54060f5..c1a7577 100644
14187--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14188+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14189@@ -11,6 +11,7 @@
14190 #include <asm/system.h>
14191 #include <asm/mce.h>
14192 #include <asm/msr.h>
14193+#include <asm/pgtable.h>
14194
14195 /* Machine check handler for WinChip C6: */
14196 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14197@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14198 {
14199 u32 lo, hi;
14200
14201+ pax_open_kernel();
14202 machine_check_vector = winchip_machine_check;
14203+ pax_close_kernel();
14204 /* Make sure the vector pointer is visible before we enable MCEs: */
14205 wmb();
14206
14207diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
14208index 33af141..92ba9cd 100644
14209--- a/arch/x86/kernel/cpu/mtrr/amd.c
14210+++ b/arch/x86/kernel/cpu/mtrr/amd.c
14211@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14212 return 0;
14213 }
14214
14215-static struct mtrr_ops amd_mtrr_ops = {
14216+static const struct mtrr_ops amd_mtrr_ops = {
14217 .vendor = X86_VENDOR_AMD,
14218 .set = amd_set_mtrr,
14219 .get = amd_get_mtrr,
14220diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14221index de89f14..316fe3e 100644
14222--- a/arch/x86/kernel/cpu/mtrr/centaur.c
14223+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14224@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14225 return 0;
14226 }
14227
14228-static struct mtrr_ops centaur_mtrr_ops = {
14229+static const struct mtrr_ops centaur_mtrr_ops = {
14230 .vendor = X86_VENDOR_CENTAUR,
14231 .set = centaur_set_mcr,
14232 .get = centaur_get_mcr,
14233diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14234index 228d982..68a3343 100644
14235--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14236+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14237@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14238 post_set();
14239 }
14240
14241-static struct mtrr_ops cyrix_mtrr_ops = {
14242+static const struct mtrr_ops cyrix_mtrr_ops = {
14243 .vendor = X86_VENDOR_CYRIX,
14244 .set_all = cyrix_set_all,
14245 .set = cyrix_set_arr,
14246diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14247index 55da0c5..4d75584 100644
14248--- a/arch/x86/kernel/cpu/mtrr/generic.c
14249+++ b/arch/x86/kernel/cpu/mtrr/generic.c
14250@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14251 /*
14252 * Generic structure...
14253 */
14254-struct mtrr_ops generic_mtrr_ops = {
14255+const struct mtrr_ops generic_mtrr_ops = {
14256 .use_intel_if = 1,
14257 .set_all = generic_set_all,
14258 .get = generic_get_mtrr,
14259diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14260index fd60f09..c94ef52 100644
14261--- a/arch/x86/kernel/cpu/mtrr/main.c
14262+++ b/arch/x86/kernel/cpu/mtrr/main.c
14263@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14264 u64 size_or_mask, size_and_mask;
14265 static bool mtrr_aps_delayed_init;
14266
14267-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14268+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14269
14270-struct mtrr_ops *mtrr_if;
14271+const struct mtrr_ops *mtrr_if;
14272
14273 static void set_mtrr(unsigned int reg, unsigned long base,
14274 unsigned long size, mtrr_type type);
14275
14276-void set_mtrr_ops(struct mtrr_ops *ops)
14277+void set_mtrr_ops(const struct mtrr_ops *ops)
14278 {
14279 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14280 mtrr_ops[ops->vendor] = ops;
14281diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14282index a501dee..816c719 100644
14283--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14284+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14285@@ -25,14 +25,14 @@ struct mtrr_ops {
14286 int (*validate_add_page)(unsigned long base, unsigned long size,
14287 unsigned int type);
14288 int (*have_wrcomb)(void);
14289-};
14290+} __do_const;
14291
14292 extern int generic_get_free_region(unsigned long base, unsigned long size,
14293 int replace_reg);
14294 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14295 unsigned int type);
14296
14297-extern struct mtrr_ops generic_mtrr_ops;
14298+extern const struct mtrr_ops generic_mtrr_ops;
14299
14300 extern int positive_have_wrcomb(void);
14301
14302@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14303 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14304 void get_mtrr_state(void);
14305
14306-extern void set_mtrr_ops(struct mtrr_ops *ops);
14307+extern void set_mtrr_ops(const struct mtrr_ops *ops);
14308
14309 extern u64 size_or_mask, size_and_mask;
14310-extern struct mtrr_ops *mtrr_if;
14311+extern const struct mtrr_ops *mtrr_if;
14312
14313 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14314 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14315diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14316index 0ff02ca..fc49a60 100644
14317--- a/arch/x86/kernel/cpu/perf_event.c
14318+++ b/arch/x86/kernel/cpu/perf_event.c
14319@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14320 * count to the generic event atomically:
14321 */
14322 again:
14323- prev_raw_count = atomic64_read(&hwc->prev_count);
14324+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14325 rdmsrl(hwc->event_base + idx, new_raw_count);
14326
14327- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14328+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14329 new_raw_count) != prev_raw_count)
14330 goto again;
14331
14332@@ -741,7 +741,7 @@ again:
14333 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14334 delta >>= shift;
14335
14336- atomic64_add(delta, &event->count);
14337+ atomic64_add_unchecked(delta, &event->count);
14338 atomic64_sub(delta, &hwc->period_left);
14339
14340 return new_raw_count;
14341@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14342 * The hw event starts counting from this event offset,
14343 * mark it to be able to extra future deltas:
14344 */
14345- atomic64_set(&hwc->prev_count, (u64)-left);
14346+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14347
14348 err = checking_wrmsrl(hwc->event_base + idx,
14349 (u64)(-left) & x86_pmu.event_mask);
14350@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14351 break;
14352
14353 callchain_store(entry, frame.return_address);
14354- fp = frame.next_frame;
14355+ fp = (__force const void __user *)frame.next_frame;
14356 }
14357 }
14358
14359diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14360index 898df97..9e82503 100644
14361--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14362+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14363@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14364
14365 /* Interface defining a CPU specific perfctr watchdog */
14366 struct wd_ops {
14367- int (*reserve)(void);
14368- void (*unreserve)(void);
14369- int (*setup)(unsigned nmi_hz);
14370- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14371- void (*stop)(void);
14372+ int (* const reserve)(void);
14373+ void (* const unreserve)(void);
14374+ int (* const setup)(unsigned nmi_hz);
14375+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14376+ void (* const stop)(void);
14377 unsigned perfctr;
14378 unsigned evntsel;
14379 u64 checkbit;
14380@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14381 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14382 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14383
14384+/* cannot be const */
14385 static struct wd_ops intel_arch_wd_ops;
14386
14387 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14388@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14389 return 1;
14390 }
14391
14392+/* cannot be const */
14393 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14394 .reserve = single_msr_reserve,
14395 .unreserve = single_msr_unreserve,
14396diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14397index ff95824..2ffdcb5 100644
14398--- a/arch/x86/kernel/crash.c
14399+++ b/arch/x86/kernel/crash.c
14400@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14401 regs = args->regs;
14402
14403 #ifdef CONFIG_X86_32
14404- if (!user_mode_vm(regs)) {
14405+ if (!user_mode(regs)) {
14406 crash_fixup_ss_esp(&fixed_regs, regs);
14407 regs = &fixed_regs;
14408 }
14409diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14410index 37250fe..bf2ec74 100644
14411--- a/arch/x86/kernel/doublefault_32.c
14412+++ b/arch/x86/kernel/doublefault_32.c
14413@@ -11,7 +11,7 @@
14414
14415 #define DOUBLEFAULT_STACKSIZE (1024)
14416 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14417-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14418+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14419
14420 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14421
14422@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14423 unsigned long gdt, tss;
14424
14425 store_gdt(&gdt_desc);
14426- gdt = gdt_desc.address;
14427+ gdt = (unsigned long)gdt_desc.address;
14428
14429 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14430
14431@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14432 /* 0x2 bit is always set */
14433 .flags = X86_EFLAGS_SF | 0x2,
14434 .sp = STACK_START,
14435- .es = __USER_DS,
14436+ .es = __KERNEL_DS,
14437 .cs = __KERNEL_CS,
14438 .ss = __KERNEL_DS,
14439- .ds = __USER_DS,
14440+ .ds = __KERNEL_DS,
14441 .fs = __KERNEL_PERCPU,
14442
14443 .__cr3 = __pa_nodebug(swapper_pg_dir),
14444diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14445index 2d8a371..4fa6ae6 100644
14446--- a/arch/x86/kernel/dumpstack.c
14447+++ b/arch/x86/kernel/dumpstack.c
14448@@ -2,6 +2,9 @@
14449 * Copyright (C) 1991, 1992 Linus Torvalds
14450 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14451 */
14452+#ifdef CONFIG_GRKERNSEC_HIDESYM
14453+#define __INCLUDED_BY_HIDESYM 1
14454+#endif
14455 #include <linux/kallsyms.h>
14456 #include <linux/kprobes.h>
14457 #include <linux/uaccess.h>
14458@@ -28,7 +31,7 @@ static int die_counter;
14459
14460 void printk_address(unsigned long address, int reliable)
14461 {
14462- printk(" [<%p>] %s%pS\n", (void *) address,
14463+ printk(" [<%p>] %s%pA\n", (void *) address,
14464 reliable ? "" : "? ", (void *) address);
14465 }
14466
14467@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14468 static void
14469 print_ftrace_graph_addr(unsigned long addr, void *data,
14470 const struct stacktrace_ops *ops,
14471- struct thread_info *tinfo, int *graph)
14472+ struct task_struct *task, int *graph)
14473 {
14474- struct task_struct *task = tinfo->task;
14475 unsigned long ret_addr;
14476 int index = task->curr_ret_stack;
14477
14478@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14479 static inline void
14480 print_ftrace_graph_addr(unsigned long addr, void *data,
14481 const struct stacktrace_ops *ops,
14482- struct thread_info *tinfo, int *graph)
14483+ struct task_struct *task, int *graph)
14484 { }
14485 #endif
14486
14487@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14488 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14489 */
14490
14491-static inline int valid_stack_ptr(struct thread_info *tinfo,
14492- void *p, unsigned int size, void *end)
14493+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14494 {
14495- void *t = tinfo;
14496 if (end) {
14497 if (p < end && p >= (end-THREAD_SIZE))
14498 return 1;
14499@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14500 }
14501
14502 unsigned long
14503-print_context_stack(struct thread_info *tinfo,
14504+print_context_stack(struct task_struct *task, void *stack_start,
14505 unsigned long *stack, unsigned long bp,
14506 const struct stacktrace_ops *ops, void *data,
14507 unsigned long *end, int *graph)
14508 {
14509 struct stack_frame *frame = (struct stack_frame *)bp;
14510
14511- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14512+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14513 unsigned long addr;
14514
14515 addr = *stack;
14516@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14517 } else {
14518 ops->address(data, addr, 0);
14519 }
14520- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14521+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14522 }
14523 stack++;
14524 }
14525@@ -180,7 +180,7 @@ void dump_stack(void)
14526 #endif
14527
14528 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14529- current->pid, current->comm, print_tainted(),
14530+ task_pid_nr(current), current->comm, print_tainted(),
14531 init_utsname()->release,
14532 (int)strcspn(init_utsname()->version, " "),
14533 init_utsname()->version);
14534@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14535 return flags;
14536 }
14537
14538+extern void gr_handle_kernel_exploit(void);
14539+
14540 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14541 {
14542 if (regs && kexec_should_crash(current))
14543@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14544 panic("Fatal exception in interrupt");
14545 if (panic_on_oops)
14546 panic("Fatal exception");
14547- do_exit(signr);
14548+
14549+ gr_handle_kernel_exploit();
14550+
14551+ do_group_exit(signr);
14552 }
14553
14554 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14555@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14556 unsigned long flags = oops_begin();
14557 int sig = SIGSEGV;
14558
14559- if (!user_mode_vm(regs))
14560+ if (!user_mode(regs))
14561 report_bug(regs->ip, regs);
14562
14563 if (__die(str, regs, err))
14564diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14565index 81086c2..13e8b17 100644
14566--- a/arch/x86/kernel/dumpstack.h
14567+++ b/arch/x86/kernel/dumpstack.h
14568@@ -15,7 +15,7 @@
14569 #endif
14570
14571 extern unsigned long
14572-print_context_stack(struct thread_info *tinfo,
14573+print_context_stack(struct task_struct *task, void *stack_start,
14574 unsigned long *stack, unsigned long bp,
14575 const struct stacktrace_ops *ops, void *data,
14576 unsigned long *end, int *graph);
14577diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14578index f7dd2a7..504f53b 100644
14579--- a/arch/x86/kernel/dumpstack_32.c
14580+++ b/arch/x86/kernel/dumpstack_32.c
14581@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14582 #endif
14583
14584 for (;;) {
14585- struct thread_info *context;
14586+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14587+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14588
14589- context = (struct thread_info *)
14590- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14591- bp = print_context_stack(context, stack, bp, ops,
14592- data, NULL, &graph);
14593-
14594- stack = (unsigned long *)context->previous_esp;
14595- if (!stack)
14596+ if (stack_start == task_stack_page(task))
14597 break;
14598+ stack = *(unsigned long **)stack_start;
14599 if (ops->stack(data, "IRQ") < 0)
14600 break;
14601 touch_nmi_watchdog();
14602@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14603 * When in-kernel, we also print out the stack and code at the
14604 * time of the fault..
14605 */
14606- if (!user_mode_vm(regs)) {
14607+ if (!user_mode(regs)) {
14608 unsigned int code_prologue = code_bytes * 43 / 64;
14609 unsigned int code_len = code_bytes;
14610 unsigned char c;
14611 u8 *ip;
14612+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14613
14614 printk(KERN_EMERG "Stack:\n");
14615 show_stack_log_lvl(NULL, regs, &regs->sp,
14616@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14617
14618 printk(KERN_EMERG "Code: ");
14619
14620- ip = (u8 *)regs->ip - code_prologue;
14621+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14622 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14623 /* try starting at IP */
14624- ip = (u8 *)regs->ip;
14625+ ip = (u8 *)regs->ip + cs_base;
14626 code_len = code_len - code_prologue + 1;
14627 }
14628 for (i = 0; i < code_len; i++, ip++) {
14629@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14630 printk(" Bad EIP value.");
14631 break;
14632 }
14633- if (ip == (u8 *)regs->ip)
14634+ if (ip == (u8 *)regs->ip + cs_base)
14635 printk("<%02x> ", c);
14636 else
14637 printk("%02x ", c);
14638@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14639 printk("\n");
14640 }
14641
14642+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14643+void pax_check_alloca(unsigned long size)
14644+{
14645+ unsigned long sp = (unsigned long)&sp, stack_left;
14646+
14647+ /* all kernel stacks are of the same size */
14648+ stack_left = sp & (THREAD_SIZE - 1);
14649+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14650+}
14651+EXPORT_SYMBOL(pax_check_alloca);
14652+#endif
14653+
14654 int is_valid_bugaddr(unsigned long ip)
14655 {
14656 unsigned short ud2;
14657
14658+ ip = ktla_ktva(ip);
14659 if (ip < PAGE_OFFSET)
14660 return 0;
14661 if (probe_kernel_address((unsigned short *)ip, ud2))
14662diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14663index a071e6b..36cd585 100644
14664--- a/arch/x86/kernel/dumpstack_64.c
14665+++ b/arch/x86/kernel/dumpstack_64.c
14666@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14667 unsigned long *irq_stack_end =
14668 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14669 unsigned used = 0;
14670- struct thread_info *tinfo;
14671 int graph = 0;
14672+ void *stack_start;
14673
14674 if (!task)
14675 task = current;
14676@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14677 * current stack address. If the stacks consist of nested
14678 * exceptions
14679 */
14680- tinfo = task_thread_info(task);
14681 for (;;) {
14682 char *id;
14683 unsigned long *estack_end;
14684+
14685 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14686 &used, &id);
14687
14688@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14689 if (ops->stack(data, id) < 0)
14690 break;
14691
14692- bp = print_context_stack(tinfo, stack, bp, ops,
14693+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14694 data, estack_end, &graph);
14695 ops->stack(data, "<EOE>");
14696 /*
14697@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14698 if (stack >= irq_stack && stack < irq_stack_end) {
14699 if (ops->stack(data, "IRQ") < 0)
14700 break;
14701- bp = print_context_stack(tinfo, stack, bp,
14702+ bp = print_context_stack(task, irq_stack, stack, bp,
14703 ops, data, irq_stack_end, &graph);
14704 /*
14705 * We link to the next stack (which would be
14706@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14707 /*
14708 * This handles the process stack:
14709 */
14710- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14711+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14712+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14713 put_cpu();
14714 }
14715 EXPORT_SYMBOL(dump_trace);
14716@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14717 return ud2 == 0x0b0f;
14718 }
14719
14720+
14721+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14722+void pax_check_alloca(unsigned long size)
14723+{
14724+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14725+ unsigned cpu, used;
14726+ char *id;
14727+
14728+ /* check the process stack first */
14729+ stack_start = (unsigned long)task_stack_page(current);
14730+ stack_end = stack_start + THREAD_SIZE;
14731+ if (likely(stack_start <= sp && sp < stack_end)) {
14732+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14733+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14734+ return;
14735+ }
14736+
14737+ cpu = get_cpu();
14738+
14739+ /* check the irq stacks */
14740+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14741+ stack_start = stack_end - IRQ_STACK_SIZE;
14742+ if (stack_start <= sp && sp < stack_end) {
14743+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14744+ put_cpu();
14745+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14746+ return;
14747+ }
14748+
14749+ /* check the exception stacks */
14750+ used = 0;
14751+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14752+ stack_start = stack_end - EXCEPTION_STKSZ;
14753+ if (stack_end && stack_start <= sp && sp < stack_end) {
14754+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14755+ put_cpu();
14756+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14757+ return;
14758+ }
14759+
14760+ put_cpu();
14761+
14762+ /* unknown stack */
14763+ BUG();
14764+}
14765+EXPORT_SYMBOL(pax_check_alloca);
14766+#endif
14767diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14768index a89739a..95e0c48 100644
14769--- a/arch/x86/kernel/e820.c
14770+++ b/arch/x86/kernel/e820.c
14771@@ -733,7 +733,7 @@ struct early_res {
14772 };
14773 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14774 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14775- {}
14776+ { 0, 0, {0}, 0 }
14777 };
14778
14779 static int __init find_overlapped_early(u64 start, u64 end)
14780diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14781index b9c830c..1e41a96 100644
14782--- a/arch/x86/kernel/early_printk.c
14783+++ b/arch/x86/kernel/early_printk.c
14784@@ -7,6 +7,7 @@
14785 #include <linux/pci_regs.h>
14786 #include <linux/pci_ids.h>
14787 #include <linux/errno.h>
14788+#include <linux/sched.h>
14789 #include <asm/io.h>
14790 #include <asm/processor.h>
14791 #include <asm/fcntl.h>
14792@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14793 int n;
14794 va_list ap;
14795
14796+ pax_track_stack();
14797+
14798 va_start(ap, fmt);
14799 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14800 early_console->write(early_console, buf, n);
14801diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14802index 5cab48e..b025f9b 100644
14803--- a/arch/x86/kernel/efi_32.c
14804+++ b/arch/x86/kernel/efi_32.c
14805@@ -38,70 +38,56 @@
14806 */
14807
14808 static unsigned long efi_rt_eflags;
14809-static pgd_t efi_bak_pg_dir_pointer[2];
14810+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14811
14812-void efi_call_phys_prelog(void)
14813+void __init efi_call_phys_prelog(void)
14814 {
14815- unsigned long cr4;
14816- unsigned long temp;
14817 struct desc_ptr gdt_descr;
14818
14819+#ifdef CONFIG_PAX_KERNEXEC
14820+ struct desc_struct d;
14821+#endif
14822+
14823 local_irq_save(efi_rt_eflags);
14824
14825- /*
14826- * If I don't have PAE, I should just duplicate two entries in page
14827- * directory. If I have PAE, I just need to duplicate one entry in
14828- * page directory.
14829- */
14830- cr4 = read_cr4_safe();
14831-
14832- if (cr4 & X86_CR4_PAE) {
14833- efi_bak_pg_dir_pointer[0].pgd =
14834- swapper_pg_dir[pgd_index(0)].pgd;
14835- swapper_pg_dir[0].pgd =
14836- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14837- } else {
14838- efi_bak_pg_dir_pointer[0].pgd =
14839- swapper_pg_dir[pgd_index(0)].pgd;
14840- efi_bak_pg_dir_pointer[1].pgd =
14841- swapper_pg_dir[pgd_index(0x400000)].pgd;
14842- swapper_pg_dir[pgd_index(0)].pgd =
14843- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14844- temp = PAGE_OFFSET + 0x400000;
14845- swapper_pg_dir[pgd_index(0x400000)].pgd =
14846- swapper_pg_dir[pgd_index(temp)].pgd;
14847- }
14848+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14849+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14850+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14851
14852 /*
14853 * After the lock is released, the original page table is restored.
14854 */
14855 __flush_tlb_all();
14856
14857+#ifdef CONFIG_PAX_KERNEXEC
14858+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14859+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14860+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14861+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14862+#endif
14863+
14864 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14865 gdt_descr.size = GDT_SIZE - 1;
14866 load_gdt(&gdt_descr);
14867 }
14868
14869-void efi_call_phys_epilog(void)
14870+void __init efi_call_phys_epilog(void)
14871 {
14872- unsigned long cr4;
14873 struct desc_ptr gdt_descr;
14874
14875+#ifdef CONFIG_PAX_KERNEXEC
14876+ struct desc_struct d;
14877+
14878+ memset(&d, 0, sizeof d);
14879+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14880+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14881+#endif
14882+
14883 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14884 gdt_descr.size = GDT_SIZE - 1;
14885 load_gdt(&gdt_descr);
14886
14887- cr4 = read_cr4_safe();
14888-
14889- if (cr4 & X86_CR4_PAE) {
14890- swapper_pg_dir[pgd_index(0)].pgd =
14891- efi_bak_pg_dir_pointer[0].pgd;
14892- } else {
14893- swapper_pg_dir[pgd_index(0)].pgd =
14894- efi_bak_pg_dir_pointer[0].pgd;
14895- swapper_pg_dir[pgd_index(0x400000)].pgd =
14896- efi_bak_pg_dir_pointer[1].pgd;
14897- }
14898+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14899
14900 /*
14901 * After the lock is released, the original page table is restored.
14902diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14903index fbe66e6..c5c0dd2 100644
14904--- a/arch/x86/kernel/efi_stub_32.S
14905+++ b/arch/x86/kernel/efi_stub_32.S
14906@@ -6,7 +6,9 @@
14907 */
14908
14909 #include <linux/linkage.h>
14910+#include <linux/init.h>
14911 #include <asm/page_types.h>
14912+#include <asm/segment.h>
14913
14914 /*
14915 * efi_call_phys(void *, ...) is a function with variable parameters.
14916@@ -20,7 +22,7 @@
14917 * service functions will comply with gcc calling convention, too.
14918 */
14919
14920-.text
14921+__INIT
14922 ENTRY(efi_call_phys)
14923 /*
14924 * 0. The function can only be called in Linux kernel. So CS has been
14925@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14926 * The mapping of lower virtual memory has been created in prelog and
14927 * epilog.
14928 */
14929- movl $1f, %edx
14930- subl $__PAGE_OFFSET, %edx
14931- jmp *%edx
14932+ movl $(__KERNEXEC_EFI_DS), %edx
14933+ mov %edx, %ds
14934+ mov %edx, %es
14935+ mov %edx, %ss
14936+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14937 1:
14938
14939 /*
14940@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14941 * parameter 2, ..., param n. To make things easy, we save the return
14942 * address of efi_call_phys in a global variable.
14943 */
14944- popl %edx
14945- movl %edx, saved_return_addr
14946- /* get the function pointer into ECX*/
14947- popl %ecx
14948- movl %ecx, efi_rt_function_ptr
14949- movl $2f, %edx
14950- subl $__PAGE_OFFSET, %edx
14951- pushl %edx
14952+ popl (saved_return_addr)
14953+ popl (efi_rt_function_ptr)
14954
14955 /*
14956 * 3. Clear PG bit in %CR0.
14957@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14958 /*
14959 * 5. Call the physical function.
14960 */
14961- jmp *%ecx
14962+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
14963
14964-2:
14965 /*
14966 * 6. After EFI runtime service returns, control will return to
14967 * following instruction. We'd better readjust stack pointer first.
14968@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14969 movl %cr0, %edx
14970 orl $0x80000000, %edx
14971 movl %edx, %cr0
14972- jmp 1f
14973-1:
14974+
14975 /*
14976 * 8. Now restore the virtual mode from flat mode by
14977 * adding EIP with PAGE_OFFSET.
14978 */
14979- movl $1f, %edx
14980- jmp *%edx
14981+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14982 1:
14983+ movl $(__KERNEL_DS), %edx
14984+ mov %edx, %ds
14985+ mov %edx, %es
14986+ mov %edx, %ss
14987
14988 /*
14989 * 9. Balance the stack. And because EAX contain the return value,
14990 * we'd better not clobber it.
14991 */
14992- leal efi_rt_function_ptr, %edx
14993- movl (%edx), %ecx
14994- pushl %ecx
14995+ pushl (efi_rt_function_ptr)
14996
14997 /*
14998- * 10. Push the saved return address onto the stack and return.
14999+ * 10. Return to the saved return address.
15000 */
15001- leal saved_return_addr, %edx
15002- movl (%edx), %ecx
15003- pushl %ecx
15004- ret
15005+ jmpl *(saved_return_addr)
15006 ENDPROC(efi_call_phys)
15007 .previous
15008
15009-.data
15010+__INITDATA
15011 saved_return_addr:
15012 .long 0
15013 efi_rt_function_ptr:
15014diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
15015index 4c07cca..2c8427d 100644
15016--- a/arch/x86/kernel/efi_stub_64.S
15017+++ b/arch/x86/kernel/efi_stub_64.S
15018@@ -7,6 +7,7 @@
15019 */
15020
15021 #include <linux/linkage.h>
15022+#include <asm/alternative-asm.h>
15023
15024 #define SAVE_XMM \
15025 mov %rsp, %rax; \
15026@@ -40,6 +41,7 @@ ENTRY(efi_call0)
15027 call *%rdi
15028 addq $32, %rsp
15029 RESTORE_XMM
15030+ pax_force_retaddr 0, 1
15031 ret
15032 ENDPROC(efi_call0)
15033
15034@@ -50,6 +52,7 @@ ENTRY(efi_call1)
15035 call *%rdi
15036 addq $32, %rsp
15037 RESTORE_XMM
15038+ pax_force_retaddr 0, 1
15039 ret
15040 ENDPROC(efi_call1)
15041
15042@@ -60,6 +63,7 @@ ENTRY(efi_call2)
15043 call *%rdi
15044 addq $32, %rsp
15045 RESTORE_XMM
15046+ pax_force_retaddr 0, 1
15047 ret
15048 ENDPROC(efi_call2)
15049
15050@@ -71,6 +75,7 @@ ENTRY(efi_call3)
15051 call *%rdi
15052 addq $32, %rsp
15053 RESTORE_XMM
15054+ pax_force_retaddr 0, 1
15055 ret
15056 ENDPROC(efi_call3)
15057
15058@@ -83,6 +88,7 @@ ENTRY(efi_call4)
15059 call *%rdi
15060 addq $32, %rsp
15061 RESTORE_XMM
15062+ pax_force_retaddr 0, 1
15063 ret
15064 ENDPROC(efi_call4)
15065
15066@@ -96,6 +102,7 @@ ENTRY(efi_call5)
15067 call *%rdi
15068 addq $48, %rsp
15069 RESTORE_XMM
15070+ pax_force_retaddr 0, 1
15071 ret
15072 ENDPROC(efi_call5)
15073
15074@@ -112,5 +119,6 @@ ENTRY(efi_call6)
15075 call *%rdi
15076 addq $48, %rsp
15077 RESTORE_XMM
15078+ pax_force_retaddr 0, 1
15079 ret
15080 ENDPROC(efi_call6)
15081diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15082index c097e7d..c689cf4 100644
15083--- a/arch/x86/kernel/entry_32.S
15084+++ b/arch/x86/kernel/entry_32.S
15085@@ -185,13 +185,146 @@
15086 /*CFI_REL_OFFSET gs, PT_GS*/
15087 .endm
15088 .macro SET_KERNEL_GS reg
15089+
15090+#ifdef CONFIG_CC_STACKPROTECTOR
15091 movl $(__KERNEL_STACK_CANARY), \reg
15092+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15093+ movl $(__USER_DS), \reg
15094+#else
15095+ xorl \reg, \reg
15096+#endif
15097+
15098 movl \reg, %gs
15099 .endm
15100
15101 #endif /* CONFIG_X86_32_LAZY_GS */
15102
15103-.macro SAVE_ALL
15104+.macro pax_enter_kernel
15105+#ifdef CONFIG_PAX_KERNEXEC
15106+ call pax_enter_kernel
15107+#endif
15108+.endm
15109+
15110+.macro pax_exit_kernel
15111+#ifdef CONFIG_PAX_KERNEXEC
15112+ call pax_exit_kernel
15113+#endif
15114+.endm
15115+
15116+#ifdef CONFIG_PAX_KERNEXEC
15117+ENTRY(pax_enter_kernel)
15118+#ifdef CONFIG_PARAVIRT
15119+ pushl %eax
15120+ pushl %ecx
15121+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15122+ mov %eax, %esi
15123+#else
15124+ mov %cr0, %esi
15125+#endif
15126+ bts $16, %esi
15127+ jnc 1f
15128+ mov %cs, %esi
15129+ cmp $__KERNEL_CS, %esi
15130+ jz 3f
15131+ ljmp $__KERNEL_CS, $3f
15132+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15133+2:
15134+#ifdef CONFIG_PARAVIRT
15135+ mov %esi, %eax
15136+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15137+#else
15138+ mov %esi, %cr0
15139+#endif
15140+3:
15141+#ifdef CONFIG_PARAVIRT
15142+ popl %ecx
15143+ popl %eax
15144+#endif
15145+ ret
15146+ENDPROC(pax_enter_kernel)
15147+
15148+ENTRY(pax_exit_kernel)
15149+#ifdef CONFIG_PARAVIRT
15150+ pushl %eax
15151+ pushl %ecx
15152+#endif
15153+ mov %cs, %esi
15154+ cmp $__KERNEXEC_KERNEL_CS, %esi
15155+ jnz 2f
15156+#ifdef CONFIG_PARAVIRT
15157+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15158+ mov %eax, %esi
15159+#else
15160+ mov %cr0, %esi
15161+#endif
15162+ btr $16, %esi
15163+ ljmp $__KERNEL_CS, $1f
15164+1:
15165+#ifdef CONFIG_PARAVIRT
15166+ mov %esi, %eax
15167+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15168+#else
15169+ mov %esi, %cr0
15170+#endif
15171+2:
15172+#ifdef CONFIG_PARAVIRT
15173+ popl %ecx
15174+ popl %eax
15175+#endif
15176+ ret
15177+ENDPROC(pax_exit_kernel)
15178+#endif
15179+
15180+.macro pax_erase_kstack
15181+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15182+ call pax_erase_kstack
15183+#endif
15184+.endm
15185+
15186+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15187+/*
15188+ * ebp: thread_info
15189+ * ecx, edx: can be clobbered
15190+ */
15191+ENTRY(pax_erase_kstack)
15192+ pushl %edi
15193+ pushl %eax
15194+
15195+ mov TI_lowest_stack(%ebp), %edi
15196+ mov $-0xBEEF, %eax
15197+ std
15198+
15199+1: mov %edi, %ecx
15200+ and $THREAD_SIZE_asm - 1, %ecx
15201+ shr $2, %ecx
15202+ repne scasl
15203+ jecxz 2f
15204+
15205+ cmp $2*16, %ecx
15206+ jc 2f
15207+
15208+ mov $2*16, %ecx
15209+ repe scasl
15210+ jecxz 2f
15211+ jne 1b
15212+
15213+2: cld
15214+ mov %esp, %ecx
15215+ sub %edi, %ecx
15216+ shr $2, %ecx
15217+ rep stosl
15218+
15219+ mov TI_task_thread_sp0(%ebp), %edi
15220+ sub $128, %edi
15221+ mov %edi, TI_lowest_stack(%ebp)
15222+
15223+ popl %eax
15224+ popl %edi
15225+ ret
15226+ENDPROC(pax_erase_kstack)
15227+#endif
15228+
15229+.macro __SAVE_ALL _DS
15230 cld
15231 PUSH_GS
15232 pushl %fs
15233@@ -224,7 +357,7 @@
15234 pushl %ebx
15235 CFI_ADJUST_CFA_OFFSET 4
15236 CFI_REL_OFFSET ebx, 0
15237- movl $(__USER_DS), %edx
15238+ movl $\_DS, %edx
15239 movl %edx, %ds
15240 movl %edx, %es
15241 movl $(__KERNEL_PERCPU), %edx
15242@@ -232,6 +365,15 @@
15243 SET_KERNEL_GS %edx
15244 .endm
15245
15246+.macro SAVE_ALL
15247+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15248+ __SAVE_ALL __KERNEL_DS
15249+ pax_enter_kernel
15250+#else
15251+ __SAVE_ALL __USER_DS
15252+#endif
15253+.endm
15254+
15255 .macro RESTORE_INT_REGS
15256 popl %ebx
15257 CFI_ADJUST_CFA_OFFSET -4
15258@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15259 CFI_ADJUST_CFA_OFFSET -4
15260 jmp syscall_exit
15261 CFI_ENDPROC
15262-END(ret_from_fork)
15263+ENDPROC(ret_from_fork)
15264
15265 /*
15266 * Return to user mode is not as complex as all this looks,
15267@@ -352,7 +494,15 @@ check_userspace:
15268 movb PT_CS(%esp), %al
15269 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15270 cmpl $USER_RPL, %eax
15271+
15272+#ifdef CONFIG_PAX_KERNEXEC
15273+ jae resume_userspace
15274+
15275+ PAX_EXIT_KERNEL
15276+ jmp resume_kernel
15277+#else
15278 jb resume_kernel # not returning to v8086 or userspace
15279+#endif
15280
15281 ENTRY(resume_userspace)
15282 LOCKDEP_SYS_EXIT
15283@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15284 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15285 # int/exception return?
15286 jne work_pending
15287- jmp restore_all
15288-END(ret_from_exception)
15289+ jmp restore_all_pax
15290+ENDPROC(ret_from_exception)
15291
15292 #ifdef CONFIG_PREEMPT
15293 ENTRY(resume_kernel)
15294@@ -380,7 +530,7 @@ need_resched:
15295 jz restore_all
15296 call preempt_schedule_irq
15297 jmp need_resched
15298-END(resume_kernel)
15299+ENDPROC(resume_kernel)
15300 #endif
15301 CFI_ENDPROC
15302
15303@@ -414,25 +564,36 @@ sysenter_past_esp:
15304 /*CFI_REL_OFFSET cs, 0*/
15305 /*
15306 * Push current_thread_info()->sysenter_return to the stack.
15307- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15308- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15309 */
15310- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15311+ pushl $0
15312 CFI_ADJUST_CFA_OFFSET 4
15313 CFI_REL_OFFSET eip, 0
15314
15315 pushl %eax
15316 CFI_ADJUST_CFA_OFFSET 4
15317 SAVE_ALL
15318+ GET_THREAD_INFO(%ebp)
15319+ movl TI_sysenter_return(%ebp),%ebp
15320+ movl %ebp,PT_EIP(%esp)
15321 ENABLE_INTERRUPTS(CLBR_NONE)
15322
15323 /*
15324 * Load the potential sixth argument from user stack.
15325 * Careful about security.
15326 */
15327+ movl PT_OLDESP(%esp),%ebp
15328+
15329+#ifdef CONFIG_PAX_MEMORY_UDEREF
15330+ mov PT_OLDSS(%esp),%ds
15331+1: movl %ds:(%ebp),%ebp
15332+ push %ss
15333+ pop %ds
15334+#else
15335 cmpl $__PAGE_OFFSET-3,%ebp
15336 jae syscall_fault
15337 1: movl (%ebp),%ebp
15338+#endif
15339+
15340 movl %ebp,PT_EBP(%esp)
15341 .section __ex_table,"a"
15342 .align 4
15343@@ -455,12 +616,24 @@ sysenter_do_call:
15344 testl $_TIF_ALLWORK_MASK, %ecx
15345 jne sysexit_audit
15346 sysenter_exit:
15347+
15348+#ifdef CONFIG_PAX_RANDKSTACK
15349+ pushl_cfi %eax
15350+ movl %esp, %eax
15351+ call pax_randomize_kstack
15352+ popl_cfi %eax
15353+#endif
15354+
15355+ pax_erase_kstack
15356+
15357 /* if something modifies registers it must also disable sysexit */
15358 movl PT_EIP(%esp), %edx
15359 movl PT_OLDESP(%esp), %ecx
15360 xorl %ebp,%ebp
15361 TRACE_IRQS_ON
15362 1: mov PT_FS(%esp), %fs
15363+2: mov PT_DS(%esp), %ds
15364+3: mov PT_ES(%esp), %es
15365 PTGS_TO_GS
15366 ENABLE_INTERRUPTS_SYSEXIT
15367
15368@@ -477,6 +650,9 @@ sysenter_audit:
15369 movl %eax,%edx /* 2nd arg: syscall number */
15370 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15371 call audit_syscall_entry
15372+
15373+ pax_erase_kstack
15374+
15375 pushl %ebx
15376 CFI_ADJUST_CFA_OFFSET 4
15377 movl PT_EAX(%esp),%eax /* reload syscall number */
15378@@ -504,11 +680,17 @@ sysexit_audit:
15379
15380 CFI_ENDPROC
15381 .pushsection .fixup,"ax"
15382-2: movl $0,PT_FS(%esp)
15383+4: movl $0,PT_FS(%esp)
15384+ jmp 1b
15385+5: movl $0,PT_DS(%esp)
15386+ jmp 1b
15387+6: movl $0,PT_ES(%esp)
15388 jmp 1b
15389 .section __ex_table,"a"
15390 .align 4
15391- .long 1b,2b
15392+ .long 1b,4b
15393+ .long 2b,5b
15394+ .long 3b,6b
15395 .popsection
15396 PTGS_TO_GS_EX
15397 ENDPROC(ia32_sysenter_target)
15398@@ -538,6 +720,15 @@ syscall_exit:
15399 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15400 jne syscall_exit_work
15401
15402+restore_all_pax:
15403+
15404+#ifdef CONFIG_PAX_RANDKSTACK
15405+ movl %esp, %eax
15406+ call pax_randomize_kstack
15407+#endif
15408+
15409+ pax_erase_kstack
15410+
15411 restore_all:
15412 TRACE_IRQS_IRET
15413 restore_all_notrace:
15414@@ -602,10 +793,29 @@ ldt_ss:
15415 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15416 mov %dx, %ax /* eax: new kernel esp */
15417 sub %eax, %edx /* offset (low word is 0) */
15418- PER_CPU(gdt_page, %ebx)
15419+#ifdef CONFIG_SMP
15420+ movl PER_CPU_VAR(cpu_number), %ebx
15421+ shll $PAGE_SHIFT_asm, %ebx
15422+ addl $cpu_gdt_table, %ebx
15423+#else
15424+ movl $cpu_gdt_table, %ebx
15425+#endif
15426 shr $16, %edx
15427+
15428+#ifdef CONFIG_PAX_KERNEXEC
15429+ mov %cr0, %esi
15430+ btr $16, %esi
15431+ mov %esi, %cr0
15432+#endif
15433+
15434 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15435 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15436+
15437+#ifdef CONFIG_PAX_KERNEXEC
15438+ bts $16, %esi
15439+ mov %esi, %cr0
15440+#endif
15441+
15442 pushl $__ESPFIX_SS
15443 CFI_ADJUST_CFA_OFFSET 4
15444 push %eax /* new kernel esp */
15445@@ -636,36 +846,30 @@ work_resched:
15446 movl TI_flags(%ebp), %ecx
15447 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15448 # than syscall tracing?
15449- jz restore_all
15450+ jz restore_all_pax
15451 testb $_TIF_NEED_RESCHED, %cl
15452 jnz work_resched
15453
15454 work_notifysig: # deal with pending signals and
15455 # notify-resume requests
15456+ movl %esp, %eax
15457 #ifdef CONFIG_VM86
15458 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15459- movl %esp, %eax
15460- jne work_notifysig_v86 # returning to kernel-space or
15461+ jz 1f # returning to kernel-space or
15462 # vm86-space
15463- xorl %edx, %edx
15464- call do_notify_resume
15465- jmp resume_userspace_sig
15466
15467- ALIGN
15468-work_notifysig_v86:
15469 pushl %ecx # save ti_flags for do_notify_resume
15470 CFI_ADJUST_CFA_OFFSET 4
15471 call save_v86_state # %eax contains pt_regs pointer
15472 popl %ecx
15473 CFI_ADJUST_CFA_OFFSET -4
15474 movl %eax, %esp
15475-#else
15476- movl %esp, %eax
15477+1:
15478 #endif
15479 xorl %edx, %edx
15480 call do_notify_resume
15481 jmp resume_userspace_sig
15482-END(work_pending)
15483+ENDPROC(work_pending)
15484
15485 # perform syscall exit tracing
15486 ALIGN
15487@@ -673,11 +877,14 @@ syscall_trace_entry:
15488 movl $-ENOSYS,PT_EAX(%esp)
15489 movl %esp, %eax
15490 call syscall_trace_enter
15491+
15492+ pax_erase_kstack
15493+
15494 /* What it returned is what we'll actually use. */
15495 cmpl $(nr_syscalls), %eax
15496 jnae syscall_call
15497 jmp syscall_exit
15498-END(syscall_trace_entry)
15499+ENDPROC(syscall_trace_entry)
15500
15501 # perform syscall exit tracing
15502 ALIGN
15503@@ -690,20 +897,24 @@ syscall_exit_work:
15504 movl %esp, %eax
15505 call syscall_trace_leave
15506 jmp resume_userspace
15507-END(syscall_exit_work)
15508+ENDPROC(syscall_exit_work)
15509 CFI_ENDPROC
15510
15511 RING0_INT_FRAME # can't unwind into user space anyway
15512 syscall_fault:
15513+#ifdef CONFIG_PAX_MEMORY_UDEREF
15514+ push %ss
15515+ pop %ds
15516+#endif
15517 GET_THREAD_INFO(%ebp)
15518 movl $-EFAULT,PT_EAX(%esp)
15519 jmp resume_userspace
15520-END(syscall_fault)
15521+ENDPROC(syscall_fault)
15522
15523 syscall_badsys:
15524 movl $-ENOSYS,PT_EAX(%esp)
15525 jmp resume_userspace
15526-END(syscall_badsys)
15527+ENDPROC(syscall_badsys)
15528 CFI_ENDPROC
15529
15530 /*
15531@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15532 PTREGSCALL(vm86)
15533 PTREGSCALL(vm86old)
15534
15535+ ALIGN;
15536+ENTRY(kernel_execve)
15537+ push %ebp
15538+ sub $PT_OLDSS+4,%esp
15539+ push %edi
15540+ push %ecx
15541+ push %eax
15542+ lea 3*4(%esp),%edi
15543+ mov $PT_OLDSS/4+1,%ecx
15544+ xorl %eax,%eax
15545+ rep stosl
15546+ pop %eax
15547+ pop %ecx
15548+ pop %edi
15549+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15550+ mov %eax,PT_EBX(%esp)
15551+ mov %edx,PT_ECX(%esp)
15552+ mov %ecx,PT_EDX(%esp)
15553+ mov %esp,%eax
15554+ call sys_execve
15555+ GET_THREAD_INFO(%ebp)
15556+ test %eax,%eax
15557+ jz syscall_exit
15558+ add $PT_OLDSS+4,%esp
15559+ pop %ebp
15560+ ret
15561+
15562 .macro FIXUP_ESPFIX_STACK
15563 /*
15564 * Switch back for ESPFIX stack to the normal zerobased stack
15565@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15566 * normal stack and adjusts ESP with the matching offset.
15567 */
15568 /* fixup the stack */
15569- PER_CPU(gdt_page, %ebx)
15570+#ifdef CONFIG_SMP
15571+ movl PER_CPU_VAR(cpu_number), %ebx
15572+ shll $PAGE_SHIFT_asm, %ebx
15573+ addl $cpu_gdt_table, %ebx
15574+#else
15575+ movl $cpu_gdt_table, %ebx
15576+#endif
15577 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15578 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15579 shl $16, %eax
15580@@ -793,7 +1037,7 @@ vector=vector+1
15581 .endr
15582 2: jmp common_interrupt
15583 .endr
15584-END(irq_entries_start)
15585+ENDPROC(irq_entries_start)
15586
15587 .previous
15588 END(interrupt)
15589@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15590 CFI_ADJUST_CFA_OFFSET 4
15591 jmp error_code
15592 CFI_ENDPROC
15593-END(coprocessor_error)
15594+ENDPROC(coprocessor_error)
15595
15596 ENTRY(simd_coprocessor_error)
15597 RING0_INT_FRAME
15598@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15599 CFI_ADJUST_CFA_OFFSET 4
15600 jmp error_code
15601 CFI_ENDPROC
15602-END(simd_coprocessor_error)
15603+ENDPROC(simd_coprocessor_error)
15604
15605 ENTRY(device_not_available)
15606 RING0_INT_FRAME
15607@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15608 CFI_ADJUST_CFA_OFFSET 4
15609 jmp error_code
15610 CFI_ENDPROC
15611-END(device_not_available)
15612+ENDPROC(device_not_available)
15613
15614 #ifdef CONFIG_PARAVIRT
15615 ENTRY(native_iret)
15616@@ -869,12 +1113,12 @@ ENTRY(native_iret)
15617 .align 4
15618 .long native_iret, iret_exc
15619 .previous
15620-END(native_iret)
15621+ENDPROC(native_iret)
15622
15623 ENTRY(native_irq_enable_sysexit)
15624 sti
15625 sysexit
15626-END(native_irq_enable_sysexit)
15627+ENDPROC(native_irq_enable_sysexit)
15628 #endif
15629
15630 ENTRY(overflow)
15631@@ -885,7 +1129,7 @@ ENTRY(overflow)
15632 CFI_ADJUST_CFA_OFFSET 4
15633 jmp error_code
15634 CFI_ENDPROC
15635-END(overflow)
15636+ENDPROC(overflow)
15637
15638 ENTRY(bounds)
15639 RING0_INT_FRAME
15640@@ -895,7 +1139,7 @@ ENTRY(bounds)
15641 CFI_ADJUST_CFA_OFFSET 4
15642 jmp error_code
15643 CFI_ENDPROC
15644-END(bounds)
15645+ENDPROC(bounds)
15646
15647 ENTRY(invalid_op)
15648 RING0_INT_FRAME
15649@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15650 CFI_ADJUST_CFA_OFFSET 4
15651 jmp error_code
15652 CFI_ENDPROC
15653-END(invalid_op)
15654+ENDPROC(invalid_op)
15655
15656 ENTRY(coprocessor_segment_overrun)
15657 RING0_INT_FRAME
15658@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15659 CFI_ADJUST_CFA_OFFSET 4
15660 jmp error_code
15661 CFI_ENDPROC
15662-END(coprocessor_segment_overrun)
15663+ENDPROC(coprocessor_segment_overrun)
15664
15665 ENTRY(invalid_TSS)
15666 RING0_EC_FRAME
15667@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15668 CFI_ADJUST_CFA_OFFSET 4
15669 jmp error_code
15670 CFI_ENDPROC
15671-END(invalid_TSS)
15672+ENDPROC(invalid_TSS)
15673
15674 ENTRY(segment_not_present)
15675 RING0_EC_FRAME
15676@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15677 CFI_ADJUST_CFA_OFFSET 4
15678 jmp error_code
15679 CFI_ENDPROC
15680-END(segment_not_present)
15681+ENDPROC(segment_not_present)
15682
15683 ENTRY(stack_segment)
15684 RING0_EC_FRAME
15685@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15686 CFI_ADJUST_CFA_OFFSET 4
15687 jmp error_code
15688 CFI_ENDPROC
15689-END(stack_segment)
15690+ENDPROC(stack_segment)
15691
15692 ENTRY(alignment_check)
15693 RING0_EC_FRAME
15694@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15695 CFI_ADJUST_CFA_OFFSET 4
15696 jmp error_code
15697 CFI_ENDPROC
15698-END(alignment_check)
15699+ENDPROC(alignment_check)
15700
15701 ENTRY(divide_error)
15702 RING0_INT_FRAME
15703@@ -957,7 +1201,7 @@ ENTRY(divide_error)
15704 CFI_ADJUST_CFA_OFFSET 4
15705 jmp error_code
15706 CFI_ENDPROC
15707-END(divide_error)
15708+ENDPROC(divide_error)
15709
15710 #ifdef CONFIG_X86_MCE
15711 ENTRY(machine_check)
15712@@ -968,7 +1212,7 @@ ENTRY(machine_check)
15713 CFI_ADJUST_CFA_OFFSET 4
15714 jmp error_code
15715 CFI_ENDPROC
15716-END(machine_check)
15717+ENDPROC(machine_check)
15718 #endif
15719
15720 ENTRY(spurious_interrupt_bug)
15721@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15722 CFI_ADJUST_CFA_OFFSET 4
15723 jmp error_code
15724 CFI_ENDPROC
15725-END(spurious_interrupt_bug)
15726+ENDPROC(spurious_interrupt_bug)
15727
15728 ENTRY(kernel_thread_helper)
15729 pushl $0 # fake return address for unwinder
15730@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15731
15732 ENTRY(mcount)
15733 ret
15734-END(mcount)
15735+ENDPROC(mcount)
15736
15737 ENTRY(ftrace_caller)
15738 cmpl $0, function_trace_stop
15739@@ -1124,7 +1368,7 @@ ftrace_graph_call:
15740 .globl ftrace_stub
15741 ftrace_stub:
15742 ret
15743-END(ftrace_caller)
15744+ENDPROC(ftrace_caller)
15745
15746 #else /* ! CONFIG_DYNAMIC_FTRACE */
15747
15748@@ -1160,7 +1404,7 @@ trace:
15749 popl %ecx
15750 popl %eax
15751 jmp ftrace_stub
15752-END(mcount)
15753+ENDPROC(mcount)
15754 #endif /* CONFIG_DYNAMIC_FTRACE */
15755 #endif /* CONFIG_FUNCTION_TRACER */
15756
15757@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15758 popl %ecx
15759 popl %eax
15760 ret
15761-END(ftrace_graph_caller)
15762+ENDPROC(ftrace_graph_caller)
15763
15764 .globl return_to_handler
15765 return_to_handler:
15766@@ -1198,7 +1442,6 @@ return_to_handler:
15767 ret
15768 #endif
15769
15770-.section .rodata,"a"
15771 #include "syscall_table_32.S"
15772
15773 syscall_table_size=(.-sys_call_table)
15774@@ -1255,15 +1498,18 @@ error_code:
15775 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15776 REG_TO_PTGS %ecx
15777 SET_KERNEL_GS %ecx
15778- movl $(__USER_DS), %ecx
15779+ movl $(__KERNEL_DS), %ecx
15780 movl %ecx, %ds
15781 movl %ecx, %es
15782+
15783+ pax_enter_kernel
15784+
15785 TRACE_IRQS_OFF
15786 movl %esp,%eax # pt_regs pointer
15787 call *%edi
15788 jmp ret_from_exception
15789 CFI_ENDPROC
15790-END(page_fault)
15791+ENDPROC(page_fault)
15792
15793 /*
15794 * Debug traps and NMI can happen at the one SYSENTER instruction
15795@@ -1309,7 +1555,7 @@ debug_stack_correct:
15796 call do_debug
15797 jmp ret_from_exception
15798 CFI_ENDPROC
15799-END(debug)
15800+ENDPROC(debug)
15801
15802 /*
15803 * NMI is doubly nasty. It can happen _while_ we're handling
15804@@ -1351,6 +1597,9 @@ nmi_stack_correct:
15805 xorl %edx,%edx # zero error code
15806 movl %esp,%eax # pt_regs pointer
15807 call do_nmi
15808+
15809+ pax_exit_kernel
15810+
15811 jmp restore_all_notrace
15812 CFI_ENDPROC
15813
15814@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15815 FIXUP_ESPFIX_STACK # %eax == %esp
15816 xorl %edx,%edx # zero error code
15817 call do_nmi
15818+
15819+ pax_exit_kernel
15820+
15821 RESTORE_REGS
15822 lss 12+4(%esp), %esp # back to espfix stack
15823 CFI_ADJUST_CFA_OFFSET -24
15824 jmp irq_return
15825 CFI_ENDPROC
15826-END(nmi)
15827+ENDPROC(nmi)
15828
15829 ENTRY(int3)
15830 RING0_INT_FRAME
15831@@ -1409,7 +1661,7 @@ ENTRY(int3)
15832 call do_int3
15833 jmp ret_from_exception
15834 CFI_ENDPROC
15835-END(int3)
15836+ENDPROC(int3)
15837
15838 ENTRY(general_protection)
15839 RING0_EC_FRAME
15840@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15841 CFI_ADJUST_CFA_OFFSET 4
15842 jmp error_code
15843 CFI_ENDPROC
15844-END(general_protection)
15845+ENDPROC(general_protection)
15846
15847 /*
15848 * End of kprobes section
15849diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15850index 34a56a9..87790b4 100644
15851--- a/arch/x86/kernel/entry_64.S
15852+++ b/arch/x86/kernel/entry_64.S
15853@@ -53,6 +53,8 @@
15854 #include <asm/paravirt.h>
15855 #include <asm/ftrace.h>
15856 #include <asm/percpu.h>
15857+#include <asm/pgtable.h>
15858+#include <asm/alternative-asm.h>
15859
15860 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15861 #include <linux/elf-em.h>
15862@@ -64,8 +66,9 @@
15863 #ifdef CONFIG_FUNCTION_TRACER
15864 #ifdef CONFIG_DYNAMIC_FTRACE
15865 ENTRY(mcount)
15866+ pax_force_retaddr
15867 retq
15868-END(mcount)
15869+ENDPROC(mcount)
15870
15871 ENTRY(ftrace_caller)
15872 cmpl $0, function_trace_stop
15873@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15874 #endif
15875
15876 GLOBAL(ftrace_stub)
15877+ pax_force_retaddr
15878 retq
15879-END(ftrace_caller)
15880+ENDPROC(ftrace_caller)
15881
15882 #else /* ! CONFIG_DYNAMIC_FTRACE */
15883 ENTRY(mcount)
15884@@ -108,6 +112,7 @@ ENTRY(mcount)
15885 #endif
15886
15887 GLOBAL(ftrace_stub)
15888+ pax_force_retaddr
15889 retq
15890
15891 trace:
15892@@ -117,12 +122,13 @@ trace:
15893 movq 8(%rbp), %rsi
15894 subq $MCOUNT_INSN_SIZE, %rdi
15895
15896+ pax_force_fptr ftrace_trace_function
15897 call *ftrace_trace_function
15898
15899 MCOUNT_RESTORE_FRAME
15900
15901 jmp ftrace_stub
15902-END(mcount)
15903+ENDPROC(mcount)
15904 #endif /* CONFIG_DYNAMIC_FTRACE */
15905 #endif /* CONFIG_FUNCTION_TRACER */
15906
15907@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15908
15909 MCOUNT_RESTORE_FRAME
15910
15911+ pax_force_retaddr
15912 retq
15913-END(ftrace_graph_caller)
15914+ENDPROC(ftrace_graph_caller)
15915
15916 GLOBAL(return_to_handler)
15917 subq $24, %rsp
15918@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15919 movq 8(%rsp), %rdx
15920 movq (%rsp), %rax
15921 addq $16, %rsp
15922+ pax_force_retaddr
15923 retq
15924 #endif
15925
15926@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15927 ENDPROC(native_usergs_sysret64)
15928 #endif /* CONFIG_PARAVIRT */
15929
15930+ .macro ljmpq sel, off
15931+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15932+ .byte 0x48; ljmp *1234f(%rip)
15933+ .pushsection .rodata
15934+ .align 16
15935+ 1234: .quad \off; .word \sel
15936+ .popsection
15937+#else
15938+ pushq $\sel
15939+ pushq $\off
15940+ lretq
15941+#endif
15942+ .endm
15943+
15944+ .macro pax_enter_kernel
15945+ pax_set_fptr_mask
15946+#ifdef CONFIG_PAX_KERNEXEC
15947+ call pax_enter_kernel
15948+#endif
15949+ .endm
15950+
15951+ .macro pax_exit_kernel
15952+#ifdef CONFIG_PAX_KERNEXEC
15953+ call pax_exit_kernel
15954+#endif
15955+ .endm
15956+
15957+#ifdef CONFIG_PAX_KERNEXEC
15958+ENTRY(pax_enter_kernel)
15959+ pushq %rdi
15960+
15961+#ifdef CONFIG_PARAVIRT
15962+ PV_SAVE_REGS(CLBR_RDI)
15963+#endif
15964+
15965+ GET_CR0_INTO_RDI
15966+ bts $16,%rdi
15967+ jnc 3f
15968+ mov %cs,%edi
15969+ cmp $__KERNEL_CS,%edi
15970+ jnz 2f
15971+1:
15972+
15973+#ifdef CONFIG_PARAVIRT
15974+ PV_RESTORE_REGS(CLBR_RDI)
15975+#endif
15976+
15977+ popq %rdi
15978+ pax_force_retaddr
15979+ retq
15980+
15981+2: ljmpq __KERNEL_CS,1f
15982+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15983+4: SET_RDI_INTO_CR0
15984+ jmp 1b
15985+ENDPROC(pax_enter_kernel)
15986+
15987+ENTRY(pax_exit_kernel)
15988+ pushq %rdi
15989+
15990+#ifdef CONFIG_PARAVIRT
15991+ PV_SAVE_REGS(CLBR_RDI)
15992+#endif
15993+
15994+ mov %cs,%rdi
15995+ cmp $__KERNEXEC_KERNEL_CS,%edi
15996+ jz 2f
15997+1:
15998+
15999+#ifdef CONFIG_PARAVIRT
16000+ PV_RESTORE_REGS(CLBR_RDI);
16001+#endif
16002+
16003+ popq %rdi
16004+ pax_force_retaddr
16005+ retq
16006+
16007+2: GET_CR0_INTO_RDI
16008+ btr $16,%rdi
16009+ ljmpq __KERNEL_CS,3f
16010+3: SET_RDI_INTO_CR0
16011+ jmp 1b
16012+#ifdef CONFIG_PARAVIRT
16013+ PV_RESTORE_REGS(CLBR_RDI);
16014+#endif
16015+
16016+ popq %rdi
16017+ pax_force_retaddr
16018+ retq
16019+ENDPROC(pax_exit_kernel)
16020+#endif
16021+
16022+ .macro pax_enter_kernel_user
16023+ pax_set_fptr_mask
16024+#ifdef CONFIG_PAX_MEMORY_UDEREF
16025+ call pax_enter_kernel_user
16026+#endif
16027+ .endm
16028+
16029+ .macro pax_exit_kernel_user
16030+#ifdef CONFIG_PAX_MEMORY_UDEREF
16031+ call pax_exit_kernel_user
16032+#endif
16033+#ifdef CONFIG_PAX_RANDKSTACK
16034+ pushq %rax
16035+ call pax_randomize_kstack
16036+ popq %rax
16037+#endif
16038+ .endm
16039+
16040+#ifdef CONFIG_PAX_MEMORY_UDEREF
16041+ENTRY(pax_enter_kernel_user)
16042+ pushq %rdi
16043+ pushq %rbx
16044+
16045+#ifdef CONFIG_PARAVIRT
16046+ PV_SAVE_REGS(CLBR_RDI)
16047+#endif
16048+
16049+ GET_CR3_INTO_RDI
16050+ mov %rdi,%rbx
16051+ add $__START_KERNEL_map,%rbx
16052+ sub phys_base(%rip),%rbx
16053+
16054+#ifdef CONFIG_PARAVIRT
16055+ pushq %rdi
16056+ cmpl $0, pv_info+PARAVIRT_enabled
16057+ jz 1f
16058+ i = 0
16059+ .rept USER_PGD_PTRS
16060+ mov i*8(%rbx),%rsi
16061+ mov $0,%sil
16062+ lea i*8(%rbx),%rdi
16063+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16064+ i = i + 1
16065+ .endr
16066+ jmp 2f
16067+1:
16068+#endif
16069+
16070+ i = 0
16071+ .rept USER_PGD_PTRS
16072+ movb $0,i*8(%rbx)
16073+ i = i + 1
16074+ .endr
16075+
16076+#ifdef CONFIG_PARAVIRT
16077+2: popq %rdi
16078+#endif
16079+ SET_RDI_INTO_CR3
16080+
16081+#ifdef CONFIG_PAX_KERNEXEC
16082+ GET_CR0_INTO_RDI
16083+ bts $16,%rdi
16084+ SET_RDI_INTO_CR0
16085+#endif
16086+
16087+#ifdef CONFIG_PARAVIRT
16088+ PV_RESTORE_REGS(CLBR_RDI)
16089+#endif
16090+
16091+ popq %rbx
16092+ popq %rdi
16093+ pax_force_retaddr
16094+ retq
16095+ENDPROC(pax_enter_kernel_user)
16096+
16097+ENTRY(pax_exit_kernel_user)
16098+ push %rdi
16099+
16100+#ifdef CONFIG_PARAVIRT
16101+ pushq %rbx
16102+ PV_SAVE_REGS(CLBR_RDI)
16103+#endif
16104+
16105+#ifdef CONFIG_PAX_KERNEXEC
16106+ GET_CR0_INTO_RDI
16107+ btr $16,%rdi
16108+ SET_RDI_INTO_CR0
16109+#endif
16110+
16111+ GET_CR3_INTO_RDI
16112+ add $__START_KERNEL_map,%rdi
16113+ sub phys_base(%rip),%rdi
16114+
16115+#ifdef CONFIG_PARAVIRT
16116+ cmpl $0, pv_info+PARAVIRT_enabled
16117+ jz 1f
16118+ mov %rdi,%rbx
16119+ i = 0
16120+ .rept USER_PGD_PTRS
16121+ mov i*8(%rbx),%rsi
16122+ mov $0x67,%sil
16123+ lea i*8(%rbx),%rdi
16124+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16125+ i = i + 1
16126+ .endr
16127+ jmp 2f
16128+1:
16129+#endif
16130+
16131+ i = 0
16132+ .rept USER_PGD_PTRS
16133+ movb $0x67,i*8(%rdi)
16134+ i = i + 1
16135+ .endr
16136+
16137+#ifdef CONFIG_PARAVIRT
16138+2: PV_RESTORE_REGS(CLBR_RDI)
16139+ popq %rbx
16140+#endif
16141+
16142+ popq %rdi
16143+ pax_force_retaddr
16144+ retq
16145+ENDPROC(pax_exit_kernel_user)
16146+#endif
16147+
16148+.macro pax_erase_kstack
16149+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16150+ call pax_erase_kstack
16151+#endif
16152+.endm
16153+
16154+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16155+/*
16156+ * r11: thread_info
16157+ * rcx, rdx: can be clobbered
16158+ */
16159+ENTRY(pax_erase_kstack)
16160+ pushq %rdi
16161+ pushq %rax
16162+ pushq %r11
16163+
16164+ GET_THREAD_INFO(%r11)
16165+ mov TI_lowest_stack(%r11), %rdi
16166+ mov $-0xBEEF, %rax
16167+ std
16168+
16169+1: mov %edi, %ecx
16170+ and $THREAD_SIZE_asm - 1, %ecx
16171+ shr $3, %ecx
16172+ repne scasq
16173+ jecxz 2f
16174+
16175+ cmp $2*8, %ecx
16176+ jc 2f
16177+
16178+ mov $2*8, %ecx
16179+ repe scasq
16180+ jecxz 2f
16181+ jne 1b
16182+
16183+2: cld
16184+ mov %esp, %ecx
16185+ sub %edi, %ecx
16186+
16187+ cmp $THREAD_SIZE_asm, %rcx
16188+ jb 3f
16189+ ud2
16190+3:
16191+
16192+ shr $3, %ecx
16193+ rep stosq
16194+
16195+ mov TI_task_thread_sp0(%r11), %rdi
16196+ sub $256, %rdi
16197+ mov %rdi, TI_lowest_stack(%r11)
16198+
16199+ popq %r11
16200+ popq %rax
16201+ popq %rdi
16202+ pax_force_retaddr
16203+ ret
16204+ENDPROC(pax_erase_kstack)
16205+#endif
16206
16207 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16208 #ifdef CONFIG_TRACE_IRQFLAGS
16209@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
16210 .endm
16211
16212 .macro UNFAKE_STACK_FRAME
16213- addq $8*6, %rsp
16214- CFI_ADJUST_CFA_OFFSET -(6*8)
16215+ addq $8*6 + ARG_SKIP, %rsp
16216+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16217 .endm
16218
16219 /*
16220@@ -317,7 +601,7 @@ ENTRY(save_args)
16221 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16222 movq_cfi rbp, 8 /* push %rbp */
16223 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16224- testl $3, CS(%rdi)
16225+ testb $3, CS(%rdi)
16226 je 1f
16227 SWAPGS
16228 /*
16229@@ -337,9 +621,10 @@ ENTRY(save_args)
16230 * We entered an interrupt context - irqs are off:
16231 */
16232 2: TRACE_IRQS_OFF
16233+ pax_force_retaddr
16234 ret
16235 CFI_ENDPROC
16236-END(save_args)
16237+ENDPROC(save_args)
16238
16239 ENTRY(save_rest)
16240 PARTIAL_FRAME 1 REST_SKIP+8
16241@@ -352,9 +637,10 @@ ENTRY(save_rest)
16242 movq_cfi r15, R15+16
16243 movq %r11, 8(%rsp) /* return address */
16244 FIXUP_TOP_OF_STACK %r11, 16
16245+ pax_force_retaddr
16246 ret
16247 CFI_ENDPROC
16248-END(save_rest)
16249+ENDPROC(save_rest)
16250
16251 /* save complete stack frame */
16252 .pushsection .kprobes.text, "ax"
16253@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16254 js 1f /* negative -> in kernel */
16255 SWAPGS
16256 xorl %ebx,%ebx
16257-1: ret
16258+1: pax_force_retaddr_bts
16259+ ret
16260 CFI_ENDPROC
16261-END(save_paranoid)
16262+ENDPROC(save_paranoid)
16263 .popsection
16264
16265 /*
16266@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16267
16268 RESTORE_REST
16269
16270- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16271+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16272 je int_ret_from_sys_call
16273
16274 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16275@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16276 jmp ret_from_sys_call # go to the SYSRET fastpath
16277
16278 CFI_ENDPROC
16279-END(ret_from_fork)
16280+ENDPROC(ret_from_fork)
16281
16282 /*
16283 * System call entry. Upto 6 arguments in registers are supported.
16284@@ -455,7 +742,7 @@ END(ret_from_fork)
16285 ENTRY(system_call)
16286 CFI_STARTPROC simple
16287 CFI_SIGNAL_FRAME
16288- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16289+ CFI_DEF_CFA rsp,0
16290 CFI_REGISTER rip,rcx
16291 /*CFI_REGISTER rflags,r11*/
16292 SWAPGS_UNSAFE_STACK
16293@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16294
16295 movq %rsp,PER_CPU_VAR(old_rsp)
16296 movq PER_CPU_VAR(kernel_stack),%rsp
16297+ SAVE_ARGS 8*6,1
16298+ pax_enter_kernel_user
16299 /*
16300 * No need to follow this irqs off/on section - it's straight
16301 * and short:
16302 */
16303 ENABLE_INTERRUPTS(CLBR_NONE)
16304- SAVE_ARGS 8,1
16305 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16306 movq %rcx,RIP-ARGOFFSET(%rsp)
16307 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16308@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16309 system_call_fastpath:
16310 cmpq $__NR_syscall_max,%rax
16311 ja badsys
16312- movq %r10,%rcx
16313+ movq R10-ARGOFFSET(%rsp),%rcx
16314 call *sys_call_table(,%rax,8) # XXX: rip relative
16315 movq %rax,RAX-ARGOFFSET(%rsp)
16316 /*
16317@@ -502,6 +790,8 @@ sysret_check:
16318 andl %edi,%edx
16319 jnz sysret_careful
16320 CFI_REMEMBER_STATE
16321+ pax_exit_kernel_user
16322+ pax_erase_kstack
16323 /*
16324 * sysretq will re-enable interrupts:
16325 */
16326@@ -555,14 +845,18 @@ badsys:
16327 * jump back to the normal fast path.
16328 */
16329 auditsys:
16330- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16331+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16332 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16333 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16334 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16335 movq %rax,%rsi /* 2nd arg: syscall number */
16336 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16337 call audit_syscall_entry
16338+
16339+ pax_erase_kstack
16340+
16341 LOAD_ARGS 0 /* reload call-clobbered registers */
16342+ pax_set_fptr_mask
16343 jmp system_call_fastpath
16344
16345 /*
16346@@ -592,16 +886,20 @@ tracesys:
16347 FIXUP_TOP_OF_STACK %rdi
16348 movq %rsp,%rdi
16349 call syscall_trace_enter
16350+
16351+ pax_erase_kstack
16352+
16353 /*
16354 * Reload arg registers from stack in case ptrace changed them.
16355 * We don't reload %rax because syscall_trace_enter() returned
16356 * the value it wants us to use in the table lookup.
16357 */
16358 LOAD_ARGS ARGOFFSET, 1
16359+ pax_set_fptr_mask
16360 RESTORE_REST
16361 cmpq $__NR_syscall_max,%rax
16362 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16363- movq %r10,%rcx /* fixup for C */
16364+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16365 call *sys_call_table(,%rax,8)
16366 movq %rax,RAX-ARGOFFSET(%rsp)
16367 /* Use IRET because user could have changed frame */
16368@@ -613,7 +911,7 @@ tracesys:
16369 GLOBAL(int_ret_from_sys_call)
16370 DISABLE_INTERRUPTS(CLBR_NONE)
16371 TRACE_IRQS_OFF
16372- testl $3,CS-ARGOFFSET(%rsp)
16373+ testb $3,CS-ARGOFFSET(%rsp)
16374 je retint_restore_args
16375 movl $_TIF_ALLWORK_MASK,%edi
16376 /* edi: mask to check */
16377@@ -624,6 +922,7 @@ GLOBAL(int_with_check)
16378 andl %edi,%edx
16379 jnz int_careful
16380 andl $~TS_COMPAT,TI_status(%rcx)
16381+ pax_erase_kstack
16382 jmp retint_swapgs
16383
16384 /* Either reschedule or signal or syscall exit tracking needed. */
16385@@ -674,7 +973,7 @@ int_restore_rest:
16386 TRACE_IRQS_OFF
16387 jmp int_with_check
16388 CFI_ENDPROC
16389-END(system_call)
16390+ENDPROC(system_call)
16391
16392 /*
16393 * Certain special system calls that need to save a complete full stack frame.
16394@@ -690,7 +989,7 @@ ENTRY(\label)
16395 call \func
16396 jmp ptregscall_common
16397 CFI_ENDPROC
16398-END(\label)
16399+ENDPROC(\label)
16400 .endm
16401
16402 PTREGSCALL stub_clone, sys_clone, %r8
16403@@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
16404 movq_cfi_restore R12+8, r12
16405 movq_cfi_restore RBP+8, rbp
16406 movq_cfi_restore RBX+8, rbx
16407+ pax_force_retaddr
16408 ret $REST_SKIP /* pop extended registers */
16409 CFI_ENDPROC
16410-END(ptregscall_common)
16411+ENDPROC(ptregscall_common)
16412
16413 ENTRY(stub_execve)
16414 CFI_STARTPROC
16415@@ -726,7 +1026,7 @@ ENTRY(stub_execve)
16416 RESTORE_REST
16417 jmp int_ret_from_sys_call
16418 CFI_ENDPROC
16419-END(stub_execve)
16420+ENDPROC(stub_execve)
16421
16422 /*
16423 * sigreturn is special because it needs to restore all registers on return.
16424@@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16425 RESTORE_REST
16426 jmp int_ret_from_sys_call
16427 CFI_ENDPROC
16428-END(stub_rt_sigreturn)
16429+ENDPROC(stub_rt_sigreturn)
16430
16431 /*
16432 * Build the entry stubs and pointer table with some assembler magic.
16433@@ -780,7 +1080,7 @@ vector=vector+1
16434 2: jmp common_interrupt
16435 .endr
16436 CFI_ENDPROC
16437-END(irq_entries_start)
16438+ENDPROC(irq_entries_start)
16439
16440 .previous
16441 END(interrupt)
16442@@ -800,6 +1100,16 @@ END(interrupt)
16443 CFI_ADJUST_CFA_OFFSET 10*8
16444 call save_args
16445 PARTIAL_FRAME 0
16446+#ifdef CONFIG_PAX_MEMORY_UDEREF
16447+ testb $3, CS(%rdi)
16448+ jnz 1f
16449+ pax_enter_kernel
16450+ jmp 2f
16451+1: pax_enter_kernel_user
16452+2:
16453+#else
16454+ pax_enter_kernel
16455+#endif
16456 call \func
16457 .endm
16458
16459@@ -822,7 +1132,7 @@ ret_from_intr:
16460 CFI_ADJUST_CFA_OFFSET -8
16461 exit_intr:
16462 GET_THREAD_INFO(%rcx)
16463- testl $3,CS-ARGOFFSET(%rsp)
16464+ testb $3,CS-ARGOFFSET(%rsp)
16465 je retint_kernel
16466
16467 /* Interrupt came from user space */
16468@@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
16469 * The iretq could re-enable interrupts:
16470 */
16471 DISABLE_INTERRUPTS(CLBR_ANY)
16472+ pax_exit_kernel_user
16473 TRACE_IRQS_IRETQ
16474 SWAPGS
16475 jmp restore_args
16476
16477 retint_restore_args: /* return to kernel space */
16478 DISABLE_INTERRUPTS(CLBR_ANY)
16479+ pax_exit_kernel
16480+ pax_force_retaddr RIP-ARGOFFSET
16481 /*
16482 * The iretq could re-enable interrupts:
16483 */
16484@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16485 #endif
16486
16487 CFI_ENDPROC
16488-END(common_interrupt)
16489+ENDPROC(common_interrupt)
16490
16491 /*
16492 * APIC interrupts.
16493@@ -953,7 +1266,7 @@ ENTRY(\sym)
16494 interrupt \do_sym
16495 jmp ret_from_intr
16496 CFI_ENDPROC
16497-END(\sym)
16498+ENDPROC(\sym)
16499 .endm
16500
16501 #ifdef CONFIG_SMP
16502@@ -1032,12 +1345,22 @@ ENTRY(\sym)
16503 CFI_ADJUST_CFA_OFFSET 15*8
16504 call error_entry
16505 DEFAULT_FRAME 0
16506+#ifdef CONFIG_PAX_MEMORY_UDEREF
16507+ testb $3, CS(%rsp)
16508+ jnz 1f
16509+ pax_enter_kernel
16510+ jmp 2f
16511+1: pax_enter_kernel_user
16512+2:
16513+#else
16514+ pax_enter_kernel
16515+#endif
16516 movq %rsp,%rdi /* pt_regs pointer */
16517 xorl %esi,%esi /* no error code */
16518 call \do_sym
16519 jmp error_exit /* %ebx: no swapgs flag */
16520 CFI_ENDPROC
16521-END(\sym)
16522+ENDPROC(\sym)
16523 .endm
16524
16525 .macro paranoidzeroentry sym do_sym
16526@@ -1049,12 +1372,22 @@ ENTRY(\sym)
16527 subq $15*8, %rsp
16528 call save_paranoid
16529 TRACE_IRQS_OFF
16530+#ifdef CONFIG_PAX_MEMORY_UDEREF
16531+ testb $3, CS(%rsp)
16532+ jnz 1f
16533+ pax_enter_kernel
16534+ jmp 2f
16535+1: pax_enter_kernel_user
16536+2:
16537+#else
16538+ pax_enter_kernel
16539+#endif
16540 movq %rsp,%rdi /* pt_regs pointer */
16541 xorl %esi,%esi /* no error code */
16542 call \do_sym
16543 jmp paranoid_exit /* %ebx: no swapgs flag */
16544 CFI_ENDPROC
16545-END(\sym)
16546+ENDPROC(\sym)
16547 .endm
16548
16549 .macro paranoidzeroentry_ist sym do_sym ist
16550@@ -1066,15 +1399,30 @@ ENTRY(\sym)
16551 subq $15*8, %rsp
16552 call save_paranoid
16553 TRACE_IRQS_OFF
16554+#ifdef CONFIG_PAX_MEMORY_UDEREF
16555+ testb $3, CS(%rsp)
16556+ jnz 1f
16557+ pax_enter_kernel
16558+ jmp 2f
16559+1: pax_enter_kernel_user
16560+2:
16561+#else
16562+ pax_enter_kernel
16563+#endif
16564 movq %rsp,%rdi /* pt_regs pointer */
16565 xorl %esi,%esi /* no error code */
16566- PER_CPU(init_tss, %rbp)
16567+#ifdef CONFIG_SMP
16568+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16569+ lea init_tss(%rbp), %rbp
16570+#else
16571+ lea init_tss(%rip), %rbp
16572+#endif
16573 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16574 call \do_sym
16575 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16576 jmp paranoid_exit /* %ebx: no swapgs flag */
16577 CFI_ENDPROC
16578-END(\sym)
16579+ENDPROC(\sym)
16580 .endm
16581
16582 .macro errorentry sym do_sym
16583@@ -1085,13 +1433,23 @@ ENTRY(\sym)
16584 CFI_ADJUST_CFA_OFFSET 15*8
16585 call error_entry
16586 DEFAULT_FRAME 0
16587+#ifdef CONFIG_PAX_MEMORY_UDEREF
16588+ testb $3, CS(%rsp)
16589+ jnz 1f
16590+ pax_enter_kernel
16591+ jmp 2f
16592+1: pax_enter_kernel_user
16593+2:
16594+#else
16595+ pax_enter_kernel
16596+#endif
16597 movq %rsp,%rdi /* pt_regs pointer */
16598 movq ORIG_RAX(%rsp),%rsi /* get error code */
16599 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16600 call \do_sym
16601 jmp error_exit /* %ebx: no swapgs flag */
16602 CFI_ENDPROC
16603-END(\sym)
16604+ENDPROC(\sym)
16605 .endm
16606
16607 /* error code is on the stack already */
16608@@ -1104,13 +1462,23 @@ ENTRY(\sym)
16609 call save_paranoid
16610 DEFAULT_FRAME 0
16611 TRACE_IRQS_OFF
16612+#ifdef CONFIG_PAX_MEMORY_UDEREF
16613+ testb $3, CS(%rsp)
16614+ jnz 1f
16615+ pax_enter_kernel
16616+ jmp 2f
16617+1: pax_enter_kernel_user
16618+2:
16619+#else
16620+ pax_enter_kernel
16621+#endif
16622 movq %rsp,%rdi /* pt_regs pointer */
16623 movq ORIG_RAX(%rsp),%rsi /* get error code */
16624 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16625 call \do_sym
16626 jmp paranoid_exit /* %ebx: no swapgs flag */
16627 CFI_ENDPROC
16628-END(\sym)
16629+ENDPROC(\sym)
16630 .endm
16631
16632 zeroentry divide_error do_divide_error
16633@@ -1141,9 +1509,10 @@ gs_change:
16634 SWAPGS
16635 popf
16636 CFI_ADJUST_CFA_OFFSET -8
16637+ pax_force_retaddr
16638 ret
16639 CFI_ENDPROC
16640-END(native_load_gs_index)
16641+ENDPROC(native_load_gs_index)
16642
16643 .section __ex_table,"a"
16644 .align 8
16645@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16646 * of hacks for example to fork off the per-CPU idle tasks.
16647 * [Hopefully no generic code relies on the reschedule -AK]
16648 */
16649- RESTORE_ALL
16650+ RESTORE_REST
16651 UNFAKE_STACK_FRAME
16652+ pax_force_retaddr
16653 ret
16654 CFI_ENDPROC
16655-END(kernel_thread)
16656+ENDPROC(kernel_thread)
16657
16658 ENTRY(child_rip)
16659 pushq $0 # fake return address
16660@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16661 */
16662 movq %rdi, %rax
16663 movq %rsi, %rdi
16664+ pax_force_fptr %rax
16665 call *%rax
16666 # exit
16667 mov %eax, %edi
16668 call do_exit
16669 ud2 # padding for call trace
16670 CFI_ENDPROC
16671-END(child_rip)
16672+ENDPROC(child_rip)
16673
16674 /*
16675 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16676@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16677 RESTORE_REST
16678 testq %rax,%rax
16679 je int_ret_from_sys_call
16680- RESTORE_ARGS
16681 UNFAKE_STACK_FRAME
16682+ pax_force_retaddr
16683 ret
16684 CFI_ENDPROC
16685-END(kernel_execve)
16686+ENDPROC(kernel_execve)
16687
16688 /* Call softirq on interrupt stack. Interrupts are off. */
16689 ENTRY(call_softirq)
16690@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16691 CFI_DEF_CFA_REGISTER rsp
16692 CFI_ADJUST_CFA_OFFSET -8
16693 decl PER_CPU_VAR(irq_count)
16694+ pax_force_retaddr
16695 ret
16696 CFI_ENDPROC
16697-END(call_softirq)
16698+ENDPROC(call_softirq)
16699
16700 #ifdef CONFIG_XEN
16701 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16702@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16703 decl PER_CPU_VAR(irq_count)
16704 jmp error_exit
16705 CFI_ENDPROC
16706-END(xen_do_hypervisor_callback)
16707+ENDPROC(xen_do_hypervisor_callback)
16708
16709 /*
16710 * Hypervisor uses this for application faults while it executes.
16711@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16712 SAVE_ALL
16713 jmp error_exit
16714 CFI_ENDPROC
16715-END(xen_failsafe_callback)
16716+ENDPROC(xen_failsafe_callback)
16717
16718 #endif /* CONFIG_XEN */
16719
16720@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16721 TRACE_IRQS_OFF
16722 testl %ebx,%ebx /* swapgs needed? */
16723 jnz paranoid_restore
16724- testl $3,CS(%rsp)
16725+ testb $3,CS(%rsp)
16726 jnz paranoid_userspace
16727+#ifdef CONFIG_PAX_MEMORY_UDEREF
16728+ pax_exit_kernel
16729+ TRACE_IRQS_IRETQ 0
16730+ SWAPGS_UNSAFE_STACK
16731+ RESTORE_ALL 8
16732+ pax_force_retaddr_bts
16733+ jmp irq_return
16734+#endif
16735 paranoid_swapgs:
16736+#ifdef CONFIG_PAX_MEMORY_UDEREF
16737+ pax_exit_kernel_user
16738+#else
16739+ pax_exit_kernel
16740+#endif
16741 TRACE_IRQS_IRETQ 0
16742 SWAPGS_UNSAFE_STACK
16743 RESTORE_ALL 8
16744 jmp irq_return
16745 paranoid_restore:
16746+ pax_exit_kernel
16747 TRACE_IRQS_IRETQ 0
16748 RESTORE_ALL 8
16749+ pax_force_retaddr_bts
16750 jmp irq_return
16751 paranoid_userspace:
16752 GET_THREAD_INFO(%rcx)
16753@@ -1443,7 +1830,7 @@ paranoid_schedule:
16754 TRACE_IRQS_OFF
16755 jmp paranoid_userspace
16756 CFI_ENDPROC
16757-END(paranoid_exit)
16758+ENDPROC(paranoid_exit)
16759
16760 /*
16761 * Exception entry point. This expects an error code/orig_rax on the stack.
16762@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16763 movq_cfi r14, R14+8
16764 movq_cfi r15, R15+8
16765 xorl %ebx,%ebx
16766- testl $3,CS+8(%rsp)
16767+ testb $3,CS+8(%rsp)
16768 je error_kernelspace
16769 error_swapgs:
16770 SWAPGS
16771 error_sti:
16772 TRACE_IRQS_OFF
16773+ pax_force_retaddr_bts
16774 ret
16775 CFI_ENDPROC
16776
16777@@ -1497,7 +1885,7 @@ error_kernelspace:
16778 cmpq $gs_change,RIP+8(%rsp)
16779 je error_swapgs
16780 jmp error_sti
16781-END(error_entry)
16782+ENDPROC(error_entry)
16783
16784
16785 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16786@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16787 jnz retint_careful
16788 jmp retint_swapgs
16789 CFI_ENDPROC
16790-END(error_exit)
16791+ENDPROC(error_exit)
16792
16793
16794 /* runs on exception stack */
16795@@ -1529,6 +1917,16 @@ ENTRY(nmi)
16796 CFI_ADJUST_CFA_OFFSET 15*8
16797 call save_paranoid
16798 DEFAULT_FRAME 0
16799+#ifdef CONFIG_PAX_MEMORY_UDEREF
16800+ testb $3, CS(%rsp)
16801+ jnz 1f
16802+ pax_enter_kernel
16803+ jmp 2f
16804+1: pax_enter_kernel_user
16805+2:
16806+#else
16807+ pax_enter_kernel
16808+#endif
16809 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16810 movq %rsp,%rdi
16811 movq $-1,%rsi
16812@@ -1539,12 +1937,28 @@ ENTRY(nmi)
16813 DISABLE_INTERRUPTS(CLBR_NONE)
16814 testl %ebx,%ebx /* swapgs needed? */
16815 jnz nmi_restore
16816- testl $3,CS(%rsp)
16817+ testb $3,CS(%rsp)
16818 jnz nmi_userspace
16819+#ifdef CONFIG_PAX_MEMORY_UDEREF
16820+ pax_exit_kernel
16821+ SWAPGS_UNSAFE_STACK
16822+ RESTORE_ALL 8
16823+ pax_force_retaddr_bts
16824+ jmp irq_return
16825+#endif
16826 nmi_swapgs:
16827+#ifdef CONFIG_PAX_MEMORY_UDEREF
16828+ pax_exit_kernel_user
16829+#else
16830+ pax_exit_kernel
16831+#endif
16832 SWAPGS_UNSAFE_STACK
16833+ RESTORE_ALL 8
16834+ jmp irq_return
16835 nmi_restore:
16836+ pax_exit_kernel
16837 RESTORE_ALL 8
16838+ pax_force_retaddr_bts
16839 jmp irq_return
16840 nmi_userspace:
16841 GET_THREAD_INFO(%rcx)
16842@@ -1573,14 +1987,14 @@ nmi_schedule:
16843 jmp paranoid_exit
16844 CFI_ENDPROC
16845 #endif
16846-END(nmi)
16847+ENDPROC(nmi)
16848
16849 ENTRY(ignore_sysret)
16850 CFI_STARTPROC
16851 mov $-ENOSYS,%eax
16852 sysret
16853 CFI_ENDPROC
16854-END(ignore_sysret)
16855+ENDPROC(ignore_sysret)
16856
16857 /*
16858 * End of kprobes section
16859diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16860index 9dbb527..7b3615a 100644
16861--- a/arch/x86/kernel/ftrace.c
16862+++ b/arch/x86/kernel/ftrace.c
16863@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16864 static void *mod_code_newcode; /* holds the text to write to the IP */
16865
16866 static unsigned nmi_wait_count;
16867-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16868+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16869
16870 int ftrace_arch_read_dyn_info(char *buf, int size)
16871 {
16872@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16873
16874 r = snprintf(buf, size, "%u %u",
16875 nmi_wait_count,
16876- atomic_read(&nmi_update_count));
16877+ atomic_read_unchecked(&nmi_update_count));
16878 return r;
16879 }
16880
16881@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16882 {
16883 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16884 smp_rmb();
16885+ pax_open_kernel();
16886 ftrace_mod_code();
16887- atomic_inc(&nmi_update_count);
16888+ pax_close_kernel();
16889+ atomic_inc_unchecked(&nmi_update_count);
16890 }
16891 /* Must have previous changes seen before executions */
16892 smp_mb();
16893@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16894
16895
16896
16897-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16898+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16899
16900 static unsigned char *ftrace_nop_replace(void)
16901 {
16902@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16903 {
16904 unsigned char replaced[MCOUNT_INSN_SIZE];
16905
16906+ ip = ktla_ktva(ip);
16907+
16908 /*
16909 * Note: Due to modules and __init, code can
16910 * disappear and change, we need to protect against faulting
16911@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16912 unsigned char old[MCOUNT_INSN_SIZE], *new;
16913 int ret;
16914
16915- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16916+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16917 new = ftrace_call_replace(ip, (unsigned long)func);
16918 ret = ftrace_modify_code(ip, old, new);
16919
16920@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16921 switch (faulted) {
16922 case 0:
16923 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16924- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16925+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16926 break;
16927 case 1:
16928 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16929- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16930+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16931 break;
16932 case 2:
16933 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16934- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16935+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16936 break;
16937 }
16938
16939@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16940 {
16941 unsigned char code[MCOUNT_INSN_SIZE];
16942
16943+ ip = ktla_ktva(ip);
16944+
16945 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16946 return -EFAULT;
16947
16948diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16949index 4f8e250..df24706 100644
16950--- a/arch/x86/kernel/head32.c
16951+++ b/arch/x86/kernel/head32.c
16952@@ -16,6 +16,7 @@
16953 #include <asm/apic.h>
16954 #include <asm/io_apic.h>
16955 #include <asm/bios_ebda.h>
16956+#include <asm/boot.h>
16957
16958 static void __init i386_default_early_setup(void)
16959 {
16960@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16961 {
16962 reserve_trampoline_memory();
16963
16964- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16965+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16966
16967 #ifdef CONFIG_BLK_DEV_INITRD
16968 /* Reserve INITRD */
16969diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16970index 34c3308..6fc4e76 100644
16971--- a/arch/x86/kernel/head_32.S
16972+++ b/arch/x86/kernel/head_32.S
16973@@ -19,10 +19,17 @@
16974 #include <asm/setup.h>
16975 #include <asm/processor-flags.h>
16976 #include <asm/percpu.h>
16977+#include <asm/msr-index.h>
16978
16979 /* Physical address */
16980 #define pa(X) ((X) - __PAGE_OFFSET)
16981
16982+#ifdef CONFIG_PAX_KERNEXEC
16983+#define ta(X) (X)
16984+#else
16985+#define ta(X) ((X) - __PAGE_OFFSET)
16986+#endif
16987+
16988 /*
16989 * References to members of the new_cpu_data structure.
16990 */
16991@@ -52,11 +59,7 @@
16992 * and small than max_low_pfn, otherwise will waste some page table entries
16993 */
16994
16995-#if PTRS_PER_PMD > 1
16996-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16997-#else
16998-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16999-#endif
17000+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17001
17002 /* Enough space to fit pagetables for the low memory linear map */
17003 MAPPING_BEYOND_END = \
17004@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
17005 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17006
17007 /*
17008+ * Real beginning of normal "text" segment
17009+ */
17010+ENTRY(stext)
17011+ENTRY(_stext)
17012+
17013+/*
17014 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17015 * %esi points to the real-mode code as a 32-bit pointer.
17016 * CS and DS must be 4 GB flat segments, but we don't depend on
17017@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17018 * can.
17019 */
17020 __HEAD
17021+
17022+#ifdef CONFIG_PAX_KERNEXEC
17023+ jmp startup_32
17024+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17025+.fill PAGE_SIZE-5,1,0xcc
17026+#endif
17027+
17028 ENTRY(startup_32)
17029+ movl pa(stack_start),%ecx
17030+
17031 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
17032 us to not reload segments */
17033 testb $(1<<6), BP_loadflags(%esi)
17034@@ -95,7 +113,60 @@ ENTRY(startup_32)
17035 movl %eax,%es
17036 movl %eax,%fs
17037 movl %eax,%gs
17038+ movl %eax,%ss
17039 2:
17040+ leal -__PAGE_OFFSET(%ecx),%esp
17041+
17042+#ifdef CONFIG_SMP
17043+ movl $pa(cpu_gdt_table),%edi
17044+ movl $__per_cpu_load,%eax
17045+ movw %ax,__KERNEL_PERCPU + 2(%edi)
17046+ rorl $16,%eax
17047+ movb %al,__KERNEL_PERCPU + 4(%edi)
17048+ movb %ah,__KERNEL_PERCPU + 7(%edi)
17049+ movl $__per_cpu_end - 1,%eax
17050+ subl $__per_cpu_start,%eax
17051+ movw %ax,__KERNEL_PERCPU + 0(%edi)
17052+#endif
17053+
17054+#ifdef CONFIG_PAX_MEMORY_UDEREF
17055+ movl $NR_CPUS,%ecx
17056+ movl $pa(cpu_gdt_table),%edi
17057+1:
17058+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17059+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17060+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17061+ addl $PAGE_SIZE_asm,%edi
17062+ loop 1b
17063+#endif
17064+
17065+#ifdef CONFIG_PAX_KERNEXEC
17066+ movl $pa(boot_gdt),%edi
17067+ movl $__LOAD_PHYSICAL_ADDR,%eax
17068+ movw %ax,__BOOT_CS + 2(%edi)
17069+ rorl $16,%eax
17070+ movb %al,__BOOT_CS + 4(%edi)
17071+ movb %ah,__BOOT_CS + 7(%edi)
17072+ rorl $16,%eax
17073+
17074+ ljmp $(__BOOT_CS),$1f
17075+1:
17076+
17077+ movl $NR_CPUS,%ecx
17078+ movl $pa(cpu_gdt_table),%edi
17079+ addl $__PAGE_OFFSET,%eax
17080+1:
17081+ movw %ax,__KERNEL_CS + 2(%edi)
17082+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
17083+ rorl $16,%eax
17084+ movb %al,__KERNEL_CS + 4(%edi)
17085+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
17086+ movb %ah,__KERNEL_CS + 7(%edi)
17087+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
17088+ rorl $16,%eax
17089+ addl $PAGE_SIZE_asm,%edi
17090+ loop 1b
17091+#endif
17092
17093 /*
17094 * Clear BSS first so that there are no surprises...
17095@@ -140,9 +211,7 @@ ENTRY(startup_32)
17096 cmpl $num_subarch_entries, %eax
17097 jae bad_subarch
17098
17099- movl pa(subarch_entries)(,%eax,4), %eax
17100- subl $__PAGE_OFFSET, %eax
17101- jmp *%eax
17102+ jmp *pa(subarch_entries)(,%eax,4)
17103
17104 bad_subarch:
17105 WEAK(lguest_entry)
17106@@ -154,10 +223,10 @@ WEAK(xen_entry)
17107 __INITDATA
17108
17109 subarch_entries:
17110- .long default_entry /* normal x86/PC */
17111- .long lguest_entry /* lguest hypervisor */
17112- .long xen_entry /* Xen hypervisor */
17113- .long default_entry /* Moorestown MID */
17114+ .long ta(default_entry) /* normal x86/PC */
17115+ .long ta(lguest_entry) /* lguest hypervisor */
17116+ .long ta(xen_entry) /* Xen hypervisor */
17117+ .long ta(default_entry) /* Moorestown MID */
17118 num_subarch_entries = (. - subarch_entries) / 4
17119 .previous
17120 #endif /* CONFIG_PARAVIRT */
17121@@ -218,8 +287,11 @@ default_entry:
17122 movl %eax, pa(max_pfn_mapped)
17123
17124 /* Do early initialization of the fixmap area */
17125- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17126- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17127+#ifdef CONFIG_COMPAT_VDSO
17128+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17129+#else
17130+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17131+#endif
17132 #else /* Not PAE */
17133
17134 page_pde_offset = (__PAGE_OFFSET >> 20);
17135@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17136 movl %eax, pa(max_pfn_mapped)
17137
17138 /* Do early initialization of the fixmap area */
17139- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17140- movl %eax,pa(swapper_pg_dir+0xffc)
17141+#ifdef CONFIG_COMPAT_VDSO
17142+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
17143+#else
17144+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
17145+#endif
17146 #endif
17147 jmp 3f
17148 /*
17149@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
17150 movl %eax,%es
17151 movl %eax,%fs
17152 movl %eax,%gs
17153+ movl pa(stack_start),%ecx
17154+ movl %eax,%ss
17155+ leal -__PAGE_OFFSET(%ecx),%esp
17156 #endif /* CONFIG_SMP */
17157 3:
17158
17159@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
17160 orl %edx,%eax
17161 movl %eax,%cr4
17162
17163+#ifdef CONFIG_X86_PAE
17164 btl $5, %eax # check if PAE is enabled
17165 jnc 6f
17166
17167@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
17168 cpuid
17169 cmpl $0x80000000, %eax
17170 jbe 6f
17171+
17172+ /* Clear bogus XD_DISABLE bits */
17173+ call verify_cpu
17174+
17175 mov $0x80000001, %eax
17176 cpuid
17177 /* Execute Disable bit supported? */
17178@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
17179 jnc 6f
17180
17181 /* Setup EFER (Extended Feature Enable Register) */
17182- movl $0xc0000080, %ecx
17183+ movl $MSR_EFER, %ecx
17184 rdmsr
17185
17186 btsl $11, %eax
17187 /* Make changes effective */
17188 wrmsr
17189
17190+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17191+ movl $1,pa(nx_enabled)
17192+#endif
17193+
17194 6:
17195
17196 /*
17197@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
17198 movl %eax,%cr0 /* ..and set paging (PG) bit */
17199 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
17200 1:
17201- /* Set up the stack pointer */
17202- lss stack_start,%esp
17203+ /* Shift the stack pointer to a virtual address */
17204+ addl $__PAGE_OFFSET, %esp
17205
17206 /*
17207 * Initialize eflags. Some BIOS's leave bits like NT set. This would
17208@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
17209
17210 #ifdef CONFIG_SMP
17211 cmpb $0, ready
17212- jz 1f /* Initial CPU cleans BSS */
17213- jmp checkCPUtype
17214-1:
17215+ jnz checkCPUtype
17216 #endif /* CONFIG_SMP */
17217
17218 /*
17219@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17220 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17221 movl %eax,%ss # after changing gdt.
17222
17223- movl $(__USER_DS),%eax # DS/ES contains default USER segment
17224+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17225 movl %eax,%ds
17226 movl %eax,%es
17227
17228@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17229 */
17230 cmpb $0,ready
17231 jne 1f
17232- movl $per_cpu__gdt_page,%eax
17233+ movl $cpu_gdt_table,%eax
17234 movl $per_cpu__stack_canary,%ecx
17235+#ifdef CONFIG_SMP
17236+ addl $__per_cpu_load,%ecx
17237+#endif
17238 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17239 shrl $16, %ecx
17240 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17241 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17242 1:
17243-#endif
17244 movl $(__KERNEL_STACK_CANARY),%eax
17245+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17246+ movl $(__USER_DS),%eax
17247+#else
17248+ xorl %eax,%eax
17249+#endif
17250 movl %eax,%gs
17251
17252 xorl %eax,%eax # Clear LDT
17253@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17254
17255 cld # gcc2 wants the direction flag cleared at all times
17256 pushl $0 # fake return address for unwinder
17257-#ifdef CONFIG_SMP
17258- movb ready, %cl
17259 movb $1, ready
17260- cmpb $0,%cl # the first CPU calls start_kernel
17261- je 1f
17262- movl (stack_start), %esp
17263-1:
17264-#endif /* CONFIG_SMP */
17265 jmp *(initial_code)
17266
17267 /*
17268@@ -546,22 +631,22 @@ early_page_fault:
17269 jmp early_fault
17270
17271 early_fault:
17272- cld
17273 #ifdef CONFIG_PRINTK
17274+ cmpl $1,%ss:early_recursion_flag
17275+ je hlt_loop
17276+ incl %ss:early_recursion_flag
17277+ cld
17278 pusha
17279 movl $(__KERNEL_DS),%eax
17280 movl %eax,%ds
17281 movl %eax,%es
17282- cmpl $2,early_recursion_flag
17283- je hlt_loop
17284- incl early_recursion_flag
17285 movl %cr2,%eax
17286 pushl %eax
17287 pushl %edx /* trapno */
17288 pushl $fault_msg
17289 call printk
17290+; call dump_stack
17291 #endif
17292- call dump_stack
17293 hlt_loop:
17294 hlt
17295 jmp hlt_loop
17296@@ -569,8 +654,11 @@ hlt_loop:
17297 /* This is the default interrupt "handler" :-) */
17298 ALIGN
17299 ignore_int:
17300- cld
17301 #ifdef CONFIG_PRINTK
17302+ cmpl $2,%ss:early_recursion_flag
17303+ je hlt_loop
17304+ incl %ss:early_recursion_flag
17305+ cld
17306 pushl %eax
17307 pushl %ecx
17308 pushl %edx
17309@@ -579,9 +667,6 @@ ignore_int:
17310 movl $(__KERNEL_DS),%eax
17311 movl %eax,%ds
17312 movl %eax,%es
17313- cmpl $2,early_recursion_flag
17314- je hlt_loop
17315- incl early_recursion_flag
17316 pushl 16(%esp)
17317 pushl 24(%esp)
17318 pushl 32(%esp)
17319@@ -600,6 +685,8 @@ ignore_int:
17320 #endif
17321 iret
17322
17323+#include "verify_cpu.S"
17324+
17325 __REFDATA
17326 .align 4
17327 ENTRY(initial_code)
17328@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17329 /*
17330 * BSS section
17331 */
17332-__PAGE_ALIGNED_BSS
17333- .align PAGE_SIZE_asm
17334 #ifdef CONFIG_X86_PAE
17335+.section .swapper_pg_pmd,"a",@progbits
17336 swapper_pg_pmd:
17337 .fill 1024*KPMDS,4,0
17338 #else
17339+.section .swapper_pg_dir,"a",@progbits
17340 ENTRY(swapper_pg_dir)
17341 .fill 1024,4,0
17342 #endif
17343+.section .swapper_pg_fixmap,"a",@progbits
17344 swapper_pg_fixmap:
17345 .fill 1024,4,0
17346 #ifdef CONFIG_X86_TRAMPOLINE
17347+.section .trampoline_pg_dir,"a",@progbits
17348 ENTRY(trampoline_pg_dir)
17349+#ifdef CONFIG_X86_PAE
17350+ .fill 4,8,0
17351+#else
17352 .fill 1024,4,0
17353 #endif
17354+#endif
17355+
17356+.section .empty_zero_page,"a",@progbits
17357 ENTRY(empty_zero_page)
17358 .fill 4096,1,0
17359
17360 /*
17361+ * The IDT has to be page-aligned to simplify the Pentium
17362+ * F0 0F bug workaround.. We have a special link segment
17363+ * for this.
17364+ */
17365+.section .idt,"a",@progbits
17366+ENTRY(idt_table)
17367+ .fill 256,8,0
17368+
17369+/*
17370 * This starts the data section.
17371 */
17372 #ifdef CONFIG_X86_PAE
17373-__PAGE_ALIGNED_DATA
17374- /* Page-aligned for the benefit of paravirt? */
17375- .align PAGE_SIZE_asm
17376+.section .swapper_pg_dir,"a",@progbits
17377+
17378 ENTRY(swapper_pg_dir)
17379 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17380 # if KPMDS == 3
17381@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17382 # error "Kernel PMDs should be 1, 2 or 3"
17383 # endif
17384 .align PAGE_SIZE_asm /* needs to be page-sized too */
17385+
17386+#ifdef CONFIG_PAX_PER_CPU_PGD
17387+ENTRY(cpu_pgd)
17388+ .rept NR_CPUS
17389+ .fill 4,8,0
17390+ .endr
17391+#endif
17392+
17393 #endif
17394
17395 .data
17396+.balign 4
17397 ENTRY(stack_start)
17398- .long init_thread_union+THREAD_SIZE
17399- .long __BOOT_DS
17400+ .long init_thread_union+THREAD_SIZE-8
17401
17402 ready: .byte 0
17403
17404+.section .rodata,"a",@progbits
17405 early_recursion_flag:
17406 .long 0
17407
17408@@ -697,7 +809,7 @@ fault_msg:
17409 .word 0 # 32 bit align gdt_desc.address
17410 boot_gdt_descr:
17411 .word __BOOT_DS+7
17412- .long boot_gdt - __PAGE_OFFSET
17413+ .long pa(boot_gdt)
17414
17415 .word 0 # 32-bit align idt_desc.address
17416 idt_descr:
17417@@ -708,7 +820,7 @@ idt_descr:
17418 .word 0 # 32 bit align gdt_desc.address
17419 ENTRY(early_gdt_descr)
17420 .word GDT_ENTRIES*8-1
17421- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17422+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17423
17424 /*
17425 * The boot_gdt must mirror the equivalent in setup.S and is
17426@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17427 .align L1_CACHE_BYTES
17428 ENTRY(boot_gdt)
17429 .fill GDT_ENTRY_BOOT_CS,8,0
17430- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17431- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17432+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17433+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17434+
17435+ .align PAGE_SIZE_asm
17436+ENTRY(cpu_gdt_table)
17437+ .rept NR_CPUS
17438+ .quad 0x0000000000000000 /* NULL descriptor */
17439+ .quad 0x0000000000000000 /* 0x0b reserved */
17440+ .quad 0x0000000000000000 /* 0x13 reserved */
17441+ .quad 0x0000000000000000 /* 0x1b reserved */
17442+
17443+#ifdef CONFIG_PAX_KERNEXEC
17444+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17445+#else
17446+ .quad 0x0000000000000000 /* 0x20 unused */
17447+#endif
17448+
17449+ .quad 0x0000000000000000 /* 0x28 unused */
17450+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17451+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17452+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17453+ .quad 0x0000000000000000 /* 0x4b reserved */
17454+ .quad 0x0000000000000000 /* 0x53 reserved */
17455+ .quad 0x0000000000000000 /* 0x5b reserved */
17456+
17457+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17458+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17459+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17460+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17461+
17462+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17463+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17464+
17465+ /*
17466+ * Segments used for calling PnP BIOS have byte granularity.
17467+ * The code segments and data segments have fixed 64k limits,
17468+ * the transfer segment sizes are set at run time.
17469+ */
17470+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17471+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17472+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17473+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17474+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17475+
17476+ /*
17477+ * The APM segments have byte granularity and their bases
17478+ * are set at run time. All have 64k limits.
17479+ */
17480+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17481+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17482+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17483+
17484+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17485+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17486+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17487+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17488+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17489+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17490+
17491+ /* Be sure this is zeroed to avoid false validations in Xen */
17492+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17493+ .endr
17494diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17495index 780cd92..758b2a6 100644
17496--- a/arch/x86/kernel/head_64.S
17497+++ b/arch/x86/kernel/head_64.S
17498@@ -19,6 +19,8 @@
17499 #include <asm/cache.h>
17500 #include <asm/processor-flags.h>
17501 #include <asm/percpu.h>
17502+#include <asm/cpufeature.h>
17503+#include <asm/alternative-asm.h>
17504
17505 #ifdef CONFIG_PARAVIRT
17506 #include <asm/asm-offsets.h>
17507@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17508 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17509 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17510 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17511+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17512+L3_VMALLOC_START = pud_index(VMALLOC_START)
17513+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17514+L3_VMALLOC_END = pud_index(VMALLOC_END)
17515+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17516+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17517
17518 .text
17519 __HEAD
17520@@ -85,35 +93,23 @@ startup_64:
17521 */
17522 addq %rbp, init_level4_pgt + 0(%rip)
17523 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17524+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17525+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17526+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17527 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17528
17529 addq %rbp, level3_ident_pgt + 0(%rip)
17530+#ifndef CONFIG_XEN
17531+ addq %rbp, level3_ident_pgt + 8(%rip)
17532+#endif
17533
17534- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17535- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17536+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17537+
17538+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17539+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17540
17541 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17542-
17543- /* Add an Identity mapping if I am above 1G */
17544- leaq _text(%rip), %rdi
17545- andq $PMD_PAGE_MASK, %rdi
17546-
17547- movq %rdi, %rax
17548- shrq $PUD_SHIFT, %rax
17549- andq $(PTRS_PER_PUD - 1), %rax
17550- jz ident_complete
17551-
17552- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17553- leaq level3_ident_pgt(%rip), %rbx
17554- movq %rdx, 0(%rbx, %rax, 8)
17555-
17556- movq %rdi, %rax
17557- shrq $PMD_SHIFT, %rax
17558- andq $(PTRS_PER_PMD - 1), %rax
17559- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17560- leaq level2_spare_pgt(%rip), %rbx
17561- movq %rdx, 0(%rbx, %rax, 8)
17562-ident_complete:
17563+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17564
17565 /*
17566 * Fixup the kernel text+data virtual addresses. Note that
17567@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17568 * after the boot processor executes this code.
17569 */
17570
17571- /* Enable PAE mode and PGE */
17572- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17573+ /* Enable PAE mode and PSE/PGE */
17574+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17575 movq %rax, %cr4
17576
17577 /* Setup early boot stage 4 level pagetables. */
17578@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17579 movl $MSR_EFER, %ecx
17580 rdmsr
17581 btsl $_EFER_SCE, %eax /* Enable System Call */
17582- btl $20,%edi /* No Execute supported? */
17583+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17584 jnc 1f
17585 btsl $_EFER_NX, %eax
17586+ leaq init_level4_pgt(%rip), %rdi
17587+#ifndef CONFIG_EFI
17588+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17589+#endif
17590+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17591+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17592+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17593 1: wrmsr /* Make changes effective */
17594
17595 /* Setup cr0 */
17596@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17597 * jump. In addition we need to ensure %cs is set so we make this
17598 * a far return.
17599 */
17600+ pax_set_fptr_mask
17601 movq initial_code(%rip),%rax
17602 pushq $0 # fake return address to stop unwinder
17603 pushq $__KERNEL_CS # set correct cs
17604@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17605 .quad x86_64_start_kernel
17606 ENTRY(initial_gs)
17607 .quad INIT_PER_CPU_VAR(irq_stack_union)
17608- __FINITDATA
17609
17610 ENTRY(stack_start)
17611 .quad init_thread_union+THREAD_SIZE-8
17612 .word 0
17613+ __FINITDATA
17614
17615 bad_address:
17616 jmp bad_address
17617
17618- .section ".init.text","ax"
17619+ __INIT
17620 #ifdef CONFIG_EARLY_PRINTK
17621 .globl early_idt_handlers
17622 early_idt_handlers:
17623@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17624 #endif /* EARLY_PRINTK */
17625 1: hlt
17626 jmp 1b
17627+ .previous
17628
17629 #ifdef CONFIG_EARLY_PRINTK
17630+ __INITDATA
17631 early_recursion_flag:
17632 .long 0
17633+ .previous
17634
17635+ .section .rodata,"a",@progbits
17636 early_idt_msg:
17637 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17638 early_idt_ripmsg:
17639 .asciz "RIP %s\n"
17640+ .previous
17641 #endif /* CONFIG_EARLY_PRINTK */
17642- .previous
17643
17644+ .section .rodata,"a",@progbits
17645 #define NEXT_PAGE(name) \
17646 .balign PAGE_SIZE; \
17647 ENTRY(name)
17648@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17649 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17650 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17651 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17652+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17653+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17654+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17655+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17656+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17657+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17658 .org init_level4_pgt + L4_START_KERNEL*8, 0
17659 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17660 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17661
17662+#ifdef CONFIG_PAX_PER_CPU_PGD
17663+NEXT_PAGE(cpu_pgd)
17664+ .rept NR_CPUS
17665+ .fill 512,8,0
17666+ .endr
17667+#endif
17668+
17669 NEXT_PAGE(level3_ident_pgt)
17670 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17671+#ifdef CONFIG_XEN
17672 .fill 511,8,0
17673+#else
17674+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17675+ .fill 510,8,0
17676+#endif
17677+
17678+NEXT_PAGE(level3_vmalloc_start_pgt)
17679+ .fill 512,8,0
17680+
17681+NEXT_PAGE(level3_vmalloc_end_pgt)
17682+ .fill 512,8,0
17683+
17684+NEXT_PAGE(level3_vmemmap_pgt)
17685+ .fill L3_VMEMMAP_START,8,0
17686+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17687
17688 NEXT_PAGE(level3_kernel_pgt)
17689 .fill L3_START_KERNEL,8,0
17690@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17691 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17692 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17693
17694+NEXT_PAGE(level2_vmemmap_pgt)
17695+ .fill 512,8,0
17696+
17697 NEXT_PAGE(level2_fixmap_pgt)
17698- .fill 506,8,0
17699- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17700- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17701- .fill 5,8,0
17702+ .fill 507,8,0
17703+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17704+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17705+ .fill 4,8,0
17706
17707-NEXT_PAGE(level1_fixmap_pgt)
17708+NEXT_PAGE(level1_vsyscall_pgt)
17709 .fill 512,8,0
17710
17711-NEXT_PAGE(level2_ident_pgt)
17712- /* Since I easily can, map the first 1G.
17713+ /* Since I easily can, map the first 2G.
17714 * Don't set NX because code runs from these pages.
17715 */
17716- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17717+NEXT_PAGE(level2_ident_pgt)
17718+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17719
17720 NEXT_PAGE(level2_kernel_pgt)
17721 /*
17722@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17723 * If you want to increase this then increase MODULES_VADDR
17724 * too.)
17725 */
17726- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17727- KERNEL_IMAGE_SIZE/PMD_SIZE)
17728-
17729-NEXT_PAGE(level2_spare_pgt)
17730- .fill 512, 8, 0
17731+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17732
17733 #undef PMDS
17734 #undef NEXT_PAGE
17735
17736- .data
17737+ .align PAGE_SIZE
17738+ENTRY(cpu_gdt_table)
17739+ .rept NR_CPUS
17740+ .quad 0x0000000000000000 /* NULL descriptor */
17741+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17742+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17743+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17744+ .quad 0x00cffb000000ffff /* __USER32_CS */
17745+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17746+ .quad 0x00affb000000ffff /* __USER_CS */
17747+
17748+#ifdef CONFIG_PAX_KERNEXEC
17749+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17750+#else
17751+ .quad 0x0 /* unused */
17752+#endif
17753+
17754+ .quad 0,0 /* TSS */
17755+ .quad 0,0 /* LDT */
17756+ .quad 0,0,0 /* three TLS descriptors */
17757+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17758+ /* asm/segment.h:GDT_ENTRIES must match this */
17759+
17760+ /* zero the remaining page */
17761+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17762+ .endr
17763+
17764 .align 16
17765 .globl early_gdt_descr
17766 early_gdt_descr:
17767 .word GDT_ENTRIES*8-1
17768 early_gdt_descr_base:
17769- .quad INIT_PER_CPU_VAR(gdt_page)
17770+ .quad cpu_gdt_table
17771
17772 ENTRY(phys_base)
17773 /* This must match the first entry in level2_kernel_pgt */
17774 .quad 0x0000000000000000
17775
17776 #include "../../x86/xen/xen-head.S"
17777-
17778- .section .bss, "aw", @nobits
17779+
17780+ .section .rodata,"a",@progbits
17781 .align L1_CACHE_BYTES
17782 ENTRY(idt_table)
17783- .skip IDT_ENTRIES * 16
17784+ .fill 512,8,0
17785
17786 __PAGE_ALIGNED_BSS
17787 .align PAGE_SIZE
17788diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17789index 9c3bd4a..e1d9b35 100644
17790--- a/arch/x86/kernel/i386_ksyms_32.c
17791+++ b/arch/x86/kernel/i386_ksyms_32.c
17792@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17793 EXPORT_SYMBOL(cmpxchg8b_emu);
17794 #endif
17795
17796+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17797+
17798 /* Networking helper routines. */
17799 EXPORT_SYMBOL(csum_partial_copy_generic);
17800+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17801+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17802
17803 EXPORT_SYMBOL(__get_user_1);
17804 EXPORT_SYMBOL(__get_user_2);
17805@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17806
17807 EXPORT_SYMBOL(csum_partial);
17808 EXPORT_SYMBOL(empty_zero_page);
17809+
17810+#ifdef CONFIG_PAX_KERNEXEC
17811+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17812+#endif
17813diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17814index df89102..a244320 100644
17815--- a/arch/x86/kernel/i8259.c
17816+++ b/arch/x86/kernel/i8259.c
17817@@ -208,7 +208,7 @@ spurious_8259A_irq:
17818 "spurious 8259A interrupt: IRQ%d.\n", irq);
17819 spurious_irq_mask |= irqmask;
17820 }
17821- atomic_inc(&irq_err_count);
17822+ atomic_inc_unchecked(&irq_err_count);
17823 /*
17824 * Theoretically we do not have to handle this IRQ,
17825 * but in Linux this does not cause problems and is
17826diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17827index 3a54dcb..1c22348 100644
17828--- a/arch/x86/kernel/init_task.c
17829+++ b/arch/x86/kernel/init_task.c
17830@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17831 * way process stacks are handled. This is done by having a special
17832 * "init_task" linker map entry..
17833 */
17834-union thread_union init_thread_union __init_task_data =
17835- { INIT_THREAD_INFO(init_task) };
17836+union thread_union init_thread_union __init_task_data;
17837
17838 /*
17839 * Initial task structure.
17840@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17841 * section. Since TSS's are completely CPU-local, we want them
17842 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17843 */
17844-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17845-
17846+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17847+EXPORT_SYMBOL(init_tss);
17848diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17849index 99c4d30..74c84e9 100644
17850--- a/arch/x86/kernel/ioport.c
17851+++ b/arch/x86/kernel/ioport.c
17852@@ -6,6 +6,7 @@
17853 #include <linux/sched.h>
17854 #include <linux/kernel.h>
17855 #include <linux/capability.h>
17856+#include <linux/security.h>
17857 #include <linux/errno.h>
17858 #include <linux/types.h>
17859 #include <linux/ioport.h>
17860@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17861
17862 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17863 return -EINVAL;
17864+#ifdef CONFIG_GRKERNSEC_IO
17865+ if (turn_on && grsec_disable_privio) {
17866+ gr_handle_ioperm();
17867+ return -EPERM;
17868+ }
17869+#endif
17870 if (turn_on && !capable(CAP_SYS_RAWIO))
17871 return -EPERM;
17872
17873@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17874 * because the ->io_bitmap_max value must match the bitmap
17875 * contents:
17876 */
17877- tss = &per_cpu(init_tss, get_cpu());
17878+ tss = init_tss + get_cpu();
17879
17880 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17881
17882@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17883 return -EINVAL;
17884 /* Trying to gain more privileges? */
17885 if (level > old) {
17886+#ifdef CONFIG_GRKERNSEC_IO
17887+ if (grsec_disable_privio) {
17888+ gr_handle_iopl();
17889+ return -EPERM;
17890+ }
17891+#endif
17892 if (!capable(CAP_SYS_RAWIO))
17893 return -EPERM;
17894 }
17895diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17896index 04bbd52..83a07d9 100644
17897--- a/arch/x86/kernel/irq.c
17898+++ b/arch/x86/kernel/irq.c
17899@@ -15,7 +15,7 @@
17900 #include <asm/mce.h>
17901 #include <asm/hw_irq.h>
17902
17903-atomic_t irq_err_count;
17904+atomic_unchecked_t irq_err_count;
17905
17906 /* Function pointer for generic interrupt vector handling */
17907 void (*generic_interrupt_extension)(void) = NULL;
17908@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17909 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17910 seq_printf(p, " Machine check polls\n");
17911 #endif
17912- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17913+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17914 #if defined(CONFIG_X86_IO_APIC)
17915- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17916+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17917 #endif
17918 return 0;
17919 }
17920@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17921
17922 u64 arch_irq_stat(void)
17923 {
17924- u64 sum = atomic_read(&irq_err_count);
17925+ u64 sum = atomic_read_unchecked(&irq_err_count);
17926
17927 #ifdef CONFIG_X86_IO_APIC
17928- sum += atomic_read(&irq_mis_count);
17929+ sum += atomic_read_unchecked(&irq_mis_count);
17930 #endif
17931 return sum;
17932 }
17933diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17934index 7d35d0f..03f1d52 100644
17935--- a/arch/x86/kernel/irq_32.c
17936+++ b/arch/x86/kernel/irq_32.c
17937@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17938 __asm__ __volatile__("andl %%esp,%0" :
17939 "=r" (sp) : "0" (THREAD_SIZE - 1));
17940
17941- return sp < (sizeof(struct thread_info) + STACK_WARN);
17942+ return sp < STACK_WARN;
17943 }
17944
17945 static void print_stack_overflow(void)
17946@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17947 * per-CPU IRQ handling contexts (thread information and stack)
17948 */
17949 union irq_ctx {
17950- struct thread_info tinfo;
17951- u32 stack[THREAD_SIZE/sizeof(u32)];
17952-} __attribute__((aligned(PAGE_SIZE)));
17953+ unsigned long previous_esp;
17954+ u32 stack[THREAD_SIZE/sizeof(u32)];
17955+} __attribute__((aligned(THREAD_SIZE)));
17956
17957 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17958 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17959@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17960 static inline int
17961 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17962 {
17963- union irq_ctx *curctx, *irqctx;
17964+ union irq_ctx *irqctx;
17965 u32 *isp, arg1, arg2;
17966
17967- curctx = (union irq_ctx *) current_thread_info();
17968 irqctx = __get_cpu_var(hardirq_ctx);
17969
17970 /*
17971@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17972 * handler) we can't do that and just have to keep using the
17973 * current stack (which is the irq stack already after all)
17974 */
17975- if (unlikely(curctx == irqctx))
17976+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17977 return 0;
17978
17979 /* build the stack frame on the IRQ stack */
17980- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17981- irqctx->tinfo.task = curctx->tinfo.task;
17982- irqctx->tinfo.previous_esp = current_stack_pointer;
17983+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17984+ irqctx->previous_esp = current_stack_pointer;
17985
17986- /*
17987- * Copy the softirq bits in preempt_count so that the
17988- * softirq checks work in the hardirq context.
17989- */
17990- irqctx->tinfo.preempt_count =
17991- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17992- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17993+#ifdef CONFIG_PAX_MEMORY_UDEREF
17994+ __set_fs(MAKE_MM_SEG(0));
17995+#endif
17996
17997 if (unlikely(overflow))
17998 call_on_stack(print_stack_overflow, isp);
17999@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18000 : "0" (irq), "1" (desc), "2" (isp),
18001 "D" (desc->handle_irq)
18002 : "memory", "cc", "ecx");
18003+
18004+#ifdef CONFIG_PAX_MEMORY_UDEREF
18005+ __set_fs(current_thread_info()->addr_limit);
18006+#endif
18007+
18008 return 1;
18009 }
18010
18011@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18012 */
18013 void __cpuinit irq_ctx_init(int cpu)
18014 {
18015- union irq_ctx *irqctx;
18016-
18017 if (per_cpu(hardirq_ctx, cpu))
18018 return;
18019
18020- irqctx = &per_cpu(hardirq_stack, cpu);
18021- irqctx->tinfo.task = NULL;
18022- irqctx->tinfo.exec_domain = NULL;
18023- irqctx->tinfo.cpu = cpu;
18024- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18025- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18026-
18027- per_cpu(hardirq_ctx, cpu) = irqctx;
18028-
18029- irqctx = &per_cpu(softirq_stack, cpu);
18030- irqctx->tinfo.task = NULL;
18031- irqctx->tinfo.exec_domain = NULL;
18032- irqctx->tinfo.cpu = cpu;
18033- irqctx->tinfo.preempt_count = 0;
18034- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18035-
18036- per_cpu(softirq_ctx, cpu) = irqctx;
18037+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
18038+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
18039
18040 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18041 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18042@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
18043 asmlinkage void do_softirq(void)
18044 {
18045 unsigned long flags;
18046- struct thread_info *curctx;
18047 union irq_ctx *irqctx;
18048 u32 *isp;
18049
18050@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
18051 local_irq_save(flags);
18052
18053 if (local_softirq_pending()) {
18054- curctx = current_thread_info();
18055 irqctx = __get_cpu_var(softirq_ctx);
18056- irqctx->tinfo.task = curctx->task;
18057- irqctx->tinfo.previous_esp = current_stack_pointer;
18058+ irqctx->previous_esp = current_stack_pointer;
18059
18060 /* build the stack frame on the softirq stack */
18061- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18062+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18063+
18064+#ifdef CONFIG_PAX_MEMORY_UDEREF
18065+ __set_fs(MAKE_MM_SEG(0));
18066+#endif
18067
18068 call_on_stack(__do_softirq, isp);
18069+
18070+#ifdef CONFIG_PAX_MEMORY_UDEREF
18071+ __set_fs(current_thread_info()->addr_limit);
18072+#endif
18073+
18074 /*
18075 * Shouldnt happen, we returned above if in_interrupt():
18076 */
18077diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18078index 8d82a77..0baf312 100644
18079--- a/arch/x86/kernel/kgdb.c
18080+++ b/arch/x86/kernel/kgdb.c
18081@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18082
18083 /* clear the trace bit */
18084 linux_regs->flags &= ~X86_EFLAGS_TF;
18085- atomic_set(&kgdb_cpu_doing_single_step, -1);
18086+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18087
18088 /* set the trace bit if we're stepping */
18089 if (remcomInBuffer[0] == 's') {
18090 linux_regs->flags |= X86_EFLAGS_TF;
18091 kgdb_single_step = 1;
18092- atomic_set(&kgdb_cpu_doing_single_step,
18093+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18094 raw_smp_processor_id());
18095 }
18096
18097@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18098 break;
18099
18100 case DIE_DEBUG:
18101- if (atomic_read(&kgdb_cpu_doing_single_step) ==
18102+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
18103 raw_smp_processor_id()) {
18104 if (user_mode(regs))
18105 return single_step_cont(regs, args);
18106@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
18107 return instruction_pointer(regs);
18108 }
18109
18110-struct kgdb_arch arch_kgdb_ops = {
18111+const struct kgdb_arch arch_kgdb_ops = {
18112 /* Breakpoint instruction: */
18113 .gdb_bpt_instr = { 0xcc },
18114 .flags = KGDB_HW_BREAKPOINT,
18115diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18116index 7a67820..70ea187 100644
18117--- a/arch/x86/kernel/kprobes.c
18118+++ b/arch/x86/kernel/kprobes.c
18119@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
18120 char op;
18121 s32 raddr;
18122 } __attribute__((packed)) * jop;
18123- jop = (struct __arch_jmp_op *)from;
18124+
18125+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
18126+
18127+ pax_open_kernel();
18128 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
18129 jop->op = RELATIVEJUMP_INSTRUCTION;
18130+ pax_close_kernel();
18131 }
18132
18133 /*
18134@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18135 kprobe_opcode_t opcode;
18136 kprobe_opcode_t *orig_opcodes = opcodes;
18137
18138- if (search_exception_tables((unsigned long)opcodes))
18139+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18140 return 0; /* Page fault may occur on this address. */
18141
18142 retry:
18143@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
18144 disp = (u8 *) p->addr + *((s32 *) insn) -
18145 (u8 *) p->ainsn.insn;
18146 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
18147+ pax_open_kernel();
18148 *(s32 *)insn = (s32) disp;
18149+ pax_close_kernel();
18150 }
18151 }
18152 #endif
18153@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
18154
18155 static void __kprobes arch_copy_kprobe(struct kprobe *p)
18156 {
18157- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18158+ pax_open_kernel();
18159+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18160+ pax_close_kernel();
18161
18162 fix_riprel(p);
18163
18164- if (can_boost(p->addr))
18165+ if (can_boost(ktla_ktva(p->addr)))
18166 p->ainsn.boostable = 0;
18167 else
18168 p->ainsn.boostable = -1;
18169
18170- p->opcode = *p->addr;
18171+ p->opcode = *(ktla_ktva(p->addr));
18172 }
18173
18174 int __kprobes arch_prepare_kprobe(struct kprobe *p)
18175@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
18176 if (p->opcode == BREAKPOINT_INSTRUCTION)
18177 regs->ip = (unsigned long)p->addr;
18178 else
18179- regs->ip = (unsigned long)p->ainsn.insn;
18180+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18181 }
18182
18183 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
18184@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18185 if (p->ainsn.boostable == 1 && !p->post_handler) {
18186 /* Boost up -- we can execute copied instructions directly */
18187 reset_current_kprobe();
18188- regs->ip = (unsigned long)p->ainsn.insn;
18189+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18190 preempt_enable_no_resched();
18191 return;
18192 }
18193@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18194 struct kprobe_ctlblk *kcb;
18195
18196 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
18197- if (*addr != BREAKPOINT_INSTRUCTION) {
18198+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18199 /*
18200 * The breakpoint instruction was removed right
18201 * after we hit it. Another cpu has removed
18202@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18203 /* Skip orig_ax, ip, cs */
18204 " addq $24, %rsp\n"
18205 " popfq\n"
18206+#ifdef KERNEXEC_PLUGIN
18207+ " btsq $63,(%rsp)\n"
18208+#endif
18209 #else
18210 " pushf\n"
18211 /*
18212@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18213 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18214 {
18215 unsigned long *tos = stack_addr(regs);
18216- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18217+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18218 unsigned long orig_ip = (unsigned long)p->addr;
18219 kprobe_opcode_t *insn = p->ainsn.insn;
18220
18221@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18222 struct die_args *args = data;
18223 int ret = NOTIFY_DONE;
18224
18225- if (args->regs && user_mode_vm(args->regs))
18226+ if (args->regs && user_mode(args->regs))
18227 return ret;
18228
18229 switch (val) {
18230diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18231index 63b0ec8..6d92227 100644
18232--- a/arch/x86/kernel/kvm.c
18233+++ b/arch/x86/kernel/kvm.c
18234@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18235 pv_mmu_ops.set_pud = kvm_set_pud;
18236 #if PAGETABLE_LEVELS == 4
18237 pv_mmu_ops.set_pgd = kvm_set_pgd;
18238+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18239 #endif
18240 #endif
18241 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18242diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18243index ec6ef60..ab2c824 100644
18244--- a/arch/x86/kernel/ldt.c
18245+++ b/arch/x86/kernel/ldt.c
18246@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18247 if (reload) {
18248 #ifdef CONFIG_SMP
18249 preempt_disable();
18250- load_LDT(pc);
18251+ load_LDT_nolock(pc);
18252 if (!cpumask_equal(mm_cpumask(current->mm),
18253 cpumask_of(smp_processor_id())))
18254 smp_call_function(flush_ldt, current->mm, 1);
18255 preempt_enable();
18256 #else
18257- load_LDT(pc);
18258+ load_LDT_nolock(pc);
18259 #endif
18260 }
18261 if (oldsize) {
18262@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18263 return err;
18264
18265 for (i = 0; i < old->size; i++)
18266- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18267+ write_ldt_entry(new->ldt, i, old->ldt + i);
18268 return 0;
18269 }
18270
18271@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18272 retval = copy_ldt(&mm->context, &old_mm->context);
18273 mutex_unlock(&old_mm->context.lock);
18274 }
18275+
18276+ if (tsk == current) {
18277+ mm->context.vdso = 0;
18278+
18279+#ifdef CONFIG_X86_32
18280+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18281+ mm->context.user_cs_base = 0UL;
18282+ mm->context.user_cs_limit = ~0UL;
18283+
18284+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18285+ cpus_clear(mm->context.cpu_user_cs_mask);
18286+#endif
18287+
18288+#endif
18289+#endif
18290+
18291+ }
18292+
18293 return retval;
18294 }
18295
18296@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18297 }
18298 }
18299
18300+#ifdef CONFIG_PAX_SEGMEXEC
18301+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18302+ error = -EINVAL;
18303+ goto out_unlock;
18304+ }
18305+#endif
18306+
18307 fill_ldt(&ldt, &ldt_info);
18308 if (oldmode)
18309 ldt.avl = 0;
18310diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18311index c1c429d..f02eaf9 100644
18312--- a/arch/x86/kernel/machine_kexec_32.c
18313+++ b/arch/x86/kernel/machine_kexec_32.c
18314@@ -26,7 +26,7 @@
18315 #include <asm/system.h>
18316 #include <asm/cacheflush.h>
18317
18318-static void set_idt(void *newidt, __u16 limit)
18319+static void set_idt(struct desc_struct *newidt, __u16 limit)
18320 {
18321 struct desc_ptr curidt;
18322
18323@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18324 }
18325
18326
18327-static void set_gdt(void *newgdt, __u16 limit)
18328+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18329 {
18330 struct desc_ptr curgdt;
18331
18332@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18333 }
18334
18335 control_page = page_address(image->control_code_page);
18336- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18337+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18338
18339 relocate_kernel_ptr = control_page;
18340 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18341diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18342index 1e47679..e73449d 100644
18343--- a/arch/x86/kernel/microcode_amd.c
18344+++ b/arch/x86/kernel/microcode_amd.c
18345@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18346 uci->mc = NULL;
18347 }
18348
18349-static struct microcode_ops microcode_amd_ops = {
18350+static const struct microcode_ops microcode_amd_ops = {
18351 .request_microcode_user = request_microcode_user,
18352 .request_microcode_fw = request_microcode_fw,
18353 .collect_cpu_info = collect_cpu_info_amd,
18354@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18355 .microcode_fini_cpu = microcode_fini_cpu_amd,
18356 };
18357
18358-struct microcode_ops * __init init_amd_microcode(void)
18359+const struct microcode_ops * __init init_amd_microcode(void)
18360 {
18361 return &microcode_amd_ops;
18362 }
18363diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18364index 378e9a8..b5a6ea9 100644
18365--- a/arch/x86/kernel/microcode_core.c
18366+++ b/arch/x86/kernel/microcode_core.c
18367@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18368
18369 #define MICROCODE_VERSION "2.00"
18370
18371-static struct microcode_ops *microcode_ops;
18372+static const struct microcode_ops *microcode_ops;
18373
18374 /*
18375 * Synchronization.
18376diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18377index 0d334dd..14cedaf 100644
18378--- a/arch/x86/kernel/microcode_intel.c
18379+++ b/arch/x86/kernel/microcode_intel.c
18380@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18381
18382 static int get_ucode_user(void *to, const void *from, size_t n)
18383 {
18384- return copy_from_user(to, from, n);
18385+ return copy_from_user(to, (const void __force_user *)from, n);
18386 }
18387
18388 static enum ucode_state
18389 request_microcode_user(int cpu, const void __user *buf, size_t size)
18390 {
18391- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18392+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18393 }
18394
18395 static void microcode_fini_cpu(int cpu)
18396@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18397 uci->mc = NULL;
18398 }
18399
18400-static struct microcode_ops microcode_intel_ops = {
18401+static const struct microcode_ops microcode_intel_ops = {
18402 .request_microcode_user = request_microcode_user,
18403 .request_microcode_fw = request_microcode_fw,
18404 .collect_cpu_info = collect_cpu_info,
18405@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18406 .microcode_fini_cpu = microcode_fini_cpu,
18407 };
18408
18409-struct microcode_ops * __init init_intel_microcode(void)
18410+const struct microcode_ops * __init init_intel_microcode(void)
18411 {
18412 return &microcode_intel_ops;
18413 }
18414diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18415index 89f386f..9028f51 100644
18416--- a/arch/x86/kernel/module.c
18417+++ b/arch/x86/kernel/module.c
18418@@ -34,7 +34,7 @@
18419 #define DEBUGP(fmt...)
18420 #endif
18421
18422-void *module_alloc(unsigned long size)
18423+static void *__module_alloc(unsigned long size, pgprot_t prot)
18424 {
18425 struct vm_struct *area;
18426
18427@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18428 if (!area)
18429 return NULL;
18430
18431- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18432- PAGE_KERNEL_EXEC);
18433+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18434+}
18435+
18436+void *module_alloc(unsigned long size)
18437+{
18438+
18439+#ifdef CONFIG_PAX_KERNEXEC
18440+ return __module_alloc(size, PAGE_KERNEL);
18441+#else
18442+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18443+#endif
18444+
18445 }
18446
18447 /* Free memory returned from module_alloc */
18448@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18449 vfree(module_region);
18450 }
18451
18452+#ifdef CONFIG_PAX_KERNEXEC
18453+#ifdef CONFIG_X86_32
18454+void *module_alloc_exec(unsigned long size)
18455+{
18456+ struct vm_struct *area;
18457+
18458+ if (size == 0)
18459+ return NULL;
18460+
18461+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18462+ return area ? area->addr : NULL;
18463+}
18464+EXPORT_SYMBOL(module_alloc_exec);
18465+
18466+void module_free_exec(struct module *mod, void *module_region)
18467+{
18468+ vunmap(module_region);
18469+}
18470+EXPORT_SYMBOL(module_free_exec);
18471+#else
18472+void module_free_exec(struct module *mod, void *module_region)
18473+{
18474+ module_free(mod, module_region);
18475+}
18476+EXPORT_SYMBOL(module_free_exec);
18477+
18478+void *module_alloc_exec(unsigned long size)
18479+{
18480+ return __module_alloc(size, PAGE_KERNEL_RX);
18481+}
18482+EXPORT_SYMBOL(module_alloc_exec);
18483+#endif
18484+#endif
18485+
18486 /* We don't need anything special. */
18487 int module_frob_arch_sections(Elf_Ehdr *hdr,
18488 Elf_Shdr *sechdrs,
18489@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18490 unsigned int i;
18491 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18492 Elf32_Sym *sym;
18493- uint32_t *location;
18494+ uint32_t *plocation, location;
18495
18496 DEBUGP("Applying relocate section %u to %u\n", relsec,
18497 sechdrs[relsec].sh_info);
18498 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18499 /* This is where to make the change */
18500- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18501- + rel[i].r_offset;
18502+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18503+ location = (uint32_t)plocation;
18504+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18505+ plocation = ktla_ktva((void *)plocation);
18506 /* This is the symbol it is referring to. Note that all
18507 undefined symbols have been resolved. */
18508 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18509@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18510 switch (ELF32_R_TYPE(rel[i].r_info)) {
18511 case R_386_32:
18512 /* We add the value into the location given */
18513- *location += sym->st_value;
18514+ pax_open_kernel();
18515+ *plocation += sym->st_value;
18516+ pax_close_kernel();
18517 break;
18518 case R_386_PC32:
18519 /* Add the value, subtract its postition */
18520- *location += sym->st_value - (uint32_t)location;
18521+ pax_open_kernel();
18522+ *plocation += sym->st_value - location;
18523+ pax_close_kernel();
18524 break;
18525 default:
18526 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18527@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18528 case R_X86_64_NONE:
18529 break;
18530 case R_X86_64_64:
18531+ pax_open_kernel();
18532 *(u64 *)loc = val;
18533+ pax_close_kernel();
18534 break;
18535 case R_X86_64_32:
18536+ pax_open_kernel();
18537 *(u32 *)loc = val;
18538+ pax_close_kernel();
18539 if (val != *(u32 *)loc)
18540 goto overflow;
18541 break;
18542 case R_X86_64_32S:
18543+ pax_open_kernel();
18544 *(s32 *)loc = val;
18545+ pax_close_kernel();
18546 if ((s64)val != *(s32 *)loc)
18547 goto overflow;
18548 break;
18549 case R_X86_64_PC32:
18550 val -= (u64)loc;
18551+ pax_open_kernel();
18552 *(u32 *)loc = val;
18553+ pax_close_kernel();
18554+
18555 #if 0
18556 if ((s64)val != *(s32 *)loc)
18557 goto overflow;
18558diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18559index 3a7c5a4..9191528 100644
18560--- a/arch/x86/kernel/paravirt-spinlocks.c
18561+++ b/arch/x86/kernel/paravirt-spinlocks.c
18562@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18563 __raw_spin_lock(lock);
18564 }
18565
18566-struct pv_lock_ops pv_lock_ops = {
18567+struct pv_lock_ops pv_lock_ops __read_only = {
18568 #ifdef CONFIG_SMP
18569 .spin_is_locked = __ticket_spin_is_locked,
18570 .spin_is_contended = __ticket_spin_is_contended,
18571diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18572index 1b1739d..dea6077 100644
18573--- a/arch/x86/kernel/paravirt.c
18574+++ b/arch/x86/kernel/paravirt.c
18575@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18576 {
18577 return x;
18578 }
18579+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18580+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18581+#endif
18582
18583 void __init default_banner(void)
18584 {
18585@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18586 * corresponding structure. */
18587 static void *get_call_destination(u8 type)
18588 {
18589- struct paravirt_patch_template tmpl = {
18590+ const struct paravirt_patch_template tmpl = {
18591 .pv_init_ops = pv_init_ops,
18592 .pv_time_ops = pv_time_ops,
18593 .pv_cpu_ops = pv_cpu_ops,
18594@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18595 .pv_lock_ops = pv_lock_ops,
18596 #endif
18597 };
18598+
18599+ pax_track_stack();
18600 return *((void **)&tmpl + type);
18601 }
18602
18603@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18604 if (opfunc == NULL)
18605 /* If there's no function, patch it with a ud2a (BUG) */
18606 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18607- else if (opfunc == _paravirt_nop)
18608+ else if (opfunc == (void *)_paravirt_nop)
18609 /* If the operation is a nop, then nop the callsite */
18610 ret = paravirt_patch_nop();
18611
18612 /* identity functions just return their single argument */
18613- else if (opfunc == _paravirt_ident_32)
18614+ else if (opfunc == (void *)_paravirt_ident_32)
18615 ret = paravirt_patch_ident_32(insnbuf, len);
18616- else if (opfunc == _paravirt_ident_64)
18617+ else if (opfunc == (void *)_paravirt_ident_64)
18618 ret = paravirt_patch_ident_64(insnbuf, len);
18619+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18620+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18621+ ret = paravirt_patch_ident_64(insnbuf, len);
18622+#endif
18623
18624 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18625 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18626@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18627 if (insn_len > len || start == NULL)
18628 insn_len = len;
18629 else
18630- memcpy(insnbuf, start, insn_len);
18631+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18632
18633 return insn_len;
18634 }
18635@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18636 preempt_enable();
18637 }
18638
18639-struct pv_info pv_info = {
18640+struct pv_info pv_info __read_only = {
18641 .name = "bare hardware",
18642 .paravirt_enabled = 0,
18643 .kernel_rpl = 0,
18644 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18645 };
18646
18647-struct pv_init_ops pv_init_ops = {
18648+struct pv_init_ops pv_init_ops __read_only = {
18649 .patch = native_patch,
18650 };
18651
18652-struct pv_time_ops pv_time_ops = {
18653+struct pv_time_ops pv_time_ops __read_only = {
18654 .sched_clock = native_sched_clock,
18655 };
18656
18657-struct pv_irq_ops pv_irq_ops = {
18658+struct pv_irq_ops pv_irq_ops __read_only = {
18659 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18660 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18661 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18662@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18663 #endif
18664 };
18665
18666-struct pv_cpu_ops pv_cpu_ops = {
18667+struct pv_cpu_ops pv_cpu_ops __read_only = {
18668 .cpuid = native_cpuid,
18669 .get_debugreg = native_get_debugreg,
18670 .set_debugreg = native_set_debugreg,
18671@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18672 .end_context_switch = paravirt_nop,
18673 };
18674
18675-struct pv_apic_ops pv_apic_ops = {
18676+struct pv_apic_ops pv_apic_ops __read_only = {
18677 #ifdef CONFIG_X86_LOCAL_APIC
18678 .startup_ipi_hook = paravirt_nop,
18679 #endif
18680 };
18681
18682-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18683+#ifdef CONFIG_X86_32
18684+#ifdef CONFIG_X86_PAE
18685+/* 64-bit pagetable entries */
18686+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18687+#else
18688 /* 32-bit pagetable entries */
18689 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18690+#endif
18691 #else
18692 /* 64-bit pagetable entries */
18693 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18694 #endif
18695
18696-struct pv_mmu_ops pv_mmu_ops = {
18697+struct pv_mmu_ops pv_mmu_ops __read_only = {
18698
18699 .read_cr2 = native_read_cr2,
18700 .write_cr2 = native_write_cr2,
18701@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18702 .make_pud = PTE_IDENT,
18703
18704 .set_pgd = native_set_pgd,
18705+ .set_pgd_batched = native_set_pgd_batched,
18706 #endif
18707 #endif /* PAGETABLE_LEVELS >= 3 */
18708
18709@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18710 },
18711
18712 .set_fixmap = native_set_fixmap,
18713+
18714+#ifdef CONFIG_PAX_KERNEXEC
18715+ .pax_open_kernel = native_pax_open_kernel,
18716+ .pax_close_kernel = native_pax_close_kernel,
18717+#endif
18718+
18719 };
18720
18721 EXPORT_SYMBOL_GPL(pv_time_ops);
18722diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18723index 1a2d4b1..6a0dd55 100644
18724--- a/arch/x86/kernel/pci-calgary_64.c
18725+++ b/arch/x86/kernel/pci-calgary_64.c
18726@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18727 free_pages((unsigned long)vaddr, get_order(size));
18728 }
18729
18730-static struct dma_map_ops calgary_dma_ops = {
18731+static const struct dma_map_ops calgary_dma_ops = {
18732 .alloc_coherent = calgary_alloc_coherent,
18733 .free_coherent = calgary_free_coherent,
18734 .map_sg = calgary_map_sg,
18735diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18736index 6ac3931..42b4414 100644
18737--- a/arch/x86/kernel/pci-dma.c
18738+++ b/arch/x86/kernel/pci-dma.c
18739@@ -14,7 +14,7 @@
18740
18741 static int forbid_dac __read_mostly;
18742
18743-struct dma_map_ops *dma_ops;
18744+const struct dma_map_ops *dma_ops;
18745 EXPORT_SYMBOL(dma_ops);
18746
18747 static int iommu_sac_force __read_mostly;
18748@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18749
18750 int dma_supported(struct device *dev, u64 mask)
18751 {
18752- struct dma_map_ops *ops = get_dma_ops(dev);
18753+ const struct dma_map_ops *ops = get_dma_ops(dev);
18754
18755 #ifdef CONFIG_PCI
18756 if (mask > 0xffffffff && forbid_dac > 0) {
18757diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18758index 1c76691..e3632db 100644
18759--- a/arch/x86/kernel/pci-gart_64.c
18760+++ b/arch/x86/kernel/pci-gart_64.c
18761@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18762 return -1;
18763 }
18764
18765-static struct dma_map_ops gart_dma_ops = {
18766+static const struct dma_map_ops gart_dma_ops = {
18767 .map_sg = gart_map_sg,
18768 .unmap_sg = gart_unmap_sg,
18769 .map_page = gart_map_page,
18770diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18771index a3933d4..c898869 100644
18772--- a/arch/x86/kernel/pci-nommu.c
18773+++ b/arch/x86/kernel/pci-nommu.c
18774@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18775 flush_write_buffers();
18776 }
18777
18778-struct dma_map_ops nommu_dma_ops = {
18779+const struct dma_map_ops nommu_dma_ops = {
18780 .alloc_coherent = dma_generic_alloc_coherent,
18781 .free_coherent = nommu_free_coherent,
18782 .map_sg = nommu_map_sg,
18783diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18784index aaa6b78..4de1881 100644
18785--- a/arch/x86/kernel/pci-swiotlb.c
18786+++ b/arch/x86/kernel/pci-swiotlb.c
18787@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18788 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18789 }
18790
18791-static struct dma_map_ops swiotlb_dma_ops = {
18792+static const struct dma_map_ops swiotlb_dma_ops = {
18793 .mapping_error = swiotlb_dma_mapping_error,
18794 .alloc_coherent = x86_swiotlb_alloc_coherent,
18795 .free_coherent = swiotlb_free_coherent,
18796diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18797index fc6c84d..0312ca2 100644
18798--- a/arch/x86/kernel/process.c
18799+++ b/arch/x86/kernel/process.c
18800@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18801
18802 void free_thread_info(struct thread_info *ti)
18803 {
18804- free_thread_xstate(ti->task);
18805 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18806 }
18807
18808+static struct kmem_cache *task_struct_cachep;
18809+
18810 void arch_task_cache_init(void)
18811 {
18812- task_xstate_cachep =
18813- kmem_cache_create("task_xstate", xstate_size,
18814+ /* create a slab on which task_structs can be allocated */
18815+ task_struct_cachep =
18816+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18817+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18818+
18819+ task_xstate_cachep =
18820+ kmem_cache_create("task_xstate", xstate_size,
18821 __alignof__(union thread_xstate),
18822- SLAB_PANIC | SLAB_NOTRACK, NULL);
18823+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18824+}
18825+
18826+struct task_struct *alloc_task_struct(void)
18827+{
18828+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18829+}
18830+
18831+void free_task_struct(struct task_struct *task)
18832+{
18833+ free_thread_xstate(task);
18834+ kmem_cache_free(task_struct_cachep, task);
18835 }
18836
18837 /*
18838@@ -73,7 +90,7 @@ void exit_thread(void)
18839 unsigned long *bp = t->io_bitmap_ptr;
18840
18841 if (bp) {
18842- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18843+ struct tss_struct *tss = init_tss + get_cpu();
18844
18845 t->io_bitmap_ptr = NULL;
18846 clear_thread_flag(TIF_IO_BITMAP);
18847@@ -93,6 +110,9 @@ void flush_thread(void)
18848
18849 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18850
18851+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18852+ loadsegment(gs, 0);
18853+#endif
18854 tsk->thread.debugreg0 = 0;
18855 tsk->thread.debugreg1 = 0;
18856 tsk->thread.debugreg2 = 0;
18857@@ -307,7 +327,7 @@ void default_idle(void)
18858 EXPORT_SYMBOL(default_idle);
18859 #endif
18860
18861-void stop_this_cpu(void *dummy)
18862+__noreturn void stop_this_cpu(void *dummy)
18863 {
18864 local_irq_disable();
18865 /*
18866@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18867 }
18868 early_param("idle", idle_setup);
18869
18870-unsigned long arch_align_stack(unsigned long sp)
18871+#ifdef CONFIG_PAX_RANDKSTACK
18872+void pax_randomize_kstack(struct pt_regs *regs)
18873 {
18874- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18875- sp -= get_random_int() % 8192;
18876- return sp & ~0xf;
18877-}
18878+ struct thread_struct *thread = &current->thread;
18879+ unsigned long time;
18880
18881-unsigned long arch_randomize_brk(struct mm_struct *mm)
18882-{
18883- unsigned long range_end = mm->brk + 0x02000000;
18884- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18885+ if (!randomize_va_space)
18886+ return;
18887+
18888+ if (v8086_mode(regs))
18889+ return;
18890+
18891+ rdtscl(time);
18892+
18893+ /* P4 seems to return a 0 LSB, ignore it */
18894+#ifdef CONFIG_MPENTIUM4
18895+ time &= 0x3EUL;
18896+ time <<= 2;
18897+#elif defined(CONFIG_X86_64)
18898+ time &= 0xFUL;
18899+ time <<= 4;
18900+#else
18901+ time &= 0x1FUL;
18902+ time <<= 3;
18903+#endif
18904+
18905+ thread->sp0 ^= time;
18906+ load_sp0(init_tss + smp_processor_id(), thread);
18907+
18908+#ifdef CONFIG_X86_64
18909+ percpu_write(kernel_stack, thread->sp0);
18910+#endif
18911 }
18912+#endif
18913
18914diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18915index c40c432..6e1df72 100644
18916--- a/arch/x86/kernel/process_32.c
18917+++ b/arch/x86/kernel/process_32.c
18918@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18919 unsigned long thread_saved_pc(struct task_struct *tsk)
18920 {
18921 return ((unsigned long *)tsk->thread.sp)[3];
18922+//XXX return tsk->thread.eip;
18923 }
18924
18925 #ifndef CONFIG_SMP
18926@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18927 unsigned short ss, gs;
18928 const char *board;
18929
18930- if (user_mode_vm(regs)) {
18931+ if (user_mode(regs)) {
18932 sp = regs->sp;
18933 ss = regs->ss & 0xffff;
18934- gs = get_user_gs(regs);
18935 } else {
18936 sp = (unsigned long) (&regs->sp);
18937 savesegment(ss, ss);
18938- savesegment(gs, gs);
18939 }
18940+ gs = get_user_gs(regs);
18941
18942 printk("\n");
18943
18944@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18945 regs.bx = (unsigned long) fn;
18946 regs.dx = (unsigned long) arg;
18947
18948- regs.ds = __USER_DS;
18949- regs.es = __USER_DS;
18950+ regs.ds = __KERNEL_DS;
18951+ regs.es = __KERNEL_DS;
18952 regs.fs = __KERNEL_PERCPU;
18953- regs.gs = __KERNEL_STACK_CANARY;
18954+ savesegment(gs, regs.gs);
18955 regs.orig_ax = -1;
18956 regs.ip = (unsigned long) kernel_thread_helper;
18957 regs.cs = __KERNEL_CS | get_kernel_rpl();
18958@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18959 struct task_struct *tsk;
18960 int err;
18961
18962- childregs = task_pt_regs(p);
18963+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18964 *childregs = *regs;
18965 childregs->ax = 0;
18966 childregs->sp = sp;
18967
18968 p->thread.sp = (unsigned long) childregs;
18969 p->thread.sp0 = (unsigned long) (childregs+1);
18970+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18971
18972 p->thread.ip = (unsigned long) ret_from_fork;
18973
18974@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18975 struct thread_struct *prev = &prev_p->thread,
18976 *next = &next_p->thread;
18977 int cpu = smp_processor_id();
18978- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18979+ struct tss_struct *tss = init_tss + cpu;
18980 bool preload_fpu;
18981
18982 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18983@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18984 */
18985 lazy_save_gs(prev->gs);
18986
18987+#ifdef CONFIG_PAX_MEMORY_UDEREF
18988+ __set_fs(task_thread_info(next_p)->addr_limit);
18989+#endif
18990+
18991 /*
18992 * Load the per-thread Thread-Local Storage descriptor.
18993 */
18994@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18995 */
18996 arch_end_context_switch(next_p);
18997
18998+ percpu_write(current_task, next_p);
18999+ percpu_write(current_tinfo, &next_p->tinfo);
19000+
19001 if (preload_fpu)
19002 __math_state_restore();
19003
19004@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19005 if (prev->gs | next->gs)
19006 lazy_load_gs(next->gs);
19007
19008- percpu_write(current_task, next_p);
19009-
19010 return prev_p;
19011 }
19012
19013@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
19014 } while (count++ < 16);
19015 return 0;
19016 }
19017-
19018diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19019index 39493bc..196816d 100644
19020--- a/arch/x86/kernel/process_64.c
19021+++ b/arch/x86/kernel/process_64.c
19022@@ -91,7 +91,7 @@ static void __exit_idle(void)
19023 void exit_idle(void)
19024 {
19025 /* idle loop has pid 0 */
19026- if (current->pid)
19027+ if (task_pid_nr(current))
19028 return;
19029 __exit_idle();
19030 }
19031@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
19032 if (!board)
19033 board = "";
19034 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
19035- current->pid, current->comm, print_tainted(),
19036+ task_pid_nr(current), current->comm, print_tainted(),
19037 init_utsname()->release,
19038 (int)strcspn(init_utsname()->version, " "),
19039 init_utsname()->version, board);
19040@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19041 struct pt_regs *childregs;
19042 struct task_struct *me = current;
19043
19044- childregs = ((struct pt_regs *)
19045- (THREAD_SIZE + task_stack_page(p))) - 1;
19046+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
19047 *childregs = *regs;
19048
19049 childregs->ax = 0;
19050@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19051 p->thread.sp = (unsigned long) childregs;
19052 p->thread.sp0 = (unsigned long) (childregs+1);
19053 p->thread.usersp = me->thread.usersp;
19054+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19055
19056 set_tsk_thread_flag(p, TIF_FORK);
19057
19058@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19059 struct thread_struct *prev = &prev_p->thread;
19060 struct thread_struct *next = &next_p->thread;
19061 int cpu = smp_processor_id();
19062- struct tss_struct *tss = &per_cpu(init_tss, cpu);
19063+ struct tss_struct *tss = init_tss + cpu;
19064 unsigned fsindex, gsindex;
19065 bool preload_fpu;
19066
19067@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19068 prev->usersp = percpu_read(old_rsp);
19069 percpu_write(old_rsp, next->usersp);
19070 percpu_write(current_task, next_p);
19071+ percpu_write(current_tinfo, &next_p->tinfo);
19072
19073- percpu_write(kernel_stack,
19074- (unsigned long)task_stack_page(next_p) +
19075- THREAD_SIZE - KERNEL_STACK_OFFSET);
19076+ percpu_write(kernel_stack, next->sp0);
19077
19078 /*
19079 * Now maybe reload the debug registers and handle I/O bitmaps
19080@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
19081 if (!p || p == current || p->state == TASK_RUNNING)
19082 return 0;
19083 stack = (unsigned long)task_stack_page(p);
19084- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19085+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19086 return 0;
19087 fp = *(u64 *)(p->thread.sp);
19088 do {
19089- if (fp < (unsigned long)stack ||
19090- fp >= (unsigned long)stack+THREAD_SIZE)
19091+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19092 return 0;
19093 ip = *(u64 *)(fp+8);
19094 if (!in_sched_functions(ip))
19095diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19096index c06acdd..3f5fff5 100644
19097--- a/arch/x86/kernel/ptrace.c
19098+++ b/arch/x86/kernel/ptrace.c
19099@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
19100 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19101 {
19102 int ret;
19103- unsigned long __user *datap = (unsigned long __user *)data;
19104+ unsigned long __user *datap = (__force unsigned long __user *)data;
19105
19106 switch (request) {
19107 /* read the word at location addr in the USER area. */
19108@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19109 if (addr < 0)
19110 return -EIO;
19111 ret = do_get_thread_area(child, addr,
19112- (struct user_desc __user *) data);
19113+ (__force struct user_desc __user *) data);
19114 break;
19115
19116 case PTRACE_SET_THREAD_AREA:
19117 if (addr < 0)
19118 return -EIO;
19119 ret = do_set_thread_area(child, addr,
19120- (struct user_desc __user *) data, 0);
19121+ (__force struct user_desc __user *) data, 0);
19122 break;
19123 #endif
19124
19125@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19126 #ifdef CONFIG_X86_PTRACE_BTS
19127 case PTRACE_BTS_CONFIG:
19128 ret = ptrace_bts_config
19129- (child, data, (struct ptrace_bts_config __user *)addr);
19130+ (child, data, (__force struct ptrace_bts_config __user *)addr);
19131 break;
19132
19133 case PTRACE_BTS_STATUS:
19134 ret = ptrace_bts_status
19135- (child, data, (struct ptrace_bts_config __user *)addr);
19136+ (child, data, (__force struct ptrace_bts_config __user *)addr);
19137 break;
19138
19139 case PTRACE_BTS_SIZE:
19140@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19141
19142 case PTRACE_BTS_GET:
19143 ret = ptrace_bts_read_record
19144- (child, data, (struct bts_struct __user *) addr);
19145+ (child, data, (__force struct bts_struct __user *) addr);
19146 break;
19147
19148 case PTRACE_BTS_CLEAR:
19149@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19150
19151 case PTRACE_BTS_DRAIN:
19152 ret = ptrace_bts_drain
19153- (child, data, (struct bts_struct __user *) addr);
19154+ (child, data, (__force struct bts_struct __user *) addr);
19155 break;
19156 #endif /* CONFIG_X86_PTRACE_BTS */
19157
19158@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19159 info.si_code = si_code;
19160
19161 /* User-mode ip? */
19162- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
19163+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
19164
19165 /* Send us the fake SIGTRAP */
19166 force_sig_info(SIGTRAP, &info, tsk);
19167@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19168 * We must return the syscall number to actually look up in the table.
19169 * This can be -1L to skip running any syscall at all.
19170 */
19171-asmregparm long syscall_trace_enter(struct pt_regs *regs)
19172+long syscall_trace_enter(struct pt_regs *regs)
19173 {
19174 long ret = 0;
19175
19176@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
19177 return ret ?: regs->orig_ax;
19178 }
19179
19180-asmregparm void syscall_trace_leave(struct pt_regs *regs)
19181+void syscall_trace_leave(struct pt_regs *regs)
19182 {
19183 if (unlikely(current->audit_context))
19184 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
19185diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19186index cf98100..e76e03d 100644
19187--- a/arch/x86/kernel/reboot.c
19188+++ b/arch/x86/kernel/reboot.c
19189@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
19190 EXPORT_SYMBOL(pm_power_off);
19191
19192 static const struct desc_ptr no_idt = {};
19193-static int reboot_mode;
19194+static unsigned short reboot_mode;
19195 enum reboot_type reboot_type = BOOT_KBD;
19196 int reboot_force;
19197
19198@@ -292,12 +292,12 @@ core_initcall(reboot_init);
19199 controller to pulse the CPU reset line, which is more thorough, but
19200 doesn't work with at least one type of 486 motherboard. It is easy
19201 to stop this code working; hence the copious comments. */
19202-static const unsigned long long
19203-real_mode_gdt_entries [3] =
19204+static struct desc_struct
19205+real_mode_gdt_entries [3] __read_only =
19206 {
19207- 0x0000000000000000ULL, /* Null descriptor */
19208- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
19209- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
19210+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
19211+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
19212+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
19213 };
19214
19215 static const struct desc_ptr
19216@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
19217 * specified by the code and length parameters.
19218 * We assume that length will aways be less that 100!
19219 */
19220-void machine_real_restart(const unsigned char *code, int length)
19221+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19222 {
19223 local_irq_disable();
19224
19225@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19226 /* Remap the kernel at virtual address zero, as well as offset zero
19227 from the kernel segment. This assumes the kernel segment starts at
19228 virtual address PAGE_OFFSET. */
19229- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19230- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19231+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19232+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19233
19234 /*
19235 * Use `swapper_pg_dir' as our page directory.
19236@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19237 boot)". This seems like a fairly standard thing that gets set by
19238 REBOOT.COM programs, and the previous reset routine did this
19239 too. */
19240- *((unsigned short *)0x472) = reboot_mode;
19241+ *(unsigned short *)(__va(0x472)) = reboot_mode;
19242
19243 /* For the switch to real mode, copy some code to low memory. It has
19244 to be in the first 64k because it is running in 16-bit mode, and it
19245 has to have the same physical and virtual address, because it turns
19246 off paging. Copy it near the end of the first page, out of the way
19247 of BIOS variables. */
19248- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19249- real_mode_switch, sizeof (real_mode_switch));
19250- memcpy((void *)(0x1000 - 100), code, length);
19251+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19252+ memcpy(__va(0x1000 - 100), code, length);
19253
19254 /* Set up the IDT for real mode. */
19255 load_idt(&real_mode_idt);
19256@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19257 __asm__ __volatile__ ("ljmp $0x0008,%0"
19258 :
19259 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19260+ do { } while (1);
19261 }
19262 #ifdef CONFIG_APM_MODULE
19263 EXPORT_SYMBOL(machine_real_restart);
19264@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19265 {
19266 }
19267
19268-static void native_machine_emergency_restart(void)
19269+__noreturn static void native_machine_emergency_restart(void)
19270 {
19271 int i;
19272
19273@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19274 #endif
19275 }
19276
19277-static void __machine_emergency_restart(int emergency)
19278+static __noreturn void __machine_emergency_restart(int emergency)
19279 {
19280 reboot_emergency = emergency;
19281 machine_ops.emergency_restart();
19282 }
19283
19284-static void native_machine_restart(char *__unused)
19285+static __noreturn void native_machine_restart(char *__unused)
19286 {
19287 printk("machine restart\n");
19288
19289@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19290 __machine_emergency_restart(0);
19291 }
19292
19293-static void native_machine_halt(void)
19294+static __noreturn void native_machine_halt(void)
19295 {
19296 /* stop other cpus and apics */
19297 machine_shutdown();
19298@@ -685,7 +685,7 @@ static void native_machine_halt(void)
19299 stop_this_cpu(NULL);
19300 }
19301
19302-static void native_machine_power_off(void)
19303+__noreturn static void native_machine_power_off(void)
19304 {
19305 if (pm_power_off) {
19306 if (!reboot_force)
19307@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19308 }
19309 /* a fallback in case there is no PM info available */
19310 tboot_shutdown(TB_SHUTDOWN_HALT);
19311+ do { } while (1);
19312 }
19313
19314 struct machine_ops machine_ops = {
19315diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19316index 7a6f3b3..976a959 100644
19317--- a/arch/x86/kernel/relocate_kernel_64.S
19318+++ b/arch/x86/kernel/relocate_kernel_64.S
19319@@ -11,6 +11,7 @@
19320 #include <asm/kexec.h>
19321 #include <asm/processor-flags.h>
19322 #include <asm/pgtable_types.h>
19323+#include <asm/alternative-asm.h>
19324
19325 /*
19326 * Must be relocatable PIC code callable as a C function
19327@@ -167,6 +168,7 @@ identity_mapped:
19328 xorq %r14, %r14
19329 xorq %r15, %r15
19330
19331+ pax_force_retaddr 0, 1
19332 ret
19333
19334 1:
19335diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19336index 5449a26..0b6c759 100644
19337--- a/arch/x86/kernel/setup.c
19338+++ b/arch/x86/kernel/setup.c
19339@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19340
19341 if (!boot_params.hdr.root_flags)
19342 root_mountflags &= ~MS_RDONLY;
19343- init_mm.start_code = (unsigned long) _text;
19344- init_mm.end_code = (unsigned long) _etext;
19345+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19346+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19347 init_mm.end_data = (unsigned long) _edata;
19348 init_mm.brk = _brk_end;
19349
19350- code_resource.start = virt_to_phys(_text);
19351- code_resource.end = virt_to_phys(_etext)-1;
19352- data_resource.start = virt_to_phys(_etext);
19353+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19354+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19355+ data_resource.start = virt_to_phys(_sdata);
19356 data_resource.end = virt_to_phys(_edata)-1;
19357 bss_resource.start = virt_to_phys(&__bss_start);
19358 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19359diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19360index d559af9..524c6ad 100644
19361--- a/arch/x86/kernel/setup_percpu.c
19362+++ b/arch/x86/kernel/setup_percpu.c
19363@@ -25,19 +25,17 @@
19364 # define DBG(x...)
19365 #endif
19366
19367-DEFINE_PER_CPU(int, cpu_number);
19368+#ifdef CONFIG_SMP
19369+DEFINE_PER_CPU(unsigned int, cpu_number);
19370 EXPORT_PER_CPU_SYMBOL(cpu_number);
19371+#endif
19372
19373-#ifdef CONFIG_X86_64
19374 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19375-#else
19376-#define BOOT_PERCPU_OFFSET 0
19377-#endif
19378
19379 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19380 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19381
19382-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19383+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19384 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19385 };
19386 EXPORT_SYMBOL(__per_cpu_offset);
19387@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19388 {
19389 #ifdef CONFIG_X86_32
19390 struct desc_struct gdt;
19391+ unsigned long base = per_cpu_offset(cpu);
19392
19393- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19394- 0x2 | DESCTYPE_S, 0x8);
19395- gdt.s = 1;
19396+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19397+ 0x83 | DESCTYPE_S, 0xC);
19398 write_gdt_entry(get_cpu_gdt_table(cpu),
19399 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19400 #endif
19401@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19402 /* alrighty, percpu areas up and running */
19403 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19404 for_each_possible_cpu(cpu) {
19405+#ifdef CONFIG_CC_STACKPROTECTOR
19406+#ifdef CONFIG_X86_32
19407+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19408+#endif
19409+#endif
19410 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19411 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19412 per_cpu(cpu_number, cpu) = cpu;
19413@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19414 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19415 #endif
19416 #endif
19417+#ifdef CONFIG_CC_STACKPROTECTOR
19418+#ifdef CONFIG_X86_32
19419+ if (!cpu)
19420+ per_cpu(stack_canary.canary, cpu) = canary;
19421+#endif
19422+#endif
19423 /*
19424 * Up to this point, the boot CPU has been using .data.init
19425 * area. Reload any changed state for the boot CPU.
19426diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19427index 6a44a76..a9287a1 100644
19428--- a/arch/x86/kernel/signal.c
19429+++ b/arch/x86/kernel/signal.c
19430@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19431 * Align the stack pointer according to the i386 ABI,
19432 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19433 */
19434- sp = ((sp + 4) & -16ul) - 4;
19435+ sp = ((sp - 12) & -16ul) - 4;
19436 #else /* !CONFIG_X86_32 */
19437 sp = round_down(sp, 16) - 8;
19438 #endif
19439@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19440 * Return an always-bogus address instead so we will die with SIGSEGV.
19441 */
19442 if (onsigstack && !likely(on_sig_stack(sp)))
19443- return (void __user *)-1L;
19444+ return (__force void __user *)-1L;
19445
19446 /* save i387 state */
19447 if (used_math() && save_i387_xstate(*fpstate) < 0)
19448- return (void __user *)-1L;
19449+ return (__force void __user *)-1L;
19450
19451 return (void __user *)sp;
19452 }
19453@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19454 }
19455
19456 if (current->mm->context.vdso)
19457- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19458+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19459 else
19460- restorer = &frame->retcode;
19461+ restorer = (void __user *)&frame->retcode;
19462 if (ka->sa.sa_flags & SA_RESTORER)
19463 restorer = ka->sa.sa_restorer;
19464
19465@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19466 * reasons and because gdb uses it as a signature to notice
19467 * signal handler stack frames.
19468 */
19469- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19470+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19471
19472 if (err)
19473 return -EFAULT;
19474@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19475 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19476
19477 /* Set up to return from userspace. */
19478- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19479+ if (current->mm->context.vdso)
19480+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19481+ else
19482+ restorer = (void __user *)&frame->retcode;
19483 if (ka->sa.sa_flags & SA_RESTORER)
19484 restorer = ka->sa.sa_restorer;
19485 put_user_ex(restorer, &frame->pretcode);
19486@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19487 * reasons and because gdb uses it as a signature to notice
19488 * signal handler stack frames.
19489 */
19490- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19491+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19492 } put_user_catch(err);
19493
19494 if (err)
19495@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19496 int signr;
19497 sigset_t *oldset;
19498
19499+ pax_track_stack();
19500+
19501 /*
19502 * We want the common case to go fast, which is why we may in certain
19503 * cases get here from kernel mode. Just return without doing anything
19504@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19505 * X86_32: vm86 regs switched out by assembly code before reaching
19506 * here, so testing against kernel CS suffices.
19507 */
19508- if (!user_mode(regs))
19509+ if (!user_mode_novm(regs))
19510 return;
19511
19512 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19513diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19514index 7e8e905..64d5c32 100644
19515--- a/arch/x86/kernel/smpboot.c
19516+++ b/arch/x86/kernel/smpboot.c
19517@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19518 */
19519 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19520
19521-void cpu_hotplug_driver_lock()
19522+void cpu_hotplug_driver_lock(void)
19523 {
19524- mutex_lock(&x86_cpu_hotplug_driver_mutex);
19525+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
19526 }
19527
19528-void cpu_hotplug_driver_unlock()
19529+void cpu_hotplug_driver_unlock(void)
19530 {
19531- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19532+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19533 }
19534
19535 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19536@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19537 * target processor state.
19538 */
19539 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19540- (unsigned long)stack_start.sp);
19541+ stack_start);
19542
19543 /*
19544 * Run STARTUP IPI loop.
19545@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19546 set_idle_for_cpu(cpu, c_idle.idle);
19547 do_rest:
19548 per_cpu(current_task, cpu) = c_idle.idle;
19549+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19550 #ifdef CONFIG_X86_32
19551 /* Stack for startup_32 can be just as for start_secondary onwards */
19552 irq_ctx_init(cpu);
19553@@ -750,13 +751,15 @@ do_rest:
19554 #else
19555 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19556 initial_gs = per_cpu_offset(cpu);
19557- per_cpu(kernel_stack, cpu) =
19558- (unsigned long)task_stack_page(c_idle.idle) -
19559- KERNEL_STACK_OFFSET + THREAD_SIZE;
19560+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19561 #endif
19562+
19563+ pax_open_kernel();
19564 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19565+ pax_close_kernel();
19566+
19567 initial_code = (unsigned long)start_secondary;
19568- stack_start.sp = (void *) c_idle.idle->thread.sp;
19569+ stack_start = c_idle.idle->thread.sp;
19570
19571 /* start_ip had better be page-aligned! */
19572 start_ip = setup_trampoline();
19573@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19574
19575 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19576
19577+#ifdef CONFIG_PAX_PER_CPU_PGD
19578+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19579+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19580+ KERNEL_PGD_PTRS);
19581+#endif
19582+
19583 err = do_boot_cpu(apicid, cpu);
19584
19585 if (err) {
19586diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19587index 3149032..14f1053 100644
19588--- a/arch/x86/kernel/step.c
19589+++ b/arch/x86/kernel/step.c
19590@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19591 struct desc_struct *desc;
19592 unsigned long base;
19593
19594- seg &= ~7UL;
19595+ seg >>= 3;
19596
19597 mutex_lock(&child->mm->context.lock);
19598- if (unlikely((seg >> 3) >= child->mm->context.size))
19599+ if (unlikely(seg >= child->mm->context.size))
19600 addr = -1L; /* bogus selector, access would fault */
19601 else {
19602 desc = child->mm->context.ldt + seg;
19603@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19604 addr += base;
19605 }
19606 mutex_unlock(&child->mm->context.lock);
19607- }
19608+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19609+ addr = ktla_ktva(addr);
19610
19611 return addr;
19612 }
19613@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19614 unsigned char opcode[15];
19615 unsigned long addr = convert_ip_to_linear(child, regs);
19616
19617+ if (addr == -EINVAL)
19618+ return 0;
19619+
19620 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19621 for (i = 0; i < copied; i++) {
19622 switch (opcode[i]) {
19623@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19624
19625 #ifdef CONFIG_X86_64
19626 case 0x40 ... 0x4f:
19627- if (regs->cs != __USER_CS)
19628+ if ((regs->cs & 0xffff) != __USER_CS)
19629 /* 32-bit mode: register increment */
19630 return 0;
19631 /* 64-bit mode: REX prefix */
19632diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19633index dee1ff7..a397f7f 100644
19634--- a/arch/x86/kernel/sys_i386_32.c
19635+++ b/arch/x86/kernel/sys_i386_32.c
19636@@ -24,6 +24,21 @@
19637
19638 #include <asm/syscalls.h>
19639
19640+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19641+{
19642+ unsigned long pax_task_size = TASK_SIZE;
19643+
19644+#ifdef CONFIG_PAX_SEGMEXEC
19645+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19646+ pax_task_size = SEGMEXEC_TASK_SIZE;
19647+#endif
19648+
19649+ if (len > pax_task_size || addr > pax_task_size - len)
19650+ return -EINVAL;
19651+
19652+ return 0;
19653+}
19654+
19655 /*
19656 * Perform the select(nd, in, out, ex, tv) and mmap() system
19657 * calls. Linux/i386 didn't use to be able to handle more than
19658@@ -58,6 +73,212 @@ out:
19659 return err;
19660 }
19661
19662+unsigned long
19663+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19664+ unsigned long len, unsigned long pgoff, unsigned long flags)
19665+{
19666+ struct mm_struct *mm = current->mm;
19667+ struct vm_area_struct *vma;
19668+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19669+
19670+#ifdef CONFIG_PAX_SEGMEXEC
19671+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19672+ pax_task_size = SEGMEXEC_TASK_SIZE;
19673+#endif
19674+
19675+ pax_task_size -= PAGE_SIZE;
19676+
19677+ if (len > pax_task_size)
19678+ return -ENOMEM;
19679+
19680+ if (flags & MAP_FIXED)
19681+ return addr;
19682+
19683+#ifdef CONFIG_PAX_RANDMMAP
19684+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19685+#endif
19686+
19687+ if (addr) {
19688+ addr = PAGE_ALIGN(addr);
19689+ if (pax_task_size - len >= addr) {
19690+ vma = find_vma(mm, addr);
19691+ if (check_heap_stack_gap(vma, addr, len))
19692+ return addr;
19693+ }
19694+ }
19695+ if (len > mm->cached_hole_size) {
19696+ start_addr = addr = mm->free_area_cache;
19697+ } else {
19698+ start_addr = addr = mm->mmap_base;
19699+ mm->cached_hole_size = 0;
19700+ }
19701+
19702+#ifdef CONFIG_PAX_PAGEEXEC
19703+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19704+ start_addr = 0x00110000UL;
19705+
19706+#ifdef CONFIG_PAX_RANDMMAP
19707+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19708+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19709+#endif
19710+
19711+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19712+ start_addr = addr = mm->mmap_base;
19713+ else
19714+ addr = start_addr;
19715+ }
19716+#endif
19717+
19718+full_search:
19719+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19720+ /* At this point: (!vma || addr < vma->vm_end). */
19721+ if (pax_task_size - len < addr) {
19722+ /*
19723+ * Start a new search - just in case we missed
19724+ * some holes.
19725+ */
19726+ if (start_addr != mm->mmap_base) {
19727+ start_addr = addr = mm->mmap_base;
19728+ mm->cached_hole_size = 0;
19729+ goto full_search;
19730+ }
19731+ return -ENOMEM;
19732+ }
19733+ if (check_heap_stack_gap(vma, addr, len))
19734+ break;
19735+ if (addr + mm->cached_hole_size < vma->vm_start)
19736+ mm->cached_hole_size = vma->vm_start - addr;
19737+ addr = vma->vm_end;
19738+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19739+ start_addr = addr = mm->mmap_base;
19740+ mm->cached_hole_size = 0;
19741+ goto full_search;
19742+ }
19743+ }
19744+
19745+ /*
19746+ * Remember the place where we stopped the search:
19747+ */
19748+ mm->free_area_cache = addr + len;
19749+ return addr;
19750+}
19751+
19752+unsigned long
19753+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19754+ const unsigned long len, const unsigned long pgoff,
19755+ const unsigned long flags)
19756+{
19757+ struct vm_area_struct *vma;
19758+ struct mm_struct *mm = current->mm;
19759+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19760+
19761+#ifdef CONFIG_PAX_SEGMEXEC
19762+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19763+ pax_task_size = SEGMEXEC_TASK_SIZE;
19764+#endif
19765+
19766+ pax_task_size -= PAGE_SIZE;
19767+
19768+ /* requested length too big for entire address space */
19769+ if (len > pax_task_size)
19770+ return -ENOMEM;
19771+
19772+ if (flags & MAP_FIXED)
19773+ return addr;
19774+
19775+#ifdef CONFIG_PAX_PAGEEXEC
19776+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19777+ goto bottomup;
19778+#endif
19779+
19780+#ifdef CONFIG_PAX_RANDMMAP
19781+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19782+#endif
19783+
19784+ /* requesting a specific address */
19785+ if (addr) {
19786+ addr = PAGE_ALIGN(addr);
19787+ if (pax_task_size - len >= addr) {
19788+ vma = find_vma(mm, addr);
19789+ if (check_heap_stack_gap(vma, addr, len))
19790+ return addr;
19791+ }
19792+ }
19793+
19794+ /* check if free_area_cache is useful for us */
19795+ if (len <= mm->cached_hole_size) {
19796+ mm->cached_hole_size = 0;
19797+ mm->free_area_cache = mm->mmap_base;
19798+ }
19799+
19800+ /* either no address requested or can't fit in requested address hole */
19801+ addr = mm->free_area_cache;
19802+
19803+ /* make sure it can fit in the remaining address space */
19804+ if (addr > len) {
19805+ vma = find_vma(mm, addr-len);
19806+ if (check_heap_stack_gap(vma, addr - len, len))
19807+ /* remember the address as a hint for next time */
19808+ return (mm->free_area_cache = addr-len);
19809+ }
19810+
19811+ if (mm->mmap_base < len)
19812+ goto bottomup;
19813+
19814+ addr = mm->mmap_base-len;
19815+
19816+ do {
19817+ /*
19818+ * Lookup failure means no vma is above this address,
19819+ * else if new region fits below vma->vm_start,
19820+ * return with success:
19821+ */
19822+ vma = find_vma(mm, addr);
19823+ if (check_heap_stack_gap(vma, addr, len))
19824+ /* remember the address as a hint for next time */
19825+ return (mm->free_area_cache = addr);
19826+
19827+ /* remember the largest hole we saw so far */
19828+ if (addr + mm->cached_hole_size < vma->vm_start)
19829+ mm->cached_hole_size = vma->vm_start - addr;
19830+
19831+ /* try just below the current vma->vm_start */
19832+ addr = skip_heap_stack_gap(vma, len);
19833+ } while (!IS_ERR_VALUE(addr));
19834+
19835+bottomup:
19836+ /*
19837+ * A failed mmap() very likely causes application failure,
19838+ * so fall back to the bottom-up function here. This scenario
19839+ * can happen with large stack limits and large mmap()
19840+ * allocations.
19841+ */
19842+
19843+#ifdef CONFIG_PAX_SEGMEXEC
19844+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19845+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19846+ else
19847+#endif
19848+
19849+ mm->mmap_base = TASK_UNMAPPED_BASE;
19850+
19851+#ifdef CONFIG_PAX_RANDMMAP
19852+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19853+ mm->mmap_base += mm->delta_mmap;
19854+#endif
19855+
19856+ mm->free_area_cache = mm->mmap_base;
19857+ mm->cached_hole_size = ~0UL;
19858+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19859+ /*
19860+ * Restore the topdown base:
19861+ */
19862+ mm->mmap_base = base;
19863+ mm->free_area_cache = base;
19864+ mm->cached_hole_size = ~0UL;
19865+
19866+ return addr;
19867+}
19868
19869 struct sel_arg_struct {
19870 unsigned long n;
19871@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19872 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19873 case SEMTIMEDOP:
19874 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19875- (const struct timespec __user *)fifth);
19876+ (__force const struct timespec __user *)fifth);
19877
19878 case SEMGET:
19879 return sys_semget(first, second, third);
19880@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19881 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19882 if (ret)
19883 return ret;
19884- return put_user(raddr, (ulong __user *) third);
19885+ return put_user(raddr, (__force ulong __user *) third);
19886 }
19887 case 1: /* iBCS2 emulator entry point */
19888 if (!segment_eq(get_fs(), get_ds()))
19889@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19890
19891 return error;
19892 }
19893-
19894-
19895-/*
19896- * Do a system call from kernel instead of calling sys_execve so we
19897- * end up with proper pt_regs.
19898- */
19899-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19900-{
19901- long __res;
19902- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19903- : "=a" (__res)
19904- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19905- return __res;
19906-}
19907diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19908index 8aa2057..b604bc1 100644
19909--- a/arch/x86/kernel/sys_x86_64.c
19910+++ b/arch/x86/kernel/sys_x86_64.c
19911@@ -32,8 +32,8 @@ out:
19912 return error;
19913 }
19914
19915-static void find_start_end(unsigned long flags, unsigned long *begin,
19916- unsigned long *end)
19917+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19918+ unsigned long *begin, unsigned long *end)
19919 {
19920 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19921 unsigned long new_begin;
19922@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19923 *begin = new_begin;
19924 }
19925 } else {
19926- *begin = TASK_UNMAPPED_BASE;
19927+ *begin = mm->mmap_base;
19928 *end = TASK_SIZE;
19929 }
19930 }
19931@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19932 if (flags & MAP_FIXED)
19933 return addr;
19934
19935- find_start_end(flags, &begin, &end);
19936+ find_start_end(mm, flags, &begin, &end);
19937
19938 if (len > end)
19939 return -ENOMEM;
19940
19941+#ifdef CONFIG_PAX_RANDMMAP
19942+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19943+#endif
19944+
19945 if (addr) {
19946 addr = PAGE_ALIGN(addr);
19947 vma = find_vma(mm, addr);
19948- if (end - len >= addr &&
19949- (!vma || addr + len <= vma->vm_start))
19950+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19951 return addr;
19952 }
19953 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19954@@ -106,7 +109,7 @@ full_search:
19955 }
19956 return -ENOMEM;
19957 }
19958- if (!vma || addr + len <= vma->vm_start) {
19959+ if (check_heap_stack_gap(vma, addr, len)) {
19960 /*
19961 * Remember the place where we stopped the search:
19962 */
19963@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19964 {
19965 struct vm_area_struct *vma;
19966 struct mm_struct *mm = current->mm;
19967- unsigned long addr = addr0;
19968+ unsigned long base = mm->mmap_base, addr = addr0;
19969
19970 /* requested length too big for entire address space */
19971 if (len > TASK_SIZE)
19972@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19973 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19974 goto bottomup;
19975
19976+#ifdef CONFIG_PAX_RANDMMAP
19977+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19978+#endif
19979+
19980 /* requesting a specific address */
19981 if (addr) {
19982 addr = PAGE_ALIGN(addr);
19983- vma = find_vma(mm, addr);
19984- if (TASK_SIZE - len >= addr &&
19985- (!vma || addr + len <= vma->vm_start))
19986- return addr;
19987+ if (TASK_SIZE - len >= addr) {
19988+ vma = find_vma(mm, addr);
19989+ if (check_heap_stack_gap(vma, addr, len))
19990+ return addr;
19991+ }
19992 }
19993
19994 /* check if free_area_cache is useful for us */
19995@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19996 /* make sure it can fit in the remaining address space */
19997 if (addr > len) {
19998 vma = find_vma(mm, addr-len);
19999- if (!vma || addr <= vma->vm_start)
20000+ if (check_heap_stack_gap(vma, addr - len, len))
20001 /* remember the address as a hint for next time */
20002 return mm->free_area_cache = addr-len;
20003 }
20004@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20005 * return with success:
20006 */
20007 vma = find_vma(mm, addr);
20008- if (!vma || addr+len <= vma->vm_start)
20009+ if (check_heap_stack_gap(vma, addr, len))
20010 /* remember the address as a hint for next time */
20011 return mm->free_area_cache = addr;
20012
20013@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20014 mm->cached_hole_size = vma->vm_start - addr;
20015
20016 /* try just below the current vma->vm_start */
20017- addr = vma->vm_start-len;
20018- } while (len < vma->vm_start);
20019+ addr = skip_heap_stack_gap(vma, len);
20020+ } while (!IS_ERR_VALUE(addr));
20021
20022 bottomup:
20023 /*
20024@@ -198,13 +206,21 @@ bottomup:
20025 * can happen with large stack limits and large mmap()
20026 * allocations.
20027 */
20028+ mm->mmap_base = TASK_UNMAPPED_BASE;
20029+
20030+#ifdef CONFIG_PAX_RANDMMAP
20031+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20032+ mm->mmap_base += mm->delta_mmap;
20033+#endif
20034+
20035+ mm->free_area_cache = mm->mmap_base;
20036 mm->cached_hole_size = ~0UL;
20037- mm->free_area_cache = TASK_UNMAPPED_BASE;
20038 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20039 /*
20040 * Restore the topdown base:
20041 */
20042- mm->free_area_cache = mm->mmap_base;
20043+ mm->mmap_base = base;
20044+ mm->free_area_cache = base;
20045 mm->cached_hole_size = ~0UL;
20046
20047 return addr;
20048diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
20049index 76d70a4..4c94a44 100644
20050--- a/arch/x86/kernel/syscall_table_32.S
20051+++ b/arch/x86/kernel/syscall_table_32.S
20052@@ -1,3 +1,4 @@
20053+.section .rodata,"a",@progbits
20054 ENTRY(sys_call_table)
20055 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
20056 .long sys_exit
20057diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20058index 46b8277..3349d55 100644
20059--- a/arch/x86/kernel/tboot.c
20060+++ b/arch/x86/kernel/tboot.c
20061@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
20062
20063 void tboot_shutdown(u32 shutdown_type)
20064 {
20065- void (*shutdown)(void);
20066+ void (* __noreturn shutdown)(void);
20067
20068 if (!tboot_enabled())
20069 return;
20070@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
20071
20072 switch_to_tboot_pt();
20073
20074- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20075+ shutdown = (void *)tboot->shutdown_entry;
20076 shutdown();
20077
20078 /* should not reach here */
20079@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20080 tboot_shutdown(acpi_shutdown_map[sleep_state]);
20081 }
20082
20083-static atomic_t ap_wfs_count;
20084+static atomic_unchecked_t ap_wfs_count;
20085
20086 static int tboot_wait_for_aps(int num_aps)
20087 {
20088@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20089 {
20090 switch (action) {
20091 case CPU_DYING:
20092- atomic_inc(&ap_wfs_count);
20093+ atomic_inc_unchecked(&ap_wfs_count);
20094 if (num_online_cpus() == 1)
20095- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20096+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20097 return NOTIFY_BAD;
20098 break;
20099 }
20100@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
20101
20102 tboot_create_trampoline();
20103
20104- atomic_set(&ap_wfs_count, 0);
20105+ atomic_set_unchecked(&ap_wfs_count, 0);
20106 register_hotcpu_notifier(&tboot_cpu_notifier);
20107 return 0;
20108 }
20109diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20110index be25734..87fe232 100644
20111--- a/arch/x86/kernel/time.c
20112+++ b/arch/x86/kernel/time.c
20113@@ -26,17 +26,13 @@
20114 int timer_ack;
20115 #endif
20116
20117-#ifdef CONFIG_X86_64
20118-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
20119-#endif
20120-
20121 unsigned long profile_pc(struct pt_regs *regs)
20122 {
20123 unsigned long pc = instruction_pointer(regs);
20124
20125- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
20126+ if (!user_mode(regs) && in_lock_functions(pc)) {
20127 #ifdef CONFIG_FRAME_POINTER
20128- return *(unsigned long *)(regs->bp + sizeof(long));
20129+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
20130 #else
20131 unsigned long *sp =
20132 (unsigned long *)kernel_stack_pointer(regs);
20133@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
20134 * or above a saved flags. Eflags has bits 22-31 zero,
20135 * kernel addresses don't.
20136 */
20137+
20138+#ifdef CONFIG_PAX_KERNEXEC
20139+ return ktla_ktva(sp[0]);
20140+#else
20141 if (sp[0] >> 22)
20142 return sp[0];
20143 if (sp[1] >> 22)
20144 return sp[1];
20145 #endif
20146+
20147+#endif
20148 }
20149 return pc;
20150 }
20151diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
20152index 6bb7b85..dd853e1 100644
20153--- a/arch/x86/kernel/tls.c
20154+++ b/arch/x86/kernel/tls.c
20155@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
20156 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
20157 return -EINVAL;
20158
20159+#ifdef CONFIG_PAX_SEGMEXEC
20160+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
20161+ return -EINVAL;
20162+#endif
20163+
20164 set_tls_desc(p, idx, &info, 1);
20165
20166 return 0;
20167diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
20168index 8508237..229b664 100644
20169--- a/arch/x86/kernel/trampoline_32.S
20170+++ b/arch/x86/kernel/trampoline_32.S
20171@@ -32,6 +32,12 @@
20172 #include <asm/segment.h>
20173 #include <asm/page_types.h>
20174
20175+#ifdef CONFIG_PAX_KERNEXEC
20176+#define ta(X) (X)
20177+#else
20178+#define ta(X) ((X) - __PAGE_OFFSET)
20179+#endif
20180+
20181 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
20182 __CPUINITRODATA
20183 .code16
20184@@ -60,7 +66,7 @@ r_base = .
20185 inc %ax # protected mode (PE) bit
20186 lmsw %ax # into protected mode
20187 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20188- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20189+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
20190
20191 # These need to be in the same 64K segment as the above;
20192 # hence we don't use the boot_gdt_descr defined in head.S
20193diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20194index 3af2dff..ba8aa49 100644
20195--- a/arch/x86/kernel/trampoline_64.S
20196+++ b/arch/x86/kernel/trampoline_64.S
20197@@ -91,7 +91,7 @@ startup_32:
20198 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20199 movl %eax, %ds
20200
20201- movl $X86_CR4_PAE, %eax
20202+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20203 movl %eax, %cr4 # Enable PAE mode
20204
20205 # Setup trampoline 4 level pagetables
20206@@ -127,7 +127,7 @@ startup_64:
20207 no_longmode:
20208 hlt
20209 jmp no_longmode
20210-#include "verify_cpu_64.S"
20211+#include "verify_cpu.S"
20212
20213 # Careful these need to be in the same 64K segment as the above;
20214 tidt:
20215@@ -138,7 +138,7 @@ tidt:
20216 # so the kernel can live anywhere
20217 .balign 4
20218 tgdt:
20219- .short tgdt_end - tgdt # gdt limit
20220+ .short tgdt_end - tgdt - 1 # gdt limit
20221 .long tgdt - r_base
20222 .short 0
20223 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20224diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20225index 7e37dce..ec3f8e5 100644
20226--- a/arch/x86/kernel/traps.c
20227+++ b/arch/x86/kernel/traps.c
20228@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20229
20230 /* Do we ignore FPU interrupts ? */
20231 char ignore_fpu_irq;
20232-
20233-/*
20234- * The IDT has to be page-aligned to simplify the Pentium
20235- * F0 0F bug workaround.
20236- */
20237-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20238 #endif
20239
20240 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20241@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20242 static inline void
20243 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20244 {
20245- if (!user_mode_vm(regs))
20246+ if (!user_mode(regs))
20247 die(str, regs, err);
20248 }
20249 #endif
20250
20251 static void __kprobes
20252-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20253+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20254 long error_code, siginfo_t *info)
20255 {
20256 struct task_struct *tsk = current;
20257
20258 #ifdef CONFIG_X86_32
20259- if (regs->flags & X86_VM_MASK) {
20260+ if (v8086_mode(regs)) {
20261 /*
20262 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20263 * On nmi (interrupt 2), do_trap should not be called.
20264@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20265 }
20266 #endif
20267
20268- if (!user_mode(regs))
20269+ if (!user_mode_novm(regs))
20270 goto kernel_trap;
20271
20272 #ifdef CONFIG_X86_32
20273@@ -158,7 +152,7 @@ trap_signal:
20274 printk_ratelimit()) {
20275 printk(KERN_INFO
20276 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20277- tsk->comm, tsk->pid, str,
20278+ tsk->comm, task_pid_nr(tsk), str,
20279 regs->ip, regs->sp, error_code);
20280 print_vma_addr(" in ", regs->ip);
20281 printk("\n");
20282@@ -175,8 +169,20 @@ kernel_trap:
20283 if (!fixup_exception(regs)) {
20284 tsk->thread.error_code = error_code;
20285 tsk->thread.trap_no = trapnr;
20286+
20287+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20288+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20289+ str = "PAX: suspicious stack segment fault";
20290+#endif
20291+
20292 die(str, regs, error_code);
20293 }
20294+
20295+#ifdef CONFIG_PAX_REFCOUNT
20296+ if (trapnr == 4)
20297+ pax_report_refcount_overflow(regs);
20298+#endif
20299+
20300 return;
20301
20302 #ifdef CONFIG_X86_32
20303@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20304 conditional_sti(regs);
20305
20306 #ifdef CONFIG_X86_32
20307- if (regs->flags & X86_VM_MASK)
20308+ if (v8086_mode(regs))
20309 goto gp_in_vm86;
20310 #endif
20311
20312 tsk = current;
20313- if (!user_mode(regs))
20314+ if (!user_mode_novm(regs))
20315 goto gp_in_kernel;
20316
20317+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20318+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20319+ struct mm_struct *mm = tsk->mm;
20320+ unsigned long limit;
20321+
20322+ down_write(&mm->mmap_sem);
20323+ limit = mm->context.user_cs_limit;
20324+ if (limit < TASK_SIZE) {
20325+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20326+ up_write(&mm->mmap_sem);
20327+ return;
20328+ }
20329+ up_write(&mm->mmap_sem);
20330+ }
20331+#endif
20332+
20333 tsk->thread.error_code = error_code;
20334 tsk->thread.trap_no = 13;
20335
20336@@ -305,6 +327,13 @@ gp_in_kernel:
20337 if (notify_die(DIE_GPF, "general protection fault", regs,
20338 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20339 return;
20340+
20341+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20342+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20343+ die("PAX: suspicious general protection fault", regs, error_code);
20344+ else
20345+#endif
20346+
20347 die("general protection fault", regs, error_code);
20348 }
20349
20350@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20351 dotraplinkage notrace __kprobes void
20352 do_nmi(struct pt_regs *regs, long error_code)
20353 {
20354+
20355+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20356+ if (!user_mode(regs)) {
20357+ unsigned long cs = regs->cs & 0xFFFF;
20358+ unsigned long ip = ktva_ktla(regs->ip);
20359+
20360+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20361+ regs->ip = ip;
20362+ }
20363+#endif
20364+
20365 nmi_enter();
20366
20367 inc_irq_stat(__nmi_count);
20368@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20369 }
20370
20371 #ifdef CONFIG_X86_32
20372- if (regs->flags & X86_VM_MASK)
20373+ if (v8086_mode(regs))
20374 goto debug_vm86;
20375 #endif
20376
20377@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20378 * kernel space (but re-enable TF when returning to user mode).
20379 */
20380 if (condition & DR_STEP) {
20381- if (!user_mode(regs))
20382+ if (!user_mode_novm(regs))
20383 goto clear_TF_reenable;
20384 }
20385
20386@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20387 * Handle strange cache flush from user space exception
20388 * in all other cases. This is undocumented behaviour.
20389 */
20390- if (regs->flags & X86_VM_MASK) {
20391+ if (v8086_mode(regs)) {
20392 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20393 return;
20394 }
20395@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20396 void __math_state_restore(void)
20397 {
20398 struct thread_info *thread = current_thread_info();
20399- struct task_struct *tsk = thread->task;
20400+ struct task_struct *tsk = current;
20401
20402 /*
20403 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20404@@ -825,8 +865,7 @@ void __math_state_restore(void)
20405 */
20406 asmlinkage void math_state_restore(void)
20407 {
20408- struct thread_info *thread = current_thread_info();
20409- struct task_struct *tsk = thread->task;
20410+ struct task_struct *tsk = current;
20411
20412 if (!tsk_used_math(tsk)) {
20413 local_irq_enable();
20414diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20415new file mode 100644
20416index 0000000..50c5edd
20417--- /dev/null
20418+++ b/arch/x86/kernel/verify_cpu.S
20419@@ -0,0 +1,140 @@
20420+/*
20421+ *
20422+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
20423+ * code has been borrowed from boot/setup.S and was introduced by
20424+ * Andi Kleen.
20425+ *
20426+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20427+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20428+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20429+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20430+ *
20431+ * This source code is licensed under the GNU General Public License,
20432+ * Version 2. See the file COPYING for more details.
20433+ *
20434+ * This is a common code for verification whether CPU supports
20435+ * long mode and SSE or not. It is not called directly instead this
20436+ * file is included at various places and compiled in that context.
20437+ * This file is expected to run in 32bit code. Currently:
20438+ *
20439+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20440+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
20441+ * arch/x86/kernel/head_32.S: processor startup
20442+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20443+ *
20444+ * verify_cpu, returns the status of longmode and SSE in register %eax.
20445+ * 0: Success 1: Failure
20446+ *
20447+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20448+ *
20449+ * The caller needs to check for the error code and take the action
20450+ * appropriately. Either display a message or halt.
20451+ */
20452+
20453+#include <asm/cpufeature.h>
20454+#include <asm/msr-index.h>
20455+
20456+verify_cpu:
20457+ pushfl # Save caller passed flags
20458+ pushl $0 # Kill any dangerous flags
20459+ popfl
20460+
20461+ pushfl # standard way to check for cpuid
20462+ popl %eax
20463+ movl %eax,%ebx
20464+ xorl $0x200000,%eax
20465+ pushl %eax
20466+ popfl
20467+ pushfl
20468+ popl %eax
20469+ cmpl %eax,%ebx
20470+ jz verify_cpu_no_longmode # cpu has no cpuid
20471+
20472+ movl $0x0,%eax # See if cpuid 1 is implemented
20473+ cpuid
20474+ cmpl $0x1,%eax
20475+ jb verify_cpu_no_longmode # no cpuid 1
20476+
20477+ xor %di,%di
20478+ cmpl $0x68747541,%ebx # AuthenticAMD
20479+ jnz verify_cpu_noamd
20480+ cmpl $0x69746e65,%edx
20481+ jnz verify_cpu_noamd
20482+ cmpl $0x444d4163,%ecx
20483+ jnz verify_cpu_noamd
20484+ mov $1,%di # cpu is from AMD
20485+ jmp verify_cpu_check
20486+
20487+verify_cpu_noamd:
20488+ cmpl $0x756e6547,%ebx # GenuineIntel?
20489+ jnz verify_cpu_check
20490+ cmpl $0x49656e69,%edx
20491+ jnz verify_cpu_check
20492+ cmpl $0x6c65746e,%ecx
20493+ jnz verify_cpu_check
20494+
20495+ # only call IA32_MISC_ENABLE when:
20496+ # family > 6 || (family == 6 && model >= 0xd)
20497+ movl $0x1, %eax # check CPU family and model
20498+ cpuid
20499+ movl %eax, %ecx
20500+
20501+ andl $0x0ff00f00, %eax # mask family and extended family
20502+ shrl $8, %eax
20503+ cmpl $6, %eax
20504+ ja verify_cpu_clear_xd # family > 6, ok
20505+ jb verify_cpu_check # family < 6, skip
20506+
20507+ andl $0x000f00f0, %ecx # mask model and extended model
20508+ shrl $4, %ecx
20509+ cmpl $0xd, %ecx
20510+ jb verify_cpu_check # family == 6, model < 0xd, skip
20511+
20512+verify_cpu_clear_xd:
20513+ movl $MSR_IA32_MISC_ENABLE, %ecx
20514+ rdmsr
20515+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20516+ jnc verify_cpu_check # only write MSR if bit was changed
20517+ wrmsr
20518+
20519+verify_cpu_check:
20520+ movl $0x1,%eax # Does the cpu have what it takes
20521+ cpuid
20522+ andl $REQUIRED_MASK0,%edx
20523+ xorl $REQUIRED_MASK0,%edx
20524+ jnz verify_cpu_no_longmode
20525+
20526+ movl $0x80000000,%eax # See if extended cpuid is implemented
20527+ cpuid
20528+ cmpl $0x80000001,%eax
20529+ jb verify_cpu_no_longmode # no extended cpuid
20530+
20531+ movl $0x80000001,%eax # Does the cpu have what it takes
20532+ cpuid
20533+ andl $REQUIRED_MASK1,%edx
20534+ xorl $REQUIRED_MASK1,%edx
20535+ jnz verify_cpu_no_longmode
20536+
20537+verify_cpu_sse_test:
20538+ movl $1,%eax
20539+ cpuid
20540+ andl $SSE_MASK,%edx
20541+ cmpl $SSE_MASK,%edx
20542+ je verify_cpu_sse_ok
20543+ test %di,%di
20544+ jz verify_cpu_no_longmode # only try to force SSE on AMD
20545+ movl $MSR_K7_HWCR,%ecx
20546+ rdmsr
20547+ btr $15,%eax # enable SSE
20548+ wrmsr
20549+ xor %di,%di # don't loop
20550+ jmp verify_cpu_sse_test # try again
20551+
20552+verify_cpu_no_longmode:
20553+ popfl # Restore caller passed flags
20554+ movl $1,%eax
20555+ ret
20556+verify_cpu_sse_ok:
20557+ popfl # Restore caller passed flags
20558+ xorl %eax, %eax
20559+ ret
20560diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20561deleted file mode 100644
20562index 45b6f8a..0000000
20563--- a/arch/x86/kernel/verify_cpu_64.S
20564+++ /dev/null
20565@@ -1,105 +0,0 @@
20566-/*
20567- *
20568- * verify_cpu.S - Code for cpu long mode and SSE verification. This
20569- * code has been borrowed from boot/setup.S and was introduced by
20570- * Andi Kleen.
20571- *
20572- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20573- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20574- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20575- *
20576- * This source code is licensed under the GNU General Public License,
20577- * Version 2. See the file COPYING for more details.
20578- *
20579- * This is a common code for verification whether CPU supports
20580- * long mode and SSE or not. It is not called directly instead this
20581- * file is included at various places and compiled in that context.
20582- * Following are the current usage.
20583- *
20584- * This file is included by both 16bit and 32bit code.
20585- *
20586- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20587- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20588- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20589- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20590- *
20591- * verify_cpu, returns the status of cpu check in register %eax.
20592- * 0: Success 1: Failure
20593- *
20594- * The caller needs to check for the error code and take the action
20595- * appropriately. Either display a message or halt.
20596- */
20597-
20598-#include <asm/cpufeature.h>
20599-
20600-verify_cpu:
20601- pushfl # Save caller passed flags
20602- pushl $0 # Kill any dangerous flags
20603- popfl
20604-
20605- pushfl # standard way to check for cpuid
20606- popl %eax
20607- movl %eax,%ebx
20608- xorl $0x200000,%eax
20609- pushl %eax
20610- popfl
20611- pushfl
20612- popl %eax
20613- cmpl %eax,%ebx
20614- jz verify_cpu_no_longmode # cpu has no cpuid
20615-
20616- movl $0x0,%eax # See if cpuid 1 is implemented
20617- cpuid
20618- cmpl $0x1,%eax
20619- jb verify_cpu_no_longmode # no cpuid 1
20620-
20621- xor %di,%di
20622- cmpl $0x68747541,%ebx # AuthenticAMD
20623- jnz verify_cpu_noamd
20624- cmpl $0x69746e65,%edx
20625- jnz verify_cpu_noamd
20626- cmpl $0x444d4163,%ecx
20627- jnz verify_cpu_noamd
20628- mov $1,%di # cpu is from AMD
20629-
20630-verify_cpu_noamd:
20631- movl $0x1,%eax # Does the cpu have what it takes
20632- cpuid
20633- andl $REQUIRED_MASK0,%edx
20634- xorl $REQUIRED_MASK0,%edx
20635- jnz verify_cpu_no_longmode
20636-
20637- movl $0x80000000,%eax # See if extended cpuid is implemented
20638- cpuid
20639- cmpl $0x80000001,%eax
20640- jb verify_cpu_no_longmode # no extended cpuid
20641-
20642- movl $0x80000001,%eax # Does the cpu have what it takes
20643- cpuid
20644- andl $REQUIRED_MASK1,%edx
20645- xorl $REQUIRED_MASK1,%edx
20646- jnz verify_cpu_no_longmode
20647-
20648-verify_cpu_sse_test:
20649- movl $1,%eax
20650- cpuid
20651- andl $SSE_MASK,%edx
20652- cmpl $SSE_MASK,%edx
20653- je verify_cpu_sse_ok
20654- test %di,%di
20655- jz verify_cpu_no_longmode # only try to force SSE on AMD
20656- movl $0xc0010015,%ecx # HWCR
20657- rdmsr
20658- btr $15,%eax # enable SSE
20659- wrmsr
20660- xor %di,%di # don't loop
20661- jmp verify_cpu_sse_test # try again
20662-
20663-verify_cpu_no_longmode:
20664- popfl # Restore caller passed flags
20665- movl $1,%eax
20666- ret
20667-verify_cpu_sse_ok:
20668- popfl # Restore caller passed flags
20669- xorl %eax, %eax
20670- ret
20671diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20672index 9c4e625..c992817 100644
20673--- a/arch/x86/kernel/vm86_32.c
20674+++ b/arch/x86/kernel/vm86_32.c
20675@@ -41,6 +41,7 @@
20676 #include <linux/ptrace.h>
20677 #include <linux/audit.h>
20678 #include <linux/stddef.h>
20679+#include <linux/grsecurity.h>
20680
20681 #include <asm/uaccess.h>
20682 #include <asm/io.h>
20683@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20684 do_exit(SIGSEGV);
20685 }
20686
20687- tss = &per_cpu(init_tss, get_cpu());
20688+ tss = init_tss + get_cpu();
20689 current->thread.sp0 = current->thread.saved_sp0;
20690 current->thread.sysenter_cs = __KERNEL_CS;
20691 load_sp0(tss, &current->thread);
20692@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20693 struct task_struct *tsk;
20694 int tmp, ret = -EPERM;
20695
20696+#ifdef CONFIG_GRKERNSEC_VM86
20697+ if (!capable(CAP_SYS_RAWIO)) {
20698+ gr_handle_vm86();
20699+ goto out;
20700+ }
20701+#endif
20702+
20703 tsk = current;
20704 if (tsk->thread.saved_sp0)
20705 goto out;
20706@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20707 int tmp, ret;
20708 struct vm86plus_struct __user *v86;
20709
20710+#ifdef CONFIG_GRKERNSEC_VM86
20711+ if (!capable(CAP_SYS_RAWIO)) {
20712+ gr_handle_vm86();
20713+ ret = -EPERM;
20714+ goto out;
20715+ }
20716+#endif
20717+
20718 tsk = current;
20719 switch (regs->bx) {
20720 case VM86_REQUEST_IRQ:
20721@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20722 tsk->thread.saved_fs = info->regs32->fs;
20723 tsk->thread.saved_gs = get_user_gs(info->regs32);
20724
20725- tss = &per_cpu(init_tss, get_cpu());
20726+ tss = init_tss + get_cpu();
20727 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20728 if (cpu_has_sep)
20729 tsk->thread.sysenter_cs = 0;
20730@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20731 goto cannot_handle;
20732 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20733 goto cannot_handle;
20734- intr_ptr = (unsigned long __user *) (i << 2);
20735+ intr_ptr = (__force unsigned long __user *) (i << 2);
20736 if (get_user(segoffs, intr_ptr))
20737 goto cannot_handle;
20738 if ((segoffs >> 16) == BIOSSEG)
20739diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20740index d430e4c..831f817 100644
20741--- a/arch/x86/kernel/vmi_32.c
20742+++ b/arch/x86/kernel/vmi_32.c
20743@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20744 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20745
20746 #define call_vrom_func(rom,func) \
20747- (((VROMFUNC *)(rom->func))())
20748+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
20749
20750 #define call_vrom_long_func(rom,func,arg) \
20751- (((VROMLONGFUNC *)(rom->func)) (arg))
20752+({\
20753+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20754+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20755+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20756+ __reloc;\
20757+})
20758
20759-static struct vrom_header *vmi_rom;
20760+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20761 static int disable_pge;
20762 static int disable_pse;
20763 static int disable_sep;
20764@@ -76,10 +81,10 @@ static struct {
20765 void (*set_initial_ap_state)(int, int);
20766 void (*halt)(void);
20767 void (*set_lazy_mode)(int mode);
20768-} vmi_ops;
20769+} __no_const vmi_ops __read_only;
20770
20771 /* Cached VMI operations */
20772-struct vmi_timer_ops vmi_timer_ops;
20773+struct vmi_timer_ops vmi_timer_ops __read_only;
20774
20775 /*
20776 * VMI patching routines.
20777@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20778 static inline void patch_offset(void *insnbuf,
20779 unsigned long ip, unsigned long dest)
20780 {
20781- *(unsigned long *)(insnbuf+1) = dest-ip-5;
20782+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
20783 }
20784
20785 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20786@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20787 {
20788 u64 reloc;
20789 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20790+
20791 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20792 switch(rel->type) {
20793 case VMI_RELOCATION_CALL_REL:
20794@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20795
20796 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20797 {
20798- const pte_t pte = { .pte = 0 };
20799+ const pte_t pte = __pte(0ULL);
20800 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20801 }
20802
20803 static void vmi_pmd_clear(pmd_t *pmd)
20804 {
20805- const pte_t pte = { .pte = 0 };
20806+ const pte_t pte = __pte(0ULL);
20807 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20808 }
20809 #endif
20810@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20811 ap.ss = __KERNEL_DS;
20812 ap.esp = (unsigned long) start_esp;
20813
20814- ap.ds = __USER_DS;
20815- ap.es = __USER_DS;
20816+ ap.ds = __KERNEL_DS;
20817+ ap.es = __KERNEL_DS;
20818 ap.fs = __KERNEL_PERCPU;
20819- ap.gs = __KERNEL_STACK_CANARY;
20820+ savesegment(gs, ap.gs);
20821
20822 ap.eflags = 0;
20823
20824@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20825 paravirt_leave_lazy_mmu();
20826 }
20827
20828+#ifdef CONFIG_PAX_KERNEXEC
20829+static unsigned long vmi_pax_open_kernel(void)
20830+{
20831+ return 0;
20832+}
20833+
20834+static unsigned long vmi_pax_close_kernel(void)
20835+{
20836+ return 0;
20837+}
20838+#endif
20839+
20840 static inline int __init check_vmi_rom(struct vrom_header *rom)
20841 {
20842 struct pci_header *pci;
20843@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20844 return 0;
20845 if (rom->vrom_signature != VMI_SIGNATURE)
20846 return 0;
20847+ if (rom->rom_length * 512 > sizeof(*rom)) {
20848+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20849+ return 0;
20850+ }
20851 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20852 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20853 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20854@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20855 struct vrom_header *romstart;
20856 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20857 if (check_vmi_rom(romstart)) {
20858- vmi_rom = romstart;
20859+ vmi_rom = *romstart;
20860 return 1;
20861 }
20862 }
20863@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20864
20865 para_fill(pv_irq_ops.safe_halt, Halt);
20866
20867+#ifdef CONFIG_PAX_KERNEXEC
20868+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20869+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20870+#endif
20871+
20872 /*
20873 * Alternative instruction rewriting doesn't happen soon enough
20874 * to convert VMI_IRET to a call instead of a jump; so we have
20875@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20876
20877 void __init vmi_init(void)
20878 {
20879- if (!vmi_rom)
20880+ if (!vmi_rom.rom_signature)
20881 probe_vmi_rom();
20882 else
20883- check_vmi_rom(vmi_rom);
20884+ check_vmi_rom(&vmi_rom);
20885
20886 /* In case probing for or validating the ROM failed, basil */
20887- if (!vmi_rom)
20888+ if (!vmi_rom.rom_signature)
20889 return;
20890
20891- reserve_top_address(-vmi_rom->virtual_top);
20892+ reserve_top_address(-vmi_rom.virtual_top);
20893
20894 #ifdef CONFIG_X86_IO_APIC
20895 /* This is virtual hardware; timer routing is wired correctly */
20896@@ -874,7 +901,7 @@ void __init vmi_activate(void)
20897 {
20898 unsigned long flags;
20899
20900- if (!vmi_rom)
20901+ if (!vmi_rom.rom_signature)
20902 return;
20903
20904 local_irq_save(flags);
20905diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20906index 3c68fe2..12c8280 100644
20907--- a/arch/x86/kernel/vmlinux.lds.S
20908+++ b/arch/x86/kernel/vmlinux.lds.S
20909@@ -26,6 +26,13 @@
20910 #include <asm/page_types.h>
20911 #include <asm/cache.h>
20912 #include <asm/boot.h>
20913+#include <asm/segment.h>
20914+
20915+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20916+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20917+#else
20918+#define __KERNEL_TEXT_OFFSET 0
20919+#endif
20920
20921 #undef i386 /* in case the preprocessor is a 32bit one */
20922
20923@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20924 #ifdef CONFIG_X86_32
20925 OUTPUT_ARCH(i386)
20926 ENTRY(phys_startup_32)
20927-jiffies = jiffies_64;
20928 #else
20929 OUTPUT_ARCH(i386:x86-64)
20930 ENTRY(phys_startup_64)
20931-jiffies_64 = jiffies;
20932 #endif
20933
20934 PHDRS {
20935 text PT_LOAD FLAGS(5); /* R_E */
20936- data PT_LOAD FLAGS(7); /* RWE */
20937+#ifdef CONFIG_X86_32
20938+ module PT_LOAD FLAGS(5); /* R_E */
20939+#endif
20940+#ifdef CONFIG_XEN
20941+ rodata PT_LOAD FLAGS(5); /* R_E */
20942+#else
20943+ rodata PT_LOAD FLAGS(4); /* R__ */
20944+#endif
20945+ data PT_LOAD FLAGS(6); /* RW_ */
20946 #ifdef CONFIG_X86_64
20947 user PT_LOAD FLAGS(5); /* R_E */
20948+#endif
20949+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20950 #ifdef CONFIG_SMP
20951 percpu PT_LOAD FLAGS(6); /* RW_ */
20952 #endif
20953+ text.init PT_LOAD FLAGS(5); /* R_E */
20954+ text.exit PT_LOAD FLAGS(5); /* R_E */
20955 init PT_LOAD FLAGS(7); /* RWE */
20956-#endif
20957 note PT_NOTE FLAGS(0); /* ___ */
20958 }
20959
20960 SECTIONS
20961 {
20962 #ifdef CONFIG_X86_32
20963- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20964- phys_startup_32 = startup_32 - LOAD_OFFSET;
20965+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20966 #else
20967- . = __START_KERNEL;
20968- phys_startup_64 = startup_64 - LOAD_OFFSET;
20969+ . = __START_KERNEL;
20970 #endif
20971
20972 /* Text and read-only data */
20973- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20974- _text = .;
20975+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20976 /* bootstrapping code */
20977+#ifdef CONFIG_X86_32
20978+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20979+#else
20980+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20981+#endif
20982+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20983+ _text = .;
20984 HEAD_TEXT
20985 #ifdef CONFIG_X86_32
20986 . = ALIGN(PAGE_SIZE);
20987@@ -82,28 +102,71 @@ SECTIONS
20988 IRQENTRY_TEXT
20989 *(.fixup)
20990 *(.gnu.warning)
20991- /* End of text section */
20992- _etext = .;
20993 } :text = 0x9090
20994
20995- NOTES :text :note
20996+ . += __KERNEL_TEXT_OFFSET;
20997
20998- EXCEPTION_TABLE(16) :text = 0x9090
20999+#ifdef CONFIG_X86_32
21000+ . = ALIGN(PAGE_SIZE);
21001+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
21002+ *(.vmi.rom)
21003+ } :module
21004+
21005+ . = ALIGN(PAGE_SIZE);
21006+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21007+
21008+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21009+ MODULES_EXEC_VADDR = .;
21010+ BYTE(0)
21011+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21012+ . = ALIGN(HPAGE_SIZE);
21013+ MODULES_EXEC_END = . - 1;
21014+#endif
21015+
21016+ } :module
21017+#endif
21018+
21019+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21020+ /* End of text section */
21021+ _etext = . - __KERNEL_TEXT_OFFSET;
21022+ }
21023+
21024+#ifdef CONFIG_X86_32
21025+ . = ALIGN(PAGE_SIZE);
21026+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21027+ *(.idt)
21028+ . = ALIGN(PAGE_SIZE);
21029+ *(.empty_zero_page)
21030+ *(.swapper_pg_fixmap)
21031+ *(.swapper_pg_pmd)
21032+ *(.swapper_pg_dir)
21033+ *(.trampoline_pg_dir)
21034+ } :rodata
21035+#endif
21036+
21037+ . = ALIGN(PAGE_SIZE);
21038+ NOTES :rodata :note
21039+
21040+ EXCEPTION_TABLE(16) :rodata
21041
21042 RO_DATA(PAGE_SIZE)
21043
21044 /* Data */
21045 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21046+
21047+#ifdef CONFIG_PAX_KERNEXEC
21048+ . = ALIGN(HPAGE_SIZE);
21049+#else
21050+ . = ALIGN(PAGE_SIZE);
21051+#endif
21052+
21053 /* Start of data section */
21054 _sdata = .;
21055
21056 /* init_task */
21057 INIT_TASK_DATA(THREAD_SIZE)
21058
21059-#ifdef CONFIG_X86_32
21060- /* 32 bit has nosave before _edata */
21061 NOSAVE_DATA
21062-#endif
21063
21064 PAGE_ALIGNED_DATA(PAGE_SIZE)
21065
21066@@ -112,6 +175,8 @@ SECTIONS
21067 DATA_DATA
21068 CONSTRUCTORS
21069
21070+ jiffies = jiffies_64;
21071+
21072 /* rarely changed data like cpu maps */
21073 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
21074
21075@@ -166,12 +231,6 @@ SECTIONS
21076 }
21077 vgetcpu_mode = VVIRT(.vgetcpu_mode);
21078
21079- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
21080- .jiffies : AT(VLOAD(.jiffies)) {
21081- *(.jiffies)
21082- }
21083- jiffies = VVIRT(.jiffies);
21084-
21085 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
21086 *(.vsyscall_3)
21087 }
21088@@ -187,12 +246,19 @@ SECTIONS
21089 #endif /* CONFIG_X86_64 */
21090
21091 /* Init code and data - will be freed after init */
21092- . = ALIGN(PAGE_SIZE);
21093 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21094+ BYTE(0)
21095+
21096+#ifdef CONFIG_PAX_KERNEXEC
21097+ . = ALIGN(HPAGE_SIZE);
21098+#else
21099+ . = ALIGN(PAGE_SIZE);
21100+#endif
21101+
21102 __init_begin = .; /* paired with __init_end */
21103- }
21104+ } :init.begin
21105
21106-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21107+#ifdef CONFIG_SMP
21108 /*
21109 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21110 * output PHDR, so the next output section - .init.text - should
21111@@ -201,12 +267,27 @@ SECTIONS
21112 PERCPU_VADDR(0, :percpu)
21113 #endif
21114
21115- INIT_TEXT_SECTION(PAGE_SIZE)
21116-#ifdef CONFIG_X86_64
21117- :init
21118-#endif
21119+ . = ALIGN(PAGE_SIZE);
21120+ init_begin = .;
21121+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21122+ VMLINUX_SYMBOL(_sinittext) = .;
21123+ INIT_TEXT
21124+ VMLINUX_SYMBOL(_einittext) = .;
21125+ . = ALIGN(PAGE_SIZE);
21126+ } :text.init
21127
21128- INIT_DATA_SECTION(16)
21129+ /*
21130+ * .exit.text is discard at runtime, not link time, to deal with
21131+ * references from .altinstructions and .eh_frame
21132+ */
21133+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21134+ EXIT_TEXT
21135+ . = ALIGN(16);
21136+ } :text.exit
21137+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21138+
21139+ . = ALIGN(PAGE_SIZE);
21140+ INIT_DATA_SECTION(16) :init
21141
21142 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21143 __x86_cpu_dev_start = .;
21144@@ -232,19 +313,11 @@ SECTIONS
21145 *(.altinstr_replacement)
21146 }
21147
21148- /*
21149- * .exit.text is discard at runtime, not link time, to deal with
21150- * references from .altinstructions and .eh_frame
21151- */
21152- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21153- EXIT_TEXT
21154- }
21155-
21156 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21157 EXIT_DATA
21158 }
21159
21160-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21161+#ifndef CONFIG_SMP
21162 PERCPU(PAGE_SIZE)
21163 #endif
21164
21165@@ -267,12 +340,6 @@ SECTIONS
21166 . = ALIGN(PAGE_SIZE);
21167 }
21168
21169-#ifdef CONFIG_X86_64
21170- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21171- NOSAVE_DATA
21172- }
21173-#endif
21174-
21175 /* BSS */
21176 . = ALIGN(PAGE_SIZE);
21177 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21178@@ -288,6 +355,7 @@ SECTIONS
21179 __brk_base = .;
21180 . += 64 * 1024; /* 64k alignment slop space */
21181 *(.brk_reservation) /* areas brk users have reserved */
21182+ . = ALIGN(HPAGE_SIZE);
21183 __brk_limit = .;
21184 }
21185
21186@@ -316,13 +384,12 @@ SECTIONS
21187 * for the boot processor.
21188 */
21189 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
21190-INIT_PER_CPU(gdt_page);
21191 INIT_PER_CPU(irq_stack_union);
21192
21193 /*
21194 * Build-time check on the image size:
21195 */
21196-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21197+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21198 "kernel image bigger than KERNEL_IMAGE_SIZE");
21199
21200 #ifdef CONFIG_SMP
21201diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21202index 62f39d7..3bc46a1 100644
21203--- a/arch/x86/kernel/vsyscall_64.c
21204+++ b/arch/x86/kernel/vsyscall_64.c
21205@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
21206
21207 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
21208 /* copy vsyscall data */
21209+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
21210 vsyscall_gtod_data.clock.vread = clock->vread;
21211 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
21212 vsyscall_gtod_data.clock.mask = clock->mask;
21213@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
21214 We do this here because otherwise user space would do it on
21215 its own in a likely inferior way (no access to jiffies).
21216 If you don't like it pass NULL. */
21217- if (tcache && tcache->blob[0] == (j = __jiffies)) {
21218+ if (tcache && tcache->blob[0] == (j = jiffies)) {
21219 p = tcache->blob[1];
21220 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21221 /* Load per CPU data from RDTSCP */
21222diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21223index 3909e3b..5433a97 100644
21224--- a/arch/x86/kernel/x8664_ksyms_64.c
21225+++ b/arch/x86/kernel/x8664_ksyms_64.c
21226@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21227
21228 EXPORT_SYMBOL(copy_user_generic);
21229 EXPORT_SYMBOL(__copy_user_nocache);
21230-EXPORT_SYMBOL(copy_from_user);
21231-EXPORT_SYMBOL(copy_to_user);
21232 EXPORT_SYMBOL(__copy_from_user_inatomic);
21233
21234 EXPORT_SYMBOL(copy_page);
21235diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21236index c5ee17e..d63218f 100644
21237--- a/arch/x86/kernel/xsave.c
21238+++ b/arch/x86/kernel/xsave.c
21239@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21240 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21241 return -1;
21242
21243- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21244+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21245 fx_sw_user->extended_size -
21246 FP_XSTATE_MAGIC2_SIZE));
21247 /*
21248@@ -196,7 +196,7 @@ fx_only:
21249 * the other extended state.
21250 */
21251 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21252- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21253+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21254 }
21255
21256 /*
21257@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21258 if (task_thread_info(tsk)->status & TS_XSAVE)
21259 err = restore_user_xstate(buf);
21260 else
21261- err = fxrstor_checking((__force struct i387_fxsave_struct *)
21262+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
21263 buf);
21264 if (unlikely(err)) {
21265 /*
21266diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21267index 1350e43..a94b011 100644
21268--- a/arch/x86/kvm/emulate.c
21269+++ b/arch/x86/kvm/emulate.c
21270@@ -81,8 +81,8 @@
21271 #define Src2CL (1<<29)
21272 #define Src2ImmByte (2<<29)
21273 #define Src2One (3<<29)
21274-#define Src2Imm16 (4<<29)
21275-#define Src2Mask (7<<29)
21276+#define Src2Imm16 (4U<<29)
21277+#define Src2Mask (7U<<29)
21278
21279 enum {
21280 Group1_80, Group1_81, Group1_82, Group1_83,
21281@@ -411,6 +411,7 @@ static u32 group2_table[] = {
21282
21283 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21284 do { \
21285+ unsigned long _tmp; \
21286 __asm__ __volatile__ ( \
21287 _PRE_EFLAGS("0", "4", "2") \
21288 _op _suffix " %"_x"3,%1; " \
21289@@ -424,8 +425,6 @@ static u32 group2_table[] = {
21290 /* Raw emulation: instruction has two explicit operands. */
21291 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21292 do { \
21293- unsigned long _tmp; \
21294- \
21295 switch ((_dst).bytes) { \
21296 case 2: \
21297 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21298@@ -441,7 +440,6 @@ static u32 group2_table[] = {
21299
21300 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21301 do { \
21302- unsigned long _tmp; \
21303 switch ((_dst).bytes) { \
21304 case 1: \
21305 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21306diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21307index 8dfeaaa..4daa395 100644
21308--- a/arch/x86/kvm/lapic.c
21309+++ b/arch/x86/kvm/lapic.c
21310@@ -52,7 +52,7 @@
21311 #define APIC_BUS_CYCLE_NS 1
21312
21313 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21314-#define apic_debug(fmt, arg...)
21315+#define apic_debug(fmt, arg...) do {} while (0)
21316
21317 #define APIC_LVT_NUM 6
21318 /* 14 is the version for Xeon and Pentium 8.4.8*/
21319diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21320index 3bc2707..dd157e2 100644
21321--- a/arch/x86/kvm/paging_tmpl.h
21322+++ b/arch/x86/kvm/paging_tmpl.h
21323@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21324 int level = PT_PAGE_TABLE_LEVEL;
21325 unsigned long mmu_seq;
21326
21327+ pax_track_stack();
21328+
21329 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21330 kvm_mmu_audit(vcpu, "pre page fault");
21331
21332@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21333 kvm_mmu_free_some_pages(vcpu);
21334 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21335 level, &write_pt, pfn);
21336+ (void)sptep;
21337 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21338 sptep, *sptep, write_pt);
21339
21340diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21341index 7c6e63e..c5d92c1 100644
21342--- a/arch/x86/kvm/svm.c
21343+++ b/arch/x86/kvm/svm.c
21344@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21345 int cpu = raw_smp_processor_id();
21346
21347 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21348+
21349+ pax_open_kernel();
21350 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21351+ pax_close_kernel();
21352+
21353 load_TR_desc();
21354 }
21355
21356@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21357 return true;
21358 }
21359
21360-static struct kvm_x86_ops svm_x86_ops = {
21361+static const struct kvm_x86_ops svm_x86_ops = {
21362 .cpu_has_kvm_support = has_svm,
21363 .disabled_by_bios = is_disabled,
21364 .hardware_setup = svm_hardware_setup,
21365diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21366index e6d925f..e7a4af8 100644
21367--- a/arch/x86/kvm/vmx.c
21368+++ b/arch/x86/kvm/vmx.c
21369@@ -570,7 +570,11 @@ static void reload_tss(void)
21370
21371 kvm_get_gdt(&gdt);
21372 descs = (void *)gdt.base;
21373+
21374+ pax_open_kernel();
21375 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21376+ pax_close_kernel();
21377+
21378 load_TR_desc();
21379 }
21380
21381@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21382 if (!cpu_has_vmx_flexpriority())
21383 flexpriority_enabled = 0;
21384
21385- if (!cpu_has_vmx_tpr_shadow())
21386- kvm_x86_ops->update_cr8_intercept = NULL;
21387+ if (!cpu_has_vmx_tpr_shadow()) {
21388+ pax_open_kernel();
21389+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21390+ pax_close_kernel();
21391+ }
21392
21393 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21394 kvm_disable_largepages();
21395@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21396 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21397
21398 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21399- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21400+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21401 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21402 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21403 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21404@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21405 "jmp .Lkvm_vmx_return \n\t"
21406 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21407 ".Lkvm_vmx_return: "
21408+
21409+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21410+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21411+ ".Lkvm_vmx_return2: "
21412+#endif
21413+
21414 /* Save guest registers, load host registers, keep flags */
21415 "xchg %0, (%%"R"sp) \n\t"
21416 "mov %%"R"ax, %c[rax](%0) \n\t"
21417@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21418 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21419 #endif
21420 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21421+
21422+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21423+ ,[cs]"i"(__KERNEL_CS)
21424+#endif
21425+
21426 : "cc", "memory"
21427- , R"bx", R"di", R"si"
21428+ , R"ax", R"bx", R"di", R"si"
21429 #ifdef CONFIG_X86_64
21430 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21431 #endif
21432@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21433 if (vmx->rmode.irq.pending)
21434 fixup_rmode_irq(vmx);
21435
21436- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21437+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21438+
21439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21440+ loadsegment(fs, __KERNEL_PERCPU);
21441+#endif
21442+
21443+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21444+ __set_fs(current_thread_info()->addr_limit);
21445+#endif
21446+
21447 vmx->launched = 1;
21448
21449 vmx_complete_interrupts(vmx);
21450@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21451 return false;
21452 }
21453
21454-static struct kvm_x86_ops vmx_x86_ops = {
21455+static const struct kvm_x86_ops vmx_x86_ops = {
21456 .cpu_has_kvm_support = cpu_has_kvm_support,
21457 .disabled_by_bios = vmx_disabled_by_bios,
21458 .hardware_setup = hardware_setup,
21459diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21460index df1cefb..5e882ad 100644
21461--- a/arch/x86/kvm/x86.c
21462+++ b/arch/x86/kvm/x86.c
21463@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21464 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21465 struct kvm_cpuid_entry2 __user *entries);
21466
21467-struct kvm_x86_ops *kvm_x86_ops;
21468+const struct kvm_x86_ops *kvm_x86_ops;
21469 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21470
21471 int ignore_msrs = 0;
21472@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21473 struct kvm_cpuid2 *cpuid,
21474 struct kvm_cpuid_entry2 __user *entries)
21475 {
21476- int r;
21477+ int r, i;
21478
21479 r = -E2BIG;
21480 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21481 goto out;
21482 r = -EFAULT;
21483- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21484- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21485+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21486 goto out;
21487+ for (i = 0; i < cpuid->nent; ++i) {
21488+ struct kvm_cpuid_entry2 cpuid_entry;
21489+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21490+ goto out;
21491+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21492+ }
21493 vcpu->arch.cpuid_nent = cpuid->nent;
21494 kvm_apic_set_version(vcpu);
21495 return 0;
21496@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21497 struct kvm_cpuid2 *cpuid,
21498 struct kvm_cpuid_entry2 __user *entries)
21499 {
21500- int r;
21501+ int r, i;
21502
21503 vcpu_load(vcpu);
21504 r = -E2BIG;
21505 if (cpuid->nent < vcpu->arch.cpuid_nent)
21506 goto out;
21507 r = -EFAULT;
21508- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21509- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21510+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21511 goto out;
21512+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21513+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21514+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21515+ goto out;
21516+ }
21517 return 0;
21518
21519 out:
21520@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21521 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21522 struct kvm_interrupt *irq)
21523 {
21524- if (irq->irq < 0 || irq->irq >= 256)
21525+ if (irq->irq >= 256)
21526 return -EINVAL;
21527 if (irqchip_in_kernel(vcpu->kvm))
21528 return -ENXIO;
21529@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21530 .notifier_call = kvmclock_cpufreq_notifier
21531 };
21532
21533-int kvm_arch_init(void *opaque)
21534+int kvm_arch_init(const void *opaque)
21535 {
21536 int r, cpu;
21537- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21538+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21539
21540 if (kvm_x86_ops) {
21541 printk(KERN_ERR "kvm: already loaded the other module\n");
21542diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21543index 7e59dc1..b88c98f 100644
21544--- a/arch/x86/lguest/boot.c
21545+++ b/arch/x86/lguest/boot.c
21546@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21547 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21548 * Launcher to reboot us.
21549 */
21550-static void lguest_restart(char *reason)
21551+static __noreturn void lguest_restart(char *reason)
21552 {
21553 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21554+ BUG();
21555 }
21556
21557 /*G:050
21558diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21559index 824fa0b..c619e96 100644
21560--- a/arch/x86/lib/atomic64_32.c
21561+++ b/arch/x86/lib/atomic64_32.c
21562@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21563 }
21564 EXPORT_SYMBOL(atomic64_cmpxchg);
21565
21566+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21567+{
21568+ return cmpxchg8b(&ptr->counter, old_val, new_val);
21569+}
21570+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21571+
21572 /**
21573 * atomic64_xchg - xchg atomic64 variable
21574 * @ptr: pointer to type atomic64_t
21575@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21576 EXPORT_SYMBOL(atomic64_xchg);
21577
21578 /**
21579+ * atomic64_xchg_unchecked - xchg atomic64 variable
21580+ * @ptr: pointer to type atomic64_unchecked_t
21581+ * @new_val: value to assign
21582+ *
21583+ * Atomically xchgs the value of @ptr to @new_val and returns
21584+ * the old value.
21585+ */
21586+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21587+{
21588+ /*
21589+ * Try first with a (possibly incorrect) assumption about
21590+ * what we have there. We'll do two loops most likely,
21591+ * but we'll get an ownership MESI transaction straight away
21592+ * instead of a read transaction followed by a
21593+ * flush-for-ownership transaction:
21594+ */
21595+ u64 old_val, real_val = 0;
21596+
21597+ do {
21598+ old_val = real_val;
21599+
21600+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21601+
21602+ } while (real_val != old_val);
21603+
21604+ return old_val;
21605+}
21606+EXPORT_SYMBOL(atomic64_xchg_unchecked);
21607+
21608+/**
21609 * atomic64_set - set atomic64 variable
21610 * @ptr: pointer to type atomic64_t
21611 * @new_val: value to assign
21612@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21613 EXPORT_SYMBOL(atomic64_set);
21614
21615 /**
21616-EXPORT_SYMBOL(atomic64_read);
21617+ * atomic64_unchecked_set - set atomic64 variable
21618+ * @ptr: pointer to type atomic64_unchecked_t
21619+ * @new_val: value to assign
21620+ *
21621+ * Atomically sets the value of @ptr to @new_val.
21622+ */
21623+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21624+{
21625+ atomic64_xchg_unchecked(ptr, new_val);
21626+}
21627+EXPORT_SYMBOL(atomic64_set_unchecked);
21628+
21629+/**
21630 * atomic64_add_return - add and return
21631 * @delta: integer value to add
21632 * @ptr: pointer to type atomic64_t
21633@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21634 }
21635 EXPORT_SYMBOL(atomic64_add_return);
21636
21637+/**
21638+ * atomic64_add_return_unchecked - add and return
21639+ * @delta: integer value to add
21640+ * @ptr: pointer to type atomic64_unchecked_t
21641+ *
21642+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
21643+ */
21644+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21645+{
21646+ /*
21647+ * Try first with a (possibly incorrect) assumption about
21648+ * what we have there. We'll do two loops most likely,
21649+ * but we'll get an ownership MESI transaction straight away
21650+ * instead of a read transaction followed by a
21651+ * flush-for-ownership transaction:
21652+ */
21653+ u64 old_val, new_val, real_val = 0;
21654+
21655+ do {
21656+ old_val = real_val;
21657+ new_val = old_val + delta;
21658+
21659+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21660+
21661+ } while (real_val != old_val);
21662+
21663+ return new_val;
21664+}
21665+EXPORT_SYMBOL(atomic64_add_return_unchecked);
21666+
21667 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21668 {
21669 return atomic64_add_return(-delta, ptr);
21670 }
21671 EXPORT_SYMBOL(atomic64_sub_return);
21672
21673+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21674+{
21675+ return atomic64_add_return_unchecked(-delta, ptr);
21676+}
21677+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21678+
21679 u64 atomic64_inc_return(atomic64_t *ptr)
21680 {
21681 return atomic64_add_return(1, ptr);
21682 }
21683 EXPORT_SYMBOL(atomic64_inc_return);
21684
21685+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21686+{
21687+ return atomic64_add_return_unchecked(1, ptr);
21688+}
21689+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21690+
21691 u64 atomic64_dec_return(atomic64_t *ptr)
21692 {
21693 return atomic64_sub_return(1, ptr);
21694 }
21695 EXPORT_SYMBOL(atomic64_dec_return);
21696
21697+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21698+{
21699+ return atomic64_sub_return_unchecked(1, ptr);
21700+}
21701+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21702+
21703 /**
21704 * atomic64_add - add integer to atomic64 variable
21705 * @delta: integer value to add
21706@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21707 EXPORT_SYMBOL(atomic64_add);
21708
21709 /**
21710+ * atomic64_add_unchecked - add integer to atomic64 variable
21711+ * @delta: integer value to add
21712+ * @ptr: pointer to type atomic64_unchecked_t
21713+ *
21714+ * Atomically adds @delta to @ptr.
21715+ */
21716+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21717+{
21718+ atomic64_add_return_unchecked(delta, ptr);
21719+}
21720+EXPORT_SYMBOL(atomic64_add_unchecked);
21721+
21722+/**
21723 * atomic64_sub - subtract the atomic64 variable
21724 * @delta: integer value to subtract
21725 * @ptr: pointer to type atomic64_t
21726@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21727 EXPORT_SYMBOL(atomic64_sub);
21728
21729 /**
21730+ * atomic64_sub_unchecked - subtract the atomic64 variable
21731+ * @delta: integer value to subtract
21732+ * @ptr: pointer to type atomic64_unchecked_t
21733+ *
21734+ * Atomically subtracts @delta from @ptr.
21735+ */
21736+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21737+{
21738+ atomic64_add_unchecked(-delta, ptr);
21739+}
21740+EXPORT_SYMBOL(atomic64_sub_unchecked);
21741+
21742+/**
21743 * atomic64_sub_and_test - subtract value from variable and test result
21744 * @delta: integer value to subtract
21745 * @ptr: pointer to type atomic64_t
21746@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21747 EXPORT_SYMBOL(atomic64_inc);
21748
21749 /**
21750+ * atomic64_inc_unchecked - increment atomic64 variable
21751+ * @ptr: pointer to type atomic64_unchecked_t
21752+ *
21753+ * Atomically increments @ptr by 1.
21754+ */
21755+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21756+{
21757+ atomic64_add_unchecked(1, ptr);
21758+}
21759+EXPORT_SYMBOL(atomic64_inc_unchecked);
21760+
21761+/**
21762 * atomic64_dec - decrement atomic64 variable
21763 * @ptr: pointer to type atomic64_t
21764 *
21765@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21766 EXPORT_SYMBOL(atomic64_dec);
21767
21768 /**
21769+ * atomic64_dec_unchecked - decrement atomic64 variable
21770+ * @ptr: pointer to type atomic64_unchecked_t
21771+ *
21772+ * Atomically decrements @ptr by 1.
21773+ */
21774+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21775+{
21776+ atomic64_sub_unchecked(1, ptr);
21777+}
21778+EXPORT_SYMBOL(atomic64_dec_unchecked);
21779+
21780+/**
21781 * atomic64_dec_and_test - decrement and test
21782 * @ptr: pointer to type atomic64_t
21783 *
21784diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21785index adbccd0..98f96c8 100644
21786--- a/arch/x86/lib/checksum_32.S
21787+++ b/arch/x86/lib/checksum_32.S
21788@@ -28,7 +28,8 @@
21789 #include <linux/linkage.h>
21790 #include <asm/dwarf2.h>
21791 #include <asm/errno.h>
21792-
21793+#include <asm/segment.h>
21794+
21795 /*
21796 * computes a partial checksum, e.g. for TCP/UDP fragments
21797 */
21798@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21799
21800 #define ARGBASE 16
21801 #define FP 12
21802-
21803-ENTRY(csum_partial_copy_generic)
21804+
21805+ENTRY(csum_partial_copy_generic_to_user)
21806 CFI_STARTPROC
21807+
21808+#ifdef CONFIG_PAX_MEMORY_UDEREF
21809+ pushl %gs
21810+ CFI_ADJUST_CFA_OFFSET 4
21811+ popl %es
21812+ CFI_ADJUST_CFA_OFFSET -4
21813+ jmp csum_partial_copy_generic
21814+#endif
21815+
21816+ENTRY(csum_partial_copy_generic_from_user)
21817+
21818+#ifdef CONFIG_PAX_MEMORY_UDEREF
21819+ pushl %gs
21820+ CFI_ADJUST_CFA_OFFSET 4
21821+ popl %ds
21822+ CFI_ADJUST_CFA_OFFSET -4
21823+#endif
21824+
21825+ENTRY(csum_partial_copy_generic)
21826 subl $4,%esp
21827 CFI_ADJUST_CFA_OFFSET 4
21828 pushl %edi
21829@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21830 jmp 4f
21831 SRC(1: movw (%esi), %bx )
21832 addl $2, %esi
21833-DST( movw %bx, (%edi) )
21834+DST( movw %bx, %es:(%edi) )
21835 addl $2, %edi
21836 addw %bx, %ax
21837 adcl $0, %eax
21838@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21839 SRC(1: movl (%esi), %ebx )
21840 SRC( movl 4(%esi), %edx )
21841 adcl %ebx, %eax
21842-DST( movl %ebx, (%edi) )
21843+DST( movl %ebx, %es:(%edi) )
21844 adcl %edx, %eax
21845-DST( movl %edx, 4(%edi) )
21846+DST( movl %edx, %es:4(%edi) )
21847
21848 SRC( movl 8(%esi), %ebx )
21849 SRC( movl 12(%esi), %edx )
21850 adcl %ebx, %eax
21851-DST( movl %ebx, 8(%edi) )
21852+DST( movl %ebx, %es:8(%edi) )
21853 adcl %edx, %eax
21854-DST( movl %edx, 12(%edi) )
21855+DST( movl %edx, %es:12(%edi) )
21856
21857 SRC( movl 16(%esi), %ebx )
21858 SRC( movl 20(%esi), %edx )
21859 adcl %ebx, %eax
21860-DST( movl %ebx, 16(%edi) )
21861+DST( movl %ebx, %es:16(%edi) )
21862 adcl %edx, %eax
21863-DST( movl %edx, 20(%edi) )
21864+DST( movl %edx, %es:20(%edi) )
21865
21866 SRC( movl 24(%esi), %ebx )
21867 SRC( movl 28(%esi), %edx )
21868 adcl %ebx, %eax
21869-DST( movl %ebx, 24(%edi) )
21870+DST( movl %ebx, %es:24(%edi) )
21871 adcl %edx, %eax
21872-DST( movl %edx, 28(%edi) )
21873+DST( movl %edx, %es:28(%edi) )
21874
21875 lea 32(%esi), %esi
21876 lea 32(%edi), %edi
21877@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21878 shrl $2, %edx # This clears CF
21879 SRC(3: movl (%esi), %ebx )
21880 adcl %ebx, %eax
21881-DST( movl %ebx, (%edi) )
21882+DST( movl %ebx, %es:(%edi) )
21883 lea 4(%esi), %esi
21884 lea 4(%edi), %edi
21885 dec %edx
21886@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21887 jb 5f
21888 SRC( movw (%esi), %cx )
21889 leal 2(%esi), %esi
21890-DST( movw %cx, (%edi) )
21891+DST( movw %cx, %es:(%edi) )
21892 leal 2(%edi), %edi
21893 je 6f
21894 shll $16,%ecx
21895 SRC(5: movb (%esi), %cl )
21896-DST( movb %cl, (%edi) )
21897+DST( movb %cl, %es:(%edi) )
21898 6: addl %ecx, %eax
21899 adcl $0, %eax
21900 7:
21901@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21902
21903 6001:
21904 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21905- movl $-EFAULT, (%ebx)
21906+ movl $-EFAULT, %ss:(%ebx)
21907
21908 # zero the complete destination - computing the rest
21909 # is too much work
21910@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21911
21912 6002:
21913 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21914- movl $-EFAULT,(%ebx)
21915+ movl $-EFAULT,%ss:(%ebx)
21916 jmp 5000b
21917
21918 .previous
21919
21920+ pushl %ss
21921+ CFI_ADJUST_CFA_OFFSET 4
21922+ popl %ds
21923+ CFI_ADJUST_CFA_OFFSET -4
21924+ pushl %ss
21925+ CFI_ADJUST_CFA_OFFSET 4
21926+ popl %es
21927+ CFI_ADJUST_CFA_OFFSET -4
21928 popl %ebx
21929 CFI_ADJUST_CFA_OFFSET -4
21930 CFI_RESTORE ebx
21931@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21932 CFI_ADJUST_CFA_OFFSET -4
21933 ret
21934 CFI_ENDPROC
21935-ENDPROC(csum_partial_copy_generic)
21936+ENDPROC(csum_partial_copy_generic_to_user)
21937
21938 #else
21939
21940 /* Version for PentiumII/PPro */
21941
21942 #define ROUND1(x) \
21943+ nop; nop; nop; \
21944 SRC(movl x(%esi), %ebx ) ; \
21945 addl %ebx, %eax ; \
21946- DST(movl %ebx, x(%edi) ) ;
21947+ DST(movl %ebx, %es:x(%edi)) ;
21948
21949 #define ROUND(x) \
21950+ nop; nop; nop; \
21951 SRC(movl x(%esi), %ebx ) ; \
21952 adcl %ebx, %eax ; \
21953- DST(movl %ebx, x(%edi) ) ;
21954+ DST(movl %ebx, %es:x(%edi)) ;
21955
21956 #define ARGBASE 12
21957-
21958-ENTRY(csum_partial_copy_generic)
21959+
21960+ENTRY(csum_partial_copy_generic_to_user)
21961 CFI_STARTPROC
21962+
21963+#ifdef CONFIG_PAX_MEMORY_UDEREF
21964+ pushl %gs
21965+ CFI_ADJUST_CFA_OFFSET 4
21966+ popl %es
21967+ CFI_ADJUST_CFA_OFFSET -4
21968+ jmp csum_partial_copy_generic
21969+#endif
21970+
21971+ENTRY(csum_partial_copy_generic_from_user)
21972+
21973+#ifdef CONFIG_PAX_MEMORY_UDEREF
21974+ pushl %gs
21975+ CFI_ADJUST_CFA_OFFSET 4
21976+ popl %ds
21977+ CFI_ADJUST_CFA_OFFSET -4
21978+#endif
21979+
21980+ENTRY(csum_partial_copy_generic)
21981 pushl %ebx
21982 CFI_ADJUST_CFA_OFFSET 4
21983 CFI_REL_OFFSET ebx, 0
21984@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21985 subl %ebx, %edi
21986 lea -1(%esi),%edx
21987 andl $-32,%edx
21988- lea 3f(%ebx,%ebx), %ebx
21989+ lea 3f(%ebx,%ebx,2), %ebx
21990 testl %esi, %esi
21991 jmp *%ebx
21992 1: addl $64,%esi
21993@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21994 jb 5f
21995 SRC( movw (%esi), %dx )
21996 leal 2(%esi), %esi
21997-DST( movw %dx, (%edi) )
21998+DST( movw %dx, %es:(%edi) )
21999 leal 2(%edi), %edi
22000 je 6f
22001 shll $16,%edx
22002 5:
22003 SRC( movb (%esi), %dl )
22004-DST( movb %dl, (%edi) )
22005+DST( movb %dl, %es:(%edi) )
22006 6: addl %edx, %eax
22007 adcl $0, %eax
22008 7:
22009 .section .fixup, "ax"
22010 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22011- movl $-EFAULT, (%ebx)
22012+ movl $-EFAULT, %ss:(%ebx)
22013 # zero the complete destination (computing the rest is too much work)
22014 movl ARGBASE+8(%esp),%edi # dst
22015 movl ARGBASE+12(%esp),%ecx # len
22016@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
22017 rep; stosb
22018 jmp 7b
22019 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22020- movl $-EFAULT, (%ebx)
22021+ movl $-EFAULT, %ss:(%ebx)
22022 jmp 7b
22023 .previous
22024
22025+#ifdef CONFIG_PAX_MEMORY_UDEREF
22026+ pushl %ss
22027+ CFI_ADJUST_CFA_OFFSET 4
22028+ popl %ds
22029+ CFI_ADJUST_CFA_OFFSET -4
22030+ pushl %ss
22031+ CFI_ADJUST_CFA_OFFSET 4
22032+ popl %es
22033+ CFI_ADJUST_CFA_OFFSET -4
22034+#endif
22035+
22036 popl %esi
22037 CFI_ADJUST_CFA_OFFSET -4
22038 CFI_RESTORE esi
22039@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
22040 CFI_RESTORE ebx
22041 ret
22042 CFI_ENDPROC
22043-ENDPROC(csum_partial_copy_generic)
22044+ENDPROC(csum_partial_copy_generic_to_user)
22045
22046 #undef ROUND
22047 #undef ROUND1
22048diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22049index ebeafcc..1e3a402 100644
22050--- a/arch/x86/lib/clear_page_64.S
22051+++ b/arch/x86/lib/clear_page_64.S
22052@@ -1,5 +1,6 @@
22053 #include <linux/linkage.h>
22054 #include <asm/dwarf2.h>
22055+#include <asm/alternative-asm.h>
22056
22057 /*
22058 * Zero a page.
22059@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
22060 movl $4096/8,%ecx
22061 xorl %eax,%eax
22062 rep stosq
22063+ pax_force_retaddr
22064 ret
22065 CFI_ENDPROC
22066 ENDPROC(clear_page_c)
22067@@ -33,6 +35,7 @@ ENTRY(clear_page)
22068 leaq 64(%rdi),%rdi
22069 jnz .Lloop
22070 nop
22071+ pax_force_retaddr
22072 ret
22073 CFI_ENDPROC
22074 .Lclear_page_end:
22075@@ -43,7 +46,7 @@ ENDPROC(clear_page)
22076
22077 #include <asm/cpufeature.h>
22078
22079- .section .altinstr_replacement,"ax"
22080+ .section .altinstr_replacement,"a"
22081 1: .byte 0xeb /* jmp <disp8> */
22082 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22083 2:
22084diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22085index 727a5d4..333818a 100644
22086--- a/arch/x86/lib/copy_page_64.S
22087+++ b/arch/x86/lib/copy_page_64.S
22088@@ -2,12 +2,14 @@
22089
22090 #include <linux/linkage.h>
22091 #include <asm/dwarf2.h>
22092+#include <asm/alternative-asm.h>
22093
22094 ALIGN
22095 copy_page_c:
22096 CFI_STARTPROC
22097 movl $4096/8,%ecx
22098 rep movsq
22099+ pax_force_retaddr
22100 ret
22101 CFI_ENDPROC
22102 ENDPROC(copy_page_c)
22103@@ -38,7 +40,7 @@ ENTRY(copy_page)
22104 movq 16 (%rsi), %rdx
22105 movq 24 (%rsi), %r8
22106 movq 32 (%rsi), %r9
22107- movq 40 (%rsi), %r10
22108+ movq 40 (%rsi), %r13
22109 movq 48 (%rsi), %r11
22110 movq 56 (%rsi), %r12
22111
22112@@ -49,7 +51,7 @@ ENTRY(copy_page)
22113 movq %rdx, 16 (%rdi)
22114 movq %r8, 24 (%rdi)
22115 movq %r9, 32 (%rdi)
22116- movq %r10, 40 (%rdi)
22117+ movq %r13, 40 (%rdi)
22118 movq %r11, 48 (%rdi)
22119 movq %r12, 56 (%rdi)
22120
22121@@ -68,7 +70,7 @@ ENTRY(copy_page)
22122 movq 16 (%rsi), %rdx
22123 movq 24 (%rsi), %r8
22124 movq 32 (%rsi), %r9
22125- movq 40 (%rsi), %r10
22126+ movq 40 (%rsi), %r13
22127 movq 48 (%rsi), %r11
22128 movq 56 (%rsi), %r12
22129
22130@@ -77,7 +79,7 @@ ENTRY(copy_page)
22131 movq %rdx, 16 (%rdi)
22132 movq %r8, 24 (%rdi)
22133 movq %r9, 32 (%rdi)
22134- movq %r10, 40 (%rdi)
22135+ movq %r13, 40 (%rdi)
22136 movq %r11, 48 (%rdi)
22137 movq %r12, 56 (%rdi)
22138
22139@@ -94,6 +96,7 @@ ENTRY(copy_page)
22140 CFI_RESTORE r13
22141 addq $3*8,%rsp
22142 CFI_ADJUST_CFA_OFFSET -3*8
22143+ pax_force_retaddr
22144 ret
22145 .Lcopy_page_end:
22146 CFI_ENDPROC
22147@@ -104,7 +107,7 @@ ENDPROC(copy_page)
22148
22149 #include <asm/cpufeature.h>
22150
22151- .section .altinstr_replacement,"ax"
22152+ .section .altinstr_replacement,"a"
22153 1: .byte 0xeb /* jmp <disp8> */
22154 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22155 2:
22156diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22157index af8debd..40c75f3 100644
22158--- a/arch/x86/lib/copy_user_64.S
22159+++ b/arch/x86/lib/copy_user_64.S
22160@@ -15,13 +15,15 @@
22161 #include <asm/asm-offsets.h>
22162 #include <asm/thread_info.h>
22163 #include <asm/cpufeature.h>
22164+#include <asm/pgtable.h>
22165+#include <asm/alternative-asm.h>
22166
22167 .macro ALTERNATIVE_JUMP feature,orig,alt
22168 0:
22169 .byte 0xe9 /* 32bit jump */
22170 .long \orig-1f /* by default jump to orig */
22171 1:
22172- .section .altinstr_replacement,"ax"
22173+ .section .altinstr_replacement,"a"
22174 2: .byte 0xe9 /* near jump with 32bit immediate */
22175 .long \alt-1b /* offset */ /* or alternatively to alt */
22176 .previous
22177@@ -64,55 +66,26 @@
22178 #endif
22179 .endm
22180
22181-/* Standard copy_to_user with segment limit checking */
22182-ENTRY(copy_to_user)
22183- CFI_STARTPROC
22184- GET_THREAD_INFO(%rax)
22185- movq %rdi,%rcx
22186- addq %rdx,%rcx
22187- jc bad_to_user
22188- cmpq TI_addr_limit(%rax),%rcx
22189- ja bad_to_user
22190- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22191- CFI_ENDPROC
22192-ENDPROC(copy_to_user)
22193-
22194-/* Standard copy_from_user with segment limit checking */
22195-ENTRY(copy_from_user)
22196- CFI_STARTPROC
22197- GET_THREAD_INFO(%rax)
22198- movq %rsi,%rcx
22199- addq %rdx,%rcx
22200- jc bad_from_user
22201- cmpq TI_addr_limit(%rax),%rcx
22202- ja bad_from_user
22203- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22204- CFI_ENDPROC
22205-ENDPROC(copy_from_user)
22206-
22207 ENTRY(copy_user_generic)
22208 CFI_STARTPROC
22209 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22210 CFI_ENDPROC
22211 ENDPROC(copy_user_generic)
22212
22213-ENTRY(__copy_from_user_inatomic)
22214- CFI_STARTPROC
22215- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22216- CFI_ENDPROC
22217-ENDPROC(__copy_from_user_inatomic)
22218-
22219 .section .fixup,"ax"
22220 /* must zero dest */
22221 ENTRY(bad_from_user)
22222 bad_from_user:
22223 CFI_STARTPROC
22224+ testl %edx,%edx
22225+ js bad_to_user
22226 movl %edx,%ecx
22227 xorl %eax,%eax
22228 rep
22229 stosb
22230 bad_to_user:
22231 movl %edx,%eax
22232+ pax_force_retaddr
22233 ret
22234 CFI_ENDPROC
22235 ENDPROC(bad_from_user)
22236@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22237 jz 17f
22238 1: movq (%rsi),%r8
22239 2: movq 1*8(%rsi),%r9
22240-3: movq 2*8(%rsi),%r10
22241+3: movq 2*8(%rsi),%rax
22242 4: movq 3*8(%rsi),%r11
22243 5: movq %r8,(%rdi)
22244 6: movq %r9,1*8(%rdi)
22245-7: movq %r10,2*8(%rdi)
22246+7: movq %rax,2*8(%rdi)
22247 8: movq %r11,3*8(%rdi)
22248 9: movq 4*8(%rsi),%r8
22249 10: movq 5*8(%rsi),%r9
22250-11: movq 6*8(%rsi),%r10
22251+11: movq 6*8(%rsi),%rax
22252 12: movq 7*8(%rsi),%r11
22253 13: movq %r8,4*8(%rdi)
22254 14: movq %r9,5*8(%rdi)
22255-15: movq %r10,6*8(%rdi)
22256+15: movq %rax,6*8(%rdi)
22257 16: movq %r11,7*8(%rdi)
22258 leaq 64(%rsi),%rsi
22259 leaq 64(%rdi),%rdi
22260@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22261 decl %ecx
22262 jnz 21b
22263 23: xor %eax,%eax
22264+ pax_force_retaddr
22265 ret
22266
22267 .section .fixup,"ax"
22268@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22269 3: rep
22270 movsb
22271 4: xorl %eax,%eax
22272+ pax_force_retaddr
22273 ret
22274
22275 .section .fixup,"ax"
22276diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22277index cb0c112..e3a6895 100644
22278--- a/arch/x86/lib/copy_user_nocache_64.S
22279+++ b/arch/x86/lib/copy_user_nocache_64.S
22280@@ -8,12 +8,14 @@
22281
22282 #include <linux/linkage.h>
22283 #include <asm/dwarf2.h>
22284+#include <asm/alternative-asm.h>
22285
22286 #define FIX_ALIGNMENT 1
22287
22288 #include <asm/current.h>
22289 #include <asm/asm-offsets.h>
22290 #include <asm/thread_info.h>
22291+#include <asm/pgtable.h>
22292
22293 .macro ALIGN_DESTINATION
22294 #ifdef FIX_ALIGNMENT
22295@@ -50,6 +52,15 @@
22296 */
22297 ENTRY(__copy_user_nocache)
22298 CFI_STARTPROC
22299+
22300+#ifdef CONFIG_PAX_MEMORY_UDEREF
22301+ mov $PAX_USER_SHADOW_BASE,%rcx
22302+ cmp %rcx,%rsi
22303+ jae 1f
22304+ add %rcx,%rsi
22305+1:
22306+#endif
22307+
22308 cmpl $8,%edx
22309 jb 20f /* less then 8 bytes, go to byte copy loop */
22310 ALIGN_DESTINATION
22311@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22312 jz 17f
22313 1: movq (%rsi),%r8
22314 2: movq 1*8(%rsi),%r9
22315-3: movq 2*8(%rsi),%r10
22316+3: movq 2*8(%rsi),%rax
22317 4: movq 3*8(%rsi),%r11
22318 5: movnti %r8,(%rdi)
22319 6: movnti %r9,1*8(%rdi)
22320-7: movnti %r10,2*8(%rdi)
22321+7: movnti %rax,2*8(%rdi)
22322 8: movnti %r11,3*8(%rdi)
22323 9: movq 4*8(%rsi),%r8
22324 10: movq 5*8(%rsi),%r9
22325-11: movq 6*8(%rsi),%r10
22326+11: movq 6*8(%rsi),%rax
22327 12: movq 7*8(%rsi),%r11
22328 13: movnti %r8,4*8(%rdi)
22329 14: movnti %r9,5*8(%rdi)
22330-15: movnti %r10,6*8(%rdi)
22331+15: movnti %rax,6*8(%rdi)
22332 16: movnti %r11,7*8(%rdi)
22333 leaq 64(%rsi),%rsi
22334 leaq 64(%rdi),%rdi
22335@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22336 jnz 21b
22337 23: xorl %eax,%eax
22338 sfence
22339+ pax_force_retaddr
22340 ret
22341
22342 .section .fixup,"ax"
22343diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22344index f0dba36..48cb4d6 100644
22345--- a/arch/x86/lib/csum-copy_64.S
22346+++ b/arch/x86/lib/csum-copy_64.S
22347@@ -8,6 +8,7 @@
22348 #include <linux/linkage.h>
22349 #include <asm/dwarf2.h>
22350 #include <asm/errno.h>
22351+#include <asm/alternative-asm.h>
22352
22353 /*
22354 * Checksum copy with exception handling.
22355@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22356 CFI_RESTORE rbp
22357 addq $7*8,%rsp
22358 CFI_ADJUST_CFA_OFFSET -7*8
22359+ pax_force_retaddr 0, 1
22360 ret
22361 CFI_RESTORE_STATE
22362
22363diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22364index 459b58a..9570bc7 100644
22365--- a/arch/x86/lib/csum-wrappers_64.c
22366+++ b/arch/x86/lib/csum-wrappers_64.c
22367@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22368 len -= 2;
22369 }
22370 }
22371- isum = csum_partial_copy_generic((__force const void *)src,
22372+
22373+#ifdef CONFIG_PAX_MEMORY_UDEREF
22374+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22375+ src += PAX_USER_SHADOW_BASE;
22376+#endif
22377+
22378+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22379 dst, len, isum, errp, NULL);
22380 if (unlikely(*errp))
22381 goto out_err;
22382@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22383 }
22384
22385 *errp = 0;
22386- return csum_partial_copy_generic(src, (void __force *)dst,
22387+
22388+#ifdef CONFIG_PAX_MEMORY_UDEREF
22389+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22390+ dst += PAX_USER_SHADOW_BASE;
22391+#endif
22392+
22393+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22394 len, isum, NULL, errp);
22395 }
22396 EXPORT_SYMBOL(csum_partial_copy_to_user);
22397diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22398index 51f1504..ddac4c1 100644
22399--- a/arch/x86/lib/getuser.S
22400+++ b/arch/x86/lib/getuser.S
22401@@ -33,15 +33,38 @@
22402 #include <asm/asm-offsets.h>
22403 #include <asm/thread_info.h>
22404 #include <asm/asm.h>
22405+#include <asm/segment.h>
22406+#include <asm/pgtable.h>
22407+#include <asm/alternative-asm.h>
22408+
22409+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22410+#define __copyuser_seg gs;
22411+#else
22412+#define __copyuser_seg
22413+#endif
22414
22415 .text
22416 ENTRY(__get_user_1)
22417 CFI_STARTPROC
22418+
22419+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22420 GET_THREAD_INFO(%_ASM_DX)
22421 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22422 jae bad_get_user
22423-1: movzb (%_ASM_AX),%edx
22424+
22425+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22426+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22427+ cmp %_ASM_DX,%_ASM_AX
22428+ jae 1234f
22429+ add %_ASM_DX,%_ASM_AX
22430+1234:
22431+#endif
22432+
22433+#endif
22434+
22435+1: __copyuser_seg movzb (%_ASM_AX),%edx
22436 xor %eax,%eax
22437+ pax_force_retaddr
22438 ret
22439 CFI_ENDPROC
22440 ENDPROC(__get_user_1)
22441@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22442 ENTRY(__get_user_2)
22443 CFI_STARTPROC
22444 add $1,%_ASM_AX
22445+
22446+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22447 jc bad_get_user
22448 GET_THREAD_INFO(%_ASM_DX)
22449 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22450 jae bad_get_user
22451-2: movzwl -1(%_ASM_AX),%edx
22452+
22453+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22454+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22455+ cmp %_ASM_DX,%_ASM_AX
22456+ jae 1234f
22457+ add %_ASM_DX,%_ASM_AX
22458+1234:
22459+#endif
22460+
22461+#endif
22462+
22463+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22464 xor %eax,%eax
22465+ pax_force_retaddr
22466 ret
22467 CFI_ENDPROC
22468 ENDPROC(__get_user_2)
22469@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22470 ENTRY(__get_user_4)
22471 CFI_STARTPROC
22472 add $3,%_ASM_AX
22473+
22474+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22475 jc bad_get_user
22476 GET_THREAD_INFO(%_ASM_DX)
22477 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22478 jae bad_get_user
22479-3: mov -3(%_ASM_AX),%edx
22480+
22481+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22482+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22483+ cmp %_ASM_DX,%_ASM_AX
22484+ jae 1234f
22485+ add %_ASM_DX,%_ASM_AX
22486+1234:
22487+#endif
22488+
22489+#endif
22490+
22491+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22492 xor %eax,%eax
22493+ pax_force_retaddr
22494 ret
22495 CFI_ENDPROC
22496 ENDPROC(__get_user_4)
22497@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22498 GET_THREAD_INFO(%_ASM_DX)
22499 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22500 jae bad_get_user
22501+
22502+#ifdef CONFIG_PAX_MEMORY_UDEREF
22503+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22504+ cmp %_ASM_DX,%_ASM_AX
22505+ jae 1234f
22506+ add %_ASM_DX,%_ASM_AX
22507+1234:
22508+#endif
22509+
22510 4: movq -7(%_ASM_AX),%_ASM_DX
22511 xor %eax,%eax
22512+ pax_force_retaddr
22513 ret
22514 CFI_ENDPROC
22515 ENDPROC(__get_user_8)
22516@@ -91,6 +152,7 @@ bad_get_user:
22517 CFI_STARTPROC
22518 xor %edx,%edx
22519 mov $(-EFAULT),%_ASM_AX
22520+ pax_force_retaddr
22521 ret
22522 CFI_ENDPROC
22523 END(bad_get_user)
22524diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22525index 05a95e7..326f2fa 100644
22526--- a/arch/x86/lib/iomap_copy_64.S
22527+++ b/arch/x86/lib/iomap_copy_64.S
22528@@ -17,6 +17,7 @@
22529
22530 #include <linux/linkage.h>
22531 #include <asm/dwarf2.h>
22532+#include <asm/alternative-asm.h>
22533
22534 /*
22535 * override generic version in lib/iomap_copy.c
22536@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22537 CFI_STARTPROC
22538 movl %edx,%ecx
22539 rep movsd
22540+ pax_force_retaddr
22541 ret
22542 CFI_ENDPROC
22543 ENDPROC(__iowrite32_copy)
22544diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22545index ad5441e..610e351 100644
22546--- a/arch/x86/lib/memcpy_64.S
22547+++ b/arch/x86/lib/memcpy_64.S
22548@@ -4,6 +4,7 @@
22549
22550 #include <asm/cpufeature.h>
22551 #include <asm/dwarf2.h>
22552+#include <asm/alternative-asm.h>
22553
22554 /*
22555 * memcpy - Copy a memory block.
22556@@ -34,6 +35,7 @@ memcpy_c:
22557 rep movsq
22558 movl %edx, %ecx
22559 rep movsb
22560+ pax_force_retaddr
22561 ret
22562 CFI_ENDPROC
22563 ENDPROC(memcpy_c)
22564@@ -118,6 +120,7 @@ ENTRY(memcpy)
22565 jnz .Lloop_1
22566
22567 .Lend:
22568+ pax_force_retaddr 0, 1
22569 ret
22570 CFI_ENDPROC
22571 ENDPROC(memcpy)
22572@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22573 * It is also a lot simpler. Use this when possible:
22574 */
22575
22576- .section .altinstr_replacement, "ax"
22577+ .section .altinstr_replacement, "a"
22578 1: .byte 0xeb /* jmp <disp8> */
22579 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22580 2:
22581diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22582index 2c59481..7e9ba4e 100644
22583--- a/arch/x86/lib/memset_64.S
22584+++ b/arch/x86/lib/memset_64.S
22585@@ -2,6 +2,7 @@
22586
22587 #include <linux/linkage.h>
22588 #include <asm/dwarf2.h>
22589+#include <asm/alternative-asm.h>
22590
22591 /*
22592 * ISO C memset - set a memory block to a byte value.
22593@@ -28,6 +29,7 @@ memset_c:
22594 movl %r8d,%ecx
22595 rep stosb
22596 movq %r9,%rax
22597+ pax_force_retaddr
22598 ret
22599 CFI_ENDPROC
22600 ENDPROC(memset_c)
22601@@ -35,13 +37,13 @@ ENDPROC(memset_c)
22602 ENTRY(memset)
22603 ENTRY(__memset)
22604 CFI_STARTPROC
22605- movq %rdi,%r10
22606 movq %rdx,%r11
22607
22608 /* expand byte value */
22609 movzbl %sil,%ecx
22610 movabs $0x0101010101010101,%rax
22611 mul %rcx /* with rax, clobbers rdx */
22612+ movq %rdi,%rdx
22613
22614 /* align dst */
22615 movl %edi,%r9d
22616@@ -95,7 +97,8 @@ ENTRY(__memset)
22617 jnz .Lloop_1
22618
22619 .Lende:
22620- movq %r10,%rax
22621+ movq %rdx,%rax
22622+ pax_force_retaddr
22623 ret
22624
22625 CFI_RESTORE_STATE
22626@@ -118,7 +121,7 @@ ENDPROC(__memset)
22627
22628 #include <asm/cpufeature.h>
22629
22630- .section .altinstr_replacement,"ax"
22631+ .section .altinstr_replacement,"a"
22632 1: .byte 0xeb /* jmp <disp8> */
22633 .byte (memset_c - memset) - (2f - 1b) /* offset */
22634 2:
22635diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22636index c9f2d9b..e7fd2c0 100644
22637--- a/arch/x86/lib/mmx_32.c
22638+++ b/arch/x86/lib/mmx_32.c
22639@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22640 {
22641 void *p;
22642 int i;
22643+ unsigned long cr0;
22644
22645 if (unlikely(in_interrupt()))
22646 return __memcpy(to, from, len);
22647@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22648 kernel_fpu_begin();
22649
22650 __asm__ __volatile__ (
22651- "1: prefetch (%0)\n" /* This set is 28 bytes */
22652- " prefetch 64(%0)\n"
22653- " prefetch 128(%0)\n"
22654- " prefetch 192(%0)\n"
22655- " prefetch 256(%0)\n"
22656+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22657+ " prefetch 64(%1)\n"
22658+ " prefetch 128(%1)\n"
22659+ " prefetch 192(%1)\n"
22660+ " prefetch 256(%1)\n"
22661 "2: \n"
22662 ".section .fixup, \"ax\"\n"
22663- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22664+ "3: \n"
22665+
22666+#ifdef CONFIG_PAX_KERNEXEC
22667+ " movl %%cr0, %0\n"
22668+ " movl %0, %%eax\n"
22669+ " andl $0xFFFEFFFF, %%eax\n"
22670+ " movl %%eax, %%cr0\n"
22671+#endif
22672+
22673+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22674+
22675+#ifdef CONFIG_PAX_KERNEXEC
22676+ " movl %0, %%cr0\n"
22677+#endif
22678+
22679 " jmp 2b\n"
22680 ".previous\n"
22681 _ASM_EXTABLE(1b, 3b)
22682- : : "r" (from));
22683+ : "=&r" (cr0) : "r" (from) : "ax");
22684
22685 for ( ; i > 5; i--) {
22686 __asm__ __volatile__ (
22687- "1: prefetch 320(%0)\n"
22688- "2: movq (%0), %%mm0\n"
22689- " movq 8(%0), %%mm1\n"
22690- " movq 16(%0), %%mm2\n"
22691- " movq 24(%0), %%mm3\n"
22692- " movq %%mm0, (%1)\n"
22693- " movq %%mm1, 8(%1)\n"
22694- " movq %%mm2, 16(%1)\n"
22695- " movq %%mm3, 24(%1)\n"
22696- " movq 32(%0), %%mm0\n"
22697- " movq 40(%0), %%mm1\n"
22698- " movq 48(%0), %%mm2\n"
22699- " movq 56(%0), %%mm3\n"
22700- " movq %%mm0, 32(%1)\n"
22701- " movq %%mm1, 40(%1)\n"
22702- " movq %%mm2, 48(%1)\n"
22703- " movq %%mm3, 56(%1)\n"
22704+ "1: prefetch 320(%1)\n"
22705+ "2: movq (%1), %%mm0\n"
22706+ " movq 8(%1), %%mm1\n"
22707+ " movq 16(%1), %%mm2\n"
22708+ " movq 24(%1), %%mm3\n"
22709+ " movq %%mm0, (%2)\n"
22710+ " movq %%mm1, 8(%2)\n"
22711+ " movq %%mm2, 16(%2)\n"
22712+ " movq %%mm3, 24(%2)\n"
22713+ " movq 32(%1), %%mm0\n"
22714+ " movq 40(%1), %%mm1\n"
22715+ " movq 48(%1), %%mm2\n"
22716+ " movq 56(%1), %%mm3\n"
22717+ " movq %%mm0, 32(%2)\n"
22718+ " movq %%mm1, 40(%2)\n"
22719+ " movq %%mm2, 48(%2)\n"
22720+ " movq %%mm3, 56(%2)\n"
22721 ".section .fixup, \"ax\"\n"
22722- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22723+ "3:\n"
22724+
22725+#ifdef CONFIG_PAX_KERNEXEC
22726+ " movl %%cr0, %0\n"
22727+ " movl %0, %%eax\n"
22728+ " andl $0xFFFEFFFF, %%eax\n"
22729+ " movl %%eax, %%cr0\n"
22730+#endif
22731+
22732+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22733+
22734+#ifdef CONFIG_PAX_KERNEXEC
22735+ " movl %0, %%cr0\n"
22736+#endif
22737+
22738 " jmp 2b\n"
22739 ".previous\n"
22740 _ASM_EXTABLE(1b, 3b)
22741- : : "r" (from), "r" (to) : "memory");
22742+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22743
22744 from += 64;
22745 to += 64;
22746@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22747 static void fast_copy_page(void *to, void *from)
22748 {
22749 int i;
22750+ unsigned long cr0;
22751
22752 kernel_fpu_begin();
22753
22754@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22755 * but that is for later. -AV
22756 */
22757 __asm__ __volatile__(
22758- "1: prefetch (%0)\n"
22759- " prefetch 64(%0)\n"
22760- " prefetch 128(%0)\n"
22761- " prefetch 192(%0)\n"
22762- " prefetch 256(%0)\n"
22763+ "1: prefetch (%1)\n"
22764+ " prefetch 64(%1)\n"
22765+ " prefetch 128(%1)\n"
22766+ " prefetch 192(%1)\n"
22767+ " prefetch 256(%1)\n"
22768 "2: \n"
22769 ".section .fixup, \"ax\"\n"
22770- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22771+ "3: \n"
22772+
22773+#ifdef CONFIG_PAX_KERNEXEC
22774+ " movl %%cr0, %0\n"
22775+ " movl %0, %%eax\n"
22776+ " andl $0xFFFEFFFF, %%eax\n"
22777+ " movl %%eax, %%cr0\n"
22778+#endif
22779+
22780+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22781+
22782+#ifdef CONFIG_PAX_KERNEXEC
22783+ " movl %0, %%cr0\n"
22784+#endif
22785+
22786 " jmp 2b\n"
22787 ".previous\n"
22788- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22789+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22790
22791 for (i = 0; i < (4096-320)/64; i++) {
22792 __asm__ __volatile__ (
22793- "1: prefetch 320(%0)\n"
22794- "2: movq (%0), %%mm0\n"
22795- " movntq %%mm0, (%1)\n"
22796- " movq 8(%0), %%mm1\n"
22797- " movntq %%mm1, 8(%1)\n"
22798- " movq 16(%0), %%mm2\n"
22799- " movntq %%mm2, 16(%1)\n"
22800- " movq 24(%0), %%mm3\n"
22801- " movntq %%mm3, 24(%1)\n"
22802- " movq 32(%0), %%mm4\n"
22803- " movntq %%mm4, 32(%1)\n"
22804- " movq 40(%0), %%mm5\n"
22805- " movntq %%mm5, 40(%1)\n"
22806- " movq 48(%0), %%mm6\n"
22807- " movntq %%mm6, 48(%1)\n"
22808- " movq 56(%0), %%mm7\n"
22809- " movntq %%mm7, 56(%1)\n"
22810+ "1: prefetch 320(%1)\n"
22811+ "2: movq (%1), %%mm0\n"
22812+ " movntq %%mm0, (%2)\n"
22813+ " movq 8(%1), %%mm1\n"
22814+ " movntq %%mm1, 8(%2)\n"
22815+ " movq 16(%1), %%mm2\n"
22816+ " movntq %%mm2, 16(%2)\n"
22817+ " movq 24(%1), %%mm3\n"
22818+ " movntq %%mm3, 24(%2)\n"
22819+ " movq 32(%1), %%mm4\n"
22820+ " movntq %%mm4, 32(%2)\n"
22821+ " movq 40(%1), %%mm5\n"
22822+ " movntq %%mm5, 40(%2)\n"
22823+ " movq 48(%1), %%mm6\n"
22824+ " movntq %%mm6, 48(%2)\n"
22825+ " movq 56(%1), %%mm7\n"
22826+ " movntq %%mm7, 56(%2)\n"
22827 ".section .fixup, \"ax\"\n"
22828- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22829+ "3:\n"
22830+
22831+#ifdef CONFIG_PAX_KERNEXEC
22832+ " movl %%cr0, %0\n"
22833+ " movl %0, %%eax\n"
22834+ " andl $0xFFFEFFFF, %%eax\n"
22835+ " movl %%eax, %%cr0\n"
22836+#endif
22837+
22838+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22839+
22840+#ifdef CONFIG_PAX_KERNEXEC
22841+ " movl %0, %%cr0\n"
22842+#endif
22843+
22844 " jmp 2b\n"
22845 ".previous\n"
22846- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22847+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22848
22849 from += 64;
22850 to += 64;
22851@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22852 static void fast_copy_page(void *to, void *from)
22853 {
22854 int i;
22855+ unsigned long cr0;
22856
22857 kernel_fpu_begin();
22858
22859 __asm__ __volatile__ (
22860- "1: prefetch (%0)\n"
22861- " prefetch 64(%0)\n"
22862- " prefetch 128(%0)\n"
22863- " prefetch 192(%0)\n"
22864- " prefetch 256(%0)\n"
22865+ "1: prefetch (%1)\n"
22866+ " prefetch 64(%1)\n"
22867+ " prefetch 128(%1)\n"
22868+ " prefetch 192(%1)\n"
22869+ " prefetch 256(%1)\n"
22870 "2: \n"
22871 ".section .fixup, \"ax\"\n"
22872- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22873+ "3: \n"
22874+
22875+#ifdef CONFIG_PAX_KERNEXEC
22876+ " movl %%cr0, %0\n"
22877+ " movl %0, %%eax\n"
22878+ " andl $0xFFFEFFFF, %%eax\n"
22879+ " movl %%eax, %%cr0\n"
22880+#endif
22881+
22882+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22883+
22884+#ifdef CONFIG_PAX_KERNEXEC
22885+ " movl %0, %%cr0\n"
22886+#endif
22887+
22888 " jmp 2b\n"
22889 ".previous\n"
22890- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22891+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22892
22893 for (i = 0; i < 4096/64; i++) {
22894 __asm__ __volatile__ (
22895- "1: prefetch 320(%0)\n"
22896- "2: movq (%0), %%mm0\n"
22897- " movq 8(%0), %%mm1\n"
22898- " movq 16(%0), %%mm2\n"
22899- " movq 24(%0), %%mm3\n"
22900- " movq %%mm0, (%1)\n"
22901- " movq %%mm1, 8(%1)\n"
22902- " movq %%mm2, 16(%1)\n"
22903- " movq %%mm3, 24(%1)\n"
22904- " movq 32(%0), %%mm0\n"
22905- " movq 40(%0), %%mm1\n"
22906- " movq 48(%0), %%mm2\n"
22907- " movq 56(%0), %%mm3\n"
22908- " movq %%mm0, 32(%1)\n"
22909- " movq %%mm1, 40(%1)\n"
22910- " movq %%mm2, 48(%1)\n"
22911- " movq %%mm3, 56(%1)\n"
22912+ "1: prefetch 320(%1)\n"
22913+ "2: movq (%1), %%mm0\n"
22914+ " movq 8(%1), %%mm1\n"
22915+ " movq 16(%1), %%mm2\n"
22916+ " movq 24(%1), %%mm3\n"
22917+ " movq %%mm0, (%2)\n"
22918+ " movq %%mm1, 8(%2)\n"
22919+ " movq %%mm2, 16(%2)\n"
22920+ " movq %%mm3, 24(%2)\n"
22921+ " movq 32(%1), %%mm0\n"
22922+ " movq 40(%1), %%mm1\n"
22923+ " movq 48(%1), %%mm2\n"
22924+ " movq 56(%1), %%mm3\n"
22925+ " movq %%mm0, 32(%2)\n"
22926+ " movq %%mm1, 40(%2)\n"
22927+ " movq %%mm2, 48(%2)\n"
22928+ " movq %%mm3, 56(%2)\n"
22929 ".section .fixup, \"ax\"\n"
22930- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22931+ "3:\n"
22932+
22933+#ifdef CONFIG_PAX_KERNEXEC
22934+ " movl %%cr0, %0\n"
22935+ " movl %0, %%eax\n"
22936+ " andl $0xFFFEFFFF, %%eax\n"
22937+ " movl %%eax, %%cr0\n"
22938+#endif
22939+
22940+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22941+
22942+#ifdef CONFIG_PAX_KERNEXEC
22943+ " movl %0, %%cr0\n"
22944+#endif
22945+
22946 " jmp 2b\n"
22947 ".previous\n"
22948 _ASM_EXTABLE(1b, 3b)
22949- : : "r" (from), "r" (to) : "memory");
22950+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22951
22952 from += 64;
22953 to += 64;
22954diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22955index 69fa106..adda88b 100644
22956--- a/arch/x86/lib/msr-reg.S
22957+++ b/arch/x86/lib/msr-reg.S
22958@@ -3,6 +3,7 @@
22959 #include <asm/dwarf2.h>
22960 #include <asm/asm.h>
22961 #include <asm/msr.h>
22962+#include <asm/alternative-asm.h>
22963
22964 #ifdef CONFIG_X86_64
22965 /*
22966@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22967 CFI_STARTPROC
22968 pushq_cfi %rbx
22969 pushq_cfi %rbp
22970- movq %rdi, %r10 /* Save pointer */
22971+ movq %rdi, %r9 /* Save pointer */
22972 xorl %r11d, %r11d /* Return value */
22973 movl (%rdi), %eax
22974 movl 4(%rdi), %ecx
22975@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22976 movl 28(%rdi), %edi
22977 CFI_REMEMBER_STATE
22978 1: \op
22979-2: movl %eax, (%r10)
22980+2: movl %eax, (%r9)
22981 movl %r11d, %eax /* Return value */
22982- movl %ecx, 4(%r10)
22983- movl %edx, 8(%r10)
22984- movl %ebx, 12(%r10)
22985- movl %ebp, 20(%r10)
22986- movl %esi, 24(%r10)
22987- movl %edi, 28(%r10)
22988+ movl %ecx, 4(%r9)
22989+ movl %edx, 8(%r9)
22990+ movl %ebx, 12(%r9)
22991+ movl %ebp, 20(%r9)
22992+ movl %esi, 24(%r9)
22993+ movl %edi, 28(%r9)
22994 popq_cfi %rbp
22995 popq_cfi %rbx
22996+ pax_force_retaddr
22997 ret
22998 3:
22999 CFI_RESTORE_STATE
23000diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23001index 36b0d15..d381858 100644
23002--- a/arch/x86/lib/putuser.S
23003+++ b/arch/x86/lib/putuser.S
23004@@ -15,7 +15,9 @@
23005 #include <asm/thread_info.h>
23006 #include <asm/errno.h>
23007 #include <asm/asm.h>
23008-
23009+#include <asm/segment.h>
23010+#include <asm/pgtable.h>
23011+#include <asm/alternative-asm.h>
23012
23013 /*
23014 * __put_user_X
23015@@ -29,52 +31,119 @@
23016 * as they get called from within inline assembly.
23017 */
23018
23019-#define ENTER CFI_STARTPROC ; \
23020- GET_THREAD_INFO(%_ASM_BX)
23021-#define EXIT ret ; \
23022+#define ENTER CFI_STARTPROC
23023+#define EXIT pax_force_retaddr; ret ; \
23024 CFI_ENDPROC
23025
23026+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23027+#define _DEST %_ASM_CX,%_ASM_BX
23028+#else
23029+#define _DEST %_ASM_CX
23030+#endif
23031+
23032+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23033+#define __copyuser_seg gs;
23034+#else
23035+#define __copyuser_seg
23036+#endif
23037+
23038 .text
23039 ENTRY(__put_user_1)
23040 ENTER
23041+
23042+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23043+ GET_THREAD_INFO(%_ASM_BX)
23044 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23045 jae bad_put_user
23046-1: movb %al,(%_ASM_CX)
23047+
23048+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23049+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23050+ cmp %_ASM_BX,%_ASM_CX
23051+ jb 1234f
23052+ xor %ebx,%ebx
23053+1234:
23054+#endif
23055+
23056+#endif
23057+
23058+1: __copyuser_seg movb %al,(_DEST)
23059 xor %eax,%eax
23060 EXIT
23061 ENDPROC(__put_user_1)
23062
23063 ENTRY(__put_user_2)
23064 ENTER
23065+
23066+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23067+ GET_THREAD_INFO(%_ASM_BX)
23068 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23069 sub $1,%_ASM_BX
23070 cmp %_ASM_BX,%_ASM_CX
23071 jae bad_put_user
23072-2: movw %ax,(%_ASM_CX)
23073+
23074+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23075+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23076+ cmp %_ASM_BX,%_ASM_CX
23077+ jb 1234f
23078+ xor %ebx,%ebx
23079+1234:
23080+#endif
23081+
23082+#endif
23083+
23084+2: __copyuser_seg movw %ax,(_DEST)
23085 xor %eax,%eax
23086 EXIT
23087 ENDPROC(__put_user_2)
23088
23089 ENTRY(__put_user_4)
23090 ENTER
23091+
23092+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23093+ GET_THREAD_INFO(%_ASM_BX)
23094 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23095 sub $3,%_ASM_BX
23096 cmp %_ASM_BX,%_ASM_CX
23097 jae bad_put_user
23098-3: movl %eax,(%_ASM_CX)
23099+
23100+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23101+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23102+ cmp %_ASM_BX,%_ASM_CX
23103+ jb 1234f
23104+ xor %ebx,%ebx
23105+1234:
23106+#endif
23107+
23108+#endif
23109+
23110+3: __copyuser_seg movl %eax,(_DEST)
23111 xor %eax,%eax
23112 EXIT
23113 ENDPROC(__put_user_4)
23114
23115 ENTRY(__put_user_8)
23116 ENTER
23117+
23118+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23119+ GET_THREAD_INFO(%_ASM_BX)
23120 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23121 sub $7,%_ASM_BX
23122 cmp %_ASM_BX,%_ASM_CX
23123 jae bad_put_user
23124-4: mov %_ASM_AX,(%_ASM_CX)
23125+
23126+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23127+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23128+ cmp %_ASM_BX,%_ASM_CX
23129+ jb 1234f
23130+ xor %ebx,%ebx
23131+1234:
23132+#endif
23133+
23134+#endif
23135+
23136+4: __copyuser_seg mov %_ASM_AX,(_DEST)
23137 #ifdef CONFIG_X86_32
23138-5: movl %edx,4(%_ASM_CX)
23139+5: __copyuser_seg movl %edx,4(_DEST)
23140 #endif
23141 xor %eax,%eax
23142 EXIT
23143diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
23144index 05ea55f..6345b9a 100644
23145--- a/arch/x86/lib/rwlock_64.S
23146+++ b/arch/x86/lib/rwlock_64.S
23147@@ -2,6 +2,7 @@
23148
23149 #include <linux/linkage.h>
23150 #include <asm/rwlock.h>
23151+#include <asm/asm.h>
23152 #include <asm/alternative-asm.h>
23153 #include <asm/dwarf2.h>
23154
23155@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
23156 CFI_STARTPROC
23157 LOCK_PREFIX
23158 addl $RW_LOCK_BIAS,(%rdi)
23159+
23160+#ifdef CONFIG_PAX_REFCOUNT
23161+ jno 1234f
23162+ LOCK_PREFIX
23163+ subl $RW_LOCK_BIAS,(%rdi)
23164+ int $4
23165+1234:
23166+ _ASM_EXTABLE(1234b, 1234b)
23167+#endif
23168+
23169 1: rep
23170 nop
23171 cmpl $RW_LOCK_BIAS,(%rdi)
23172 jne 1b
23173 LOCK_PREFIX
23174 subl $RW_LOCK_BIAS,(%rdi)
23175+
23176+#ifdef CONFIG_PAX_REFCOUNT
23177+ jno 1234f
23178+ LOCK_PREFIX
23179+ addl $RW_LOCK_BIAS,(%rdi)
23180+ int $4
23181+1234:
23182+ _ASM_EXTABLE(1234b, 1234b)
23183+#endif
23184+
23185 jnz __write_lock_failed
23186+ pax_force_retaddr
23187 ret
23188 CFI_ENDPROC
23189 END(__write_lock_failed)
23190@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
23191 CFI_STARTPROC
23192 LOCK_PREFIX
23193 incl (%rdi)
23194+
23195+#ifdef CONFIG_PAX_REFCOUNT
23196+ jno 1234f
23197+ LOCK_PREFIX
23198+ decl (%rdi)
23199+ int $4
23200+1234:
23201+ _ASM_EXTABLE(1234b, 1234b)
23202+#endif
23203+
23204 1: rep
23205 nop
23206 cmpl $1,(%rdi)
23207 js 1b
23208 LOCK_PREFIX
23209 decl (%rdi)
23210+
23211+#ifdef CONFIG_PAX_REFCOUNT
23212+ jno 1234f
23213+ LOCK_PREFIX
23214+ incl (%rdi)
23215+ int $4
23216+1234:
23217+ _ASM_EXTABLE(1234b, 1234b)
23218+#endif
23219+
23220 js __read_lock_failed
23221+ pax_force_retaddr
23222 ret
23223 CFI_ENDPROC
23224 END(__read_lock_failed)
23225diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23226index 15acecf..f768b10 100644
23227--- a/arch/x86/lib/rwsem_64.S
23228+++ b/arch/x86/lib/rwsem_64.S
23229@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23230 call rwsem_down_read_failed
23231 popq %rdx
23232 restore_common_regs
23233+ pax_force_retaddr
23234 ret
23235 ENDPROC(call_rwsem_down_read_failed)
23236
23237@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23238 movq %rax,%rdi
23239 call rwsem_down_write_failed
23240 restore_common_regs
23241+ pax_force_retaddr
23242 ret
23243 ENDPROC(call_rwsem_down_write_failed)
23244
23245@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23246 movq %rax,%rdi
23247 call rwsem_wake
23248 restore_common_regs
23249-1: ret
23250+1: pax_force_retaddr
23251+ ret
23252 ENDPROC(call_rwsem_wake)
23253
23254 /* Fix up special calling conventions */
23255@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23256 call rwsem_downgrade_wake
23257 popq %rdx
23258 restore_common_regs
23259+ pax_force_retaddr
23260 ret
23261 ENDPROC(call_rwsem_downgrade_wake)
23262diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23263index bf9a7d5..fb06ab5 100644
23264--- a/arch/x86/lib/thunk_64.S
23265+++ b/arch/x86/lib/thunk_64.S
23266@@ -10,7 +10,8 @@
23267 #include <asm/dwarf2.h>
23268 #include <asm/calling.h>
23269 #include <asm/rwlock.h>
23270-
23271+ #include <asm/alternative-asm.h>
23272+
23273 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23274 .macro thunk name,func
23275 .globl \name
23276@@ -70,6 +71,7 @@
23277 SAVE_ARGS
23278 restore:
23279 RESTORE_ARGS
23280+ pax_force_retaddr
23281 ret
23282 CFI_ENDPROC
23283
23284@@ -77,5 +79,6 @@ restore:
23285 SAVE_ARGS
23286 restore_norax:
23287 RESTORE_ARGS 1
23288+ pax_force_retaddr
23289 ret
23290 CFI_ENDPROC
23291diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23292index 1f118d4..ec4a953 100644
23293--- a/arch/x86/lib/usercopy_32.c
23294+++ b/arch/x86/lib/usercopy_32.c
23295@@ -43,7 +43,7 @@ do { \
23296 __asm__ __volatile__( \
23297 " testl %1,%1\n" \
23298 " jz 2f\n" \
23299- "0: lodsb\n" \
23300+ "0: "__copyuser_seg"lodsb\n" \
23301 " stosb\n" \
23302 " testb %%al,%%al\n" \
23303 " jz 1f\n" \
23304@@ -128,10 +128,12 @@ do { \
23305 int __d0; \
23306 might_fault(); \
23307 __asm__ __volatile__( \
23308+ __COPYUSER_SET_ES \
23309 "0: rep; stosl\n" \
23310 " movl %2,%0\n" \
23311 "1: rep; stosb\n" \
23312 "2:\n" \
23313+ __COPYUSER_RESTORE_ES \
23314 ".section .fixup,\"ax\"\n" \
23315 "3: lea 0(%2,%0,4),%0\n" \
23316 " jmp 2b\n" \
23317@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23318 might_fault();
23319
23320 __asm__ __volatile__(
23321+ __COPYUSER_SET_ES
23322 " testl %0, %0\n"
23323 " jz 3f\n"
23324 " andl %0,%%ecx\n"
23325@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23326 " subl %%ecx,%0\n"
23327 " addl %0,%%eax\n"
23328 "1:\n"
23329+ __COPYUSER_RESTORE_ES
23330 ".section .fixup,\"ax\"\n"
23331 "2: xorl %%eax,%%eax\n"
23332 " jmp 1b\n"
23333@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23334
23335 #ifdef CONFIG_X86_INTEL_USERCOPY
23336 static unsigned long
23337-__copy_user_intel(void __user *to, const void *from, unsigned long size)
23338+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23339 {
23340 int d0, d1;
23341 __asm__ __volatile__(
23342@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23343 " .align 2,0x90\n"
23344 "3: movl 0(%4), %%eax\n"
23345 "4: movl 4(%4), %%edx\n"
23346- "5: movl %%eax, 0(%3)\n"
23347- "6: movl %%edx, 4(%3)\n"
23348+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23349+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23350 "7: movl 8(%4), %%eax\n"
23351 "8: movl 12(%4),%%edx\n"
23352- "9: movl %%eax, 8(%3)\n"
23353- "10: movl %%edx, 12(%3)\n"
23354+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23355+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23356 "11: movl 16(%4), %%eax\n"
23357 "12: movl 20(%4), %%edx\n"
23358- "13: movl %%eax, 16(%3)\n"
23359- "14: movl %%edx, 20(%3)\n"
23360+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23361+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23362 "15: movl 24(%4), %%eax\n"
23363 "16: movl 28(%4), %%edx\n"
23364- "17: movl %%eax, 24(%3)\n"
23365- "18: movl %%edx, 28(%3)\n"
23366+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23367+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23368 "19: movl 32(%4), %%eax\n"
23369 "20: movl 36(%4), %%edx\n"
23370- "21: movl %%eax, 32(%3)\n"
23371- "22: movl %%edx, 36(%3)\n"
23372+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23373+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23374 "23: movl 40(%4), %%eax\n"
23375 "24: movl 44(%4), %%edx\n"
23376- "25: movl %%eax, 40(%3)\n"
23377- "26: movl %%edx, 44(%3)\n"
23378+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23379+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23380 "27: movl 48(%4), %%eax\n"
23381 "28: movl 52(%4), %%edx\n"
23382- "29: movl %%eax, 48(%3)\n"
23383- "30: movl %%edx, 52(%3)\n"
23384+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23385+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23386 "31: movl 56(%4), %%eax\n"
23387 "32: movl 60(%4), %%edx\n"
23388- "33: movl %%eax, 56(%3)\n"
23389- "34: movl %%edx, 60(%3)\n"
23390+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23391+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23392 " addl $-64, %0\n"
23393 " addl $64, %4\n"
23394 " addl $64, %3\n"
23395@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23396 " shrl $2, %0\n"
23397 " andl $3, %%eax\n"
23398 " cld\n"
23399+ __COPYUSER_SET_ES
23400 "99: rep; movsl\n"
23401 "36: movl %%eax, %0\n"
23402 "37: rep; movsb\n"
23403 "100:\n"
23404+ __COPYUSER_RESTORE_ES
23405+ ".section .fixup,\"ax\"\n"
23406+ "101: lea 0(%%eax,%0,4),%0\n"
23407+ " jmp 100b\n"
23408+ ".previous\n"
23409+ ".section __ex_table,\"a\"\n"
23410+ " .align 4\n"
23411+ " .long 1b,100b\n"
23412+ " .long 2b,100b\n"
23413+ " .long 3b,100b\n"
23414+ " .long 4b,100b\n"
23415+ " .long 5b,100b\n"
23416+ " .long 6b,100b\n"
23417+ " .long 7b,100b\n"
23418+ " .long 8b,100b\n"
23419+ " .long 9b,100b\n"
23420+ " .long 10b,100b\n"
23421+ " .long 11b,100b\n"
23422+ " .long 12b,100b\n"
23423+ " .long 13b,100b\n"
23424+ " .long 14b,100b\n"
23425+ " .long 15b,100b\n"
23426+ " .long 16b,100b\n"
23427+ " .long 17b,100b\n"
23428+ " .long 18b,100b\n"
23429+ " .long 19b,100b\n"
23430+ " .long 20b,100b\n"
23431+ " .long 21b,100b\n"
23432+ " .long 22b,100b\n"
23433+ " .long 23b,100b\n"
23434+ " .long 24b,100b\n"
23435+ " .long 25b,100b\n"
23436+ " .long 26b,100b\n"
23437+ " .long 27b,100b\n"
23438+ " .long 28b,100b\n"
23439+ " .long 29b,100b\n"
23440+ " .long 30b,100b\n"
23441+ " .long 31b,100b\n"
23442+ " .long 32b,100b\n"
23443+ " .long 33b,100b\n"
23444+ " .long 34b,100b\n"
23445+ " .long 35b,100b\n"
23446+ " .long 36b,100b\n"
23447+ " .long 37b,100b\n"
23448+ " .long 99b,101b\n"
23449+ ".previous"
23450+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23451+ : "1"(to), "2"(from), "0"(size)
23452+ : "eax", "edx", "memory");
23453+ return size;
23454+}
23455+
23456+static unsigned long
23457+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23458+{
23459+ int d0, d1;
23460+ __asm__ __volatile__(
23461+ " .align 2,0x90\n"
23462+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23463+ " cmpl $67, %0\n"
23464+ " jbe 3f\n"
23465+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23466+ " .align 2,0x90\n"
23467+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23468+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23469+ "5: movl %%eax, 0(%3)\n"
23470+ "6: movl %%edx, 4(%3)\n"
23471+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23472+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23473+ "9: movl %%eax, 8(%3)\n"
23474+ "10: movl %%edx, 12(%3)\n"
23475+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23476+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23477+ "13: movl %%eax, 16(%3)\n"
23478+ "14: movl %%edx, 20(%3)\n"
23479+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23480+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23481+ "17: movl %%eax, 24(%3)\n"
23482+ "18: movl %%edx, 28(%3)\n"
23483+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23484+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23485+ "21: movl %%eax, 32(%3)\n"
23486+ "22: movl %%edx, 36(%3)\n"
23487+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23488+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23489+ "25: movl %%eax, 40(%3)\n"
23490+ "26: movl %%edx, 44(%3)\n"
23491+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23492+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23493+ "29: movl %%eax, 48(%3)\n"
23494+ "30: movl %%edx, 52(%3)\n"
23495+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23496+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23497+ "33: movl %%eax, 56(%3)\n"
23498+ "34: movl %%edx, 60(%3)\n"
23499+ " addl $-64, %0\n"
23500+ " addl $64, %4\n"
23501+ " addl $64, %3\n"
23502+ " cmpl $63, %0\n"
23503+ " ja 1b\n"
23504+ "35: movl %0, %%eax\n"
23505+ " shrl $2, %0\n"
23506+ " andl $3, %%eax\n"
23507+ " cld\n"
23508+ "99: rep; "__copyuser_seg" movsl\n"
23509+ "36: movl %%eax, %0\n"
23510+ "37: rep; "__copyuser_seg" movsb\n"
23511+ "100:\n"
23512 ".section .fixup,\"ax\"\n"
23513 "101: lea 0(%%eax,%0,4),%0\n"
23514 " jmp 100b\n"
23515@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23516 int d0, d1;
23517 __asm__ __volatile__(
23518 " .align 2,0x90\n"
23519- "0: movl 32(%4), %%eax\n"
23520+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23521 " cmpl $67, %0\n"
23522 " jbe 2f\n"
23523- "1: movl 64(%4), %%eax\n"
23524+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23525 " .align 2,0x90\n"
23526- "2: movl 0(%4), %%eax\n"
23527- "21: movl 4(%4), %%edx\n"
23528+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23529+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23530 " movl %%eax, 0(%3)\n"
23531 " movl %%edx, 4(%3)\n"
23532- "3: movl 8(%4), %%eax\n"
23533- "31: movl 12(%4),%%edx\n"
23534+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23535+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23536 " movl %%eax, 8(%3)\n"
23537 " movl %%edx, 12(%3)\n"
23538- "4: movl 16(%4), %%eax\n"
23539- "41: movl 20(%4), %%edx\n"
23540+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23541+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23542 " movl %%eax, 16(%3)\n"
23543 " movl %%edx, 20(%3)\n"
23544- "10: movl 24(%4), %%eax\n"
23545- "51: movl 28(%4), %%edx\n"
23546+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23547+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23548 " movl %%eax, 24(%3)\n"
23549 " movl %%edx, 28(%3)\n"
23550- "11: movl 32(%4), %%eax\n"
23551- "61: movl 36(%4), %%edx\n"
23552+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23553+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23554 " movl %%eax, 32(%3)\n"
23555 " movl %%edx, 36(%3)\n"
23556- "12: movl 40(%4), %%eax\n"
23557- "71: movl 44(%4), %%edx\n"
23558+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23559+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23560 " movl %%eax, 40(%3)\n"
23561 " movl %%edx, 44(%3)\n"
23562- "13: movl 48(%4), %%eax\n"
23563- "81: movl 52(%4), %%edx\n"
23564+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23565+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23566 " movl %%eax, 48(%3)\n"
23567 " movl %%edx, 52(%3)\n"
23568- "14: movl 56(%4), %%eax\n"
23569- "91: movl 60(%4), %%edx\n"
23570+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23571+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23572 " movl %%eax, 56(%3)\n"
23573 " movl %%edx, 60(%3)\n"
23574 " addl $-64, %0\n"
23575@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23576 " shrl $2, %0\n"
23577 " andl $3, %%eax\n"
23578 " cld\n"
23579- "6: rep; movsl\n"
23580+ "6: rep; "__copyuser_seg" movsl\n"
23581 " movl %%eax,%0\n"
23582- "7: rep; movsb\n"
23583+ "7: rep; "__copyuser_seg" movsb\n"
23584 "8:\n"
23585 ".section .fixup,\"ax\"\n"
23586 "9: lea 0(%%eax,%0,4),%0\n"
23587@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23588
23589 __asm__ __volatile__(
23590 " .align 2,0x90\n"
23591- "0: movl 32(%4), %%eax\n"
23592+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23593 " cmpl $67, %0\n"
23594 " jbe 2f\n"
23595- "1: movl 64(%4), %%eax\n"
23596+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23597 " .align 2,0x90\n"
23598- "2: movl 0(%4), %%eax\n"
23599- "21: movl 4(%4), %%edx\n"
23600+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23601+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23602 " movnti %%eax, 0(%3)\n"
23603 " movnti %%edx, 4(%3)\n"
23604- "3: movl 8(%4), %%eax\n"
23605- "31: movl 12(%4),%%edx\n"
23606+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23607+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23608 " movnti %%eax, 8(%3)\n"
23609 " movnti %%edx, 12(%3)\n"
23610- "4: movl 16(%4), %%eax\n"
23611- "41: movl 20(%4), %%edx\n"
23612+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23613+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23614 " movnti %%eax, 16(%3)\n"
23615 " movnti %%edx, 20(%3)\n"
23616- "10: movl 24(%4), %%eax\n"
23617- "51: movl 28(%4), %%edx\n"
23618+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23619+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23620 " movnti %%eax, 24(%3)\n"
23621 " movnti %%edx, 28(%3)\n"
23622- "11: movl 32(%4), %%eax\n"
23623- "61: movl 36(%4), %%edx\n"
23624+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23625+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23626 " movnti %%eax, 32(%3)\n"
23627 " movnti %%edx, 36(%3)\n"
23628- "12: movl 40(%4), %%eax\n"
23629- "71: movl 44(%4), %%edx\n"
23630+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23631+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23632 " movnti %%eax, 40(%3)\n"
23633 " movnti %%edx, 44(%3)\n"
23634- "13: movl 48(%4), %%eax\n"
23635- "81: movl 52(%4), %%edx\n"
23636+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23637+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23638 " movnti %%eax, 48(%3)\n"
23639 " movnti %%edx, 52(%3)\n"
23640- "14: movl 56(%4), %%eax\n"
23641- "91: movl 60(%4), %%edx\n"
23642+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23643+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23644 " movnti %%eax, 56(%3)\n"
23645 " movnti %%edx, 60(%3)\n"
23646 " addl $-64, %0\n"
23647@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23648 " shrl $2, %0\n"
23649 " andl $3, %%eax\n"
23650 " cld\n"
23651- "6: rep; movsl\n"
23652+ "6: rep; "__copyuser_seg" movsl\n"
23653 " movl %%eax,%0\n"
23654- "7: rep; movsb\n"
23655+ "7: rep; "__copyuser_seg" movsb\n"
23656 "8:\n"
23657 ".section .fixup,\"ax\"\n"
23658 "9: lea 0(%%eax,%0,4),%0\n"
23659@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23660
23661 __asm__ __volatile__(
23662 " .align 2,0x90\n"
23663- "0: movl 32(%4), %%eax\n"
23664+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23665 " cmpl $67, %0\n"
23666 " jbe 2f\n"
23667- "1: movl 64(%4), %%eax\n"
23668+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23669 " .align 2,0x90\n"
23670- "2: movl 0(%4), %%eax\n"
23671- "21: movl 4(%4), %%edx\n"
23672+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23673+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23674 " movnti %%eax, 0(%3)\n"
23675 " movnti %%edx, 4(%3)\n"
23676- "3: movl 8(%4), %%eax\n"
23677- "31: movl 12(%4),%%edx\n"
23678+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23679+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23680 " movnti %%eax, 8(%3)\n"
23681 " movnti %%edx, 12(%3)\n"
23682- "4: movl 16(%4), %%eax\n"
23683- "41: movl 20(%4), %%edx\n"
23684+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23685+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23686 " movnti %%eax, 16(%3)\n"
23687 " movnti %%edx, 20(%3)\n"
23688- "10: movl 24(%4), %%eax\n"
23689- "51: movl 28(%4), %%edx\n"
23690+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23691+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23692 " movnti %%eax, 24(%3)\n"
23693 " movnti %%edx, 28(%3)\n"
23694- "11: movl 32(%4), %%eax\n"
23695- "61: movl 36(%4), %%edx\n"
23696+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23697+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23698 " movnti %%eax, 32(%3)\n"
23699 " movnti %%edx, 36(%3)\n"
23700- "12: movl 40(%4), %%eax\n"
23701- "71: movl 44(%4), %%edx\n"
23702+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23703+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23704 " movnti %%eax, 40(%3)\n"
23705 " movnti %%edx, 44(%3)\n"
23706- "13: movl 48(%4), %%eax\n"
23707- "81: movl 52(%4), %%edx\n"
23708+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23709+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23710 " movnti %%eax, 48(%3)\n"
23711 " movnti %%edx, 52(%3)\n"
23712- "14: movl 56(%4), %%eax\n"
23713- "91: movl 60(%4), %%edx\n"
23714+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23715+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23716 " movnti %%eax, 56(%3)\n"
23717 " movnti %%edx, 60(%3)\n"
23718 " addl $-64, %0\n"
23719@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23720 " shrl $2, %0\n"
23721 " andl $3, %%eax\n"
23722 " cld\n"
23723- "6: rep; movsl\n"
23724+ "6: rep; "__copyuser_seg" movsl\n"
23725 " movl %%eax,%0\n"
23726- "7: rep; movsb\n"
23727+ "7: rep; "__copyuser_seg" movsb\n"
23728 "8:\n"
23729 ".section .fixup,\"ax\"\n"
23730 "9: lea 0(%%eax,%0,4),%0\n"
23731@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23732 */
23733 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23734 unsigned long size);
23735-unsigned long __copy_user_intel(void __user *to, const void *from,
23736+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23737+ unsigned long size);
23738+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23739 unsigned long size);
23740 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23741 const void __user *from, unsigned long size);
23742 #endif /* CONFIG_X86_INTEL_USERCOPY */
23743
23744 /* Generic arbitrary sized copy. */
23745-#define __copy_user(to, from, size) \
23746+#define __copy_user(to, from, size, prefix, set, restore) \
23747 do { \
23748 int __d0, __d1, __d2; \
23749 __asm__ __volatile__( \
23750+ set \
23751 " cmp $7,%0\n" \
23752 " jbe 1f\n" \
23753 " movl %1,%0\n" \
23754 " negl %0\n" \
23755 " andl $7,%0\n" \
23756 " subl %0,%3\n" \
23757- "4: rep; movsb\n" \
23758+ "4: rep; "prefix"movsb\n" \
23759 " movl %3,%0\n" \
23760 " shrl $2,%0\n" \
23761 " andl $3,%3\n" \
23762 " .align 2,0x90\n" \
23763- "0: rep; movsl\n" \
23764+ "0: rep; "prefix"movsl\n" \
23765 " movl %3,%0\n" \
23766- "1: rep; movsb\n" \
23767+ "1: rep; "prefix"movsb\n" \
23768 "2:\n" \
23769+ restore \
23770 ".section .fixup,\"ax\"\n" \
23771 "5: addl %3,%0\n" \
23772 " jmp 2b\n" \
23773@@ -682,14 +799,14 @@ do { \
23774 " negl %0\n" \
23775 " andl $7,%0\n" \
23776 " subl %0,%3\n" \
23777- "4: rep; movsb\n" \
23778+ "4: rep; "__copyuser_seg"movsb\n" \
23779 " movl %3,%0\n" \
23780 " shrl $2,%0\n" \
23781 " andl $3,%3\n" \
23782 " .align 2,0x90\n" \
23783- "0: rep; movsl\n" \
23784+ "0: rep; "__copyuser_seg"movsl\n" \
23785 " movl %3,%0\n" \
23786- "1: rep; movsb\n" \
23787+ "1: rep; "__copyuser_seg"movsb\n" \
23788 "2:\n" \
23789 ".section .fixup,\"ax\"\n" \
23790 "5: addl %3,%0\n" \
23791@@ -775,9 +892,9 @@ survive:
23792 }
23793 #endif
23794 if (movsl_is_ok(to, from, n))
23795- __copy_user(to, from, n);
23796+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23797 else
23798- n = __copy_user_intel(to, from, n);
23799+ n = __generic_copy_to_user_intel(to, from, n);
23800 return n;
23801 }
23802 EXPORT_SYMBOL(__copy_to_user_ll);
23803@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23804 unsigned long n)
23805 {
23806 if (movsl_is_ok(to, from, n))
23807- __copy_user(to, from, n);
23808+ __copy_user(to, from, n, __copyuser_seg, "", "");
23809 else
23810- n = __copy_user_intel((void __user *)to,
23811- (const void *)from, n);
23812+ n = __generic_copy_from_user_intel(to, from, n);
23813 return n;
23814 }
23815 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23816@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23817 if (n > 64 && cpu_has_xmm2)
23818 n = __copy_user_intel_nocache(to, from, n);
23819 else
23820- __copy_user(to, from, n);
23821+ __copy_user(to, from, n, __copyuser_seg, "", "");
23822 #else
23823- __copy_user(to, from, n);
23824+ __copy_user(to, from, n, __copyuser_seg, "", "");
23825 #endif
23826 return n;
23827 }
23828 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23829
23830-/**
23831- * copy_to_user: - Copy a block of data into user space.
23832- * @to: Destination address, in user space.
23833- * @from: Source address, in kernel space.
23834- * @n: Number of bytes to copy.
23835- *
23836- * Context: User context only. This function may sleep.
23837- *
23838- * Copy data from kernel space to user space.
23839- *
23840- * Returns number of bytes that could not be copied.
23841- * On success, this will be zero.
23842- */
23843-unsigned long
23844-copy_to_user(void __user *to, const void *from, unsigned long n)
23845+#ifdef CONFIG_PAX_MEMORY_UDEREF
23846+void __set_fs(mm_segment_t x)
23847 {
23848- if (access_ok(VERIFY_WRITE, to, n))
23849- n = __copy_to_user(to, from, n);
23850- return n;
23851+ switch (x.seg) {
23852+ case 0:
23853+ loadsegment(gs, 0);
23854+ break;
23855+ case TASK_SIZE_MAX:
23856+ loadsegment(gs, __USER_DS);
23857+ break;
23858+ case -1UL:
23859+ loadsegment(gs, __KERNEL_DS);
23860+ break;
23861+ default:
23862+ BUG();
23863+ }
23864+ return;
23865 }
23866-EXPORT_SYMBOL(copy_to_user);
23867+EXPORT_SYMBOL(__set_fs);
23868
23869-/**
23870- * copy_from_user: - Copy a block of data from user space.
23871- * @to: Destination address, in kernel space.
23872- * @from: Source address, in user space.
23873- * @n: Number of bytes to copy.
23874- *
23875- * Context: User context only. This function may sleep.
23876- *
23877- * Copy data from user space to kernel space.
23878- *
23879- * Returns number of bytes that could not be copied.
23880- * On success, this will be zero.
23881- *
23882- * If some data could not be copied, this function will pad the copied
23883- * data to the requested size using zero bytes.
23884- */
23885-unsigned long
23886-copy_from_user(void *to, const void __user *from, unsigned long n)
23887+void set_fs(mm_segment_t x)
23888 {
23889- if (access_ok(VERIFY_READ, from, n))
23890- n = __copy_from_user(to, from, n);
23891- else
23892- memset(to, 0, n);
23893- return n;
23894+ current_thread_info()->addr_limit = x;
23895+ __set_fs(x);
23896 }
23897-EXPORT_SYMBOL(copy_from_user);
23898+EXPORT_SYMBOL(set_fs);
23899+#endif
23900diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23901index b7c2849..8633ad8 100644
23902--- a/arch/x86/lib/usercopy_64.c
23903+++ b/arch/x86/lib/usercopy_64.c
23904@@ -42,6 +42,12 @@ long
23905 __strncpy_from_user(char *dst, const char __user *src, long count)
23906 {
23907 long res;
23908+
23909+#ifdef CONFIG_PAX_MEMORY_UDEREF
23910+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23911+ src += PAX_USER_SHADOW_BASE;
23912+#endif
23913+
23914 __do_strncpy_from_user(dst, src, count, res);
23915 return res;
23916 }
23917@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23918 {
23919 long __d0;
23920 might_fault();
23921+
23922+#ifdef CONFIG_PAX_MEMORY_UDEREF
23923+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23924+ addr += PAX_USER_SHADOW_BASE;
23925+#endif
23926+
23927 /* no memory constraint because it doesn't change any memory gcc knows
23928 about */
23929 asm volatile(
23930@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23931 }
23932 EXPORT_SYMBOL(strlen_user);
23933
23934-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23935+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23936 {
23937- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23938- return copy_user_generic((__force void *)to, (__force void *)from, len);
23939- }
23940- return len;
23941+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23942+
23943+#ifdef CONFIG_PAX_MEMORY_UDEREF
23944+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23945+ to += PAX_USER_SHADOW_BASE;
23946+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23947+ from += PAX_USER_SHADOW_BASE;
23948+#endif
23949+
23950+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23951+ }
23952+ return len;
23953 }
23954 EXPORT_SYMBOL(copy_in_user);
23955
23956@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23957 * it is not necessary to optimize tail handling.
23958 */
23959 unsigned long
23960-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23961+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23962 {
23963 char c;
23964 unsigned zero_len;
23965diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23966index 61b41ca..5fef66a 100644
23967--- a/arch/x86/mm/extable.c
23968+++ b/arch/x86/mm/extable.c
23969@@ -1,14 +1,71 @@
23970 #include <linux/module.h>
23971 #include <linux/spinlock.h>
23972+#include <linux/sort.h>
23973 #include <asm/uaccess.h>
23974+#include <asm/pgtable.h>
23975
23976+/*
23977+ * The exception table needs to be sorted so that the binary
23978+ * search that we use to find entries in it works properly.
23979+ * This is used both for the kernel exception table and for
23980+ * the exception tables of modules that get loaded.
23981+ */
23982+static int cmp_ex(const void *a, const void *b)
23983+{
23984+ const struct exception_table_entry *x = a, *y = b;
23985+
23986+ /* avoid overflow */
23987+ if (x->insn > y->insn)
23988+ return 1;
23989+ if (x->insn < y->insn)
23990+ return -1;
23991+ return 0;
23992+}
23993+
23994+static void swap_ex(void *a, void *b, int size)
23995+{
23996+ struct exception_table_entry t, *x = a, *y = b;
23997+
23998+ t = *x;
23999+
24000+ pax_open_kernel();
24001+ *x = *y;
24002+ *y = t;
24003+ pax_close_kernel();
24004+}
24005+
24006+void sort_extable(struct exception_table_entry *start,
24007+ struct exception_table_entry *finish)
24008+{
24009+ sort(start, finish - start, sizeof(struct exception_table_entry),
24010+ cmp_ex, swap_ex);
24011+}
24012+
24013+#ifdef CONFIG_MODULES
24014+/*
24015+ * If the exception table is sorted, any referring to the module init
24016+ * will be at the beginning or the end.
24017+ */
24018+void trim_init_extable(struct module *m)
24019+{
24020+ /*trim the beginning*/
24021+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
24022+ m->extable++;
24023+ m->num_exentries--;
24024+ }
24025+ /*trim the end*/
24026+ while (m->num_exentries &&
24027+ within_module_init(m->extable[m->num_exentries-1].insn, m))
24028+ m->num_exentries--;
24029+}
24030+#endif /* CONFIG_MODULES */
24031
24032 int fixup_exception(struct pt_regs *regs)
24033 {
24034 const struct exception_table_entry *fixup;
24035
24036 #ifdef CONFIG_PNPBIOS
24037- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24038+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24039 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24040 extern u32 pnp_bios_is_utter_crap;
24041 pnp_bios_is_utter_crap = 1;
24042diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24043index 8ac0d76..ca501e2 100644
24044--- a/arch/x86/mm/fault.c
24045+++ b/arch/x86/mm/fault.c
24046@@ -11,10 +11,19 @@
24047 #include <linux/kprobes.h> /* __kprobes, ... */
24048 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
24049 #include <linux/perf_event.h> /* perf_sw_event */
24050+#include <linux/unistd.h>
24051+#include <linux/compiler.h>
24052
24053 #include <asm/traps.h> /* dotraplinkage, ... */
24054 #include <asm/pgalloc.h> /* pgd_*(), ... */
24055 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24056+#include <asm/vsyscall.h>
24057+#include <asm/tlbflush.h>
24058+
24059+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24060+#include <asm/stacktrace.h>
24061+#include "../kernel/dumpstack.h"
24062+#endif
24063
24064 /*
24065 * Page fault error code bits:
24066@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
24067 int ret = 0;
24068
24069 /* kprobe_running() needs smp_processor_id() */
24070- if (kprobes_built_in() && !user_mode_vm(regs)) {
24071+ if (kprobes_built_in() && !user_mode(regs)) {
24072 preempt_disable();
24073 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24074 ret = 1;
24075@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24076 return !instr_lo || (instr_lo>>1) == 1;
24077 case 0x00:
24078 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24079- if (probe_kernel_address(instr, opcode))
24080+ if (user_mode(regs)) {
24081+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24082+ return 0;
24083+ } else if (probe_kernel_address(instr, opcode))
24084 return 0;
24085
24086 *prefetch = (instr_lo == 0xF) &&
24087@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24088 while (instr < max_instr) {
24089 unsigned char opcode;
24090
24091- if (probe_kernel_address(instr, opcode))
24092+ if (user_mode(regs)) {
24093+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24094+ break;
24095+ } else if (probe_kernel_address(instr, opcode))
24096 break;
24097
24098 instr++;
24099@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24100 force_sig_info(si_signo, &info, tsk);
24101 }
24102
24103+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24104+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24105+#endif
24106+
24107+#ifdef CONFIG_PAX_EMUTRAMP
24108+static int pax_handle_fetch_fault(struct pt_regs *regs);
24109+#endif
24110+
24111+#ifdef CONFIG_PAX_PAGEEXEC
24112+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24113+{
24114+ pgd_t *pgd;
24115+ pud_t *pud;
24116+ pmd_t *pmd;
24117+
24118+ pgd = pgd_offset(mm, address);
24119+ if (!pgd_present(*pgd))
24120+ return NULL;
24121+ pud = pud_offset(pgd, address);
24122+ if (!pud_present(*pud))
24123+ return NULL;
24124+ pmd = pmd_offset(pud, address);
24125+ if (!pmd_present(*pmd))
24126+ return NULL;
24127+ return pmd;
24128+}
24129+#endif
24130+
24131 DEFINE_SPINLOCK(pgd_lock);
24132 LIST_HEAD(pgd_list);
24133
24134@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
24135 address += PMD_SIZE) {
24136
24137 unsigned long flags;
24138+
24139+#ifdef CONFIG_PAX_PER_CPU_PGD
24140+ unsigned long cpu;
24141+#else
24142 struct page *page;
24143+#endif
24144
24145 spin_lock_irqsave(&pgd_lock, flags);
24146+
24147+#ifdef CONFIG_PAX_PER_CPU_PGD
24148+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24149+ pgd_t *pgd = get_cpu_pgd(cpu);
24150+#else
24151 list_for_each_entry(page, &pgd_list, lru) {
24152- if (!vmalloc_sync_one(page_address(page), address))
24153+ pgd_t *pgd = page_address(page);
24154+#endif
24155+
24156+ if (!vmalloc_sync_one(pgd, address))
24157 break;
24158 }
24159 spin_unlock_irqrestore(&pgd_lock, flags);
24160@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
24161 * an interrupt in the middle of a task switch..
24162 */
24163 pgd_paddr = read_cr3();
24164+
24165+#ifdef CONFIG_PAX_PER_CPU_PGD
24166+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24167+#endif
24168+
24169 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24170 if (!pmd_k)
24171 return -1;
24172@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
24173
24174 const pgd_t *pgd_ref = pgd_offset_k(address);
24175 unsigned long flags;
24176+
24177+#ifdef CONFIG_PAX_PER_CPU_PGD
24178+ unsigned long cpu;
24179+#else
24180 struct page *page;
24181+#endif
24182
24183 if (pgd_none(*pgd_ref))
24184 continue;
24185
24186 spin_lock_irqsave(&pgd_lock, flags);
24187+
24188+#ifdef CONFIG_PAX_PER_CPU_PGD
24189+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24190+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
24191+#else
24192 list_for_each_entry(page, &pgd_list, lru) {
24193 pgd_t *pgd;
24194 pgd = (pgd_t *)page_address(page) + pgd_index(address);
24195+#endif
24196+
24197 if (pgd_none(*pgd))
24198 set_pgd(pgd, *pgd_ref);
24199 else
24200@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
24201 * happen within a race in page table update. In the later
24202 * case just flush:
24203 */
24204+
24205+#ifdef CONFIG_PAX_PER_CPU_PGD
24206+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24207+ pgd = pgd_offset_cpu(smp_processor_id(), address);
24208+#else
24209 pgd = pgd_offset(current->active_mm, address);
24210+#endif
24211+
24212 pgd_ref = pgd_offset_k(address);
24213 if (pgd_none(*pgd_ref))
24214 return -1;
24215@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24216 static int is_errata100(struct pt_regs *regs, unsigned long address)
24217 {
24218 #ifdef CONFIG_X86_64
24219- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24220+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24221 return 1;
24222 #endif
24223 return 0;
24224@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24225 }
24226
24227 static const char nx_warning[] = KERN_CRIT
24228-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24229+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24230
24231 static void
24232 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24233@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24234 if (!oops_may_print())
24235 return;
24236
24237- if (error_code & PF_INSTR) {
24238+ if (nx_enabled && (error_code & PF_INSTR)) {
24239 unsigned int level;
24240
24241 pte_t *pte = lookup_address(address, &level);
24242
24243 if (pte && pte_present(*pte) && !pte_exec(*pte))
24244- printk(nx_warning, current_uid());
24245+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24246 }
24247
24248+#ifdef CONFIG_PAX_KERNEXEC
24249+ if (init_mm.start_code <= address && address < init_mm.end_code) {
24250+ if (current->signal->curr_ip)
24251+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24252+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24253+ else
24254+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24255+ current->comm, task_pid_nr(current), current_uid(), current_euid());
24256+ }
24257+#endif
24258+
24259 printk(KERN_ALERT "BUG: unable to handle kernel ");
24260 if (address < PAGE_SIZE)
24261 printk(KERN_CONT "NULL pointer dereference");
24262@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24263 {
24264 struct task_struct *tsk = current;
24265
24266+#ifdef CONFIG_X86_64
24267+ struct mm_struct *mm = tsk->mm;
24268+
24269+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24270+ if (regs->ip == (unsigned long)vgettimeofday) {
24271+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24272+ return;
24273+ } else if (regs->ip == (unsigned long)vtime) {
24274+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24275+ return;
24276+ } else if (regs->ip == (unsigned long)vgetcpu) {
24277+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24278+ return;
24279+ }
24280+ }
24281+#endif
24282+
24283 /* User mode accesses just cause a SIGSEGV */
24284 if (error_code & PF_USER) {
24285 /*
24286@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24287 if (is_errata100(regs, address))
24288 return;
24289
24290+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24291+ if (pax_is_fetch_fault(regs, error_code, address)) {
24292+
24293+#ifdef CONFIG_PAX_EMUTRAMP
24294+ switch (pax_handle_fetch_fault(regs)) {
24295+ case 2:
24296+ return;
24297+ }
24298+#endif
24299+
24300+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24301+ do_group_exit(SIGKILL);
24302+ }
24303+#endif
24304+
24305 if (unlikely(show_unhandled_signals))
24306 show_signal_msg(regs, error_code, address, tsk);
24307
24308@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24309 if (fault & VM_FAULT_HWPOISON) {
24310 printk(KERN_ERR
24311 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24312- tsk->comm, tsk->pid, address);
24313+ tsk->comm, task_pid_nr(tsk), address);
24314 code = BUS_MCEERR_AR;
24315 }
24316 #endif
24317@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24318 return 1;
24319 }
24320
24321+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24322+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24323+{
24324+ pte_t *pte;
24325+ pmd_t *pmd;
24326+ spinlock_t *ptl;
24327+ unsigned char pte_mask;
24328+
24329+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24330+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
24331+ return 0;
24332+
24333+ /* PaX: it's our fault, let's handle it if we can */
24334+
24335+ /* PaX: take a look at read faults before acquiring any locks */
24336+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24337+ /* instruction fetch attempt from a protected page in user mode */
24338+ up_read(&mm->mmap_sem);
24339+
24340+#ifdef CONFIG_PAX_EMUTRAMP
24341+ switch (pax_handle_fetch_fault(regs)) {
24342+ case 2:
24343+ return 1;
24344+ }
24345+#endif
24346+
24347+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24348+ do_group_exit(SIGKILL);
24349+ }
24350+
24351+ pmd = pax_get_pmd(mm, address);
24352+ if (unlikely(!pmd))
24353+ return 0;
24354+
24355+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24356+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24357+ pte_unmap_unlock(pte, ptl);
24358+ return 0;
24359+ }
24360+
24361+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24362+ /* write attempt to a protected page in user mode */
24363+ pte_unmap_unlock(pte, ptl);
24364+ return 0;
24365+ }
24366+
24367+#ifdef CONFIG_SMP
24368+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24369+#else
24370+ if (likely(address > get_limit(regs->cs)))
24371+#endif
24372+ {
24373+ set_pte(pte, pte_mkread(*pte));
24374+ __flush_tlb_one(address);
24375+ pte_unmap_unlock(pte, ptl);
24376+ up_read(&mm->mmap_sem);
24377+ return 1;
24378+ }
24379+
24380+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24381+
24382+ /*
24383+ * PaX: fill DTLB with user rights and retry
24384+ */
24385+ __asm__ __volatile__ (
24386+ "orb %2,(%1)\n"
24387+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24388+/*
24389+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24390+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24391+ * page fault when examined during a TLB load attempt. this is true not only
24392+ * for PTEs holding a non-present entry but also present entries that will
24393+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24394+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24395+ * for our target pages since their PTEs are simply not in the TLBs at all.
24396+
24397+ * the best thing in omitting it is that we gain around 15-20% speed in the
24398+ * fast path of the page fault handler and can get rid of tracing since we
24399+ * can no longer flush unintended entries.
24400+ */
24401+ "invlpg (%0)\n"
24402+#endif
24403+ __copyuser_seg"testb $0,(%0)\n"
24404+ "xorb %3,(%1)\n"
24405+ :
24406+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24407+ : "memory", "cc");
24408+ pte_unmap_unlock(pte, ptl);
24409+ up_read(&mm->mmap_sem);
24410+ return 1;
24411+}
24412+#endif
24413+
24414 /*
24415 * Handle a spurious fault caused by a stale TLB entry.
24416 *
24417@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24418 static inline int
24419 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24420 {
24421+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24422+ return 1;
24423+
24424 if (write) {
24425 /* write, present and write, not present: */
24426 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24427@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24428 {
24429 struct vm_area_struct *vma;
24430 struct task_struct *tsk;
24431- unsigned long address;
24432 struct mm_struct *mm;
24433 int write;
24434 int fault;
24435
24436- tsk = current;
24437- mm = tsk->mm;
24438-
24439 /* Get the faulting address: */
24440- address = read_cr2();
24441+ unsigned long address = read_cr2();
24442+
24443+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24444+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24445+ if (!search_exception_tables(regs->ip)) {
24446+ bad_area_nosemaphore(regs, error_code, address);
24447+ return;
24448+ }
24449+ if (address < PAX_USER_SHADOW_BASE) {
24450+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24451+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24452+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24453+ } else
24454+ address -= PAX_USER_SHADOW_BASE;
24455+ }
24456+#endif
24457+
24458+ tsk = current;
24459+ mm = tsk->mm;
24460
24461 /*
24462 * Detect and handle instructions that would cause a page fault for
24463@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24464 * User-mode registers count as a user access even for any
24465 * potential system fault or CPU buglet:
24466 */
24467- if (user_mode_vm(regs)) {
24468+ if (user_mode(regs)) {
24469 local_irq_enable();
24470 error_code |= PF_USER;
24471 } else {
24472@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24473 might_sleep();
24474 }
24475
24476+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24477+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24478+ return;
24479+#endif
24480+
24481 vma = find_vma(mm, address);
24482 if (unlikely(!vma)) {
24483 bad_area(regs, error_code, address);
24484@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24485 bad_area(regs, error_code, address);
24486 return;
24487 }
24488- if (error_code & PF_USER) {
24489- /*
24490- * Accessing the stack below %sp is always a bug.
24491- * The large cushion allows instructions like enter
24492- * and pusha to work. ("enter $65535, $31" pushes
24493- * 32 pointers and then decrements %sp by 65535.)
24494- */
24495- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24496- bad_area(regs, error_code, address);
24497- return;
24498- }
24499+ /*
24500+ * Accessing the stack below %sp is always a bug.
24501+ * The large cushion allows instructions like enter
24502+ * and pusha to work. ("enter $65535, $31" pushes
24503+ * 32 pointers and then decrements %sp by 65535.)
24504+ */
24505+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24506+ bad_area(regs, error_code, address);
24507+ return;
24508 }
24509+
24510+#ifdef CONFIG_PAX_SEGMEXEC
24511+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24512+ bad_area(regs, error_code, address);
24513+ return;
24514+ }
24515+#endif
24516+
24517 if (unlikely(expand_stack(vma, address))) {
24518 bad_area(regs, error_code, address);
24519 return;
24520@@ -1146,3 +1390,292 @@ good_area:
24521
24522 up_read(&mm->mmap_sem);
24523 }
24524+
24525+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24526+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24527+{
24528+ struct mm_struct *mm = current->mm;
24529+ unsigned long ip = regs->ip;
24530+
24531+ if (v8086_mode(regs))
24532+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24533+
24534+#ifdef CONFIG_PAX_PAGEEXEC
24535+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24536+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24537+ return true;
24538+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24539+ return true;
24540+ return false;
24541+ }
24542+#endif
24543+
24544+#ifdef CONFIG_PAX_SEGMEXEC
24545+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24546+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24547+ return true;
24548+ return false;
24549+ }
24550+#endif
24551+
24552+ return false;
24553+}
24554+#endif
24555+
24556+#ifdef CONFIG_PAX_EMUTRAMP
24557+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24558+{
24559+ int err;
24560+
24561+ do { /* PaX: libffi trampoline emulation */
24562+ unsigned char mov, jmp;
24563+ unsigned int addr1, addr2;
24564+
24565+#ifdef CONFIG_X86_64
24566+ if ((regs->ip + 9) >> 32)
24567+ break;
24568+#endif
24569+
24570+ err = get_user(mov, (unsigned char __user *)regs->ip);
24571+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24572+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24573+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24574+
24575+ if (err)
24576+ break;
24577+
24578+ if (mov == 0xB8 && jmp == 0xE9) {
24579+ regs->ax = addr1;
24580+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24581+ return 2;
24582+ }
24583+ } while (0);
24584+
24585+ do { /* PaX: gcc trampoline emulation #1 */
24586+ unsigned char mov1, mov2;
24587+ unsigned short jmp;
24588+ unsigned int addr1, addr2;
24589+
24590+#ifdef CONFIG_X86_64
24591+ if ((regs->ip + 11) >> 32)
24592+ break;
24593+#endif
24594+
24595+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24596+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24597+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24598+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24599+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24600+
24601+ if (err)
24602+ break;
24603+
24604+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24605+ regs->cx = addr1;
24606+ regs->ax = addr2;
24607+ regs->ip = addr2;
24608+ return 2;
24609+ }
24610+ } while (0);
24611+
24612+ do { /* PaX: gcc trampoline emulation #2 */
24613+ unsigned char mov, jmp;
24614+ unsigned int addr1, addr2;
24615+
24616+#ifdef CONFIG_X86_64
24617+ if ((regs->ip + 9) >> 32)
24618+ break;
24619+#endif
24620+
24621+ err = get_user(mov, (unsigned char __user *)regs->ip);
24622+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24623+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24624+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24625+
24626+ if (err)
24627+ break;
24628+
24629+ if (mov == 0xB9 && jmp == 0xE9) {
24630+ regs->cx = addr1;
24631+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24632+ return 2;
24633+ }
24634+ } while (0);
24635+
24636+ return 1; /* PaX in action */
24637+}
24638+
24639+#ifdef CONFIG_X86_64
24640+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24641+{
24642+ int err;
24643+
24644+ do { /* PaX: libffi trampoline emulation */
24645+ unsigned short mov1, mov2, jmp1;
24646+ unsigned char stcclc, jmp2;
24647+ unsigned long addr1, addr2;
24648+
24649+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24650+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24651+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24652+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24653+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24654+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24655+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24656+
24657+ if (err)
24658+ break;
24659+
24660+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24661+ regs->r11 = addr1;
24662+ regs->r10 = addr2;
24663+ if (stcclc == 0xF8)
24664+ regs->flags &= ~X86_EFLAGS_CF;
24665+ else
24666+ regs->flags |= X86_EFLAGS_CF;
24667+ regs->ip = addr1;
24668+ return 2;
24669+ }
24670+ } while (0);
24671+
24672+ do { /* PaX: gcc trampoline emulation #1 */
24673+ unsigned short mov1, mov2, jmp1;
24674+ unsigned char jmp2;
24675+ unsigned int addr1;
24676+ unsigned long addr2;
24677+
24678+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24679+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24680+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24681+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24682+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24683+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24684+
24685+ if (err)
24686+ break;
24687+
24688+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24689+ regs->r11 = addr1;
24690+ regs->r10 = addr2;
24691+ regs->ip = addr1;
24692+ return 2;
24693+ }
24694+ } while (0);
24695+
24696+ do { /* PaX: gcc trampoline emulation #2 */
24697+ unsigned short mov1, mov2, jmp1;
24698+ unsigned char jmp2;
24699+ unsigned long addr1, addr2;
24700+
24701+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24702+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24703+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24704+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24705+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24706+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24707+
24708+ if (err)
24709+ break;
24710+
24711+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24712+ regs->r11 = addr1;
24713+ regs->r10 = addr2;
24714+ regs->ip = addr1;
24715+ return 2;
24716+ }
24717+ } while (0);
24718+
24719+ return 1; /* PaX in action */
24720+}
24721+#endif
24722+
24723+/*
24724+ * PaX: decide what to do with offenders (regs->ip = fault address)
24725+ *
24726+ * returns 1 when task should be killed
24727+ * 2 when gcc trampoline was detected
24728+ */
24729+static int pax_handle_fetch_fault(struct pt_regs *regs)
24730+{
24731+ if (v8086_mode(regs))
24732+ return 1;
24733+
24734+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24735+ return 1;
24736+
24737+#ifdef CONFIG_X86_32
24738+ return pax_handle_fetch_fault_32(regs);
24739+#else
24740+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24741+ return pax_handle_fetch_fault_32(regs);
24742+ else
24743+ return pax_handle_fetch_fault_64(regs);
24744+#endif
24745+}
24746+#endif
24747+
24748+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24749+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24750+{
24751+ long i;
24752+
24753+ printk(KERN_ERR "PAX: bytes at PC: ");
24754+ for (i = 0; i < 20; i++) {
24755+ unsigned char c;
24756+ if (get_user(c, (unsigned char __force_user *)pc+i))
24757+ printk(KERN_CONT "?? ");
24758+ else
24759+ printk(KERN_CONT "%02x ", c);
24760+ }
24761+ printk("\n");
24762+
24763+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24764+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24765+ unsigned long c;
24766+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24767+#ifdef CONFIG_X86_32
24768+ printk(KERN_CONT "???????? ");
24769+#else
24770+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24771+ printk(KERN_CONT "???????? ???????? ");
24772+ else
24773+ printk(KERN_CONT "???????????????? ");
24774+#endif
24775+ } else {
24776+#ifdef CONFIG_X86_64
24777+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24778+ printk(KERN_CONT "%08x ", (unsigned int)c);
24779+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24780+ } else
24781+#endif
24782+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24783+ }
24784+ }
24785+ printk("\n");
24786+}
24787+#endif
24788+
24789+/**
24790+ * probe_kernel_write(): safely attempt to write to a location
24791+ * @dst: address to write to
24792+ * @src: pointer to the data that shall be written
24793+ * @size: size of the data chunk
24794+ *
24795+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24796+ * happens, handle that and return -EFAULT.
24797+ */
24798+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24799+{
24800+ long ret;
24801+ mm_segment_t old_fs = get_fs();
24802+
24803+ set_fs(KERNEL_DS);
24804+ pagefault_disable();
24805+ pax_open_kernel();
24806+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24807+ pax_close_kernel();
24808+ pagefault_enable();
24809+ set_fs(old_fs);
24810+
24811+ return ret ? -EFAULT : 0;
24812+}
24813diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24814index 71da1bc..7a16bf4 100644
24815--- a/arch/x86/mm/gup.c
24816+++ b/arch/x86/mm/gup.c
24817@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24818 addr = start;
24819 len = (unsigned long) nr_pages << PAGE_SHIFT;
24820 end = start + len;
24821- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24822+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24823 (void __user *)start, len)))
24824 return 0;
24825
24826diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24827index 63a6ba6..79abd7a 100644
24828--- a/arch/x86/mm/highmem_32.c
24829+++ b/arch/x86/mm/highmem_32.c
24830@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24831 idx = type + KM_TYPE_NR*smp_processor_id();
24832 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24833 BUG_ON(!pte_none(*(kmap_pte-idx)));
24834+
24835+ pax_open_kernel();
24836 set_pte(kmap_pte-idx, mk_pte(page, prot));
24837+ pax_close_kernel();
24838
24839 return (void *)vaddr;
24840 }
24841diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24842index f46c340..6ff9a26 100644
24843--- a/arch/x86/mm/hugetlbpage.c
24844+++ b/arch/x86/mm/hugetlbpage.c
24845@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24846 struct hstate *h = hstate_file(file);
24847 struct mm_struct *mm = current->mm;
24848 struct vm_area_struct *vma;
24849- unsigned long start_addr;
24850+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24851+
24852+#ifdef CONFIG_PAX_SEGMEXEC
24853+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24854+ pax_task_size = SEGMEXEC_TASK_SIZE;
24855+#endif
24856+
24857+ pax_task_size -= PAGE_SIZE;
24858
24859 if (len > mm->cached_hole_size) {
24860- start_addr = mm->free_area_cache;
24861+ start_addr = mm->free_area_cache;
24862 } else {
24863- start_addr = TASK_UNMAPPED_BASE;
24864- mm->cached_hole_size = 0;
24865+ start_addr = mm->mmap_base;
24866+ mm->cached_hole_size = 0;
24867 }
24868
24869 full_search:
24870@@ -281,26 +288,27 @@ full_search:
24871
24872 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24873 /* At this point: (!vma || addr < vma->vm_end). */
24874- if (TASK_SIZE - len < addr) {
24875+ if (pax_task_size - len < addr) {
24876 /*
24877 * Start a new search - just in case we missed
24878 * some holes.
24879 */
24880- if (start_addr != TASK_UNMAPPED_BASE) {
24881- start_addr = TASK_UNMAPPED_BASE;
24882+ if (start_addr != mm->mmap_base) {
24883+ start_addr = mm->mmap_base;
24884 mm->cached_hole_size = 0;
24885 goto full_search;
24886 }
24887 return -ENOMEM;
24888 }
24889- if (!vma || addr + len <= vma->vm_start) {
24890- mm->free_area_cache = addr + len;
24891- return addr;
24892- }
24893+ if (check_heap_stack_gap(vma, addr, len))
24894+ break;
24895 if (addr + mm->cached_hole_size < vma->vm_start)
24896 mm->cached_hole_size = vma->vm_start - addr;
24897 addr = ALIGN(vma->vm_end, huge_page_size(h));
24898 }
24899+
24900+ mm->free_area_cache = addr + len;
24901+ return addr;
24902 }
24903
24904 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24905@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24906 {
24907 struct hstate *h = hstate_file(file);
24908 struct mm_struct *mm = current->mm;
24909- struct vm_area_struct *vma, *prev_vma;
24910- unsigned long base = mm->mmap_base, addr = addr0;
24911+ struct vm_area_struct *vma;
24912+ unsigned long base = mm->mmap_base, addr;
24913 unsigned long largest_hole = mm->cached_hole_size;
24914- int first_time = 1;
24915
24916 /* don't allow allocations above current base */
24917 if (mm->free_area_cache > base)
24918@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24919 largest_hole = 0;
24920 mm->free_area_cache = base;
24921 }
24922-try_again:
24923+
24924 /* make sure it can fit in the remaining address space */
24925 if (mm->free_area_cache < len)
24926 goto fail;
24927
24928 /* either no address requested or cant fit in requested address hole */
24929- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24930+ addr = (mm->free_area_cache - len);
24931 do {
24932+ addr &= huge_page_mask(h);
24933+ vma = find_vma(mm, addr);
24934 /*
24935 * Lookup failure means no vma is above this address,
24936 * i.e. return with success:
24937- */
24938- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24939- return addr;
24940-
24941- /*
24942 * new region fits between prev_vma->vm_end and
24943 * vma->vm_start, use it:
24944 */
24945- if (addr + len <= vma->vm_start &&
24946- (!prev_vma || (addr >= prev_vma->vm_end))) {
24947+ if (check_heap_stack_gap(vma, addr, len)) {
24948 /* remember the address as a hint for next time */
24949- mm->cached_hole_size = largest_hole;
24950- return (mm->free_area_cache = addr);
24951- } else {
24952- /* pull free_area_cache down to the first hole */
24953- if (mm->free_area_cache == vma->vm_end) {
24954- mm->free_area_cache = vma->vm_start;
24955- mm->cached_hole_size = largest_hole;
24956- }
24957+ mm->cached_hole_size = largest_hole;
24958+ return (mm->free_area_cache = addr);
24959+ }
24960+ /* pull free_area_cache down to the first hole */
24961+ if (mm->free_area_cache == vma->vm_end) {
24962+ mm->free_area_cache = vma->vm_start;
24963+ mm->cached_hole_size = largest_hole;
24964 }
24965
24966 /* remember the largest hole we saw so far */
24967 if (addr + largest_hole < vma->vm_start)
24968- largest_hole = vma->vm_start - addr;
24969+ largest_hole = vma->vm_start - addr;
24970
24971 /* try just below the current vma->vm_start */
24972- addr = (vma->vm_start - len) & huge_page_mask(h);
24973- } while (len <= vma->vm_start);
24974+ addr = skip_heap_stack_gap(vma, len);
24975+ } while (!IS_ERR_VALUE(addr));
24976
24977 fail:
24978 /*
24979- * if hint left us with no space for the requested
24980- * mapping then try again:
24981- */
24982- if (first_time) {
24983- mm->free_area_cache = base;
24984- largest_hole = 0;
24985- first_time = 0;
24986- goto try_again;
24987- }
24988- /*
24989 * A failed mmap() very likely causes application failure,
24990 * so fall back to the bottom-up function here. This scenario
24991 * can happen with large stack limits and large mmap()
24992 * allocations.
24993 */
24994- mm->free_area_cache = TASK_UNMAPPED_BASE;
24995+
24996+#ifdef CONFIG_PAX_SEGMEXEC
24997+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24998+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24999+ else
25000+#endif
25001+
25002+ mm->mmap_base = TASK_UNMAPPED_BASE;
25003+
25004+#ifdef CONFIG_PAX_RANDMMAP
25005+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25006+ mm->mmap_base += mm->delta_mmap;
25007+#endif
25008+
25009+ mm->free_area_cache = mm->mmap_base;
25010 mm->cached_hole_size = ~0UL;
25011 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25012 len, pgoff, flags);
25013@@ -387,6 +393,7 @@ fail:
25014 /*
25015 * Restore the topdown base:
25016 */
25017+ mm->mmap_base = base;
25018 mm->free_area_cache = base;
25019 mm->cached_hole_size = ~0UL;
25020
25021@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25022 struct hstate *h = hstate_file(file);
25023 struct mm_struct *mm = current->mm;
25024 struct vm_area_struct *vma;
25025+ unsigned long pax_task_size = TASK_SIZE;
25026
25027 if (len & ~huge_page_mask(h))
25028 return -EINVAL;
25029- if (len > TASK_SIZE)
25030+
25031+#ifdef CONFIG_PAX_SEGMEXEC
25032+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25033+ pax_task_size = SEGMEXEC_TASK_SIZE;
25034+#endif
25035+
25036+ pax_task_size -= PAGE_SIZE;
25037+
25038+ if (len > pax_task_size)
25039 return -ENOMEM;
25040
25041 if (flags & MAP_FIXED) {
25042@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25043 if (addr) {
25044 addr = ALIGN(addr, huge_page_size(h));
25045 vma = find_vma(mm, addr);
25046- if (TASK_SIZE - len >= addr &&
25047- (!vma || addr + len <= vma->vm_start))
25048+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25049 return addr;
25050 }
25051 if (mm->get_unmapped_area == arch_get_unmapped_area)
25052diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25053index 73ffd55..f61c2a7 100644
25054--- a/arch/x86/mm/init.c
25055+++ b/arch/x86/mm/init.c
25056@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
25057 * cause a hotspot and fill up ZONE_DMA. The page tables
25058 * need roughly 0.5KB per GB.
25059 */
25060-#ifdef CONFIG_X86_32
25061- start = 0x7000;
25062-#else
25063- start = 0x8000;
25064-#endif
25065+ start = 0x100000;
25066 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
25067 tables, PAGE_SIZE);
25068 if (e820_table_start == -1UL)
25069@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25070 #endif
25071
25072 set_nx();
25073- if (nx_enabled)
25074+ if (nx_enabled && cpu_has_nx)
25075 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
25076
25077 /* Enable PSE if available */
25078@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25079 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25080 * mmio resources as well as potential bios/acpi data regions.
25081 */
25082+
25083 int devmem_is_allowed(unsigned long pagenr)
25084 {
25085+#ifdef CONFIG_GRKERNSEC_KMEM
25086+ /* allow BDA */
25087+ if (!pagenr)
25088+ return 1;
25089+ /* allow EBDA */
25090+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
25091+ return 1;
25092+ /* allow ISA/video mem */
25093+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25094+ return 1;
25095+ /* throw out everything else below 1MB */
25096+ if (pagenr <= 256)
25097+ return 0;
25098+#else
25099 if (pagenr <= 256)
25100 return 1;
25101+#endif
25102+
25103 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25104 return 0;
25105 if (!page_is_ram(pagenr))
25106@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25107
25108 void free_initmem(void)
25109 {
25110+
25111+#ifdef CONFIG_PAX_KERNEXEC
25112+#ifdef CONFIG_X86_32
25113+ /* PaX: limit KERNEL_CS to actual size */
25114+ unsigned long addr, limit;
25115+ struct desc_struct d;
25116+ int cpu;
25117+
25118+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25119+ limit = (limit - 1UL) >> PAGE_SHIFT;
25120+
25121+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25122+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25123+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25124+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25125+ }
25126+
25127+ /* PaX: make KERNEL_CS read-only */
25128+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25129+ if (!paravirt_enabled())
25130+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25131+/*
25132+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25133+ pgd = pgd_offset_k(addr);
25134+ pud = pud_offset(pgd, addr);
25135+ pmd = pmd_offset(pud, addr);
25136+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25137+ }
25138+*/
25139+#ifdef CONFIG_X86_PAE
25140+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25141+/*
25142+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25143+ pgd = pgd_offset_k(addr);
25144+ pud = pud_offset(pgd, addr);
25145+ pmd = pmd_offset(pud, addr);
25146+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25147+ }
25148+*/
25149+#endif
25150+
25151+#ifdef CONFIG_MODULES
25152+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25153+#endif
25154+
25155+#else
25156+ pgd_t *pgd;
25157+ pud_t *pud;
25158+ pmd_t *pmd;
25159+ unsigned long addr, end;
25160+
25161+ /* PaX: make kernel code/rodata read-only, rest non-executable */
25162+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25163+ pgd = pgd_offset_k(addr);
25164+ pud = pud_offset(pgd, addr);
25165+ pmd = pmd_offset(pud, addr);
25166+ if (!pmd_present(*pmd))
25167+ continue;
25168+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25169+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25170+ else
25171+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25172+ }
25173+
25174+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25175+ end = addr + KERNEL_IMAGE_SIZE;
25176+ for (; addr < end; addr += PMD_SIZE) {
25177+ pgd = pgd_offset_k(addr);
25178+ pud = pud_offset(pgd, addr);
25179+ pmd = pmd_offset(pud, addr);
25180+ if (!pmd_present(*pmd))
25181+ continue;
25182+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25183+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25184+ }
25185+#endif
25186+
25187+ flush_tlb_all();
25188+#endif
25189+
25190 free_init_pages("unused kernel memory",
25191 (unsigned long)(&__init_begin),
25192 (unsigned long)(&__init_end));
25193diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25194index 30938c1..bda3d5d 100644
25195--- a/arch/x86/mm/init_32.c
25196+++ b/arch/x86/mm/init_32.c
25197@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
25198 }
25199
25200 /*
25201- * Creates a middle page table and puts a pointer to it in the
25202- * given global directory entry. This only returns the gd entry
25203- * in non-PAE compilation mode, since the middle layer is folded.
25204- */
25205-static pmd_t * __init one_md_table_init(pgd_t *pgd)
25206-{
25207- pud_t *pud;
25208- pmd_t *pmd_table;
25209-
25210-#ifdef CONFIG_X86_PAE
25211- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25212- if (after_bootmem)
25213- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25214- else
25215- pmd_table = (pmd_t *)alloc_low_page();
25216- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25217- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25218- pud = pud_offset(pgd, 0);
25219- BUG_ON(pmd_table != pmd_offset(pud, 0));
25220-
25221- return pmd_table;
25222- }
25223-#endif
25224- pud = pud_offset(pgd, 0);
25225- pmd_table = pmd_offset(pud, 0);
25226-
25227- return pmd_table;
25228-}
25229-
25230-/*
25231 * Create a page table and place a pointer to it in a middle page
25232 * directory entry:
25233 */
25234@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25235 page_table = (pte_t *)alloc_low_page();
25236
25237 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25238+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25239+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25240+#else
25241 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25242+#endif
25243 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25244 }
25245
25246 return pte_offset_kernel(pmd, 0);
25247 }
25248
25249+static pmd_t * __init one_md_table_init(pgd_t *pgd)
25250+{
25251+ pud_t *pud;
25252+ pmd_t *pmd_table;
25253+
25254+ pud = pud_offset(pgd, 0);
25255+ pmd_table = pmd_offset(pud, 0);
25256+
25257+ return pmd_table;
25258+}
25259+
25260 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25261 {
25262 int pgd_idx = pgd_index(vaddr);
25263@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25264 int pgd_idx, pmd_idx;
25265 unsigned long vaddr;
25266 pgd_t *pgd;
25267+ pud_t *pud;
25268 pmd_t *pmd;
25269 pte_t *pte = NULL;
25270
25271@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25272 pgd = pgd_base + pgd_idx;
25273
25274 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25275- pmd = one_md_table_init(pgd);
25276- pmd = pmd + pmd_index(vaddr);
25277+ pud = pud_offset(pgd, vaddr);
25278+ pmd = pmd_offset(pud, vaddr);
25279+
25280+#ifdef CONFIG_X86_PAE
25281+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25282+#endif
25283+
25284 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25285 pmd++, pmd_idx++) {
25286 pte = page_table_kmap_check(one_page_table_init(pmd),
25287@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25288 }
25289 }
25290
25291-static inline int is_kernel_text(unsigned long addr)
25292+static inline int is_kernel_text(unsigned long start, unsigned long end)
25293 {
25294- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25295- return 1;
25296- return 0;
25297+ if ((start > ktla_ktva((unsigned long)_etext) ||
25298+ end <= ktla_ktva((unsigned long)_stext)) &&
25299+ (start > ktla_ktva((unsigned long)_einittext) ||
25300+ end <= ktla_ktva((unsigned long)_sinittext)) &&
25301+
25302+#ifdef CONFIG_ACPI_SLEEP
25303+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25304+#endif
25305+
25306+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25307+ return 0;
25308+ return 1;
25309 }
25310
25311 /*
25312@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25313 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25314 unsigned long start_pfn, end_pfn;
25315 pgd_t *pgd_base = swapper_pg_dir;
25316- int pgd_idx, pmd_idx, pte_ofs;
25317+ unsigned int pgd_idx, pmd_idx, pte_ofs;
25318 unsigned long pfn;
25319 pgd_t *pgd;
25320+ pud_t *pud;
25321 pmd_t *pmd;
25322 pte_t *pte;
25323 unsigned pages_2m, pages_4k;
25324@@ -278,8 +279,13 @@ repeat:
25325 pfn = start_pfn;
25326 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25327 pgd = pgd_base + pgd_idx;
25328- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25329- pmd = one_md_table_init(pgd);
25330+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25331+ pud = pud_offset(pgd, 0);
25332+ pmd = pmd_offset(pud, 0);
25333+
25334+#ifdef CONFIG_X86_PAE
25335+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25336+#endif
25337
25338 if (pfn >= end_pfn)
25339 continue;
25340@@ -291,14 +297,13 @@ repeat:
25341 #endif
25342 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25343 pmd++, pmd_idx++) {
25344- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25345+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25346
25347 /*
25348 * Map with big pages if possible, otherwise
25349 * create normal page tables:
25350 */
25351 if (use_pse) {
25352- unsigned int addr2;
25353 pgprot_t prot = PAGE_KERNEL_LARGE;
25354 /*
25355 * first pass will use the same initial
25356@@ -308,11 +313,7 @@ repeat:
25357 __pgprot(PTE_IDENT_ATTR |
25358 _PAGE_PSE);
25359
25360- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25361- PAGE_OFFSET + PAGE_SIZE-1;
25362-
25363- if (is_kernel_text(addr) ||
25364- is_kernel_text(addr2))
25365+ if (is_kernel_text(address, address + PMD_SIZE))
25366 prot = PAGE_KERNEL_LARGE_EXEC;
25367
25368 pages_2m++;
25369@@ -329,7 +330,7 @@ repeat:
25370 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25371 pte += pte_ofs;
25372 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25373- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25374+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25375 pgprot_t prot = PAGE_KERNEL;
25376 /*
25377 * first pass will use the same initial
25378@@ -337,7 +338,7 @@ repeat:
25379 */
25380 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25381
25382- if (is_kernel_text(addr))
25383+ if (is_kernel_text(address, address + PAGE_SIZE))
25384 prot = PAGE_KERNEL_EXEC;
25385
25386 pages_4k++;
25387@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25388
25389 pud = pud_offset(pgd, va);
25390 pmd = pmd_offset(pud, va);
25391- if (!pmd_present(*pmd))
25392+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
25393 break;
25394
25395 pte = pte_offset_kernel(pmd, va);
25396@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25397
25398 static void __init pagetable_init(void)
25399 {
25400- pgd_t *pgd_base = swapper_pg_dir;
25401-
25402- permanent_kmaps_init(pgd_base);
25403+ permanent_kmaps_init(swapper_pg_dir);
25404 }
25405
25406 #ifdef CONFIG_ACPI_SLEEP
25407@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25408 * ACPI suspend needs this for resume, because things like the intel-agp
25409 * driver might have split up a kernel 4MB mapping.
25410 */
25411-char swsusp_pg_dir[PAGE_SIZE]
25412+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25413 __attribute__ ((aligned(PAGE_SIZE)));
25414
25415 static inline void save_pg_dir(void)
25416 {
25417- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25418+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25419 }
25420 #else /* !CONFIG_ACPI_SLEEP */
25421 static inline void save_pg_dir(void)
25422@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25423 flush_tlb_all();
25424 }
25425
25426-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25427+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25428 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25429
25430 /* user-defined highmem size */
25431@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25432 * Initialize the boot-time allocator (with low memory only):
25433 */
25434 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25435- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25436+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25437 PAGE_SIZE);
25438 if (bootmap == -1L)
25439 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25440@@ -864,6 +863,12 @@ void __init mem_init(void)
25441
25442 pci_iommu_alloc();
25443
25444+#ifdef CONFIG_PAX_PER_CPU_PGD
25445+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25446+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25447+ KERNEL_PGD_PTRS);
25448+#endif
25449+
25450 #ifdef CONFIG_FLATMEM
25451 BUG_ON(!mem_map);
25452 #endif
25453@@ -881,7 +886,7 @@ void __init mem_init(void)
25454 set_highmem_pages_init();
25455
25456 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25457- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25458+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25459 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25460
25461 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25462@@ -923,10 +928,10 @@ void __init mem_init(void)
25463 ((unsigned long)&__init_end -
25464 (unsigned long)&__init_begin) >> 10,
25465
25466- (unsigned long)&_etext, (unsigned long)&_edata,
25467- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25468+ (unsigned long)&_sdata, (unsigned long)&_edata,
25469+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25470
25471- (unsigned long)&_text, (unsigned long)&_etext,
25472+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25473 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25474
25475 /*
25476@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25477 if (!kernel_set_to_readonly)
25478 return;
25479
25480+ start = ktla_ktva(start);
25481 pr_debug("Set kernel text: %lx - %lx for read write\n",
25482 start, start+size);
25483
25484@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25485 if (!kernel_set_to_readonly)
25486 return;
25487
25488+ start = ktla_ktva(start);
25489 pr_debug("Set kernel text: %lx - %lx for read only\n",
25490 start, start+size);
25491
25492@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25493 unsigned long start = PFN_ALIGN(_text);
25494 unsigned long size = PFN_ALIGN(_etext) - start;
25495
25496+ start = ktla_ktva(start);
25497 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25498 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25499 size >> 10);
25500diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25501index 7d095ad..25d2549 100644
25502--- a/arch/x86/mm/init_64.c
25503+++ b/arch/x86/mm/init_64.c
25504@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25505 pmd = fill_pmd(pud, vaddr);
25506 pte = fill_pte(pmd, vaddr);
25507
25508+ pax_open_kernel();
25509 set_pte(pte, new_pte);
25510+ pax_close_kernel();
25511
25512 /*
25513 * It's enough to flush this one mapping.
25514@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25515 pgd = pgd_offset_k((unsigned long)__va(phys));
25516 if (pgd_none(*pgd)) {
25517 pud = (pud_t *) spp_getpage();
25518- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25519- _PAGE_USER));
25520+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25521 }
25522 pud = pud_offset(pgd, (unsigned long)__va(phys));
25523 if (pud_none(*pud)) {
25524 pmd = (pmd_t *) spp_getpage();
25525- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25526- _PAGE_USER));
25527+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25528 }
25529 pmd = pmd_offset(pud, phys);
25530 BUG_ON(!pmd_none(*pmd));
25531@@ -675,6 +675,12 @@ void __init mem_init(void)
25532
25533 pci_iommu_alloc();
25534
25535+#ifdef CONFIG_PAX_PER_CPU_PGD
25536+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25537+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25538+ KERNEL_PGD_PTRS);
25539+#endif
25540+
25541 /* clear_bss() already clear the empty_zero_page */
25542
25543 reservedpages = 0;
25544@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25545 static struct vm_area_struct gate_vma = {
25546 .vm_start = VSYSCALL_START,
25547 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25548- .vm_page_prot = PAGE_READONLY_EXEC,
25549- .vm_flags = VM_READ | VM_EXEC
25550+ .vm_page_prot = PAGE_READONLY,
25551+ .vm_flags = VM_READ
25552 };
25553
25554 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25555@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25556
25557 const char *arch_vma_name(struct vm_area_struct *vma)
25558 {
25559- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25560+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25561 return "[vdso]";
25562 if (vma == &gate_vma)
25563 return "[vsyscall]";
25564diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25565index 84e236c..69bd3f6 100644
25566--- a/arch/x86/mm/iomap_32.c
25567+++ b/arch/x86/mm/iomap_32.c
25568@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25569 debug_kmap_atomic(type);
25570 idx = type + KM_TYPE_NR * smp_processor_id();
25571 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25572+
25573+ pax_open_kernel();
25574 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25575+ pax_close_kernel();
25576+
25577 arch_flush_lazy_mmu_mode();
25578
25579 return (void *)vaddr;
25580diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25581index 2feb9bd..ab91e7b 100644
25582--- a/arch/x86/mm/ioremap.c
25583+++ b/arch/x86/mm/ioremap.c
25584@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25585 * Second special case: Some BIOSen report the PC BIOS
25586 * area (640->1Mb) as ram even though it is not.
25587 */
25588- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25589- pagenr < (BIOS_END >> PAGE_SHIFT))
25590+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25591+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25592 return 0;
25593
25594 for (i = 0; i < e820.nr_map; i++) {
25595@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25596 /*
25597 * Don't allow anybody to remap normal RAM that we're using..
25598 */
25599- for (pfn = phys_addr >> PAGE_SHIFT;
25600- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25601- pfn++) {
25602-
25603+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25604 int is_ram = page_is_ram(pfn);
25605
25606- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25607+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25608 return NULL;
25609 WARN_ON_ONCE(is_ram);
25610 }
25611@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25612
25613 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25614 if (page_is_ram(start >> PAGE_SHIFT))
25615+#ifdef CONFIG_HIGHMEM
25616+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25617+#endif
25618 return __va(phys);
25619
25620 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25621@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25622 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25623
25624 static __initdata int after_paging_init;
25625-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25626+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25627
25628 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25629 {
25630@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25631 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25632
25633 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25634- memset(bm_pte, 0, sizeof(bm_pte));
25635- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25636+ pmd_populate_user(&init_mm, pmd, bm_pte);
25637
25638 /*
25639 * The boot-ioremap range spans multiple pmds, for which
25640diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25641index 8cc1833..1abbc5b 100644
25642--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25643+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25644@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25645 * memory (e.g. tracked pages)? For now, we need this to avoid
25646 * invoking kmemcheck for PnP BIOS calls.
25647 */
25648- if (regs->flags & X86_VM_MASK)
25649+ if (v8086_mode(regs))
25650 return false;
25651- if (regs->cs != __KERNEL_CS)
25652+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25653 return false;
25654
25655 pte = kmemcheck_pte_lookup(address);
25656diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25657index c9e57af..07a321b 100644
25658--- a/arch/x86/mm/mmap.c
25659+++ b/arch/x86/mm/mmap.c
25660@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25661 * Leave an at least ~128 MB hole with possible stack randomization.
25662 */
25663 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25664-#define MAX_GAP (TASK_SIZE/6*5)
25665+#define MAX_GAP (pax_task_size/6*5)
25666
25667 /*
25668 * True on X86_32 or when emulating IA32 on X86_64
25669@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25670 return rnd << PAGE_SHIFT;
25671 }
25672
25673-static unsigned long mmap_base(void)
25674+static unsigned long mmap_base(struct mm_struct *mm)
25675 {
25676 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25677+ unsigned long pax_task_size = TASK_SIZE;
25678+
25679+#ifdef CONFIG_PAX_SEGMEXEC
25680+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25681+ pax_task_size = SEGMEXEC_TASK_SIZE;
25682+#endif
25683
25684 if (gap < MIN_GAP)
25685 gap = MIN_GAP;
25686 else if (gap > MAX_GAP)
25687 gap = MAX_GAP;
25688
25689- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25690+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25691 }
25692
25693 /*
25694 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25695 * does, but not when emulating X86_32
25696 */
25697-static unsigned long mmap_legacy_base(void)
25698+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25699 {
25700- if (mmap_is_ia32())
25701+ if (mmap_is_ia32()) {
25702+
25703+#ifdef CONFIG_PAX_SEGMEXEC
25704+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25705+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25706+ else
25707+#endif
25708+
25709 return TASK_UNMAPPED_BASE;
25710- else
25711+ } else
25712 return TASK_UNMAPPED_BASE + mmap_rnd();
25713 }
25714
25715@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25716 void arch_pick_mmap_layout(struct mm_struct *mm)
25717 {
25718 if (mmap_is_legacy()) {
25719- mm->mmap_base = mmap_legacy_base();
25720+ mm->mmap_base = mmap_legacy_base(mm);
25721+
25722+#ifdef CONFIG_PAX_RANDMMAP
25723+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25724+ mm->mmap_base += mm->delta_mmap;
25725+#endif
25726+
25727 mm->get_unmapped_area = arch_get_unmapped_area;
25728 mm->unmap_area = arch_unmap_area;
25729 } else {
25730- mm->mmap_base = mmap_base();
25731+ mm->mmap_base = mmap_base(mm);
25732+
25733+#ifdef CONFIG_PAX_RANDMMAP
25734+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25735+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25736+#endif
25737+
25738 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25739 mm->unmap_area = arch_unmap_area_topdown;
25740 }
25741diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25742index 132772a..b961f11 100644
25743--- a/arch/x86/mm/mmio-mod.c
25744+++ b/arch/x86/mm/mmio-mod.c
25745@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25746 break;
25747 default:
25748 {
25749- unsigned char *ip = (unsigned char *)instptr;
25750+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25751 my_trace->opcode = MMIO_UNKNOWN_OP;
25752 my_trace->width = 0;
25753 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25754@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25755 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25756 void __iomem *addr)
25757 {
25758- static atomic_t next_id;
25759+ static atomic_unchecked_t next_id;
25760 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25761 /* These are page-unaligned. */
25762 struct mmiotrace_map map = {
25763@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25764 .private = trace
25765 },
25766 .phys = offset,
25767- .id = atomic_inc_return(&next_id)
25768+ .id = atomic_inc_return_unchecked(&next_id)
25769 };
25770 map.map_id = trace->id;
25771
25772diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25773index d253006..e56dd6a 100644
25774--- a/arch/x86/mm/numa_32.c
25775+++ b/arch/x86/mm/numa_32.c
25776@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25777 }
25778 #endif
25779
25780-extern unsigned long find_max_low_pfn(void);
25781 extern unsigned long highend_pfn, highstart_pfn;
25782
25783 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25784diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25785index e1d1069..2251ff3 100644
25786--- a/arch/x86/mm/pageattr-test.c
25787+++ b/arch/x86/mm/pageattr-test.c
25788@@ -36,7 +36,7 @@ enum {
25789
25790 static int pte_testbit(pte_t pte)
25791 {
25792- return pte_flags(pte) & _PAGE_UNUSED1;
25793+ return pte_flags(pte) & _PAGE_CPA_TEST;
25794 }
25795
25796 struct split_state {
25797diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25798index dd38bfb..b72c63e 100644
25799--- a/arch/x86/mm/pageattr.c
25800+++ b/arch/x86/mm/pageattr.c
25801@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25802 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25803 */
25804 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25805- pgprot_val(forbidden) |= _PAGE_NX;
25806+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25807
25808 /*
25809 * The kernel text needs to be executable for obvious reasons
25810 * Does not cover __inittext since that is gone later on. On
25811 * 64bit we do not enforce !NX on the low mapping
25812 */
25813- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25814- pgprot_val(forbidden) |= _PAGE_NX;
25815+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25816+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25817
25818+#ifdef CONFIG_DEBUG_RODATA
25819 /*
25820 * The .rodata section needs to be read-only. Using the pfn
25821 * catches all aliases.
25822@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25823 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25824 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25825 pgprot_val(forbidden) |= _PAGE_RW;
25826+#endif
25827+
25828+#ifdef CONFIG_PAX_KERNEXEC
25829+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25830+ pgprot_val(forbidden) |= _PAGE_RW;
25831+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25832+ }
25833+#endif
25834
25835 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25836
25837@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25838 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25839 {
25840 /* change init_mm */
25841+ pax_open_kernel();
25842 set_pte_atomic(kpte, pte);
25843+
25844 #ifdef CONFIG_X86_32
25845 if (!SHARED_KERNEL_PMD) {
25846+
25847+#ifdef CONFIG_PAX_PER_CPU_PGD
25848+ unsigned long cpu;
25849+#else
25850 struct page *page;
25851+#endif
25852
25853+#ifdef CONFIG_PAX_PER_CPU_PGD
25854+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25855+ pgd_t *pgd = get_cpu_pgd(cpu);
25856+#else
25857 list_for_each_entry(page, &pgd_list, lru) {
25858- pgd_t *pgd;
25859+ pgd_t *pgd = (pgd_t *)page_address(page);
25860+#endif
25861+
25862 pud_t *pud;
25863 pmd_t *pmd;
25864
25865- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25866+ pgd += pgd_index(address);
25867 pud = pud_offset(pgd, address);
25868 pmd = pmd_offset(pud, address);
25869 set_pte_atomic((pte_t *)pmd, pte);
25870 }
25871 }
25872 #endif
25873+ pax_close_kernel();
25874 }
25875
25876 static int
25877diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25878index e78cd0e..de0a817 100644
25879--- a/arch/x86/mm/pat.c
25880+++ b/arch/x86/mm/pat.c
25881@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25882
25883 conflict:
25884 printk(KERN_INFO "%s:%d conflicting memory types "
25885- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25886+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25887 new->end, cattr_name(new->type), cattr_name(entry->type));
25888 return -EBUSY;
25889 }
25890@@ -559,7 +559,7 @@ unlock_ret:
25891
25892 if (err) {
25893 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25894- current->comm, current->pid, start, end);
25895+ current->comm, task_pid_nr(current), start, end);
25896 }
25897
25898 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25899@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25900 while (cursor < to) {
25901 if (!devmem_is_allowed(pfn)) {
25902 printk(KERN_INFO
25903- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25904- current->comm, from, to);
25905+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25906+ current->comm, from, to, cursor);
25907 return 0;
25908 }
25909 cursor += PAGE_SIZE;
25910@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25911 printk(KERN_INFO
25912 "%s:%d ioremap_change_attr failed %s "
25913 "for %Lx-%Lx\n",
25914- current->comm, current->pid,
25915+ current->comm, task_pid_nr(current),
25916 cattr_name(flags),
25917 base, (unsigned long long)(base + size));
25918 return -EINVAL;
25919@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25920 free_memtype(paddr, paddr + size);
25921 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25922 " for %Lx-%Lx, got %s\n",
25923- current->comm, current->pid,
25924+ current->comm, task_pid_nr(current),
25925 cattr_name(want_flags),
25926 (unsigned long long)paddr,
25927 (unsigned long long)(paddr + size),
25928diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25929index df3d5c8..c2223e1 100644
25930--- a/arch/x86/mm/pf_in.c
25931+++ b/arch/x86/mm/pf_in.c
25932@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25933 int i;
25934 enum reason_type rv = OTHERS;
25935
25936- p = (unsigned char *)ins_addr;
25937+ p = (unsigned char *)ktla_ktva(ins_addr);
25938 p += skip_prefix(p, &prf);
25939 p += get_opcode(p, &opcode);
25940
25941@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25942 struct prefix_bits prf;
25943 int i;
25944
25945- p = (unsigned char *)ins_addr;
25946+ p = (unsigned char *)ktla_ktva(ins_addr);
25947 p += skip_prefix(p, &prf);
25948 p += get_opcode(p, &opcode);
25949
25950@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25951 struct prefix_bits prf;
25952 int i;
25953
25954- p = (unsigned char *)ins_addr;
25955+ p = (unsigned char *)ktla_ktva(ins_addr);
25956 p += skip_prefix(p, &prf);
25957 p += get_opcode(p, &opcode);
25958
25959@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25960 int i;
25961 unsigned long rv;
25962
25963- p = (unsigned char *)ins_addr;
25964+ p = (unsigned char *)ktla_ktva(ins_addr);
25965 p += skip_prefix(p, &prf);
25966 p += get_opcode(p, &opcode);
25967 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25968@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25969 int i;
25970 unsigned long rv;
25971
25972- p = (unsigned char *)ins_addr;
25973+ p = (unsigned char *)ktla_ktva(ins_addr);
25974 p += skip_prefix(p, &prf);
25975 p += get_opcode(p, &opcode);
25976 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25977diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25978index e0e6fad..c56b495 100644
25979--- a/arch/x86/mm/pgtable.c
25980+++ b/arch/x86/mm/pgtable.c
25981@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25982 list_del(&page->lru);
25983 }
25984
25985-#define UNSHARED_PTRS_PER_PGD \
25986- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25987+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25988+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25989
25990+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25991+{
25992+ while (count--)
25993+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25994+}
25995+#endif
25996+
25997+#ifdef CONFIG_PAX_PER_CPU_PGD
25998+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25999+{
26000+ while (count--)
26001+
26002+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26003+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
26004+#else
26005+ *dst++ = *src++;
26006+#endif
26007+
26008+}
26009+#endif
26010+
26011+#ifdef CONFIG_X86_64
26012+#define pxd_t pud_t
26013+#define pyd_t pgd_t
26014+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26015+#define pxd_free(mm, pud) pud_free((mm), (pud))
26016+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26017+#define pyd_offset(mm, address) pgd_offset((mm), (address))
26018+#define PYD_SIZE PGDIR_SIZE
26019+#else
26020+#define pxd_t pmd_t
26021+#define pyd_t pud_t
26022+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26023+#define pxd_free(mm, pud) pmd_free((mm), (pud))
26024+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26025+#define pyd_offset(mm, address) pud_offset((mm), (address))
26026+#define PYD_SIZE PUD_SIZE
26027+#endif
26028+
26029+#ifdef CONFIG_PAX_PER_CPU_PGD
26030+static inline void pgd_ctor(pgd_t *pgd) {}
26031+static inline void pgd_dtor(pgd_t *pgd) {}
26032+#else
26033 static void pgd_ctor(pgd_t *pgd)
26034 {
26035 /* If the pgd points to a shared pagetable level (either the
26036@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
26037 pgd_list_del(pgd);
26038 spin_unlock_irqrestore(&pgd_lock, flags);
26039 }
26040+#endif
26041
26042 /*
26043 * List of all pgd's needed for non-PAE so it can invalidate entries
26044@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
26045 * -- wli
26046 */
26047
26048-#ifdef CONFIG_X86_PAE
26049+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26050 /*
26051 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26052 * updating the top-level pagetable entries to guarantee the
26053@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
26054 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26055 * and initialize the kernel pmds here.
26056 */
26057-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26058+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26059
26060 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26061 {
26062@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26063 */
26064 flush_tlb_mm(mm);
26065 }
26066+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26067+#define PREALLOCATED_PXDS USER_PGD_PTRS
26068 #else /* !CONFIG_X86_PAE */
26069
26070 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26071-#define PREALLOCATED_PMDS 0
26072+#define PREALLOCATED_PXDS 0
26073
26074 #endif /* CONFIG_X86_PAE */
26075
26076-static void free_pmds(pmd_t *pmds[])
26077+static void free_pxds(pxd_t *pxds[])
26078 {
26079 int i;
26080
26081- for(i = 0; i < PREALLOCATED_PMDS; i++)
26082- if (pmds[i])
26083- free_page((unsigned long)pmds[i]);
26084+ for(i = 0; i < PREALLOCATED_PXDS; i++)
26085+ if (pxds[i])
26086+ free_page((unsigned long)pxds[i]);
26087 }
26088
26089-static int preallocate_pmds(pmd_t *pmds[])
26090+static int preallocate_pxds(pxd_t *pxds[])
26091 {
26092 int i;
26093 bool failed = false;
26094
26095- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26096- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26097- if (pmd == NULL)
26098+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26099+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26100+ if (pxd == NULL)
26101 failed = true;
26102- pmds[i] = pmd;
26103+ pxds[i] = pxd;
26104 }
26105
26106 if (failed) {
26107- free_pmds(pmds);
26108+ free_pxds(pxds);
26109 return -ENOMEM;
26110 }
26111
26112@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
26113 * preallocate which never got a corresponding vma will need to be
26114 * freed manually.
26115 */
26116-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26117+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26118 {
26119 int i;
26120
26121- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26122+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26123 pgd_t pgd = pgdp[i];
26124
26125 if (pgd_val(pgd) != 0) {
26126- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26127+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26128
26129- pgdp[i] = native_make_pgd(0);
26130+ set_pgd(pgdp + i, native_make_pgd(0));
26131
26132- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26133- pmd_free(mm, pmd);
26134+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26135+ pxd_free(mm, pxd);
26136 }
26137 }
26138 }
26139
26140-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26141+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26142 {
26143- pud_t *pud;
26144+ pyd_t *pyd;
26145 unsigned long addr;
26146 int i;
26147
26148- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26149+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26150 return;
26151
26152- pud = pud_offset(pgd, 0);
26153+#ifdef CONFIG_X86_64
26154+ pyd = pyd_offset(mm, 0L);
26155+#else
26156+ pyd = pyd_offset(pgd, 0L);
26157+#endif
26158
26159- for (addr = i = 0; i < PREALLOCATED_PMDS;
26160- i++, pud++, addr += PUD_SIZE) {
26161- pmd_t *pmd = pmds[i];
26162+ for (addr = i = 0; i < PREALLOCATED_PXDS;
26163+ i++, pyd++, addr += PYD_SIZE) {
26164+ pxd_t *pxd = pxds[i];
26165
26166 if (i >= KERNEL_PGD_BOUNDARY)
26167- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26168- sizeof(pmd_t) * PTRS_PER_PMD);
26169+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26170+ sizeof(pxd_t) * PTRS_PER_PMD);
26171
26172- pud_populate(mm, pud, pmd);
26173+ pyd_populate(mm, pyd, pxd);
26174 }
26175 }
26176
26177 pgd_t *pgd_alloc(struct mm_struct *mm)
26178 {
26179 pgd_t *pgd;
26180- pmd_t *pmds[PREALLOCATED_PMDS];
26181+ pxd_t *pxds[PREALLOCATED_PXDS];
26182+
26183 unsigned long flags;
26184
26185 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26186@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26187
26188 mm->pgd = pgd;
26189
26190- if (preallocate_pmds(pmds) != 0)
26191+ if (preallocate_pxds(pxds) != 0)
26192 goto out_free_pgd;
26193
26194 if (paravirt_pgd_alloc(mm) != 0)
26195- goto out_free_pmds;
26196+ goto out_free_pxds;
26197
26198 /*
26199 * Make sure that pre-populating the pmds is atomic with
26200@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26201 spin_lock_irqsave(&pgd_lock, flags);
26202
26203 pgd_ctor(pgd);
26204- pgd_prepopulate_pmd(mm, pgd, pmds);
26205+ pgd_prepopulate_pxd(mm, pgd, pxds);
26206
26207 spin_unlock_irqrestore(&pgd_lock, flags);
26208
26209 return pgd;
26210
26211-out_free_pmds:
26212- free_pmds(pmds);
26213+out_free_pxds:
26214+ free_pxds(pxds);
26215 out_free_pgd:
26216 free_page((unsigned long)pgd);
26217 out:
26218@@ -287,7 +338,7 @@ out:
26219
26220 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26221 {
26222- pgd_mop_up_pmds(mm, pgd);
26223+ pgd_mop_up_pxds(mm, pgd);
26224 pgd_dtor(pgd);
26225 paravirt_pgd_free(mm, pgd);
26226 free_page((unsigned long)pgd);
26227diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26228index 46c8834..fcab43d 100644
26229--- a/arch/x86/mm/pgtable_32.c
26230+++ b/arch/x86/mm/pgtable_32.c
26231@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26232 return;
26233 }
26234 pte = pte_offset_kernel(pmd, vaddr);
26235+
26236+ pax_open_kernel();
26237 if (pte_val(pteval))
26238 set_pte_at(&init_mm, vaddr, pte, pteval);
26239 else
26240 pte_clear(&init_mm, vaddr, pte);
26241+ pax_close_kernel();
26242
26243 /*
26244 * It's enough to flush this one mapping.
26245diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26246index 513d8ed..978c161 100644
26247--- a/arch/x86/mm/setup_nx.c
26248+++ b/arch/x86/mm/setup_nx.c
26249@@ -4,11 +4,10 @@
26250
26251 #include <asm/pgtable.h>
26252
26253+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26254 int nx_enabled;
26255
26256-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26257-static int disable_nx __cpuinitdata;
26258-
26259+#ifndef CONFIG_PAX_PAGEEXEC
26260 /*
26261 * noexec = on|off
26262 *
26263@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26264 if (!str)
26265 return -EINVAL;
26266 if (!strncmp(str, "on", 2)) {
26267- __supported_pte_mask |= _PAGE_NX;
26268- disable_nx = 0;
26269+ nx_enabled = 1;
26270 } else if (!strncmp(str, "off", 3)) {
26271- disable_nx = 1;
26272- __supported_pte_mask &= ~_PAGE_NX;
26273+ nx_enabled = 0;
26274 }
26275 return 0;
26276 }
26277 early_param("noexec", noexec_setup);
26278 #endif
26279+#endif
26280
26281 #ifdef CONFIG_X86_PAE
26282 void __init set_nx(void)
26283 {
26284- unsigned int v[4], l, h;
26285+ if (!nx_enabled && cpu_has_nx) {
26286+ unsigned l, h;
26287
26288- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26289- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26290-
26291- if ((v[3] & (1 << 20)) && !disable_nx) {
26292- rdmsr(MSR_EFER, l, h);
26293- l |= EFER_NX;
26294- wrmsr(MSR_EFER, l, h);
26295- nx_enabled = 1;
26296- __supported_pte_mask |= _PAGE_NX;
26297- }
26298+ __supported_pte_mask &= ~_PAGE_NX;
26299+ rdmsr(MSR_EFER, l, h);
26300+ l &= ~EFER_NX;
26301+ wrmsr(MSR_EFER, l, h);
26302 }
26303 }
26304 #else
26305@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26306 unsigned long efer;
26307
26308 rdmsrl(MSR_EFER, efer);
26309- if (!(efer & EFER_NX) || disable_nx)
26310+ if (!(efer & EFER_NX) || !nx_enabled)
26311 __supported_pte_mask &= ~_PAGE_NX;
26312 }
26313 #endif
26314diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26315index 36fe08e..b123d3a 100644
26316--- a/arch/x86/mm/tlb.c
26317+++ b/arch/x86/mm/tlb.c
26318@@ -61,7 +61,11 @@ void leave_mm(int cpu)
26319 BUG();
26320 cpumask_clear_cpu(cpu,
26321 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26322+
26323+#ifndef CONFIG_PAX_PER_CPU_PGD
26324 load_cr3(swapper_pg_dir);
26325+#endif
26326+
26327 }
26328 EXPORT_SYMBOL_GPL(leave_mm);
26329
26330diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26331index 829edf0..672adb3 100644
26332--- a/arch/x86/oprofile/backtrace.c
26333+++ b/arch/x86/oprofile/backtrace.c
26334@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26335 {
26336 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26337
26338- if (!user_mode_vm(regs)) {
26339+ if (!user_mode(regs)) {
26340 unsigned long stack = kernel_stack_pointer(regs);
26341 if (depth)
26342 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26343diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26344index e6a160a..36deff6 100644
26345--- a/arch/x86/oprofile/op_model_p4.c
26346+++ b/arch/x86/oprofile/op_model_p4.c
26347@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26348 #endif
26349 }
26350
26351-static int inline addr_increment(void)
26352+static inline int addr_increment(void)
26353 {
26354 #ifdef CONFIG_SMP
26355 return smp_num_siblings == 2 ? 2 : 1;
26356diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26357index 1331fcf..03901b2 100644
26358--- a/arch/x86/pci/common.c
26359+++ b/arch/x86/pci/common.c
26360@@ -31,8 +31,8 @@ int noioapicreroute = 1;
26361 int pcibios_last_bus = -1;
26362 unsigned long pirq_table_addr;
26363 struct pci_bus *pci_root_bus;
26364-struct pci_raw_ops *raw_pci_ops;
26365-struct pci_raw_ops *raw_pci_ext_ops;
26366+const struct pci_raw_ops *raw_pci_ops;
26367+const struct pci_raw_ops *raw_pci_ext_ops;
26368
26369 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26370 int reg, int len, u32 *val)
26371diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26372index 347d882..4baf6b6 100644
26373--- a/arch/x86/pci/direct.c
26374+++ b/arch/x86/pci/direct.c
26375@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26376
26377 #undef PCI_CONF1_ADDRESS
26378
26379-struct pci_raw_ops pci_direct_conf1 = {
26380+const struct pci_raw_ops pci_direct_conf1 = {
26381 .read = pci_conf1_read,
26382 .write = pci_conf1_write,
26383 };
26384@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26385
26386 #undef PCI_CONF2_ADDRESS
26387
26388-struct pci_raw_ops pci_direct_conf2 = {
26389+const struct pci_raw_ops pci_direct_conf2 = {
26390 .read = pci_conf2_read,
26391 .write = pci_conf2_write,
26392 };
26393@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26394 * This should be close to trivial, but it isn't, because there are buggy
26395 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26396 */
26397-static int __init pci_sanity_check(struct pci_raw_ops *o)
26398+static int __init pci_sanity_check(const struct pci_raw_ops *o)
26399 {
26400 u32 x = 0;
26401 int year, devfn;
26402diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26403index f10a7e9..0425342 100644
26404--- a/arch/x86/pci/mmconfig_32.c
26405+++ b/arch/x86/pci/mmconfig_32.c
26406@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26407 return 0;
26408 }
26409
26410-static struct pci_raw_ops pci_mmcfg = {
26411+static const struct pci_raw_ops pci_mmcfg = {
26412 .read = pci_mmcfg_read,
26413 .write = pci_mmcfg_write,
26414 };
26415diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26416index 94349f8..41600a7 100644
26417--- a/arch/x86/pci/mmconfig_64.c
26418+++ b/arch/x86/pci/mmconfig_64.c
26419@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26420 return 0;
26421 }
26422
26423-static struct pci_raw_ops pci_mmcfg = {
26424+static const struct pci_raw_ops pci_mmcfg = {
26425 .read = pci_mmcfg_read,
26426 .write = pci_mmcfg_write,
26427 };
26428diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26429index 8eb295e..86bd657 100644
26430--- a/arch/x86/pci/numaq_32.c
26431+++ b/arch/x86/pci/numaq_32.c
26432@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26433
26434 #undef PCI_CONF1_MQ_ADDRESS
26435
26436-static struct pci_raw_ops pci_direct_conf1_mq = {
26437+static const struct pci_raw_ops pci_direct_conf1_mq = {
26438 .read = pci_conf1_mq_read,
26439 .write = pci_conf1_mq_write
26440 };
26441diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26442index b889d82..5a58a0a 100644
26443--- a/arch/x86/pci/olpc.c
26444+++ b/arch/x86/pci/olpc.c
26445@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26446 return 0;
26447 }
26448
26449-static struct pci_raw_ops pci_olpc_conf = {
26450+static const struct pci_raw_ops pci_olpc_conf = {
26451 .read = pci_olpc_read,
26452 .write = pci_olpc_write,
26453 };
26454diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26455index 1c975cc..b8e16c2 100644
26456--- a/arch/x86/pci/pcbios.c
26457+++ b/arch/x86/pci/pcbios.c
26458@@ -56,50 +56,93 @@ union bios32 {
26459 static struct {
26460 unsigned long address;
26461 unsigned short segment;
26462-} bios32_indirect = { 0, __KERNEL_CS };
26463+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26464
26465 /*
26466 * Returns the entry point for the given service, NULL on error
26467 */
26468
26469-static unsigned long bios32_service(unsigned long service)
26470+static unsigned long __devinit bios32_service(unsigned long service)
26471 {
26472 unsigned char return_code; /* %al */
26473 unsigned long address; /* %ebx */
26474 unsigned long length; /* %ecx */
26475 unsigned long entry; /* %edx */
26476 unsigned long flags;
26477+ struct desc_struct d, *gdt;
26478
26479 local_irq_save(flags);
26480- __asm__("lcall *(%%edi); cld"
26481+
26482+ gdt = get_cpu_gdt_table(smp_processor_id());
26483+
26484+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26485+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26486+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26487+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26488+
26489+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26490 : "=a" (return_code),
26491 "=b" (address),
26492 "=c" (length),
26493 "=d" (entry)
26494 : "0" (service),
26495 "1" (0),
26496- "D" (&bios32_indirect));
26497+ "D" (&bios32_indirect),
26498+ "r"(__PCIBIOS_DS)
26499+ : "memory");
26500+
26501+ pax_open_kernel();
26502+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26503+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26504+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26505+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26506+ pax_close_kernel();
26507+
26508 local_irq_restore(flags);
26509
26510 switch (return_code) {
26511- case 0:
26512- return address + entry;
26513- case 0x80: /* Not present */
26514- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26515- return 0;
26516- default: /* Shouldn't happen */
26517- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26518- service, return_code);
26519+ case 0: {
26520+ int cpu;
26521+ unsigned char flags;
26522+
26523+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26524+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26525+ printk(KERN_WARNING "bios32_service: not valid\n");
26526 return 0;
26527+ }
26528+ address = address + PAGE_OFFSET;
26529+ length += 16UL; /* some BIOSs underreport this... */
26530+ flags = 4;
26531+ if (length >= 64*1024*1024) {
26532+ length >>= PAGE_SHIFT;
26533+ flags |= 8;
26534+ }
26535+
26536+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26537+ gdt = get_cpu_gdt_table(cpu);
26538+ pack_descriptor(&d, address, length, 0x9b, flags);
26539+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26540+ pack_descriptor(&d, address, length, 0x93, flags);
26541+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26542+ }
26543+ return entry;
26544+ }
26545+ case 0x80: /* Not present */
26546+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26547+ return 0;
26548+ default: /* Shouldn't happen */
26549+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26550+ service, return_code);
26551+ return 0;
26552 }
26553 }
26554
26555 static struct {
26556 unsigned long address;
26557 unsigned short segment;
26558-} pci_indirect = { 0, __KERNEL_CS };
26559+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26560
26561-static int pci_bios_present;
26562+static int pci_bios_present __read_only;
26563
26564 static int __devinit check_pcibios(void)
26565 {
26566@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26567 unsigned long flags, pcibios_entry;
26568
26569 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26570- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26571+ pci_indirect.address = pcibios_entry;
26572
26573 local_irq_save(flags);
26574- __asm__(
26575- "lcall *(%%edi); cld\n\t"
26576+ __asm__("movw %w6, %%ds\n\t"
26577+ "lcall *%%ss:(%%edi); cld\n\t"
26578+ "push %%ss\n\t"
26579+ "pop %%ds\n\t"
26580 "jc 1f\n\t"
26581 "xor %%ah, %%ah\n"
26582 "1:"
26583@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26584 "=b" (ebx),
26585 "=c" (ecx)
26586 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26587- "D" (&pci_indirect)
26588+ "D" (&pci_indirect),
26589+ "r" (__PCIBIOS_DS)
26590 : "memory");
26591 local_irq_restore(flags);
26592
26593@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26594
26595 switch (len) {
26596 case 1:
26597- __asm__("lcall *(%%esi); cld\n\t"
26598+ __asm__("movw %w6, %%ds\n\t"
26599+ "lcall *%%ss:(%%esi); cld\n\t"
26600+ "push %%ss\n\t"
26601+ "pop %%ds\n\t"
26602 "jc 1f\n\t"
26603 "xor %%ah, %%ah\n"
26604 "1:"
26605@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26606 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26607 "b" (bx),
26608 "D" ((long)reg),
26609- "S" (&pci_indirect));
26610+ "S" (&pci_indirect),
26611+ "r" (__PCIBIOS_DS));
26612 /*
26613 * Zero-extend the result beyond 8 bits, do not trust the
26614 * BIOS having done it:
26615@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26616 *value &= 0xff;
26617 break;
26618 case 2:
26619- __asm__("lcall *(%%esi); cld\n\t"
26620+ __asm__("movw %w6, %%ds\n\t"
26621+ "lcall *%%ss:(%%esi); cld\n\t"
26622+ "push %%ss\n\t"
26623+ "pop %%ds\n\t"
26624 "jc 1f\n\t"
26625 "xor %%ah, %%ah\n"
26626 "1:"
26627@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26628 : "1" (PCIBIOS_READ_CONFIG_WORD),
26629 "b" (bx),
26630 "D" ((long)reg),
26631- "S" (&pci_indirect));
26632+ "S" (&pci_indirect),
26633+ "r" (__PCIBIOS_DS));
26634 /*
26635 * Zero-extend the result beyond 16 bits, do not trust the
26636 * BIOS having done it:
26637@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26638 *value &= 0xffff;
26639 break;
26640 case 4:
26641- __asm__("lcall *(%%esi); cld\n\t"
26642+ __asm__("movw %w6, %%ds\n\t"
26643+ "lcall *%%ss:(%%esi); cld\n\t"
26644+ "push %%ss\n\t"
26645+ "pop %%ds\n\t"
26646 "jc 1f\n\t"
26647 "xor %%ah, %%ah\n"
26648 "1:"
26649@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26650 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26651 "b" (bx),
26652 "D" ((long)reg),
26653- "S" (&pci_indirect));
26654+ "S" (&pci_indirect),
26655+ "r" (__PCIBIOS_DS));
26656 break;
26657 }
26658
26659@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26660
26661 switch (len) {
26662 case 1:
26663- __asm__("lcall *(%%esi); cld\n\t"
26664+ __asm__("movw %w6, %%ds\n\t"
26665+ "lcall *%%ss:(%%esi); cld\n\t"
26666+ "push %%ss\n\t"
26667+ "pop %%ds\n\t"
26668 "jc 1f\n\t"
26669 "xor %%ah, %%ah\n"
26670 "1:"
26671@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26672 "c" (value),
26673 "b" (bx),
26674 "D" ((long)reg),
26675- "S" (&pci_indirect));
26676+ "S" (&pci_indirect),
26677+ "r" (__PCIBIOS_DS));
26678 break;
26679 case 2:
26680- __asm__("lcall *(%%esi); cld\n\t"
26681+ __asm__("movw %w6, %%ds\n\t"
26682+ "lcall *%%ss:(%%esi); cld\n\t"
26683+ "push %%ss\n\t"
26684+ "pop %%ds\n\t"
26685 "jc 1f\n\t"
26686 "xor %%ah, %%ah\n"
26687 "1:"
26688@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26689 "c" (value),
26690 "b" (bx),
26691 "D" ((long)reg),
26692- "S" (&pci_indirect));
26693+ "S" (&pci_indirect),
26694+ "r" (__PCIBIOS_DS));
26695 break;
26696 case 4:
26697- __asm__("lcall *(%%esi); cld\n\t"
26698+ __asm__("movw %w6, %%ds\n\t"
26699+ "lcall *%%ss:(%%esi); cld\n\t"
26700+ "push %%ss\n\t"
26701+ "pop %%ds\n\t"
26702 "jc 1f\n\t"
26703 "xor %%ah, %%ah\n"
26704 "1:"
26705@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26706 "c" (value),
26707 "b" (bx),
26708 "D" ((long)reg),
26709- "S" (&pci_indirect));
26710+ "S" (&pci_indirect),
26711+ "r" (__PCIBIOS_DS));
26712 break;
26713 }
26714
26715@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26716 * Function table for BIOS32 access
26717 */
26718
26719-static struct pci_raw_ops pci_bios_access = {
26720+static const struct pci_raw_ops pci_bios_access = {
26721 .read = pci_bios_read,
26722 .write = pci_bios_write
26723 };
26724@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26725 * Try to find PCI BIOS.
26726 */
26727
26728-static struct pci_raw_ops * __devinit pci_find_bios(void)
26729+static const struct pci_raw_ops * __devinit pci_find_bios(void)
26730 {
26731 union bios32 *check;
26732 unsigned char sum;
26733@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26734
26735 DBG("PCI: Fetching IRQ routing table... ");
26736 __asm__("push %%es\n\t"
26737+ "movw %w8, %%ds\n\t"
26738 "push %%ds\n\t"
26739 "pop %%es\n\t"
26740- "lcall *(%%esi); cld\n\t"
26741+ "lcall *%%ss:(%%esi); cld\n\t"
26742 "pop %%es\n\t"
26743+ "push %%ss\n\t"
26744+ "pop %%ds\n"
26745 "jc 1f\n\t"
26746 "xor %%ah, %%ah\n"
26747 "1:"
26748@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26749 "1" (0),
26750 "D" ((long) &opt),
26751 "S" (&pci_indirect),
26752- "m" (opt)
26753+ "m" (opt),
26754+ "r" (__PCIBIOS_DS)
26755 : "memory");
26756 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26757 if (ret & 0xff00)
26758@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26759 {
26760 int ret;
26761
26762- __asm__("lcall *(%%esi); cld\n\t"
26763+ __asm__("movw %w5, %%ds\n\t"
26764+ "lcall *%%ss:(%%esi); cld\n\t"
26765+ "push %%ss\n\t"
26766+ "pop %%ds\n"
26767 "jc 1f\n\t"
26768 "xor %%ah, %%ah\n"
26769 "1:"
26770@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26771 : "0" (PCIBIOS_SET_PCI_HW_INT),
26772 "b" ((dev->bus->number << 8) | dev->devfn),
26773 "c" ((irq << 8) | (pin + 10)),
26774- "S" (&pci_indirect));
26775+ "S" (&pci_indirect),
26776+ "r" (__PCIBIOS_DS));
26777 return !(ret & 0xff00);
26778 }
26779 EXPORT_SYMBOL(pcibios_set_irq_routing);
26780diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26781index fa0f651..9d8f3d9 100644
26782--- a/arch/x86/power/cpu.c
26783+++ b/arch/x86/power/cpu.c
26784@@ -129,7 +129,7 @@ static void do_fpu_end(void)
26785 static void fix_processor_context(void)
26786 {
26787 int cpu = smp_processor_id();
26788- struct tss_struct *t = &per_cpu(init_tss, cpu);
26789+ struct tss_struct *t = init_tss + cpu;
26790
26791 set_tss_desc(cpu, t); /*
26792 * This just modifies memory; should not be
26793@@ -139,7 +139,9 @@ static void fix_processor_context(void)
26794 */
26795
26796 #ifdef CONFIG_X86_64
26797+ pax_open_kernel();
26798 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26799+ pax_close_kernel();
26800
26801 syscall_init(); /* This sets MSR_*STAR and related */
26802 #endif
26803diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26804index dd78ef6..f9d928d 100644
26805--- a/arch/x86/vdso/Makefile
26806+++ b/arch/x86/vdso/Makefile
26807@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26808 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26809 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26810
26811-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26812+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26813 GCOV_PROFILE := n
26814
26815 #
26816diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26817index ee55754..0013b2e 100644
26818--- a/arch/x86/vdso/vclock_gettime.c
26819+++ b/arch/x86/vdso/vclock_gettime.c
26820@@ -22,24 +22,48 @@
26821 #include <asm/hpet.h>
26822 #include <asm/unistd.h>
26823 #include <asm/io.h>
26824+#include <asm/fixmap.h>
26825 #include "vextern.h"
26826
26827 #define gtod vdso_vsyscall_gtod_data
26828
26829+notrace noinline long __vdso_fallback_time(long *t)
26830+{
26831+ long secs;
26832+ asm volatile("syscall"
26833+ : "=a" (secs)
26834+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26835+ return secs;
26836+}
26837+
26838 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26839 {
26840 long ret;
26841 asm("syscall" : "=a" (ret) :
26842- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26843+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26844 return ret;
26845 }
26846
26847+notrace static inline cycle_t __vdso_vread_hpet(void)
26848+{
26849+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26850+}
26851+
26852+notrace static inline cycle_t __vdso_vread_tsc(void)
26853+{
26854+ cycle_t ret = (cycle_t)vget_cycles();
26855+
26856+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26857+}
26858+
26859 notrace static inline long vgetns(void)
26860 {
26861 long v;
26862- cycles_t (*vread)(void);
26863- vread = gtod->clock.vread;
26864- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26865+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26866+ v = __vdso_vread_tsc();
26867+ else
26868+ v = __vdso_vread_hpet();
26869+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26870 return (v * gtod->clock.mult) >> gtod->clock.shift;
26871 }
26872
26873@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26874
26875 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26876 {
26877- if (likely(gtod->sysctl_enabled))
26878+ if (likely(gtod->sysctl_enabled &&
26879+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26880+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26881 switch (clock) {
26882 case CLOCK_REALTIME:
26883 if (likely(gtod->clock.vread))
26884@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26885 int clock_gettime(clockid_t, struct timespec *)
26886 __attribute__((weak, alias("__vdso_clock_gettime")));
26887
26888+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26889+{
26890+ long ret;
26891+ asm("syscall" : "=a" (ret) :
26892+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26893+ return ret;
26894+}
26895+
26896 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26897 {
26898- long ret;
26899- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26900+ if (likely(gtod->sysctl_enabled &&
26901+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26902+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26903+ {
26904 if (likely(tv != NULL)) {
26905 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26906 offsetof(struct timespec, tv_nsec) ||
26907@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26908 }
26909 return 0;
26910 }
26911- asm("syscall" : "=a" (ret) :
26912- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26913- return ret;
26914+ return __vdso_fallback_gettimeofday(tv, tz);
26915 }
26916 int gettimeofday(struct timeval *, struct timezone *)
26917 __attribute__((weak, alias("__vdso_gettimeofday")));
26918diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26919index 4e5dd3b..00ba15e 100644
26920--- a/arch/x86/vdso/vdso.lds.S
26921+++ b/arch/x86/vdso/vdso.lds.S
26922@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26923 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26924 #include "vextern.h"
26925 #undef VEXTERN
26926+
26927+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26928+VEXTERN(fallback_gettimeofday)
26929+VEXTERN(fallback_time)
26930+VEXTERN(getcpu)
26931+#undef VEXTERN
26932diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26933index 58bc00f..d53fb48 100644
26934--- a/arch/x86/vdso/vdso32-setup.c
26935+++ b/arch/x86/vdso/vdso32-setup.c
26936@@ -25,6 +25,7 @@
26937 #include <asm/tlbflush.h>
26938 #include <asm/vdso.h>
26939 #include <asm/proto.h>
26940+#include <asm/mman.h>
26941
26942 enum {
26943 VDSO_DISABLED = 0,
26944@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26945 void enable_sep_cpu(void)
26946 {
26947 int cpu = get_cpu();
26948- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26949+ struct tss_struct *tss = init_tss + cpu;
26950
26951 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26952 put_cpu();
26953@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26954 gate_vma.vm_start = FIXADDR_USER_START;
26955 gate_vma.vm_end = FIXADDR_USER_END;
26956 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26957- gate_vma.vm_page_prot = __P101;
26958+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26959 /*
26960 * Make sure the vDSO gets into every core dump.
26961 * Dumping its contents makes post-mortem fully interpretable later
26962@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26963 if (compat)
26964 addr = VDSO_HIGH_BASE;
26965 else {
26966- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26967+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26968 if (IS_ERR_VALUE(addr)) {
26969 ret = addr;
26970 goto up_fail;
26971 }
26972 }
26973
26974- current->mm->context.vdso = (void *)addr;
26975+ current->mm->context.vdso = addr;
26976
26977 if (compat_uses_vma || !compat) {
26978 /*
26979@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26980 }
26981
26982 current_thread_info()->sysenter_return =
26983- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26984+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26985
26986 up_fail:
26987 if (ret)
26988- current->mm->context.vdso = NULL;
26989+ current->mm->context.vdso = 0;
26990
26991 up_write(&mm->mmap_sem);
26992
26993@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26994
26995 const char *arch_vma_name(struct vm_area_struct *vma)
26996 {
26997- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26998+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26999 return "[vdso]";
27000+
27001+#ifdef CONFIG_PAX_SEGMEXEC
27002+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27003+ return "[vdso]";
27004+#endif
27005+
27006 return NULL;
27007 }
27008
27009@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
27010 struct mm_struct *mm = tsk->mm;
27011
27012 /* Check to see if this task was created in compat vdso mode */
27013- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27014+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27015 return &gate_vma;
27016 return NULL;
27017 }
27018diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
27019index 1683ba2..48d07f3 100644
27020--- a/arch/x86/vdso/vextern.h
27021+++ b/arch/x86/vdso/vextern.h
27022@@ -11,6 +11,5 @@
27023 put into vextern.h and be referenced as a pointer with vdso prefix.
27024 The main kernel later fills in the values. */
27025
27026-VEXTERN(jiffies)
27027 VEXTERN(vgetcpu_mode)
27028 VEXTERN(vsyscall_gtod_data)
27029diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27030index 21e1aeb..2c0b3c4 100644
27031--- a/arch/x86/vdso/vma.c
27032+++ b/arch/x86/vdso/vma.c
27033@@ -17,8 +17,6 @@
27034 #include "vextern.h" /* Just for VMAGIC. */
27035 #undef VEXTERN
27036
27037-unsigned int __read_mostly vdso_enabled = 1;
27038-
27039 extern char vdso_start[], vdso_end[];
27040 extern unsigned short vdso_sync_cpuid;
27041
27042@@ -27,10 +25,8 @@ static unsigned vdso_size;
27043
27044 static inline void *var_ref(void *p, char *name)
27045 {
27046- if (*(void **)p != (void *)VMAGIC) {
27047- printk("VDSO: variable %s broken\n", name);
27048- vdso_enabled = 0;
27049- }
27050+ if (*(void **)p != (void *)VMAGIC)
27051+ panic("VDSO: variable %s broken\n", name);
27052 return p;
27053 }
27054
27055@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
27056 if (!vbase)
27057 goto oom;
27058
27059- if (memcmp(vbase, "\177ELF", 4)) {
27060- printk("VDSO: I'm broken; not ELF\n");
27061- vdso_enabled = 0;
27062- }
27063+ if (memcmp(vbase, ELFMAG, SELFMAG))
27064+ panic("VDSO: I'm broken; not ELF\n");
27065
27066 #define VEXTERN(x) \
27067 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
27068 #include "vextern.h"
27069 #undef VEXTERN
27070+ vunmap(vbase);
27071 return 0;
27072
27073 oom:
27074- printk("Cannot allocate vdso\n");
27075- vdso_enabled = 0;
27076- return -ENOMEM;
27077+ panic("Cannot allocate vdso\n");
27078 }
27079 __initcall(init_vdso_vars);
27080
27081@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27082 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27083 {
27084 struct mm_struct *mm = current->mm;
27085- unsigned long addr;
27086+ unsigned long addr = 0;
27087 int ret;
27088
27089- if (!vdso_enabled)
27090- return 0;
27091-
27092 down_write(&mm->mmap_sem);
27093+
27094+#ifdef CONFIG_PAX_RANDMMAP
27095+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27096+#endif
27097+
27098 addr = vdso_addr(mm->start_stack, vdso_size);
27099 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27100 if (IS_ERR_VALUE(addr)) {
27101@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27102 goto up_fail;
27103 }
27104
27105- current->mm->context.vdso = (void *)addr;
27106+ current->mm->context.vdso = addr;
27107
27108 ret = install_special_mapping(mm, addr, vdso_size,
27109 VM_READ|VM_EXEC|
27110@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27111 VM_ALWAYSDUMP,
27112 vdso_pages);
27113 if (ret) {
27114- current->mm->context.vdso = NULL;
27115+ current->mm->context.vdso = 0;
27116 goto up_fail;
27117 }
27118
27119@@ -132,10 +127,3 @@ up_fail:
27120 up_write(&mm->mmap_sem);
27121 return ret;
27122 }
27123-
27124-static __init int vdso_setup(char *s)
27125-{
27126- vdso_enabled = simple_strtoul(s, NULL, 0);
27127- return 0;
27128-}
27129-__setup("vdso=", vdso_setup);
27130diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27131index 0087b00..eecb34f 100644
27132--- a/arch/x86/xen/enlighten.c
27133+++ b/arch/x86/xen/enlighten.c
27134@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27135
27136 struct shared_info xen_dummy_shared_info;
27137
27138-void *xen_initial_gdt;
27139-
27140 /*
27141 * Point at some empty memory to start with. We map the real shared_info
27142 * page as soon as fixmap is up and running.
27143@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
27144
27145 preempt_disable();
27146
27147- start = __get_cpu_var(idt_desc).address;
27148+ start = (unsigned long)__get_cpu_var(idt_desc).address;
27149 end = start + __get_cpu_var(idt_desc).size + 1;
27150
27151 xen_mc_flush();
27152@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
27153 #endif
27154 };
27155
27156-static void xen_reboot(int reason)
27157+static __noreturn void xen_reboot(int reason)
27158 {
27159 struct sched_shutdown r = { .reason = reason };
27160
27161@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
27162 BUG();
27163 }
27164
27165-static void xen_restart(char *msg)
27166+static __noreturn void xen_restart(char *msg)
27167 {
27168 xen_reboot(SHUTDOWN_reboot);
27169 }
27170
27171-static void xen_emergency_restart(void)
27172+static __noreturn void xen_emergency_restart(void)
27173 {
27174 xen_reboot(SHUTDOWN_reboot);
27175 }
27176
27177-static void xen_machine_halt(void)
27178+static __noreturn void xen_machine_halt(void)
27179 {
27180 xen_reboot(SHUTDOWN_poweroff);
27181 }
27182@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
27183 */
27184 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27185
27186-#ifdef CONFIG_X86_64
27187 /* Work out if we support NX */
27188- check_efer();
27189+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27190+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27191+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27192+ unsigned l, h;
27193+
27194+#ifdef CONFIG_X86_PAE
27195+ nx_enabled = 1;
27196+#endif
27197+ __supported_pte_mask |= _PAGE_NX;
27198+ rdmsr(MSR_EFER, l, h);
27199+ l |= EFER_NX;
27200+ wrmsr(MSR_EFER, l, h);
27201+ }
27202 #endif
27203
27204 xen_setup_features();
27205@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
27206
27207 machine_ops = xen_machine_ops;
27208
27209- /*
27210- * The only reliable way to retain the initial address of the
27211- * percpu gdt_page is to remember it here, so we can go and
27212- * mark it RW later, when the initial percpu area is freed.
27213- */
27214- xen_initial_gdt = &per_cpu(gdt_page, 0);
27215-
27216 xen_smp_init();
27217
27218 pgd = (pgd_t *)xen_start_info->pt_base;
27219diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27220index 3f90a2c..2c2ad84 100644
27221--- a/arch/x86/xen/mmu.c
27222+++ b/arch/x86/xen/mmu.c
27223@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27224 convert_pfn_mfn(init_level4_pgt);
27225 convert_pfn_mfn(level3_ident_pgt);
27226 convert_pfn_mfn(level3_kernel_pgt);
27227+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27228+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27229+ convert_pfn_mfn(level3_vmemmap_pgt);
27230
27231 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27232 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27233@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27234 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27235 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27236 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27237+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27238+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27239+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27240 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27241+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27242 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27243 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27244
27245@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27246 pv_mmu_ops.set_pud = xen_set_pud;
27247 #if PAGETABLE_LEVELS == 4
27248 pv_mmu_ops.set_pgd = xen_set_pgd;
27249+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27250 #endif
27251
27252 /* This will work as long as patching hasn't happened yet
27253@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27254 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27255 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27256 .set_pgd = xen_set_pgd_hyper,
27257+ .set_pgd_batched = xen_set_pgd_hyper,
27258
27259 .alloc_pud = xen_alloc_pmd_init,
27260 .release_pud = xen_release_pmd_init,
27261diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27262index a96204a..fca9b8e 100644
27263--- a/arch/x86/xen/smp.c
27264+++ b/arch/x86/xen/smp.c
27265@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27266 {
27267 BUG_ON(smp_processor_id() != 0);
27268 native_smp_prepare_boot_cpu();
27269-
27270- /* We've switched to the "real" per-cpu gdt, so make sure the
27271- old memory can be recycled */
27272- make_lowmem_page_readwrite(xen_initial_gdt);
27273-
27274 xen_setup_vcpu_info_placement();
27275 }
27276
27277@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27278 gdt = get_cpu_gdt_table(cpu);
27279
27280 ctxt->flags = VGCF_IN_KERNEL;
27281- ctxt->user_regs.ds = __USER_DS;
27282- ctxt->user_regs.es = __USER_DS;
27283+ ctxt->user_regs.ds = __KERNEL_DS;
27284+ ctxt->user_regs.es = __KERNEL_DS;
27285 ctxt->user_regs.ss = __KERNEL_DS;
27286 #ifdef CONFIG_X86_32
27287 ctxt->user_regs.fs = __KERNEL_PERCPU;
27288- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27289+ savesegment(gs, ctxt->user_regs.gs);
27290 #else
27291 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27292 #endif
27293@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27294 int rc;
27295
27296 per_cpu(current_task, cpu) = idle;
27297+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27298 #ifdef CONFIG_X86_32
27299 irq_ctx_init(cpu);
27300 #else
27301 clear_tsk_thread_flag(idle, TIF_FORK);
27302- per_cpu(kernel_stack, cpu) =
27303- (unsigned long)task_stack_page(idle) -
27304- KERNEL_STACK_OFFSET + THREAD_SIZE;
27305+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27306 #endif
27307 xen_setup_runstate_info(cpu);
27308 xen_setup_timer(cpu);
27309diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27310index 9a95a9c..4f39e774 100644
27311--- a/arch/x86/xen/xen-asm_32.S
27312+++ b/arch/x86/xen/xen-asm_32.S
27313@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27314 ESP_OFFSET=4 # bytes pushed onto stack
27315
27316 /*
27317- * Store vcpu_info pointer for easy access. Do it this way to
27318- * avoid having to reload %fs
27319+ * Store vcpu_info pointer for easy access.
27320 */
27321 #ifdef CONFIG_SMP
27322- GET_THREAD_INFO(%eax)
27323- movl TI_cpu(%eax), %eax
27324- movl __per_cpu_offset(,%eax,4), %eax
27325- mov per_cpu__xen_vcpu(%eax), %eax
27326+ push %fs
27327+ mov $(__KERNEL_PERCPU), %eax
27328+ mov %eax, %fs
27329+ mov PER_CPU_VAR(xen_vcpu), %eax
27330+ pop %fs
27331 #else
27332 movl per_cpu__xen_vcpu, %eax
27333 #endif
27334diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27335index 1a5ff24..a187d40 100644
27336--- a/arch/x86/xen/xen-head.S
27337+++ b/arch/x86/xen/xen-head.S
27338@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27339 #ifdef CONFIG_X86_32
27340 mov %esi,xen_start_info
27341 mov $init_thread_union+THREAD_SIZE,%esp
27342+#ifdef CONFIG_SMP
27343+ movl $cpu_gdt_table,%edi
27344+ movl $__per_cpu_load,%eax
27345+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27346+ rorl $16,%eax
27347+ movb %al,__KERNEL_PERCPU + 4(%edi)
27348+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27349+ movl $__per_cpu_end - 1,%eax
27350+ subl $__per_cpu_start,%eax
27351+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27352+#endif
27353 #else
27354 mov %rsi,xen_start_info
27355 mov $init_thread_union+THREAD_SIZE,%rsp
27356diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27357index f9153a3..51eab3d 100644
27358--- a/arch/x86/xen/xen-ops.h
27359+++ b/arch/x86/xen/xen-ops.h
27360@@ -10,8 +10,6 @@
27361 extern const char xen_hypervisor_callback[];
27362 extern const char xen_failsafe_callback[];
27363
27364-extern void *xen_initial_gdt;
27365-
27366 struct trap_info;
27367 void xen_copy_trap_info(struct trap_info *traps);
27368
27369diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27370index 15c6308..96e83c2 100644
27371--- a/block/blk-integrity.c
27372+++ b/block/blk-integrity.c
27373@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27374 NULL,
27375 };
27376
27377-static struct sysfs_ops integrity_ops = {
27378+static const struct sysfs_ops integrity_ops = {
27379 .show = &integrity_attr_show,
27380 .store = &integrity_attr_store,
27381 };
27382diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27383index ca56420..f2fc409 100644
27384--- a/block/blk-iopoll.c
27385+++ b/block/blk-iopoll.c
27386@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27387 }
27388 EXPORT_SYMBOL(blk_iopoll_complete);
27389
27390-static void blk_iopoll_softirq(struct softirq_action *h)
27391+static void blk_iopoll_softirq(void)
27392 {
27393 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27394 int rearm = 0, budget = blk_iopoll_budget;
27395diff --git a/block/blk-map.c b/block/blk-map.c
27396index 30a7e51..0aeec6a 100644
27397--- a/block/blk-map.c
27398+++ b/block/blk-map.c
27399@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27400 * direct dma. else, set up kernel bounce buffers
27401 */
27402 uaddr = (unsigned long) ubuf;
27403- if (blk_rq_aligned(q, ubuf, len) && !map_data)
27404+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27405 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27406 else
27407 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27408@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27409 for (i = 0; i < iov_count; i++) {
27410 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27411
27412+ if (!iov[i].iov_len)
27413+ return -EINVAL;
27414+
27415 if (uaddr & queue_dma_alignment(q)) {
27416 unaligned = 1;
27417 break;
27418 }
27419- if (!iov[i].iov_len)
27420- return -EINVAL;
27421 }
27422
27423 if (unaligned || (q->dma_pad_mask & len) || map_data)
27424@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27425 if (!len || !kbuf)
27426 return -EINVAL;
27427
27428- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27429+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27430 if (do_copy)
27431 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27432 else
27433diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27434index ee9c216..58d410a 100644
27435--- a/block/blk-softirq.c
27436+++ b/block/blk-softirq.c
27437@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27438 * Softirq action handler - move entries to local list and loop over them
27439 * while passing them to the queue registered handler.
27440 */
27441-static void blk_done_softirq(struct softirq_action *h)
27442+static void blk_done_softirq(void)
27443 {
27444 struct list_head *cpu_list, local_list;
27445
27446diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27447index bb9c5ea..5330d48 100644
27448--- a/block/blk-sysfs.c
27449+++ b/block/blk-sysfs.c
27450@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27451 kmem_cache_free(blk_requestq_cachep, q);
27452 }
27453
27454-static struct sysfs_ops queue_sysfs_ops = {
27455+static const struct sysfs_ops queue_sysfs_ops = {
27456 .show = queue_attr_show,
27457 .store = queue_attr_store,
27458 };
27459diff --git a/block/bsg.c b/block/bsg.c
27460index 7154a7a..08ac2f0 100644
27461--- a/block/bsg.c
27462+++ b/block/bsg.c
27463@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27464 struct sg_io_v4 *hdr, struct bsg_device *bd,
27465 fmode_t has_write_perm)
27466 {
27467+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27468+ unsigned char *cmdptr;
27469+
27470 if (hdr->request_len > BLK_MAX_CDB) {
27471 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27472 if (!rq->cmd)
27473 return -ENOMEM;
27474- }
27475+ cmdptr = rq->cmd;
27476+ } else
27477+ cmdptr = tmpcmd;
27478
27479- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27480+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27481 hdr->request_len))
27482 return -EFAULT;
27483
27484+ if (cmdptr != rq->cmd)
27485+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27486+
27487 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27488 if (blk_verify_command(rq->cmd, has_write_perm))
27489 return -EPERM;
27490@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27491 rq->next_rq = next_rq;
27492 next_rq->cmd_type = rq->cmd_type;
27493
27494- dxferp = (void*)(unsigned long)hdr->din_xferp;
27495+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27496 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27497 hdr->din_xfer_len, GFP_KERNEL);
27498 if (ret)
27499@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27500
27501 if (hdr->dout_xfer_len) {
27502 dxfer_len = hdr->dout_xfer_len;
27503- dxferp = (void*)(unsigned long)hdr->dout_xferp;
27504+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27505 } else if (hdr->din_xfer_len) {
27506 dxfer_len = hdr->din_xfer_len;
27507- dxferp = (void*)(unsigned long)hdr->din_xferp;
27508+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27509 } else
27510 dxfer_len = 0;
27511
27512@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27513 int len = min_t(unsigned int, hdr->max_response_len,
27514 rq->sense_len);
27515
27516- ret = copy_to_user((void*)(unsigned long)hdr->response,
27517+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27518 rq->sense, len);
27519 if (!ret)
27520 hdr->response_len = len;
27521diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27522index 9bd086c..ca1fc22 100644
27523--- a/block/compat_ioctl.c
27524+++ b/block/compat_ioctl.c
27525@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27526 err |= __get_user(f->spec1, &uf->spec1);
27527 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27528 err |= __get_user(name, &uf->name);
27529- f->name = compat_ptr(name);
27530+ f->name = (void __force_kernel *)compat_ptr(name);
27531 if (err) {
27532 err = -EFAULT;
27533 goto out;
27534diff --git a/block/elevator.c b/block/elevator.c
27535index a847046..75a1746 100644
27536--- a/block/elevator.c
27537+++ b/block/elevator.c
27538@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27539 return error;
27540 }
27541
27542-static struct sysfs_ops elv_sysfs_ops = {
27543+static const struct sysfs_ops elv_sysfs_ops = {
27544 .show = elv_attr_show,
27545 .store = elv_attr_store,
27546 };
27547diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27548index 2be0a97..bded3fd 100644
27549--- a/block/scsi_ioctl.c
27550+++ b/block/scsi_ioctl.c
27551@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27552 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27553 struct sg_io_hdr *hdr, fmode_t mode)
27554 {
27555- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27556+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27557+ unsigned char *cmdptr;
27558+
27559+ if (rq->cmd != rq->__cmd)
27560+ cmdptr = rq->cmd;
27561+ else
27562+ cmdptr = tmpcmd;
27563+
27564+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27565 return -EFAULT;
27566+
27567+ if (cmdptr != rq->cmd)
27568+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27569+
27570 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27571 return -EPERM;
27572
27573@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27574 int err;
27575 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27576 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27577+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27578+ unsigned char *cmdptr;
27579
27580 if (!sic)
27581 return -EINVAL;
27582@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27583 */
27584 err = -EFAULT;
27585 rq->cmd_len = cmdlen;
27586- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27587+
27588+ if (rq->cmd != rq->__cmd)
27589+ cmdptr = rq->cmd;
27590+ else
27591+ cmdptr = tmpcmd;
27592+
27593+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27594 goto error;
27595
27596+ if (rq->cmd != cmdptr)
27597+ memcpy(rq->cmd, cmdptr, cmdlen);
27598+
27599 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27600 goto error;
27601
27602diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27603index 3533582..f143117 100644
27604--- a/crypto/cryptd.c
27605+++ b/crypto/cryptd.c
27606@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27607
27608 struct cryptd_blkcipher_request_ctx {
27609 crypto_completion_t complete;
27610-};
27611+} __no_const;
27612
27613 struct cryptd_hash_ctx {
27614 struct crypto_shash *child;
27615diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27616index a90d260..7a9765e 100644
27617--- a/crypto/gf128mul.c
27618+++ b/crypto/gf128mul.c
27619@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27620 for (i = 0; i < 7; ++i)
27621 gf128mul_x_lle(&p[i + 1], &p[i]);
27622
27623- memset(r, 0, sizeof(r));
27624+ memset(r, 0, sizeof(*r));
27625 for (i = 0;;) {
27626 u8 ch = ((u8 *)b)[15 - i];
27627
27628@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27629 for (i = 0; i < 7; ++i)
27630 gf128mul_x_bbe(&p[i + 1], &p[i]);
27631
27632- memset(r, 0, sizeof(r));
27633+ memset(r, 0, sizeof(*r));
27634 for (i = 0;;) {
27635 u8 ch = ((u8 *)b)[i];
27636
27637diff --git a/crypto/serpent.c b/crypto/serpent.c
27638index b651a55..023297d 100644
27639--- a/crypto/serpent.c
27640+++ b/crypto/serpent.c
27641@@ -21,6 +21,7 @@
27642 #include <asm/byteorder.h>
27643 #include <linux/crypto.h>
27644 #include <linux/types.h>
27645+#include <linux/sched.h>
27646
27647 /* Key is padded to the maximum of 256 bits before round key generation.
27648 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27649@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27650 u32 r0,r1,r2,r3,r4;
27651 int i;
27652
27653+ pax_track_stack();
27654+
27655 /* Copy key, add padding */
27656
27657 for (i = 0; i < keylen; ++i)
27658diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27659index 0d2cdb8..d8de48d 100644
27660--- a/drivers/acpi/acpi_pad.c
27661+++ b/drivers/acpi/acpi_pad.c
27662@@ -30,7 +30,7 @@
27663 #include <acpi/acpi_bus.h>
27664 #include <acpi/acpi_drivers.h>
27665
27666-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27667+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27668 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27669 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27670 static DEFINE_MUTEX(isolated_cpus_lock);
27671diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27672index 3f4602b..2e41d36 100644
27673--- a/drivers/acpi/battery.c
27674+++ b/drivers/acpi/battery.c
27675@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27676 }
27677
27678 static struct battery_file {
27679- struct file_operations ops;
27680+ const struct file_operations ops;
27681 mode_t mode;
27682 const char *name;
27683 } acpi_battery_file[] = {
27684diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27685index 7338b6a..82f0257 100644
27686--- a/drivers/acpi/dock.c
27687+++ b/drivers/acpi/dock.c
27688@@ -77,7 +77,7 @@ struct dock_dependent_device {
27689 struct list_head list;
27690 struct list_head hotplug_list;
27691 acpi_handle handle;
27692- struct acpi_dock_ops *ops;
27693+ const struct acpi_dock_ops *ops;
27694 void *context;
27695 };
27696
27697@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27698 * the dock driver after _DCK is executed.
27699 */
27700 int
27701-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27702+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27703 void *context)
27704 {
27705 struct dock_dependent_device *dd;
27706diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27707index 7c1c59e..2993595 100644
27708--- a/drivers/acpi/osl.c
27709+++ b/drivers/acpi/osl.c
27710@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27711 void __iomem *virt_addr;
27712
27713 virt_addr = ioremap(phys_addr, width);
27714+ if (!virt_addr)
27715+ return AE_NO_MEMORY;
27716 if (!value)
27717 value = &dummy;
27718
27719@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27720 void __iomem *virt_addr;
27721
27722 virt_addr = ioremap(phys_addr, width);
27723+ if (!virt_addr)
27724+ return AE_NO_MEMORY;
27725
27726 switch (width) {
27727 case 8:
27728diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27729index c216062..eec10d2 100644
27730--- a/drivers/acpi/power_meter.c
27731+++ b/drivers/acpi/power_meter.c
27732@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27733 return res;
27734
27735 temp /= 1000;
27736- if (temp < 0)
27737- return -EINVAL;
27738
27739 mutex_lock(&resource->lock);
27740 resource->trip[attr->index - 7] = temp;
27741diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27742index d0d25e2..961643d 100644
27743--- a/drivers/acpi/proc.c
27744+++ b/drivers/acpi/proc.c
27745@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27746 size_t count, loff_t * ppos)
27747 {
27748 struct list_head *node, *next;
27749- char strbuf[5];
27750- char str[5] = "";
27751- unsigned int len = count;
27752+ char strbuf[5] = {0};
27753 struct acpi_device *found_dev = NULL;
27754
27755- if (len > 4)
27756- len = 4;
27757- if (len < 0)
27758- return -EFAULT;
27759+ if (count > 4)
27760+ count = 4;
27761
27762- if (copy_from_user(strbuf, buffer, len))
27763+ if (copy_from_user(strbuf, buffer, count))
27764 return -EFAULT;
27765- strbuf[len] = '\0';
27766- sscanf(strbuf, "%s", str);
27767+ strbuf[count] = '\0';
27768
27769 mutex_lock(&acpi_device_lock);
27770 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27771@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27772 if (!dev->wakeup.flags.valid)
27773 continue;
27774
27775- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27776+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27777 dev->wakeup.state.enabled =
27778 dev->wakeup.state.enabled ? 0 : 1;
27779 found_dev = dev;
27780diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27781index 7102474..de8ad22 100644
27782--- a/drivers/acpi/processor_core.c
27783+++ b/drivers/acpi/processor_core.c
27784@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27785 return 0;
27786 }
27787
27788- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27789+ BUG_ON(pr->id >= nr_cpu_ids);
27790
27791 /*
27792 * Buggy BIOS check
27793diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27794index d933980..5761f13 100644
27795--- a/drivers/acpi/sbshc.c
27796+++ b/drivers/acpi/sbshc.c
27797@@ -17,7 +17,7 @@
27798
27799 #define PREFIX "ACPI: "
27800
27801-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27802+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27803 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27804
27805 struct acpi_smb_hc {
27806diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27807index 0458094..6978e7b 100644
27808--- a/drivers/acpi/sleep.c
27809+++ b/drivers/acpi/sleep.c
27810@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27811 }
27812 }
27813
27814-static struct platform_suspend_ops acpi_suspend_ops = {
27815+static const struct platform_suspend_ops acpi_suspend_ops = {
27816 .valid = acpi_suspend_state_valid,
27817 .begin = acpi_suspend_begin,
27818 .prepare_late = acpi_pm_prepare,
27819@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27820 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27821 * been requested.
27822 */
27823-static struct platform_suspend_ops acpi_suspend_ops_old = {
27824+static const struct platform_suspend_ops acpi_suspend_ops_old = {
27825 .valid = acpi_suspend_state_valid,
27826 .begin = acpi_suspend_begin_old,
27827 .prepare_late = acpi_pm_disable_gpes,
27828@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27829 acpi_enable_all_runtime_gpes();
27830 }
27831
27832-static struct platform_hibernation_ops acpi_hibernation_ops = {
27833+static const struct platform_hibernation_ops acpi_hibernation_ops = {
27834 .begin = acpi_hibernation_begin,
27835 .end = acpi_pm_end,
27836 .pre_snapshot = acpi_hibernation_pre_snapshot,
27837@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27838 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27839 * been requested.
27840 */
27841-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27842+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27843 .begin = acpi_hibernation_begin_old,
27844 .end = acpi_pm_end,
27845 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27846diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27847index 05dff63..b662ab7 100644
27848--- a/drivers/acpi/video.c
27849+++ b/drivers/acpi/video.c
27850@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27851 vd->brightness->levels[request_level]);
27852 }
27853
27854-static struct backlight_ops acpi_backlight_ops = {
27855+static const struct backlight_ops acpi_backlight_ops = {
27856 .get_brightness = acpi_video_get_brightness,
27857 .update_status = acpi_video_set_brightness,
27858 };
27859diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27860index 6787aab..23ffb0e 100644
27861--- a/drivers/ata/ahci.c
27862+++ b/drivers/ata/ahci.c
27863@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27864 .sdev_attrs = ahci_sdev_attrs,
27865 };
27866
27867-static struct ata_port_operations ahci_ops = {
27868+static const struct ata_port_operations ahci_ops = {
27869 .inherits = &sata_pmp_port_ops,
27870
27871 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27872@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27873 .port_stop = ahci_port_stop,
27874 };
27875
27876-static struct ata_port_operations ahci_vt8251_ops = {
27877+static const struct ata_port_operations ahci_vt8251_ops = {
27878 .inherits = &ahci_ops,
27879 .hardreset = ahci_vt8251_hardreset,
27880 };
27881
27882-static struct ata_port_operations ahci_p5wdh_ops = {
27883+static const struct ata_port_operations ahci_p5wdh_ops = {
27884 .inherits = &ahci_ops,
27885 .hardreset = ahci_p5wdh_hardreset,
27886 };
27887
27888-static struct ata_port_operations ahci_sb600_ops = {
27889+static const struct ata_port_operations ahci_sb600_ops = {
27890 .inherits = &ahci_ops,
27891 .softreset = ahci_sb600_softreset,
27892 .pmp_softreset = ahci_sb600_softreset,
27893diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27894index 99e7196..4968c77 100644
27895--- a/drivers/ata/ata_generic.c
27896+++ b/drivers/ata/ata_generic.c
27897@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27898 ATA_BMDMA_SHT(DRV_NAME),
27899 };
27900
27901-static struct ata_port_operations generic_port_ops = {
27902+static const struct ata_port_operations generic_port_ops = {
27903 .inherits = &ata_bmdma_port_ops,
27904 .cable_detect = ata_cable_unknown,
27905 .set_mode = generic_set_mode,
27906diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27907index c33591d..000c121 100644
27908--- a/drivers/ata/ata_piix.c
27909+++ b/drivers/ata/ata_piix.c
27910@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27911 ATA_BMDMA_SHT(DRV_NAME),
27912 };
27913
27914-static struct ata_port_operations piix_pata_ops = {
27915+static const struct ata_port_operations piix_pata_ops = {
27916 .inherits = &ata_bmdma32_port_ops,
27917 .cable_detect = ata_cable_40wire,
27918 .set_piomode = piix_set_piomode,
27919@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27920 .prereset = piix_pata_prereset,
27921 };
27922
27923-static struct ata_port_operations piix_vmw_ops = {
27924+static const struct ata_port_operations piix_vmw_ops = {
27925 .inherits = &piix_pata_ops,
27926 .bmdma_status = piix_vmw_bmdma_status,
27927 };
27928
27929-static struct ata_port_operations ich_pata_ops = {
27930+static const struct ata_port_operations ich_pata_ops = {
27931 .inherits = &piix_pata_ops,
27932 .cable_detect = ich_pata_cable_detect,
27933 .set_dmamode = ich_set_dmamode,
27934 };
27935
27936-static struct ata_port_operations piix_sata_ops = {
27937+static const struct ata_port_operations piix_sata_ops = {
27938 .inherits = &ata_bmdma_port_ops,
27939 };
27940
27941-static struct ata_port_operations piix_sidpr_sata_ops = {
27942+static const struct ata_port_operations piix_sidpr_sata_ops = {
27943 .inherits = &piix_sata_ops,
27944 .hardreset = sata_std_hardreset,
27945 .scr_read = piix_sidpr_scr_read,
27946diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27947index b0882cd..c295d65 100644
27948--- a/drivers/ata/libata-acpi.c
27949+++ b/drivers/ata/libata-acpi.c
27950@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27951 ata_acpi_uevent(dev->link->ap, dev, event);
27952 }
27953
27954-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27955+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27956 .handler = ata_acpi_dev_notify_dock,
27957 .uevent = ata_acpi_dev_uevent,
27958 };
27959
27960-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27961+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27962 .handler = ata_acpi_ap_notify_dock,
27963 .uevent = ata_acpi_ap_uevent,
27964 };
27965diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27966index d4f7f99..94f603e 100644
27967--- a/drivers/ata/libata-core.c
27968+++ b/drivers/ata/libata-core.c
27969@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27970 struct ata_port *ap;
27971 unsigned int tag;
27972
27973- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27974+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27975 ap = qc->ap;
27976
27977 qc->flags = 0;
27978@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27979 struct ata_port *ap;
27980 struct ata_link *link;
27981
27982- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27983+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27984 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27985 ap = qc->ap;
27986 link = qc->dev->link;
27987@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27988 * LOCKING:
27989 * None.
27990 */
27991-static void ata_finalize_port_ops(struct ata_port_operations *ops)
27992+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27993 {
27994 static DEFINE_SPINLOCK(lock);
27995 const struct ata_port_operations *cur;
27996@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27997 return;
27998
27999 spin_lock(&lock);
28000+ pax_open_kernel();
28001
28002 for (cur = ops->inherits; cur; cur = cur->inherits) {
28003 void **inherit = (void **)cur;
28004@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28005 if (IS_ERR(*pp))
28006 *pp = NULL;
28007
28008- ops->inherits = NULL;
28009+ *(struct ata_port_operations **)&ops->inherits = NULL;
28010
28011+ pax_close_kernel();
28012 spin_unlock(&lock);
28013 }
28014
28015@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
28016 */
28017 /* KILLME - the only user left is ipr */
28018 void ata_host_init(struct ata_host *host, struct device *dev,
28019- unsigned long flags, struct ata_port_operations *ops)
28020+ unsigned long flags, const struct ata_port_operations *ops)
28021 {
28022 spin_lock_init(&host->lock);
28023 host->dev = dev;
28024@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
28025 /* truly dummy */
28026 }
28027
28028-struct ata_port_operations ata_dummy_port_ops = {
28029+const struct ata_port_operations ata_dummy_port_ops = {
28030 .qc_prep = ata_noop_qc_prep,
28031 .qc_issue = ata_dummy_qc_issue,
28032 .error_handler = ata_dummy_error_handler,
28033diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
28034index e5bdb9b..45a8e72 100644
28035--- a/drivers/ata/libata-eh.c
28036+++ b/drivers/ata/libata-eh.c
28037@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
28038 {
28039 struct ata_link *link;
28040
28041+ pax_track_stack();
28042+
28043 ata_for_each_link(link, ap, HOST_FIRST)
28044 ata_eh_link_report(link);
28045 }
28046@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
28047 */
28048 void ata_std_error_handler(struct ata_port *ap)
28049 {
28050- struct ata_port_operations *ops = ap->ops;
28051+ const struct ata_port_operations *ops = ap->ops;
28052 ata_reset_fn_t hardreset = ops->hardreset;
28053
28054 /* ignore built-in hardreset if SCR access is not available */
28055diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
28056index 51f0ffb..19ce3e3 100644
28057--- a/drivers/ata/libata-pmp.c
28058+++ b/drivers/ata/libata-pmp.c
28059@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
28060 */
28061 static int sata_pmp_eh_recover(struct ata_port *ap)
28062 {
28063- struct ata_port_operations *ops = ap->ops;
28064+ const struct ata_port_operations *ops = ap->ops;
28065 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
28066 struct ata_link *pmp_link = &ap->link;
28067 struct ata_device *pmp_dev = pmp_link->device;
28068diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
28069index d8f35fe..288180a 100644
28070--- a/drivers/ata/pata_acpi.c
28071+++ b/drivers/ata/pata_acpi.c
28072@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
28073 ATA_BMDMA_SHT(DRV_NAME),
28074 };
28075
28076-static struct ata_port_operations pacpi_ops = {
28077+static const struct ata_port_operations pacpi_ops = {
28078 .inherits = &ata_bmdma_port_ops,
28079 .qc_issue = pacpi_qc_issue,
28080 .cable_detect = pacpi_cable_detect,
28081diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
28082index 9434114..1f2f364 100644
28083--- a/drivers/ata/pata_ali.c
28084+++ b/drivers/ata/pata_ali.c
28085@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
28086 * Port operations for PIO only ALi
28087 */
28088
28089-static struct ata_port_operations ali_early_port_ops = {
28090+static const struct ata_port_operations ali_early_port_ops = {
28091 .inherits = &ata_sff_port_ops,
28092 .cable_detect = ata_cable_40wire,
28093 .set_piomode = ali_set_piomode,
28094@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
28095 * Port operations for DMA capable ALi without cable
28096 * detect
28097 */
28098-static struct ata_port_operations ali_20_port_ops = {
28099+static const struct ata_port_operations ali_20_port_ops = {
28100 .inherits = &ali_dma_base_ops,
28101 .cable_detect = ata_cable_40wire,
28102 .mode_filter = ali_20_filter,
28103@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
28104 /*
28105 * Port operations for DMA capable ALi with cable detect
28106 */
28107-static struct ata_port_operations ali_c2_port_ops = {
28108+static const struct ata_port_operations ali_c2_port_ops = {
28109 .inherits = &ali_dma_base_ops,
28110 .check_atapi_dma = ali_check_atapi_dma,
28111 .cable_detect = ali_c2_cable_detect,
28112@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
28113 /*
28114 * Port operations for DMA capable ALi with cable detect
28115 */
28116-static struct ata_port_operations ali_c4_port_ops = {
28117+static const struct ata_port_operations ali_c4_port_ops = {
28118 .inherits = &ali_dma_base_ops,
28119 .check_atapi_dma = ali_check_atapi_dma,
28120 .cable_detect = ali_c2_cable_detect,
28121@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
28122 /*
28123 * Port operations for DMA capable ALi with cable detect and LBA48
28124 */
28125-static struct ata_port_operations ali_c5_port_ops = {
28126+static const struct ata_port_operations ali_c5_port_ops = {
28127 .inherits = &ali_dma_base_ops,
28128 .check_atapi_dma = ali_check_atapi_dma,
28129 .dev_config = ali_warn_atapi_dma,
28130diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
28131index 567f3f7..c8ee0da 100644
28132--- a/drivers/ata/pata_amd.c
28133+++ b/drivers/ata/pata_amd.c
28134@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
28135 .prereset = amd_pre_reset,
28136 };
28137
28138-static struct ata_port_operations amd33_port_ops = {
28139+static const struct ata_port_operations amd33_port_ops = {
28140 .inherits = &amd_base_port_ops,
28141 .cable_detect = ata_cable_40wire,
28142 .set_piomode = amd33_set_piomode,
28143 .set_dmamode = amd33_set_dmamode,
28144 };
28145
28146-static struct ata_port_operations amd66_port_ops = {
28147+static const struct ata_port_operations amd66_port_ops = {
28148 .inherits = &amd_base_port_ops,
28149 .cable_detect = ata_cable_unknown,
28150 .set_piomode = amd66_set_piomode,
28151 .set_dmamode = amd66_set_dmamode,
28152 };
28153
28154-static struct ata_port_operations amd100_port_ops = {
28155+static const struct ata_port_operations amd100_port_ops = {
28156 .inherits = &amd_base_port_ops,
28157 .cable_detect = ata_cable_unknown,
28158 .set_piomode = amd100_set_piomode,
28159 .set_dmamode = amd100_set_dmamode,
28160 };
28161
28162-static struct ata_port_operations amd133_port_ops = {
28163+static const struct ata_port_operations amd133_port_ops = {
28164 .inherits = &amd_base_port_ops,
28165 .cable_detect = amd_cable_detect,
28166 .set_piomode = amd133_set_piomode,
28167@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
28168 .host_stop = nv_host_stop,
28169 };
28170
28171-static struct ata_port_operations nv100_port_ops = {
28172+static const struct ata_port_operations nv100_port_ops = {
28173 .inherits = &nv_base_port_ops,
28174 .set_piomode = nv100_set_piomode,
28175 .set_dmamode = nv100_set_dmamode,
28176 };
28177
28178-static struct ata_port_operations nv133_port_ops = {
28179+static const struct ata_port_operations nv133_port_ops = {
28180 .inherits = &nv_base_port_ops,
28181 .set_piomode = nv133_set_piomode,
28182 .set_dmamode = nv133_set_dmamode,
28183diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28184index d332cfd..4b7eaae 100644
28185--- a/drivers/ata/pata_artop.c
28186+++ b/drivers/ata/pata_artop.c
28187@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28188 ATA_BMDMA_SHT(DRV_NAME),
28189 };
28190
28191-static struct ata_port_operations artop6210_ops = {
28192+static const struct ata_port_operations artop6210_ops = {
28193 .inherits = &ata_bmdma_port_ops,
28194 .cable_detect = ata_cable_40wire,
28195 .set_piomode = artop6210_set_piomode,
28196@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28197 .qc_defer = artop6210_qc_defer,
28198 };
28199
28200-static struct ata_port_operations artop6260_ops = {
28201+static const struct ata_port_operations artop6260_ops = {
28202 .inherits = &ata_bmdma_port_ops,
28203 .cable_detect = artop6260_cable_detect,
28204 .set_piomode = artop6260_set_piomode,
28205diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28206index 5c129f9..7bb7ccb 100644
28207--- a/drivers/ata/pata_at32.c
28208+++ b/drivers/ata/pata_at32.c
28209@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28210 ATA_PIO_SHT(DRV_NAME),
28211 };
28212
28213-static struct ata_port_operations at32_port_ops = {
28214+static const struct ata_port_operations at32_port_ops = {
28215 .inherits = &ata_sff_port_ops,
28216 .cable_detect = ata_cable_40wire,
28217 .set_piomode = pata_at32_set_piomode,
28218diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28219index 41c94b1..829006d 100644
28220--- a/drivers/ata/pata_at91.c
28221+++ b/drivers/ata/pata_at91.c
28222@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28223 ATA_PIO_SHT(DRV_NAME),
28224 };
28225
28226-static struct ata_port_operations pata_at91_port_ops = {
28227+static const struct ata_port_operations pata_at91_port_ops = {
28228 .inherits = &ata_sff_port_ops,
28229
28230 .sff_data_xfer = pata_at91_data_xfer_noirq,
28231diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28232index ae4454d..d391eb4 100644
28233--- a/drivers/ata/pata_atiixp.c
28234+++ b/drivers/ata/pata_atiixp.c
28235@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28236 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28237 };
28238
28239-static struct ata_port_operations atiixp_port_ops = {
28240+static const struct ata_port_operations atiixp_port_ops = {
28241 .inherits = &ata_bmdma_port_ops,
28242
28243 .qc_prep = ata_sff_dumb_qc_prep,
28244diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28245index 6fe7ded..2a425dc 100644
28246--- a/drivers/ata/pata_atp867x.c
28247+++ b/drivers/ata/pata_atp867x.c
28248@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28249 ATA_BMDMA_SHT(DRV_NAME),
28250 };
28251
28252-static struct ata_port_operations atp867x_ops = {
28253+static const struct ata_port_operations atp867x_ops = {
28254 .inherits = &ata_bmdma_port_ops,
28255 .cable_detect = atp867x_cable_detect,
28256 .set_piomode = atp867x_set_piomode,
28257diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28258index c4b47a3..b27a367 100644
28259--- a/drivers/ata/pata_bf54x.c
28260+++ b/drivers/ata/pata_bf54x.c
28261@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28262 .dma_boundary = ATA_DMA_BOUNDARY,
28263 };
28264
28265-static struct ata_port_operations bfin_pata_ops = {
28266+static const struct ata_port_operations bfin_pata_ops = {
28267 .inherits = &ata_sff_port_ops,
28268
28269 .set_piomode = bfin_set_piomode,
28270diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28271index 5acf9fa..84248be 100644
28272--- a/drivers/ata/pata_cmd640.c
28273+++ b/drivers/ata/pata_cmd640.c
28274@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28275 ATA_BMDMA_SHT(DRV_NAME),
28276 };
28277
28278-static struct ata_port_operations cmd640_port_ops = {
28279+static const struct ata_port_operations cmd640_port_ops = {
28280 .inherits = &ata_bmdma_port_ops,
28281 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28282 .sff_data_xfer = ata_sff_data_xfer_noirq,
28283diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28284index ccd2694..c869c3d 100644
28285--- a/drivers/ata/pata_cmd64x.c
28286+++ b/drivers/ata/pata_cmd64x.c
28287@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28288 .set_dmamode = cmd64x_set_dmamode,
28289 };
28290
28291-static struct ata_port_operations cmd64x_port_ops = {
28292+static const struct ata_port_operations cmd64x_port_ops = {
28293 .inherits = &cmd64x_base_ops,
28294 .cable_detect = ata_cable_40wire,
28295 };
28296
28297-static struct ata_port_operations cmd646r1_port_ops = {
28298+static const struct ata_port_operations cmd646r1_port_ops = {
28299 .inherits = &cmd64x_base_ops,
28300 .bmdma_stop = cmd646r1_bmdma_stop,
28301 .cable_detect = ata_cable_40wire,
28302 };
28303
28304-static struct ata_port_operations cmd648_port_ops = {
28305+static const struct ata_port_operations cmd648_port_ops = {
28306 .inherits = &cmd64x_base_ops,
28307 .bmdma_stop = cmd648_bmdma_stop,
28308 .cable_detect = cmd648_cable_detect,
28309diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28310index 0df83cf..d7595b0 100644
28311--- a/drivers/ata/pata_cs5520.c
28312+++ b/drivers/ata/pata_cs5520.c
28313@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28314 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28315 };
28316
28317-static struct ata_port_operations cs5520_port_ops = {
28318+static const struct ata_port_operations cs5520_port_ops = {
28319 .inherits = &ata_bmdma_port_ops,
28320 .qc_prep = ata_sff_dumb_qc_prep,
28321 .cable_detect = ata_cable_40wire,
28322diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28323index c974b05..6d26b11 100644
28324--- a/drivers/ata/pata_cs5530.c
28325+++ b/drivers/ata/pata_cs5530.c
28326@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28327 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28328 };
28329
28330-static struct ata_port_operations cs5530_port_ops = {
28331+static const struct ata_port_operations cs5530_port_ops = {
28332 .inherits = &ata_bmdma_port_ops,
28333
28334 .qc_prep = ata_sff_dumb_qc_prep,
28335diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28336index 403f561..aacd26b 100644
28337--- a/drivers/ata/pata_cs5535.c
28338+++ b/drivers/ata/pata_cs5535.c
28339@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28340 ATA_BMDMA_SHT(DRV_NAME),
28341 };
28342
28343-static struct ata_port_operations cs5535_port_ops = {
28344+static const struct ata_port_operations cs5535_port_ops = {
28345 .inherits = &ata_bmdma_port_ops,
28346 .cable_detect = cs5535_cable_detect,
28347 .set_piomode = cs5535_set_piomode,
28348diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28349index 6da4cb4..de24a25 100644
28350--- a/drivers/ata/pata_cs5536.c
28351+++ b/drivers/ata/pata_cs5536.c
28352@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28353 ATA_BMDMA_SHT(DRV_NAME),
28354 };
28355
28356-static struct ata_port_operations cs5536_port_ops = {
28357+static const struct ata_port_operations cs5536_port_ops = {
28358 .inherits = &ata_bmdma_port_ops,
28359 .cable_detect = cs5536_cable_detect,
28360 .set_piomode = cs5536_set_piomode,
28361diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28362index 8fb040b..b16a9c9 100644
28363--- a/drivers/ata/pata_cypress.c
28364+++ b/drivers/ata/pata_cypress.c
28365@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28366 ATA_BMDMA_SHT(DRV_NAME),
28367 };
28368
28369-static struct ata_port_operations cy82c693_port_ops = {
28370+static const struct ata_port_operations cy82c693_port_ops = {
28371 .inherits = &ata_bmdma_port_ops,
28372 .cable_detect = ata_cable_40wire,
28373 .set_piomode = cy82c693_set_piomode,
28374diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28375index 2a6412f..555ee11 100644
28376--- a/drivers/ata/pata_efar.c
28377+++ b/drivers/ata/pata_efar.c
28378@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28379 ATA_BMDMA_SHT(DRV_NAME),
28380 };
28381
28382-static struct ata_port_operations efar_ops = {
28383+static const struct ata_port_operations efar_ops = {
28384 .inherits = &ata_bmdma_port_ops,
28385 .cable_detect = efar_cable_detect,
28386 .set_piomode = efar_set_piomode,
28387diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28388index b9d8836..0b92030 100644
28389--- a/drivers/ata/pata_hpt366.c
28390+++ b/drivers/ata/pata_hpt366.c
28391@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28392 * Configuration for HPT366/68
28393 */
28394
28395-static struct ata_port_operations hpt366_port_ops = {
28396+static const struct ata_port_operations hpt366_port_ops = {
28397 .inherits = &ata_bmdma_port_ops,
28398 .cable_detect = hpt36x_cable_detect,
28399 .mode_filter = hpt366_filter,
28400diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28401index 5af7f19..00c4980 100644
28402--- a/drivers/ata/pata_hpt37x.c
28403+++ b/drivers/ata/pata_hpt37x.c
28404@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28405 * Configuration for HPT370
28406 */
28407
28408-static struct ata_port_operations hpt370_port_ops = {
28409+static const struct ata_port_operations hpt370_port_ops = {
28410 .inherits = &ata_bmdma_port_ops,
28411
28412 .bmdma_stop = hpt370_bmdma_stop,
28413@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28414 * Configuration for HPT370A. Close to 370 but less filters
28415 */
28416
28417-static struct ata_port_operations hpt370a_port_ops = {
28418+static const struct ata_port_operations hpt370a_port_ops = {
28419 .inherits = &hpt370_port_ops,
28420 .mode_filter = hpt370a_filter,
28421 };
28422@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28423 * and DMA mode setting functionality.
28424 */
28425
28426-static struct ata_port_operations hpt372_port_ops = {
28427+static const struct ata_port_operations hpt372_port_ops = {
28428 .inherits = &ata_bmdma_port_ops,
28429
28430 .bmdma_stop = hpt37x_bmdma_stop,
28431@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28432 * but we have a different cable detection procedure for function 1.
28433 */
28434
28435-static struct ata_port_operations hpt374_fn1_port_ops = {
28436+static const struct ata_port_operations hpt374_fn1_port_ops = {
28437 .inherits = &hpt372_port_ops,
28438 .prereset = hpt374_fn1_pre_reset,
28439 };
28440diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28441index 100f227..2e39382 100644
28442--- a/drivers/ata/pata_hpt3x2n.c
28443+++ b/drivers/ata/pata_hpt3x2n.c
28444@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28445 * Configuration for HPT3x2n.
28446 */
28447
28448-static struct ata_port_operations hpt3x2n_port_ops = {
28449+static const struct ata_port_operations hpt3x2n_port_ops = {
28450 .inherits = &ata_bmdma_port_ops,
28451
28452 .bmdma_stop = hpt3x2n_bmdma_stop,
28453diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28454index 7e31025..6fca8f4 100644
28455--- a/drivers/ata/pata_hpt3x3.c
28456+++ b/drivers/ata/pata_hpt3x3.c
28457@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28458 ATA_BMDMA_SHT(DRV_NAME),
28459 };
28460
28461-static struct ata_port_operations hpt3x3_port_ops = {
28462+static const struct ata_port_operations hpt3x3_port_ops = {
28463 .inherits = &ata_bmdma_port_ops,
28464 .cable_detect = ata_cable_40wire,
28465 .set_piomode = hpt3x3_set_piomode,
28466diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28467index b663b7f..9a26c2a 100644
28468--- a/drivers/ata/pata_icside.c
28469+++ b/drivers/ata/pata_icside.c
28470@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28471 }
28472 }
28473
28474-static struct ata_port_operations pata_icside_port_ops = {
28475+static const struct ata_port_operations pata_icside_port_ops = {
28476 .inherits = &ata_sff_port_ops,
28477 /* no need to build any PRD tables for DMA */
28478 .qc_prep = ata_noop_qc_prep,
28479diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28480index 4bceb88..457dfb6 100644
28481--- a/drivers/ata/pata_isapnp.c
28482+++ b/drivers/ata/pata_isapnp.c
28483@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28484 ATA_PIO_SHT(DRV_NAME),
28485 };
28486
28487-static struct ata_port_operations isapnp_port_ops = {
28488+static const struct ata_port_operations isapnp_port_ops = {
28489 .inherits = &ata_sff_port_ops,
28490 .cable_detect = ata_cable_40wire,
28491 };
28492
28493-static struct ata_port_operations isapnp_noalt_port_ops = {
28494+static const struct ata_port_operations isapnp_noalt_port_ops = {
28495 .inherits = &ata_sff_port_ops,
28496 .cable_detect = ata_cable_40wire,
28497 /* No altstatus so we don't want to use the lost interrupt poll */
28498diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28499index f156da8..24976e2 100644
28500--- a/drivers/ata/pata_it8213.c
28501+++ b/drivers/ata/pata_it8213.c
28502@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28503 };
28504
28505
28506-static struct ata_port_operations it8213_ops = {
28507+static const struct ata_port_operations it8213_ops = {
28508 .inherits = &ata_bmdma_port_ops,
28509 .cable_detect = it8213_cable_detect,
28510 .set_piomode = it8213_set_piomode,
28511diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28512index 188bc2f..ca9e785 100644
28513--- a/drivers/ata/pata_it821x.c
28514+++ b/drivers/ata/pata_it821x.c
28515@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28516 ATA_BMDMA_SHT(DRV_NAME),
28517 };
28518
28519-static struct ata_port_operations it821x_smart_port_ops = {
28520+static const struct ata_port_operations it821x_smart_port_ops = {
28521 .inherits = &ata_bmdma_port_ops,
28522
28523 .check_atapi_dma= it821x_check_atapi_dma,
28524@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28525 .port_start = it821x_port_start,
28526 };
28527
28528-static struct ata_port_operations it821x_passthru_port_ops = {
28529+static const struct ata_port_operations it821x_passthru_port_ops = {
28530 .inherits = &ata_bmdma_port_ops,
28531
28532 .check_atapi_dma= it821x_check_atapi_dma,
28533@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28534 .port_start = it821x_port_start,
28535 };
28536
28537-static struct ata_port_operations it821x_rdc_port_ops = {
28538+static const struct ata_port_operations it821x_rdc_port_ops = {
28539 .inherits = &ata_bmdma_port_ops,
28540
28541 .check_atapi_dma= it821x_check_atapi_dma,
28542diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28543index ba54b08..4b952b7 100644
28544--- a/drivers/ata/pata_ixp4xx_cf.c
28545+++ b/drivers/ata/pata_ixp4xx_cf.c
28546@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28547 ATA_PIO_SHT(DRV_NAME),
28548 };
28549
28550-static struct ata_port_operations ixp4xx_port_ops = {
28551+static const struct ata_port_operations ixp4xx_port_ops = {
28552 .inherits = &ata_sff_port_ops,
28553 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28554 .cable_detect = ata_cable_40wire,
28555diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28556index 3a1474a..434b0ff 100644
28557--- a/drivers/ata/pata_jmicron.c
28558+++ b/drivers/ata/pata_jmicron.c
28559@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28560 ATA_BMDMA_SHT(DRV_NAME),
28561 };
28562
28563-static struct ata_port_operations jmicron_ops = {
28564+static const struct ata_port_operations jmicron_ops = {
28565 .inherits = &ata_bmdma_port_ops,
28566 .prereset = jmicron_pre_reset,
28567 };
28568diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28569index 6932e56..220e71d 100644
28570--- a/drivers/ata/pata_legacy.c
28571+++ b/drivers/ata/pata_legacy.c
28572@@ -106,7 +106,7 @@ struct legacy_probe {
28573
28574 struct legacy_controller {
28575 const char *name;
28576- struct ata_port_operations *ops;
28577+ const struct ata_port_operations *ops;
28578 unsigned int pio_mask;
28579 unsigned int flags;
28580 unsigned int pflags;
28581@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28582 * pio_mask as well.
28583 */
28584
28585-static struct ata_port_operations simple_port_ops = {
28586+static const struct ata_port_operations simple_port_ops = {
28587 .inherits = &legacy_base_port_ops,
28588 .sff_data_xfer = ata_sff_data_xfer_noirq,
28589 };
28590
28591-static struct ata_port_operations legacy_port_ops = {
28592+static const struct ata_port_operations legacy_port_ops = {
28593 .inherits = &legacy_base_port_ops,
28594 .sff_data_xfer = ata_sff_data_xfer_noirq,
28595 .set_mode = legacy_set_mode,
28596@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28597 return buflen;
28598 }
28599
28600-static struct ata_port_operations pdc20230_port_ops = {
28601+static const struct ata_port_operations pdc20230_port_ops = {
28602 .inherits = &legacy_base_port_ops,
28603 .set_piomode = pdc20230_set_piomode,
28604 .sff_data_xfer = pdc_data_xfer_vlb,
28605@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28606 ioread8(ap->ioaddr.status_addr);
28607 }
28608
28609-static struct ata_port_operations ht6560a_port_ops = {
28610+static const struct ata_port_operations ht6560a_port_ops = {
28611 .inherits = &legacy_base_port_ops,
28612 .set_piomode = ht6560a_set_piomode,
28613 };
28614@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28615 ioread8(ap->ioaddr.status_addr);
28616 }
28617
28618-static struct ata_port_operations ht6560b_port_ops = {
28619+static const struct ata_port_operations ht6560b_port_ops = {
28620 .inherits = &legacy_base_port_ops,
28621 .set_piomode = ht6560b_set_piomode,
28622 };
28623@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28624 }
28625
28626
28627-static struct ata_port_operations opti82c611a_port_ops = {
28628+static const struct ata_port_operations opti82c611a_port_ops = {
28629 .inherits = &legacy_base_port_ops,
28630 .set_piomode = opti82c611a_set_piomode,
28631 };
28632@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28633 return ata_sff_qc_issue(qc);
28634 }
28635
28636-static struct ata_port_operations opti82c46x_port_ops = {
28637+static const struct ata_port_operations opti82c46x_port_ops = {
28638 .inherits = &legacy_base_port_ops,
28639 .set_piomode = opti82c46x_set_piomode,
28640 .qc_issue = opti82c46x_qc_issue,
28641@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28642 return 0;
28643 }
28644
28645-static struct ata_port_operations qdi6500_port_ops = {
28646+static const struct ata_port_operations qdi6500_port_ops = {
28647 .inherits = &legacy_base_port_ops,
28648 .set_piomode = qdi6500_set_piomode,
28649 .qc_issue = qdi_qc_issue,
28650 .sff_data_xfer = vlb32_data_xfer,
28651 };
28652
28653-static struct ata_port_operations qdi6580_port_ops = {
28654+static const struct ata_port_operations qdi6580_port_ops = {
28655 .inherits = &legacy_base_port_ops,
28656 .set_piomode = qdi6580_set_piomode,
28657 .sff_data_xfer = vlb32_data_xfer,
28658 };
28659
28660-static struct ata_port_operations qdi6580dp_port_ops = {
28661+static const struct ata_port_operations qdi6580dp_port_ops = {
28662 .inherits = &legacy_base_port_ops,
28663 .set_piomode = qdi6580dp_set_piomode,
28664 .sff_data_xfer = vlb32_data_xfer,
28665@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28666 return 0;
28667 }
28668
28669-static struct ata_port_operations winbond_port_ops = {
28670+static const struct ata_port_operations winbond_port_ops = {
28671 .inherits = &legacy_base_port_ops,
28672 .set_piomode = winbond_set_piomode,
28673 .sff_data_xfer = vlb32_data_xfer,
28674@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28675 int pio_modes = controller->pio_mask;
28676 unsigned long io = probe->port;
28677 u32 mask = (1 << probe->slot);
28678- struct ata_port_operations *ops = controller->ops;
28679+ const struct ata_port_operations *ops = controller->ops;
28680 struct legacy_data *ld = &legacy_data[probe->slot];
28681 struct ata_host *host = NULL;
28682 struct ata_port *ap;
28683diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28684index 2096fb7..4d090fc 100644
28685--- a/drivers/ata/pata_marvell.c
28686+++ b/drivers/ata/pata_marvell.c
28687@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28688 ATA_BMDMA_SHT(DRV_NAME),
28689 };
28690
28691-static struct ata_port_operations marvell_ops = {
28692+static const struct ata_port_operations marvell_ops = {
28693 .inherits = &ata_bmdma_port_ops,
28694 .cable_detect = marvell_cable_detect,
28695 .prereset = marvell_pre_reset,
28696diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28697index 99d41be..7d56aa8 100644
28698--- a/drivers/ata/pata_mpc52xx.c
28699+++ b/drivers/ata/pata_mpc52xx.c
28700@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28701 ATA_PIO_SHT(DRV_NAME),
28702 };
28703
28704-static struct ata_port_operations mpc52xx_ata_port_ops = {
28705+static const struct ata_port_operations mpc52xx_ata_port_ops = {
28706 .inherits = &ata_bmdma_port_ops,
28707 .sff_dev_select = mpc52xx_ata_dev_select,
28708 .set_piomode = mpc52xx_ata_set_piomode,
28709diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28710index b21f002..0a27e7f 100644
28711--- a/drivers/ata/pata_mpiix.c
28712+++ b/drivers/ata/pata_mpiix.c
28713@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28714 ATA_PIO_SHT(DRV_NAME),
28715 };
28716
28717-static struct ata_port_operations mpiix_port_ops = {
28718+static const struct ata_port_operations mpiix_port_ops = {
28719 .inherits = &ata_sff_port_ops,
28720 .qc_issue = mpiix_qc_issue,
28721 .cable_detect = ata_cable_40wire,
28722diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28723index f0d52f7..89c3be3 100644
28724--- a/drivers/ata/pata_netcell.c
28725+++ b/drivers/ata/pata_netcell.c
28726@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28727 ATA_BMDMA_SHT(DRV_NAME),
28728 };
28729
28730-static struct ata_port_operations netcell_ops = {
28731+static const struct ata_port_operations netcell_ops = {
28732 .inherits = &ata_bmdma_port_ops,
28733 .cable_detect = ata_cable_80wire,
28734 .read_id = netcell_read_id,
28735diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28736index dd53a66..a3f4317 100644
28737--- a/drivers/ata/pata_ninja32.c
28738+++ b/drivers/ata/pata_ninja32.c
28739@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28740 ATA_BMDMA_SHT(DRV_NAME),
28741 };
28742
28743-static struct ata_port_operations ninja32_port_ops = {
28744+static const struct ata_port_operations ninja32_port_ops = {
28745 .inherits = &ata_bmdma_port_ops,
28746 .sff_dev_select = ninja32_dev_select,
28747 .cable_detect = ata_cable_40wire,
28748diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28749index ca53fac..9aa93ef 100644
28750--- a/drivers/ata/pata_ns87410.c
28751+++ b/drivers/ata/pata_ns87410.c
28752@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28753 ATA_PIO_SHT(DRV_NAME),
28754 };
28755
28756-static struct ata_port_operations ns87410_port_ops = {
28757+static const struct ata_port_operations ns87410_port_ops = {
28758 .inherits = &ata_sff_port_ops,
28759 .qc_issue = ns87410_qc_issue,
28760 .cable_detect = ata_cable_40wire,
28761diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28762index 773b159..55f454e 100644
28763--- a/drivers/ata/pata_ns87415.c
28764+++ b/drivers/ata/pata_ns87415.c
28765@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28766 }
28767 #endif /* 87560 SuperIO Support */
28768
28769-static struct ata_port_operations ns87415_pata_ops = {
28770+static const struct ata_port_operations ns87415_pata_ops = {
28771 .inherits = &ata_bmdma_port_ops,
28772
28773 .check_atapi_dma = ns87415_check_atapi_dma,
28774@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28775 };
28776
28777 #if defined(CONFIG_SUPERIO)
28778-static struct ata_port_operations ns87560_pata_ops = {
28779+static const struct ata_port_operations ns87560_pata_ops = {
28780 .inherits = &ns87415_pata_ops,
28781 .sff_tf_read = ns87560_tf_read,
28782 .sff_check_status = ns87560_check_status,
28783diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28784index d6f6956..639295b 100644
28785--- a/drivers/ata/pata_octeon_cf.c
28786+++ b/drivers/ata/pata_octeon_cf.c
28787@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28788 return 0;
28789 }
28790
28791+/* cannot be const */
28792 static struct ata_port_operations octeon_cf_ops = {
28793 .inherits = &ata_sff_port_ops,
28794 .check_atapi_dma = octeon_cf_check_atapi_dma,
28795diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28796index 84ac503..adee1cd 100644
28797--- a/drivers/ata/pata_oldpiix.c
28798+++ b/drivers/ata/pata_oldpiix.c
28799@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28800 ATA_BMDMA_SHT(DRV_NAME),
28801 };
28802
28803-static struct ata_port_operations oldpiix_pata_ops = {
28804+static const struct ata_port_operations oldpiix_pata_ops = {
28805 .inherits = &ata_bmdma_port_ops,
28806 .qc_issue = oldpiix_qc_issue,
28807 .cable_detect = ata_cable_40wire,
28808diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28809index 99eddda..3a4c0aa 100644
28810--- a/drivers/ata/pata_opti.c
28811+++ b/drivers/ata/pata_opti.c
28812@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28813 ATA_PIO_SHT(DRV_NAME),
28814 };
28815
28816-static struct ata_port_operations opti_port_ops = {
28817+static const struct ata_port_operations opti_port_ops = {
28818 .inherits = &ata_sff_port_ops,
28819 .cable_detect = ata_cable_40wire,
28820 .set_piomode = opti_set_piomode,
28821diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28822index 86885a4..8e9968d 100644
28823--- a/drivers/ata/pata_optidma.c
28824+++ b/drivers/ata/pata_optidma.c
28825@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28826 ATA_BMDMA_SHT(DRV_NAME),
28827 };
28828
28829-static struct ata_port_operations optidma_port_ops = {
28830+static const struct ata_port_operations optidma_port_ops = {
28831 .inherits = &ata_bmdma_port_ops,
28832 .cable_detect = ata_cable_40wire,
28833 .set_piomode = optidma_set_pio_mode,
28834@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28835 .prereset = optidma_pre_reset,
28836 };
28837
28838-static struct ata_port_operations optiplus_port_ops = {
28839+static const struct ata_port_operations optiplus_port_ops = {
28840 .inherits = &optidma_port_ops,
28841 .set_piomode = optiplus_set_pio_mode,
28842 .set_dmamode = optiplus_set_dma_mode,
28843diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28844index 11fb4cc..1a14022 100644
28845--- a/drivers/ata/pata_palmld.c
28846+++ b/drivers/ata/pata_palmld.c
28847@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28848 ATA_PIO_SHT(DRV_NAME),
28849 };
28850
28851-static struct ata_port_operations palmld_port_ops = {
28852+static const struct ata_port_operations palmld_port_ops = {
28853 .inherits = &ata_sff_port_ops,
28854 .sff_data_xfer = ata_sff_data_xfer_noirq,
28855 .cable_detect = ata_cable_40wire,
28856diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28857index dc99e26..7f4b1e4 100644
28858--- a/drivers/ata/pata_pcmcia.c
28859+++ b/drivers/ata/pata_pcmcia.c
28860@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28861 ATA_PIO_SHT(DRV_NAME),
28862 };
28863
28864-static struct ata_port_operations pcmcia_port_ops = {
28865+static const struct ata_port_operations pcmcia_port_ops = {
28866 .inherits = &ata_sff_port_ops,
28867 .sff_data_xfer = ata_sff_data_xfer_noirq,
28868 .cable_detect = ata_cable_40wire,
28869 .set_mode = pcmcia_set_mode,
28870 };
28871
28872-static struct ata_port_operations pcmcia_8bit_port_ops = {
28873+static const struct ata_port_operations pcmcia_8bit_port_ops = {
28874 .inherits = &ata_sff_port_ops,
28875 .sff_data_xfer = ata_data_xfer_8bit,
28876 .cable_detect = ata_cable_40wire,
28877@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28878 unsigned long io_base, ctl_base;
28879 void __iomem *io_addr, *ctl_addr;
28880 int n_ports = 1;
28881- struct ata_port_operations *ops = &pcmcia_port_ops;
28882+ const struct ata_port_operations *ops = &pcmcia_port_ops;
28883
28884 info = kzalloc(sizeof(*info), GFP_KERNEL);
28885 if (info == NULL)
28886diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28887index ca5cad0..3a1f125 100644
28888--- a/drivers/ata/pata_pdc2027x.c
28889+++ b/drivers/ata/pata_pdc2027x.c
28890@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28891 ATA_BMDMA_SHT(DRV_NAME),
28892 };
28893
28894-static struct ata_port_operations pdc2027x_pata100_ops = {
28895+static const struct ata_port_operations pdc2027x_pata100_ops = {
28896 .inherits = &ata_bmdma_port_ops,
28897 .check_atapi_dma = pdc2027x_check_atapi_dma,
28898 .cable_detect = pdc2027x_cable_detect,
28899 .prereset = pdc2027x_prereset,
28900 };
28901
28902-static struct ata_port_operations pdc2027x_pata133_ops = {
28903+static const struct ata_port_operations pdc2027x_pata133_ops = {
28904 .inherits = &pdc2027x_pata100_ops,
28905 .mode_filter = pdc2027x_mode_filter,
28906 .set_piomode = pdc2027x_set_piomode,
28907diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28908index 2911120..4bf62aa 100644
28909--- a/drivers/ata/pata_pdc202xx_old.c
28910+++ b/drivers/ata/pata_pdc202xx_old.c
28911@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28912 ATA_BMDMA_SHT(DRV_NAME),
28913 };
28914
28915-static struct ata_port_operations pdc2024x_port_ops = {
28916+static const struct ata_port_operations pdc2024x_port_ops = {
28917 .inherits = &ata_bmdma_port_ops,
28918
28919 .cable_detect = ata_cable_40wire,
28920@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28921 .sff_exec_command = pdc202xx_exec_command,
28922 };
28923
28924-static struct ata_port_operations pdc2026x_port_ops = {
28925+static const struct ata_port_operations pdc2026x_port_ops = {
28926 .inherits = &pdc2024x_port_ops,
28927
28928 .check_atapi_dma = pdc2026x_check_atapi_dma,
28929diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28930index 3f6ebc6..a18c358 100644
28931--- a/drivers/ata/pata_platform.c
28932+++ b/drivers/ata/pata_platform.c
28933@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28934 ATA_PIO_SHT(DRV_NAME),
28935 };
28936
28937-static struct ata_port_operations pata_platform_port_ops = {
28938+static const struct ata_port_operations pata_platform_port_ops = {
28939 .inherits = &ata_sff_port_ops,
28940 .sff_data_xfer = ata_sff_data_xfer_noirq,
28941 .cable_detect = ata_cable_unknown,
28942diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28943index 45879dc..165a9f9 100644
28944--- a/drivers/ata/pata_qdi.c
28945+++ b/drivers/ata/pata_qdi.c
28946@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28947 ATA_PIO_SHT(DRV_NAME),
28948 };
28949
28950-static struct ata_port_operations qdi6500_port_ops = {
28951+static const struct ata_port_operations qdi6500_port_ops = {
28952 .inherits = &ata_sff_port_ops,
28953 .qc_issue = qdi_qc_issue,
28954 .sff_data_xfer = qdi_data_xfer,
28955@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28956 .set_piomode = qdi6500_set_piomode,
28957 };
28958
28959-static struct ata_port_operations qdi6580_port_ops = {
28960+static const struct ata_port_operations qdi6580_port_ops = {
28961 .inherits = &qdi6500_port_ops,
28962 .set_piomode = qdi6580_set_piomode,
28963 };
28964diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28965index 4401b33..716c5cc 100644
28966--- a/drivers/ata/pata_radisys.c
28967+++ b/drivers/ata/pata_radisys.c
28968@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28969 ATA_BMDMA_SHT(DRV_NAME),
28970 };
28971
28972-static struct ata_port_operations radisys_pata_ops = {
28973+static const struct ata_port_operations radisys_pata_ops = {
28974 .inherits = &ata_bmdma_port_ops,
28975 .qc_issue = radisys_qc_issue,
28976 .cable_detect = ata_cable_unknown,
28977diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28978index 45f1e10..fab6bca 100644
28979--- a/drivers/ata/pata_rb532_cf.c
28980+++ b/drivers/ata/pata_rb532_cf.c
28981@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28982 return IRQ_HANDLED;
28983 }
28984
28985-static struct ata_port_operations rb532_pata_port_ops = {
28986+static const struct ata_port_operations rb532_pata_port_ops = {
28987 .inherits = &ata_sff_port_ops,
28988 .sff_data_xfer = ata_sff_data_xfer32,
28989 };
28990diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28991index c843a1e..b5853c3 100644
28992--- a/drivers/ata/pata_rdc.c
28993+++ b/drivers/ata/pata_rdc.c
28994@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28995 pci_write_config_byte(dev, 0x48, udma_enable);
28996 }
28997
28998-static struct ata_port_operations rdc_pata_ops = {
28999+static const struct ata_port_operations rdc_pata_ops = {
29000 .inherits = &ata_bmdma32_port_ops,
29001 .cable_detect = rdc_pata_cable_detect,
29002 .set_piomode = rdc_set_piomode,
29003diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
29004index a5e4dfe..080c8c9 100644
29005--- a/drivers/ata/pata_rz1000.c
29006+++ b/drivers/ata/pata_rz1000.c
29007@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
29008 ATA_PIO_SHT(DRV_NAME),
29009 };
29010
29011-static struct ata_port_operations rz1000_port_ops = {
29012+static const struct ata_port_operations rz1000_port_ops = {
29013 .inherits = &ata_sff_port_ops,
29014 .cable_detect = ata_cable_40wire,
29015 .set_mode = rz1000_set_mode,
29016diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
29017index 3bbed83..e309daf 100644
29018--- a/drivers/ata/pata_sc1200.c
29019+++ b/drivers/ata/pata_sc1200.c
29020@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
29021 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29022 };
29023
29024-static struct ata_port_operations sc1200_port_ops = {
29025+static const struct ata_port_operations sc1200_port_ops = {
29026 .inherits = &ata_bmdma_port_ops,
29027 .qc_prep = ata_sff_dumb_qc_prep,
29028 .qc_issue = sc1200_qc_issue,
29029diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
29030index 4257d6b..4c1d9d5 100644
29031--- a/drivers/ata/pata_scc.c
29032+++ b/drivers/ata/pata_scc.c
29033@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
29034 ATA_BMDMA_SHT(DRV_NAME),
29035 };
29036
29037-static struct ata_port_operations scc_pata_ops = {
29038+static const struct ata_port_operations scc_pata_ops = {
29039 .inherits = &ata_bmdma_port_ops,
29040
29041 .set_piomode = scc_set_piomode,
29042diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
29043index 99cceb4..e2e0a87 100644
29044--- a/drivers/ata/pata_sch.c
29045+++ b/drivers/ata/pata_sch.c
29046@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
29047 ATA_BMDMA_SHT(DRV_NAME),
29048 };
29049
29050-static struct ata_port_operations sch_pata_ops = {
29051+static const struct ata_port_operations sch_pata_ops = {
29052 .inherits = &ata_bmdma_port_ops,
29053 .cable_detect = ata_cable_unknown,
29054 .set_piomode = sch_set_piomode,
29055diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
29056index beaed12..39969f1 100644
29057--- a/drivers/ata/pata_serverworks.c
29058+++ b/drivers/ata/pata_serverworks.c
29059@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
29060 ATA_BMDMA_SHT(DRV_NAME),
29061 };
29062
29063-static struct ata_port_operations serverworks_osb4_port_ops = {
29064+static const struct ata_port_operations serverworks_osb4_port_ops = {
29065 .inherits = &ata_bmdma_port_ops,
29066 .cable_detect = serverworks_cable_detect,
29067 .mode_filter = serverworks_osb4_filter,
29068@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
29069 .set_dmamode = serverworks_set_dmamode,
29070 };
29071
29072-static struct ata_port_operations serverworks_csb_port_ops = {
29073+static const struct ata_port_operations serverworks_csb_port_ops = {
29074 .inherits = &serverworks_osb4_port_ops,
29075 .mode_filter = serverworks_csb_filter,
29076 };
29077diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
29078index a2ace48..0463b44 100644
29079--- a/drivers/ata/pata_sil680.c
29080+++ b/drivers/ata/pata_sil680.c
29081@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
29082 ATA_BMDMA_SHT(DRV_NAME),
29083 };
29084
29085-static struct ata_port_operations sil680_port_ops = {
29086+static const struct ata_port_operations sil680_port_ops = {
29087 .inherits = &ata_bmdma32_port_ops,
29088 .cable_detect = sil680_cable_detect,
29089 .set_piomode = sil680_set_piomode,
29090diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
29091index 488e77b..b3724d5 100644
29092--- a/drivers/ata/pata_sis.c
29093+++ b/drivers/ata/pata_sis.c
29094@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
29095 ATA_BMDMA_SHT(DRV_NAME),
29096 };
29097
29098-static struct ata_port_operations sis_133_for_sata_ops = {
29099+static const struct ata_port_operations sis_133_for_sata_ops = {
29100 .inherits = &ata_bmdma_port_ops,
29101 .set_piomode = sis_133_set_piomode,
29102 .set_dmamode = sis_133_set_dmamode,
29103 .cable_detect = sis_133_cable_detect,
29104 };
29105
29106-static struct ata_port_operations sis_base_ops = {
29107+static const struct ata_port_operations sis_base_ops = {
29108 .inherits = &ata_bmdma_port_ops,
29109 .prereset = sis_pre_reset,
29110 };
29111
29112-static struct ata_port_operations sis_133_ops = {
29113+static const struct ata_port_operations sis_133_ops = {
29114 .inherits = &sis_base_ops,
29115 .set_piomode = sis_133_set_piomode,
29116 .set_dmamode = sis_133_set_dmamode,
29117 .cable_detect = sis_133_cable_detect,
29118 };
29119
29120-static struct ata_port_operations sis_133_early_ops = {
29121+static const struct ata_port_operations sis_133_early_ops = {
29122 .inherits = &sis_base_ops,
29123 .set_piomode = sis_100_set_piomode,
29124 .set_dmamode = sis_133_early_set_dmamode,
29125 .cable_detect = sis_66_cable_detect,
29126 };
29127
29128-static struct ata_port_operations sis_100_ops = {
29129+static const struct ata_port_operations sis_100_ops = {
29130 .inherits = &sis_base_ops,
29131 .set_piomode = sis_100_set_piomode,
29132 .set_dmamode = sis_100_set_dmamode,
29133 .cable_detect = sis_66_cable_detect,
29134 };
29135
29136-static struct ata_port_operations sis_66_ops = {
29137+static const struct ata_port_operations sis_66_ops = {
29138 .inherits = &sis_base_ops,
29139 .set_piomode = sis_old_set_piomode,
29140 .set_dmamode = sis_66_set_dmamode,
29141 .cable_detect = sis_66_cable_detect,
29142 };
29143
29144-static struct ata_port_operations sis_old_ops = {
29145+static const struct ata_port_operations sis_old_ops = {
29146 .inherits = &sis_base_ops,
29147 .set_piomode = sis_old_set_piomode,
29148 .set_dmamode = sis_old_set_dmamode,
29149diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
29150index 29f733c..43e9ca0 100644
29151--- a/drivers/ata/pata_sl82c105.c
29152+++ b/drivers/ata/pata_sl82c105.c
29153@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
29154 ATA_BMDMA_SHT(DRV_NAME),
29155 };
29156
29157-static struct ata_port_operations sl82c105_port_ops = {
29158+static const struct ata_port_operations sl82c105_port_ops = {
29159 .inherits = &ata_bmdma_port_ops,
29160 .qc_defer = sl82c105_qc_defer,
29161 .bmdma_start = sl82c105_bmdma_start,
29162diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
29163index f1f13ff..df39e99 100644
29164--- a/drivers/ata/pata_triflex.c
29165+++ b/drivers/ata/pata_triflex.c
29166@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
29167 ATA_BMDMA_SHT(DRV_NAME),
29168 };
29169
29170-static struct ata_port_operations triflex_port_ops = {
29171+static const struct ata_port_operations triflex_port_ops = {
29172 .inherits = &ata_bmdma_port_ops,
29173 .bmdma_start = triflex_bmdma_start,
29174 .bmdma_stop = triflex_bmdma_stop,
29175diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
29176index 1d73b8d..98a4b29 100644
29177--- a/drivers/ata/pata_via.c
29178+++ b/drivers/ata/pata_via.c
29179@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29180 ATA_BMDMA_SHT(DRV_NAME),
29181 };
29182
29183-static struct ata_port_operations via_port_ops = {
29184+static const struct ata_port_operations via_port_ops = {
29185 .inherits = &ata_bmdma_port_ops,
29186 .cable_detect = via_cable_detect,
29187 .set_piomode = via_set_piomode,
29188@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29189 .port_start = via_port_start,
29190 };
29191
29192-static struct ata_port_operations via_port_ops_noirq = {
29193+static const struct ata_port_operations via_port_ops_noirq = {
29194 .inherits = &via_port_ops,
29195 .sff_data_xfer = ata_sff_data_xfer_noirq,
29196 };
29197diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29198index 6d8619b..ad511c4 100644
29199--- a/drivers/ata/pata_winbond.c
29200+++ b/drivers/ata/pata_winbond.c
29201@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29202 ATA_PIO_SHT(DRV_NAME),
29203 };
29204
29205-static struct ata_port_operations winbond_port_ops = {
29206+static const struct ata_port_operations winbond_port_ops = {
29207 .inherits = &ata_sff_port_ops,
29208 .sff_data_xfer = winbond_data_xfer,
29209 .cable_detect = ata_cable_40wire,
29210diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29211index 6c65b07..f996ec7 100644
29212--- a/drivers/ata/pdc_adma.c
29213+++ b/drivers/ata/pdc_adma.c
29214@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29215 .dma_boundary = ADMA_DMA_BOUNDARY,
29216 };
29217
29218-static struct ata_port_operations adma_ata_ops = {
29219+static const struct ata_port_operations adma_ata_ops = {
29220 .inherits = &ata_sff_port_ops,
29221
29222 .lost_interrupt = ATA_OP_NULL,
29223diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29224index 172b57e..c49bc1e 100644
29225--- a/drivers/ata/sata_fsl.c
29226+++ b/drivers/ata/sata_fsl.c
29227@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29228 .dma_boundary = ATA_DMA_BOUNDARY,
29229 };
29230
29231-static struct ata_port_operations sata_fsl_ops = {
29232+static const struct ata_port_operations sata_fsl_ops = {
29233 .inherits = &sata_pmp_port_ops,
29234
29235 .qc_defer = ata_std_qc_defer,
29236diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29237index 4406902..60603ef 100644
29238--- a/drivers/ata/sata_inic162x.c
29239+++ b/drivers/ata/sata_inic162x.c
29240@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29241 return 0;
29242 }
29243
29244-static struct ata_port_operations inic_port_ops = {
29245+static const struct ata_port_operations inic_port_ops = {
29246 .inherits = &sata_port_ops,
29247
29248 .check_atapi_dma = inic_check_atapi_dma,
29249diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29250index cf41126..8107be6 100644
29251--- a/drivers/ata/sata_mv.c
29252+++ b/drivers/ata/sata_mv.c
29253@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29254 .dma_boundary = MV_DMA_BOUNDARY,
29255 };
29256
29257-static struct ata_port_operations mv5_ops = {
29258+static const struct ata_port_operations mv5_ops = {
29259 .inherits = &ata_sff_port_ops,
29260
29261 .lost_interrupt = ATA_OP_NULL,
29262@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29263 .port_stop = mv_port_stop,
29264 };
29265
29266-static struct ata_port_operations mv6_ops = {
29267+static const struct ata_port_operations mv6_ops = {
29268 .inherits = &mv5_ops,
29269 .dev_config = mv6_dev_config,
29270 .scr_read = mv_scr_read,
29271@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29272 .bmdma_status = mv_bmdma_status,
29273 };
29274
29275-static struct ata_port_operations mv_iie_ops = {
29276+static const struct ata_port_operations mv_iie_ops = {
29277 .inherits = &mv6_ops,
29278 .dev_config = ATA_OP_NULL,
29279 .qc_prep = mv_qc_prep_iie,
29280diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29281index ae2297c..d5c9c33 100644
29282--- a/drivers/ata/sata_nv.c
29283+++ b/drivers/ata/sata_nv.c
29284@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29285 * cases. Define nv_hardreset() which only kicks in for post-boot
29286 * probing and use it for all variants.
29287 */
29288-static struct ata_port_operations nv_generic_ops = {
29289+static const struct ata_port_operations nv_generic_ops = {
29290 .inherits = &ata_bmdma_port_ops,
29291 .lost_interrupt = ATA_OP_NULL,
29292 .scr_read = nv_scr_read,
29293@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29294 .hardreset = nv_hardreset,
29295 };
29296
29297-static struct ata_port_operations nv_nf2_ops = {
29298+static const struct ata_port_operations nv_nf2_ops = {
29299 .inherits = &nv_generic_ops,
29300 .freeze = nv_nf2_freeze,
29301 .thaw = nv_nf2_thaw,
29302 };
29303
29304-static struct ata_port_operations nv_ck804_ops = {
29305+static const struct ata_port_operations nv_ck804_ops = {
29306 .inherits = &nv_generic_ops,
29307 .freeze = nv_ck804_freeze,
29308 .thaw = nv_ck804_thaw,
29309 .host_stop = nv_ck804_host_stop,
29310 };
29311
29312-static struct ata_port_operations nv_adma_ops = {
29313+static const struct ata_port_operations nv_adma_ops = {
29314 .inherits = &nv_ck804_ops,
29315
29316 .check_atapi_dma = nv_adma_check_atapi_dma,
29317@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29318 .host_stop = nv_adma_host_stop,
29319 };
29320
29321-static struct ata_port_operations nv_swncq_ops = {
29322+static const struct ata_port_operations nv_swncq_ops = {
29323 .inherits = &nv_generic_ops,
29324
29325 .qc_defer = ata_std_qc_defer,
29326diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29327index 07d8d00..6cc70bb 100644
29328--- a/drivers/ata/sata_promise.c
29329+++ b/drivers/ata/sata_promise.c
29330@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29331 .error_handler = pdc_error_handler,
29332 };
29333
29334-static struct ata_port_operations pdc_sata_ops = {
29335+static const struct ata_port_operations pdc_sata_ops = {
29336 .inherits = &pdc_common_ops,
29337 .cable_detect = pdc_sata_cable_detect,
29338 .freeze = pdc_sata_freeze,
29339@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29340
29341 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29342 and ->freeze/thaw that ignore the hotplug controls. */
29343-static struct ata_port_operations pdc_old_sata_ops = {
29344+static const struct ata_port_operations pdc_old_sata_ops = {
29345 .inherits = &pdc_sata_ops,
29346 .freeze = pdc_freeze,
29347 .thaw = pdc_thaw,
29348 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29349 };
29350
29351-static struct ata_port_operations pdc_pata_ops = {
29352+static const struct ata_port_operations pdc_pata_ops = {
29353 .inherits = &pdc_common_ops,
29354 .cable_detect = pdc_pata_cable_detect,
29355 .freeze = pdc_freeze,
29356diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29357index 326c0cf..36ecebe 100644
29358--- a/drivers/ata/sata_qstor.c
29359+++ b/drivers/ata/sata_qstor.c
29360@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29361 .dma_boundary = QS_DMA_BOUNDARY,
29362 };
29363
29364-static struct ata_port_operations qs_ata_ops = {
29365+static const struct ata_port_operations qs_ata_ops = {
29366 .inherits = &ata_sff_port_ops,
29367
29368 .check_atapi_dma = qs_check_atapi_dma,
29369diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29370index 3cb69d5..0871d3c 100644
29371--- a/drivers/ata/sata_sil.c
29372+++ b/drivers/ata/sata_sil.c
29373@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29374 .sg_tablesize = ATA_MAX_PRD
29375 };
29376
29377-static struct ata_port_operations sil_ops = {
29378+static const struct ata_port_operations sil_ops = {
29379 .inherits = &ata_bmdma32_port_ops,
29380 .dev_config = sil_dev_config,
29381 .set_mode = sil_set_mode,
29382diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29383index e6946fc..eddb794 100644
29384--- a/drivers/ata/sata_sil24.c
29385+++ b/drivers/ata/sata_sil24.c
29386@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29387 .dma_boundary = ATA_DMA_BOUNDARY,
29388 };
29389
29390-static struct ata_port_operations sil24_ops = {
29391+static const struct ata_port_operations sil24_ops = {
29392 .inherits = &sata_pmp_port_ops,
29393
29394 .qc_defer = sil24_qc_defer,
29395diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29396index f8a91bf..9cb06b6 100644
29397--- a/drivers/ata/sata_sis.c
29398+++ b/drivers/ata/sata_sis.c
29399@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29400 ATA_BMDMA_SHT(DRV_NAME),
29401 };
29402
29403-static struct ata_port_operations sis_ops = {
29404+static const struct ata_port_operations sis_ops = {
29405 .inherits = &ata_bmdma_port_ops,
29406 .scr_read = sis_scr_read,
29407 .scr_write = sis_scr_write,
29408diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29409index 7257f2d..d04c6f5 100644
29410--- a/drivers/ata/sata_svw.c
29411+++ b/drivers/ata/sata_svw.c
29412@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29413 };
29414
29415
29416-static struct ata_port_operations k2_sata_ops = {
29417+static const struct ata_port_operations k2_sata_ops = {
29418 .inherits = &ata_bmdma_port_ops,
29419 .sff_tf_load = k2_sata_tf_load,
29420 .sff_tf_read = k2_sata_tf_read,
29421diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29422index bbcf970..cd0df0d 100644
29423--- a/drivers/ata/sata_sx4.c
29424+++ b/drivers/ata/sata_sx4.c
29425@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29426 };
29427
29428 /* TODO: inherit from base port_ops after converting to new EH */
29429-static struct ata_port_operations pdc_20621_ops = {
29430+static const struct ata_port_operations pdc_20621_ops = {
29431 .inherits = &ata_sff_port_ops,
29432
29433 .check_atapi_dma = pdc_check_atapi_dma,
29434diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29435index e5bff47..089d859 100644
29436--- a/drivers/ata/sata_uli.c
29437+++ b/drivers/ata/sata_uli.c
29438@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29439 ATA_BMDMA_SHT(DRV_NAME),
29440 };
29441
29442-static struct ata_port_operations uli_ops = {
29443+static const struct ata_port_operations uli_ops = {
29444 .inherits = &ata_bmdma_port_ops,
29445 .scr_read = uli_scr_read,
29446 .scr_write = uli_scr_write,
29447diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29448index f5dcca7..77b94eb 100644
29449--- a/drivers/ata/sata_via.c
29450+++ b/drivers/ata/sata_via.c
29451@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29452 ATA_BMDMA_SHT(DRV_NAME),
29453 };
29454
29455-static struct ata_port_operations svia_base_ops = {
29456+static const struct ata_port_operations svia_base_ops = {
29457 .inherits = &ata_bmdma_port_ops,
29458 .sff_tf_load = svia_tf_load,
29459 };
29460
29461-static struct ata_port_operations vt6420_sata_ops = {
29462+static const struct ata_port_operations vt6420_sata_ops = {
29463 .inherits = &svia_base_ops,
29464 .freeze = svia_noop_freeze,
29465 .prereset = vt6420_prereset,
29466 .bmdma_start = vt6420_bmdma_start,
29467 };
29468
29469-static struct ata_port_operations vt6421_pata_ops = {
29470+static const struct ata_port_operations vt6421_pata_ops = {
29471 .inherits = &svia_base_ops,
29472 .cable_detect = vt6421_pata_cable_detect,
29473 .set_piomode = vt6421_set_pio_mode,
29474 .set_dmamode = vt6421_set_dma_mode,
29475 };
29476
29477-static struct ata_port_operations vt6421_sata_ops = {
29478+static const struct ata_port_operations vt6421_sata_ops = {
29479 .inherits = &svia_base_ops,
29480 .scr_read = svia_scr_read,
29481 .scr_write = svia_scr_write,
29482 };
29483
29484-static struct ata_port_operations vt8251_ops = {
29485+static const struct ata_port_operations vt8251_ops = {
29486 .inherits = &svia_base_ops,
29487 .hardreset = sata_std_hardreset,
29488 .scr_read = vt8251_scr_read,
29489diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29490index 8b2a278..51e65d3 100644
29491--- a/drivers/ata/sata_vsc.c
29492+++ b/drivers/ata/sata_vsc.c
29493@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29494 };
29495
29496
29497-static struct ata_port_operations vsc_sata_ops = {
29498+static const struct ata_port_operations vsc_sata_ops = {
29499 .inherits = &ata_bmdma_port_ops,
29500 /* The IRQ handling is not quite standard SFF behaviour so we
29501 cannot use the default lost interrupt handler */
29502diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29503index 5effec6..7e4019a 100644
29504--- a/drivers/atm/adummy.c
29505+++ b/drivers/atm/adummy.c
29506@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29507 vcc->pop(vcc, skb);
29508 else
29509 dev_kfree_skb_any(skb);
29510- atomic_inc(&vcc->stats->tx);
29511+ atomic_inc_unchecked(&vcc->stats->tx);
29512
29513 return 0;
29514 }
29515diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29516index 66e1813..26a27c6 100644
29517--- a/drivers/atm/ambassador.c
29518+++ b/drivers/atm/ambassador.c
29519@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29520 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29521
29522 // VC layer stats
29523- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29524+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29525
29526 // free the descriptor
29527 kfree (tx_descr);
29528@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29529 dump_skb ("<<<", vc, skb);
29530
29531 // VC layer stats
29532- atomic_inc(&atm_vcc->stats->rx);
29533+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29534 __net_timestamp(skb);
29535 // end of our responsability
29536 atm_vcc->push (atm_vcc, skb);
29537@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29538 } else {
29539 PRINTK (KERN_INFO, "dropped over-size frame");
29540 // should we count this?
29541- atomic_inc(&atm_vcc->stats->rx_drop);
29542+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29543 }
29544
29545 } else {
29546@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29547 }
29548
29549 if (check_area (skb->data, skb->len)) {
29550- atomic_inc(&atm_vcc->stats->tx_err);
29551+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29552 return -ENOMEM; // ?
29553 }
29554
29555diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29556index 02ad83d..6daffeb 100644
29557--- a/drivers/atm/atmtcp.c
29558+++ b/drivers/atm/atmtcp.c
29559@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29560 if (vcc->pop) vcc->pop(vcc,skb);
29561 else dev_kfree_skb(skb);
29562 if (dev_data) return 0;
29563- atomic_inc(&vcc->stats->tx_err);
29564+ atomic_inc_unchecked(&vcc->stats->tx_err);
29565 return -ENOLINK;
29566 }
29567 size = skb->len+sizeof(struct atmtcp_hdr);
29568@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29569 if (!new_skb) {
29570 if (vcc->pop) vcc->pop(vcc,skb);
29571 else dev_kfree_skb(skb);
29572- atomic_inc(&vcc->stats->tx_err);
29573+ atomic_inc_unchecked(&vcc->stats->tx_err);
29574 return -ENOBUFS;
29575 }
29576 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29577@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29578 if (vcc->pop) vcc->pop(vcc,skb);
29579 else dev_kfree_skb(skb);
29580 out_vcc->push(out_vcc,new_skb);
29581- atomic_inc(&vcc->stats->tx);
29582- atomic_inc(&out_vcc->stats->rx);
29583+ atomic_inc_unchecked(&vcc->stats->tx);
29584+ atomic_inc_unchecked(&out_vcc->stats->rx);
29585 return 0;
29586 }
29587
29588@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29589 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29590 read_unlock(&vcc_sklist_lock);
29591 if (!out_vcc) {
29592- atomic_inc(&vcc->stats->tx_err);
29593+ atomic_inc_unchecked(&vcc->stats->tx_err);
29594 goto done;
29595 }
29596 skb_pull(skb,sizeof(struct atmtcp_hdr));
29597@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29598 __net_timestamp(new_skb);
29599 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29600 out_vcc->push(out_vcc,new_skb);
29601- atomic_inc(&vcc->stats->tx);
29602- atomic_inc(&out_vcc->stats->rx);
29603+ atomic_inc_unchecked(&vcc->stats->tx);
29604+ atomic_inc_unchecked(&out_vcc->stats->rx);
29605 done:
29606 if (vcc->pop) vcc->pop(vcc,skb);
29607 else dev_kfree_skb(skb);
29608diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29609index 0c30261..3da356e 100644
29610--- a/drivers/atm/eni.c
29611+++ b/drivers/atm/eni.c
29612@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29613 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29614 vcc->dev->number);
29615 length = 0;
29616- atomic_inc(&vcc->stats->rx_err);
29617+ atomic_inc_unchecked(&vcc->stats->rx_err);
29618 }
29619 else {
29620 length = ATM_CELL_SIZE-1; /* no HEC */
29621@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29622 size);
29623 }
29624 eff = length = 0;
29625- atomic_inc(&vcc->stats->rx_err);
29626+ atomic_inc_unchecked(&vcc->stats->rx_err);
29627 }
29628 else {
29629 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29630@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29631 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29632 vcc->dev->number,vcc->vci,length,size << 2,descr);
29633 length = eff = 0;
29634- atomic_inc(&vcc->stats->rx_err);
29635+ atomic_inc_unchecked(&vcc->stats->rx_err);
29636 }
29637 }
29638 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29639@@ -770,7 +770,7 @@ rx_dequeued++;
29640 vcc->push(vcc,skb);
29641 pushed++;
29642 }
29643- atomic_inc(&vcc->stats->rx);
29644+ atomic_inc_unchecked(&vcc->stats->rx);
29645 }
29646 wake_up(&eni_dev->rx_wait);
29647 }
29648@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29649 PCI_DMA_TODEVICE);
29650 if (vcc->pop) vcc->pop(vcc,skb);
29651 else dev_kfree_skb_irq(skb);
29652- atomic_inc(&vcc->stats->tx);
29653+ atomic_inc_unchecked(&vcc->stats->tx);
29654 wake_up(&eni_dev->tx_wait);
29655 dma_complete++;
29656 }
29657@@ -1570,7 +1570,7 @@ tx_complete++;
29658 /*--------------------------------- entries ---------------------------------*/
29659
29660
29661-static const char *media_name[] __devinitdata = {
29662+static const char *media_name[] __devinitconst = {
29663 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29664 "UTP", "05?", "06?", "07?", /* 4- 7 */
29665 "TAXI","09?", "10?", "11?", /* 8-11 */
29666diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29667index cd5049a..a51209f 100644
29668--- a/drivers/atm/firestream.c
29669+++ b/drivers/atm/firestream.c
29670@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29671 }
29672 }
29673
29674- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29675+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29676
29677 fs_dprintk (FS_DEBUG_TXMEM, "i");
29678 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29679@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29680 #endif
29681 skb_put (skb, qe->p1 & 0xffff);
29682 ATM_SKB(skb)->vcc = atm_vcc;
29683- atomic_inc(&atm_vcc->stats->rx);
29684+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29685 __net_timestamp(skb);
29686 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29687 atm_vcc->push (atm_vcc, skb);
29688@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29689 kfree (pe);
29690 }
29691 if (atm_vcc)
29692- atomic_inc(&atm_vcc->stats->rx_drop);
29693+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29694 break;
29695 case 0x1f: /* Reassembly abort: no buffers. */
29696 /* Silently increment error counter. */
29697 if (atm_vcc)
29698- atomic_inc(&atm_vcc->stats->rx_drop);
29699+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29700 break;
29701 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29702 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29703diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29704index f766cc4..a34002e 100644
29705--- a/drivers/atm/fore200e.c
29706+++ b/drivers/atm/fore200e.c
29707@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29708 #endif
29709 /* check error condition */
29710 if (*entry->status & STATUS_ERROR)
29711- atomic_inc(&vcc->stats->tx_err);
29712+ atomic_inc_unchecked(&vcc->stats->tx_err);
29713 else
29714- atomic_inc(&vcc->stats->tx);
29715+ atomic_inc_unchecked(&vcc->stats->tx);
29716 }
29717 }
29718
29719@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29720 if (skb == NULL) {
29721 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29722
29723- atomic_inc(&vcc->stats->rx_drop);
29724+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29725 return -ENOMEM;
29726 }
29727
29728@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29729
29730 dev_kfree_skb_any(skb);
29731
29732- atomic_inc(&vcc->stats->rx_drop);
29733+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29734 return -ENOMEM;
29735 }
29736
29737 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29738
29739 vcc->push(vcc, skb);
29740- atomic_inc(&vcc->stats->rx);
29741+ atomic_inc_unchecked(&vcc->stats->rx);
29742
29743 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29744
29745@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29746 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29747 fore200e->atm_dev->number,
29748 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29749- atomic_inc(&vcc->stats->rx_err);
29750+ atomic_inc_unchecked(&vcc->stats->rx_err);
29751 }
29752 }
29753
29754@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29755 goto retry_here;
29756 }
29757
29758- atomic_inc(&vcc->stats->tx_err);
29759+ atomic_inc_unchecked(&vcc->stats->tx_err);
29760
29761 fore200e->tx_sat++;
29762 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29763diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29764index 7066703..2b130de 100644
29765--- a/drivers/atm/he.c
29766+++ b/drivers/atm/he.c
29767@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29768
29769 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29770 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29771- atomic_inc(&vcc->stats->rx_drop);
29772+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29773 goto return_host_buffers;
29774 }
29775
29776@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29777 RBRQ_LEN_ERR(he_dev->rbrq_head)
29778 ? "LEN_ERR" : "",
29779 vcc->vpi, vcc->vci);
29780- atomic_inc(&vcc->stats->rx_err);
29781+ atomic_inc_unchecked(&vcc->stats->rx_err);
29782 goto return_host_buffers;
29783 }
29784
29785@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29786 vcc->push(vcc, skb);
29787 spin_lock(&he_dev->global_lock);
29788
29789- atomic_inc(&vcc->stats->rx);
29790+ atomic_inc_unchecked(&vcc->stats->rx);
29791
29792 return_host_buffers:
29793 ++pdus_assembled;
29794@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29795 tpd->vcc->pop(tpd->vcc, tpd->skb);
29796 else
29797 dev_kfree_skb_any(tpd->skb);
29798- atomic_inc(&tpd->vcc->stats->tx_err);
29799+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29800 }
29801 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29802 return;
29803@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29804 vcc->pop(vcc, skb);
29805 else
29806 dev_kfree_skb_any(skb);
29807- atomic_inc(&vcc->stats->tx_err);
29808+ atomic_inc_unchecked(&vcc->stats->tx_err);
29809 return -EINVAL;
29810 }
29811
29812@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29813 vcc->pop(vcc, skb);
29814 else
29815 dev_kfree_skb_any(skb);
29816- atomic_inc(&vcc->stats->tx_err);
29817+ atomic_inc_unchecked(&vcc->stats->tx_err);
29818 return -EINVAL;
29819 }
29820 #endif
29821@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29822 vcc->pop(vcc, skb);
29823 else
29824 dev_kfree_skb_any(skb);
29825- atomic_inc(&vcc->stats->tx_err);
29826+ atomic_inc_unchecked(&vcc->stats->tx_err);
29827 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29828 return -ENOMEM;
29829 }
29830@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29831 vcc->pop(vcc, skb);
29832 else
29833 dev_kfree_skb_any(skb);
29834- atomic_inc(&vcc->stats->tx_err);
29835+ atomic_inc_unchecked(&vcc->stats->tx_err);
29836 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29837 return -ENOMEM;
29838 }
29839@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29840 __enqueue_tpd(he_dev, tpd, cid);
29841 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29842
29843- atomic_inc(&vcc->stats->tx);
29844+ atomic_inc_unchecked(&vcc->stats->tx);
29845
29846 return 0;
29847 }
29848diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29849index 4e49021..01b1512 100644
29850--- a/drivers/atm/horizon.c
29851+++ b/drivers/atm/horizon.c
29852@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29853 {
29854 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29855 // VC layer stats
29856- atomic_inc(&vcc->stats->rx);
29857+ atomic_inc_unchecked(&vcc->stats->rx);
29858 __net_timestamp(skb);
29859 // end of our responsability
29860 vcc->push (vcc, skb);
29861@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29862 dev->tx_iovec = NULL;
29863
29864 // VC layer stats
29865- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29866+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29867
29868 // free the skb
29869 hrz_kfree_skb (skb);
29870diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29871index e33ae00..9deb4ab 100644
29872--- a/drivers/atm/idt77252.c
29873+++ b/drivers/atm/idt77252.c
29874@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29875 else
29876 dev_kfree_skb(skb);
29877
29878- atomic_inc(&vcc->stats->tx);
29879+ atomic_inc_unchecked(&vcc->stats->tx);
29880 }
29881
29882 atomic_dec(&scq->used);
29883@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29884 if ((sb = dev_alloc_skb(64)) == NULL) {
29885 printk("%s: Can't allocate buffers for aal0.\n",
29886 card->name);
29887- atomic_add(i, &vcc->stats->rx_drop);
29888+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
29889 break;
29890 }
29891 if (!atm_charge(vcc, sb->truesize)) {
29892 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29893 card->name);
29894- atomic_add(i - 1, &vcc->stats->rx_drop);
29895+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29896 dev_kfree_skb(sb);
29897 break;
29898 }
29899@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29900 ATM_SKB(sb)->vcc = vcc;
29901 __net_timestamp(sb);
29902 vcc->push(vcc, sb);
29903- atomic_inc(&vcc->stats->rx);
29904+ atomic_inc_unchecked(&vcc->stats->rx);
29905
29906 cell += ATM_CELL_PAYLOAD;
29907 }
29908@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29909 "(CDC: %08x)\n",
29910 card->name, len, rpp->len, readl(SAR_REG_CDC));
29911 recycle_rx_pool_skb(card, rpp);
29912- atomic_inc(&vcc->stats->rx_err);
29913+ atomic_inc_unchecked(&vcc->stats->rx_err);
29914 return;
29915 }
29916 if (stat & SAR_RSQE_CRC) {
29917 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29918 recycle_rx_pool_skb(card, rpp);
29919- atomic_inc(&vcc->stats->rx_err);
29920+ atomic_inc_unchecked(&vcc->stats->rx_err);
29921 return;
29922 }
29923 if (skb_queue_len(&rpp->queue) > 1) {
29924@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29925 RXPRINTK("%s: Can't alloc RX skb.\n",
29926 card->name);
29927 recycle_rx_pool_skb(card, rpp);
29928- atomic_inc(&vcc->stats->rx_err);
29929+ atomic_inc_unchecked(&vcc->stats->rx_err);
29930 return;
29931 }
29932 if (!atm_charge(vcc, skb->truesize)) {
29933@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29934 __net_timestamp(skb);
29935
29936 vcc->push(vcc, skb);
29937- atomic_inc(&vcc->stats->rx);
29938+ atomic_inc_unchecked(&vcc->stats->rx);
29939
29940 return;
29941 }
29942@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29943 __net_timestamp(skb);
29944
29945 vcc->push(vcc, skb);
29946- atomic_inc(&vcc->stats->rx);
29947+ atomic_inc_unchecked(&vcc->stats->rx);
29948
29949 if (skb->truesize > SAR_FB_SIZE_3)
29950 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29951@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29952 if (vcc->qos.aal != ATM_AAL0) {
29953 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29954 card->name, vpi, vci);
29955- atomic_inc(&vcc->stats->rx_drop);
29956+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29957 goto drop;
29958 }
29959
29960 if ((sb = dev_alloc_skb(64)) == NULL) {
29961 printk("%s: Can't allocate buffers for AAL0.\n",
29962 card->name);
29963- atomic_inc(&vcc->stats->rx_err);
29964+ atomic_inc_unchecked(&vcc->stats->rx_err);
29965 goto drop;
29966 }
29967
29968@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29969 ATM_SKB(sb)->vcc = vcc;
29970 __net_timestamp(sb);
29971 vcc->push(vcc, sb);
29972- atomic_inc(&vcc->stats->rx);
29973+ atomic_inc_unchecked(&vcc->stats->rx);
29974
29975 drop:
29976 skb_pull(queue, 64);
29977@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29978
29979 if (vc == NULL) {
29980 printk("%s: NULL connection in send().\n", card->name);
29981- atomic_inc(&vcc->stats->tx_err);
29982+ atomic_inc_unchecked(&vcc->stats->tx_err);
29983 dev_kfree_skb(skb);
29984 return -EINVAL;
29985 }
29986 if (!test_bit(VCF_TX, &vc->flags)) {
29987 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29988- atomic_inc(&vcc->stats->tx_err);
29989+ atomic_inc_unchecked(&vcc->stats->tx_err);
29990 dev_kfree_skb(skb);
29991 return -EINVAL;
29992 }
29993@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29994 break;
29995 default:
29996 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29997- atomic_inc(&vcc->stats->tx_err);
29998+ atomic_inc_unchecked(&vcc->stats->tx_err);
29999 dev_kfree_skb(skb);
30000 return -EINVAL;
30001 }
30002
30003 if (skb_shinfo(skb)->nr_frags != 0) {
30004 printk("%s: No scatter-gather yet.\n", card->name);
30005- atomic_inc(&vcc->stats->tx_err);
30006+ atomic_inc_unchecked(&vcc->stats->tx_err);
30007 dev_kfree_skb(skb);
30008 return -EINVAL;
30009 }
30010@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30011
30012 err = queue_skb(card, vc, skb, oam);
30013 if (err) {
30014- atomic_inc(&vcc->stats->tx_err);
30015+ atomic_inc_unchecked(&vcc->stats->tx_err);
30016 dev_kfree_skb(skb);
30017 return err;
30018 }
30019@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30020 skb = dev_alloc_skb(64);
30021 if (!skb) {
30022 printk("%s: Out of memory in send_oam().\n", card->name);
30023- atomic_inc(&vcc->stats->tx_err);
30024+ atomic_inc_unchecked(&vcc->stats->tx_err);
30025 return -ENOMEM;
30026 }
30027 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30028diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30029index b2c1b37..faa672b 100644
30030--- a/drivers/atm/iphase.c
30031+++ b/drivers/atm/iphase.c
30032@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
30033 status = (u_short) (buf_desc_ptr->desc_mode);
30034 if (status & (RX_CER | RX_PTE | RX_OFL))
30035 {
30036- atomic_inc(&vcc->stats->rx_err);
30037+ atomic_inc_unchecked(&vcc->stats->rx_err);
30038 IF_ERR(printk("IA: bad packet, dropping it");)
30039 if (status & RX_CER) {
30040 IF_ERR(printk(" cause: packet CRC error\n");)
30041@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
30042 len = dma_addr - buf_addr;
30043 if (len > iadev->rx_buf_sz) {
30044 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30045- atomic_inc(&vcc->stats->rx_err);
30046+ atomic_inc_unchecked(&vcc->stats->rx_err);
30047 goto out_free_desc;
30048 }
30049
30050@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30051 ia_vcc = INPH_IA_VCC(vcc);
30052 if (ia_vcc == NULL)
30053 {
30054- atomic_inc(&vcc->stats->rx_err);
30055+ atomic_inc_unchecked(&vcc->stats->rx_err);
30056 dev_kfree_skb_any(skb);
30057 atm_return(vcc, atm_guess_pdu2truesize(len));
30058 goto INCR_DLE;
30059@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30060 if ((length > iadev->rx_buf_sz) || (length >
30061 (skb->len - sizeof(struct cpcs_trailer))))
30062 {
30063- atomic_inc(&vcc->stats->rx_err);
30064+ atomic_inc_unchecked(&vcc->stats->rx_err);
30065 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30066 length, skb->len);)
30067 dev_kfree_skb_any(skb);
30068@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30069
30070 IF_RX(printk("rx_dle_intr: skb push");)
30071 vcc->push(vcc,skb);
30072- atomic_inc(&vcc->stats->rx);
30073+ atomic_inc_unchecked(&vcc->stats->rx);
30074 iadev->rx_pkt_cnt++;
30075 }
30076 INCR_DLE:
30077@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30078 {
30079 struct k_sonet_stats *stats;
30080 stats = &PRIV(_ia_dev[board])->sonet_stats;
30081- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30082- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30083- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30084- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30085- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30086- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30087- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30088- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30089- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30090+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30091+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30092+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30093+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30094+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30095+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30096+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30097+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30098+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30099 }
30100 ia_cmds.status = 0;
30101 break;
30102@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30103 if ((desc == 0) || (desc > iadev->num_tx_desc))
30104 {
30105 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30106- atomic_inc(&vcc->stats->tx);
30107+ atomic_inc_unchecked(&vcc->stats->tx);
30108 if (vcc->pop)
30109 vcc->pop(vcc, skb);
30110 else
30111@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30112 ATM_DESC(skb) = vcc->vci;
30113 skb_queue_tail(&iadev->tx_dma_q, skb);
30114
30115- atomic_inc(&vcc->stats->tx);
30116+ atomic_inc_unchecked(&vcc->stats->tx);
30117 iadev->tx_pkt_cnt++;
30118 /* Increment transaction counter */
30119 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30120
30121 #if 0
30122 /* add flow control logic */
30123- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30124+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30125 if (iavcc->vc_desc_cnt > 10) {
30126 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30127 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30128diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30129index cf97c34..8d30655 100644
30130--- a/drivers/atm/lanai.c
30131+++ b/drivers/atm/lanai.c
30132@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30133 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30134 lanai_endtx(lanai, lvcc);
30135 lanai_free_skb(lvcc->tx.atmvcc, skb);
30136- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30137+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30138 }
30139
30140 /* Try to fill the buffer - don't call unless there is backlog */
30141@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30142 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30143 __net_timestamp(skb);
30144 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30145- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30146+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30147 out:
30148 lvcc->rx.buf.ptr = end;
30149 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30150@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30151 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30152 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30153 lanai->stats.service_rxnotaal5++;
30154- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30155+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30156 return 0;
30157 }
30158 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30159@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30160 int bytes;
30161 read_unlock(&vcc_sklist_lock);
30162 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30163- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30164+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30165 lvcc->stats.x.aal5.service_trash++;
30166 bytes = (SERVICE_GET_END(s) * 16) -
30167 (((unsigned long) lvcc->rx.buf.ptr) -
30168@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30169 }
30170 if (s & SERVICE_STREAM) {
30171 read_unlock(&vcc_sklist_lock);
30172- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30173+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30174 lvcc->stats.x.aal5.service_stream++;
30175 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30176 "PDU on VCI %d!\n", lanai->number, vci);
30177@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30178 return 0;
30179 }
30180 DPRINTK("got rx crc error on vci %d\n", vci);
30181- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30182+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30183 lvcc->stats.x.aal5.service_rxcrc++;
30184 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30185 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30186diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30187index 3da804b..d3b0eed 100644
30188--- a/drivers/atm/nicstar.c
30189+++ b/drivers/atm/nicstar.c
30190@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30191 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30192 {
30193 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30194- atomic_inc(&vcc->stats->tx_err);
30195+ atomic_inc_unchecked(&vcc->stats->tx_err);
30196 dev_kfree_skb_any(skb);
30197 return -EINVAL;
30198 }
30199@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30200 if (!vc->tx)
30201 {
30202 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30203- atomic_inc(&vcc->stats->tx_err);
30204+ atomic_inc_unchecked(&vcc->stats->tx_err);
30205 dev_kfree_skb_any(skb);
30206 return -EINVAL;
30207 }
30208@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30209 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30210 {
30211 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30212- atomic_inc(&vcc->stats->tx_err);
30213+ atomic_inc_unchecked(&vcc->stats->tx_err);
30214 dev_kfree_skb_any(skb);
30215 return -EINVAL;
30216 }
30217@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30218 if (skb_shinfo(skb)->nr_frags != 0)
30219 {
30220 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30221- atomic_inc(&vcc->stats->tx_err);
30222+ atomic_inc_unchecked(&vcc->stats->tx_err);
30223 dev_kfree_skb_any(skb);
30224 return -EINVAL;
30225 }
30226@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30227
30228 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30229 {
30230- atomic_inc(&vcc->stats->tx_err);
30231+ atomic_inc_unchecked(&vcc->stats->tx_err);
30232 dev_kfree_skb_any(skb);
30233 return -EIO;
30234 }
30235- atomic_inc(&vcc->stats->tx);
30236+ atomic_inc_unchecked(&vcc->stats->tx);
30237
30238 return 0;
30239 }
30240@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30241 {
30242 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30243 card->index);
30244- atomic_add(i,&vcc->stats->rx_drop);
30245+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
30246 break;
30247 }
30248 if (!atm_charge(vcc, sb->truesize))
30249 {
30250 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30251 card->index);
30252- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30253+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30254 dev_kfree_skb_any(sb);
30255 break;
30256 }
30257@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30258 ATM_SKB(sb)->vcc = vcc;
30259 __net_timestamp(sb);
30260 vcc->push(vcc, sb);
30261- atomic_inc(&vcc->stats->rx);
30262+ atomic_inc_unchecked(&vcc->stats->rx);
30263 cell += ATM_CELL_PAYLOAD;
30264 }
30265
30266@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30267 if (iovb == NULL)
30268 {
30269 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30270- atomic_inc(&vcc->stats->rx_drop);
30271+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30272 recycle_rx_buf(card, skb);
30273 return;
30274 }
30275@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30276 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30277 {
30278 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30279- atomic_inc(&vcc->stats->rx_err);
30280+ atomic_inc_unchecked(&vcc->stats->rx_err);
30281 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30282 NS_SKB(iovb)->iovcnt = 0;
30283 iovb->len = 0;
30284@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30285 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30286 card->index);
30287 which_list(card, skb);
30288- atomic_inc(&vcc->stats->rx_err);
30289+ atomic_inc_unchecked(&vcc->stats->rx_err);
30290 recycle_rx_buf(card, skb);
30291 vc->rx_iov = NULL;
30292 recycle_iov_buf(card, iovb);
30293@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30294 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30295 card->index);
30296 which_list(card, skb);
30297- atomic_inc(&vcc->stats->rx_err);
30298+ atomic_inc_unchecked(&vcc->stats->rx_err);
30299 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30300 NS_SKB(iovb)->iovcnt);
30301 vc->rx_iov = NULL;
30302@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30303 printk(" - PDU size mismatch.\n");
30304 else
30305 printk(".\n");
30306- atomic_inc(&vcc->stats->rx_err);
30307+ atomic_inc_unchecked(&vcc->stats->rx_err);
30308 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30309 NS_SKB(iovb)->iovcnt);
30310 vc->rx_iov = NULL;
30311@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30312 if (!atm_charge(vcc, skb->truesize))
30313 {
30314 push_rxbufs(card, skb);
30315- atomic_inc(&vcc->stats->rx_drop);
30316+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30317 }
30318 else
30319 {
30320@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30321 ATM_SKB(skb)->vcc = vcc;
30322 __net_timestamp(skb);
30323 vcc->push(vcc, skb);
30324- atomic_inc(&vcc->stats->rx);
30325+ atomic_inc_unchecked(&vcc->stats->rx);
30326 }
30327 }
30328 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30329@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30330 if (!atm_charge(vcc, sb->truesize))
30331 {
30332 push_rxbufs(card, sb);
30333- atomic_inc(&vcc->stats->rx_drop);
30334+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30335 }
30336 else
30337 {
30338@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30339 ATM_SKB(sb)->vcc = vcc;
30340 __net_timestamp(sb);
30341 vcc->push(vcc, sb);
30342- atomic_inc(&vcc->stats->rx);
30343+ atomic_inc_unchecked(&vcc->stats->rx);
30344 }
30345
30346 push_rxbufs(card, skb);
30347@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30348 if (!atm_charge(vcc, skb->truesize))
30349 {
30350 push_rxbufs(card, skb);
30351- atomic_inc(&vcc->stats->rx_drop);
30352+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30353 }
30354 else
30355 {
30356@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30357 ATM_SKB(skb)->vcc = vcc;
30358 __net_timestamp(skb);
30359 vcc->push(vcc, skb);
30360- atomic_inc(&vcc->stats->rx);
30361+ atomic_inc_unchecked(&vcc->stats->rx);
30362 }
30363
30364 push_rxbufs(card, sb);
30365@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30366 if (hb == NULL)
30367 {
30368 printk("nicstar%d: Out of huge buffers.\n", card->index);
30369- atomic_inc(&vcc->stats->rx_drop);
30370+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30371 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30372 NS_SKB(iovb)->iovcnt);
30373 vc->rx_iov = NULL;
30374@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30375 }
30376 else
30377 dev_kfree_skb_any(hb);
30378- atomic_inc(&vcc->stats->rx_drop);
30379+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30380 }
30381 else
30382 {
30383@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30384 #endif /* NS_USE_DESTRUCTORS */
30385 __net_timestamp(hb);
30386 vcc->push(vcc, hb);
30387- atomic_inc(&vcc->stats->rx);
30388+ atomic_inc_unchecked(&vcc->stats->rx);
30389 }
30390 }
30391
30392diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30393index 84c93ff..e6ed269 100644
30394--- a/drivers/atm/solos-pci.c
30395+++ b/drivers/atm/solos-pci.c
30396@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30397 }
30398 atm_charge(vcc, skb->truesize);
30399 vcc->push(vcc, skb);
30400- atomic_inc(&vcc->stats->rx);
30401+ atomic_inc_unchecked(&vcc->stats->rx);
30402 break;
30403
30404 case PKT_STATUS:
30405@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30406 char msg[500];
30407 char item[10];
30408
30409+ pax_track_stack();
30410+
30411 len = buf->len;
30412 for (i = 0; i < len; i++){
30413 if(i % 8 == 0)
30414@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30415 vcc = SKB_CB(oldskb)->vcc;
30416
30417 if (vcc) {
30418- atomic_inc(&vcc->stats->tx);
30419+ atomic_inc_unchecked(&vcc->stats->tx);
30420 solos_pop(vcc, oldskb);
30421 } else
30422 dev_kfree_skb_irq(oldskb);
30423diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30424index 6dd3f59..ee377f3 100644
30425--- a/drivers/atm/suni.c
30426+++ b/drivers/atm/suni.c
30427@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30428
30429
30430 #define ADD_LIMITED(s,v) \
30431- atomic_add((v),&stats->s); \
30432- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30433+ atomic_add_unchecked((v),&stats->s); \
30434+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30435
30436
30437 static void suni_hz(unsigned long from_timer)
30438diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30439index fc8cb07..4a80e53 100644
30440--- a/drivers/atm/uPD98402.c
30441+++ b/drivers/atm/uPD98402.c
30442@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30443 struct sonet_stats tmp;
30444 int error = 0;
30445
30446- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30447+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30448 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30449 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30450 if (zero && !error) {
30451@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30452
30453
30454 #define ADD_LIMITED(s,v) \
30455- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30456- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30457- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30458+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30459+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30460+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30461
30462
30463 static void stat_event(struct atm_dev *dev)
30464@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30465 if (reason & uPD98402_INT_PFM) stat_event(dev);
30466 if (reason & uPD98402_INT_PCO) {
30467 (void) GET(PCOCR); /* clear interrupt cause */
30468- atomic_add(GET(HECCT),
30469+ atomic_add_unchecked(GET(HECCT),
30470 &PRIV(dev)->sonet_stats.uncorr_hcs);
30471 }
30472 if ((reason & uPD98402_INT_RFO) &&
30473@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30474 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30475 uPD98402_INT_LOS),PIMR); /* enable them */
30476 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30477- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30478- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30479- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30480+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30481+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30482+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30483 return 0;
30484 }
30485
30486diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30487index 2e9635b..32927b4 100644
30488--- a/drivers/atm/zatm.c
30489+++ b/drivers/atm/zatm.c
30490@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30491 }
30492 if (!size) {
30493 dev_kfree_skb_irq(skb);
30494- if (vcc) atomic_inc(&vcc->stats->rx_err);
30495+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30496 continue;
30497 }
30498 if (!atm_charge(vcc,skb->truesize)) {
30499@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30500 skb->len = size;
30501 ATM_SKB(skb)->vcc = vcc;
30502 vcc->push(vcc,skb);
30503- atomic_inc(&vcc->stats->rx);
30504+ atomic_inc_unchecked(&vcc->stats->rx);
30505 }
30506 zout(pos & 0xffff,MTA(mbx));
30507 #if 0 /* probably a stupid idea */
30508@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30509 skb_queue_head(&zatm_vcc->backlog,skb);
30510 break;
30511 }
30512- atomic_inc(&vcc->stats->tx);
30513+ atomic_inc_unchecked(&vcc->stats->tx);
30514 wake_up(&zatm_vcc->tx_wait);
30515 }
30516
30517diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30518index 63c143e..fece183 100644
30519--- a/drivers/base/bus.c
30520+++ b/drivers/base/bus.c
30521@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30522 return ret;
30523 }
30524
30525-static struct sysfs_ops driver_sysfs_ops = {
30526+static const struct sysfs_ops driver_sysfs_ops = {
30527 .show = drv_attr_show,
30528 .store = drv_attr_store,
30529 };
30530@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30531 return ret;
30532 }
30533
30534-static struct sysfs_ops bus_sysfs_ops = {
30535+static const struct sysfs_ops bus_sysfs_ops = {
30536 .show = bus_attr_show,
30537 .store = bus_attr_store,
30538 };
30539@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30540 return 0;
30541 }
30542
30543-static struct kset_uevent_ops bus_uevent_ops = {
30544+static const struct kset_uevent_ops bus_uevent_ops = {
30545 .filter = bus_uevent_filter,
30546 };
30547
30548diff --git a/drivers/base/class.c b/drivers/base/class.c
30549index 6e2c3b0..cb61871 100644
30550--- a/drivers/base/class.c
30551+++ b/drivers/base/class.c
30552@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30553 kfree(cp);
30554 }
30555
30556-static struct sysfs_ops class_sysfs_ops = {
30557+static const struct sysfs_ops class_sysfs_ops = {
30558 .show = class_attr_show,
30559 .store = class_attr_store,
30560 };
30561diff --git a/drivers/base/core.c b/drivers/base/core.c
30562index f33d768..a9358d0 100644
30563--- a/drivers/base/core.c
30564+++ b/drivers/base/core.c
30565@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30566 return ret;
30567 }
30568
30569-static struct sysfs_ops dev_sysfs_ops = {
30570+static const struct sysfs_ops dev_sysfs_ops = {
30571 .show = dev_attr_show,
30572 .store = dev_attr_store,
30573 };
30574@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30575 return retval;
30576 }
30577
30578-static struct kset_uevent_ops device_uevent_ops = {
30579+static const struct kset_uevent_ops device_uevent_ops = {
30580 .filter = dev_uevent_filter,
30581 .name = dev_uevent_name,
30582 .uevent = dev_uevent,
30583diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30584index 989429c..2272b00 100644
30585--- a/drivers/base/memory.c
30586+++ b/drivers/base/memory.c
30587@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30588 return retval;
30589 }
30590
30591-static struct kset_uevent_ops memory_uevent_ops = {
30592+static const struct kset_uevent_ops memory_uevent_ops = {
30593 .name = memory_uevent_name,
30594 .uevent = memory_uevent,
30595 };
30596diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30597index 3f202f7..61c4a6f 100644
30598--- a/drivers/base/sys.c
30599+++ b/drivers/base/sys.c
30600@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30601 return -EIO;
30602 }
30603
30604-static struct sysfs_ops sysfs_ops = {
30605+static const struct sysfs_ops sysfs_ops = {
30606 .show = sysdev_show,
30607 .store = sysdev_store,
30608 };
30609@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30610 return -EIO;
30611 }
30612
30613-static struct sysfs_ops sysfs_class_ops = {
30614+static const struct sysfs_ops sysfs_class_ops = {
30615 .show = sysdev_class_show,
30616 .store = sysdev_class_store,
30617 };
30618diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30619index eb4fa19..1954777 100644
30620--- a/drivers/block/DAC960.c
30621+++ b/drivers/block/DAC960.c
30622@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30623 unsigned long flags;
30624 int Channel, TargetID;
30625
30626+ pax_track_stack();
30627+
30628 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30629 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30630 sizeof(DAC960_SCSI_Inquiry_T) +
30631diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30632index 68b90d9..7e2e3f3 100644
30633--- a/drivers/block/cciss.c
30634+++ b/drivers/block/cciss.c
30635@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30636 int err;
30637 u32 cp;
30638
30639+ memset(&arg64, 0, sizeof(arg64));
30640+
30641 err = 0;
30642 err |=
30643 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30644@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30645 /* Wait (up to 20 seconds) for a command to complete */
30646
30647 for (i = 20 * HZ; i > 0; i--) {
30648- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30649+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30650 if (done == FIFO_EMPTY)
30651 schedule_timeout_uninterruptible(1);
30652 else
30653@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30654 resend_cmd1:
30655
30656 /* Disable interrupt on the board. */
30657- h->access.set_intr_mask(h, CCISS_INTR_OFF);
30658+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
30659
30660 /* Make sure there is room in the command FIFO */
30661 /* Actually it should be completely empty at this time */
30662@@ -2884,13 +2886,13 @@ resend_cmd1:
30663 /* tape side of the driver. */
30664 for (i = 200000; i > 0; i--) {
30665 /* if fifo isn't full go */
30666- if (!(h->access.fifo_full(h)))
30667+ if (!(h->access->fifo_full(h)))
30668 break;
30669 udelay(10);
30670 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30671 " waiting!\n", h->ctlr);
30672 }
30673- h->access.submit_command(h, c); /* Send the cmd */
30674+ h->access->submit_command(h, c); /* Send the cmd */
30675 do {
30676 complete = pollcomplete(h->ctlr);
30677
30678@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30679 while (!hlist_empty(&h->reqQ)) {
30680 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30681 /* can't do anything if fifo is full */
30682- if ((h->access.fifo_full(h))) {
30683+ if ((h->access->fifo_full(h))) {
30684 printk(KERN_WARNING "cciss: fifo full\n");
30685 break;
30686 }
30687@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30688 h->Qdepth--;
30689
30690 /* Tell the controller execute command */
30691- h->access.submit_command(h, c);
30692+ h->access->submit_command(h, c);
30693
30694 /* Put job onto the completed Q */
30695 addQ(&h->cmpQ, c);
30696@@ -3393,17 +3395,17 @@ startio:
30697
30698 static inline unsigned long get_next_completion(ctlr_info_t *h)
30699 {
30700- return h->access.command_completed(h);
30701+ return h->access->command_completed(h);
30702 }
30703
30704 static inline int interrupt_pending(ctlr_info_t *h)
30705 {
30706- return h->access.intr_pending(h);
30707+ return h->access->intr_pending(h);
30708 }
30709
30710 static inline long interrupt_not_for_us(ctlr_info_t *h)
30711 {
30712- return (((h->access.intr_pending(h) == 0) ||
30713+ return (((h->access->intr_pending(h) == 0) ||
30714 (h->interrupts_enabled == 0)));
30715 }
30716
30717@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30718 */
30719 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30720 c->product_name = products[prod_index].product_name;
30721- c->access = *(products[prod_index].access);
30722+ c->access = products[prod_index].access;
30723 c->nr_cmds = c->max_commands - 4;
30724 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30725 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30726@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30727 }
30728
30729 /* make sure the board interrupts are off */
30730- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30731+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30732 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30733 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30734 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30735@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30736 cciss_scsi_setup(i);
30737
30738 /* Turn the interrupts on so we can service requests */
30739- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30740+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30741
30742 /* Get the firmware version */
30743 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30744diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30745index 04d6bf8..36e712d 100644
30746--- a/drivers/block/cciss.h
30747+++ b/drivers/block/cciss.h
30748@@ -90,7 +90,7 @@ struct ctlr_info
30749 // information about each logical volume
30750 drive_info_struct *drv[CISS_MAX_LUN];
30751
30752- struct access_method access;
30753+ struct access_method *access;
30754
30755 /* queue and queue Info */
30756 struct hlist_head reqQ;
30757diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30758index 6422651..bb1bdef 100644
30759--- a/drivers/block/cpqarray.c
30760+++ b/drivers/block/cpqarray.c
30761@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30762 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30763 goto Enomem4;
30764 }
30765- hba[i]->access.set_intr_mask(hba[i], 0);
30766+ hba[i]->access->set_intr_mask(hba[i], 0);
30767 if (request_irq(hba[i]->intr, do_ida_intr,
30768 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30769 {
30770@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30771 add_timer(&hba[i]->timer);
30772
30773 /* Enable IRQ now that spinlock and rate limit timer are set up */
30774- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30775+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30776
30777 for(j=0; j<NWD; j++) {
30778 struct gendisk *disk = ida_gendisk[i][j];
30779@@ -695,7 +695,7 @@ DBGINFO(
30780 for(i=0; i<NR_PRODUCTS; i++) {
30781 if (board_id == products[i].board_id) {
30782 c->product_name = products[i].product_name;
30783- c->access = *(products[i].access);
30784+ c->access = products[i].access;
30785 break;
30786 }
30787 }
30788@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30789 hba[ctlr]->intr = intr;
30790 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30791 hba[ctlr]->product_name = products[j].product_name;
30792- hba[ctlr]->access = *(products[j].access);
30793+ hba[ctlr]->access = products[j].access;
30794 hba[ctlr]->ctlr = ctlr;
30795 hba[ctlr]->board_id = board_id;
30796 hba[ctlr]->pci_dev = NULL; /* not PCI */
30797@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30798 struct scatterlist tmp_sg[SG_MAX];
30799 int i, dir, seg;
30800
30801+ pax_track_stack();
30802+
30803 if (blk_queue_plugged(q))
30804 goto startio;
30805
30806@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30807
30808 while((c = h->reqQ) != NULL) {
30809 /* Can't do anything if we're busy */
30810- if (h->access.fifo_full(h) == 0)
30811+ if (h->access->fifo_full(h) == 0)
30812 return;
30813
30814 /* Get the first entry from the request Q */
30815@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30816 h->Qdepth--;
30817
30818 /* Tell the controller to do our bidding */
30819- h->access.submit_command(h, c);
30820+ h->access->submit_command(h, c);
30821
30822 /* Get onto the completion Q */
30823 addQ(&h->cmpQ, c);
30824@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30825 unsigned long flags;
30826 __u32 a,a1;
30827
30828- istat = h->access.intr_pending(h);
30829+ istat = h->access->intr_pending(h);
30830 /* Is this interrupt for us? */
30831 if (istat == 0)
30832 return IRQ_NONE;
30833@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30834 */
30835 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30836 if (istat & FIFO_NOT_EMPTY) {
30837- while((a = h->access.command_completed(h))) {
30838+ while((a = h->access->command_completed(h))) {
30839 a1 = a; a &= ~3;
30840 if ((c = h->cmpQ) == NULL)
30841 {
30842@@ -1434,11 +1436,11 @@ static int sendcmd(
30843 /*
30844 * Disable interrupt
30845 */
30846- info_p->access.set_intr_mask(info_p, 0);
30847+ info_p->access->set_intr_mask(info_p, 0);
30848 /* Make sure there is room in the command FIFO */
30849 /* Actually it should be completely empty at this time. */
30850 for (i = 200000; i > 0; i--) {
30851- temp = info_p->access.fifo_full(info_p);
30852+ temp = info_p->access->fifo_full(info_p);
30853 if (temp != 0) {
30854 break;
30855 }
30856@@ -1451,7 +1453,7 @@ DBG(
30857 /*
30858 * Send the cmd
30859 */
30860- info_p->access.submit_command(info_p, c);
30861+ info_p->access->submit_command(info_p, c);
30862 complete = pollcomplete(ctlr);
30863
30864 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30865@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30866 * we check the new geometry. Then turn interrupts back on when
30867 * we're done.
30868 */
30869- host->access.set_intr_mask(host, 0);
30870+ host->access->set_intr_mask(host, 0);
30871 getgeometry(ctlr);
30872- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30873+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30874
30875 for(i=0; i<NWD; i++) {
30876 struct gendisk *disk = ida_gendisk[ctlr][i];
30877@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30878 /* Wait (up to 2 seconds) for a command to complete */
30879
30880 for (i = 200000; i > 0; i--) {
30881- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30882+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30883 if (done == 0) {
30884 udelay(10); /* a short fixed delay */
30885 } else
30886diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30887index be73e9d..7fbf140 100644
30888--- a/drivers/block/cpqarray.h
30889+++ b/drivers/block/cpqarray.h
30890@@ -99,7 +99,7 @@ struct ctlr_info {
30891 drv_info_t drv[NWD];
30892 struct proc_dir_entry *proc;
30893
30894- struct access_method access;
30895+ struct access_method *access;
30896
30897 cmdlist_t *reqQ;
30898 cmdlist_t *cmpQ;
30899diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30900index 8ec2d70..2804b30 100644
30901--- a/drivers/block/loop.c
30902+++ b/drivers/block/loop.c
30903@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30904 mm_segment_t old_fs = get_fs();
30905
30906 set_fs(get_ds());
30907- bw = file->f_op->write(file, buf, len, &pos);
30908+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30909 set_fs(old_fs);
30910 if (likely(bw == len))
30911 return 0;
30912diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30913index 26ada47..083c480 100644
30914--- a/drivers/block/nbd.c
30915+++ b/drivers/block/nbd.c
30916@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30917 struct kvec iov;
30918 sigset_t blocked, oldset;
30919
30920+ pax_track_stack();
30921+
30922 if (unlikely(!sock)) {
30923 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30924 lo->disk->disk_name, (send ? "send" : "recv"));
30925@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30926 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30927 unsigned int cmd, unsigned long arg)
30928 {
30929+ pax_track_stack();
30930+
30931 switch (cmd) {
30932 case NBD_DISCONNECT: {
30933 struct request sreq;
30934diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30935index a5d585d..d087be3 100644
30936--- a/drivers/block/pktcdvd.c
30937+++ b/drivers/block/pktcdvd.c
30938@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30939 return len;
30940 }
30941
30942-static struct sysfs_ops kobj_pkt_ops = {
30943+static const struct sysfs_ops kobj_pkt_ops = {
30944 .show = kobj_pkt_show,
30945 .store = kobj_pkt_store
30946 };
30947diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30948index 6aad99e..89cd142 100644
30949--- a/drivers/char/Kconfig
30950+++ b/drivers/char/Kconfig
30951@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30952
30953 config DEVKMEM
30954 bool "/dev/kmem virtual device support"
30955- default y
30956+ default n
30957+ depends on !GRKERNSEC_KMEM
30958 help
30959 Say Y here if you want to support the /dev/kmem device. The
30960 /dev/kmem device is rarely used, but can be used for certain
30961@@ -1114,6 +1115,7 @@ config DEVPORT
30962 bool
30963 depends on !M68K
30964 depends on ISA || PCI
30965+ depends on !GRKERNSEC_KMEM
30966 default y
30967
30968 source "drivers/s390/char/Kconfig"
30969diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30970index a96f319..a778a5b 100644
30971--- a/drivers/char/agp/frontend.c
30972+++ b/drivers/char/agp/frontend.c
30973@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30974 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30975 return -EFAULT;
30976
30977- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30978+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30979 return -EFAULT;
30980
30981 client = agp_find_client_by_pid(reserve.pid);
30982diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30983index d8cff90..9628e70 100644
30984--- a/drivers/char/briq_panel.c
30985+++ b/drivers/char/briq_panel.c
30986@@ -10,6 +10,7 @@
30987 #include <linux/types.h>
30988 #include <linux/errno.h>
30989 #include <linux/tty.h>
30990+#include <linux/mutex.h>
30991 #include <linux/timer.h>
30992 #include <linux/kernel.h>
30993 #include <linux/wait.h>
30994@@ -36,6 +37,7 @@ static int vfd_is_open;
30995 static unsigned char vfd[40];
30996 static int vfd_cursor;
30997 static unsigned char ledpb, led;
30998+static DEFINE_MUTEX(vfd_mutex);
30999
31000 static void update_vfd(void)
31001 {
31002@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31003 if (!vfd_is_open)
31004 return -EBUSY;
31005
31006+ mutex_lock(&vfd_mutex);
31007 for (;;) {
31008 char c;
31009 if (!indx)
31010 break;
31011- if (get_user(c, buf))
31012+ if (get_user(c, buf)) {
31013+ mutex_unlock(&vfd_mutex);
31014 return -EFAULT;
31015+ }
31016 if (esc) {
31017 set_led(c);
31018 esc = 0;
31019@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31020 buf++;
31021 }
31022 update_vfd();
31023+ mutex_unlock(&vfd_mutex);
31024
31025 return len;
31026 }
31027diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31028index 31e7c91..161afc0 100644
31029--- a/drivers/char/genrtc.c
31030+++ b/drivers/char/genrtc.c
31031@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
31032 switch (cmd) {
31033
31034 case RTC_PLL_GET:
31035+ memset(&pll, 0, sizeof(pll));
31036 if (get_rtc_pll(&pll))
31037 return -EINVAL;
31038 else
31039diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31040index 006466d..a2bb21c 100644
31041--- a/drivers/char/hpet.c
31042+++ b/drivers/char/hpet.c
31043@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
31044 return 0;
31045 }
31046
31047-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
31048+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
31049
31050 static int
31051 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
31052@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31053 }
31054
31055 static int
31056-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31057+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
31058 {
31059 struct hpet_timer __iomem *timer;
31060 struct hpet __iomem *hpet;
31061@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31062 {
31063 struct hpet_info info;
31064
31065+ memset(&info, 0, sizeof(info));
31066+
31067 if (devp->hd_ireqfreq)
31068 info.hi_ireqfreq =
31069 hpet_time_div(hpetp, devp->hd_ireqfreq);
31070- else
31071- info.hi_ireqfreq = 0;
31072 info.hi_flags =
31073 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
31074 info.hi_hpet = hpetp->hp_which;
31075diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
31076index 0afc8b8..6913fc3 100644
31077--- a/drivers/char/hvc_beat.c
31078+++ b/drivers/char/hvc_beat.c
31079@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
31080 return cnt;
31081 }
31082
31083-static struct hv_ops hvc_beat_get_put_ops = {
31084+static const struct hv_ops hvc_beat_get_put_ops = {
31085 .get_chars = hvc_beat_get_chars,
31086 .put_chars = hvc_beat_put_chars,
31087 };
31088diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
31089index 98097f2..407dddc 100644
31090--- a/drivers/char/hvc_console.c
31091+++ b/drivers/char/hvc_console.c
31092@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
31093 * console interfaces but can still be used as a tty device. This has to be
31094 * static because kmalloc will not work during early console init.
31095 */
31096-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31097+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31098 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
31099 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
31100
31101@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
31102 * vty adapters do NOT get an hvc_instantiate() callback since they
31103 * appear after early console init.
31104 */
31105-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
31106+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31107 {
31108 struct hvc_struct *hp;
31109
31110@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31111 };
31112
31113 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31114- struct hv_ops *ops, int outbuf_size)
31115+ const struct hv_ops *ops, int outbuf_size)
31116 {
31117 struct hvc_struct *hp;
31118 int i;
31119diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31120index 10950ca..ed176c3 100644
31121--- a/drivers/char/hvc_console.h
31122+++ b/drivers/char/hvc_console.h
31123@@ -55,7 +55,7 @@ struct hvc_struct {
31124 int outbuf_size;
31125 int n_outbuf;
31126 uint32_t vtermno;
31127- struct hv_ops *ops;
31128+ const struct hv_ops *ops;
31129 int irq_requested;
31130 int data;
31131 struct winsize ws;
31132@@ -76,11 +76,11 @@ struct hv_ops {
31133 };
31134
31135 /* Register a vterm and a slot index for use as a console (console_init) */
31136-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31137+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31138
31139 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31140 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31141- struct hv_ops *ops, int outbuf_size);
31142+ const struct hv_ops *ops, int outbuf_size);
31143 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31144 extern int hvc_remove(struct hvc_struct *hp);
31145
31146diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31147index 936d05b..fd02426 100644
31148--- a/drivers/char/hvc_iseries.c
31149+++ b/drivers/char/hvc_iseries.c
31150@@ -197,7 +197,7 @@ done:
31151 return sent;
31152 }
31153
31154-static struct hv_ops hvc_get_put_ops = {
31155+static const struct hv_ops hvc_get_put_ops = {
31156 .get_chars = get_chars,
31157 .put_chars = put_chars,
31158 .notifier_add = notifier_add_irq,
31159diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31160index b0e168f..69cda2a 100644
31161--- a/drivers/char/hvc_iucv.c
31162+++ b/drivers/char/hvc_iucv.c
31163@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31164
31165
31166 /* HVC operations */
31167-static struct hv_ops hvc_iucv_ops = {
31168+static const struct hv_ops hvc_iucv_ops = {
31169 .get_chars = hvc_iucv_get_chars,
31170 .put_chars = hvc_iucv_put_chars,
31171 .notifier_add = hvc_iucv_notifier_add,
31172diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31173index 88590d0..61c4a61 100644
31174--- a/drivers/char/hvc_rtas.c
31175+++ b/drivers/char/hvc_rtas.c
31176@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31177 return i;
31178 }
31179
31180-static struct hv_ops hvc_rtas_get_put_ops = {
31181+static const struct hv_ops hvc_rtas_get_put_ops = {
31182 .get_chars = hvc_rtas_read_console,
31183 .put_chars = hvc_rtas_write_console,
31184 };
31185diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31186index bd63ba8..b0957e6 100644
31187--- a/drivers/char/hvc_udbg.c
31188+++ b/drivers/char/hvc_udbg.c
31189@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31190 return i;
31191 }
31192
31193-static struct hv_ops hvc_udbg_ops = {
31194+static const struct hv_ops hvc_udbg_ops = {
31195 .get_chars = hvc_udbg_get,
31196 .put_chars = hvc_udbg_put,
31197 };
31198diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31199index 10be343..27370e9 100644
31200--- a/drivers/char/hvc_vio.c
31201+++ b/drivers/char/hvc_vio.c
31202@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31203 return got;
31204 }
31205
31206-static struct hv_ops hvc_get_put_ops = {
31207+static const struct hv_ops hvc_get_put_ops = {
31208 .get_chars = filtered_get_chars,
31209 .put_chars = hvc_put_chars,
31210 .notifier_add = notifier_add_irq,
31211diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31212index a6ee32b..94f8c26 100644
31213--- a/drivers/char/hvc_xen.c
31214+++ b/drivers/char/hvc_xen.c
31215@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31216 return recv;
31217 }
31218
31219-static struct hv_ops hvc_ops = {
31220+static const struct hv_ops hvc_ops = {
31221 .get_chars = read_console,
31222 .put_chars = write_console,
31223 .notifier_add = notifier_add_irq,
31224diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31225index 266b858..f3ee0bb 100644
31226--- a/drivers/char/hvcs.c
31227+++ b/drivers/char/hvcs.c
31228@@ -82,6 +82,7 @@
31229 #include <asm/hvcserver.h>
31230 #include <asm/uaccess.h>
31231 #include <asm/vio.h>
31232+#include <asm/local.h>
31233
31234 /*
31235 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31236@@ -269,7 +270,7 @@ struct hvcs_struct {
31237 unsigned int index;
31238
31239 struct tty_struct *tty;
31240- int open_count;
31241+ local_t open_count;
31242
31243 /*
31244 * Used to tell the driver kernel_thread what operations need to take
31245@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31246
31247 spin_lock_irqsave(&hvcsd->lock, flags);
31248
31249- if (hvcsd->open_count > 0) {
31250+ if (local_read(&hvcsd->open_count) > 0) {
31251 spin_unlock_irqrestore(&hvcsd->lock, flags);
31252 printk(KERN_INFO "HVCS: vterm state unchanged. "
31253 "The hvcs device node is still in use.\n");
31254@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31255 if ((retval = hvcs_partner_connect(hvcsd)))
31256 goto error_release;
31257
31258- hvcsd->open_count = 1;
31259+ local_set(&hvcsd->open_count, 1);
31260 hvcsd->tty = tty;
31261 tty->driver_data = hvcsd;
31262
31263@@ -1169,7 +1170,7 @@ fast_open:
31264
31265 spin_lock_irqsave(&hvcsd->lock, flags);
31266 kref_get(&hvcsd->kref);
31267- hvcsd->open_count++;
31268+ local_inc(&hvcsd->open_count);
31269 hvcsd->todo_mask |= HVCS_SCHED_READ;
31270 spin_unlock_irqrestore(&hvcsd->lock, flags);
31271
31272@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31273 hvcsd = tty->driver_data;
31274
31275 spin_lock_irqsave(&hvcsd->lock, flags);
31276- if (--hvcsd->open_count == 0) {
31277+ if (local_dec_and_test(&hvcsd->open_count)) {
31278
31279 vio_disable_interrupts(hvcsd->vdev);
31280
31281@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31282 free_irq(irq, hvcsd);
31283 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31284 return;
31285- } else if (hvcsd->open_count < 0) {
31286+ } else if (local_read(&hvcsd->open_count) < 0) {
31287 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31288 " is missmanaged.\n",
31289- hvcsd->vdev->unit_address, hvcsd->open_count);
31290+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31291 }
31292
31293 spin_unlock_irqrestore(&hvcsd->lock, flags);
31294@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31295
31296 spin_lock_irqsave(&hvcsd->lock, flags);
31297 /* Preserve this so that we know how many kref refs to put */
31298- temp_open_count = hvcsd->open_count;
31299+ temp_open_count = local_read(&hvcsd->open_count);
31300
31301 /*
31302 * Don't kref put inside the spinlock because the destruction
31303@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31304 hvcsd->tty->driver_data = NULL;
31305 hvcsd->tty = NULL;
31306
31307- hvcsd->open_count = 0;
31308+ local_set(&hvcsd->open_count, 0);
31309
31310 /* This will drop any buffered data on the floor which is OK in a hangup
31311 * scenario. */
31312@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31313 * the middle of a write operation? This is a crummy place to do this
31314 * but we want to keep it all in the spinlock.
31315 */
31316- if (hvcsd->open_count <= 0) {
31317+ if (local_read(&hvcsd->open_count) <= 0) {
31318 spin_unlock_irqrestore(&hvcsd->lock, flags);
31319 return -ENODEV;
31320 }
31321@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31322 {
31323 struct hvcs_struct *hvcsd = tty->driver_data;
31324
31325- if (!hvcsd || hvcsd->open_count <= 0)
31326+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31327 return 0;
31328
31329 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31330diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31331index ec5e3f8..02455ba 100644
31332--- a/drivers/char/ipmi/ipmi_msghandler.c
31333+++ b/drivers/char/ipmi/ipmi_msghandler.c
31334@@ -414,7 +414,7 @@ struct ipmi_smi {
31335 struct proc_dir_entry *proc_dir;
31336 char proc_dir_name[10];
31337
31338- atomic_t stats[IPMI_NUM_STATS];
31339+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31340
31341 /*
31342 * run_to_completion duplicate of smb_info, smi_info
31343@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31344
31345
31346 #define ipmi_inc_stat(intf, stat) \
31347- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31348+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31349 #define ipmi_get_stat(intf, stat) \
31350- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31351+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31352
31353 static int is_lan_addr(struct ipmi_addr *addr)
31354 {
31355@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31356 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31357 init_waitqueue_head(&intf->waitq);
31358 for (i = 0; i < IPMI_NUM_STATS; i++)
31359- atomic_set(&intf->stats[i], 0);
31360+ atomic_set_unchecked(&intf->stats[i], 0);
31361
31362 intf->proc_dir = NULL;
31363
31364@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31365 struct ipmi_smi_msg smi_msg;
31366 struct ipmi_recv_msg recv_msg;
31367
31368+ pax_track_stack();
31369+
31370 si = (struct ipmi_system_interface_addr *) &addr;
31371 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31372 si->channel = IPMI_BMC_CHANNEL;
31373diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31374index abae8c9..8021979 100644
31375--- a/drivers/char/ipmi/ipmi_si_intf.c
31376+++ b/drivers/char/ipmi/ipmi_si_intf.c
31377@@ -277,7 +277,7 @@ struct smi_info {
31378 unsigned char slave_addr;
31379
31380 /* Counters and things for the proc filesystem. */
31381- atomic_t stats[SI_NUM_STATS];
31382+ atomic_unchecked_t stats[SI_NUM_STATS];
31383
31384 struct task_struct *thread;
31385
31386@@ -285,9 +285,9 @@ struct smi_info {
31387 };
31388
31389 #define smi_inc_stat(smi, stat) \
31390- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31391+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31392 #define smi_get_stat(smi, stat) \
31393- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31394+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31395
31396 #define SI_MAX_PARMS 4
31397
31398@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31399 atomic_set(&new_smi->req_events, 0);
31400 new_smi->run_to_completion = 0;
31401 for (i = 0; i < SI_NUM_STATS; i++)
31402- atomic_set(&new_smi->stats[i], 0);
31403+ atomic_set_unchecked(&new_smi->stats[i], 0);
31404
31405 new_smi->interrupt_disabled = 0;
31406 atomic_set(&new_smi->stop_operation, 0);
31407diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31408index 402838f..55e2200 100644
31409--- a/drivers/char/istallion.c
31410+++ b/drivers/char/istallion.c
31411@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31412 * re-used for each stats call.
31413 */
31414 static comstats_t stli_comstats;
31415-static combrd_t stli_brdstats;
31416 static struct asystats stli_cdkstats;
31417
31418 /*****************************************************************************/
31419@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31420 {
31421 struct stlibrd *brdp;
31422 unsigned int i;
31423+ combrd_t stli_brdstats;
31424
31425 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31426 return -EFAULT;
31427@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31428 struct stliport stli_dummyport;
31429 struct stliport *portp;
31430
31431+ pax_track_stack();
31432+
31433 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31434 return -EFAULT;
31435 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31436@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31437 struct stlibrd stli_dummybrd;
31438 struct stlibrd *brdp;
31439
31440+ pax_track_stack();
31441+
31442 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31443 return -EFAULT;
31444 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31445diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31446index 950837c..e55a288 100644
31447--- a/drivers/char/keyboard.c
31448+++ b/drivers/char/keyboard.c
31449@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31450 kbd->kbdmode == VC_MEDIUMRAW) &&
31451 value != KVAL(K_SAK))
31452 return; /* SAK is allowed even in raw mode */
31453+
31454+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31455+ {
31456+ void *func = fn_handler[value];
31457+ if (func == fn_show_state || func == fn_show_ptregs ||
31458+ func == fn_show_mem)
31459+ return;
31460+ }
31461+#endif
31462+
31463 fn_handler[value](vc);
31464 }
31465
31466@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31467 .evbit = { BIT_MASK(EV_SND) },
31468 },
31469
31470- { }, /* Terminating entry */
31471+ { 0 }, /* Terminating entry */
31472 };
31473
31474 MODULE_DEVICE_TABLE(input, kbd_ids);
31475diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31476index 87c67b4..230527a 100644
31477--- a/drivers/char/mbcs.c
31478+++ b/drivers/char/mbcs.c
31479@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31480 return 0;
31481 }
31482
31483-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31484+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31485 {
31486 .part_num = MBCS_PART_NUM,
31487 .mfg_num = MBCS_MFG_NUM,
31488diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31489index 1270f64..8495f49 100644
31490--- a/drivers/char/mem.c
31491+++ b/drivers/char/mem.c
31492@@ -18,6 +18,7 @@
31493 #include <linux/raw.h>
31494 #include <linux/tty.h>
31495 #include <linux/capability.h>
31496+#include <linux/security.h>
31497 #include <linux/ptrace.h>
31498 #include <linux/device.h>
31499 #include <linux/highmem.h>
31500@@ -35,6 +36,10 @@
31501 # include <linux/efi.h>
31502 #endif
31503
31504+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31505+extern struct file_operations grsec_fops;
31506+#endif
31507+
31508 static inline unsigned long size_inside_page(unsigned long start,
31509 unsigned long size)
31510 {
31511@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31512
31513 while (cursor < to) {
31514 if (!devmem_is_allowed(pfn)) {
31515+#ifdef CONFIG_GRKERNSEC_KMEM
31516+ gr_handle_mem_readwrite(from, to);
31517+#else
31518 printk(KERN_INFO
31519 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31520 current->comm, from, to);
31521+#endif
31522 return 0;
31523 }
31524 cursor += PAGE_SIZE;
31525@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31526 }
31527 return 1;
31528 }
31529+#elif defined(CONFIG_GRKERNSEC_KMEM)
31530+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31531+{
31532+ return 0;
31533+}
31534 #else
31535 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31536 {
31537@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31538 #endif
31539
31540 while (count > 0) {
31541+ char *temp;
31542+
31543 /*
31544 * Handle first page in case it's not aligned
31545 */
31546@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31547 if (!ptr)
31548 return -EFAULT;
31549
31550- if (copy_to_user(buf, ptr, sz)) {
31551+#ifdef CONFIG_PAX_USERCOPY
31552+ temp = kmalloc(sz, GFP_KERNEL);
31553+ if (!temp) {
31554+ unxlate_dev_mem_ptr(p, ptr);
31555+ return -ENOMEM;
31556+ }
31557+ memcpy(temp, ptr, sz);
31558+#else
31559+ temp = ptr;
31560+#endif
31561+
31562+ if (copy_to_user(buf, temp, sz)) {
31563+
31564+#ifdef CONFIG_PAX_USERCOPY
31565+ kfree(temp);
31566+#endif
31567+
31568 unxlate_dev_mem_ptr(p, ptr);
31569 return -EFAULT;
31570 }
31571
31572+#ifdef CONFIG_PAX_USERCOPY
31573+ kfree(temp);
31574+#endif
31575+
31576 unxlate_dev_mem_ptr(p, ptr);
31577
31578 buf += sz;
31579@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31580 size_t count, loff_t *ppos)
31581 {
31582 unsigned long p = *ppos;
31583- ssize_t low_count, read, sz;
31584+ ssize_t low_count, read, sz, err = 0;
31585 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31586- int err = 0;
31587
31588 read = 0;
31589 if (p < (unsigned long) high_memory) {
31590@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31591 }
31592 #endif
31593 while (low_count > 0) {
31594+ char *temp;
31595+
31596 sz = size_inside_page(p, low_count);
31597
31598 /*
31599@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31600 */
31601 kbuf = xlate_dev_kmem_ptr((char *)p);
31602
31603- if (copy_to_user(buf, kbuf, sz))
31604+#ifdef CONFIG_PAX_USERCOPY
31605+ temp = kmalloc(sz, GFP_KERNEL);
31606+ if (!temp)
31607+ return -ENOMEM;
31608+ memcpy(temp, kbuf, sz);
31609+#else
31610+ temp = kbuf;
31611+#endif
31612+
31613+ err = copy_to_user(buf, temp, sz);
31614+
31615+#ifdef CONFIG_PAX_USERCOPY
31616+ kfree(temp);
31617+#endif
31618+
31619+ if (err)
31620 return -EFAULT;
31621 buf += sz;
31622 p += sz;
31623@@ -889,6 +941,9 @@ static const struct memdev {
31624 #ifdef CONFIG_CRASH_DUMP
31625 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31626 #endif
31627+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31628+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31629+#endif
31630 };
31631
31632 static int memory_open(struct inode *inode, struct file *filp)
31633diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
31634index 918711a..4ffaf5e 100644
31635--- a/drivers/char/mmtimer.c
31636+++ b/drivers/char/mmtimer.c
31637@@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
31638 return err;
31639 }
31640
31641-static struct k_clock sgi_clock = {
31642+static k_clock_no_const sgi_clock = {
31643 .res = 0,
31644 .clock_set = sgi_clock_set,
31645 .clock_get = sgi_clock_get,
31646diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31647index 674b3ab..a8d1970 100644
31648--- a/drivers/char/pcmcia/ipwireless/tty.c
31649+++ b/drivers/char/pcmcia/ipwireless/tty.c
31650@@ -29,6 +29,7 @@
31651 #include <linux/tty_driver.h>
31652 #include <linux/tty_flip.h>
31653 #include <linux/uaccess.h>
31654+#include <asm/local.h>
31655
31656 #include "tty.h"
31657 #include "network.h"
31658@@ -51,7 +52,7 @@ struct ipw_tty {
31659 int tty_type;
31660 struct ipw_network *network;
31661 struct tty_struct *linux_tty;
31662- int open_count;
31663+ local_t open_count;
31664 unsigned int control_lines;
31665 struct mutex ipw_tty_mutex;
31666 int tx_bytes_queued;
31667@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31668 mutex_unlock(&tty->ipw_tty_mutex);
31669 return -ENODEV;
31670 }
31671- if (tty->open_count == 0)
31672+ if (local_read(&tty->open_count) == 0)
31673 tty->tx_bytes_queued = 0;
31674
31675- tty->open_count++;
31676+ local_inc(&tty->open_count);
31677
31678 tty->linux_tty = linux_tty;
31679 linux_tty->driver_data = tty;
31680@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31681
31682 static void do_ipw_close(struct ipw_tty *tty)
31683 {
31684- tty->open_count--;
31685-
31686- if (tty->open_count == 0) {
31687+ if (local_dec_return(&tty->open_count) == 0) {
31688 struct tty_struct *linux_tty = tty->linux_tty;
31689
31690 if (linux_tty != NULL) {
31691@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31692 return;
31693
31694 mutex_lock(&tty->ipw_tty_mutex);
31695- if (tty->open_count == 0) {
31696+ if (local_read(&tty->open_count) == 0) {
31697 mutex_unlock(&tty->ipw_tty_mutex);
31698 return;
31699 }
31700@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31701 return;
31702 }
31703
31704- if (!tty->open_count) {
31705+ if (!local_read(&tty->open_count)) {
31706 mutex_unlock(&tty->ipw_tty_mutex);
31707 return;
31708 }
31709@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31710 return -ENODEV;
31711
31712 mutex_lock(&tty->ipw_tty_mutex);
31713- if (!tty->open_count) {
31714+ if (!local_read(&tty->open_count)) {
31715 mutex_unlock(&tty->ipw_tty_mutex);
31716 return -EINVAL;
31717 }
31718@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31719 if (!tty)
31720 return -ENODEV;
31721
31722- if (!tty->open_count)
31723+ if (!local_read(&tty->open_count))
31724 return -EINVAL;
31725
31726 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31727@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31728 if (!tty)
31729 return 0;
31730
31731- if (!tty->open_count)
31732+ if (!local_read(&tty->open_count))
31733 return 0;
31734
31735 return tty->tx_bytes_queued;
31736@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31737 if (!tty)
31738 return -ENODEV;
31739
31740- if (!tty->open_count)
31741+ if (!local_read(&tty->open_count))
31742 return -EINVAL;
31743
31744 return get_control_lines(tty);
31745@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31746 if (!tty)
31747 return -ENODEV;
31748
31749- if (!tty->open_count)
31750+ if (!local_read(&tty->open_count))
31751 return -EINVAL;
31752
31753 return set_control_lines(tty, set, clear);
31754@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31755 if (!tty)
31756 return -ENODEV;
31757
31758- if (!tty->open_count)
31759+ if (!local_read(&tty->open_count))
31760 return -EINVAL;
31761
31762 /* FIXME: Exactly how is the tty object locked here .. */
31763@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31764 against a parallel ioctl etc */
31765 mutex_lock(&ttyj->ipw_tty_mutex);
31766 }
31767- while (ttyj->open_count)
31768+ while (local_read(&ttyj->open_count))
31769 do_ipw_close(ttyj);
31770 ipwireless_disassociate_network_ttys(network,
31771 ttyj->channel_idx);
31772diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31773index 62f282e..e45c45c 100644
31774--- a/drivers/char/pty.c
31775+++ b/drivers/char/pty.c
31776@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31777 register_sysctl_table(pty_root_table);
31778
31779 /* Now create the /dev/ptmx special device */
31780+ pax_open_kernel();
31781 tty_default_fops(&ptmx_fops);
31782- ptmx_fops.open = ptmx_open;
31783+ *(void **)&ptmx_fops.open = ptmx_open;
31784+ pax_close_kernel();
31785
31786 cdev_init(&ptmx_cdev, &ptmx_fops);
31787 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31788diff --git a/drivers/char/random.c b/drivers/char/random.c
31789index 3a19e2d..6ed09d3 100644
31790--- a/drivers/char/random.c
31791+++ b/drivers/char/random.c
31792@@ -254,8 +254,13 @@
31793 /*
31794 * Configuration information
31795 */
31796+#ifdef CONFIG_GRKERNSEC_RANDNET
31797+#define INPUT_POOL_WORDS 512
31798+#define OUTPUT_POOL_WORDS 128
31799+#else
31800 #define INPUT_POOL_WORDS 128
31801 #define OUTPUT_POOL_WORDS 32
31802+#endif
31803 #define SEC_XFER_SIZE 512
31804
31805 /*
31806@@ -292,10 +297,17 @@ static struct poolinfo {
31807 int poolwords;
31808 int tap1, tap2, tap3, tap4, tap5;
31809 } poolinfo_table[] = {
31810+#ifdef CONFIG_GRKERNSEC_RANDNET
31811+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31812+ { 512, 411, 308, 208, 104, 1 },
31813+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31814+ { 128, 103, 76, 51, 25, 1 },
31815+#else
31816 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31817 { 128, 103, 76, 51, 25, 1 },
31818 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31819 { 32, 26, 20, 14, 7, 1 },
31820+#endif
31821 #if 0
31822 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31823 { 2048, 1638, 1231, 819, 411, 1 },
31824@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31825 #include <linux/sysctl.h>
31826
31827 static int min_read_thresh = 8, min_write_thresh;
31828-static int max_read_thresh = INPUT_POOL_WORDS * 32;
31829+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31830 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31831 static char sysctl_bootid[16];
31832
31833diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31834index 0e29a23..0efc2c2 100644
31835--- a/drivers/char/rocket.c
31836+++ b/drivers/char/rocket.c
31837@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31838 struct rocket_ports tmp;
31839 int board;
31840
31841+ pax_track_stack();
31842+
31843 if (!retports)
31844 return -EFAULT;
31845 memset(&tmp, 0, sizeof (tmp));
31846diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31847index 8c262aa..4d3b058 100644
31848--- a/drivers/char/sonypi.c
31849+++ b/drivers/char/sonypi.c
31850@@ -55,6 +55,7 @@
31851 #include <asm/uaccess.h>
31852 #include <asm/io.h>
31853 #include <asm/system.h>
31854+#include <asm/local.h>
31855
31856 #include <linux/sonypi.h>
31857
31858@@ -491,7 +492,7 @@ static struct sonypi_device {
31859 spinlock_t fifo_lock;
31860 wait_queue_head_t fifo_proc_list;
31861 struct fasync_struct *fifo_async;
31862- int open_count;
31863+ local_t open_count;
31864 int model;
31865 struct input_dev *input_jog_dev;
31866 struct input_dev *input_key_dev;
31867@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31868 static int sonypi_misc_release(struct inode *inode, struct file *file)
31869 {
31870 mutex_lock(&sonypi_device.lock);
31871- sonypi_device.open_count--;
31872+ local_dec(&sonypi_device.open_count);
31873 mutex_unlock(&sonypi_device.lock);
31874 return 0;
31875 }
31876@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31877 lock_kernel();
31878 mutex_lock(&sonypi_device.lock);
31879 /* Flush input queue on first open */
31880- if (!sonypi_device.open_count)
31881+ if (!local_read(&sonypi_device.open_count))
31882 kfifo_reset(sonypi_device.fifo);
31883- sonypi_device.open_count++;
31884+ local_inc(&sonypi_device.open_count);
31885 mutex_unlock(&sonypi_device.lock);
31886 unlock_kernel();
31887 return 0;
31888diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31889index db6dcfa..13834cb 100644
31890--- a/drivers/char/stallion.c
31891+++ b/drivers/char/stallion.c
31892@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31893 struct stlport stl_dummyport;
31894 struct stlport *portp;
31895
31896+ pax_track_stack();
31897+
31898 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31899 return -EFAULT;
31900 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31901diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31902index a0789f6..cea3902 100644
31903--- a/drivers/char/tpm/tpm.c
31904+++ b/drivers/char/tpm/tpm.c
31905@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31906 chip->vendor.req_complete_val)
31907 goto out_recv;
31908
31909- if ((status == chip->vendor.req_canceled)) {
31910+ if (status == chip->vendor.req_canceled) {
31911 dev_err(chip->dev, "Operation Canceled\n");
31912 rc = -ECANCELED;
31913 goto out;
31914@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31915
31916 struct tpm_chip *chip = dev_get_drvdata(dev);
31917
31918+ pax_track_stack();
31919+
31920 tpm_cmd.header.in = tpm_readpubek_header;
31921 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31922 "attempting to read the PUBEK");
31923diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31924index bf2170f..ce8cab9 100644
31925--- a/drivers/char/tpm/tpm_bios.c
31926+++ b/drivers/char/tpm/tpm_bios.c
31927@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31928 event = addr;
31929
31930 if ((event->event_type == 0 && event->event_size == 0) ||
31931- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31932+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31933 return NULL;
31934
31935 return addr;
31936@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31937 return NULL;
31938
31939 if ((event->event_type == 0 && event->event_size == 0) ||
31940- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31941+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31942 return NULL;
31943
31944 (*pos)++;
31945@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31946 int i;
31947
31948 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31949- seq_putc(m, data[i]);
31950+ if (!seq_putc(m, data[i]))
31951+ return -EFAULT;
31952
31953 return 0;
31954 }
31955@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31956 log->bios_event_log_end = log->bios_event_log + len;
31957
31958 virt = acpi_os_map_memory(start, len);
31959+ if (!virt) {
31960+ kfree(log->bios_event_log);
31961+ log->bios_event_log = NULL;
31962+ return -EFAULT;
31963+ }
31964
31965- memcpy(log->bios_event_log, virt, len);
31966+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31967
31968 acpi_os_unmap_memory(virt, len);
31969 return 0;
31970diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31971index 123cedf..6664cb4 100644
31972--- a/drivers/char/tty_io.c
31973+++ b/drivers/char/tty_io.c
31974@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
31975 static int tty_release(struct inode *, struct file *);
31976 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
31977 #ifdef CONFIG_COMPAT
31978-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31979+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31980 unsigned long arg);
31981 #else
31982 #define tty_compat_ioctl NULL
31983@@ -1774,6 +1774,7 @@ got_driver:
31984
31985 if (IS_ERR(tty)) {
31986 mutex_unlock(&tty_mutex);
31987+ tty_driver_kref_put(driver);
31988 return PTR_ERR(tty);
31989 }
31990 }
31991@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31992 return retval;
31993 }
31994
31995+EXPORT_SYMBOL(tty_ioctl);
31996+
31997 #ifdef CONFIG_COMPAT
31998-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31999+long tty_compat_ioctl(struct file *file, unsigned int cmd,
32000 unsigned long arg)
32001 {
32002 struct inode *inode = file->f_dentry->d_inode;
32003@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32004
32005 return retval;
32006 }
32007+
32008+EXPORT_SYMBOL(tty_compat_ioctl);
32009 #endif
32010
32011 /*
32012@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32013
32014 void tty_default_fops(struct file_operations *fops)
32015 {
32016- *fops = tty_fops;
32017+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32018 }
32019
32020 /*
32021diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
32022index d814a3d..b55b9c9 100644
32023--- a/drivers/char/tty_ldisc.c
32024+++ b/drivers/char/tty_ldisc.c
32025@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
32026 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
32027 struct tty_ldisc_ops *ldo = ld->ops;
32028
32029- ldo->refcount--;
32030+ atomic_dec(&ldo->refcount);
32031 module_put(ldo->owner);
32032 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32033
32034@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
32035 spin_lock_irqsave(&tty_ldisc_lock, flags);
32036 tty_ldiscs[disc] = new_ldisc;
32037 new_ldisc->num = disc;
32038- new_ldisc->refcount = 0;
32039+ atomic_set(&new_ldisc->refcount, 0);
32040 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32041
32042 return ret;
32043@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
32044 return -EINVAL;
32045
32046 spin_lock_irqsave(&tty_ldisc_lock, flags);
32047- if (tty_ldiscs[disc]->refcount)
32048+ if (atomic_read(&tty_ldiscs[disc]->refcount))
32049 ret = -EBUSY;
32050 else
32051 tty_ldiscs[disc] = NULL;
32052@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
32053 if (ldops) {
32054 ret = ERR_PTR(-EAGAIN);
32055 if (try_module_get(ldops->owner)) {
32056- ldops->refcount++;
32057+ atomic_inc(&ldops->refcount);
32058 ret = ldops;
32059 }
32060 }
32061@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
32062 unsigned long flags;
32063
32064 spin_lock_irqsave(&tty_ldisc_lock, flags);
32065- ldops->refcount--;
32066+ atomic_dec(&ldops->refcount);
32067 module_put(ldops->owner);
32068 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32069 }
32070diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32071index a035ae3..c27fe2c 100644
32072--- a/drivers/char/virtio_console.c
32073+++ b/drivers/char/virtio_console.c
32074@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
32075 * virtqueue, so we let the drivers do some boutique early-output thing. */
32076 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
32077 {
32078- virtio_cons.put_chars = put_chars;
32079+ pax_open_kernel();
32080+ *(void **)&virtio_cons.put_chars = put_chars;
32081+ pax_close_kernel();
32082 return hvc_instantiate(0, 0, &virtio_cons);
32083 }
32084
32085@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
32086 out_vq = vqs[1];
32087
32088 /* Start using the new console output. */
32089- virtio_cons.get_chars = get_chars;
32090- virtio_cons.put_chars = put_chars;
32091- virtio_cons.notifier_add = notifier_add_vio;
32092- virtio_cons.notifier_del = notifier_del_vio;
32093- virtio_cons.notifier_hangup = notifier_del_vio;
32094+ pax_open_kernel();
32095+ *(void **)&virtio_cons.get_chars = get_chars;
32096+ *(void **)&virtio_cons.put_chars = put_chars;
32097+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
32098+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
32099+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
32100+ pax_close_kernel();
32101
32102 /* The first argument of hvc_alloc() is the virtual console number, so
32103 * we use zero. The second argument is the parameter for the
32104diff --git a/drivers/char/vt.c b/drivers/char/vt.c
32105index 0c80c68..53d59c1 100644
32106--- a/drivers/char/vt.c
32107+++ b/drivers/char/vt.c
32108@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
32109
32110 static void notify_write(struct vc_data *vc, unsigned int unicode)
32111 {
32112- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32113+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
32114 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32115 }
32116
32117diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
32118index 6351a26..999af95 100644
32119--- a/drivers/char/vt_ioctl.c
32120+++ b/drivers/char/vt_ioctl.c
32121@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32122 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32123 return -EFAULT;
32124
32125- if (!capable(CAP_SYS_TTY_CONFIG))
32126- perm = 0;
32127-
32128 switch (cmd) {
32129 case KDGKBENT:
32130 key_map = key_maps[s];
32131@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32132 val = (i ? K_HOLE : K_NOSUCHMAP);
32133 return put_user(val, &user_kbe->kb_value);
32134 case KDSKBENT:
32135+ if (!capable(CAP_SYS_TTY_CONFIG))
32136+ perm = 0;
32137+
32138 if (!perm)
32139 return -EPERM;
32140+
32141 if (!i && v == K_NOSUCHMAP) {
32142 /* deallocate map */
32143 key_map = key_maps[s];
32144@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32145 int i, j, k;
32146 int ret;
32147
32148- if (!capable(CAP_SYS_TTY_CONFIG))
32149- perm = 0;
32150-
32151 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32152 if (!kbs) {
32153 ret = -ENOMEM;
32154@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32155 kfree(kbs);
32156 return ((p && *p) ? -EOVERFLOW : 0);
32157 case KDSKBSENT:
32158+ if (!capable(CAP_SYS_TTY_CONFIG))
32159+ perm = 0;
32160+
32161 if (!perm) {
32162 ret = -EPERM;
32163 goto reterr;
32164diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32165index c7ae026..1769c1d 100644
32166--- a/drivers/cpufreq/cpufreq.c
32167+++ b/drivers/cpufreq/cpufreq.c
32168@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32169 complete(&policy->kobj_unregister);
32170 }
32171
32172-static struct sysfs_ops sysfs_ops = {
32173+static const struct sysfs_ops sysfs_ops = {
32174 .show = show,
32175 .store = store,
32176 };
32177diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32178index 97b0038..2056670 100644
32179--- a/drivers/cpuidle/sysfs.c
32180+++ b/drivers/cpuidle/sysfs.c
32181@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32182 return ret;
32183 }
32184
32185-static struct sysfs_ops cpuidle_sysfs_ops = {
32186+static const struct sysfs_ops cpuidle_sysfs_ops = {
32187 .show = cpuidle_show,
32188 .store = cpuidle_store,
32189 };
32190@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32191 return ret;
32192 }
32193
32194-static struct sysfs_ops cpuidle_state_sysfs_ops = {
32195+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32196 .show = cpuidle_state_show,
32197 };
32198
32199@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32200 .release = cpuidle_state_sysfs_release,
32201 };
32202
32203-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32204+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32205 {
32206 kobject_put(&device->kobjs[i]->kobj);
32207 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32208diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32209index 5f753fc..0377ae9 100644
32210--- a/drivers/crypto/hifn_795x.c
32211+++ b/drivers/crypto/hifn_795x.c
32212@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32213 0xCA, 0x34, 0x2B, 0x2E};
32214 struct scatterlist sg;
32215
32216+ pax_track_stack();
32217+
32218 memset(src, 0, sizeof(src));
32219 memset(ctx.key, 0, sizeof(ctx.key));
32220
32221diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32222index 71e6482..de8d96c 100644
32223--- a/drivers/crypto/padlock-aes.c
32224+++ b/drivers/crypto/padlock-aes.c
32225@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32226 struct crypto_aes_ctx gen_aes;
32227 int cpu;
32228
32229+ pax_track_stack();
32230+
32231 if (key_len % 8) {
32232 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32233 return -EINVAL;
32234diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32235index dcc4ab7..cc834bb 100644
32236--- a/drivers/dma/ioat/dma.c
32237+++ b/drivers/dma/ioat/dma.c
32238@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32239 return entry->show(&chan->common, page);
32240 }
32241
32242-struct sysfs_ops ioat_sysfs_ops = {
32243+const struct sysfs_ops ioat_sysfs_ops = {
32244 .show = ioat_attr_show,
32245 };
32246
32247diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32248index bbc3e78..f2db62c 100644
32249--- a/drivers/dma/ioat/dma.h
32250+++ b/drivers/dma/ioat/dma.h
32251@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32252 unsigned long *phys_complete);
32253 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32254 void ioat_kobject_del(struct ioatdma_device *device);
32255-extern struct sysfs_ops ioat_sysfs_ops;
32256+extern const struct sysfs_ops ioat_sysfs_ops;
32257 extern struct ioat_sysfs_entry ioat_version_attr;
32258 extern struct ioat_sysfs_entry ioat_cap_attr;
32259 #endif /* IOATDMA_H */
32260diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32261index 9908c9e..3ceb0e5 100644
32262--- a/drivers/dma/ioat/dma_v3.c
32263+++ b/drivers/dma/ioat/dma_v3.c
32264@@ -71,10 +71,10 @@
32265 /* provide a lookup table for setting the source address in the base or
32266 * extended descriptor of an xor or pq descriptor
32267 */
32268-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32269-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32270-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32271-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32272+static const u8 xor_idx_to_desc = 0xd0;
32273+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32274+static const u8 pq_idx_to_desc = 0xf8;
32275+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32276
32277 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32278 {
32279diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32280index 85c464a..afd1e73 100644
32281--- a/drivers/edac/amd64_edac.c
32282+++ b/drivers/edac/amd64_edac.c
32283@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32284 * PCI core identifies what devices are on a system during boot, and then
32285 * inquiry this table to see if this driver is for a given device found.
32286 */
32287-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32288+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32289 {
32290 .vendor = PCI_VENDOR_ID_AMD,
32291 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32292diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32293index 2b95f1a..4f52793 100644
32294--- a/drivers/edac/amd76x_edac.c
32295+++ b/drivers/edac/amd76x_edac.c
32296@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32297 edac_mc_free(mci);
32298 }
32299
32300-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32301+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32302 {
32303 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32304 AMD762},
32305diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32306index d205d49..74c9672 100644
32307--- a/drivers/edac/e752x_edac.c
32308+++ b/drivers/edac/e752x_edac.c
32309@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32310 edac_mc_free(mci);
32311 }
32312
32313-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32314+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32315 {
32316 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32317 E7520},
32318diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32319index c7d11cc..c59c1ca 100644
32320--- a/drivers/edac/e7xxx_edac.c
32321+++ b/drivers/edac/e7xxx_edac.c
32322@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32323 edac_mc_free(mci);
32324 }
32325
32326-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32327+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32328 {
32329 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32330 E7205},
32331diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32332index 5376457..5fdedbc 100644
32333--- a/drivers/edac/edac_device_sysfs.c
32334+++ b/drivers/edac/edac_device_sysfs.c
32335@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32336 }
32337
32338 /* edac_dev file operations for an 'ctl_info' */
32339-static struct sysfs_ops device_ctl_info_ops = {
32340+static const struct sysfs_ops device_ctl_info_ops = {
32341 .show = edac_dev_ctl_info_show,
32342 .store = edac_dev_ctl_info_store
32343 };
32344@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32345 }
32346
32347 /* edac_dev file operations for an 'instance' */
32348-static struct sysfs_ops device_instance_ops = {
32349+static const struct sysfs_ops device_instance_ops = {
32350 .show = edac_dev_instance_show,
32351 .store = edac_dev_instance_store
32352 };
32353@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32354 }
32355
32356 /* edac_dev file operations for a 'block' */
32357-static struct sysfs_ops device_block_ops = {
32358+static const struct sysfs_ops device_block_ops = {
32359 .show = edac_dev_block_show,
32360 .store = edac_dev_block_store
32361 };
32362diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32363index e1d4ce0..88840e9 100644
32364--- a/drivers/edac/edac_mc_sysfs.c
32365+++ b/drivers/edac/edac_mc_sysfs.c
32366@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32367 return -EIO;
32368 }
32369
32370-static struct sysfs_ops csrowfs_ops = {
32371+static const struct sysfs_ops csrowfs_ops = {
32372 .show = csrowdev_show,
32373 .store = csrowdev_store
32374 };
32375@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32376 }
32377
32378 /* Intermediate show/store table */
32379-static struct sysfs_ops mci_ops = {
32380+static const struct sysfs_ops mci_ops = {
32381 .show = mcidev_show,
32382 .store = mcidev_store
32383 };
32384diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32385index 422728c..d8d9c88 100644
32386--- a/drivers/edac/edac_pci_sysfs.c
32387+++ b/drivers/edac/edac_pci_sysfs.c
32388@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32389 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32390 static int edac_pci_poll_msec = 1000; /* one second workq period */
32391
32392-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32393-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32394+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32395+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32396
32397 static struct kobject *edac_pci_top_main_kobj;
32398 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32399@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32400 }
32401
32402 /* fs_ops table */
32403-static struct sysfs_ops pci_instance_ops = {
32404+static const struct sysfs_ops pci_instance_ops = {
32405 .show = edac_pci_instance_show,
32406 .store = edac_pci_instance_store
32407 };
32408@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32409 return -EIO;
32410 }
32411
32412-static struct sysfs_ops edac_pci_sysfs_ops = {
32413+static const struct sysfs_ops edac_pci_sysfs_ops = {
32414 .show = edac_pci_dev_show,
32415 .store = edac_pci_dev_store
32416 };
32417@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32418 edac_printk(KERN_CRIT, EDAC_PCI,
32419 "Signaled System Error on %s\n",
32420 pci_name(dev));
32421- atomic_inc(&pci_nonparity_count);
32422+ atomic_inc_unchecked(&pci_nonparity_count);
32423 }
32424
32425 if (status & (PCI_STATUS_PARITY)) {
32426@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32427 "Master Data Parity Error on %s\n",
32428 pci_name(dev));
32429
32430- atomic_inc(&pci_parity_count);
32431+ atomic_inc_unchecked(&pci_parity_count);
32432 }
32433
32434 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32435@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32436 "Detected Parity Error on %s\n",
32437 pci_name(dev));
32438
32439- atomic_inc(&pci_parity_count);
32440+ atomic_inc_unchecked(&pci_parity_count);
32441 }
32442 }
32443
32444@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32445 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32446 "Signaled System Error on %s\n",
32447 pci_name(dev));
32448- atomic_inc(&pci_nonparity_count);
32449+ atomic_inc_unchecked(&pci_nonparity_count);
32450 }
32451
32452 if (status & (PCI_STATUS_PARITY)) {
32453@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32454 "Master Data Parity Error on "
32455 "%s\n", pci_name(dev));
32456
32457- atomic_inc(&pci_parity_count);
32458+ atomic_inc_unchecked(&pci_parity_count);
32459 }
32460
32461 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32462@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32463 "Detected Parity Error on %s\n",
32464 pci_name(dev));
32465
32466- atomic_inc(&pci_parity_count);
32467+ atomic_inc_unchecked(&pci_parity_count);
32468 }
32469 }
32470 }
32471@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32472 if (!check_pci_errors)
32473 return;
32474
32475- before_count = atomic_read(&pci_parity_count);
32476+ before_count = atomic_read_unchecked(&pci_parity_count);
32477
32478 /* scan all PCI devices looking for a Parity Error on devices and
32479 * bridges.
32480@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32481 /* Only if operator has selected panic on PCI Error */
32482 if (edac_pci_get_panic_on_pe()) {
32483 /* If the count is different 'after' from 'before' */
32484- if (before_count != atomic_read(&pci_parity_count))
32485+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32486 panic("EDAC: PCI Parity Error");
32487 }
32488 }
32489diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32490index 6c9a0f2..9c1cf7e 100644
32491--- a/drivers/edac/i3000_edac.c
32492+++ b/drivers/edac/i3000_edac.c
32493@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32494 edac_mc_free(mci);
32495 }
32496
32497-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32498+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32499 {
32500 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32501 I3000},
32502diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32503index fde4db9..fe108f9 100644
32504--- a/drivers/edac/i3200_edac.c
32505+++ b/drivers/edac/i3200_edac.c
32506@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32507 edac_mc_free(mci);
32508 }
32509
32510-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32511+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32512 {
32513 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32514 I3200},
32515diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32516index adc10a2..57d4ccf 100644
32517--- a/drivers/edac/i5000_edac.c
32518+++ b/drivers/edac/i5000_edac.c
32519@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32520 *
32521 * The "E500P" device is the first device supported.
32522 */
32523-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32524+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32525 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32526 .driver_data = I5000P},
32527
32528diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32529index 22db05a..b2b5503 100644
32530--- a/drivers/edac/i5100_edac.c
32531+++ b/drivers/edac/i5100_edac.c
32532@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32533 edac_mc_free(mci);
32534 }
32535
32536-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32537+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32538 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32539 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32540 { 0, }
32541diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32542index f99d106..f050710 100644
32543--- a/drivers/edac/i5400_edac.c
32544+++ b/drivers/edac/i5400_edac.c
32545@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32546 *
32547 * The "E500P" device is the first device supported.
32548 */
32549-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32550+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32551 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32552 {0,} /* 0 terminated list. */
32553 };
32554diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32555index 577760a..9ce16ce 100644
32556--- a/drivers/edac/i82443bxgx_edac.c
32557+++ b/drivers/edac/i82443bxgx_edac.c
32558@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32559
32560 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32561
32562-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32563+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32564 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32565 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32566 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32567diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32568index c0088ba..64a7b98 100644
32569--- a/drivers/edac/i82860_edac.c
32570+++ b/drivers/edac/i82860_edac.c
32571@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32572 edac_mc_free(mci);
32573 }
32574
32575-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32576+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32577 {
32578 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32579 I82860},
32580diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32581index b2d83b9..a34357b 100644
32582--- a/drivers/edac/i82875p_edac.c
32583+++ b/drivers/edac/i82875p_edac.c
32584@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32585 edac_mc_free(mci);
32586 }
32587
32588-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32589+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32590 {
32591 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32592 I82875P},
32593diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32594index 2eed3ea..87bbbd1 100644
32595--- a/drivers/edac/i82975x_edac.c
32596+++ b/drivers/edac/i82975x_edac.c
32597@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32598 edac_mc_free(mci);
32599 }
32600
32601-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32602+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32603 {
32604 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32605 I82975X
32606diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32607index 9900675..78ac2b6 100644
32608--- a/drivers/edac/r82600_edac.c
32609+++ b/drivers/edac/r82600_edac.c
32610@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32611 edac_mc_free(mci);
32612 }
32613
32614-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32615+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32616 {
32617 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32618 },
32619diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32620index d4ec605..4cfec4e 100644
32621--- a/drivers/edac/x38_edac.c
32622+++ b/drivers/edac/x38_edac.c
32623@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32624 edac_mc_free(mci);
32625 }
32626
32627-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32628+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32629 {
32630 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32631 X38},
32632diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32633index 3fc2ceb..daf098f 100644
32634--- a/drivers/firewire/core-card.c
32635+++ b/drivers/firewire/core-card.c
32636@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32637
32638 void fw_core_remove_card(struct fw_card *card)
32639 {
32640- struct fw_card_driver dummy_driver = dummy_driver_template;
32641+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32642
32643 card->driver->update_phy_reg(card, 4,
32644 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32645diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32646index 4560d8f..36db24a 100644
32647--- a/drivers/firewire/core-cdev.c
32648+++ b/drivers/firewire/core-cdev.c
32649@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32650 int ret;
32651
32652 if ((request->channels == 0 && request->bandwidth == 0) ||
32653- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32654- request->bandwidth < 0)
32655+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32656 return -EINVAL;
32657
32658 r = kmalloc(sizeof(*r), GFP_KERNEL);
32659diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32660index da628c7..cf54a2c 100644
32661--- a/drivers/firewire/core-transaction.c
32662+++ b/drivers/firewire/core-transaction.c
32663@@ -36,6 +36,7 @@
32664 #include <linux/string.h>
32665 #include <linux/timer.h>
32666 #include <linux/types.h>
32667+#include <linux/sched.h>
32668
32669 #include <asm/byteorder.h>
32670
32671@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32672 struct transaction_callback_data d;
32673 struct fw_transaction t;
32674
32675+ pax_track_stack();
32676+
32677 init_completion(&d.done);
32678 d.payload = payload;
32679 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32680diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32681index 7ff6e75..a2965d9 100644
32682--- a/drivers/firewire/core.h
32683+++ b/drivers/firewire/core.h
32684@@ -86,6 +86,7 @@ struct fw_card_driver {
32685
32686 int (*stop_iso)(struct fw_iso_context *ctx);
32687 };
32688+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32689
32690 void fw_card_initialize(struct fw_card *card,
32691 const struct fw_card_driver *driver, struct device *device);
32692diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32693index 3a2ccb0..82fd7c4 100644
32694--- a/drivers/firmware/dmi_scan.c
32695+++ b/drivers/firmware/dmi_scan.c
32696@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32697 }
32698 }
32699 else {
32700- /*
32701- * no iounmap() for that ioremap(); it would be a no-op, but
32702- * it's so early in setup that sucker gets confused into doing
32703- * what it shouldn't if we actually call it.
32704- */
32705 p = dmi_ioremap(0xF0000, 0x10000);
32706 if (p == NULL)
32707 goto error;
32708@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32709 if (buf == NULL)
32710 return -1;
32711
32712- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32713+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32714
32715 iounmap(buf);
32716 return 0;
32717diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32718index 9e4f59d..110e24e 100644
32719--- a/drivers/firmware/edd.c
32720+++ b/drivers/firmware/edd.c
32721@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32722 return ret;
32723 }
32724
32725-static struct sysfs_ops edd_attr_ops = {
32726+static const struct sysfs_ops edd_attr_ops = {
32727 .show = edd_attr_show,
32728 };
32729
32730diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32731index f4f709d..082f06e 100644
32732--- a/drivers/firmware/efivars.c
32733+++ b/drivers/firmware/efivars.c
32734@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32735 return ret;
32736 }
32737
32738-static struct sysfs_ops efivar_attr_ops = {
32739+static const struct sysfs_ops efivar_attr_ops = {
32740 .show = efivar_attr_show,
32741 .store = efivar_attr_store,
32742 };
32743diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32744index 051d1eb..0a5d4e7 100644
32745--- a/drivers/firmware/iscsi_ibft.c
32746+++ b/drivers/firmware/iscsi_ibft.c
32747@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32748 return ret;
32749 }
32750
32751-static struct sysfs_ops ibft_attr_ops = {
32752+static const struct sysfs_ops ibft_attr_ops = {
32753 .show = ibft_show_attribute,
32754 };
32755
32756diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32757index 56f9234..8c58c7b 100644
32758--- a/drivers/firmware/memmap.c
32759+++ b/drivers/firmware/memmap.c
32760@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32761 NULL
32762 };
32763
32764-static struct sysfs_ops memmap_attr_ops = {
32765+static const struct sysfs_ops memmap_attr_ops = {
32766 .show = memmap_attr_show,
32767 };
32768
32769diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32770index b16c9a8..2af7d3f 100644
32771--- a/drivers/gpio/vr41xx_giu.c
32772+++ b/drivers/gpio/vr41xx_giu.c
32773@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32774 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32775 maskl, pendl, maskh, pendh);
32776
32777- atomic_inc(&irq_err_count);
32778+ atomic_inc_unchecked(&irq_err_count);
32779
32780 return -EINVAL;
32781 }
32782diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32783index bea6efc..3dc0f42 100644
32784--- a/drivers/gpu/drm/drm_crtc.c
32785+++ b/drivers/gpu/drm/drm_crtc.c
32786@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32787 */
32788 if ((out_resp->count_modes >= mode_count) && mode_count) {
32789 copied = 0;
32790- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32791+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32792 list_for_each_entry(mode, &connector->modes, head) {
32793 drm_crtc_convert_to_umode(&u_mode, mode);
32794 if (copy_to_user(mode_ptr + copied,
32795@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32796
32797 if ((out_resp->count_props >= props_count) && props_count) {
32798 copied = 0;
32799- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32800- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32801+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32802+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32803 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32804 if (connector->property_ids[i] != 0) {
32805 if (put_user(connector->property_ids[i],
32806@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32807
32808 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32809 copied = 0;
32810- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32811+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32812 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32813 if (connector->encoder_ids[i] != 0) {
32814 if (put_user(connector->encoder_ids[i],
32815@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32816 }
32817
32818 for (i = 0; i < crtc_req->count_connectors; i++) {
32819- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32820+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32821 if (get_user(out_id, &set_connectors_ptr[i])) {
32822 ret = -EFAULT;
32823 goto out;
32824@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32825 out_resp->flags = property->flags;
32826
32827 if ((out_resp->count_values >= value_count) && value_count) {
32828- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32829+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32830 for (i = 0; i < value_count; i++) {
32831 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32832 ret = -EFAULT;
32833@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32834 if (property->flags & DRM_MODE_PROP_ENUM) {
32835 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32836 copied = 0;
32837- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32838+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32839 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32840
32841 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32842@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32843 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32844 copied = 0;
32845 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32846- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32847+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32848
32849 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32850 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32851@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32852 blob = obj_to_blob(obj);
32853
32854 if (out_resp->length == blob->length) {
32855- blob_ptr = (void *)(unsigned long)out_resp->data;
32856+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
32857 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32858 ret = -EFAULT;
32859 goto done;
32860diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32861index 1b8745d..92fdbf6 100644
32862--- a/drivers/gpu/drm/drm_crtc_helper.c
32863+++ b/drivers/gpu/drm/drm_crtc_helper.c
32864@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32865 struct drm_crtc *tmp;
32866 int crtc_mask = 1;
32867
32868- WARN(!crtc, "checking null crtc?");
32869+ BUG_ON(!crtc);
32870
32871 dev = crtc->dev;
32872
32873@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32874
32875 adjusted_mode = drm_mode_duplicate(dev, mode);
32876
32877+ pax_track_stack();
32878+
32879 crtc->enabled = drm_helper_crtc_in_use(crtc);
32880
32881 if (!crtc->enabled)
32882diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32883index 0e27d98..dec8768 100644
32884--- a/drivers/gpu/drm/drm_drv.c
32885+++ b/drivers/gpu/drm/drm_drv.c
32886@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32887 char *kdata = NULL;
32888
32889 atomic_inc(&dev->ioctl_count);
32890- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32891+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32892 ++file_priv->ioctl_count;
32893
32894 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32895diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32896index 519161e..98c840c 100644
32897--- a/drivers/gpu/drm/drm_fops.c
32898+++ b/drivers/gpu/drm/drm_fops.c
32899@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32900 }
32901
32902 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32903- atomic_set(&dev->counts[i], 0);
32904+ atomic_set_unchecked(&dev->counts[i], 0);
32905
32906 dev->sigdata.lock = NULL;
32907
32908@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32909
32910 retcode = drm_open_helper(inode, filp, dev);
32911 if (!retcode) {
32912- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32913+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32914 spin_lock(&dev->count_lock);
32915- if (!dev->open_count++) {
32916+ if (local_inc_return(&dev->open_count) == 1) {
32917 spin_unlock(&dev->count_lock);
32918 retcode = drm_setup(dev);
32919 goto out;
32920@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32921
32922 lock_kernel();
32923
32924- DRM_DEBUG("open_count = %d\n", dev->open_count);
32925+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32926
32927 if (dev->driver->preclose)
32928 dev->driver->preclose(dev, file_priv);
32929@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32930 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32931 task_pid_nr(current),
32932 (long)old_encode_dev(file_priv->minor->device),
32933- dev->open_count);
32934+ local_read(&dev->open_count));
32935
32936 /* Release any auth tokens that might point to this file_priv,
32937 (do that under the drm_global_mutex) */
32938@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
32939 * End inline drm_release
32940 */
32941
32942- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32943+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32944 spin_lock(&dev->count_lock);
32945- if (!--dev->open_count) {
32946+ if (local_dec_and_test(&dev->open_count)) {
32947 if (atomic_read(&dev->ioctl_count)) {
32948 DRM_ERROR("Device busy: %d\n",
32949 atomic_read(&dev->ioctl_count));
32950diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32951index 8bf3770..79422805 100644
32952--- a/drivers/gpu/drm/drm_gem.c
32953+++ b/drivers/gpu/drm/drm_gem.c
32954@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32955 spin_lock_init(&dev->object_name_lock);
32956 idr_init(&dev->object_name_idr);
32957 atomic_set(&dev->object_count, 0);
32958- atomic_set(&dev->object_memory, 0);
32959+ atomic_set_unchecked(&dev->object_memory, 0);
32960 atomic_set(&dev->pin_count, 0);
32961- atomic_set(&dev->pin_memory, 0);
32962+ atomic_set_unchecked(&dev->pin_memory, 0);
32963 atomic_set(&dev->gtt_count, 0);
32964- atomic_set(&dev->gtt_memory, 0);
32965+ atomic_set_unchecked(&dev->gtt_memory, 0);
32966
32967 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32968 if (!mm) {
32969@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32970 goto fput;
32971 }
32972 atomic_inc(&dev->object_count);
32973- atomic_add(obj->size, &dev->object_memory);
32974+ atomic_add_unchecked(obj->size, &dev->object_memory);
32975 return obj;
32976 fput:
32977 fput(obj->filp);
32978@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32979
32980 fput(obj->filp);
32981 atomic_dec(&dev->object_count);
32982- atomic_sub(obj->size, &dev->object_memory);
32983+ atomic_sub_unchecked(obj->size, &dev->object_memory);
32984 kfree(obj);
32985 }
32986 EXPORT_SYMBOL(drm_gem_object_free);
32987diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32988index f0f6c6b..34af322 100644
32989--- a/drivers/gpu/drm/drm_info.c
32990+++ b/drivers/gpu/drm/drm_info.c
32991@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32992 struct drm_local_map *map;
32993 struct drm_map_list *r_list;
32994
32995- /* Hardcoded from _DRM_FRAME_BUFFER,
32996- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32997- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32998- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32999+ static const char * const types[] = {
33000+ [_DRM_FRAME_BUFFER] = "FB",
33001+ [_DRM_REGISTERS] = "REG",
33002+ [_DRM_SHM] = "SHM",
33003+ [_DRM_AGP] = "AGP",
33004+ [_DRM_SCATTER_GATHER] = "SG",
33005+ [_DRM_CONSISTENT] = "PCI",
33006+ [_DRM_GEM] = "GEM" };
33007 const char *type;
33008 int i;
33009
33010@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33011 map = r_list->map;
33012 if (!map)
33013 continue;
33014- if (map->type < 0 || map->type > 5)
33015+ if (map->type >= ARRAY_SIZE(types))
33016 type = "??";
33017 else
33018 type = types[map->type];
33019@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
33020 struct drm_device *dev = node->minor->dev;
33021
33022 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
33023- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
33024+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
33025 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
33026- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
33027- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
33028+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
33029+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
33030 seq_printf(m, "%d gtt total\n", dev->gtt_total);
33031 return 0;
33032 }
33033@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
33034 mutex_lock(&dev->struct_mutex);
33035 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
33036 atomic_read(&dev->vma_count),
33037+#ifdef CONFIG_GRKERNSEC_HIDESYM
33038+ NULL, 0);
33039+#else
33040 high_memory, (u64)virt_to_phys(high_memory));
33041+#endif
33042
33043 list_for_each_entry(pt, &dev->vmalist, head) {
33044 vma = pt->vma;
33045@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
33046 continue;
33047 seq_printf(m,
33048 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
33049- pt->pid, vma->vm_start, vma->vm_end,
33050+ pt->pid,
33051+#ifdef CONFIG_GRKERNSEC_HIDESYM
33052+ 0, 0,
33053+#else
33054+ vma->vm_start, vma->vm_end,
33055+#endif
33056 vma->vm_flags & VM_READ ? 'r' : '-',
33057 vma->vm_flags & VM_WRITE ? 'w' : '-',
33058 vma->vm_flags & VM_EXEC ? 'x' : '-',
33059 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
33060 vma->vm_flags & VM_LOCKED ? 'l' : '-',
33061 vma->vm_flags & VM_IO ? 'i' : '-',
33062+#ifdef CONFIG_GRKERNSEC_HIDESYM
33063+ 0);
33064+#else
33065 vma->vm_pgoff);
33066+#endif
33067
33068 #if defined(__i386__)
33069 pgprot = pgprot_val(vma->vm_page_prot);
33070diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
33071index 282d9fd..71e5f11 100644
33072--- a/drivers/gpu/drm/drm_ioc32.c
33073+++ b/drivers/gpu/drm/drm_ioc32.c
33074@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
33075 request = compat_alloc_user_space(nbytes);
33076 if (!access_ok(VERIFY_WRITE, request, nbytes))
33077 return -EFAULT;
33078- list = (struct drm_buf_desc *) (request + 1);
33079+ list = (struct drm_buf_desc __user *) (request + 1);
33080
33081 if (__put_user(count, &request->count)
33082 || __put_user(list, &request->list))
33083@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
33084 request = compat_alloc_user_space(nbytes);
33085 if (!access_ok(VERIFY_WRITE, request, nbytes))
33086 return -EFAULT;
33087- list = (struct drm_buf_pub *) (request + 1);
33088+ list = (struct drm_buf_pub __user *) (request + 1);
33089
33090 if (__put_user(count, &request->count)
33091 || __put_user(list, &request->list))
33092diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
33093index 9b9ff46..4ea724c 100644
33094--- a/drivers/gpu/drm/drm_ioctl.c
33095+++ b/drivers/gpu/drm/drm_ioctl.c
33096@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
33097 stats->data[i].value =
33098 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
33099 else
33100- stats->data[i].value = atomic_read(&dev->counts[i]);
33101+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33102 stats->data[i].type = dev->types[i];
33103 }
33104
33105diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33106index e2f70a5..c703e86 100644
33107--- a/drivers/gpu/drm/drm_lock.c
33108+++ b/drivers/gpu/drm/drm_lock.c
33109@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33110 if (drm_lock_take(&master->lock, lock->context)) {
33111 master->lock.file_priv = file_priv;
33112 master->lock.lock_time = jiffies;
33113- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33114+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33115 break; /* Got lock */
33116 }
33117
33118@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33119 return -EINVAL;
33120 }
33121
33122- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33123+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33124
33125 /* kernel_context_switch isn't used by any of the x86 drm
33126 * modules but is required by the Sparc driver.
33127diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33128index 7d1d88c..b9131b2 100644
33129--- a/drivers/gpu/drm/i810/i810_dma.c
33130+++ b/drivers/gpu/drm/i810/i810_dma.c
33131@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33132 dma->buflist[vertex->idx],
33133 vertex->discard, vertex->used);
33134
33135- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33136- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33137+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33138+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33139 sarea_priv->last_enqueue = dev_priv->counter - 1;
33140 sarea_priv->last_dispatch = (int)hw_status[5];
33141
33142@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33143 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33144 mc->last_render);
33145
33146- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33147- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33148+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33149+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33150 sarea_priv->last_enqueue = dev_priv->counter - 1;
33151 sarea_priv->last_dispatch = (int)hw_status[5];
33152
33153diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33154index 21e2691..7321edd 100644
33155--- a/drivers/gpu/drm/i810/i810_drv.h
33156+++ b/drivers/gpu/drm/i810/i810_drv.h
33157@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33158 int page_flipping;
33159
33160 wait_queue_head_t irq_queue;
33161- atomic_t irq_received;
33162- atomic_t irq_emitted;
33163+ atomic_unchecked_t irq_received;
33164+ atomic_unchecked_t irq_emitted;
33165
33166 int front_offset;
33167 } drm_i810_private_t;
33168diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33169index da82afe..48a45de 100644
33170--- a/drivers/gpu/drm/i830/i830_drv.h
33171+++ b/drivers/gpu/drm/i830/i830_drv.h
33172@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33173 int page_flipping;
33174
33175 wait_queue_head_t irq_queue;
33176- atomic_t irq_received;
33177- atomic_t irq_emitted;
33178+ atomic_unchecked_t irq_received;
33179+ atomic_unchecked_t irq_emitted;
33180
33181 int use_mi_batchbuffer_start;
33182
33183diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33184index 91ec2bb..6f21fab 100644
33185--- a/drivers/gpu/drm/i830/i830_irq.c
33186+++ b/drivers/gpu/drm/i830/i830_irq.c
33187@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33188
33189 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33190
33191- atomic_inc(&dev_priv->irq_received);
33192+ atomic_inc_unchecked(&dev_priv->irq_received);
33193 wake_up_interruptible(&dev_priv->irq_queue);
33194
33195 return IRQ_HANDLED;
33196@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33197
33198 DRM_DEBUG("%s\n", __func__);
33199
33200- atomic_inc(&dev_priv->irq_emitted);
33201+ atomic_inc_unchecked(&dev_priv->irq_emitted);
33202
33203 BEGIN_LP_RING(2);
33204 OUT_RING(0);
33205 OUT_RING(GFX_OP_USER_INTERRUPT);
33206 ADVANCE_LP_RING();
33207
33208- return atomic_read(&dev_priv->irq_emitted);
33209+ return atomic_read_unchecked(&dev_priv->irq_emitted);
33210 }
33211
33212 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33213@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33214
33215 DRM_DEBUG("%s\n", __func__);
33216
33217- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33218+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33219 return 0;
33220
33221 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33222@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33223
33224 for (;;) {
33225 __set_current_state(TASK_INTERRUPTIBLE);
33226- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33227+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33228 break;
33229 if ((signed)(end - jiffies) <= 0) {
33230 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33231@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33232 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33233 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33234 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33235- atomic_set(&dev_priv->irq_received, 0);
33236- atomic_set(&dev_priv->irq_emitted, 0);
33237+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33238+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33239 init_waitqueue_head(&dev_priv->irq_queue);
33240 }
33241
33242diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33243index 288fc50..c6092055 100644
33244--- a/drivers/gpu/drm/i915/dvo.h
33245+++ b/drivers/gpu/drm/i915/dvo.h
33246@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33247 *
33248 * \return singly-linked list of modes or NULL if no modes found.
33249 */
33250- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33251+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33252
33253 /**
33254 * Clean up driver-specific bits of the output
33255 */
33256- void (*destroy) (struct intel_dvo_device *dvo);
33257+ void (* const destroy) (struct intel_dvo_device *dvo);
33258
33259 /**
33260 * Debugging hook to dump device registers to log file
33261 */
33262- void (*dump_regs)(struct intel_dvo_device *dvo);
33263+ void (* const dump_regs)(struct intel_dvo_device *dvo);
33264 };
33265
33266-extern struct intel_dvo_dev_ops sil164_ops;
33267-extern struct intel_dvo_dev_ops ch7xxx_ops;
33268-extern struct intel_dvo_dev_ops ivch_ops;
33269-extern struct intel_dvo_dev_ops tfp410_ops;
33270-extern struct intel_dvo_dev_ops ch7017_ops;
33271+extern const struct intel_dvo_dev_ops sil164_ops;
33272+extern const struct intel_dvo_dev_ops ch7xxx_ops;
33273+extern const struct intel_dvo_dev_ops ivch_ops;
33274+extern const struct intel_dvo_dev_ops tfp410_ops;
33275+extern const struct intel_dvo_dev_ops ch7017_ops;
33276
33277 #endif /* _INTEL_DVO_H */
33278diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33279index 621815b..499d82e 100644
33280--- a/drivers/gpu/drm/i915/dvo_ch7017.c
33281+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33282@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33283 }
33284 }
33285
33286-struct intel_dvo_dev_ops ch7017_ops = {
33287+const struct intel_dvo_dev_ops ch7017_ops = {
33288 .init = ch7017_init,
33289 .detect = ch7017_detect,
33290 .mode_valid = ch7017_mode_valid,
33291diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33292index a9b8962..ac769ba 100644
33293--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33294+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33295@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33296 }
33297 }
33298
33299-struct intel_dvo_dev_ops ch7xxx_ops = {
33300+const struct intel_dvo_dev_ops ch7xxx_ops = {
33301 .init = ch7xxx_init,
33302 .detect = ch7xxx_detect,
33303 .mode_valid = ch7xxx_mode_valid,
33304diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33305index aa176f9..ed2930c 100644
33306--- a/drivers/gpu/drm/i915/dvo_ivch.c
33307+++ b/drivers/gpu/drm/i915/dvo_ivch.c
33308@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33309 }
33310 }
33311
33312-struct intel_dvo_dev_ops ivch_ops= {
33313+const struct intel_dvo_dev_ops ivch_ops= {
33314 .init = ivch_init,
33315 .dpms = ivch_dpms,
33316 .save = ivch_save,
33317diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33318index e1c1f73..7dbebcf 100644
33319--- a/drivers/gpu/drm/i915/dvo_sil164.c
33320+++ b/drivers/gpu/drm/i915/dvo_sil164.c
33321@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33322 }
33323 }
33324
33325-struct intel_dvo_dev_ops sil164_ops = {
33326+const struct intel_dvo_dev_ops sil164_ops = {
33327 .init = sil164_init,
33328 .detect = sil164_detect,
33329 .mode_valid = sil164_mode_valid,
33330diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33331index 16dce84..7e1b6f8 100644
33332--- a/drivers/gpu/drm/i915/dvo_tfp410.c
33333+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33334@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33335 }
33336 }
33337
33338-struct intel_dvo_dev_ops tfp410_ops = {
33339+const struct intel_dvo_dev_ops tfp410_ops = {
33340 .init = tfp410_init,
33341 .detect = tfp410_detect,
33342 .mode_valid = tfp410_mode_valid,
33343diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33344index 7e859d6..7d1cf2b 100644
33345--- a/drivers/gpu/drm/i915/i915_debugfs.c
33346+++ b/drivers/gpu/drm/i915/i915_debugfs.c
33347@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33348 I915_READ(GTIMR));
33349 }
33350 seq_printf(m, "Interrupts received: %d\n",
33351- atomic_read(&dev_priv->irq_received));
33352+ atomic_read_unchecked(&dev_priv->irq_received));
33353 if (dev_priv->hw_status_page != NULL) {
33354 seq_printf(m, "Current sequence: %d\n",
33355 i915_get_gem_seqno(dev));
33356diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33357index 5449239..7e4f68d 100644
33358--- a/drivers/gpu/drm/i915/i915_drv.c
33359+++ b/drivers/gpu/drm/i915/i915_drv.c
33360@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33361 return i915_resume(dev);
33362 }
33363
33364-static struct vm_operations_struct i915_gem_vm_ops = {
33365+static const struct vm_operations_struct i915_gem_vm_ops = {
33366 .fault = i915_gem_fault,
33367 .open = drm_gem_vm_open,
33368 .close = drm_gem_vm_close,
33369diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33370index 97163f7..c24c7c7 100644
33371--- a/drivers/gpu/drm/i915/i915_drv.h
33372+++ b/drivers/gpu/drm/i915/i915_drv.h
33373@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33374 /* display clock increase/decrease */
33375 /* pll clock increase/decrease */
33376 /* clock gating init */
33377-};
33378+} __no_const;
33379
33380 typedef struct drm_i915_private {
33381 struct drm_device *dev;
33382@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33383 int page_flipping;
33384
33385 wait_queue_head_t irq_queue;
33386- atomic_t irq_received;
33387+ atomic_unchecked_t irq_received;
33388 /** Protects user_irq_refcount and irq_mask_reg */
33389 spinlock_t user_irq_lock;
33390 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33391diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33392index 27a3074..eb3f959 100644
33393--- a/drivers/gpu/drm/i915/i915_gem.c
33394+++ b/drivers/gpu/drm/i915/i915_gem.c
33395@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33396
33397 args->aper_size = dev->gtt_total;
33398 args->aper_available_size = (args->aper_size -
33399- atomic_read(&dev->pin_memory));
33400+ atomic_read_unchecked(&dev->pin_memory));
33401
33402 return 0;
33403 }
33404@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33405
33406 if (obj_priv->gtt_space) {
33407 atomic_dec(&dev->gtt_count);
33408- atomic_sub(obj->size, &dev->gtt_memory);
33409+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33410
33411 drm_mm_put_block(obj_priv->gtt_space);
33412 obj_priv->gtt_space = NULL;
33413@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33414 goto search_free;
33415 }
33416 atomic_inc(&dev->gtt_count);
33417- atomic_add(obj->size, &dev->gtt_memory);
33418+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
33419
33420 /* Assert that the object is not currently in any GPU domain. As it
33421 * wasn't in the GTT, there shouldn't be any way it could have been in
33422@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33423 "%d/%d gtt bytes\n",
33424 atomic_read(&dev->object_count),
33425 atomic_read(&dev->pin_count),
33426- atomic_read(&dev->object_memory),
33427- atomic_read(&dev->pin_memory),
33428- atomic_read(&dev->gtt_memory),
33429+ atomic_read_unchecked(&dev->object_memory),
33430+ atomic_read_unchecked(&dev->pin_memory),
33431+ atomic_read_unchecked(&dev->gtt_memory),
33432 dev->gtt_total);
33433 }
33434 goto err;
33435@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33436 */
33437 if (obj_priv->pin_count == 1) {
33438 atomic_inc(&dev->pin_count);
33439- atomic_add(obj->size, &dev->pin_memory);
33440+ atomic_add_unchecked(obj->size, &dev->pin_memory);
33441 if (!obj_priv->active &&
33442 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33443 !list_empty(&obj_priv->list))
33444@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33445 list_move_tail(&obj_priv->list,
33446 &dev_priv->mm.inactive_list);
33447 atomic_dec(&dev->pin_count);
33448- atomic_sub(obj->size, &dev->pin_memory);
33449+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
33450 }
33451 i915_verify_inactive(dev, __FILE__, __LINE__);
33452 }
33453diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33454index 63f28ad..f5469da 100644
33455--- a/drivers/gpu/drm/i915/i915_irq.c
33456+++ b/drivers/gpu/drm/i915/i915_irq.c
33457@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33458 int irq_received;
33459 int ret = IRQ_NONE;
33460
33461- atomic_inc(&dev_priv->irq_received);
33462+ atomic_inc_unchecked(&dev_priv->irq_received);
33463
33464 if (IS_IGDNG(dev))
33465 return igdng_irq_handler(dev);
33466@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33467 {
33468 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33469
33470- atomic_set(&dev_priv->irq_received, 0);
33471+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33472
33473 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33474 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33475diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33476index 5d9c6a7..d1b0e29 100644
33477--- a/drivers/gpu/drm/i915/intel_sdvo.c
33478+++ b/drivers/gpu/drm/i915/intel_sdvo.c
33479@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33480 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33481
33482 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33483- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33484+ pax_open_kernel();
33485+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33486+ pax_close_kernel();
33487
33488 /* Read the regs to test if we can talk to the device */
33489 for (i = 0; i < 0x40; i++) {
33490diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33491index be6c6b9..8615d9c 100644
33492--- a/drivers/gpu/drm/mga/mga_drv.h
33493+++ b/drivers/gpu/drm/mga/mga_drv.h
33494@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33495 u32 clear_cmd;
33496 u32 maccess;
33497
33498- atomic_t vbl_received; /**< Number of vblanks received. */
33499+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33500 wait_queue_head_t fence_queue;
33501- atomic_t last_fence_retired;
33502+ atomic_unchecked_t last_fence_retired;
33503 u32 next_fence_to_post;
33504
33505 unsigned int fb_cpp;
33506diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33507index daa6041..a28a5da 100644
33508--- a/drivers/gpu/drm/mga/mga_irq.c
33509+++ b/drivers/gpu/drm/mga/mga_irq.c
33510@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33511 if (crtc != 0)
33512 return 0;
33513
33514- return atomic_read(&dev_priv->vbl_received);
33515+ return atomic_read_unchecked(&dev_priv->vbl_received);
33516 }
33517
33518
33519@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33520 /* VBLANK interrupt */
33521 if (status & MGA_VLINEPEN) {
33522 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33523- atomic_inc(&dev_priv->vbl_received);
33524+ atomic_inc_unchecked(&dev_priv->vbl_received);
33525 drm_handle_vblank(dev, 0);
33526 handled = 1;
33527 }
33528@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33529 MGA_WRITE(MGA_PRIMEND, prim_end);
33530 }
33531
33532- atomic_inc(&dev_priv->last_fence_retired);
33533+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33534 DRM_WAKEUP(&dev_priv->fence_queue);
33535 handled = 1;
33536 }
33537@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33538 * using fences.
33539 */
33540 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33541- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33542+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33543 - *sequence) <= (1 << 23)));
33544
33545 *sequence = cur_fence;
33546diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33547index 4c39a40..b22a9ea 100644
33548--- a/drivers/gpu/drm/r128/r128_cce.c
33549+++ b/drivers/gpu/drm/r128/r128_cce.c
33550@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33551
33552 /* GH: Simple idle check.
33553 */
33554- atomic_set(&dev_priv->idle_count, 0);
33555+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33556
33557 /* We don't support anything other than bus-mastering ring mode,
33558 * but the ring can be in either AGP or PCI space for the ring
33559diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33560index 3c60829..4faf484 100644
33561--- a/drivers/gpu/drm/r128/r128_drv.h
33562+++ b/drivers/gpu/drm/r128/r128_drv.h
33563@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33564 int is_pci;
33565 unsigned long cce_buffers_offset;
33566
33567- atomic_t idle_count;
33568+ atomic_unchecked_t idle_count;
33569
33570 int page_flipping;
33571 int current_page;
33572 u32 crtc_offset;
33573 u32 crtc_offset_cntl;
33574
33575- atomic_t vbl_received;
33576+ atomic_unchecked_t vbl_received;
33577
33578 u32 color_fmt;
33579 unsigned int front_offset;
33580diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33581index 69810fb..97bf17a 100644
33582--- a/drivers/gpu/drm/r128/r128_irq.c
33583+++ b/drivers/gpu/drm/r128/r128_irq.c
33584@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33585 if (crtc != 0)
33586 return 0;
33587
33588- return atomic_read(&dev_priv->vbl_received);
33589+ return atomic_read_unchecked(&dev_priv->vbl_received);
33590 }
33591
33592 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33593@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33594 /* VBLANK interrupt */
33595 if (status & R128_CRTC_VBLANK_INT) {
33596 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33597- atomic_inc(&dev_priv->vbl_received);
33598+ atomic_inc_unchecked(&dev_priv->vbl_received);
33599 drm_handle_vblank(dev, 0);
33600 return IRQ_HANDLED;
33601 }
33602diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33603index af2665c..51922d2 100644
33604--- a/drivers/gpu/drm/r128/r128_state.c
33605+++ b/drivers/gpu/drm/r128/r128_state.c
33606@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33607
33608 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33609 {
33610- if (atomic_read(&dev_priv->idle_count) == 0) {
33611+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33612 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33613 } else {
33614- atomic_set(&dev_priv->idle_count, 0);
33615+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33616 }
33617 }
33618
33619diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33620index dd72b91..8644b3c 100644
33621--- a/drivers/gpu/drm/radeon/atom.c
33622+++ b/drivers/gpu/drm/radeon/atom.c
33623@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33624 char name[512];
33625 int i;
33626
33627+ pax_track_stack();
33628+
33629 ctx->card = card;
33630 ctx->bios = bios;
33631
33632diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33633index 0d79577..efaa7a5 100644
33634--- a/drivers/gpu/drm/radeon/mkregtable.c
33635+++ b/drivers/gpu/drm/radeon/mkregtable.c
33636@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33637 regex_t mask_rex;
33638 regmatch_t match[4];
33639 char buf[1024];
33640- size_t end;
33641+ long end;
33642 int len;
33643 int done = 0;
33644 int r;
33645 unsigned o;
33646 struct offset *offset;
33647 char last_reg_s[10];
33648- int last_reg;
33649+ unsigned long last_reg;
33650
33651 if (regcomp
33652 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33653diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33654index 6735213..38c2c67 100644
33655--- a/drivers/gpu/drm/radeon/radeon.h
33656+++ b/drivers/gpu/drm/radeon/radeon.h
33657@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33658 */
33659 struct radeon_fence_driver {
33660 uint32_t scratch_reg;
33661- atomic_t seq;
33662+ atomic_unchecked_t seq;
33663 uint32_t last_seq;
33664 unsigned long count_timeout;
33665 wait_queue_head_t queue;
33666@@ -640,7 +640,7 @@ struct radeon_asic {
33667 uint32_t offset, uint32_t obj_size);
33668 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33669 void (*bandwidth_update)(struct radeon_device *rdev);
33670-};
33671+} __no_const;
33672
33673 /*
33674 * Asic structures
33675diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33676index 4e928b9..d8b6008 100644
33677--- a/drivers/gpu/drm/radeon/radeon_atombios.c
33678+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33679@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33680 bool linkb;
33681 struct radeon_i2c_bus_rec ddc_bus;
33682
33683+ pax_track_stack();
33684+
33685 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33686
33687 if (data_offset == 0)
33688@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33689 }
33690 }
33691
33692-struct bios_connector {
33693+static struct bios_connector {
33694 bool valid;
33695 uint16_t line_mux;
33696 uint16_t devices;
33697 int connector_type;
33698 struct radeon_i2c_bus_rec ddc_bus;
33699-};
33700+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33701
33702 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33703 drm_device
33704@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33705 uint8_t dac;
33706 union atom_supported_devices *supported_devices;
33707 int i, j;
33708- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33709
33710 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33711
33712diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33713index 083a181..ccccae0 100644
33714--- a/drivers/gpu/drm/radeon/radeon_display.c
33715+++ b/drivers/gpu/drm/radeon/radeon_display.c
33716@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33717
33718 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33719 error = freq - current_freq;
33720- error = error < 0 ? 0xffffffff : error;
33721+ error = (int32_t)error < 0 ? 0xffffffff : error;
33722 } else
33723 error = abs(current_freq - freq);
33724 vco_diff = abs(vco - best_vco);
33725diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33726index 76e4070..193fa7f 100644
33727--- a/drivers/gpu/drm/radeon/radeon_drv.h
33728+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33729@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33730
33731 /* SW interrupt */
33732 wait_queue_head_t swi_queue;
33733- atomic_t swi_emitted;
33734+ atomic_unchecked_t swi_emitted;
33735 int vblank_crtc;
33736 uint32_t irq_enable_reg;
33737 uint32_t r500_disp_irq_reg;
33738diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33739index 3beb26d..6ce9c4a 100644
33740--- a/drivers/gpu/drm/radeon/radeon_fence.c
33741+++ b/drivers/gpu/drm/radeon/radeon_fence.c
33742@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33743 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33744 return 0;
33745 }
33746- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33747+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33748 if (!rdev->cp.ready) {
33749 /* FIXME: cp is not running assume everythings is done right
33750 * away
33751@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33752 return r;
33753 }
33754 WREG32(rdev->fence_drv.scratch_reg, 0);
33755- atomic_set(&rdev->fence_drv.seq, 0);
33756+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33757 INIT_LIST_HEAD(&rdev->fence_drv.created);
33758 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33759 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33760diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33761index a1bf11d..4a123c0 100644
33762--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33763+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33764@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33765 request = compat_alloc_user_space(sizeof(*request));
33766 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33767 || __put_user(req32.param, &request->param)
33768- || __put_user((void __user *)(unsigned long)req32.value,
33769+ || __put_user((unsigned long)req32.value,
33770 &request->value))
33771 return -EFAULT;
33772
33773diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33774index b79ecc4..8dab92d 100644
33775--- a/drivers/gpu/drm/radeon/radeon_irq.c
33776+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33777@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33778 unsigned int ret;
33779 RING_LOCALS;
33780
33781- atomic_inc(&dev_priv->swi_emitted);
33782- ret = atomic_read(&dev_priv->swi_emitted);
33783+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33784+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33785
33786 BEGIN_RING(4);
33787 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33788@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33789 drm_radeon_private_t *dev_priv =
33790 (drm_radeon_private_t *) dev->dev_private;
33791
33792- atomic_set(&dev_priv->swi_emitted, 0);
33793+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33794 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33795
33796 dev->max_vblank_count = 0x001fffff;
33797diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33798index 4747910..48ca4b3 100644
33799--- a/drivers/gpu/drm/radeon/radeon_state.c
33800+++ b/drivers/gpu/drm/radeon/radeon_state.c
33801@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33802 {
33803 drm_radeon_private_t *dev_priv = dev->dev_private;
33804 drm_radeon_getparam_t *param = data;
33805- int value;
33806+ int value = 0;
33807
33808 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33809
33810diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33811index 1381e06..0e53b17 100644
33812--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33813+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33814@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33815 DRM_INFO("radeon: ttm finalized\n");
33816 }
33817
33818-static struct vm_operations_struct radeon_ttm_vm_ops;
33819-static const struct vm_operations_struct *ttm_vm_ops = NULL;
33820-
33821-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33822-{
33823- struct ttm_buffer_object *bo;
33824- int r;
33825-
33826- bo = (struct ttm_buffer_object *)vma->vm_private_data;
33827- if (bo == NULL) {
33828- return VM_FAULT_NOPAGE;
33829- }
33830- r = ttm_vm_ops->fault(vma, vmf);
33831- return r;
33832-}
33833-
33834 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33835 {
33836 struct drm_file *file_priv;
33837 struct radeon_device *rdev;
33838- int r;
33839
33840 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33841 return drm_mmap(filp, vma);
33842@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33843
33844 file_priv = (struct drm_file *)filp->private_data;
33845 rdev = file_priv->minor->dev->dev_private;
33846- if (rdev == NULL) {
33847+ if (!rdev)
33848 return -EINVAL;
33849- }
33850- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33851- if (unlikely(r != 0)) {
33852- return r;
33853- }
33854- if (unlikely(ttm_vm_ops == NULL)) {
33855- ttm_vm_ops = vma->vm_ops;
33856- radeon_ttm_vm_ops = *ttm_vm_ops;
33857- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33858- }
33859- vma->vm_ops = &radeon_ttm_vm_ops;
33860- return 0;
33861+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33862 }
33863
33864
33865diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33866index b12ff76..0bd0c6e 100644
33867--- a/drivers/gpu/drm/radeon/rs690.c
33868+++ b/drivers/gpu/drm/radeon/rs690.c
33869@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33870 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33871 rdev->pm.sideport_bandwidth.full)
33872 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33873- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33874+ read_delay_latency.full = rfixed_const(800 * 1000);
33875 read_delay_latency.full = rfixed_div(read_delay_latency,
33876 rdev->pm.igp_sideport_mclk);
33877+ a.full = rfixed_const(370);
33878+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33879 } else {
33880 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33881 rdev->pm.k8_bandwidth.full)
33882diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33883index 0ed436e..e6e7ce3 100644
33884--- a/drivers/gpu/drm/ttm/ttm_bo.c
33885+++ b/drivers/gpu/drm/ttm/ttm_bo.c
33886@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33887 NULL
33888 };
33889
33890-static struct sysfs_ops ttm_bo_global_ops = {
33891+static const struct sysfs_ops ttm_bo_global_ops = {
33892 .show = &ttm_bo_global_show
33893 };
33894
33895diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33896index 1c040d0..f9e4af8 100644
33897--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33898+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33899@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33900 {
33901 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33902 vma->vm_private_data;
33903- struct ttm_bo_device *bdev = bo->bdev;
33904+ struct ttm_bo_device *bdev;
33905 unsigned long bus_base;
33906 unsigned long bus_offset;
33907 unsigned long bus_size;
33908@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33909 unsigned long address = (unsigned long)vmf->virtual_address;
33910 int retval = VM_FAULT_NOPAGE;
33911
33912+ if (!bo)
33913+ return VM_FAULT_NOPAGE;
33914+ bdev = bo->bdev;
33915+
33916 /*
33917 * Work around locking order reversal in fault / nopfn
33918 * between mmap_sem and bo_reserve: Perform a trylock operation
33919diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33920index b170071..28ae90e 100644
33921--- a/drivers/gpu/drm/ttm/ttm_global.c
33922+++ b/drivers/gpu/drm/ttm/ttm_global.c
33923@@ -36,7 +36,7 @@
33924 struct ttm_global_item {
33925 struct mutex mutex;
33926 void *object;
33927- int refcount;
33928+ atomic_t refcount;
33929 };
33930
33931 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33932@@ -49,7 +49,7 @@ void ttm_global_init(void)
33933 struct ttm_global_item *item = &glob[i];
33934 mutex_init(&item->mutex);
33935 item->object = NULL;
33936- item->refcount = 0;
33937+ atomic_set(&item->refcount, 0);
33938 }
33939 }
33940
33941@@ -59,7 +59,7 @@ void ttm_global_release(void)
33942 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33943 struct ttm_global_item *item = &glob[i];
33944 BUG_ON(item->object != NULL);
33945- BUG_ON(item->refcount != 0);
33946+ BUG_ON(atomic_read(&item->refcount) != 0);
33947 }
33948 }
33949
33950@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33951 void *object;
33952
33953 mutex_lock(&item->mutex);
33954- if (item->refcount == 0) {
33955+ if (atomic_read(&item->refcount) == 0) {
33956 item->object = kzalloc(ref->size, GFP_KERNEL);
33957 if (unlikely(item->object == NULL)) {
33958 ret = -ENOMEM;
33959@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33960 goto out_err;
33961
33962 }
33963- ++item->refcount;
33964+ atomic_inc(&item->refcount);
33965 ref->object = item->object;
33966 object = item->object;
33967 mutex_unlock(&item->mutex);
33968@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33969 struct ttm_global_item *item = &glob[ref->global_type];
33970
33971 mutex_lock(&item->mutex);
33972- BUG_ON(item->refcount == 0);
33973+ BUG_ON(atomic_read(&item->refcount) == 0);
33974 BUG_ON(ref->object != item->object);
33975- if (--item->refcount == 0) {
33976+ if (atomic_dec_and_test(&item->refcount)) {
33977 ref->release(ref);
33978 item->object = NULL;
33979 }
33980diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33981index 072c281..d8ef483 100644
33982--- a/drivers/gpu/drm/ttm/ttm_memory.c
33983+++ b/drivers/gpu/drm/ttm/ttm_memory.c
33984@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33985 NULL
33986 };
33987
33988-static struct sysfs_ops ttm_mem_zone_ops = {
33989+static const struct sysfs_ops ttm_mem_zone_ops = {
33990 .show = &ttm_mem_zone_show,
33991 .store = &ttm_mem_zone_store
33992 };
33993diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33994index cafcb84..b8e66cc 100644
33995--- a/drivers/gpu/drm/via/via_drv.h
33996+++ b/drivers/gpu/drm/via/via_drv.h
33997@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33998 typedef uint32_t maskarray_t[5];
33999
34000 typedef struct drm_via_irq {
34001- atomic_t irq_received;
34002+ atomic_unchecked_t irq_received;
34003 uint32_t pending_mask;
34004 uint32_t enable_mask;
34005 wait_queue_head_t irq_queue;
34006@@ -75,7 +75,7 @@ typedef struct drm_via_private {
34007 struct timeval last_vblank;
34008 int last_vblank_valid;
34009 unsigned usec_per_vblank;
34010- atomic_t vbl_received;
34011+ atomic_unchecked_t vbl_received;
34012 drm_via_state_t hc_state;
34013 char pci_buf[VIA_PCI_BUF_SIZE];
34014 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34015diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34016index 5935b88..127a8a6 100644
34017--- a/drivers/gpu/drm/via/via_irq.c
34018+++ b/drivers/gpu/drm/via/via_irq.c
34019@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
34020 if (crtc != 0)
34021 return 0;
34022
34023- return atomic_read(&dev_priv->vbl_received);
34024+ return atomic_read_unchecked(&dev_priv->vbl_received);
34025 }
34026
34027 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34028@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34029
34030 status = VIA_READ(VIA_REG_INTERRUPT);
34031 if (status & VIA_IRQ_VBLANK_PENDING) {
34032- atomic_inc(&dev_priv->vbl_received);
34033- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
34034+ atomic_inc_unchecked(&dev_priv->vbl_received);
34035+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
34036 do_gettimeofday(&cur_vblank);
34037 if (dev_priv->last_vblank_valid) {
34038 dev_priv->usec_per_vblank =
34039@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34040 dev_priv->last_vblank = cur_vblank;
34041 dev_priv->last_vblank_valid = 1;
34042 }
34043- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
34044+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
34045 DRM_DEBUG("US per vblank is: %u\n",
34046 dev_priv->usec_per_vblank);
34047 }
34048@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34049
34050 for (i = 0; i < dev_priv->num_irqs; ++i) {
34051 if (status & cur_irq->pending_mask) {
34052- atomic_inc(&cur_irq->irq_received);
34053+ atomic_inc_unchecked(&cur_irq->irq_received);
34054 DRM_WAKEUP(&cur_irq->irq_queue);
34055 handled = 1;
34056 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
34057@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
34058 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34059 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
34060 masks[irq][4]));
34061- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
34062+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
34063 } else {
34064 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34065 (((cur_irq_sequence =
34066- atomic_read(&cur_irq->irq_received)) -
34067+ atomic_read_unchecked(&cur_irq->irq_received)) -
34068 *sequence) <= (1 << 23)));
34069 }
34070 *sequence = cur_irq_sequence;
34071@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
34072 }
34073
34074 for (i = 0; i < dev_priv->num_irqs; ++i) {
34075- atomic_set(&cur_irq->irq_received, 0);
34076+ atomic_set_unchecked(&cur_irq->irq_received, 0);
34077 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
34078 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
34079 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
34080@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
34081 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
34082 case VIA_IRQ_RELATIVE:
34083 irqwait->request.sequence +=
34084- atomic_read(&cur_irq->irq_received);
34085+ atomic_read_unchecked(&cur_irq->irq_received);
34086 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
34087 case VIA_IRQ_ABSOLUTE:
34088 break;
34089diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
34090index aa8688d..6a0140c 100644
34091--- a/drivers/gpu/vga/vgaarb.c
34092+++ b/drivers/gpu/vga/vgaarb.c
34093@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
34094 uc = &priv->cards[i];
34095 }
34096
34097- if (!uc)
34098- return -EINVAL;
34099+ if (!uc) {
34100+ ret_val = -EINVAL;
34101+ goto done;
34102+ }
34103
34104- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
34105- return -EINVAL;
34106+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
34107+ ret_val = -EINVAL;
34108+ goto done;
34109+ }
34110
34111- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
34112- return -EINVAL;
34113+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
34114+ ret_val = -EINVAL;
34115+ goto done;
34116+ }
34117
34118 vga_put(pdev, io_state);
34119
34120diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34121index 11f8069..4783396 100644
34122--- a/drivers/hid/hid-core.c
34123+++ b/drivers/hid/hid-core.c
34124@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
34125
34126 int hid_add_device(struct hid_device *hdev)
34127 {
34128- static atomic_t id = ATOMIC_INIT(0);
34129+ static atomic_unchecked_t id = ATOMIC_INIT(0);
34130 int ret;
34131
34132 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34133@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34134 /* XXX hack, any other cleaner solution after the driver core
34135 * is converted to allow more than 20 bytes as the device name? */
34136 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34137- hdev->vendor, hdev->product, atomic_inc_return(&id));
34138+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34139
34140 ret = device_add(&hdev->dev);
34141 if (!ret)
34142diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34143index 8b6ee24..70f657d 100644
34144--- a/drivers/hid/usbhid/hiddev.c
34145+++ b/drivers/hid/usbhid/hiddev.c
34146@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34147 return put_user(HID_VERSION, (int __user *)arg);
34148
34149 case HIDIOCAPPLICATION:
34150- if (arg < 0 || arg >= hid->maxapplication)
34151+ if (arg >= hid->maxapplication)
34152 return -EINVAL;
34153
34154 for (i = 0; i < hid->maxcollection; i++)
34155diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34156index 5d5ed69..f40533e 100644
34157--- a/drivers/hwmon/lis3lv02d.c
34158+++ b/drivers/hwmon/lis3lv02d.c
34159@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34160 * the lid is closed. This leads to interrupts as soon as a little move
34161 * is done.
34162 */
34163- atomic_inc(&lis3_dev.count);
34164+ atomic_inc_unchecked(&lis3_dev.count);
34165
34166 wake_up_interruptible(&lis3_dev.misc_wait);
34167 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34168@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34169 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34170 return -EBUSY; /* already open */
34171
34172- atomic_set(&lis3_dev.count, 0);
34173+ atomic_set_unchecked(&lis3_dev.count, 0);
34174
34175 /*
34176 * The sensor can generate interrupts for free-fall and direction
34177@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34178 add_wait_queue(&lis3_dev.misc_wait, &wait);
34179 while (true) {
34180 set_current_state(TASK_INTERRUPTIBLE);
34181- data = atomic_xchg(&lis3_dev.count, 0);
34182+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34183 if (data)
34184 break;
34185
34186@@ -244,7 +244,7 @@ out:
34187 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34188 {
34189 poll_wait(file, &lis3_dev.misc_wait, wait);
34190- if (atomic_read(&lis3_dev.count))
34191+ if (atomic_read_unchecked(&lis3_dev.count))
34192 return POLLIN | POLLRDNORM;
34193 return 0;
34194 }
34195diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34196index 7cdd76f..fe0efdf 100644
34197--- a/drivers/hwmon/lis3lv02d.h
34198+++ b/drivers/hwmon/lis3lv02d.h
34199@@ -201,7 +201,7 @@ struct lis3lv02d {
34200
34201 struct input_polled_dev *idev; /* input device */
34202 struct platform_device *pdev; /* platform device */
34203- atomic_t count; /* interrupt count after last read */
34204+ atomic_unchecked_t count; /* interrupt count after last read */
34205 int xcalib; /* calibrated null value for x */
34206 int ycalib; /* calibrated null value for y */
34207 int zcalib; /* calibrated null value for z */
34208diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34209index 740785e..5a5c6c6 100644
34210--- a/drivers/hwmon/sht15.c
34211+++ b/drivers/hwmon/sht15.c
34212@@ -112,7 +112,7 @@ struct sht15_data {
34213 int supply_uV;
34214 int supply_uV_valid;
34215 struct work_struct update_supply_work;
34216- atomic_t interrupt_handled;
34217+ atomic_unchecked_t interrupt_handled;
34218 };
34219
34220 /**
34221@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34222 return ret;
34223
34224 gpio_direction_input(data->pdata->gpio_data);
34225- atomic_set(&data->interrupt_handled, 0);
34226+ atomic_set_unchecked(&data->interrupt_handled, 0);
34227
34228 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34229 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34230 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34231 /* Only relevant if the interrupt hasn't occured. */
34232- if (!atomic_read(&data->interrupt_handled))
34233+ if (!atomic_read_unchecked(&data->interrupt_handled))
34234 schedule_work(&data->read_work);
34235 }
34236 ret = wait_event_timeout(data->wait_queue,
34237@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34238 struct sht15_data *data = d;
34239 /* First disable the interrupt */
34240 disable_irq_nosync(irq);
34241- atomic_inc(&data->interrupt_handled);
34242+ atomic_inc_unchecked(&data->interrupt_handled);
34243 /* Then schedule a reading work struct */
34244 if (data->flag != SHT15_READING_NOTHING)
34245 schedule_work(&data->read_work);
34246@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34247 here as could have gone low in meantime so verify
34248 it hasn't!
34249 */
34250- atomic_set(&data->interrupt_handled, 0);
34251+ atomic_set_unchecked(&data->interrupt_handled, 0);
34252 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34253 /* If still not occured or another handler has been scheduled */
34254 if (gpio_get_value(data->pdata->gpio_data)
34255- || atomic_read(&data->interrupt_handled))
34256+ || atomic_read_unchecked(&data->interrupt_handled))
34257 return;
34258 }
34259 /* Read the data back from the device */
34260diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34261index 97851c5..cb40626 100644
34262--- a/drivers/hwmon/w83791d.c
34263+++ b/drivers/hwmon/w83791d.c
34264@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34265 struct i2c_board_info *info);
34266 static int w83791d_remove(struct i2c_client *client);
34267
34268-static int w83791d_read(struct i2c_client *client, u8 register);
34269-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34270+static int w83791d_read(struct i2c_client *client, u8 reg);
34271+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34272 static struct w83791d_data *w83791d_update_device(struct device *dev);
34273
34274 #ifdef DEBUG
34275diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34276index 378fcb5..5e91fa8 100644
34277--- a/drivers/i2c/busses/i2c-amd756-s4882.c
34278+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34279@@ -43,7 +43,7 @@
34280 extern struct i2c_adapter amd756_smbus;
34281
34282 static struct i2c_adapter *s4882_adapter;
34283-static struct i2c_algorithm *s4882_algo;
34284+static i2c_algorithm_no_const *s4882_algo;
34285
34286 /* Wrapper access functions for multiplexed SMBus */
34287 static DEFINE_MUTEX(amd756_lock);
34288diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34289index 29015eb..af2d8e9 100644
34290--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34291+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34292@@ -41,7 +41,7 @@
34293 extern struct i2c_adapter *nforce2_smbus;
34294
34295 static struct i2c_adapter *s4985_adapter;
34296-static struct i2c_algorithm *s4985_algo;
34297+static i2c_algorithm_no_const *s4985_algo;
34298
34299 /* Wrapper access functions for multiplexed SMBus */
34300 static DEFINE_MUTEX(nforce2_lock);
34301diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34302index 878f8ec..12376fc 100644
34303--- a/drivers/ide/aec62xx.c
34304+++ b/drivers/ide/aec62xx.c
34305@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34306 .cable_detect = atp86x_cable_detect,
34307 };
34308
34309-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34310+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34311 { /* 0: AEC6210 */
34312 .name = DRV_NAME,
34313 .init_chipset = init_chipset_aec62xx,
34314diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34315index e59b6de..4b4fc65 100644
34316--- a/drivers/ide/alim15x3.c
34317+++ b/drivers/ide/alim15x3.c
34318@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34319 .dma_sff_read_status = ide_dma_sff_read_status,
34320 };
34321
34322-static const struct ide_port_info ali15x3_chipset __devinitdata = {
34323+static const struct ide_port_info ali15x3_chipset __devinitconst = {
34324 .name = DRV_NAME,
34325 .init_chipset = init_chipset_ali15x3,
34326 .init_hwif = init_hwif_ali15x3,
34327diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34328index 628cd2e..087a414 100644
34329--- a/drivers/ide/amd74xx.c
34330+++ b/drivers/ide/amd74xx.c
34331@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34332 .udma_mask = udma, \
34333 }
34334
34335-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34336+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34337 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34338 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34339 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34340diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34341index 837322b..837fd71 100644
34342--- a/drivers/ide/atiixp.c
34343+++ b/drivers/ide/atiixp.c
34344@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34345 .cable_detect = atiixp_cable_detect,
34346 };
34347
34348-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34349+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34350 { /* 0: IXP200/300/400/700 */
34351 .name = DRV_NAME,
34352 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34353diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34354index ca0c46f..d55318a 100644
34355--- a/drivers/ide/cmd64x.c
34356+++ b/drivers/ide/cmd64x.c
34357@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34358 .dma_sff_read_status = ide_dma_sff_read_status,
34359 };
34360
34361-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34362+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34363 { /* 0: CMD643 */
34364 .name = DRV_NAME,
34365 .init_chipset = init_chipset_cmd64x,
34366diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34367index 09f98ed..cebc5bc 100644
34368--- a/drivers/ide/cs5520.c
34369+++ b/drivers/ide/cs5520.c
34370@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34371 .set_dma_mode = cs5520_set_dma_mode,
34372 };
34373
34374-static const struct ide_port_info cyrix_chipset __devinitdata = {
34375+static const struct ide_port_info cyrix_chipset __devinitconst = {
34376 .name = DRV_NAME,
34377 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34378 .port_ops = &cs5520_port_ops,
34379diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34380index 40bf05e..7d58ca0 100644
34381--- a/drivers/ide/cs5530.c
34382+++ b/drivers/ide/cs5530.c
34383@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34384 .udma_filter = cs5530_udma_filter,
34385 };
34386
34387-static const struct ide_port_info cs5530_chipset __devinitdata = {
34388+static const struct ide_port_info cs5530_chipset __devinitconst = {
34389 .name = DRV_NAME,
34390 .init_chipset = init_chipset_cs5530,
34391 .init_hwif = init_hwif_cs5530,
34392diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34393index 983d957..53e6172 100644
34394--- a/drivers/ide/cs5535.c
34395+++ b/drivers/ide/cs5535.c
34396@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34397 .cable_detect = cs5535_cable_detect,
34398 };
34399
34400-static const struct ide_port_info cs5535_chipset __devinitdata = {
34401+static const struct ide_port_info cs5535_chipset __devinitconst = {
34402 .name = DRV_NAME,
34403 .port_ops = &cs5535_port_ops,
34404 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34405diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34406index 74fc540..8e933d8 100644
34407--- a/drivers/ide/cy82c693.c
34408+++ b/drivers/ide/cy82c693.c
34409@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34410 .set_dma_mode = cy82c693_set_dma_mode,
34411 };
34412
34413-static const struct ide_port_info cy82c693_chipset __devinitdata = {
34414+static const struct ide_port_info cy82c693_chipset __devinitconst = {
34415 .name = DRV_NAME,
34416 .init_iops = init_iops_cy82c693,
34417 .port_ops = &cy82c693_port_ops,
34418diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34419index 7ce68ef..e78197d 100644
34420--- a/drivers/ide/hpt366.c
34421+++ b/drivers/ide/hpt366.c
34422@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34423 }
34424 };
34425
34426-static const struct hpt_info hpt36x __devinitdata = {
34427+static const struct hpt_info hpt36x __devinitconst = {
34428 .chip_name = "HPT36x",
34429 .chip_type = HPT36x,
34430 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34431@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34432 .timings = &hpt36x_timings
34433 };
34434
34435-static const struct hpt_info hpt370 __devinitdata = {
34436+static const struct hpt_info hpt370 __devinitconst = {
34437 .chip_name = "HPT370",
34438 .chip_type = HPT370,
34439 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34440@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34441 .timings = &hpt37x_timings
34442 };
34443
34444-static const struct hpt_info hpt370a __devinitdata = {
34445+static const struct hpt_info hpt370a __devinitconst = {
34446 .chip_name = "HPT370A",
34447 .chip_type = HPT370A,
34448 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34449@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34450 .timings = &hpt37x_timings
34451 };
34452
34453-static const struct hpt_info hpt374 __devinitdata = {
34454+static const struct hpt_info hpt374 __devinitconst = {
34455 .chip_name = "HPT374",
34456 .chip_type = HPT374,
34457 .udma_mask = ATA_UDMA5,
34458@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34459 .timings = &hpt37x_timings
34460 };
34461
34462-static const struct hpt_info hpt372 __devinitdata = {
34463+static const struct hpt_info hpt372 __devinitconst = {
34464 .chip_name = "HPT372",
34465 .chip_type = HPT372,
34466 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34467@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34468 .timings = &hpt37x_timings
34469 };
34470
34471-static const struct hpt_info hpt372a __devinitdata = {
34472+static const struct hpt_info hpt372a __devinitconst = {
34473 .chip_name = "HPT372A",
34474 .chip_type = HPT372A,
34475 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34476@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34477 .timings = &hpt37x_timings
34478 };
34479
34480-static const struct hpt_info hpt302 __devinitdata = {
34481+static const struct hpt_info hpt302 __devinitconst = {
34482 .chip_name = "HPT302",
34483 .chip_type = HPT302,
34484 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34485@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34486 .timings = &hpt37x_timings
34487 };
34488
34489-static const struct hpt_info hpt371 __devinitdata = {
34490+static const struct hpt_info hpt371 __devinitconst = {
34491 .chip_name = "HPT371",
34492 .chip_type = HPT371,
34493 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34494@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34495 .timings = &hpt37x_timings
34496 };
34497
34498-static const struct hpt_info hpt372n __devinitdata = {
34499+static const struct hpt_info hpt372n __devinitconst = {
34500 .chip_name = "HPT372N",
34501 .chip_type = HPT372N,
34502 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34503@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34504 .timings = &hpt37x_timings
34505 };
34506
34507-static const struct hpt_info hpt302n __devinitdata = {
34508+static const struct hpt_info hpt302n __devinitconst = {
34509 .chip_name = "HPT302N",
34510 .chip_type = HPT302N,
34511 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34512@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34513 .timings = &hpt37x_timings
34514 };
34515
34516-static const struct hpt_info hpt371n __devinitdata = {
34517+static const struct hpt_info hpt371n __devinitconst = {
34518 .chip_name = "HPT371N",
34519 .chip_type = HPT371N,
34520 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34521@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34522 .dma_sff_read_status = ide_dma_sff_read_status,
34523 };
34524
34525-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34526+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34527 { /* 0: HPT36x */
34528 .name = DRV_NAME,
34529 .init_chipset = init_chipset_hpt366,
34530diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34531index 2de76cc..74186a1 100644
34532--- a/drivers/ide/ide-cd.c
34533+++ b/drivers/ide/ide-cd.c
34534@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34535 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34536 if ((unsigned long)buf & alignment
34537 || blk_rq_bytes(rq) & q->dma_pad_mask
34538- || object_is_on_stack(buf))
34539+ || object_starts_on_stack(buf))
34540 drive->dma = 0;
34541 }
34542 }
34543diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34544index fefbdfc..62ff465 100644
34545--- a/drivers/ide/ide-floppy.c
34546+++ b/drivers/ide/ide-floppy.c
34547@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34548 u8 pc_buf[256], header_len, desc_cnt;
34549 int i, rc = 1, blocks, length;
34550
34551+ pax_track_stack();
34552+
34553 ide_debug_log(IDE_DBG_FUNC, "enter");
34554
34555 drive->bios_cyl = 0;
34556diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34557index 39d4e01..11538ce 100644
34558--- a/drivers/ide/ide-pci-generic.c
34559+++ b/drivers/ide/ide-pci-generic.c
34560@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34561 .udma_mask = ATA_UDMA6, \
34562 }
34563
34564-static const struct ide_port_info generic_chipsets[] __devinitdata = {
34565+static const struct ide_port_info generic_chipsets[] __devinitconst = {
34566 /* 0: Unknown */
34567 DECLARE_GENERIC_PCI_DEV(0),
34568
34569diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34570index 0d266a5..aaca790 100644
34571--- a/drivers/ide/it8172.c
34572+++ b/drivers/ide/it8172.c
34573@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34574 .set_dma_mode = it8172_set_dma_mode,
34575 };
34576
34577-static const struct ide_port_info it8172_port_info __devinitdata = {
34578+static const struct ide_port_info it8172_port_info __devinitconst = {
34579 .name = DRV_NAME,
34580 .port_ops = &it8172_port_ops,
34581 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34582diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34583index 4797616..4be488a 100644
34584--- a/drivers/ide/it8213.c
34585+++ b/drivers/ide/it8213.c
34586@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34587 .cable_detect = it8213_cable_detect,
34588 };
34589
34590-static const struct ide_port_info it8213_chipset __devinitdata = {
34591+static const struct ide_port_info it8213_chipset __devinitconst = {
34592 .name = DRV_NAME,
34593 .enablebits = { {0x41, 0x80, 0x80} },
34594 .port_ops = &it8213_port_ops,
34595diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34596index 51aa745..146ee60 100644
34597--- a/drivers/ide/it821x.c
34598+++ b/drivers/ide/it821x.c
34599@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34600 .cable_detect = it821x_cable_detect,
34601 };
34602
34603-static const struct ide_port_info it821x_chipset __devinitdata = {
34604+static const struct ide_port_info it821x_chipset __devinitconst = {
34605 .name = DRV_NAME,
34606 .init_chipset = init_chipset_it821x,
34607 .init_hwif = init_hwif_it821x,
34608diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34609index bf2be64..9270098 100644
34610--- a/drivers/ide/jmicron.c
34611+++ b/drivers/ide/jmicron.c
34612@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34613 .cable_detect = jmicron_cable_detect,
34614 };
34615
34616-static const struct ide_port_info jmicron_chipset __devinitdata = {
34617+static const struct ide_port_info jmicron_chipset __devinitconst = {
34618 .name = DRV_NAME,
34619 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34620 .port_ops = &jmicron_port_ops,
34621diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34622index 95327a2..73f78d8 100644
34623--- a/drivers/ide/ns87415.c
34624+++ b/drivers/ide/ns87415.c
34625@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34626 .dma_sff_read_status = superio_dma_sff_read_status,
34627 };
34628
34629-static const struct ide_port_info ns87415_chipset __devinitdata = {
34630+static const struct ide_port_info ns87415_chipset __devinitconst = {
34631 .name = DRV_NAME,
34632 .init_hwif = init_hwif_ns87415,
34633 .tp_ops = &ns87415_tp_ops,
34634diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34635index f1d70d6..e1de05b 100644
34636--- a/drivers/ide/opti621.c
34637+++ b/drivers/ide/opti621.c
34638@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34639 .set_pio_mode = opti621_set_pio_mode,
34640 };
34641
34642-static const struct ide_port_info opti621_chipset __devinitdata = {
34643+static const struct ide_port_info opti621_chipset __devinitconst = {
34644 .name = DRV_NAME,
34645 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34646 .port_ops = &opti621_port_ops,
34647diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34648index 65ba823..7311f4d 100644
34649--- a/drivers/ide/pdc202xx_new.c
34650+++ b/drivers/ide/pdc202xx_new.c
34651@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34652 .udma_mask = udma, \
34653 }
34654
34655-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34656+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34657 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34658 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34659 };
34660diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34661index cb812f3..af816ef 100644
34662--- a/drivers/ide/pdc202xx_old.c
34663+++ b/drivers/ide/pdc202xx_old.c
34664@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34665 .max_sectors = sectors, \
34666 }
34667
34668-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34669+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34670 { /* 0: PDC20246 */
34671 .name = DRV_NAME,
34672 .init_chipset = init_chipset_pdc202xx,
34673diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34674index bf14f39..15c4b98 100644
34675--- a/drivers/ide/piix.c
34676+++ b/drivers/ide/piix.c
34677@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34678 .udma_mask = udma, \
34679 }
34680
34681-static const struct ide_port_info piix_pci_info[] __devinitdata = {
34682+static const struct ide_port_info piix_pci_info[] __devinitconst = {
34683 /* 0: MPIIX */
34684 { /*
34685 * MPIIX actually has only a single IDE channel mapped to
34686diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34687index a6414a8..c04173e 100644
34688--- a/drivers/ide/rz1000.c
34689+++ b/drivers/ide/rz1000.c
34690@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34691 }
34692 }
34693
34694-static const struct ide_port_info rz1000_chipset __devinitdata = {
34695+static const struct ide_port_info rz1000_chipset __devinitconst = {
34696 .name = DRV_NAME,
34697 .host_flags = IDE_HFLAG_NO_DMA,
34698 };
34699diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34700index d467478..9203942 100644
34701--- a/drivers/ide/sc1200.c
34702+++ b/drivers/ide/sc1200.c
34703@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34704 .dma_sff_read_status = ide_dma_sff_read_status,
34705 };
34706
34707-static const struct ide_port_info sc1200_chipset __devinitdata = {
34708+static const struct ide_port_info sc1200_chipset __devinitconst = {
34709 .name = DRV_NAME,
34710 .port_ops = &sc1200_port_ops,
34711 .dma_ops = &sc1200_dma_ops,
34712diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34713index 1104bb3..59c5194 100644
34714--- a/drivers/ide/scc_pata.c
34715+++ b/drivers/ide/scc_pata.c
34716@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34717 .dma_sff_read_status = scc_dma_sff_read_status,
34718 };
34719
34720-static const struct ide_port_info scc_chipset __devinitdata = {
34721+static const struct ide_port_info scc_chipset __devinitconst = {
34722 .name = "sccIDE",
34723 .init_iops = init_iops_scc,
34724 .init_dma = scc_init_dma,
34725diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34726index b6554ef..6cc2cc3 100644
34727--- a/drivers/ide/serverworks.c
34728+++ b/drivers/ide/serverworks.c
34729@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34730 .cable_detect = svwks_cable_detect,
34731 };
34732
34733-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34734+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34735 { /* 0: OSB4 */
34736 .name = DRV_NAME,
34737 .init_chipset = init_chipset_svwks,
34738diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34739index ab3db61..afed580 100644
34740--- a/drivers/ide/setup-pci.c
34741+++ b/drivers/ide/setup-pci.c
34742@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34743 int ret, i, n_ports = dev2 ? 4 : 2;
34744 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34745
34746+ pax_track_stack();
34747+
34748 for (i = 0; i < n_ports / 2; i++) {
34749 ret = ide_setup_pci_controller(pdev[i], d, !i);
34750 if (ret < 0)
34751diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34752index d95df52..0b03a39 100644
34753--- a/drivers/ide/siimage.c
34754+++ b/drivers/ide/siimage.c
34755@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34756 .udma_mask = ATA_UDMA6, \
34757 }
34758
34759-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34760+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34761 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34762 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34763 };
34764diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34765index 3b88eba..ca8699d 100644
34766--- a/drivers/ide/sis5513.c
34767+++ b/drivers/ide/sis5513.c
34768@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34769 .cable_detect = sis_cable_detect,
34770 };
34771
34772-static const struct ide_port_info sis5513_chipset __devinitdata = {
34773+static const struct ide_port_info sis5513_chipset __devinitconst = {
34774 .name = DRV_NAME,
34775 .init_chipset = init_chipset_sis5513,
34776 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34777diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34778index d698da4..fca42a4 100644
34779--- a/drivers/ide/sl82c105.c
34780+++ b/drivers/ide/sl82c105.c
34781@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34782 .dma_sff_read_status = ide_dma_sff_read_status,
34783 };
34784
34785-static const struct ide_port_info sl82c105_chipset __devinitdata = {
34786+static const struct ide_port_info sl82c105_chipset __devinitconst = {
34787 .name = DRV_NAME,
34788 .init_chipset = init_chipset_sl82c105,
34789 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34790diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34791index 1ccfb40..83d5779 100644
34792--- a/drivers/ide/slc90e66.c
34793+++ b/drivers/ide/slc90e66.c
34794@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34795 .cable_detect = slc90e66_cable_detect,
34796 };
34797
34798-static const struct ide_port_info slc90e66_chipset __devinitdata = {
34799+static const struct ide_port_info slc90e66_chipset __devinitconst = {
34800 .name = DRV_NAME,
34801 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34802 .port_ops = &slc90e66_port_ops,
34803diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34804index 05a93d6..5f9e325 100644
34805--- a/drivers/ide/tc86c001.c
34806+++ b/drivers/ide/tc86c001.c
34807@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34808 .dma_sff_read_status = ide_dma_sff_read_status,
34809 };
34810
34811-static const struct ide_port_info tc86c001_chipset __devinitdata = {
34812+static const struct ide_port_info tc86c001_chipset __devinitconst = {
34813 .name = DRV_NAME,
34814 .init_hwif = init_hwif_tc86c001,
34815 .port_ops = &tc86c001_port_ops,
34816diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34817index 8773c3b..7907d6c 100644
34818--- a/drivers/ide/triflex.c
34819+++ b/drivers/ide/triflex.c
34820@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34821 .set_dma_mode = triflex_set_mode,
34822 };
34823
34824-static const struct ide_port_info triflex_device __devinitdata = {
34825+static const struct ide_port_info triflex_device __devinitconst = {
34826 .name = DRV_NAME,
34827 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34828 .port_ops = &triflex_port_ops,
34829diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34830index 4b42ca0..e494a98 100644
34831--- a/drivers/ide/trm290.c
34832+++ b/drivers/ide/trm290.c
34833@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34834 .dma_check = trm290_dma_check,
34835 };
34836
34837-static const struct ide_port_info trm290_chipset __devinitdata = {
34838+static const struct ide_port_info trm290_chipset __devinitconst = {
34839 .name = DRV_NAME,
34840 .init_hwif = init_hwif_trm290,
34841 .tp_ops = &trm290_tp_ops,
34842diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34843index 028de26..520d5d5 100644
34844--- a/drivers/ide/via82cxxx.c
34845+++ b/drivers/ide/via82cxxx.c
34846@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34847 .cable_detect = via82cxxx_cable_detect,
34848 };
34849
34850-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34851+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34852 .name = DRV_NAME,
34853 .init_chipset = init_chipset_via82cxxx,
34854 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34855diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34856index 2cd00b5..14de699 100644
34857--- a/drivers/ieee1394/dv1394.c
34858+++ b/drivers/ieee1394/dv1394.c
34859@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34860 based upon DIF section and sequence
34861 */
34862
34863-static void inline
34864+static inline void
34865 frame_put_packet (struct frame *f, struct packet *p)
34866 {
34867 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34868diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34869index e947d8f..6a966b9 100644
34870--- a/drivers/ieee1394/hosts.c
34871+++ b/drivers/ieee1394/hosts.c
34872@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34873 }
34874
34875 static struct hpsb_host_driver dummy_driver = {
34876+ .name = "dummy",
34877 .transmit_packet = dummy_transmit_packet,
34878 .devctl = dummy_devctl,
34879 .isoctl = dummy_isoctl
34880diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34881index ddaab6e..8d37435 100644
34882--- a/drivers/ieee1394/init_ohci1394_dma.c
34883+++ b/drivers/ieee1394/init_ohci1394_dma.c
34884@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34885 for (func = 0; func < 8; func++) {
34886 u32 class = read_pci_config(num,slot,func,
34887 PCI_CLASS_REVISION);
34888- if ((class == 0xffffffff))
34889+ if (class == 0xffffffff)
34890 continue; /* No device at this func */
34891
34892 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34893diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34894index 65c1429..5d8c11f 100644
34895--- a/drivers/ieee1394/ohci1394.c
34896+++ b/drivers/ieee1394/ohci1394.c
34897@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34898 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34899
34900 /* Module Parameters */
34901-static int phys_dma = 1;
34902+static int phys_dma;
34903 module_param(phys_dma, int, 0444);
34904-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34905+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34906
34907 static void dma_trm_tasklet(unsigned long data);
34908 static void dma_trm_reset(struct dma_trm_ctx *d);
34909diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34910index f199896..78c9fc8 100644
34911--- a/drivers/ieee1394/sbp2.c
34912+++ b/drivers/ieee1394/sbp2.c
34913@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34914 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34915 MODULE_LICENSE("GPL");
34916
34917-static int sbp2_module_init(void)
34918+static int __init sbp2_module_init(void)
34919 {
34920 int ret;
34921
34922diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34923index a5dea6b..0cefe8f 100644
34924--- a/drivers/infiniband/core/cm.c
34925+++ b/drivers/infiniband/core/cm.c
34926@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34927
34928 struct cm_counter_group {
34929 struct kobject obj;
34930- atomic_long_t counter[CM_ATTR_COUNT];
34931+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34932 };
34933
34934 struct cm_counter_attribute {
34935@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34936 struct ib_mad_send_buf *msg = NULL;
34937 int ret;
34938
34939- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34940+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34941 counter[CM_REQ_COUNTER]);
34942
34943 /* Quick state check to discard duplicate REQs. */
34944@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34945 if (!cm_id_priv)
34946 return;
34947
34948- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34949+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34950 counter[CM_REP_COUNTER]);
34951 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34952 if (ret)
34953@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34954 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34955 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34956 spin_unlock_irq(&cm_id_priv->lock);
34957- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34958+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34959 counter[CM_RTU_COUNTER]);
34960 goto out;
34961 }
34962@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34963 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34964 dreq_msg->local_comm_id);
34965 if (!cm_id_priv) {
34966- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34967+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34968 counter[CM_DREQ_COUNTER]);
34969 cm_issue_drep(work->port, work->mad_recv_wc);
34970 return -EINVAL;
34971@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34972 case IB_CM_MRA_REP_RCVD:
34973 break;
34974 case IB_CM_TIMEWAIT:
34975- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34976+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34977 counter[CM_DREQ_COUNTER]);
34978 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34979 goto unlock;
34980@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34981 cm_free_msg(msg);
34982 goto deref;
34983 case IB_CM_DREQ_RCVD:
34984- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34985+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34986 counter[CM_DREQ_COUNTER]);
34987 goto unlock;
34988 default:
34989@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34990 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34991 cm_id_priv->msg, timeout)) {
34992 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34993- atomic_long_inc(&work->port->
34994+ atomic_long_inc_unchecked(&work->port->
34995 counter_group[CM_RECV_DUPLICATES].
34996 counter[CM_MRA_COUNTER]);
34997 goto out;
34998@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34999 break;
35000 case IB_CM_MRA_REQ_RCVD:
35001 case IB_CM_MRA_REP_RCVD:
35002- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35003+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35004 counter[CM_MRA_COUNTER]);
35005 /* fall through */
35006 default:
35007@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
35008 case IB_CM_LAP_IDLE:
35009 break;
35010 case IB_CM_MRA_LAP_SENT:
35011- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35012+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35013 counter[CM_LAP_COUNTER]);
35014 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35015 goto unlock;
35016@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
35017 cm_free_msg(msg);
35018 goto deref;
35019 case IB_CM_LAP_RCVD:
35020- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35021+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35022 counter[CM_LAP_COUNTER]);
35023 goto unlock;
35024 default:
35025@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
35026 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
35027 if (cur_cm_id_priv) {
35028 spin_unlock_irq(&cm.lock);
35029- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35030+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35031 counter[CM_SIDR_REQ_COUNTER]);
35032 goto out; /* Duplicate message. */
35033 }
35034@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
35035 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
35036 msg->retries = 1;
35037
35038- atomic_long_add(1 + msg->retries,
35039+ atomic_long_add_unchecked(1 + msg->retries,
35040 &port->counter_group[CM_XMIT].counter[attr_index]);
35041 if (msg->retries)
35042- atomic_long_add(msg->retries,
35043+ atomic_long_add_unchecked(msg->retries,
35044 &port->counter_group[CM_XMIT_RETRIES].
35045 counter[attr_index]);
35046
35047@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
35048 }
35049
35050 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
35051- atomic_long_inc(&port->counter_group[CM_RECV].
35052+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
35053 counter[attr_id - CM_ATTR_ID_OFFSET]);
35054
35055 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
35056@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
35057 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
35058
35059 return sprintf(buf, "%ld\n",
35060- atomic_long_read(&group->counter[cm_attr->index]));
35061+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
35062 }
35063
35064-static struct sysfs_ops cm_counter_ops = {
35065+static const struct sysfs_ops cm_counter_ops = {
35066 .show = cm_show_counter
35067 };
35068
35069diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
35070index 8fd3a6f..61d8075 100644
35071--- a/drivers/infiniband/core/cma.c
35072+++ b/drivers/infiniband/core/cma.c
35073@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
35074
35075 req.private_data_len = sizeof(struct cma_hdr) +
35076 conn_param->private_data_len;
35077+ if (req.private_data_len < conn_param->private_data_len)
35078+ return -EINVAL;
35079+
35080 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35081 if (!req.private_data)
35082 return -ENOMEM;
35083@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
35084 memset(&req, 0, sizeof req);
35085 offset = cma_user_data_offset(id_priv->id.ps);
35086 req.private_data_len = offset + conn_param->private_data_len;
35087+ if (req.private_data_len < conn_param->private_data_len)
35088+ return -EINVAL;
35089+
35090 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35091 if (!private_data)
35092 return -ENOMEM;
35093diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
35094index 4507043..14ad522 100644
35095--- a/drivers/infiniband/core/fmr_pool.c
35096+++ b/drivers/infiniband/core/fmr_pool.c
35097@@ -97,8 +97,8 @@ struct ib_fmr_pool {
35098
35099 struct task_struct *thread;
35100
35101- atomic_t req_ser;
35102- atomic_t flush_ser;
35103+ atomic_unchecked_t req_ser;
35104+ atomic_unchecked_t flush_ser;
35105
35106 wait_queue_head_t force_wait;
35107 };
35108@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35109 struct ib_fmr_pool *pool = pool_ptr;
35110
35111 do {
35112- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
35113+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
35114 ib_fmr_batch_release(pool);
35115
35116- atomic_inc(&pool->flush_ser);
35117+ atomic_inc_unchecked(&pool->flush_ser);
35118 wake_up_interruptible(&pool->force_wait);
35119
35120 if (pool->flush_function)
35121@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35122 }
35123
35124 set_current_state(TASK_INTERRUPTIBLE);
35125- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35126+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35127 !kthread_should_stop())
35128 schedule();
35129 __set_current_state(TASK_RUNNING);
35130@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35131 pool->dirty_watermark = params->dirty_watermark;
35132 pool->dirty_len = 0;
35133 spin_lock_init(&pool->pool_lock);
35134- atomic_set(&pool->req_ser, 0);
35135- atomic_set(&pool->flush_ser, 0);
35136+ atomic_set_unchecked(&pool->req_ser, 0);
35137+ atomic_set_unchecked(&pool->flush_ser, 0);
35138 init_waitqueue_head(&pool->force_wait);
35139
35140 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35141@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35142 }
35143 spin_unlock_irq(&pool->pool_lock);
35144
35145- serial = atomic_inc_return(&pool->req_ser);
35146+ serial = atomic_inc_return_unchecked(&pool->req_ser);
35147 wake_up_process(pool->thread);
35148
35149 if (wait_event_interruptible(pool->force_wait,
35150- atomic_read(&pool->flush_ser) - serial >= 0))
35151+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35152 return -EINTR;
35153
35154 return 0;
35155@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35156 } else {
35157 list_add_tail(&fmr->list, &pool->dirty_list);
35158 if (++pool->dirty_len >= pool->dirty_watermark) {
35159- atomic_inc(&pool->req_ser);
35160+ atomic_inc_unchecked(&pool->req_ser);
35161 wake_up_process(pool->thread);
35162 }
35163 }
35164diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35165index 158a214..1558bb7 100644
35166--- a/drivers/infiniband/core/sysfs.c
35167+++ b/drivers/infiniband/core/sysfs.c
35168@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35169 return port_attr->show(p, port_attr, buf);
35170 }
35171
35172-static struct sysfs_ops port_sysfs_ops = {
35173+static const struct sysfs_ops port_sysfs_ops = {
35174 .show = port_attr_show
35175 };
35176
35177diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35178index 5440da0..1194ecb 100644
35179--- a/drivers/infiniband/core/uverbs_marshall.c
35180+++ b/drivers/infiniband/core/uverbs_marshall.c
35181@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35182 dst->grh.sgid_index = src->grh.sgid_index;
35183 dst->grh.hop_limit = src->grh.hop_limit;
35184 dst->grh.traffic_class = src->grh.traffic_class;
35185+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35186 dst->dlid = src->dlid;
35187 dst->sl = src->sl;
35188 dst->src_path_bits = src->src_path_bits;
35189 dst->static_rate = src->static_rate;
35190 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35191 dst->port_num = src->port_num;
35192+ dst->reserved = 0;
35193 }
35194 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35195
35196 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35197 struct ib_qp_attr *src)
35198 {
35199+ dst->qp_state = src->qp_state;
35200 dst->cur_qp_state = src->cur_qp_state;
35201 dst->path_mtu = src->path_mtu;
35202 dst->path_mig_state = src->path_mig_state;
35203@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35204 dst->rnr_retry = src->rnr_retry;
35205 dst->alt_port_num = src->alt_port_num;
35206 dst->alt_timeout = src->alt_timeout;
35207+ memset(dst->reserved, 0, sizeof(dst->reserved));
35208 }
35209 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35210
35211diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35212index 100da85..62e6b88 100644
35213--- a/drivers/infiniband/hw/ipath/ipath_fs.c
35214+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35215@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35216 struct infinipath_counters counters;
35217 struct ipath_devdata *dd;
35218
35219+ pax_track_stack();
35220+
35221 dd = file->f_path.dentry->d_inode->i_private;
35222 dd->ipath_f_read_counters(dd, &counters);
35223
35224diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35225index cbde0cf..afaf55c 100644
35226--- a/drivers/infiniband/hw/nes/nes.c
35227+++ b/drivers/infiniband/hw/nes/nes.c
35228@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35229 LIST_HEAD(nes_adapter_list);
35230 static LIST_HEAD(nes_dev_list);
35231
35232-atomic_t qps_destroyed;
35233+atomic_unchecked_t qps_destroyed;
35234
35235 static unsigned int ee_flsh_adapter;
35236 static unsigned int sysfs_nonidx_addr;
35237@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35238 struct nes_adapter *nesadapter = nesdev->nesadapter;
35239 u32 qp_id;
35240
35241- atomic_inc(&qps_destroyed);
35242+ atomic_inc_unchecked(&qps_destroyed);
35243
35244 /* Free the control structures */
35245
35246diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35247index bcc6abc..9c76b2f 100644
35248--- a/drivers/infiniband/hw/nes/nes.h
35249+++ b/drivers/infiniband/hw/nes/nes.h
35250@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35251 extern unsigned int wqm_quanta;
35252 extern struct list_head nes_adapter_list;
35253
35254-extern atomic_t cm_connects;
35255-extern atomic_t cm_accepts;
35256-extern atomic_t cm_disconnects;
35257-extern atomic_t cm_closes;
35258-extern atomic_t cm_connecteds;
35259-extern atomic_t cm_connect_reqs;
35260-extern atomic_t cm_rejects;
35261-extern atomic_t mod_qp_timouts;
35262-extern atomic_t qps_created;
35263-extern atomic_t qps_destroyed;
35264-extern atomic_t sw_qps_destroyed;
35265+extern atomic_unchecked_t cm_connects;
35266+extern atomic_unchecked_t cm_accepts;
35267+extern atomic_unchecked_t cm_disconnects;
35268+extern atomic_unchecked_t cm_closes;
35269+extern atomic_unchecked_t cm_connecteds;
35270+extern atomic_unchecked_t cm_connect_reqs;
35271+extern atomic_unchecked_t cm_rejects;
35272+extern atomic_unchecked_t mod_qp_timouts;
35273+extern atomic_unchecked_t qps_created;
35274+extern atomic_unchecked_t qps_destroyed;
35275+extern atomic_unchecked_t sw_qps_destroyed;
35276 extern u32 mh_detected;
35277 extern u32 mh_pauses_sent;
35278 extern u32 cm_packets_sent;
35279@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35280 extern u32 cm_listens_created;
35281 extern u32 cm_listens_destroyed;
35282 extern u32 cm_backlog_drops;
35283-extern atomic_t cm_loopbacks;
35284-extern atomic_t cm_nodes_created;
35285-extern atomic_t cm_nodes_destroyed;
35286-extern atomic_t cm_accel_dropped_pkts;
35287-extern atomic_t cm_resets_recvd;
35288+extern atomic_unchecked_t cm_loopbacks;
35289+extern atomic_unchecked_t cm_nodes_created;
35290+extern atomic_unchecked_t cm_nodes_destroyed;
35291+extern atomic_unchecked_t cm_accel_dropped_pkts;
35292+extern atomic_unchecked_t cm_resets_recvd;
35293
35294 extern u32 int_mod_timer_init;
35295 extern u32 int_mod_cq_depth_256;
35296diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35297index 73473db..5ed06e8 100644
35298--- a/drivers/infiniband/hw/nes/nes_cm.c
35299+++ b/drivers/infiniband/hw/nes/nes_cm.c
35300@@ -69,11 +69,11 @@ u32 cm_packets_received;
35301 u32 cm_listens_created;
35302 u32 cm_listens_destroyed;
35303 u32 cm_backlog_drops;
35304-atomic_t cm_loopbacks;
35305-atomic_t cm_nodes_created;
35306-atomic_t cm_nodes_destroyed;
35307-atomic_t cm_accel_dropped_pkts;
35308-atomic_t cm_resets_recvd;
35309+atomic_unchecked_t cm_loopbacks;
35310+atomic_unchecked_t cm_nodes_created;
35311+atomic_unchecked_t cm_nodes_destroyed;
35312+atomic_unchecked_t cm_accel_dropped_pkts;
35313+atomic_unchecked_t cm_resets_recvd;
35314
35315 static inline int mini_cm_accelerated(struct nes_cm_core *,
35316 struct nes_cm_node *);
35317@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35318
35319 static struct nes_cm_core *g_cm_core;
35320
35321-atomic_t cm_connects;
35322-atomic_t cm_accepts;
35323-atomic_t cm_disconnects;
35324-atomic_t cm_closes;
35325-atomic_t cm_connecteds;
35326-atomic_t cm_connect_reqs;
35327-atomic_t cm_rejects;
35328+atomic_unchecked_t cm_connects;
35329+atomic_unchecked_t cm_accepts;
35330+atomic_unchecked_t cm_disconnects;
35331+atomic_unchecked_t cm_closes;
35332+atomic_unchecked_t cm_connecteds;
35333+atomic_unchecked_t cm_connect_reqs;
35334+atomic_unchecked_t cm_rejects;
35335
35336
35337 /**
35338@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35339 cm_node->rem_mac);
35340
35341 add_hte_node(cm_core, cm_node);
35342- atomic_inc(&cm_nodes_created);
35343+ atomic_inc_unchecked(&cm_nodes_created);
35344
35345 return cm_node;
35346 }
35347@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35348 }
35349
35350 atomic_dec(&cm_core->node_cnt);
35351- atomic_inc(&cm_nodes_destroyed);
35352+ atomic_inc_unchecked(&cm_nodes_destroyed);
35353 nesqp = cm_node->nesqp;
35354 if (nesqp) {
35355 nesqp->cm_node = NULL;
35356@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35357
35358 static void drop_packet(struct sk_buff *skb)
35359 {
35360- atomic_inc(&cm_accel_dropped_pkts);
35361+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35362 dev_kfree_skb_any(skb);
35363 }
35364
35365@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35366
35367 int reset = 0; /* whether to send reset in case of err.. */
35368 int passive_state;
35369- atomic_inc(&cm_resets_recvd);
35370+ atomic_inc_unchecked(&cm_resets_recvd);
35371 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35372 " refcnt=%d\n", cm_node, cm_node->state,
35373 atomic_read(&cm_node->ref_count));
35374@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35375 rem_ref_cm_node(cm_node->cm_core, cm_node);
35376 return NULL;
35377 }
35378- atomic_inc(&cm_loopbacks);
35379+ atomic_inc_unchecked(&cm_loopbacks);
35380 loopbackremotenode->loopbackpartner = cm_node;
35381 loopbackremotenode->tcp_cntxt.rcv_wscale =
35382 NES_CM_DEFAULT_RCV_WND_SCALE;
35383@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35384 add_ref_cm_node(cm_node);
35385 } else if (cm_node->state == NES_CM_STATE_TSA) {
35386 rem_ref_cm_node(cm_core, cm_node);
35387- atomic_inc(&cm_accel_dropped_pkts);
35388+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35389 dev_kfree_skb_any(skb);
35390 break;
35391 }
35392@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35393
35394 if ((cm_id) && (cm_id->event_handler)) {
35395 if (issue_disconn) {
35396- atomic_inc(&cm_disconnects);
35397+ atomic_inc_unchecked(&cm_disconnects);
35398 cm_event.event = IW_CM_EVENT_DISCONNECT;
35399 cm_event.status = disconn_status;
35400 cm_event.local_addr = cm_id->local_addr;
35401@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35402 }
35403
35404 if (issue_close) {
35405- atomic_inc(&cm_closes);
35406+ atomic_inc_unchecked(&cm_closes);
35407 nes_disconnect(nesqp, 1);
35408
35409 cm_id->provider_data = nesqp;
35410@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35411
35412 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35413 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35414- atomic_inc(&cm_accepts);
35415+ atomic_inc_unchecked(&cm_accepts);
35416
35417 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35418 atomic_read(&nesvnic->netdev->refcnt));
35419@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35420
35421 struct nes_cm_core *cm_core;
35422
35423- atomic_inc(&cm_rejects);
35424+ atomic_inc_unchecked(&cm_rejects);
35425 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35426 loopback = cm_node->loopbackpartner;
35427 cm_core = cm_node->cm_core;
35428@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35429 ntohl(cm_id->local_addr.sin_addr.s_addr),
35430 ntohs(cm_id->local_addr.sin_port));
35431
35432- atomic_inc(&cm_connects);
35433+ atomic_inc_unchecked(&cm_connects);
35434 nesqp->active_conn = 1;
35435
35436 /* cache the cm_id in the qp */
35437@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35438 if (nesqp->destroyed) {
35439 return;
35440 }
35441- atomic_inc(&cm_connecteds);
35442+ atomic_inc_unchecked(&cm_connecteds);
35443 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35444 " local port 0x%04X. jiffies = %lu.\n",
35445 nesqp->hwqp.qp_id,
35446@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35447
35448 ret = cm_id->event_handler(cm_id, &cm_event);
35449 cm_id->add_ref(cm_id);
35450- atomic_inc(&cm_closes);
35451+ atomic_inc_unchecked(&cm_closes);
35452 cm_event.event = IW_CM_EVENT_CLOSE;
35453 cm_event.status = IW_CM_EVENT_STATUS_OK;
35454 cm_event.provider_data = cm_id->provider_data;
35455@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35456 return;
35457 cm_id = cm_node->cm_id;
35458
35459- atomic_inc(&cm_connect_reqs);
35460+ atomic_inc_unchecked(&cm_connect_reqs);
35461 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35462 cm_node, cm_id, jiffies);
35463
35464@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35465 return;
35466 cm_id = cm_node->cm_id;
35467
35468- atomic_inc(&cm_connect_reqs);
35469+ atomic_inc_unchecked(&cm_connect_reqs);
35470 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35471 cm_node, cm_id, jiffies);
35472
35473diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35474index e593af3..870694a 100644
35475--- a/drivers/infiniband/hw/nes/nes_nic.c
35476+++ b/drivers/infiniband/hw/nes/nes_nic.c
35477@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35478 target_stat_values[++index] = mh_detected;
35479 target_stat_values[++index] = mh_pauses_sent;
35480 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35481- target_stat_values[++index] = atomic_read(&cm_connects);
35482- target_stat_values[++index] = atomic_read(&cm_accepts);
35483- target_stat_values[++index] = atomic_read(&cm_disconnects);
35484- target_stat_values[++index] = atomic_read(&cm_connecteds);
35485- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35486- target_stat_values[++index] = atomic_read(&cm_rejects);
35487- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35488- target_stat_values[++index] = atomic_read(&qps_created);
35489- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35490- target_stat_values[++index] = atomic_read(&qps_destroyed);
35491- target_stat_values[++index] = atomic_read(&cm_closes);
35492+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35493+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35494+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35495+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35496+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35497+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35498+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35499+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35500+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35501+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35502+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35503 target_stat_values[++index] = cm_packets_sent;
35504 target_stat_values[++index] = cm_packets_bounced;
35505 target_stat_values[++index] = cm_packets_created;
35506@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35507 target_stat_values[++index] = cm_listens_created;
35508 target_stat_values[++index] = cm_listens_destroyed;
35509 target_stat_values[++index] = cm_backlog_drops;
35510- target_stat_values[++index] = atomic_read(&cm_loopbacks);
35511- target_stat_values[++index] = atomic_read(&cm_nodes_created);
35512- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35513- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35514- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35515+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35516+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35517+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35518+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35519+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35520 target_stat_values[++index] = int_mod_timer_init;
35521 target_stat_values[++index] = int_mod_cq_depth_1;
35522 target_stat_values[++index] = int_mod_cq_depth_4;
35523diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35524index a680c42..f914deb 100644
35525--- a/drivers/infiniband/hw/nes/nes_verbs.c
35526+++ b/drivers/infiniband/hw/nes/nes_verbs.c
35527@@ -45,9 +45,9 @@
35528
35529 #include <rdma/ib_umem.h>
35530
35531-atomic_t mod_qp_timouts;
35532-atomic_t qps_created;
35533-atomic_t sw_qps_destroyed;
35534+atomic_unchecked_t mod_qp_timouts;
35535+atomic_unchecked_t qps_created;
35536+atomic_unchecked_t sw_qps_destroyed;
35537
35538 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35539
35540@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35541 if (init_attr->create_flags)
35542 return ERR_PTR(-EINVAL);
35543
35544- atomic_inc(&qps_created);
35545+ atomic_inc_unchecked(&qps_created);
35546 switch (init_attr->qp_type) {
35547 case IB_QPT_RC:
35548 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35549@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35550 struct iw_cm_event cm_event;
35551 int ret;
35552
35553- atomic_inc(&sw_qps_destroyed);
35554+ atomic_inc_unchecked(&sw_qps_destroyed);
35555 nesqp->destroyed = 1;
35556
35557 /* Blow away the connection if it exists. */
35558diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35559index ac11be0..3883c04 100644
35560--- a/drivers/input/gameport/gameport.c
35561+++ b/drivers/input/gameport/gameport.c
35562@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35563 */
35564 static void gameport_init_port(struct gameport *gameport)
35565 {
35566- static atomic_t gameport_no = ATOMIC_INIT(0);
35567+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35568
35569 __module_get(THIS_MODULE);
35570
35571 mutex_init(&gameport->drv_mutex);
35572 device_initialize(&gameport->dev);
35573- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35574+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35575 gameport->dev.bus = &gameport_bus;
35576 gameport->dev.release = gameport_release_port;
35577 if (gameport->parent)
35578diff --git a/drivers/input/input.c b/drivers/input/input.c
35579index c82ae82..8cfb9cb 100644
35580--- a/drivers/input/input.c
35581+++ b/drivers/input/input.c
35582@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35583 */
35584 int input_register_device(struct input_dev *dev)
35585 {
35586- static atomic_t input_no = ATOMIC_INIT(0);
35587+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35588 struct input_handler *handler;
35589 const char *path;
35590 int error;
35591@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35592 dev->setkeycode = input_default_setkeycode;
35593
35594 dev_set_name(&dev->dev, "input%ld",
35595- (unsigned long) atomic_inc_return(&input_no) - 1);
35596+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35597
35598 error = device_add(&dev->dev);
35599 if (error)
35600diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35601index ca13a6b..b032b0c 100644
35602--- a/drivers/input/joystick/sidewinder.c
35603+++ b/drivers/input/joystick/sidewinder.c
35604@@ -30,6 +30,7 @@
35605 #include <linux/kernel.h>
35606 #include <linux/module.h>
35607 #include <linux/slab.h>
35608+#include <linux/sched.h>
35609 #include <linux/init.h>
35610 #include <linux/input.h>
35611 #include <linux/gameport.h>
35612@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35613 unsigned char buf[SW_LENGTH];
35614 int i;
35615
35616+ pax_track_stack();
35617+
35618 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35619
35620 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35621diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35622index 79e3edc..01412b9 100644
35623--- a/drivers/input/joystick/xpad.c
35624+++ b/drivers/input/joystick/xpad.c
35625@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35626
35627 static int xpad_led_probe(struct usb_xpad *xpad)
35628 {
35629- static atomic_t led_seq = ATOMIC_INIT(0);
35630+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35631 long led_no;
35632 struct xpad_led *led;
35633 struct led_classdev *led_cdev;
35634@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35635 if (!led)
35636 return -ENOMEM;
35637
35638- led_no = (long)atomic_inc_return(&led_seq) - 1;
35639+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35640
35641 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35642 led->xpad = xpad;
35643diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35644index 0236f0d..c7327f1 100644
35645--- a/drivers/input/serio/serio.c
35646+++ b/drivers/input/serio/serio.c
35647@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35648 */
35649 static void serio_init_port(struct serio *serio)
35650 {
35651- static atomic_t serio_no = ATOMIC_INIT(0);
35652+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35653
35654 __module_get(THIS_MODULE);
35655
35656@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35657 mutex_init(&serio->drv_mutex);
35658 device_initialize(&serio->dev);
35659 dev_set_name(&serio->dev, "serio%ld",
35660- (long)atomic_inc_return(&serio_no) - 1);
35661+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
35662 serio->dev.bus = &serio_bus;
35663 serio->dev.release = serio_release_port;
35664 if (serio->parent) {
35665diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35666index 33dcd8d..2783d25 100644
35667--- a/drivers/isdn/gigaset/common.c
35668+++ b/drivers/isdn/gigaset/common.c
35669@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35670 cs->commands_pending = 0;
35671 cs->cur_at_seq = 0;
35672 cs->gotfwver = -1;
35673- cs->open_count = 0;
35674+ local_set(&cs->open_count, 0);
35675 cs->dev = NULL;
35676 cs->tty = NULL;
35677 cs->tty_dev = NULL;
35678diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35679index a2f6125..6a70677 100644
35680--- a/drivers/isdn/gigaset/gigaset.h
35681+++ b/drivers/isdn/gigaset/gigaset.h
35682@@ -34,6 +34,7 @@
35683 #include <linux/tty_driver.h>
35684 #include <linux/list.h>
35685 #include <asm/atomic.h>
35686+#include <asm/local.h>
35687
35688 #define GIG_VERSION {0,5,0,0}
35689 #define GIG_COMPAT {0,4,0,0}
35690@@ -446,7 +447,7 @@ struct cardstate {
35691 spinlock_t cmdlock;
35692 unsigned curlen, cmdbytes;
35693
35694- unsigned open_count;
35695+ local_t open_count;
35696 struct tty_struct *tty;
35697 struct tasklet_struct if_wake_tasklet;
35698 unsigned control_state;
35699diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35700index b3065b8..c7e8cc9 100644
35701--- a/drivers/isdn/gigaset/interface.c
35702+++ b/drivers/isdn/gigaset/interface.c
35703@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35704 return -ERESTARTSYS; // FIXME -EINTR?
35705 tty->driver_data = cs;
35706
35707- ++cs->open_count;
35708-
35709- if (cs->open_count == 1) {
35710+ if (local_inc_return(&cs->open_count) == 1) {
35711 spin_lock_irqsave(&cs->lock, flags);
35712 cs->tty = tty;
35713 spin_unlock_irqrestore(&cs->lock, flags);
35714@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35715
35716 if (!cs->connected)
35717 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35718- else if (!cs->open_count)
35719+ else if (!local_read(&cs->open_count))
35720 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35721 else {
35722- if (!--cs->open_count) {
35723+ if (!local_dec_return(&cs->open_count)) {
35724 spin_lock_irqsave(&cs->lock, flags);
35725 cs->tty = NULL;
35726 spin_unlock_irqrestore(&cs->lock, flags);
35727@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35728 if (!cs->connected) {
35729 gig_dbg(DEBUG_IF, "not connected");
35730 retval = -ENODEV;
35731- } else if (!cs->open_count)
35732+ } else if (!local_read(&cs->open_count))
35733 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35734 else {
35735 retval = 0;
35736@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35737 if (!cs->connected) {
35738 gig_dbg(DEBUG_IF, "not connected");
35739 retval = -ENODEV;
35740- } else if (!cs->open_count)
35741+ } else if (!local_read(&cs->open_count))
35742 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35743 else if (cs->mstate != MS_LOCKED) {
35744 dev_warn(cs->dev, "can't write to unlocked device\n");
35745@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35746 if (!cs->connected) {
35747 gig_dbg(DEBUG_IF, "not connected");
35748 retval = -ENODEV;
35749- } else if (!cs->open_count)
35750+ } else if (!local_read(&cs->open_count))
35751 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35752 else if (cs->mstate != MS_LOCKED) {
35753 dev_warn(cs->dev, "can't write to unlocked device\n");
35754@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35755
35756 if (!cs->connected)
35757 gig_dbg(DEBUG_IF, "not connected");
35758- else if (!cs->open_count)
35759+ else if (!local_read(&cs->open_count))
35760 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35761 else if (cs->mstate != MS_LOCKED)
35762 dev_warn(cs->dev, "can't write to unlocked device\n");
35763@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35764
35765 if (!cs->connected)
35766 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35767- else if (!cs->open_count)
35768+ else if (!local_read(&cs->open_count))
35769 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35770 else {
35771 //FIXME
35772@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35773
35774 if (!cs->connected)
35775 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35776- else if (!cs->open_count)
35777+ else if (!local_read(&cs->open_count))
35778 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35779 else {
35780 //FIXME
35781@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35782 goto out;
35783 }
35784
35785- if (!cs->open_count) {
35786+ if (!local_read(&cs->open_count)) {
35787 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35788 goto out;
35789 }
35790diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35791index a7c0083..62a7cb6 100644
35792--- a/drivers/isdn/hardware/avm/b1.c
35793+++ b/drivers/isdn/hardware/avm/b1.c
35794@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35795 }
35796 if (left) {
35797 if (t4file->user) {
35798- if (copy_from_user(buf, dp, left))
35799+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35800 return -EFAULT;
35801 } else {
35802 memcpy(buf, dp, left);
35803@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35804 }
35805 if (left) {
35806 if (config->user) {
35807- if (copy_from_user(buf, dp, left))
35808+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35809 return -EFAULT;
35810 } else {
35811 memcpy(buf, dp, left);
35812diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35813index f130724..c373c68 100644
35814--- a/drivers/isdn/hardware/eicon/capidtmf.c
35815+++ b/drivers/isdn/hardware/eicon/capidtmf.c
35816@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35817 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35818 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35819
35820+ pax_track_stack();
35821
35822 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35823 {
35824diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35825index 4d425c6..a9be6c4 100644
35826--- a/drivers/isdn/hardware/eicon/capifunc.c
35827+++ b/drivers/isdn/hardware/eicon/capifunc.c
35828@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35829 IDI_SYNC_REQ req;
35830 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35831
35832+ pax_track_stack();
35833+
35834 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35835
35836 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35837diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35838index 3029234..ef0d9e2 100644
35839--- a/drivers/isdn/hardware/eicon/diddfunc.c
35840+++ b/drivers/isdn/hardware/eicon/diddfunc.c
35841@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35842 IDI_SYNC_REQ req;
35843 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35844
35845+ pax_track_stack();
35846+
35847 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35848
35849 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35850diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35851index d36a4c0..11e7d1a 100644
35852--- a/drivers/isdn/hardware/eicon/divasfunc.c
35853+++ b/drivers/isdn/hardware/eicon/divasfunc.c
35854@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35855 IDI_SYNC_REQ req;
35856 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35857
35858+ pax_track_stack();
35859+
35860 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35861
35862 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35863diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35864index 85784a7..a19ca98 100644
35865--- a/drivers/isdn/hardware/eicon/divasync.h
35866+++ b/drivers/isdn/hardware/eicon/divasync.h
35867@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35868 } diva_didd_add_adapter_t;
35869 typedef struct _diva_didd_remove_adapter {
35870 IDI_CALL p_request;
35871-} diva_didd_remove_adapter_t;
35872+} __no_const diva_didd_remove_adapter_t;
35873 typedef struct _diva_didd_read_adapter_array {
35874 void * buffer;
35875 dword length;
35876diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35877index db87d51..7d09acf 100644
35878--- a/drivers/isdn/hardware/eicon/idifunc.c
35879+++ b/drivers/isdn/hardware/eicon/idifunc.c
35880@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35881 IDI_SYNC_REQ req;
35882 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35883
35884+ pax_track_stack();
35885+
35886 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35887
35888 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35889diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35890index ae89fb8..0fab299 100644
35891--- a/drivers/isdn/hardware/eicon/message.c
35892+++ b/drivers/isdn/hardware/eicon/message.c
35893@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35894 dword d;
35895 word w;
35896
35897+ pax_track_stack();
35898+
35899 a = plci->adapter;
35900 Id = ((word)plci->Id<<8)|a->Id;
35901 PUT_WORD(&SS_Ind[4],0x0000);
35902@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35903 word j, n, w;
35904 dword d;
35905
35906+ pax_track_stack();
35907+
35908
35909 for(i=0;i<8;i++) bp_parms[i].length = 0;
35910 for(i=0;i<2;i++) global_config[i].length = 0;
35911@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35912 const byte llc3[] = {4,3,2,2,6,6,0};
35913 const byte header[] = {0,2,3,3,0,0,0};
35914
35915+ pax_track_stack();
35916+
35917 for(i=0;i<8;i++) bp_parms[i].length = 0;
35918 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35919 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35920@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35921 word appl_number_group_type[MAX_APPL];
35922 PLCI *auxplci;
35923
35924+ pax_track_stack();
35925+
35926 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35927
35928 if(!a->group_optimization_enabled)
35929diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35930index a564b75..f3cf8b5 100644
35931--- a/drivers/isdn/hardware/eicon/mntfunc.c
35932+++ b/drivers/isdn/hardware/eicon/mntfunc.c
35933@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35934 IDI_SYNC_REQ req;
35935 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35936
35937+ pax_track_stack();
35938+
35939 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35940
35941 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35942diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35943index a3bd163..8956575 100644
35944--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35945+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35946@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35947 typedef struct _diva_os_idi_adapter_interface {
35948 diva_init_card_proc_t cleanup_adapter_proc;
35949 diva_cmd_card_proc_t cmd_proc;
35950-} diva_os_idi_adapter_interface_t;
35951+} __no_const diva_os_idi_adapter_interface_t;
35952
35953 typedef struct _diva_os_xdi_adapter {
35954 struct list_head link;
35955diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35956index adb1e8c..21b590b 100644
35957--- a/drivers/isdn/i4l/isdn_common.c
35958+++ b/drivers/isdn/i4l/isdn_common.c
35959@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35960 } iocpar;
35961 void __user *argp = (void __user *)arg;
35962
35963+ pax_track_stack();
35964+
35965 #define name iocpar.name
35966 #define bname iocpar.bname
35967 #define iocts iocpar.iocts
35968diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
35969index 90b56ed..5ed3305 100644
35970--- a/drivers/isdn/i4l/isdn_net.c
35971+++ b/drivers/isdn/i4l/isdn_net.c
35972@@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
35973 {
35974 isdn_net_local *lp = netdev_priv(dev);
35975 unsigned char *p;
35976- ushort len = 0;
35977+ int len = 0;
35978
35979 switch (lp->p_encap) {
35980 case ISDN_NET_ENCAP_ETHER:
35981diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35982index bf7997a..cf091db 100644
35983--- a/drivers/isdn/icn/icn.c
35984+++ b/drivers/isdn/icn/icn.c
35985@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35986 if (count > len)
35987 count = len;
35988 if (user) {
35989- if (copy_from_user(msg, buf, count))
35990+ if (count > sizeof msg || copy_from_user(msg, buf, count))
35991 return -EFAULT;
35992 } else
35993 memcpy(msg, buf, count);
35994diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35995index feb0fa4..f76f830 100644
35996--- a/drivers/isdn/mISDN/socket.c
35997+++ b/drivers/isdn/mISDN/socket.c
35998@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35999 if (dev) {
36000 struct mISDN_devinfo di;
36001
36002+ memset(&di, 0, sizeof(di));
36003 di.id = dev->id;
36004 di.Dprotocols = dev->Dprotocols;
36005 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36006@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36007 if (dev) {
36008 struct mISDN_devinfo di;
36009
36010+ memset(&di, 0, sizeof(di));
36011 di.id = dev->id;
36012 di.Dprotocols = dev->Dprotocols;
36013 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36014diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
36015index 485be8b..f0225bc 100644
36016--- a/drivers/isdn/sc/interrupt.c
36017+++ b/drivers/isdn/sc/interrupt.c
36018@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36019 }
36020 else if(callid>=0x0000 && callid<=0x7FFF)
36021 {
36022+ int len;
36023+
36024 pr_debug("%s: Got Incoming Call\n",
36025 sc_adapter[card]->devicename);
36026- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
36027- strcpy(setup.eazmsn,
36028- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
36029+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
36030+ sizeof(setup.phone));
36031+ if (len >= sizeof(setup.phone))
36032+ continue;
36033+ len = strlcpy(setup.eazmsn,
36034+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36035+ sizeof(setup.eazmsn));
36036+ if (len >= sizeof(setup.eazmsn))
36037+ continue;
36038 setup.si1 = 7;
36039 setup.si2 = 0;
36040 setup.plan = 0;
36041@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36042 * Handle a GetMyNumber Rsp
36043 */
36044 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
36045- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
36046+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36047+ rcvmsg.msg_data.byte_array,
36048+ sizeof(rcvmsg.msg_data.byte_array));
36049 continue;
36050 }
36051
36052diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
36053index 8744d24..d1f9a9a 100644
36054--- a/drivers/lguest/core.c
36055+++ b/drivers/lguest/core.c
36056@@ -91,9 +91,17 @@ static __init int map_switcher(void)
36057 * it's worked so far. The end address needs +1 because __get_vm_area
36058 * allocates an extra guard page, so we need space for that.
36059 */
36060+
36061+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
36062+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36063+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
36064+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36065+#else
36066 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36067 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
36068 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36069+#endif
36070+
36071 if (!switcher_vma) {
36072 err = -ENOMEM;
36073 printk("lguest: could not map switcher pages high\n");
36074@@ -118,7 +126,7 @@ static __init int map_switcher(void)
36075 * Now the Switcher is mapped at the right address, we can't fail!
36076 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
36077 */
36078- memcpy(switcher_vma->addr, start_switcher_text,
36079+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
36080 end_switcher_text - start_switcher_text);
36081
36082 printk(KERN_INFO "lguest: mapped switcher at %p\n",
36083diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
36084index 6ae3888..8b38145 100644
36085--- a/drivers/lguest/x86/core.c
36086+++ b/drivers/lguest/x86/core.c
36087@@ -59,7 +59,7 @@ static struct {
36088 /* Offset from where switcher.S was compiled to where we've copied it */
36089 static unsigned long switcher_offset(void)
36090 {
36091- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
36092+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
36093 }
36094
36095 /* This cpu's struct lguest_pages. */
36096@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
36097 * These copies are pretty cheap, so we do them unconditionally: */
36098 /* Save the current Host top-level page directory.
36099 */
36100+
36101+#ifdef CONFIG_PAX_PER_CPU_PGD
36102+ pages->state.host_cr3 = read_cr3();
36103+#else
36104 pages->state.host_cr3 = __pa(current->mm->pgd);
36105+#endif
36106+
36107 /*
36108 * Set up the Guest's page tables to see this CPU's pages (and no
36109 * other CPU's pages).
36110@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
36111 * compiled-in switcher code and the high-mapped copy we just made.
36112 */
36113 for (i = 0; i < IDT_ENTRIES; i++)
36114- default_idt_entries[i] += switcher_offset();
36115+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
36116
36117 /*
36118 * Set up the Switcher's per-cpu areas.
36119@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
36120 * it will be undisturbed when we switch. To change %cs and jump we
36121 * need this structure to feed to Intel's "lcall" instruction.
36122 */
36123- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
36124+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
36125 lguest_entry.segment = LGUEST_CS;
36126
36127 /*
36128diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36129index 40634b0..4f5855e 100644
36130--- a/drivers/lguest/x86/switcher_32.S
36131+++ b/drivers/lguest/x86/switcher_32.S
36132@@ -87,6 +87,7 @@
36133 #include <asm/page.h>
36134 #include <asm/segment.h>
36135 #include <asm/lguest.h>
36136+#include <asm/processor-flags.h>
36137
36138 // We mark the start of the code to copy
36139 // It's placed in .text tho it's never run here
36140@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36141 // Changes type when we load it: damn Intel!
36142 // For after we switch over our page tables
36143 // That entry will be read-only: we'd crash.
36144+
36145+#ifdef CONFIG_PAX_KERNEXEC
36146+ mov %cr0, %edx
36147+ xor $X86_CR0_WP, %edx
36148+ mov %edx, %cr0
36149+#endif
36150+
36151 movl $(GDT_ENTRY_TSS*8), %edx
36152 ltr %dx
36153
36154@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36155 // Let's clear it again for our return.
36156 // The GDT descriptor of the Host
36157 // Points to the table after two "size" bytes
36158- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36159+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36160 // Clear "used" from type field (byte 5, bit 2)
36161- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36162+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36163+
36164+#ifdef CONFIG_PAX_KERNEXEC
36165+ mov %cr0, %eax
36166+ xor $X86_CR0_WP, %eax
36167+ mov %eax, %cr0
36168+#endif
36169
36170 // Once our page table's switched, the Guest is live!
36171 // The Host fades as we run this final step.
36172@@ -295,13 +309,12 @@ deliver_to_host:
36173 // I consulted gcc, and it gave
36174 // These instructions, which I gladly credit:
36175 leal (%edx,%ebx,8), %eax
36176- movzwl (%eax),%edx
36177- movl 4(%eax), %eax
36178- xorw %ax, %ax
36179- orl %eax, %edx
36180+ movl 4(%eax), %edx
36181+ movw (%eax), %dx
36182 // Now the address of the handler's in %edx
36183 // We call it now: its "iret" drops us home.
36184- jmp *%edx
36185+ ljmp $__KERNEL_CS, $1f
36186+1: jmp *%edx
36187
36188 // Every interrupt can come to us here
36189 // But we must truly tell each apart.
36190diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36191index 588a5b0..b71db89 100644
36192--- a/drivers/macintosh/macio_asic.c
36193+++ b/drivers/macintosh/macio_asic.c
36194@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36195 * MacIO is matched against any Apple ID, it's probe() function
36196 * will then decide wether it applies or not
36197 */
36198-static const struct pci_device_id __devinitdata pci_ids [] = { {
36199+static const struct pci_device_id __devinitconst pci_ids [] = { {
36200 .vendor = PCI_VENDOR_ID_APPLE,
36201 .device = PCI_ANY_ID,
36202 .subvendor = PCI_ANY_ID,
36203diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36204index a348bb0..ecd9b3f 100644
36205--- a/drivers/macintosh/via-pmu-backlight.c
36206+++ b/drivers/macintosh/via-pmu-backlight.c
36207@@ -15,7 +15,7 @@
36208
36209 #define MAX_PMU_LEVEL 0xFF
36210
36211-static struct backlight_ops pmu_backlight_data;
36212+static const struct backlight_ops pmu_backlight_data;
36213 static DEFINE_SPINLOCK(pmu_backlight_lock);
36214 static int sleeping, uses_pmu_bl;
36215 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36216@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36217 return bd->props.brightness;
36218 }
36219
36220-static struct backlight_ops pmu_backlight_data = {
36221+static const struct backlight_ops pmu_backlight_data = {
36222 .get_brightness = pmu_backlight_get_brightness,
36223 .update_status = pmu_backlight_update_status,
36224
36225diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36226index 6f308a4..b5f7ff7 100644
36227--- a/drivers/macintosh/via-pmu.c
36228+++ b/drivers/macintosh/via-pmu.c
36229@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36230 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36231 }
36232
36233-static struct platform_suspend_ops pmu_pm_ops = {
36234+static const struct platform_suspend_ops pmu_pm_ops = {
36235 .enter = powerbook_sleep,
36236 .valid = pmu_sleep_valid,
36237 };
36238diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36239index 818b617..4656e38 100644
36240--- a/drivers/md/dm-ioctl.c
36241+++ b/drivers/md/dm-ioctl.c
36242@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36243 cmd == DM_LIST_VERSIONS_CMD)
36244 return 0;
36245
36246- if ((cmd == DM_DEV_CREATE_CMD)) {
36247+ if (cmd == DM_DEV_CREATE_CMD) {
36248 if (!*param->name) {
36249 DMWARN("name not supplied when creating device");
36250 return -EINVAL;
36251diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36252index 6021d0a..a878643 100644
36253--- a/drivers/md/dm-raid1.c
36254+++ b/drivers/md/dm-raid1.c
36255@@ -41,7 +41,7 @@ enum dm_raid1_error {
36256
36257 struct mirror {
36258 struct mirror_set *ms;
36259- atomic_t error_count;
36260+ atomic_unchecked_t error_count;
36261 unsigned long error_type;
36262 struct dm_dev *dev;
36263 sector_t offset;
36264@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36265 * simple way to tell if a device has encountered
36266 * errors.
36267 */
36268- atomic_inc(&m->error_count);
36269+ atomic_inc_unchecked(&m->error_count);
36270
36271 if (test_and_set_bit(error_type, &m->error_type))
36272 return;
36273@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36274 }
36275
36276 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36277- if (!atomic_read(&new->error_count)) {
36278+ if (!atomic_read_unchecked(&new->error_count)) {
36279 set_default_mirror(new);
36280 break;
36281 }
36282@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36283 struct mirror *m = get_default_mirror(ms);
36284
36285 do {
36286- if (likely(!atomic_read(&m->error_count)))
36287+ if (likely(!atomic_read_unchecked(&m->error_count)))
36288 return m;
36289
36290 if (m-- == ms->mirror)
36291@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36292 {
36293 struct mirror *default_mirror = get_default_mirror(m->ms);
36294
36295- return !atomic_read(&default_mirror->error_count);
36296+ return !atomic_read_unchecked(&default_mirror->error_count);
36297 }
36298
36299 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36300@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36301 */
36302 if (likely(region_in_sync(ms, region, 1)))
36303 m = choose_mirror(ms, bio->bi_sector);
36304- else if (m && atomic_read(&m->error_count))
36305+ else if (m && atomic_read_unchecked(&m->error_count))
36306 m = NULL;
36307
36308 if (likely(m))
36309@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36310 }
36311
36312 ms->mirror[mirror].ms = ms;
36313- atomic_set(&(ms->mirror[mirror].error_count), 0);
36314+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36315 ms->mirror[mirror].error_type = 0;
36316 ms->mirror[mirror].offset = offset;
36317
36318@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36319 */
36320 static char device_status_char(struct mirror *m)
36321 {
36322- if (!atomic_read(&(m->error_count)))
36323+ if (!atomic_read_unchecked(&(m->error_count)))
36324 return 'A';
36325
36326 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36327diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36328index bd58703..9f26571 100644
36329--- a/drivers/md/dm-stripe.c
36330+++ b/drivers/md/dm-stripe.c
36331@@ -20,7 +20,7 @@ struct stripe {
36332 struct dm_dev *dev;
36333 sector_t physical_start;
36334
36335- atomic_t error_count;
36336+ atomic_unchecked_t error_count;
36337 };
36338
36339 struct stripe_c {
36340@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36341 kfree(sc);
36342 return r;
36343 }
36344- atomic_set(&(sc->stripe[i].error_count), 0);
36345+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36346 }
36347
36348 ti->private = sc;
36349@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36350 DMEMIT("%d ", sc->stripes);
36351 for (i = 0; i < sc->stripes; i++) {
36352 DMEMIT("%s ", sc->stripe[i].dev->name);
36353- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36354+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36355 'D' : 'A';
36356 }
36357 buffer[i] = '\0';
36358@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36359 */
36360 for (i = 0; i < sc->stripes; i++)
36361 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36362- atomic_inc(&(sc->stripe[i].error_count));
36363- if (atomic_read(&(sc->stripe[i].error_count)) <
36364+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
36365+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36366 DM_IO_ERROR_THRESHOLD)
36367 queue_work(kstriped, &sc->kstriped_ws);
36368 }
36369diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36370index 4b04590..13a77b2 100644
36371--- a/drivers/md/dm-sysfs.c
36372+++ b/drivers/md/dm-sysfs.c
36373@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36374 NULL,
36375 };
36376
36377-static struct sysfs_ops dm_sysfs_ops = {
36378+static const struct sysfs_ops dm_sysfs_ops = {
36379 .show = dm_attr_show,
36380 };
36381
36382diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36383index 03345bb..332250d 100644
36384--- a/drivers/md/dm-table.c
36385+++ b/drivers/md/dm-table.c
36386@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36387 if (!dev_size)
36388 return 0;
36389
36390- if ((start >= dev_size) || (start + len > dev_size)) {
36391+ if ((start >= dev_size) || (len > dev_size - start)) {
36392 DMWARN("%s: %s too small for target: "
36393 "start=%llu, len=%llu, dev_size=%llu",
36394 dm_device_name(ti->table->md), bdevname(bdev, b),
36395diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36396index c988ac2..c418141 100644
36397--- a/drivers/md/dm.c
36398+++ b/drivers/md/dm.c
36399@@ -165,9 +165,9 @@ struct mapped_device {
36400 /*
36401 * Event handling.
36402 */
36403- atomic_t event_nr;
36404+ atomic_unchecked_t event_nr;
36405 wait_queue_head_t eventq;
36406- atomic_t uevent_seq;
36407+ atomic_unchecked_t uevent_seq;
36408 struct list_head uevent_list;
36409 spinlock_t uevent_lock; /* Protect access to uevent_list */
36410
36411@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36412 rwlock_init(&md->map_lock);
36413 atomic_set(&md->holders, 1);
36414 atomic_set(&md->open_count, 0);
36415- atomic_set(&md->event_nr, 0);
36416- atomic_set(&md->uevent_seq, 0);
36417+ atomic_set_unchecked(&md->event_nr, 0);
36418+ atomic_set_unchecked(&md->uevent_seq, 0);
36419 INIT_LIST_HEAD(&md->uevent_list);
36420 spin_lock_init(&md->uevent_lock);
36421
36422@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36423
36424 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36425
36426- atomic_inc(&md->event_nr);
36427+ atomic_inc_unchecked(&md->event_nr);
36428 wake_up(&md->eventq);
36429 }
36430
36431@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36432
36433 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36434 {
36435- return atomic_add_return(1, &md->uevent_seq);
36436+ return atomic_add_return_unchecked(1, &md->uevent_seq);
36437 }
36438
36439 uint32_t dm_get_event_nr(struct mapped_device *md)
36440 {
36441- return atomic_read(&md->event_nr);
36442+ return atomic_read_unchecked(&md->event_nr);
36443 }
36444
36445 int dm_wait_event(struct mapped_device *md, int event_nr)
36446 {
36447 return wait_event_interruptible(md->eventq,
36448- (event_nr != atomic_read(&md->event_nr)));
36449+ (event_nr != atomic_read_unchecked(&md->event_nr)));
36450 }
36451
36452 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36453diff --git a/drivers/md/md.c b/drivers/md/md.c
36454index 4ce6e2f..7a9530a 100644
36455--- a/drivers/md/md.c
36456+++ b/drivers/md/md.c
36457@@ -153,10 +153,10 @@ static int start_readonly;
36458 * start build, activate spare
36459 */
36460 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36461-static atomic_t md_event_count;
36462+static atomic_unchecked_t md_event_count;
36463 void md_new_event(mddev_t *mddev)
36464 {
36465- atomic_inc(&md_event_count);
36466+ atomic_inc_unchecked(&md_event_count);
36467 wake_up(&md_event_waiters);
36468 }
36469 EXPORT_SYMBOL_GPL(md_new_event);
36470@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36471 */
36472 static void md_new_event_inintr(mddev_t *mddev)
36473 {
36474- atomic_inc(&md_event_count);
36475+ atomic_inc_unchecked(&md_event_count);
36476 wake_up(&md_event_waiters);
36477 }
36478
36479@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36480
36481 rdev->preferred_minor = 0xffff;
36482 rdev->data_offset = le64_to_cpu(sb->data_offset);
36483- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36484+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36485
36486 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36487 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36488@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36489 else
36490 sb->resync_offset = cpu_to_le64(0);
36491
36492- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36493+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36494
36495 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36496 sb->size = cpu_to_le64(mddev->dev_sectors);
36497@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36498 static ssize_t
36499 errors_show(mdk_rdev_t *rdev, char *page)
36500 {
36501- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36502+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36503 }
36504
36505 static ssize_t
36506@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36507 char *e;
36508 unsigned long n = simple_strtoul(buf, &e, 10);
36509 if (*buf && (*e == 0 || *e == '\n')) {
36510- atomic_set(&rdev->corrected_errors, n);
36511+ atomic_set_unchecked(&rdev->corrected_errors, n);
36512 return len;
36513 }
36514 return -EINVAL;
36515@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36516 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36517 kfree(rdev);
36518 }
36519-static struct sysfs_ops rdev_sysfs_ops = {
36520+static const struct sysfs_ops rdev_sysfs_ops = {
36521 .show = rdev_attr_show,
36522 .store = rdev_attr_store,
36523 };
36524@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36525 rdev->data_offset = 0;
36526 rdev->sb_events = 0;
36527 atomic_set(&rdev->nr_pending, 0);
36528- atomic_set(&rdev->read_errors, 0);
36529- atomic_set(&rdev->corrected_errors, 0);
36530+ atomic_set_unchecked(&rdev->read_errors, 0);
36531+ atomic_set_unchecked(&rdev->corrected_errors, 0);
36532
36533 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36534 if (!size) {
36535@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36536 kfree(mddev);
36537 }
36538
36539-static struct sysfs_ops md_sysfs_ops = {
36540+static const struct sysfs_ops md_sysfs_ops = {
36541 .show = md_attr_show,
36542 .store = md_attr_store,
36543 };
36544@@ -4482,7 +4482,8 @@ out:
36545 err = 0;
36546 blk_integrity_unregister(disk);
36547 md_new_event(mddev);
36548- sysfs_notify_dirent(mddev->sysfs_state);
36549+ if (mddev->sysfs_state)
36550+ sysfs_notify_dirent(mddev->sysfs_state);
36551 return err;
36552 }
36553
36554@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36555
36556 spin_unlock(&pers_lock);
36557 seq_printf(seq, "\n");
36558- mi->event = atomic_read(&md_event_count);
36559+ mi->event = atomic_read_unchecked(&md_event_count);
36560 return 0;
36561 }
36562 if (v == (void*)2) {
36563@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36564 chunk_kb ? "KB" : "B");
36565 if (bitmap->file) {
36566 seq_printf(seq, ", file: ");
36567- seq_path(seq, &bitmap->file->f_path, " \t\n");
36568+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36569 }
36570
36571 seq_printf(seq, "\n");
36572@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36573 else {
36574 struct seq_file *p = file->private_data;
36575 p->private = mi;
36576- mi->event = atomic_read(&md_event_count);
36577+ mi->event = atomic_read_unchecked(&md_event_count);
36578 }
36579 return error;
36580 }
36581@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36582 /* always allow read */
36583 mask = POLLIN | POLLRDNORM;
36584
36585- if (mi->event != atomic_read(&md_event_count))
36586+ if (mi->event != atomic_read_unchecked(&md_event_count))
36587 mask |= POLLERR | POLLPRI;
36588 return mask;
36589 }
36590@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36591 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36592 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36593 (int)part_stat_read(&disk->part0, sectors[1]) -
36594- atomic_read(&disk->sync_io);
36595+ atomic_read_unchecked(&disk->sync_io);
36596 /* sync IO will cause sync_io to increase before the disk_stats
36597 * as sync_io is counted when a request starts, and
36598 * disk_stats is counted when it completes.
36599diff --git a/drivers/md/md.h b/drivers/md/md.h
36600index 87430fe..0024a4c 100644
36601--- a/drivers/md/md.h
36602+++ b/drivers/md/md.h
36603@@ -94,10 +94,10 @@ struct mdk_rdev_s
36604 * only maintained for arrays that
36605 * support hot removal
36606 */
36607- atomic_t read_errors; /* number of consecutive read errors that
36608+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
36609 * we have tried to ignore.
36610 */
36611- atomic_t corrected_errors; /* number of corrected read errors,
36612+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36613 * for reporting to userspace and storing
36614 * in superblock.
36615 */
36616@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36617
36618 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36619 {
36620- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36621+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36622 }
36623
36624 struct mdk_personality
36625diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36626index 968cb14..f0ad2e4 100644
36627--- a/drivers/md/raid1.c
36628+++ b/drivers/md/raid1.c
36629@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36630 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36631 continue;
36632 rdev = conf->mirrors[d].rdev;
36633- atomic_add(s, &rdev->corrected_errors);
36634+ atomic_add_unchecked(s, &rdev->corrected_errors);
36635 if (sync_page_io(rdev->bdev,
36636 sect + rdev->data_offset,
36637 s<<9,
36638@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36639 /* Well, this device is dead */
36640 md_error(mddev, rdev);
36641 else {
36642- atomic_add(s, &rdev->corrected_errors);
36643+ atomic_add_unchecked(s, &rdev->corrected_errors);
36644 printk(KERN_INFO
36645 "raid1:%s: read error corrected "
36646 "(%d sectors at %llu on %s)\n",
36647diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36648index 1b4e232..cf0f534 100644
36649--- a/drivers/md/raid10.c
36650+++ b/drivers/md/raid10.c
36651@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36652 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36653 set_bit(R10BIO_Uptodate, &r10_bio->state);
36654 else {
36655- atomic_add(r10_bio->sectors,
36656+ atomic_add_unchecked(r10_bio->sectors,
36657 &conf->mirrors[d].rdev->corrected_errors);
36658 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36659 md_error(r10_bio->mddev,
36660@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36661 test_bit(In_sync, &rdev->flags)) {
36662 atomic_inc(&rdev->nr_pending);
36663 rcu_read_unlock();
36664- atomic_add(s, &rdev->corrected_errors);
36665+ atomic_add_unchecked(s, &rdev->corrected_errors);
36666 if (sync_page_io(rdev->bdev,
36667 r10_bio->devs[sl].addr +
36668 sect + rdev->data_offset,
36669diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36670index 883215d..675bf47 100644
36671--- a/drivers/md/raid5.c
36672+++ b/drivers/md/raid5.c
36673@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36674 bi->bi_next = NULL;
36675 if ((rw & WRITE) &&
36676 test_bit(R5_ReWrite, &sh->dev[i].flags))
36677- atomic_add(STRIPE_SECTORS,
36678+ atomic_add_unchecked(STRIPE_SECTORS,
36679 &rdev->corrected_errors);
36680 generic_make_request(bi);
36681 } else {
36682@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36683 clear_bit(R5_ReadError, &sh->dev[i].flags);
36684 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36685 }
36686- if (atomic_read(&conf->disks[i].rdev->read_errors))
36687- atomic_set(&conf->disks[i].rdev->read_errors, 0);
36688+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36689+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36690 } else {
36691 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36692 int retry = 0;
36693 rdev = conf->disks[i].rdev;
36694
36695 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36696- atomic_inc(&rdev->read_errors);
36697+ atomic_inc_unchecked(&rdev->read_errors);
36698 if (conf->mddev->degraded >= conf->max_degraded)
36699 printk_rl(KERN_WARNING
36700 "raid5:%s: read error not correctable "
36701@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36702 (unsigned long long)(sh->sector
36703 + rdev->data_offset),
36704 bdn);
36705- else if (atomic_read(&rdev->read_errors)
36706+ else if (atomic_read_unchecked(&rdev->read_errors)
36707 > conf->max_nr_stripes)
36708 printk(KERN_WARNING
36709 "raid5:%s: Too many read errors, failing device %s.\n",
36710@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36711 sector_t r_sector;
36712 struct stripe_head sh2;
36713
36714+ pax_track_stack();
36715
36716 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36717 stripe = new_sector;
36718diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36719index 05bde9c..2f31d40 100644
36720--- a/drivers/media/common/saa7146_hlp.c
36721+++ b/drivers/media/common/saa7146_hlp.c
36722@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36723
36724 int x[32], y[32], w[32], h[32];
36725
36726+ pax_track_stack();
36727+
36728 /* clear out memory */
36729 memset(&line_list[0], 0x00, sizeof(u32)*32);
36730 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36731diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36732index cb22da5..82b686e 100644
36733--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36734+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36735@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36736 u8 buf[HOST_LINK_BUF_SIZE];
36737 int i;
36738
36739+ pax_track_stack();
36740+
36741 dprintk("%s\n", __func__);
36742
36743 /* check if we have space for a link buf in the rx_buffer */
36744@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36745 unsigned long timeout;
36746 int written;
36747
36748+ pax_track_stack();
36749+
36750 dprintk("%s\n", __func__);
36751
36752 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36753diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36754index 2fe05d0..a3289c4 100644
36755--- a/drivers/media/dvb/dvb-core/dvb_demux.h
36756+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36757@@ -71,7 +71,7 @@ struct dvb_demux_feed {
36758 union {
36759 dmx_ts_cb ts;
36760 dmx_section_cb sec;
36761- } cb;
36762+ } __no_const cb;
36763
36764 struct dvb_demux *demux;
36765 void *priv;
36766diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36767index 94159b9..376bd8e 100644
36768--- a/drivers/media/dvb/dvb-core/dvbdev.c
36769+++ b/drivers/media/dvb/dvb-core/dvbdev.c
36770@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36771 const struct dvb_device *template, void *priv, int type)
36772 {
36773 struct dvb_device *dvbdev;
36774- struct file_operations *dvbdevfops;
36775+ file_operations_no_const *dvbdevfops;
36776 struct device *clsdev;
36777 int minor;
36778 int id;
36779diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36780index 2a53dd0..db8c07a 100644
36781--- a/drivers/media/dvb/dvb-usb/cxusb.c
36782+++ b/drivers/media/dvb/dvb-usb/cxusb.c
36783@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36784 struct dib0700_adapter_state {
36785 int (*set_param_save) (struct dvb_frontend *,
36786 struct dvb_frontend_parameters *);
36787-};
36788+} __no_const;
36789
36790 static int dib7070_set_param_override(struct dvb_frontend *fe,
36791 struct dvb_frontend_parameters *fep)
36792diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36793index db7f7f7..f55e96f 100644
36794--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36795+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36796@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36797
36798 u8 buf[260];
36799
36800+ pax_track_stack();
36801+
36802 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36803 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36804
36805diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36806index 524acf5..5ffc403 100644
36807--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36808+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36809@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36810
36811 struct dib0700_adapter_state {
36812 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36813-};
36814+} __no_const;
36815
36816 /* Hauppauge Nova-T 500 (aka Bristol)
36817 * has a LNA on GPIO0 which is enabled by setting 1 */
36818diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36819index ba91735..4261d84 100644
36820--- a/drivers/media/dvb/frontends/dib3000.h
36821+++ b/drivers/media/dvb/frontends/dib3000.h
36822@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36823 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36824 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36825 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36826-};
36827+} __no_const;
36828
36829 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36830 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36831diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36832index c709ce6..b3fe620 100644
36833--- a/drivers/media/dvb/frontends/or51211.c
36834+++ b/drivers/media/dvb/frontends/or51211.c
36835@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36836 u8 tudata[585];
36837 int i;
36838
36839+ pax_track_stack();
36840+
36841 dprintk("Firmware is %zd bytes\n",fw->size);
36842
36843 /* Get eprom data */
36844diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36845index 482d0f3..ee1e202 100644
36846--- a/drivers/media/radio/radio-cadet.c
36847+++ b/drivers/media/radio/radio-cadet.c
36848@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36849 while (i < count && dev->rdsin != dev->rdsout)
36850 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36851
36852- if (copy_to_user(data, readbuf, i))
36853+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36854 return -EFAULT;
36855 return i;
36856 }
36857diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36858index 6dd51e2..0359b92 100644
36859--- a/drivers/media/video/cx18/cx18-driver.c
36860+++ b/drivers/media/video/cx18/cx18-driver.c
36861@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36862
36863 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36864
36865-static atomic_t cx18_instance = ATOMIC_INIT(0);
36866+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36867
36868 /* Parameter declarations */
36869 static int cardtype[CX18_MAX_CARDS];
36870@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36871 struct i2c_client c;
36872 u8 eedata[256];
36873
36874+ pax_track_stack();
36875+
36876 memset(&c, 0, sizeof(c));
36877 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36878 c.adapter = &cx->i2c_adap[0];
36879@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36880 struct cx18 *cx;
36881
36882 /* FIXME - module parameter arrays constrain max instances */
36883- i = atomic_inc_return(&cx18_instance) - 1;
36884+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36885 if (i >= CX18_MAX_CARDS) {
36886 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36887 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36888diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36889index 463ec34..2f4625a 100644
36890--- a/drivers/media/video/ivtv/ivtv-driver.c
36891+++ b/drivers/media/video/ivtv/ivtv-driver.c
36892@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36893 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36894
36895 /* ivtv instance counter */
36896-static atomic_t ivtv_instance = ATOMIC_INIT(0);
36897+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36898
36899 /* Parameter declarations */
36900 static int cardtype[IVTV_MAX_CARDS];
36901diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36902index 5fc4ac0..652a54a 100644
36903--- a/drivers/media/video/omap24xxcam.c
36904+++ b/drivers/media/video/omap24xxcam.c
36905@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36906 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36907
36908 do_gettimeofday(&vb->ts);
36909- vb->field_count = atomic_add_return(2, &fh->field_count);
36910+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36911 if (csr & csr_error) {
36912 vb->state = VIDEOBUF_ERROR;
36913 if (!atomic_read(&fh->cam->in_reset)) {
36914diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36915index 2ce67f5..cf26a5b 100644
36916--- a/drivers/media/video/omap24xxcam.h
36917+++ b/drivers/media/video/omap24xxcam.h
36918@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36919 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36920 struct videobuf_queue vbq;
36921 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36922- atomic_t field_count; /* field counter for videobuf_buffer */
36923+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36924 /* accessing cam here doesn't need serialisation: it's constant */
36925 struct omap24xxcam_device *cam;
36926 };
36927diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36928index 299afa4..eb47459 100644
36929--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36930+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36931@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36932 u8 *eeprom;
36933 struct tveeprom tvdata;
36934
36935+ pax_track_stack();
36936+
36937 memset(&tvdata,0,sizeof(tvdata));
36938
36939 eeprom = pvr2_eeprom_fetch(hdw);
36940diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36941index 5b152ff..3320638 100644
36942--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36943+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36944@@ -195,7 +195,7 @@ struct pvr2_hdw {
36945
36946 /* I2C stuff */
36947 struct i2c_adapter i2c_adap;
36948- struct i2c_algorithm i2c_algo;
36949+ i2c_algorithm_no_const i2c_algo;
36950 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36951 int i2c_cx25840_hack_state;
36952 int i2c_linked;
36953diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36954index 1eabff6..8e2313a 100644
36955--- a/drivers/media/video/saa7134/saa6752hs.c
36956+++ b/drivers/media/video/saa7134/saa6752hs.c
36957@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36958 unsigned char localPAT[256];
36959 unsigned char localPMT[256];
36960
36961+ pax_track_stack();
36962+
36963 /* Set video format - must be done first as it resets other settings */
36964 set_reg8(client, 0x41, h->video_format);
36965
36966diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36967index 9c1d3ac..b1b49e9 100644
36968--- a/drivers/media/video/saa7164/saa7164-cmd.c
36969+++ b/drivers/media/video/saa7164/saa7164-cmd.c
36970@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36971 wait_queue_head_t *q = 0;
36972 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36973
36974+ pax_track_stack();
36975+
36976 /* While any outstand message on the bus exists... */
36977 do {
36978
36979@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36980 u8 tmp[512];
36981 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36982
36983+ pax_track_stack();
36984+
36985 while (loop) {
36986
36987 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36988diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36989index b085496..cde0270 100644
36990--- a/drivers/media/video/usbvideo/ibmcam.c
36991+++ b/drivers/media/video/usbvideo/ibmcam.c
36992@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36993 static int __init ibmcam_init(void)
36994 {
36995 struct usbvideo_cb cbTbl;
36996- memset(&cbTbl, 0, sizeof(cbTbl));
36997- cbTbl.probe = ibmcam_probe;
36998- cbTbl.setupOnOpen = ibmcam_setup_on_open;
36999- cbTbl.videoStart = ibmcam_video_start;
37000- cbTbl.videoStop = ibmcam_video_stop;
37001- cbTbl.processData = ibmcam_ProcessIsocData;
37002- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37003- cbTbl.adjustPicture = ibmcam_adjust_picture;
37004- cbTbl.getFPS = ibmcam_calculate_fps;
37005+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
37006+ *(void **)&cbTbl.probe = ibmcam_probe;
37007+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
37008+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
37009+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
37010+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
37011+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37012+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
37013+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
37014 return usbvideo_register(
37015 &cams,
37016 MAX_IBMCAM,
37017diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
37018index 31d57f2..600b735 100644
37019--- a/drivers/media/video/usbvideo/konicawc.c
37020+++ b/drivers/media/video/usbvideo/konicawc.c
37021@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
37022 int error;
37023
37024 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37025- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37026+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37027
37028 cam->input = input_dev = input_allocate_device();
37029 if (!input_dev) {
37030@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
37031 struct usbvideo_cb cbTbl;
37032 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
37033 DRIVER_DESC "\n");
37034- memset(&cbTbl, 0, sizeof(cbTbl));
37035- cbTbl.probe = konicawc_probe;
37036- cbTbl.setupOnOpen = konicawc_setup_on_open;
37037- cbTbl.processData = konicawc_process_isoc;
37038- cbTbl.getFPS = konicawc_calculate_fps;
37039- cbTbl.setVideoMode = konicawc_set_video_mode;
37040- cbTbl.startDataPump = konicawc_start_data;
37041- cbTbl.stopDataPump = konicawc_stop_data;
37042- cbTbl.adjustPicture = konicawc_adjust_picture;
37043- cbTbl.userFree = konicawc_free_uvd;
37044+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
37045+ *(void **)&cbTbl.probe = konicawc_probe;
37046+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
37047+ *(void **)&cbTbl.processData = konicawc_process_isoc;
37048+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
37049+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
37050+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
37051+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
37052+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
37053+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
37054 return usbvideo_register(
37055 &cams,
37056 MAX_CAMERAS,
37057diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
37058index 803d3e4..c4d1b96 100644
37059--- a/drivers/media/video/usbvideo/quickcam_messenger.c
37060+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
37061@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
37062 int error;
37063
37064 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37065- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37066+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37067
37068 cam->input = input_dev = input_allocate_device();
37069 if (!input_dev) {
37070diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
37071index fbd1b63..292f9f0 100644
37072--- a/drivers/media/video/usbvideo/ultracam.c
37073+++ b/drivers/media/video/usbvideo/ultracam.c
37074@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
37075 {
37076 struct usbvideo_cb cbTbl;
37077 memset(&cbTbl, 0, sizeof(cbTbl));
37078- cbTbl.probe = ultracam_probe;
37079- cbTbl.setupOnOpen = ultracam_setup_on_open;
37080- cbTbl.videoStart = ultracam_video_start;
37081- cbTbl.videoStop = ultracam_video_stop;
37082- cbTbl.processData = ultracam_ProcessIsocData;
37083- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37084- cbTbl.adjustPicture = ultracam_adjust_picture;
37085- cbTbl.getFPS = ultracam_calculate_fps;
37086+ *(void **)&cbTbl.probe = ultracam_probe;
37087+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
37088+ *(void **)&cbTbl.videoStart = ultracam_video_start;
37089+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
37090+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
37091+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37092+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
37093+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
37094 return usbvideo_register(
37095 &cams,
37096 MAX_CAMERAS,
37097diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
37098index dea8b32..34f6878 100644
37099--- a/drivers/media/video/usbvideo/usbvideo.c
37100+++ b/drivers/media/video/usbvideo/usbvideo.c
37101@@ -697,15 +697,15 @@ int usbvideo_register(
37102 __func__, cams, base_size, num_cams);
37103
37104 /* Copy callbacks, apply defaults for those that are not set */
37105- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
37106+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
37107 if (cams->cb.getFrame == NULL)
37108- cams->cb.getFrame = usbvideo_GetFrame;
37109+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
37110 if (cams->cb.disconnect == NULL)
37111- cams->cb.disconnect = usbvideo_Disconnect;
37112+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
37113 if (cams->cb.startDataPump == NULL)
37114- cams->cb.startDataPump = usbvideo_StartDataPump;
37115+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
37116 if (cams->cb.stopDataPump == NULL)
37117- cams->cb.stopDataPump = usbvideo_StopDataPump;
37118+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
37119
37120 cams->num_cameras = num_cams;
37121 cams->cam = (struct uvd *) &cams[1];
37122diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
37123index c66985b..7fa143a 100644
37124--- a/drivers/media/video/usbvideo/usbvideo.h
37125+++ b/drivers/media/video/usbvideo/usbvideo.h
37126@@ -268,7 +268,7 @@ struct usbvideo_cb {
37127 int (*startDataPump)(struct uvd *uvd);
37128 void (*stopDataPump)(struct uvd *uvd);
37129 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37130-};
37131+} __no_const;
37132
37133 struct usbvideo {
37134 int num_cameras; /* As allocated */
37135diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37136index e0f91e4..37554ea 100644
37137--- a/drivers/media/video/usbvision/usbvision-core.c
37138+++ b/drivers/media/video/usbvision/usbvision-core.c
37139@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37140 unsigned char rv, gv, bv;
37141 static unsigned char *Y, *U, *V;
37142
37143+ pax_track_stack();
37144+
37145 frame = usbvision->curFrame;
37146 imageSize = frame->frmwidth * frame->frmheight;
37147 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37148diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37149index 0d06e7c..3d17d24 100644
37150--- a/drivers/media/video/v4l2-device.c
37151+++ b/drivers/media/video/v4l2-device.c
37152@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37153 EXPORT_SYMBOL_GPL(v4l2_device_register);
37154
37155 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37156- atomic_t *instance)
37157+ atomic_unchecked_t *instance)
37158 {
37159- int num = atomic_inc_return(instance) - 1;
37160+ int num = atomic_inc_return_unchecked(instance) - 1;
37161 int len = strlen(basename);
37162
37163 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37164diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37165index 032ebae..6a3532c 100644
37166--- a/drivers/media/video/videobuf-dma-sg.c
37167+++ b/drivers/media/video/videobuf-dma-sg.c
37168@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37169 {
37170 struct videobuf_queue q;
37171
37172+ pax_track_stack();
37173+
37174 /* Required to make generic handler to call __videobuf_alloc */
37175 q.int_ops = &sg_ops;
37176
37177diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37178index b6992b7..9fa7547 100644
37179--- a/drivers/message/fusion/mptbase.c
37180+++ b/drivers/message/fusion/mptbase.c
37181@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37182 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37183 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37184
37185+#ifdef CONFIG_GRKERNSEC_HIDESYM
37186+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37187+ NULL, NULL);
37188+#else
37189 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37190 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37191+#endif
37192+
37193 /*
37194 * Rounding UP to nearest 4-kB boundary here...
37195 */
37196diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37197index 83873e3..e360e9a 100644
37198--- a/drivers/message/fusion/mptsas.c
37199+++ b/drivers/message/fusion/mptsas.c
37200@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37201 return 0;
37202 }
37203
37204+static inline void
37205+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37206+{
37207+ if (phy_info->port_details) {
37208+ phy_info->port_details->rphy = rphy;
37209+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37210+ ioc->name, rphy));
37211+ }
37212+
37213+ if (rphy) {
37214+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37215+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37216+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37217+ ioc->name, rphy, rphy->dev.release));
37218+ }
37219+}
37220+
37221 /* no mutex */
37222 static void
37223 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37224@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37225 return NULL;
37226 }
37227
37228-static inline void
37229-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37230-{
37231- if (phy_info->port_details) {
37232- phy_info->port_details->rphy = rphy;
37233- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37234- ioc->name, rphy));
37235- }
37236-
37237- if (rphy) {
37238- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37239- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37240- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37241- ioc->name, rphy, rphy->dev.release));
37242- }
37243-}
37244-
37245 static inline struct sas_port *
37246 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37247 {
37248diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37249index bd096ca..332cf76 100644
37250--- a/drivers/message/fusion/mptscsih.c
37251+++ b/drivers/message/fusion/mptscsih.c
37252@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37253
37254 h = shost_priv(SChost);
37255
37256- if (h) {
37257- if (h->info_kbuf == NULL)
37258- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37259- return h->info_kbuf;
37260- h->info_kbuf[0] = '\0';
37261+ if (!h)
37262+ return NULL;
37263
37264- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37265- h->info_kbuf[size-1] = '\0';
37266- }
37267+ if (h->info_kbuf == NULL)
37268+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37269+ return h->info_kbuf;
37270+ h->info_kbuf[0] = '\0';
37271+
37272+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37273+ h->info_kbuf[size-1] = '\0';
37274
37275 return h->info_kbuf;
37276 }
37277diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37278index efba702..59b2c0f 100644
37279--- a/drivers/message/i2o/i2o_config.c
37280+++ b/drivers/message/i2o/i2o_config.c
37281@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37282 struct i2o_message *msg;
37283 unsigned int iop;
37284
37285+ pax_track_stack();
37286+
37287 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37288 return -EFAULT;
37289
37290diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37291index 7045c45..c07b170 100644
37292--- a/drivers/message/i2o/i2o_proc.c
37293+++ b/drivers/message/i2o/i2o_proc.c
37294@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37295 "Array Controller Device"
37296 };
37297
37298-static char *chtostr(u8 * chars, int n)
37299-{
37300- char tmp[256];
37301- tmp[0] = 0;
37302- return strncat(tmp, (char *)chars, n);
37303-}
37304-
37305 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37306 char *group)
37307 {
37308@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37309
37310 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37311 seq_printf(seq, "%-#8x", ddm_table.module_id);
37312- seq_printf(seq, "%-29s",
37313- chtostr(ddm_table.module_name_version, 28));
37314+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37315 seq_printf(seq, "%9d ", ddm_table.data_size);
37316 seq_printf(seq, "%8d", ddm_table.code_size);
37317
37318@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37319
37320 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37321 seq_printf(seq, "%-#8x", dst->module_id);
37322- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37323- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37324+ seq_printf(seq, "%-.28s", dst->module_name_version);
37325+ seq_printf(seq, "%-.8s", dst->date);
37326 seq_printf(seq, "%8d ", dst->module_size);
37327 seq_printf(seq, "%8d ", dst->mpb_size);
37328 seq_printf(seq, "0x%04x", dst->module_flags);
37329@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37330 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37331 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37332 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37333- seq_printf(seq, "Vendor info : %s\n",
37334- chtostr((u8 *) (work32 + 2), 16));
37335- seq_printf(seq, "Product info : %s\n",
37336- chtostr((u8 *) (work32 + 6), 16));
37337- seq_printf(seq, "Description : %s\n",
37338- chtostr((u8 *) (work32 + 10), 16));
37339- seq_printf(seq, "Product rev. : %s\n",
37340- chtostr((u8 *) (work32 + 14), 8));
37341+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37342+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37343+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37344+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37345
37346 seq_printf(seq, "Serial number : ");
37347 print_serial_number(seq, (u8 *) (work32 + 16),
37348@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37349 }
37350
37351 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37352- seq_printf(seq, "Module name : %s\n",
37353- chtostr(result.module_name, 24));
37354- seq_printf(seq, "Module revision : %s\n",
37355- chtostr(result.module_rev, 8));
37356+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
37357+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37358
37359 seq_printf(seq, "Serial number : ");
37360 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37361@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37362 return 0;
37363 }
37364
37365- seq_printf(seq, "Device name : %s\n",
37366- chtostr(result.device_name, 64));
37367- seq_printf(seq, "Service name : %s\n",
37368- chtostr(result.service_name, 64));
37369- seq_printf(seq, "Physical name : %s\n",
37370- chtostr(result.physical_location, 64));
37371- seq_printf(seq, "Instance number : %s\n",
37372- chtostr(result.instance_number, 4));
37373+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
37374+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
37375+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37376+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37377
37378 return 0;
37379 }
37380diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37381index 27cf4af..b1205b8 100644
37382--- a/drivers/message/i2o/iop.c
37383+++ b/drivers/message/i2o/iop.c
37384@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37385
37386 spin_lock_irqsave(&c->context_list_lock, flags);
37387
37388- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37389- atomic_inc(&c->context_list_counter);
37390+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37391+ atomic_inc_unchecked(&c->context_list_counter);
37392
37393- entry->context = atomic_read(&c->context_list_counter);
37394+ entry->context = atomic_read_unchecked(&c->context_list_counter);
37395
37396 list_add(&entry->list, &c->context_list);
37397
37398@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37399
37400 #if BITS_PER_LONG == 64
37401 spin_lock_init(&c->context_list_lock);
37402- atomic_set(&c->context_list_counter, 0);
37403+ atomic_set_unchecked(&c->context_list_counter, 0);
37404 INIT_LIST_HEAD(&c->context_list);
37405 #endif
37406
37407diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37408index 78e3e85..66c9a0d 100644
37409--- a/drivers/mfd/ab3100-core.c
37410+++ b/drivers/mfd/ab3100-core.c
37411@@ -777,7 +777,7 @@ struct ab_family_id {
37412 char *name;
37413 };
37414
37415-static const struct ab_family_id ids[] __initdata = {
37416+static const struct ab_family_id ids[] __initconst = {
37417 /* AB3100 */
37418 {
37419 .id = 0xc0,
37420diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37421index 8d8c932..8104515 100644
37422--- a/drivers/mfd/wm8350-i2c.c
37423+++ b/drivers/mfd/wm8350-i2c.c
37424@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37425 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37426 int ret;
37427
37428+ pax_track_stack();
37429+
37430 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37431 return -EINVAL;
37432
37433diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37434index e4ff50b..4cc3f04 100644
37435--- a/drivers/misc/kgdbts.c
37436+++ b/drivers/misc/kgdbts.c
37437@@ -118,7 +118,7 @@
37438 } while (0)
37439 #define MAX_CONFIG_LEN 40
37440
37441-static struct kgdb_io kgdbts_io_ops;
37442+static const struct kgdb_io kgdbts_io_ops;
37443 static char get_buf[BUFMAX];
37444 static int get_buf_cnt;
37445 static char put_buf[BUFMAX];
37446@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37447 module_put(THIS_MODULE);
37448 }
37449
37450-static struct kgdb_io kgdbts_io_ops = {
37451+static const struct kgdb_io kgdbts_io_ops = {
37452 .name = "kgdbts",
37453 .read_char = kgdbts_get_char,
37454 .write_char = kgdbts_put_char,
37455diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37456index 37e7cfc..67cfb76 100644
37457--- a/drivers/misc/sgi-gru/gruhandles.c
37458+++ b/drivers/misc/sgi-gru/gruhandles.c
37459@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37460
37461 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37462 {
37463- atomic_long_inc(&mcs_op_statistics[op].count);
37464- atomic_long_add(clks, &mcs_op_statistics[op].total);
37465+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37466+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37467 if (mcs_op_statistics[op].max < clks)
37468 mcs_op_statistics[op].max = clks;
37469 }
37470diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37471index 3f2375c..467c6e6 100644
37472--- a/drivers/misc/sgi-gru/gruprocfs.c
37473+++ b/drivers/misc/sgi-gru/gruprocfs.c
37474@@ -32,9 +32,9 @@
37475
37476 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37477
37478-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37479+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37480 {
37481- unsigned long val = atomic_long_read(v);
37482+ unsigned long val = atomic_long_read_unchecked(v);
37483
37484 if (val)
37485 seq_printf(s, "%16lu %s\n", val, id);
37486@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37487 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37488
37489 for (op = 0; op < mcsop_last; op++) {
37490- count = atomic_long_read(&mcs_op_statistics[op].count);
37491- total = atomic_long_read(&mcs_op_statistics[op].total);
37492+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37493+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37494 max = mcs_op_statistics[op].max;
37495 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37496 count ? total / count : 0, max);
37497diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37498index 46990bc..4a251b5 100644
37499--- a/drivers/misc/sgi-gru/grutables.h
37500+++ b/drivers/misc/sgi-gru/grutables.h
37501@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37502 * GRU statistics.
37503 */
37504 struct gru_stats_s {
37505- atomic_long_t vdata_alloc;
37506- atomic_long_t vdata_free;
37507- atomic_long_t gts_alloc;
37508- atomic_long_t gts_free;
37509- atomic_long_t vdata_double_alloc;
37510- atomic_long_t gts_double_allocate;
37511- atomic_long_t assign_context;
37512- atomic_long_t assign_context_failed;
37513- atomic_long_t free_context;
37514- atomic_long_t load_user_context;
37515- atomic_long_t load_kernel_context;
37516- atomic_long_t lock_kernel_context;
37517- atomic_long_t unlock_kernel_context;
37518- atomic_long_t steal_user_context;
37519- atomic_long_t steal_kernel_context;
37520- atomic_long_t steal_context_failed;
37521- atomic_long_t nopfn;
37522- atomic_long_t break_cow;
37523- atomic_long_t asid_new;
37524- atomic_long_t asid_next;
37525- atomic_long_t asid_wrap;
37526- atomic_long_t asid_reuse;
37527- atomic_long_t intr;
37528- atomic_long_t intr_mm_lock_failed;
37529- atomic_long_t call_os;
37530- atomic_long_t call_os_offnode_reference;
37531- atomic_long_t call_os_check_for_bug;
37532- atomic_long_t call_os_wait_queue;
37533- atomic_long_t user_flush_tlb;
37534- atomic_long_t user_unload_context;
37535- atomic_long_t user_exception;
37536- atomic_long_t set_context_option;
37537- atomic_long_t migrate_check;
37538- atomic_long_t migrated_retarget;
37539- atomic_long_t migrated_unload;
37540- atomic_long_t migrated_unload_delay;
37541- atomic_long_t migrated_nopfn_retarget;
37542- atomic_long_t migrated_nopfn_unload;
37543- atomic_long_t tlb_dropin;
37544- atomic_long_t tlb_dropin_fail_no_asid;
37545- atomic_long_t tlb_dropin_fail_upm;
37546- atomic_long_t tlb_dropin_fail_invalid;
37547- atomic_long_t tlb_dropin_fail_range_active;
37548- atomic_long_t tlb_dropin_fail_idle;
37549- atomic_long_t tlb_dropin_fail_fmm;
37550- atomic_long_t tlb_dropin_fail_no_exception;
37551- atomic_long_t tlb_dropin_fail_no_exception_war;
37552- atomic_long_t tfh_stale_on_fault;
37553- atomic_long_t mmu_invalidate_range;
37554- atomic_long_t mmu_invalidate_page;
37555- atomic_long_t mmu_clear_flush_young;
37556- atomic_long_t flush_tlb;
37557- atomic_long_t flush_tlb_gru;
37558- atomic_long_t flush_tlb_gru_tgh;
37559- atomic_long_t flush_tlb_gru_zero_asid;
37560+ atomic_long_unchecked_t vdata_alloc;
37561+ atomic_long_unchecked_t vdata_free;
37562+ atomic_long_unchecked_t gts_alloc;
37563+ atomic_long_unchecked_t gts_free;
37564+ atomic_long_unchecked_t vdata_double_alloc;
37565+ atomic_long_unchecked_t gts_double_allocate;
37566+ atomic_long_unchecked_t assign_context;
37567+ atomic_long_unchecked_t assign_context_failed;
37568+ atomic_long_unchecked_t free_context;
37569+ atomic_long_unchecked_t load_user_context;
37570+ atomic_long_unchecked_t load_kernel_context;
37571+ atomic_long_unchecked_t lock_kernel_context;
37572+ atomic_long_unchecked_t unlock_kernel_context;
37573+ atomic_long_unchecked_t steal_user_context;
37574+ atomic_long_unchecked_t steal_kernel_context;
37575+ atomic_long_unchecked_t steal_context_failed;
37576+ atomic_long_unchecked_t nopfn;
37577+ atomic_long_unchecked_t break_cow;
37578+ atomic_long_unchecked_t asid_new;
37579+ atomic_long_unchecked_t asid_next;
37580+ atomic_long_unchecked_t asid_wrap;
37581+ atomic_long_unchecked_t asid_reuse;
37582+ atomic_long_unchecked_t intr;
37583+ atomic_long_unchecked_t intr_mm_lock_failed;
37584+ atomic_long_unchecked_t call_os;
37585+ atomic_long_unchecked_t call_os_offnode_reference;
37586+ atomic_long_unchecked_t call_os_check_for_bug;
37587+ atomic_long_unchecked_t call_os_wait_queue;
37588+ atomic_long_unchecked_t user_flush_tlb;
37589+ atomic_long_unchecked_t user_unload_context;
37590+ atomic_long_unchecked_t user_exception;
37591+ atomic_long_unchecked_t set_context_option;
37592+ atomic_long_unchecked_t migrate_check;
37593+ atomic_long_unchecked_t migrated_retarget;
37594+ atomic_long_unchecked_t migrated_unload;
37595+ atomic_long_unchecked_t migrated_unload_delay;
37596+ atomic_long_unchecked_t migrated_nopfn_retarget;
37597+ atomic_long_unchecked_t migrated_nopfn_unload;
37598+ atomic_long_unchecked_t tlb_dropin;
37599+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37600+ atomic_long_unchecked_t tlb_dropin_fail_upm;
37601+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
37602+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
37603+ atomic_long_unchecked_t tlb_dropin_fail_idle;
37604+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
37605+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37606+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37607+ atomic_long_unchecked_t tfh_stale_on_fault;
37608+ atomic_long_unchecked_t mmu_invalidate_range;
37609+ atomic_long_unchecked_t mmu_invalidate_page;
37610+ atomic_long_unchecked_t mmu_clear_flush_young;
37611+ atomic_long_unchecked_t flush_tlb;
37612+ atomic_long_unchecked_t flush_tlb_gru;
37613+ atomic_long_unchecked_t flush_tlb_gru_tgh;
37614+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37615
37616- atomic_long_t copy_gpa;
37617+ atomic_long_unchecked_t copy_gpa;
37618
37619- atomic_long_t mesq_receive;
37620- atomic_long_t mesq_receive_none;
37621- atomic_long_t mesq_send;
37622- atomic_long_t mesq_send_failed;
37623- atomic_long_t mesq_noop;
37624- atomic_long_t mesq_send_unexpected_error;
37625- atomic_long_t mesq_send_lb_overflow;
37626- atomic_long_t mesq_send_qlimit_reached;
37627- atomic_long_t mesq_send_amo_nacked;
37628- atomic_long_t mesq_send_put_nacked;
37629- atomic_long_t mesq_qf_not_full;
37630- atomic_long_t mesq_qf_locked;
37631- atomic_long_t mesq_qf_noop_not_full;
37632- atomic_long_t mesq_qf_switch_head_failed;
37633- atomic_long_t mesq_qf_unexpected_error;
37634- atomic_long_t mesq_noop_unexpected_error;
37635- atomic_long_t mesq_noop_lb_overflow;
37636- atomic_long_t mesq_noop_qlimit_reached;
37637- atomic_long_t mesq_noop_amo_nacked;
37638- atomic_long_t mesq_noop_put_nacked;
37639+ atomic_long_unchecked_t mesq_receive;
37640+ atomic_long_unchecked_t mesq_receive_none;
37641+ atomic_long_unchecked_t mesq_send;
37642+ atomic_long_unchecked_t mesq_send_failed;
37643+ atomic_long_unchecked_t mesq_noop;
37644+ atomic_long_unchecked_t mesq_send_unexpected_error;
37645+ atomic_long_unchecked_t mesq_send_lb_overflow;
37646+ atomic_long_unchecked_t mesq_send_qlimit_reached;
37647+ atomic_long_unchecked_t mesq_send_amo_nacked;
37648+ atomic_long_unchecked_t mesq_send_put_nacked;
37649+ atomic_long_unchecked_t mesq_qf_not_full;
37650+ atomic_long_unchecked_t mesq_qf_locked;
37651+ atomic_long_unchecked_t mesq_qf_noop_not_full;
37652+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
37653+ atomic_long_unchecked_t mesq_qf_unexpected_error;
37654+ atomic_long_unchecked_t mesq_noop_unexpected_error;
37655+ atomic_long_unchecked_t mesq_noop_lb_overflow;
37656+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
37657+ atomic_long_unchecked_t mesq_noop_amo_nacked;
37658+ atomic_long_unchecked_t mesq_noop_put_nacked;
37659
37660 };
37661
37662@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37663 cchop_deallocate, tghop_invalidate, mcsop_last};
37664
37665 struct mcs_op_statistic {
37666- atomic_long_t count;
37667- atomic_long_t total;
37668+ atomic_long_unchecked_t count;
37669+ atomic_long_unchecked_t total;
37670 unsigned long max;
37671 };
37672
37673@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37674
37675 #define STAT(id) do { \
37676 if (gru_options & OPT_STATS) \
37677- atomic_long_inc(&gru_stats.id); \
37678+ atomic_long_inc_unchecked(&gru_stats.id); \
37679 } while (0)
37680
37681 #ifdef CONFIG_SGI_GRU_DEBUG
37682diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37683index 2275126..12a9dbfb 100644
37684--- a/drivers/misc/sgi-xp/xp.h
37685+++ b/drivers/misc/sgi-xp/xp.h
37686@@ -289,7 +289,7 @@ struct xpc_interface {
37687 xpc_notify_func, void *);
37688 void (*received) (short, int, void *);
37689 enum xp_retval (*partid_to_nasids) (short, void *);
37690-};
37691+} __no_const;
37692
37693 extern struct xpc_interface xpc_interface;
37694
37695diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37696index b94d5f7..7f494c5 100644
37697--- a/drivers/misc/sgi-xp/xpc.h
37698+++ b/drivers/misc/sgi-xp/xpc.h
37699@@ -835,6 +835,7 @@ struct xpc_arch_operations {
37700 void (*received_payload) (struct xpc_channel *, void *);
37701 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37702 };
37703+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37704
37705 /* struct xpc_partition act_state values (for XPC HB) */
37706
37707@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37708 /* found in xpc_main.c */
37709 extern struct device *xpc_part;
37710 extern struct device *xpc_chan;
37711-extern struct xpc_arch_operations xpc_arch_ops;
37712+extern xpc_arch_operations_no_const xpc_arch_ops;
37713 extern int xpc_disengage_timelimit;
37714 extern int xpc_disengage_timedout;
37715 extern int xpc_activate_IRQ_rcvd;
37716diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37717index fd3688a..7e211a4 100644
37718--- a/drivers/misc/sgi-xp/xpc_main.c
37719+++ b/drivers/misc/sgi-xp/xpc_main.c
37720@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37721 .notifier_call = xpc_system_die,
37722 };
37723
37724-struct xpc_arch_operations xpc_arch_ops;
37725+xpc_arch_operations_no_const xpc_arch_ops;
37726
37727 /*
37728 * Timer function to enforce the timelimit on the partition disengage.
37729diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37730index 8b70e03..700bda6 100644
37731--- a/drivers/misc/sgi-xp/xpc_sn2.c
37732+++ b/drivers/misc/sgi-xp/xpc_sn2.c
37733@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37734 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37735 }
37736
37737-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37738+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37739 .setup_partitions = xpc_setup_partitions_sn2,
37740 .teardown_partitions = xpc_teardown_partitions_sn2,
37741 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37742@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37743 int ret;
37744 size_t buf_size;
37745
37746- xpc_arch_ops = xpc_arch_ops_sn2;
37747+ pax_open_kernel();
37748+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37749+ pax_close_kernel();
37750
37751 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37752 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37753diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37754index 8e08d71..7cb8c9b 100644
37755--- a/drivers/misc/sgi-xp/xpc_uv.c
37756+++ b/drivers/misc/sgi-xp/xpc_uv.c
37757@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37758 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37759 }
37760
37761-static struct xpc_arch_operations xpc_arch_ops_uv = {
37762+static const struct xpc_arch_operations xpc_arch_ops_uv = {
37763 .setup_partitions = xpc_setup_partitions_uv,
37764 .teardown_partitions = xpc_teardown_partitions_uv,
37765 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37766@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37767 int
37768 xpc_init_uv(void)
37769 {
37770- xpc_arch_ops = xpc_arch_ops_uv;
37771+ pax_open_kernel();
37772+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37773+ pax_close_kernel();
37774
37775 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37776 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37777diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37778index 6fd20b42..650efe3 100644
37779--- a/drivers/mmc/host/sdhci-pci.c
37780+++ b/drivers/mmc/host/sdhci-pci.c
37781@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37782 .probe = via_probe,
37783 };
37784
37785-static const struct pci_device_id pci_ids[] __devinitdata = {
37786+static const struct pci_device_id pci_ids[] __devinitconst = {
37787 {
37788 .vendor = PCI_VENDOR_ID_RICOH,
37789 .device = PCI_DEVICE_ID_RICOH_R5C822,
37790diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37791index e7563a9..5f90ce5 100644
37792--- a/drivers/mtd/chips/cfi_cmdset_0001.c
37793+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37794@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37795 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37796 unsigned long timeo = jiffies + HZ;
37797
37798+ pax_track_stack();
37799+
37800 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37801 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37802 goto sleep;
37803@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37804 unsigned long initial_adr;
37805 int initial_len = len;
37806
37807+ pax_track_stack();
37808+
37809 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37810 adr += chip->start;
37811 initial_adr = adr;
37812@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37813 int retries = 3;
37814 int ret;
37815
37816+ pax_track_stack();
37817+
37818 adr += chip->start;
37819
37820 retry:
37821diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37822index 0667a67..3ab97ed 100644
37823--- a/drivers/mtd/chips/cfi_cmdset_0020.c
37824+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37825@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37826 unsigned long cmd_addr;
37827 struct cfi_private *cfi = map->fldrv_priv;
37828
37829+ pax_track_stack();
37830+
37831 adr += chip->start;
37832
37833 /* Ensure cmd read/writes are aligned. */
37834@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37835 DECLARE_WAITQUEUE(wait, current);
37836 int wbufsize, z;
37837
37838+ pax_track_stack();
37839+
37840 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37841 if (adr & (map_bankwidth(map)-1))
37842 return -EINVAL;
37843@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37844 DECLARE_WAITQUEUE(wait, current);
37845 int ret = 0;
37846
37847+ pax_track_stack();
37848+
37849 adr += chip->start;
37850
37851 /* Let's determine this according to the interleave only once */
37852@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37853 unsigned long timeo = jiffies + HZ;
37854 DECLARE_WAITQUEUE(wait, current);
37855
37856+ pax_track_stack();
37857+
37858 adr += chip->start;
37859
37860 /* Let's determine this according to the interleave only once */
37861@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37862 unsigned long timeo = jiffies + HZ;
37863 DECLARE_WAITQUEUE(wait, current);
37864
37865+ pax_track_stack();
37866+
37867 adr += chip->start;
37868
37869 /* Let's determine this according to the interleave only once */
37870diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37871index 5bf5f46..c5de373 100644
37872--- a/drivers/mtd/devices/doc2000.c
37873+++ b/drivers/mtd/devices/doc2000.c
37874@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37875
37876 /* The ECC will not be calculated correctly if less than 512 is written */
37877 /* DBB-
37878- if (len != 0x200 && eccbuf)
37879+ if (len != 0x200)
37880 printk(KERN_WARNING
37881 "ECC needs a full sector write (adr: %lx size %lx)\n",
37882 (long) to, (long) len);
37883diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37884index 0990f78..bb4e8a4 100644
37885--- a/drivers/mtd/devices/doc2001.c
37886+++ b/drivers/mtd/devices/doc2001.c
37887@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37888 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37889
37890 /* Don't allow read past end of device */
37891- if (from >= this->totlen)
37892+ if (from >= this->totlen || !len)
37893 return -EINVAL;
37894
37895 /* Don't allow a single read to cross a 512-byte block boundary */
37896diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37897index e56d6b4..f07e6cf 100644
37898--- a/drivers/mtd/ftl.c
37899+++ b/drivers/mtd/ftl.c
37900@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37901 loff_t offset;
37902 uint16_t srcunitswap = cpu_to_le16(srcunit);
37903
37904+ pax_track_stack();
37905+
37906 eun = &part->EUNInfo[srcunit];
37907 xfer = &part->XferInfo[xferunit];
37908 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37909diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37910index 8aca552..146446e 100755
37911--- a/drivers/mtd/inftlcore.c
37912+++ b/drivers/mtd/inftlcore.c
37913@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37914 struct inftl_oob oob;
37915 size_t retlen;
37916
37917+ pax_track_stack();
37918+
37919 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37920 "pending=%d)\n", inftl, thisVUC, pendingblock);
37921
37922diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37923index 32e82ae..ed50953 100644
37924--- a/drivers/mtd/inftlmount.c
37925+++ b/drivers/mtd/inftlmount.c
37926@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37927 struct INFTLPartition *ip;
37928 size_t retlen;
37929
37930+ pax_track_stack();
37931+
37932 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37933
37934 /*
37935diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37936index 79bf40f..fe5f8fd 100644
37937--- a/drivers/mtd/lpddr/qinfo_probe.c
37938+++ b/drivers/mtd/lpddr/qinfo_probe.c
37939@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37940 {
37941 map_word pfow_val[4];
37942
37943+ pax_track_stack();
37944+
37945 /* Check identification string */
37946 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37947 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37948diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37949index 726a1b8..f46b460 100644
37950--- a/drivers/mtd/mtdchar.c
37951+++ b/drivers/mtd/mtdchar.c
37952@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37953 u_long size;
37954 struct mtd_info_user info;
37955
37956+ pax_track_stack();
37957+
37958 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37959
37960 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37961diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37962index 1002e18..26d82d5 100644
37963--- a/drivers/mtd/nftlcore.c
37964+++ b/drivers/mtd/nftlcore.c
37965@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37966 int inplace = 1;
37967 size_t retlen;
37968
37969+ pax_track_stack();
37970+
37971 memset(BlockMap, 0xff, sizeof(BlockMap));
37972 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37973
37974diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37975index 8b22b18..6fada85 100644
37976--- a/drivers/mtd/nftlmount.c
37977+++ b/drivers/mtd/nftlmount.c
37978@@ -23,6 +23,7 @@
37979 #include <asm/errno.h>
37980 #include <linux/delay.h>
37981 #include <linux/slab.h>
37982+#include <linux/sched.h>
37983 #include <linux/mtd/mtd.h>
37984 #include <linux/mtd/nand.h>
37985 #include <linux/mtd/nftl.h>
37986@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37987 struct mtd_info *mtd = nftl->mbd.mtd;
37988 unsigned int i;
37989
37990+ pax_track_stack();
37991+
37992 /* Assume logical EraseSize == physical erasesize for starting the scan.
37993 We'll sort it out later if we find a MediaHeader which says otherwise */
37994 /* Actually, we won't. The new DiskOnChip driver has already scanned
37995diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37996index 14cec04..d775b87 100644
37997--- a/drivers/mtd/ubi/build.c
37998+++ b/drivers/mtd/ubi/build.c
37999@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
38000 static int __init bytes_str_to_int(const char *str)
38001 {
38002 char *endp;
38003- unsigned long result;
38004+ unsigned long result, scale = 1;
38005
38006 result = simple_strtoul(str, &endp, 0);
38007 if (str == endp || result >= INT_MAX) {
38008@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
38009
38010 switch (*endp) {
38011 case 'G':
38012- result *= 1024;
38013+ scale *= 1024;
38014 case 'M':
38015- result *= 1024;
38016+ scale *= 1024;
38017 case 'K':
38018- result *= 1024;
38019+ scale *= 1024;
38020 if (endp[1] == 'i' && endp[2] == 'B')
38021 endp += 2;
38022 case '\0':
38023@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
38024 return -EINVAL;
38025 }
38026
38027- return result;
38028+ if ((intoverflow_t)result*scale >= INT_MAX) {
38029+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
38030+ str);
38031+ return -EINVAL;
38032+ }
38033+
38034+ return result*scale;
38035 }
38036
38037 /**
38038diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
38039index ab68886..ca405e8 100644
38040--- a/drivers/net/atlx/atl2.c
38041+++ b/drivers/net/atlx/atl2.c
38042@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
38043 */
38044
38045 #define ATL2_PARAM(X, desc) \
38046- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38047+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38048 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
38049 MODULE_PARM_DESC(X, desc);
38050 #else
38051diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
38052index 4874b2b..67f8526 100644
38053--- a/drivers/net/bnx2.c
38054+++ b/drivers/net/bnx2.c
38055@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
38056 int rc = 0;
38057 u32 magic, csum;
38058
38059+ pax_track_stack();
38060+
38061 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
38062 goto test_nvram_done;
38063
38064diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
38065index fd3eb07..8a6978d 100644
38066--- a/drivers/net/cxgb3/l2t.h
38067+++ b/drivers/net/cxgb3/l2t.h
38068@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38069 */
38070 struct l2t_skb_cb {
38071 arp_failure_handler_func arp_failure_handler;
38072-};
38073+} __no_const;
38074
38075 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38076
38077diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
38078index 032cfe0..411af379 100644
38079--- a/drivers/net/cxgb3/t3_hw.c
38080+++ b/drivers/net/cxgb3/t3_hw.c
38081@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
38082 int i, addr, ret;
38083 struct t3_vpd vpd;
38084
38085+ pax_track_stack();
38086+
38087 /*
38088 * Card information is normally at VPD_BASE but some early cards had
38089 * it at 0.
38090diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
38091index d1e0563..b9e129c 100644
38092--- a/drivers/net/e1000e/82571.c
38093+++ b/drivers/net/e1000e/82571.c
38094@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
38095 {
38096 struct e1000_hw *hw = &adapter->hw;
38097 struct e1000_mac_info *mac = &hw->mac;
38098- struct e1000_mac_operations *func = &mac->ops;
38099+ e1000_mac_operations_no_const *func = &mac->ops;
38100 u32 swsm = 0;
38101 u32 swsm2 = 0;
38102 bool force_clear_smbi = false;
38103@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
38104 temp = er32(ICRXDMTC);
38105 }
38106
38107-static struct e1000_mac_operations e82571_mac_ops = {
38108+static const struct e1000_mac_operations e82571_mac_ops = {
38109 /* .check_mng_mode: mac type dependent */
38110 /* .check_for_link: media type dependent */
38111 .id_led_init = e1000e_id_led_init,
38112@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
38113 .setup_led = e1000e_setup_led_generic,
38114 };
38115
38116-static struct e1000_phy_operations e82_phy_ops_igp = {
38117+static const struct e1000_phy_operations e82_phy_ops_igp = {
38118 .acquire_phy = e1000_get_hw_semaphore_82571,
38119 .check_reset_block = e1000e_check_reset_block_generic,
38120 .commit_phy = NULL,
38121@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
38122 .cfg_on_link_up = NULL,
38123 };
38124
38125-static struct e1000_phy_operations e82_phy_ops_m88 = {
38126+static const struct e1000_phy_operations e82_phy_ops_m88 = {
38127 .acquire_phy = e1000_get_hw_semaphore_82571,
38128 .check_reset_block = e1000e_check_reset_block_generic,
38129 .commit_phy = e1000e_phy_sw_reset,
38130@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38131 .cfg_on_link_up = NULL,
38132 };
38133
38134-static struct e1000_phy_operations e82_phy_ops_bm = {
38135+static const struct e1000_phy_operations e82_phy_ops_bm = {
38136 .acquire_phy = e1000_get_hw_semaphore_82571,
38137 .check_reset_block = e1000e_check_reset_block_generic,
38138 .commit_phy = e1000e_phy_sw_reset,
38139@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38140 .cfg_on_link_up = NULL,
38141 };
38142
38143-static struct e1000_nvm_operations e82571_nvm_ops = {
38144+static const struct e1000_nvm_operations e82571_nvm_ops = {
38145 .acquire_nvm = e1000_acquire_nvm_82571,
38146 .read_nvm = e1000e_read_nvm_eerd,
38147 .release_nvm = e1000_release_nvm_82571,
38148diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38149index 47db9bd..fa58ccd 100644
38150--- a/drivers/net/e1000e/e1000.h
38151+++ b/drivers/net/e1000e/e1000.h
38152@@ -375,9 +375,9 @@ struct e1000_info {
38153 u32 pba;
38154 u32 max_hw_frame_size;
38155 s32 (*get_variants)(struct e1000_adapter *);
38156- struct e1000_mac_operations *mac_ops;
38157- struct e1000_phy_operations *phy_ops;
38158- struct e1000_nvm_operations *nvm_ops;
38159+ const struct e1000_mac_operations *mac_ops;
38160+ const struct e1000_phy_operations *phy_ops;
38161+ const struct e1000_nvm_operations *nvm_ops;
38162 };
38163
38164 /* hardware capability, feature, and workaround flags */
38165diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38166index ae5d736..e9a93a1 100644
38167--- a/drivers/net/e1000e/es2lan.c
38168+++ b/drivers/net/e1000e/es2lan.c
38169@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38170 {
38171 struct e1000_hw *hw = &adapter->hw;
38172 struct e1000_mac_info *mac = &hw->mac;
38173- struct e1000_mac_operations *func = &mac->ops;
38174+ e1000_mac_operations_no_const *func = &mac->ops;
38175
38176 /* Set media type */
38177 switch (adapter->pdev->device) {
38178@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38179 temp = er32(ICRXDMTC);
38180 }
38181
38182-static struct e1000_mac_operations es2_mac_ops = {
38183+static const struct e1000_mac_operations es2_mac_ops = {
38184 .id_led_init = e1000e_id_led_init,
38185 .check_mng_mode = e1000e_check_mng_mode_generic,
38186 /* check_for_link dependent on media type */
38187@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38188 .setup_led = e1000e_setup_led_generic,
38189 };
38190
38191-static struct e1000_phy_operations es2_phy_ops = {
38192+static const struct e1000_phy_operations es2_phy_ops = {
38193 .acquire_phy = e1000_acquire_phy_80003es2lan,
38194 .check_reset_block = e1000e_check_reset_block_generic,
38195 .commit_phy = e1000e_phy_sw_reset,
38196@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38197 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38198 };
38199
38200-static struct e1000_nvm_operations es2_nvm_ops = {
38201+static const struct e1000_nvm_operations es2_nvm_ops = {
38202 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38203 .read_nvm = e1000e_read_nvm_eerd,
38204 .release_nvm = e1000_release_nvm_80003es2lan,
38205diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38206index 11f3b7c..6381887 100644
38207--- a/drivers/net/e1000e/hw.h
38208+++ b/drivers/net/e1000e/hw.h
38209@@ -753,6 +753,7 @@ struct e1000_mac_operations {
38210 s32 (*setup_physical_interface)(struct e1000_hw *);
38211 s32 (*setup_led)(struct e1000_hw *);
38212 };
38213+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38214
38215 /* Function pointers for the PHY. */
38216 struct e1000_phy_operations {
38217@@ -774,6 +775,7 @@ struct e1000_phy_operations {
38218 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38219 s32 (*cfg_on_link_up)(struct e1000_hw *);
38220 };
38221+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38222
38223 /* Function pointers for the NVM. */
38224 struct e1000_nvm_operations {
38225@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38226 s32 (*validate_nvm)(struct e1000_hw *);
38227 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38228 };
38229+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38230
38231 struct e1000_mac_info {
38232- struct e1000_mac_operations ops;
38233+ e1000_mac_operations_no_const ops;
38234
38235 u8 addr[6];
38236 u8 perm_addr[6];
38237@@ -823,7 +826,7 @@ struct e1000_mac_info {
38238 };
38239
38240 struct e1000_phy_info {
38241- struct e1000_phy_operations ops;
38242+ e1000_phy_operations_no_const ops;
38243
38244 enum e1000_phy_type type;
38245
38246@@ -857,7 +860,7 @@ struct e1000_phy_info {
38247 };
38248
38249 struct e1000_nvm_info {
38250- struct e1000_nvm_operations ops;
38251+ e1000_nvm_operations_no_const ops;
38252
38253 enum e1000_nvm_type type;
38254 enum e1000_nvm_override override;
38255diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38256index de39f9a..e28d3e0 100644
38257--- a/drivers/net/e1000e/ich8lan.c
38258+++ b/drivers/net/e1000e/ich8lan.c
38259@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38260 }
38261 }
38262
38263-static struct e1000_mac_operations ich8_mac_ops = {
38264+static const struct e1000_mac_operations ich8_mac_ops = {
38265 .id_led_init = e1000e_id_led_init,
38266 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38267 .check_for_link = e1000_check_for_copper_link_ich8lan,
38268@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38269 /* id_led_init dependent on mac type */
38270 };
38271
38272-static struct e1000_phy_operations ich8_phy_ops = {
38273+static const struct e1000_phy_operations ich8_phy_ops = {
38274 .acquire_phy = e1000_acquire_swflag_ich8lan,
38275 .check_reset_block = e1000_check_reset_block_ich8lan,
38276 .commit_phy = NULL,
38277@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38278 .write_phy_reg = e1000e_write_phy_reg_igp,
38279 };
38280
38281-static struct e1000_nvm_operations ich8_nvm_ops = {
38282+static const struct e1000_nvm_operations ich8_nvm_ops = {
38283 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38284 .read_nvm = e1000_read_nvm_ich8lan,
38285 .release_nvm = e1000_release_nvm_ich8lan,
38286diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38287index 18d5fbb..542d96d 100644
38288--- a/drivers/net/fealnx.c
38289+++ b/drivers/net/fealnx.c
38290@@ -151,7 +151,7 @@ struct chip_info {
38291 int flags;
38292 };
38293
38294-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38295+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38296 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38297 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38298 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38299diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38300index 0e5b54b..b503f82 100644
38301--- a/drivers/net/hamradio/6pack.c
38302+++ b/drivers/net/hamradio/6pack.c
38303@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38304 unsigned char buf[512];
38305 int count1;
38306
38307+ pax_track_stack();
38308+
38309 if (!count)
38310 return;
38311
38312diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38313index 5862282..7cce8cb 100644
38314--- a/drivers/net/ibmveth.c
38315+++ b/drivers/net/ibmveth.c
38316@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38317 NULL,
38318 };
38319
38320-static struct sysfs_ops veth_pool_ops = {
38321+static const struct sysfs_ops veth_pool_ops = {
38322 .show = veth_pool_show,
38323 .store = veth_pool_store,
38324 };
38325diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38326index d617f2d..57b5309 100644
38327--- a/drivers/net/igb/e1000_82575.c
38328+++ b/drivers/net/igb/e1000_82575.c
38329@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38330 wr32(E1000_VT_CTL, vt_ctl);
38331 }
38332
38333-static struct e1000_mac_operations e1000_mac_ops_82575 = {
38334+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38335 .reset_hw = igb_reset_hw_82575,
38336 .init_hw = igb_init_hw_82575,
38337 .check_for_link = igb_check_for_link_82575,
38338@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38339 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38340 };
38341
38342-static struct e1000_phy_operations e1000_phy_ops_82575 = {
38343+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38344 .acquire = igb_acquire_phy_82575,
38345 .get_cfg_done = igb_get_cfg_done_82575,
38346 .release = igb_release_phy_82575,
38347 };
38348
38349-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38350+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38351 .acquire = igb_acquire_nvm_82575,
38352 .read = igb_read_nvm_eerd,
38353 .release = igb_release_nvm_82575,
38354diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38355index 72081df..d855cf5 100644
38356--- a/drivers/net/igb/e1000_hw.h
38357+++ b/drivers/net/igb/e1000_hw.h
38358@@ -288,6 +288,7 @@ struct e1000_mac_operations {
38359 s32 (*read_mac_addr)(struct e1000_hw *);
38360 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38361 };
38362+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38363
38364 struct e1000_phy_operations {
38365 s32 (*acquire)(struct e1000_hw *);
38366@@ -303,6 +304,7 @@ struct e1000_phy_operations {
38367 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38368 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38369 };
38370+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38371
38372 struct e1000_nvm_operations {
38373 s32 (*acquire)(struct e1000_hw *);
38374@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38375 void (*release)(struct e1000_hw *);
38376 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38377 };
38378+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38379
38380 struct e1000_info {
38381 s32 (*get_invariants)(struct e1000_hw *);
38382@@ -321,7 +324,7 @@ struct e1000_info {
38383 extern const struct e1000_info e1000_82575_info;
38384
38385 struct e1000_mac_info {
38386- struct e1000_mac_operations ops;
38387+ e1000_mac_operations_no_const ops;
38388
38389 u8 addr[6];
38390 u8 perm_addr[6];
38391@@ -365,7 +368,7 @@ struct e1000_mac_info {
38392 };
38393
38394 struct e1000_phy_info {
38395- struct e1000_phy_operations ops;
38396+ e1000_phy_operations_no_const ops;
38397
38398 enum e1000_phy_type type;
38399
38400@@ -400,7 +403,7 @@ struct e1000_phy_info {
38401 };
38402
38403 struct e1000_nvm_info {
38404- struct e1000_nvm_operations ops;
38405+ e1000_nvm_operations_no_const ops;
38406
38407 enum e1000_nvm_type type;
38408 enum e1000_nvm_override override;
38409@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38410 s32 (*check_for_ack)(struct e1000_hw *, u16);
38411 s32 (*check_for_rst)(struct e1000_hw *, u16);
38412 };
38413+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38414
38415 struct e1000_mbx_stats {
38416 u32 msgs_tx;
38417@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38418 };
38419
38420 struct e1000_mbx_info {
38421- struct e1000_mbx_operations ops;
38422+ e1000_mbx_operations_no_const ops;
38423 struct e1000_mbx_stats stats;
38424 u32 timeout;
38425 u32 usec_delay;
38426diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38427index 1e8ce37..549c453 100644
38428--- a/drivers/net/igbvf/vf.h
38429+++ b/drivers/net/igbvf/vf.h
38430@@ -187,9 +187,10 @@ struct e1000_mac_operations {
38431 s32 (*read_mac_addr)(struct e1000_hw *);
38432 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38433 };
38434+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38435
38436 struct e1000_mac_info {
38437- struct e1000_mac_operations ops;
38438+ e1000_mac_operations_no_const ops;
38439 u8 addr[6];
38440 u8 perm_addr[6];
38441
38442@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38443 s32 (*check_for_ack)(struct e1000_hw *);
38444 s32 (*check_for_rst)(struct e1000_hw *);
38445 };
38446+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38447
38448 struct e1000_mbx_stats {
38449 u32 msgs_tx;
38450@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38451 };
38452
38453 struct e1000_mbx_info {
38454- struct e1000_mbx_operations ops;
38455+ e1000_mbx_operations_no_const ops;
38456 struct e1000_mbx_stats stats;
38457 u32 timeout;
38458 u32 usec_delay;
38459diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38460index aa7286b..a61394f 100644
38461--- a/drivers/net/iseries_veth.c
38462+++ b/drivers/net/iseries_veth.c
38463@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38464 NULL
38465 };
38466
38467-static struct sysfs_ops veth_cnx_sysfs_ops = {
38468+static const struct sysfs_ops veth_cnx_sysfs_ops = {
38469 .show = veth_cnx_attribute_show
38470 };
38471
38472@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38473 NULL
38474 };
38475
38476-static struct sysfs_ops veth_port_sysfs_ops = {
38477+static const struct sysfs_ops veth_port_sysfs_ops = {
38478 .show = veth_port_attribute_show
38479 };
38480
38481diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38482index 8aa44dc..fa1e797 100644
38483--- a/drivers/net/ixgb/ixgb_main.c
38484+++ b/drivers/net/ixgb/ixgb_main.c
38485@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38486 u32 rctl;
38487 int i;
38488
38489+ pax_track_stack();
38490+
38491 /* Check for Promiscuous and All Multicast modes */
38492
38493 rctl = IXGB_READ_REG(hw, RCTL);
38494diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38495index af35e1d..8781785 100644
38496--- a/drivers/net/ixgb/ixgb_param.c
38497+++ b/drivers/net/ixgb/ixgb_param.c
38498@@ -260,6 +260,9 @@ void __devinit
38499 ixgb_check_options(struct ixgb_adapter *adapter)
38500 {
38501 int bd = adapter->bd_number;
38502+
38503+ pax_track_stack();
38504+
38505 if (bd >= IXGB_MAX_NIC) {
38506 printk(KERN_NOTICE
38507 "Warning: no configuration for board #%i\n", bd);
38508diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38509index b17aa73..ed74540 100644
38510--- a/drivers/net/ixgbe/ixgbe_type.h
38511+++ b/drivers/net/ixgbe/ixgbe_type.h
38512@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38513 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38514 s32 (*update_checksum)(struct ixgbe_hw *);
38515 };
38516+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38517
38518 struct ixgbe_mac_operations {
38519 s32 (*init_hw)(struct ixgbe_hw *);
38520@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38521 /* Flow Control */
38522 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38523 };
38524+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38525
38526 struct ixgbe_phy_operations {
38527 s32 (*identify)(struct ixgbe_hw *);
38528@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38529 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38530 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38531 };
38532+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38533
38534 struct ixgbe_eeprom_info {
38535- struct ixgbe_eeprom_operations ops;
38536+ ixgbe_eeprom_operations_no_const ops;
38537 enum ixgbe_eeprom_type type;
38538 u32 semaphore_delay;
38539 u16 word_size;
38540@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38541 };
38542
38543 struct ixgbe_mac_info {
38544- struct ixgbe_mac_operations ops;
38545+ ixgbe_mac_operations_no_const ops;
38546 enum ixgbe_mac_type type;
38547 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38548 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38549@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38550 };
38551
38552 struct ixgbe_phy_info {
38553- struct ixgbe_phy_operations ops;
38554+ ixgbe_phy_operations_no_const ops;
38555 struct mdio_if_info mdio;
38556 enum ixgbe_phy_type type;
38557 u32 id;
38558diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38559index 291a505..2543756 100644
38560--- a/drivers/net/mlx4/main.c
38561+++ b/drivers/net/mlx4/main.c
38562@@ -38,6 +38,7 @@
38563 #include <linux/errno.h>
38564 #include <linux/pci.h>
38565 #include <linux/dma-mapping.h>
38566+#include <linux/sched.h>
38567
38568 #include <linux/mlx4/device.h>
38569 #include <linux/mlx4/doorbell.h>
38570@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38571 u64 icm_size;
38572 int err;
38573
38574+ pax_track_stack();
38575+
38576 err = mlx4_QUERY_FW(dev);
38577 if (err) {
38578 if (err == -EACCES)
38579diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38580index 2dce134..fa5ce75 100644
38581--- a/drivers/net/niu.c
38582+++ b/drivers/net/niu.c
38583@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38584 int i, num_irqs, err;
38585 u8 first_ldg;
38586
38587+ pax_track_stack();
38588+
38589 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38590 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38591 ldg_num_map[i] = first_ldg + i;
38592diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38593index c1b3f09..97cd8c4 100644
38594--- a/drivers/net/pcnet32.c
38595+++ b/drivers/net/pcnet32.c
38596@@ -79,7 +79,7 @@ static int cards_found;
38597 /*
38598 * VLB I/O addresses
38599 */
38600-static unsigned int pcnet32_portlist[] __initdata =
38601+static unsigned int pcnet32_portlist[] __devinitdata =
38602 { 0x300, 0x320, 0x340, 0x360, 0 };
38603
38604 static int pcnet32_debug = 0;
38605@@ -267,7 +267,7 @@ struct pcnet32_private {
38606 struct sk_buff **rx_skbuff;
38607 dma_addr_t *tx_dma_addr;
38608 dma_addr_t *rx_dma_addr;
38609- struct pcnet32_access a;
38610+ struct pcnet32_access *a;
38611 spinlock_t lock; /* Guard lock */
38612 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38613 unsigned int rx_ring_size; /* current rx ring size */
38614@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38615 u16 val;
38616
38617 netif_wake_queue(dev);
38618- val = lp->a.read_csr(ioaddr, CSR3);
38619+ val = lp->a->read_csr(ioaddr, CSR3);
38620 val &= 0x00ff;
38621- lp->a.write_csr(ioaddr, CSR3, val);
38622+ lp->a->write_csr(ioaddr, CSR3, val);
38623 napi_enable(&lp->napi);
38624 }
38625
38626@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38627 r = mii_link_ok(&lp->mii_if);
38628 } else if (lp->chip_version >= PCNET32_79C970A) {
38629 ulong ioaddr = dev->base_addr; /* card base I/O address */
38630- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38631+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38632 } else { /* can not detect link on really old chips */
38633 r = 1;
38634 }
38635@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38636 pcnet32_netif_stop(dev);
38637
38638 spin_lock_irqsave(&lp->lock, flags);
38639- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38640+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38641
38642 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38643
38644@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38645 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38646 {
38647 struct pcnet32_private *lp = netdev_priv(dev);
38648- struct pcnet32_access *a = &lp->a; /* access to registers */
38649+ struct pcnet32_access *a = lp->a; /* access to registers */
38650 ulong ioaddr = dev->base_addr; /* card base I/O address */
38651 struct sk_buff *skb; /* sk buff */
38652 int x, i; /* counters */
38653@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38654 pcnet32_netif_stop(dev);
38655
38656 spin_lock_irqsave(&lp->lock, flags);
38657- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38658+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38659
38660 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38661
38662 /* Reset the PCNET32 */
38663- lp->a.reset(ioaddr);
38664- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38665+ lp->a->reset(ioaddr);
38666+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38667
38668 /* switch pcnet32 to 32bit mode */
38669- lp->a.write_bcr(ioaddr, 20, 2);
38670+ lp->a->write_bcr(ioaddr, 20, 2);
38671
38672 /* purge & init rings but don't actually restart */
38673 pcnet32_restart(dev, 0x0000);
38674
38675- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38676+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38677
38678 /* Initialize Transmit buffers. */
38679 size = data_len + 15;
38680@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38681
38682 /* set int loopback in CSR15 */
38683 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38684- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38685+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38686
38687 teststatus = cpu_to_le16(0x8000);
38688- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38689+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38690
38691 /* Check status of descriptors */
38692 for (x = 0; x < numbuffs; x++) {
38693@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38694 }
38695 }
38696
38697- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38698+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38699 wmb();
38700 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38701 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38702@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38703 pcnet32_restart(dev, CSR0_NORMAL);
38704 } else {
38705 pcnet32_purge_rx_ring(dev);
38706- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38707+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38708 }
38709 spin_unlock_irqrestore(&lp->lock, flags);
38710
38711@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38712 static void pcnet32_led_blink_callback(struct net_device *dev)
38713 {
38714 struct pcnet32_private *lp = netdev_priv(dev);
38715- struct pcnet32_access *a = &lp->a;
38716+ struct pcnet32_access *a = lp->a;
38717 ulong ioaddr = dev->base_addr;
38718 unsigned long flags;
38719 int i;
38720@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38721 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38722 {
38723 struct pcnet32_private *lp = netdev_priv(dev);
38724- struct pcnet32_access *a = &lp->a;
38725+ struct pcnet32_access *a = lp->a;
38726 ulong ioaddr = dev->base_addr;
38727 unsigned long flags;
38728 int i, regs[4];
38729@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38730 {
38731 int csr5;
38732 struct pcnet32_private *lp = netdev_priv(dev);
38733- struct pcnet32_access *a = &lp->a;
38734+ struct pcnet32_access *a = lp->a;
38735 ulong ioaddr = dev->base_addr;
38736 int ticks;
38737
38738@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38739 spin_lock_irqsave(&lp->lock, flags);
38740 if (pcnet32_tx(dev)) {
38741 /* reset the chip to clear the error condition, then restart */
38742- lp->a.reset(ioaddr);
38743- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38744+ lp->a->reset(ioaddr);
38745+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38746 pcnet32_restart(dev, CSR0_START);
38747 netif_wake_queue(dev);
38748 }
38749@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38750 __napi_complete(napi);
38751
38752 /* clear interrupt masks */
38753- val = lp->a.read_csr(ioaddr, CSR3);
38754+ val = lp->a->read_csr(ioaddr, CSR3);
38755 val &= 0x00ff;
38756- lp->a.write_csr(ioaddr, CSR3, val);
38757+ lp->a->write_csr(ioaddr, CSR3, val);
38758
38759 /* Set interrupt enable. */
38760- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38761+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38762
38763 spin_unlock_irqrestore(&lp->lock, flags);
38764 }
38765@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38766 int i, csr0;
38767 u16 *buff = ptr;
38768 struct pcnet32_private *lp = netdev_priv(dev);
38769- struct pcnet32_access *a = &lp->a;
38770+ struct pcnet32_access *a = lp->a;
38771 ulong ioaddr = dev->base_addr;
38772 unsigned long flags;
38773
38774@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38775 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38776 if (lp->phymask & (1 << j)) {
38777 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38778- lp->a.write_bcr(ioaddr, 33,
38779+ lp->a->write_bcr(ioaddr, 33,
38780 (j << 5) | i);
38781- *buff++ = lp->a.read_bcr(ioaddr, 34);
38782+ *buff++ = lp->a->read_bcr(ioaddr, 34);
38783 }
38784 }
38785 }
38786@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38787 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38788 lp->options |= PCNET32_PORT_FD;
38789
38790- lp->a = *a;
38791+ lp->a = a;
38792
38793 /* prior to register_netdev, dev->name is not yet correct */
38794 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38795@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38796 if (lp->mii) {
38797 /* lp->phycount and lp->phymask are set to 0 by memset above */
38798
38799- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38800+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38801 /* scan for PHYs */
38802 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38803 unsigned short id1, id2;
38804@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38805 "Found PHY %04x:%04x at address %d.\n",
38806 id1, id2, i);
38807 }
38808- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38809+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38810 if (lp->phycount > 1) {
38811 lp->options |= PCNET32_PORT_MII;
38812 }
38813@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38814 }
38815
38816 /* Reset the PCNET32 */
38817- lp->a.reset(ioaddr);
38818+ lp->a->reset(ioaddr);
38819
38820 /* switch pcnet32 to 32bit mode */
38821- lp->a.write_bcr(ioaddr, 20, 2);
38822+ lp->a->write_bcr(ioaddr, 20, 2);
38823
38824 if (netif_msg_ifup(lp))
38825 printk(KERN_DEBUG
38826@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38827 (u32) (lp->init_dma_addr));
38828
38829 /* set/reset autoselect bit */
38830- val = lp->a.read_bcr(ioaddr, 2) & ~2;
38831+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
38832 if (lp->options & PCNET32_PORT_ASEL)
38833 val |= 2;
38834- lp->a.write_bcr(ioaddr, 2, val);
38835+ lp->a->write_bcr(ioaddr, 2, val);
38836
38837 /* handle full duplex setting */
38838 if (lp->mii_if.full_duplex) {
38839- val = lp->a.read_bcr(ioaddr, 9) & ~3;
38840+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
38841 if (lp->options & PCNET32_PORT_FD) {
38842 val |= 1;
38843 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38844@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38845 if (lp->chip_version == 0x2627)
38846 val |= 3;
38847 }
38848- lp->a.write_bcr(ioaddr, 9, val);
38849+ lp->a->write_bcr(ioaddr, 9, val);
38850 }
38851
38852 /* set/reset GPSI bit in test register */
38853- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38854+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38855 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38856 val |= 0x10;
38857- lp->a.write_csr(ioaddr, 124, val);
38858+ lp->a->write_csr(ioaddr, 124, val);
38859
38860 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38861 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38862@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38863 * duplex, and/or enable auto negotiation, and clear DANAS
38864 */
38865 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38866- lp->a.write_bcr(ioaddr, 32,
38867- lp->a.read_bcr(ioaddr, 32) | 0x0080);
38868+ lp->a->write_bcr(ioaddr, 32,
38869+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
38870 /* disable Auto Negotiation, set 10Mpbs, HD */
38871- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38872+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38873 if (lp->options & PCNET32_PORT_FD)
38874 val |= 0x10;
38875 if (lp->options & PCNET32_PORT_100)
38876 val |= 0x08;
38877- lp->a.write_bcr(ioaddr, 32, val);
38878+ lp->a->write_bcr(ioaddr, 32, val);
38879 } else {
38880 if (lp->options & PCNET32_PORT_ASEL) {
38881- lp->a.write_bcr(ioaddr, 32,
38882- lp->a.read_bcr(ioaddr,
38883+ lp->a->write_bcr(ioaddr, 32,
38884+ lp->a->read_bcr(ioaddr,
38885 32) | 0x0080);
38886 /* enable auto negotiate, setup, disable fd */
38887- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38888+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38889 val |= 0x20;
38890- lp->a.write_bcr(ioaddr, 32, val);
38891+ lp->a->write_bcr(ioaddr, 32, val);
38892 }
38893 }
38894 } else {
38895@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38896 * There is really no good other way to handle multiple PHYs
38897 * other than turning off all automatics
38898 */
38899- val = lp->a.read_bcr(ioaddr, 2);
38900- lp->a.write_bcr(ioaddr, 2, val & ~2);
38901- val = lp->a.read_bcr(ioaddr, 32);
38902- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38903+ val = lp->a->read_bcr(ioaddr, 2);
38904+ lp->a->write_bcr(ioaddr, 2, val & ~2);
38905+ val = lp->a->read_bcr(ioaddr, 32);
38906+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38907
38908 if (!(lp->options & PCNET32_PORT_ASEL)) {
38909 /* setup ecmd */
38910@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38911 ecmd.speed =
38912 lp->
38913 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38914- bcr9 = lp->a.read_bcr(ioaddr, 9);
38915+ bcr9 = lp->a->read_bcr(ioaddr, 9);
38916
38917 if (lp->options & PCNET32_PORT_FD) {
38918 ecmd.duplex = DUPLEX_FULL;
38919@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38920 ecmd.duplex = DUPLEX_HALF;
38921 bcr9 |= ~(1 << 0);
38922 }
38923- lp->a.write_bcr(ioaddr, 9, bcr9);
38924+ lp->a->write_bcr(ioaddr, 9, bcr9);
38925 }
38926
38927 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38928@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38929
38930 #ifdef DO_DXSUFLO
38931 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38932- val = lp->a.read_csr(ioaddr, CSR3);
38933+ val = lp->a->read_csr(ioaddr, CSR3);
38934 val |= 0x40;
38935- lp->a.write_csr(ioaddr, CSR3, val);
38936+ lp->a->write_csr(ioaddr, CSR3, val);
38937 }
38938 #endif
38939
38940@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38941 napi_enable(&lp->napi);
38942
38943 /* Re-initialize the PCNET32, and start it when done. */
38944- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38945- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38946+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38947+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38948
38949- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38950- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38951+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38952+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38953
38954 netif_start_queue(dev);
38955
38956@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38957
38958 i = 0;
38959 while (i++ < 100)
38960- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38961+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38962 break;
38963 /*
38964 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38965 * reports that doing so triggers a bug in the '974.
38966 */
38967- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38968+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38969
38970 if (netif_msg_ifup(lp))
38971 printk(KERN_DEBUG
38972 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38973 dev->name, i,
38974 (u32) (lp->init_dma_addr),
38975- lp->a.read_csr(ioaddr, CSR0));
38976+ lp->a->read_csr(ioaddr, CSR0));
38977
38978 spin_unlock_irqrestore(&lp->lock, flags);
38979
38980@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38981 * Switch back to 16bit mode to avoid problems with dumb
38982 * DOS packet driver after a warm reboot
38983 */
38984- lp->a.write_bcr(ioaddr, 20, 4);
38985+ lp->a->write_bcr(ioaddr, 20, 4);
38986
38987 err_free_irq:
38988 spin_unlock_irqrestore(&lp->lock, flags);
38989@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38990
38991 /* wait for stop */
38992 for (i = 0; i < 100; i++)
38993- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38994+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38995 break;
38996
38997 if (i >= 100 && netif_msg_drv(lp))
38998@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38999 return;
39000
39001 /* ReInit Ring */
39002- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39003+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39004 i = 0;
39005 while (i++ < 1000)
39006- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39007+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39008 break;
39009
39010- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
39011+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
39012 }
39013
39014 static void pcnet32_tx_timeout(struct net_device *dev)
39015@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
39016 if (pcnet32_debug & NETIF_MSG_DRV)
39017 printk(KERN_ERR
39018 "%s: transmit timed out, status %4.4x, resetting.\n",
39019- dev->name, lp->a.read_csr(ioaddr, CSR0));
39020- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39021+ dev->name, lp->a->read_csr(ioaddr, CSR0));
39022+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39023 dev->stats.tx_errors++;
39024 if (netif_msg_tx_err(lp)) {
39025 int i;
39026@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39027 if (netif_msg_tx_queued(lp)) {
39028 printk(KERN_DEBUG
39029 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
39030- dev->name, lp->a.read_csr(ioaddr, CSR0));
39031+ dev->name, lp->a->read_csr(ioaddr, CSR0));
39032 }
39033
39034 /* Default status -- will not enable Successful-TxDone
39035@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39036 dev->stats.tx_bytes += skb->len;
39037
39038 /* Trigger an immediate send poll. */
39039- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39040+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39041
39042 dev->trans_start = jiffies;
39043
39044@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
39045
39046 spin_lock(&lp->lock);
39047
39048- csr0 = lp->a.read_csr(ioaddr, CSR0);
39049+ csr0 = lp->a->read_csr(ioaddr, CSR0);
39050 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
39051 if (csr0 == 0xffff) {
39052 break; /* PCMCIA remove happened */
39053 }
39054 /* Acknowledge all of the current interrupt sources ASAP. */
39055- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39056+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39057
39058 if (netif_msg_intr(lp))
39059 printk(KERN_DEBUG
39060 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
39061- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
39062+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
39063
39064 /* Log misc errors. */
39065 if (csr0 & 0x4000)
39066@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
39067 if (napi_schedule_prep(&lp->napi)) {
39068 u16 val;
39069 /* set interrupt masks */
39070- val = lp->a.read_csr(ioaddr, CSR3);
39071+ val = lp->a->read_csr(ioaddr, CSR3);
39072 val |= 0x5f00;
39073- lp->a.write_csr(ioaddr, CSR3, val);
39074+ lp->a->write_csr(ioaddr, CSR3, val);
39075
39076 __napi_schedule(&lp->napi);
39077 break;
39078 }
39079- csr0 = lp->a.read_csr(ioaddr, CSR0);
39080+ csr0 = lp->a->read_csr(ioaddr, CSR0);
39081 }
39082
39083 if (netif_msg_intr(lp))
39084 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
39085- dev->name, lp->a.read_csr(ioaddr, CSR0));
39086+ dev->name, lp->a->read_csr(ioaddr, CSR0));
39087
39088 spin_unlock(&lp->lock);
39089
39090@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
39091
39092 spin_lock_irqsave(&lp->lock, flags);
39093
39094- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39095+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39096
39097 if (netif_msg_ifdown(lp))
39098 printk(KERN_DEBUG
39099 "%s: Shutting down ethercard, status was %2.2x.\n",
39100- dev->name, lp->a.read_csr(ioaddr, CSR0));
39101+ dev->name, lp->a->read_csr(ioaddr, CSR0));
39102
39103 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
39104- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39105+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39106
39107 /*
39108 * Switch back to 16bit mode to avoid problems with dumb
39109 * DOS packet driver after a warm reboot
39110 */
39111- lp->a.write_bcr(ioaddr, 20, 4);
39112+ lp->a->write_bcr(ioaddr, 20, 4);
39113
39114 spin_unlock_irqrestore(&lp->lock, flags);
39115
39116@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
39117 unsigned long flags;
39118
39119 spin_lock_irqsave(&lp->lock, flags);
39120- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39121+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39122 spin_unlock_irqrestore(&lp->lock, flags);
39123
39124 return &dev->stats;
39125@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
39126 if (dev->flags & IFF_ALLMULTI) {
39127 ib->filter[0] = cpu_to_le32(~0U);
39128 ib->filter[1] = cpu_to_le32(~0U);
39129- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39130- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39131- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39132- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39133+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39134+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39135+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39136+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39137 return;
39138 }
39139 /* clear the multicast filter */
39140@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39141 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39142 }
39143 for (i = 0; i < 4; i++)
39144- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39145+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39146 le16_to_cpu(mcast_table[i]));
39147 return;
39148 }
39149@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39150
39151 spin_lock_irqsave(&lp->lock, flags);
39152 suspended = pcnet32_suspend(dev, &flags, 0);
39153- csr15 = lp->a.read_csr(ioaddr, CSR15);
39154+ csr15 = lp->a->read_csr(ioaddr, CSR15);
39155 if (dev->flags & IFF_PROMISC) {
39156 /* Log any net taps. */
39157 if (netif_msg_hw(lp))
39158@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39159 lp->init_block->mode =
39160 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39161 7);
39162- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39163+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39164 } else {
39165 lp->init_block->mode =
39166 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39167- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39168+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39169 pcnet32_load_multicast(dev);
39170 }
39171
39172 if (suspended) {
39173 int csr5;
39174 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39175- csr5 = lp->a.read_csr(ioaddr, CSR5);
39176- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39177+ csr5 = lp->a->read_csr(ioaddr, CSR5);
39178+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39179 } else {
39180- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39181+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39182 pcnet32_restart(dev, CSR0_NORMAL);
39183 netif_wake_queue(dev);
39184 }
39185@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39186 if (!lp->mii)
39187 return 0;
39188
39189- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39190- val_out = lp->a.read_bcr(ioaddr, 34);
39191+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39192+ val_out = lp->a->read_bcr(ioaddr, 34);
39193
39194 return val_out;
39195 }
39196@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39197 if (!lp->mii)
39198 return;
39199
39200- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39201- lp->a.write_bcr(ioaddr, 34, val);
39202+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39203+ lp->a->write_bcr(ioaddr, 34, val);
39204 }
39205
39206 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39207@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39208 curr_link = mii_link_ok(&lp->mii_if);
39209 } else {
39210 ulong ioaddr = dev->base_addr; /* card base I/O address */
39211- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39212+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39213 }
39214 if (!curr_link) {
39215 if (prev_link || verbose) {
39216@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39217 (ecmd.duplex ==
39218 DUPLEX_FULL) ? "full" : "half");
39219 }
39220- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39221+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39222 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39223 if (lp->mii_if.full_duplex)
39224 bcr9 |= (1 << 0);
39225 else
39226 bcr9 &= ~(1 << 0);
39227- lp->a.write_bcr(dev->base_addr, 9, bcr9);
39228+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
39229 }
39230 } else {
39231 if (netif_msg_link(lp))
39232diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39233index 7cc9898..6eb50d3 100644
39234--- a/drivers/net/sis190.c
39235+++ b/drivers/net/sis190.c
39236@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39237 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39238 struct net_device *dev)
39239 {
39240- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39241+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39242 struct sis190_private *tp = netdev_priv(dev);
39243 struct pci_dev *isa_bridge;
39244 u8 reg, tmp8;
39245diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39246index e13685a..60c948c 100644
39247--- a/drivers/net/sundance.c
39248+++ b/drivers/net/sundance.c
39249@@ -225,7 +225,7 @@ enum {
39250 struct pci_id_info {
39251 const char *name;
39252 };
39253-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39254+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39255 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39256 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39257 {"D-Link DFE-580TX 4 port Server Adapter"},
39258diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39259index 529f55a..cccaa18 100644
39260--- a/drivers/net/tg3.h
39261+++ b/drivers/net/tg3.h
39262@@ -95,6 +95,7 @@
39263 #define CHIPREV_ID_5750_A0 0x4000
39264 #define CHIPREV_ID_5750_A1 0x4001
39265 #define CHIPREV_ID_5750_A3 0x4003
39266+#define CHIPREV_ID_5750_C1 0x4201
39267 #define CHIPREV_ID_5750_C2 0x4202
39268 #define CHIPREV_ID_5752_A0_HW 0x5000
39269 #define CHIPREV_ID_5752_A0 0x6000
39270diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39271index b9db1b5..720f9ce 100644
39272--- a/drivers/net/tokenring/abyss.c
39273+++ b/drivers/net/tokenring/abyss.c
39274@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39275
39276 static int __init abyss_init (void)
39277 {
39278- abyss_netdev_ops = tms380tr_netdev_ops;
39279+ pax_open_kernel();
39280+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39281
39282- abyss_netdev_ops.ndo_open = abyss_open;
39283- abyss_netdev_ops.ndo_stop = abyss_close;
39284+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39285+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39286+ pax_close_kernel();
39287
39288 return pci_register_driver(&abyss_driver);
39289 }
39290diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39291index 456f8bf..373e56d 100644
39292--- a/drivers/net/tokenring/madgemc.c
39293+++ b/drivers/net/tokenring/madgemc.c
39294@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39295
39296 static int __init madgemc_init (void)
39297 {
39298- madgemc_netdev_ops = tms380tr_netdev_ops;
39299- madgemc_netdev_ops.ndo_open = madgemc_open;
39300- madgemc_netdev_ops.ndo_stop = madgemc_close;
39301+ pax_open_kernel();
39302+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39303+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39304+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39305+ pax_close_kernel();
39306
39307 return mca_register_driver (&madgemc_driver);
39308 }
39309diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39310index 16e8783..925bd49 100644
39311--- a/drivers/net/tokenring/proteon.c
39312+++ b/drivers/net/tokenring/proteon.c
39313@@ -353,9 +353,11 @@ static int __init proteon_init(void)
39314 struct platform_device *pdev;
39315 int i, num = 0, err = 0;
39316
39317- proteon_netdev_ops = tms380tr_netdev_ops;
39318- proteon_netdev_ops.ndo_open = proteon_open;
39319- proteon_netdev_ops.ndo_stop = tms380tr_close;
39320+ pax_open_kernel();
39321+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39322+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39323+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39324+ pax_close_kernel();
39325
39326 err = platform_driver_register(&proteon_driver);
39327 if (err)
39328diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39329index 46db5c5..37c1536 100644
39330--- a/drivers/net/tokenring/skisa.c
39331+++ b/drivers/net/tokenring/skisa.c
39332@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39333 struct platform_device *pdev;
39334 int i, num = 0, err = 0;
39335
39336- sk_isa_netdev_ops = tms380tr_netdev_ops;
39337- sk_isa_netdev_ops.ndo_open = sk_isa_open;
39338- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39339+ pax_open_kernel();
39340+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39341+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39342+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39343+ pax_close_kernel();
39344
39345 err = platform_driver_register(&sk_isa_driver);
39346 if (err)
39347diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39348index 74e5ba4..5cf6bc9 100644
39349--- a/drivers/net/tulip/de2104x.c
39350+++ b/drivers/net/tulip/de2104x.c
39351@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39352 struct de_srom_info_leaf *il;
39353 void *bufp;
39354
39355+ pax_track_stack();
39356+
39357 /* download entire eeprom */
39358 for (i = 0; i < DE_EEPROM_WORDS; i++)
39359 ((__le16 *)ee_data)[i] =
39360diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39361index a8349b7..90f9dfe 100644
39362--- a/drivers/net/tulip/de4x5.c
39363+++ b/drivers/net/tulip/de4x5.c
39364@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39365 for (i=0; i<ETH_ALEN; i++) {
39366 tmp.addr[i] = dev->dev_addr[i];
39367 }
39368- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39369+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39370 break;
39371
39372 case DE4X5_SET_HWADDR: /* Set the hardware address */
39373@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39374 spin_lock_irqsave(&lp->lock, flags);
39375 memcpy(&statbuf, &lp->pktStats, ioc->len);
39376 spin_unlock_irqrestore(&lp->lock, flags);
39377- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39378+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39379 return -EFAULT;
39380 break;
39381 }
39382diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39383index 391acd3..56d11cd 100644
39384--- a/drivers/net/tulip/eeprom.c
39385+++ b/drivers/net/tulip/eeprom.c
39386@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39387 {NULL}};
39388
39389
39390-static const char *block_name[] __devinitdata = {
39391+static const char *block_name[] __devinitconst = {
39392 "21140 non-MII",
39393 "21140 MII PHY",
39394 "21142 Serial PHY",
39395diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39396index b38d3b7..b1cff23 100644
39397--- a/drivers/net/tulip/winbond-840.c
39398+++ b/drivers/net/tulip/winbond-840.c
39399@@ -235,7 +235,7 @@ struct pci_id_info {
39400 int drv_flags; /* Driver use, intended as capability flags. */
39401 };
39402
39403-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39404+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39405 { /* Sometime a Level-One switch card. */
39406 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39407 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39408diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39409index f450bc9..2b747c8 100644
39410--- a/drivers/net/usb/hso.c
39411+++ b/drivers/net/usb/hso.c
39412@@ -71,7 +71,7 @@
39413 #include <asm/byteorder.h>
39414 #include <linux/serial_core.h>
39415 #include <linux/serial.h>
39416-
39417+#include <asm/local.h>
39418
39419 #define DRIVER_VERSION "1.2"
39420 #define MOD_AUTHOR "Option Wireless"
39421@@ -258,7 +258,7 @@ struct hso_serial {
39422
39423 /* from usb_serial_port */
39424 struct tty_struct *tty;
39425- int open_count;
39426+ local_t open_count;
39427 spinlock_t serial_lock;
39428
39429 int (*write_data) (struct hso_serial *serial);
39430@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39431 struct urb *urb;
39432
39433 urb = serial->rx_urb[0];
39434- if (serial->open_count > 0) {
39435+ if (local_read(&serial->open_count) > 0) {
39436 count = put_rxbuf_data(urb, serial);
39437 if (count == -1)
39438 return;
39439@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39440 DUMP1(urb->transfer_buffer, urb->actual_length);
39441
39442 /* Anyone listening? */
39443- if (serial->open_count == 0)
39444+ if (local_read(&serial->open_count) == 0)
39445 return;
39446
39447 if (status == 0) {
39448@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39449 spin_unlock_irq(&serial->serial_lock);
39450
39451 /* check for port already opened, if not set the termios */
39452- serial->open_count++;
39453- if (serial->open_count == 1) {
39454+ if (local_inc_return(&serial->open_count) == 1) {
39455 tty->low_latency = 1;
39456 serial->rx_state = RX_IDLE;
39457 /* Force default termio settings */
39458@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39459 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39460 if (result) {
39461 hso_stop_serial_device(serial->parent);
39462- serial->open_count--;
39463+ local_dec(&serial->open_count);
39464 kref_put(&serial->parent->ref, hso_serial_ref_free);
39465 }
39466 } else {
39467@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39468
39469 /* reset the rts and dtr */
39470 /* do the actual close */
39471- serial->open_count--;
39472+ local_dec(&serial->open_count);
39473
39474- if (serial->open_count <= 0) {
39475- serial->open_count = 0;
39476+ if (local_read(&serial->open_count) <= 0) {
39477+ local_set(&serial->open_count, 0);
39478 spin_lock_irq(&serial->serial_lock);
39479 if (serial->tty == tty) {
39480 serial->tty->driver_data = NULL;
39481@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39482
39483 /* the actual setup */
39484 spin_lock_irqsave(&serial->serial_lock, flags);
39485- if (serial->open_count)
39486+ if (local_read(&serial->open_count))
39487 _hso_serial_set_termios(tty, old);
39488 else
39489 tty->termios = old;
39490@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39491 /* Start all serial ports */
39492 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39493 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39494- if (dev2ser(serial_table[i])->open_count) {
39495+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
39496 result =
39497 hso_start_serial_device(serial_table[i], GFP_NOIO);
39498 hso_kick_transmit(dev2ser(serial_table[i]));
39499diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39500index 3e94f0c..ffdd926 100644
39501--- a/drivers/net/vxge/vxge-config.h
39502+++ b/drivers/net/vxge/vxge-config.h
39503@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39504 void (*link_down)(struct __vxge_hw_device *devh);
39505 void (*crit_err)(struct __vxge_hw_device *devh,
39506 enum vxge_hw_event type, u64 ext_data);
39507-};
39508+} __no_const;
39509
39510 /*
39511 * struct __vxge_hw_blockpool_entry - Block private data structure
39512diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39513index 068d7a9..35293de 100644
39514--- a/drivers/net/vxge/vxge-main.c
39515+++ b/drivers/net/vxge/vxge-main.c
39516@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39517 struct sk_buff *completed[NR_SKB_COMPLETED];
39518 int more;
39519
39520+ pax_track_stack();
39521+
39522 do {
39523 more = 0;
39524 skb_ptr = completed;
39525@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39526 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39527 int index;
39528
39529+ pax_track_stack();
39530+
39531 /*
39532 * Filling
39533 * - itable with bucket numbers
39534diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39535index 461742b..81be42e 100644
39536--- a/drivers/net/vxge/vxge-traffic.h
39537+++ b/drivers/net/vxge/vxge-traffic.h
39538@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39539 struct vxge_hw_mempool_dma *dma_object,
39540 u32 index,
39541 u32 is_last);
39542-};
39543+} __no_const;
39544
39545 void
39546 __vxge_hw_mempool_destroy(
39547diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39548index cd8cb95..4153b79 100644
39549--- a/drivers/net/wan/cycx_x25.c
39550+++ b/drivers/net/wan/cycx_x25.c
39551@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39552 unsigned char hex[1024],
39553 * phex = hex;
39554
39555+ pax_track_stack();
39556+
39557 if (len >= (sizeof(hex) / 2))
39558 len = (sizeof(hex) / 2) - 1;
39559
39560diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39561index aa9248f..a4e3c3b 100644
39562--- a/drivers/net/wan/hdlc_x25.c
39563+++ b/drivers/net/wan/hdlc_x25.c
39564@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39565
39566 static int x25_open(struct net_device *dev)
39567 {
39568- struct lapb_register_struct cb;
39569+ static struct lapb_register_struct cb = {
39570+ .connect_confirmation = x25_connected,
39571+ .connect_indication = x25_connected,
39572+ .disconnect_confirmation = x25_disconnected,
39573+ .disconnect_indication = x25_disconnected,
39574+ .data_indication = x25_data_indication,
39575+ .data_transmit = x25_data_transmit
39576+ };
39577 int result;
39578
39579- cb.connect_confirmation = x25_connected;
39580- cb.connect_indication = x25_connected;
39581- cb.disconnect_confirmation = x25_disconnected;
39582- cb.disconnect_indication = x25_disconnected;
39583- cb.data_indication = x25_data_indication;
39584- cb.data_transmit = x25_data_transmit;
39585-
39586 result = lapb_register(dev, &cb);
39587 if (result != LAPB_OK)
39588 return result;
39589diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39590index 5ad287c..783b020 100644
39591--- a/drivers/net/wimax/i2400m/usb-fw.c
39592+++ b/drivers/net/wimax/i2400m/usb-fw.c
39593@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39594 int do_autopm = 1;
39595 DECLARE_COMPLETION_ONSTACK(notif_completion);
39596
39597+ pax_track_stack();
39598+
39599 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39600 i2400m, ack, ack_size);
39601 BUG_ON(_ack == i2400m->bm_ack_buf);
39602diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39603index 6c26840..62c97c3 100644
39604--- a/drivers/net/wireless/airo.c
39605+++ b/drivers/net/wireless/airo.c
39606@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39607 BSSListElement * loop_net;
39608 BSSListElement * tmp_net;
39609
39610+ pax_track_stack();
39611+
39612 /* Blow away current list of scan results */
39613 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39614 list_move_tail (&loop_net->list, &ai->network_free_list);
39615@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39616 WepKeyRid wkr;
39617 int rc;
39618
39619+ pax_track_stack();
39620+
39621 memset( &mySsid, 0, sizeof( mySsid ) );
39622 kfree (ai->flash);
39623 ai->flash = NULL;
39624@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39625 __le32 *vals = stats.vals;
39626 int len;
39627
39628+ pax_track_stack();
39629+
39630 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39631 return -ENOMEM;
39632 data = (struct proc_data *)file->private_data;
39633@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39634 /* If doLoseSync is not 1, we won't do a Lose Sync */
39635 int doLoseSync = -1;
39636
39637+ pax_track_stack();
39638+
39639 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39640 return -ENOMEM;
39641 data = (struct proc_data *)file->private_data;
39642@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39643 int i;
39644 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39645
39646+ pax_track_stack();
39647+
39648 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39649 if (!qual)
39650 return -ENOMEM;
39651@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39652 CapabilityRid cap_rid;
39653 __le32 *vals = stats_rid.vals;
39654
39655+ pax_track_stack();
39656+
39657 /* Get stats out of the card */
39658 clear_bit(JOB_WSTATS, &local->jobs);
39659 if (local->power.event) {
39660diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39661index 747508c..82e965d 100644
39662--- a/drivers/net/wireless/ath/ath5k/debug.c
39663+++ b/drivers/net/wireless/ath/ath5k/debug.c
39664@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39665 unsigned int v;
39666 u64 tsf;
39667
39668+ pax_track_stack();
39669+
39670 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39671 len += snprintf(buf+len, sizeof(buf)-len,
39672 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39673@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39674 unsigned int len = 0;
39675 unsigned int i;
39676
39677+ pax_track_stack();
39678+
39679 len += snprintf(buf+len, sizeof(buf)-len,
39680 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39681
39682diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39683index 2be4c22..593b1eb 100644
39684--- a/drivers/net/wireless/ath/ath9k/debug.c
39685+++ b/drivers/net/wireless/ath/ath9k/debug.c
39686@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39687 char buf[512];
39688 unsigned int len = 0;
39689
39690+ pax_track_stack();
39691+
39692 len += snprintf(buf + len, sizeof(buf) - len,
39693 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39694 len += snprintf(buf + len, sizeof(buf) - len,
39695@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39696 int i;
39697 u8 addr[ETH_ALEN];
39698
39699+ pax_track_stack();
39700+
39701 len += snprintf(buf + len, sizeof(buf) - len,
39702 "primary: %s (%s chan=%d ht=%d)\n",
39703 wiphy_name(sc->pri_wiphy->hw->wiphy),
39704diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39705index 80b19a4..dab3a45 100644
39706--- a/drivers/net/wireless/b43/debugfs.c
39707+++ b/drivers/net/wireless/b43/debugfs.c
39708@@ -43,7 +43,7 @@ static struct dentry *rootdir;
39709 struct b43_debugfs_fops {
39710 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39711 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39712- struct file_operations fops;
39713+ const struct file_operations fops;
39714 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39715 size_t file_struct_offset;
39716 };
39717diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39718index 1f85ac5..c99b4b4 100644
39719--- a/drivers/net/wireless/b43legacy/debugfs.c
39720+++ b/drivers/net/wireless/b43legacy/debugfs.c
39721@@ -44,7 +44,7 @@ static struct dentry *rootdir;
39722 struct b43legacy_debugfs_fops {
39723 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39724 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39725- struct file_operations fops;
39726+ const struct file_operations fops;
39727 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39728 size_t file_struct_offset;
39729 /* Take wl->irq_lock before calling read/write? */
39730diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39731index 43102bf..3b569c3 100644
39732--- a/drivers/net/wireless/ipw2x00/ipw2100.c
39733+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39734@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39735 int err;
39736 DECLARE_SSID_BUF(ssid);
39737
39738+ pax_track_stack();
39739+
39740 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39741
39742 if (ssid_len)
39743@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39744 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39745 int err;
39746
39747+ pax_track_stack();
39748+
39749 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39750 idx, keylen, len);
39751
39752diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39753index 282b1f7..169f0cf 100644
39754--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39755+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39756@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39757 unsigned long flags;
39758 DECLARE_SSID_BUF(ssid);
39759
39760+ pax_track_stack();
39761+
39762 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39763 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39764 print_ssid(ssid, info_element->data, info_element->len),
39765diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39766index 950267a..80d5fd2 100644
39767--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39768+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39769@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39770 },
39771 };
39772
39773-static struct iwl_ops iwl1000_ops = {
39774+static const struct iwl_ops iwl1000_ops = {
39775 .ucode = &iwl5000_ucode,
39776 .lib = &iwl1000_lib,
39777 .hcmd = &iwl5000_hcmd,
39778diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39779index 56bfcc3..b348020 100644
39780--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39781+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39782@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39783 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39784 };
39785
39786-static struct iwl_ops iwl3945_ops = {
39787+static const struct iwl_ops iwl3945_ops = {
39788 .ucode = &iwl3945_ucode,
39789 .lib = &iwl3945_lib,
39790 .hcmd = &iwl3945_hcmd,
39791diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39792index 585b8d4..e142963 100644
39793--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39794+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39795@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39796 },
39797 };
39798
39799-static struct iwl_ops iwl4965_ops = {
39800+static const struct iwl_ops iwl4965_ops = {
39801 .ucode = &iwl4965_ucode,
39802 .lib = &iwl4965_lib,
39803 .hcmd = &iwl4965_hcmd,
39804diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39805index 1f423f2..e37c192 100644
39806--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39807+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39808@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39809 },
39810 };
39811
39812-struct iwl_ops iwl5000_ops = {
39813+const struct iwl_ops iwl5000_ops = {
39814 .ucode = &iwl5000_ucode,
39815 .lib = &iwl5000_lib,
39816 .hcmd = &iwl5000_hcmd,
39817 .utils = &iwl5000_hcmd_utils,
39818 };
39819
39820-static struct iwl_ops iwl5150_ops = {
39821+static const struct iwl_ops iwl5150_ops = {
39822 .ucode = &iwl5000_ucode,
39823 .lib = &iwl5150_lib,
39824 .hcmd = &iwl5000_hcmd,
39825diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39826index 1473452..f07d5e1 100644
39827--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39828+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39829@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39830 .calc_rssi = iwl5000_calc_rssi,
39831 };
39832
39833-static struct iwl_ops iwl6000_ops = {
39834+static const struct iwl_ops iwl6000_ops = {
39835 .ucode = &iwl5000_ucode,
39836 .lib = &iwl6000_lib,
39837 .hcmd = &iwl5000_hcmd,
39838diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39839index 1a3dfa2..b3e0a61 100644
39840--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39841+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39842@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39843 u8 active_index = 0;
39844 s32 tpt = 0;
39845
39846+ pax_track_stack();
39847+
39848 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39849
39850 if (!ieee80211_is_data(hdr->frame_control) ||
39851@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39852 u8 valid_tx_ant = 0;
39853 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39854
39855+ pax_track_stack();
39856+
39857 /* Override starting rate (index 0) if needed for debug purposes */
39858 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39859
39860diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39861index 0e56d78..6a3c107 100644
39862--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39863+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39864@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39865 if (iwl_debug_level & IWL_DL_INFO)
39866 dev_printk(KERN_DEBUG, &(pdev->dev),
39867 "Disabling hw_scan\n");
39868- iwl_hw_ops.hw_scan = NULL;
39869+ pax_open_kernel();
39870+ *(void **)&iwl_hw_ops.hw_scan = NULL;
39871+ pax_close_kernel();
39872 }
39873
39874 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39875diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39876index cbc6290..eb323d7 100644
39877--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39878+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39879@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39880 #endif
39881
39882 #else
39883-#define IWL_DEBUG(__priv, level, fmt, args...)
39884-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39885+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39886+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39887 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39888 void *p, u32 len)
39889 {}
39890diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39891index a198bcf..8e68233 100644
39892--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39893+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39894@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39895 int pos = 0;
39896 const size_t bufsz = sizeof(buf);
39897
39898+ pax_track_stack();
39899+
39900 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39901 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39902 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39903@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39904 const size_t bufsz = sizeof(buf);
39905 ssize_t ret;
39906
39907+ pax_track_stack();
39908+
39909 for (i = 0; i < AC_NUM; i++) {
39910 pos += scnprintf(buf + pos, bufsz - pos,
39911 "\tcw_min\tcw_max\taifsn\ttxop\n");
39912diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39913index 3539ea4..b174bfa 100644
39914--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39915+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39916@@ -68,7 +68,7 @@ struct iwl_tx_queue;
39917
39918 /* shared structures from iwl-5000.c */
39919 extern struct iwl_mod_params iwl50_mod_params;
39920-extern struct iwl_ops iwl5000_ops;
39921+extern const struct iwl_ops iwl5000_ops;
39922 extern struct iwl_ucode_ops iwl5000_ucode;
39923 extern struct iwl_lib_ops iwl5000_lib;
39924 extern struct iwl_hcmd_ops iwl5000_hcmd;
39925diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39926index 619590d..69235ee 100644
39927--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39928+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39929@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39930 */
39931 if (iwl3945_mod_params.disable_hw_scan) {
39932 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39933- iwl3945_hw_ops.hw_scan = NULL;
39934+ pax_open_kernel();
39935+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39936+ pax_close_kernel();
39937 }
39938
39939
39940diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39941index 1465379..fe4d78b 100644
39942--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39943+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39944@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39945 int buf_len = 512;
39946 size_t len = 0;
39947
39948+ pax_track_stack();
39949+
39950 if (*ppos != 0)
39951 return 0;
39952 if (count < sizeof(buf))
39953diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39954index 893a55c..7f66a50 100644
39955--- a/drivers/net/wireless/libertas/debugfs.c
39956+++ b/drivers/net/wireless/libertas/debugfs.c
39957@@ -708,7 +708,7 @@ out_unlock:
39958 struct lbs_debugfs_files {
39959 const char *name;
39960 int perm;
39961- struct file_operations fops;
39962+ const struct file_operations fops;
39963 };
39964
39965 static const struct lbs_debugfs_files debugfs_files[] = {
39966diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39967index 2ecbedb..42704f0 100644
39968--- a/drivers/net/wireless/rndis_wlan.c
39969+++ b/drivers/net/wireless/rndis_wlan.c
39970@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39971
39972 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39973
39974- if (rts_threshold < 0 || rts_threshold > 2347)
39975+ if (rts_threshold > 2347)
39976 rts_threshold = 2347;
39977
39978 tmp = cpu_to_le32(rts_threshold);
39979diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39980index 334ccd6..47f8944 100644
39981--- a/drivers/oprofile/buffer_sync.c
39982+++ b/drivers/oprofile/buffer_sync.c
39983@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39984 if (cookie == NO_COOKIE)
39985 offset = pc;
39986 if (cookie == INVALID_COOKIE) {
39987- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39988+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39989 offset = pc;
39990 }
39991 if (cookie != last_cookie) {
39992@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39993 /* add userspace sample */
39994
39995 if (!mm) {
39996- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39997+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39998 return 0;
39999 }
40000
40001 cookie = lookup_dcookie(mm, s->eip, &offset);
40002
40003 if (cookie == INVALID_COOKIE) {
40004- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40005+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40006 return 0;
40007 }
40008
40009@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
40010 /* ignore backtraces if failed to add a sample */
40011 if (state == sb_bt_start) {
40012 state = sb_bt_ignore;
40013- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40014+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40015 }
40016 }
40017 release_mm(mm);
40018diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40019index 5df60a6..72f5c1c 100644
40020--- a/drivers/oprofile/event_buffer.c
40021+++ b/drivers/oprofile/event_buffer.c
40022@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40023 }
40024
40025 if (buffer_pos == buffer_size) {
40026- atomic_inc(&oprofile_stats.event_lost_overflow);
40027+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40028 return;
40029 }
40030
40031diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40032index dc8a042..fe5f315 100644
40033--- a/drivers/oprofile/oprof.c
40034+++ b/drivers/oprofile/oprof.c
40035@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40036 if (oprofile_ops.switch_events())
40037 return;
40038
40039- atomic_inc(&oprofile_stats.multiplex_counter);
40040+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40041 start_switch_worker();
40042 }
40043
40044diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40045index 61689e8..387f7f8 100644
40046--- a/drivers/oprofile/oprofile_stats.c
40047+++ b/drivers/oprofile/oprofile_stats.c
40048@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40049 cpu_buf->sample_invalid_eip = 0;
40050 }
40051
40052- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40053- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40054- atomic_set(&oprofile_stats.event_lost_overflow, 0);
40055- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40056- atomic_set(&oprofile_stats.multiplex_counter, 0);
40057+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40058+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40059+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40060+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40061+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40062 }
40063
40064
40065diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40066index 0b54e46..a37c527 100644
40067--- a/drivers/oprofile/oprofile_stats.h
40068+++ b/drivers/oprofile/oprofile_stats.h
40069@@ -13,11 +13,11 @@
40070 #include <asm/atomic.h>
40071
40072 struct oprofile_stat_struct {
40073- atomic_t sample_lost_no_mm;
40074- atomic_t sample_lost_no_mapping;
40075- atomic_t bt_lost_no_mapping;
40076- atomic_t event_lost_overflow;
40077- atomic_t multiplex_counter;
40078+ atomic_unchecked_t sample_lost_no_mm;
40079+ atomic_unchecked_t sample_lost_no_mapping;
40080+ atomic_unchecked_t bt_lost_no_mapping;
40081+ atomic_unchecked_t event_lost_overflow;
40082+ atomic_unchecked_t multiplex_counter;
40083 };
40084
40085 extern struct oprofile_stat_struct oprofile_stats;
40086diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40087index 2766a6d..80c77e2 100644
40088--- a/drivers/oprofile/oprofilefs.c
40089+++ b/drivers/oprofile/oprofilefs.c
40090@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
40091
40092
40093 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40094- char const *name, atomic_t *val)
40095+ char const *name, atomic_unchecked_t *val)
40096 {
40097 struct dentry *d = __oprofilefs_create_file(sb, root, name,
40098 &atomic_ro_fops, 0444);
40099diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
40100index 13a64bc..ad62835 100644
40101--- a/drivers/parisc/pdc_stable.c
40102+++ b/drivers/parisc/pdc_stable.c
40103@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
40104 return ret;
40105 }
40106
40107-static struct sysfs_ops pdcspath_attr_ops = {
40108+static const struct sysfs_ops pdcspath_attr_ops = {
40109 .show = pdcspath_attr_show,
40110 .store = pdcspath_attr_store,
40111 };
40112diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40113index 8eefe56..40751a7 100644
40114--- a/drivers/parport/procfs.c
40115+++ b/drivers/parport/procfs.c
40116@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40117
40118 *ppos += len;
40119
40120- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40121+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40122 }
40123
40124 #ifdef CONFIG_PARPORT_1284
40125@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40126
40127 *ppos += len;
40128
40129- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40130+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40131 }
40132 #endif /* IEEE1284.3 support. */
40133
40134diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40135index 73e7d8e..c80f3d2 100644
40136--- a/drivers/pci/hotplug/acpiphp_glue.c
40137+++ b/drivers/pci/hotplug/acpiphp_glue.c
40138@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40139 }
40140
40141
40142-static struct acpi_dock_ops acpiphp_dock_ops = {
40143+static const struct acpi_dock_ops acpiphp_dock_ops = {
40144 .handler = handle_hotplug_event_func,
40145 };
40146
40147diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40148index 9fff878..ad0ad53 100644
40149--- a/drivers/pci/hotplug/cpci_hotplug.h
40150+++ b/drivers/pci/hotplug/cpci_hotplug.h
40151@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40152 int (*hardware_test) (struct slot* slot, u32 value);
40153 u8 (*get_power) (struct slot* slot);
40154 int (*set_power) (struct slot* slot, int value);
40155-};
40156+} __no_const;
40157
40158 struct cpci_hp_controller {
40159 unsigned int irq;
40160diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40161index 76ba8a1..20ca857 100644
40162--- a/drivers/pci/hotplug/cpqphp_nvram.c
40163+++ b/drivers/pci/hotplug/cpqphp_nvram.c
40164@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40165
40166 void compaq_nvram_init (void __iomem *rom_start)
40167 {
40168+
40169+#ifndef CONFIG_PAX_KERNEXEC
40170 if (rom_start) {
40171 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40172 }
40173+#endif
40174+
40175 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40176
40177 /* initialize our int15 lock */
40178diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40179index 6151389..0a894ef 100644
40180--- a/drivers/pci/hotplug/fakephp.c
40181+++ b/drivers/pci/hotplug/fakephp.c
40182@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40183 }
40184
40185 static struct kobj_type legacy_ktype = {
40186- .sysfs_ops = &(struct sysfs_ops){
40187+ .sysfs_ops = &(const struct sysfs_ops){
40188 .store = legacy_store, .show = legacy_show
40189 },
40190 .release = &legacy_release,
40191diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40192index 5b680df..fe05b7e 100644
40193--- a/drivers/pci/intel-iommu.c
40194+++ b/drivers/pci/intel-iommu.c
40195@@ -2643,7 +2643,7 @@ error:
40196 return 0;
40197 }
40198
40199-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40200+dma_addr_t intel_map_page(struct device *dev, struct page *page,
40201 unsigned long offset, size_t size,
40202 enum dma_data_direction dir,
40203 struct dma_attrs *attrs)
40204@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40205 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40206 }
40207
40208-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40209+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40210 size_t size, enum dma_data_direction dir,
40211 struct dma_attrs *attrs)
40212 {
40213@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40214 }
40215 }
40216
40217-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40218+void *intel_alloc_coherent(struct device *hwdev, size_t size,
40219 dma_addr_t *dma_handle, gfp_t flags)
40220 {
40221 void *vaddr;
40222@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40223 return NULL;
40224 }
40225
40226-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40227+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40228 dma_addr_t dma_handle)
40229 {
40230 int order;
40231@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40232 free_pages((unsigned long)vaddr, order);
40233 }
40234
40235-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40236+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40237 int nelems, enum dma_data_direction dir,
40238 struct dma_attrs *attrs)
40239 {
40240@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40241 return nelems;
40242 }
40243
40244-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40245+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40246 enum dma_data_direction dir, struct dma_attrs *attrs)
40247 {
40248 int i;
40249@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40250 return nelems;
40251 }
40252
40253-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40254+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40255 {
40256 return !dma_addr;
40257 }
40258
40259-struct dma_map_ops intel_dma_ops = {
40260+const struct dma_map_ops intel_dma_ops = {
40261 .alloc_coherent = intel_alloc_coherent,
40262 .free_coherent = intel_free_coherent,
40263 .map_sg = intel_map_sg,
40264diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40265index 5b7056c..607bc94 100644
40266--- a/drivers/pci/pcie/aspm.c
40267+++ b/drivers/pci/pcie/aspm.c
40268@@ -27,9 +27,9 @@
40269 #define MODULE_PARAM_PREFIX "pcie_aspm."
40270
40271 /* Note: those are not register definitions */
40272-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40273-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40274-#define ASPM_STATE_L1 (4) /* L1 state */
40275+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40276+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40277+#define ASPM_STATE_L1 (4U) /* L1 state */
40278 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40279 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40280
40281diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40282index 8105e32..ca10419 100644
40283--- a/drivers/pci/probe.c
40284+++ b/drivers/pci/probe.c
40285@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40286 return ret;
40287 }
40288
40289-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40290+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40291 struct device_attribute *attr,
40292 char *buf)
40293 {
40294 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40295 }
40296
40297-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40298+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40299 struct device_attribute *attr,
40300 char *buf)
40301 {
40302diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40303index a03ad8c..024b0da 100644
40304--- a/drivers/pci/proc.c
40305+++ b/drivers/pci/proc.c
40306@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40307 static int __init pci_proc_init(void)
40308 {
40309 struct pci_dev *dev = NULL;
40310+
40311+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40312+#ifdef CONFIG_GRKERNSEC_PROC_USER
40313+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40314+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40315+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40316+#endif
40317+#else
40318 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40319+#endif
40320 proc_create("devices", 0, proc_bus_pci_dir,
40321 &proc_bus_pci_dev_operations);
40322 proc_initialized = 1;
40323diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40324index 8c02b6c..5584d8e 100644
40325--- a/drivers/pci/slot.c
40326+++ b/drivers/pci/slot.c
40327@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40328 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40329 }
40330
40331-static struct sysfs_ops pci_slot_sysfs_ops = {
40332+static const struct sysfs_ops pci_slot_sysfs_ops = {
40333 .show = pci_slot_attr_show,
40334 .store = pci_slot_attr_store,
40335 };
40336diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40337index 30cf71d2..50938f1 100644
40338--- a/drivers/pcmcia/pcmcia_ioctl.c
40339+++ b/drivers/pcmcia/pcmcia_ioctl.c
40340@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40341 return -EFAULT;
40342 }
40343 }
40344- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40345+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40346 if (!buf)
40347 return -ENOMEM;
40348
40349diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40350index 52183c4..b224c69 100644
40351--- a/drivers/platform/x86/acer-wmi.c
40352+++ b/drivers/platform/x86/acer-wmi.c
40353@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40354 return 0;
40355 }
40356
40357-static struct backlight_ops acer_bl_ops = {
40358+static const struct backlight_ops acer_bl_ops = {
40359 .get_brightness = read_brightness,
40360 .update_status = update_bl_status,
40361 };
40362diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40363index 767cb61..a87380b 100644
40364--- a/drivers/platform/x86/asus-laptop.c
40365+++ b/drivers/platform/x86/asus-laptop.c
40366@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40367 */
40368 static int read_brightness(struct backlight_device *bd);
40369 static int update_bl_status(struct backlight_device *bd);
40370-static struct backlight_ops asusbl_ops = {
40371+static const struct backlight_ops asusbl_ops = {
40372 .get_brightness = read_brightness,
40373 .update_status = update_bl_status,
40374 };
40375diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40376index d66c07a..a4abaac 100644
40377--- a/drivers/platform/x86/asus_acpi.c
40378+++ b/drivers/platform/x86/asus_acpi.c
40379@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40380 return 0;
40381 }
40382
40383-static struct backlight_ops asus_backlight_data = {
40384+static const struct backlight_ops asus_backlight_data = {
40385 .get_brightness = read_brightness,
40386 .update_status = set_brightness_status,
40387 };
40388diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40389index 11003bb..550ff1b 100644
40390--- a/drivers/platform/x86/compal-laptop.c
40391+++ b/drivers/platform/x86/compal-laptop.c
40392@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40393 return set_lcd_level(b->props.brightness);
40394 }
40395
40396-static struct backlight_ops compalbl_ops = {
40397+static const struct backlight_ops compalbl_ops = {
40398 .get_brightness = bl_get_brightness,
40399 .update_status = bl_update_status,
40400 };
40401diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40402index 07a74da..9dc99fa 100644
40403--- a/drivers/platform/x86/dell-laptop.c
40404+++ b/drivers/platform/x86/dell-laptop.c
40405@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40406 return buffer.output[1];
40407 }
40408
40409-static struct backlight_ops dell_ops = {
40410+static const struct backlight_ops dell_ops = {
40411 .get_brightness = dell_get_intensity,
40412 .update_status = dell_send_intensity,
40413 };
40414diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40415index c533b1c..5c81f22 100644
40416--- a/drivers/platform/x86/eeepc-laptop.c
40417+++ b/drivers/platform/x86/eeepc-laptop.c
40418@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40419 */
40420 static int read_brightness(struct backlight_device *bd);
40421 static int update_bl_status(struct backlight_device *bd);
40422-static struct backlight_ops eeepcbl_ops = {
40423+static const struct backlight_ops eeepcbl_ops = {
40424 .get_brightness = read_brightness,
40425 .update_status = update_bl_status,
40426 };
40427diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40428index bcd4ba8..a249b35 100644
40429--- a/drivers/platform/x86/fujitsu-laptop.c
40430+++ b/drivers/platform/x86/fujitsu-laptop.c
40431@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40432 return ret;
40433 }
40434
40435-static struct backlight_ops fujitsubl_ops = {
40436+static const struct backlight_ops fujitsubl_ops = {
40437 .get_brightness = bl_get_brightness,
40438 .update_status = bl_update_status,
40439 };
40440diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40441index 759763d..1093ba2 100644
40442--- a/drivers/platform/x86/msi-laptop.c
40443+++ b/drivers/platform/x86/msi-laptop.c
40444@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40445 return set_lcd_level(b->props.brightness);
40446 }
40447
40448-static struct backlight_ops msibl_ops = {
40449+static const struct backlight_ops msibl_ops = {
40450 .get_brightness = bl_get_brightness,
40451 .update_status = bl_update_status,
40452 };
40453diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40454index fe7cf01..9012d8d 100644
40455--- a/drivers/platform/x86/panasonic-laptop.c
40456+++ b/drivers/platform/x86/panasonic-laptop.c
40457@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40458 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40459 }
40460
40461-static struct backlight_ops pcc_backlight_ops = {
40462+static const struct backlight_ops pcc_backlight_ops = {
40463 .get_brightness = bl_get,
40464 .update_status = bl_set_status,
40465 };
40466diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40467index a2a742c..b37e25e 100644
40468--- a/drivers/platform/x86/sony-laptop.c
40469+++ b/drivers/platform/x86/sony-laptop.c
40470@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40471 }
40472
40473 static struct backlight_device *sony_backlight_device;
40474-static struct backlight_ops sony_backlight_ops = {
40475+static const struct backlight_ops sony_backlight_ops = {
40476 .update_status = sony_backlight_update_status,
40477 .get_brightness = sony_backlight_get_brightness,
40478 };
40479diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40480index 68271ae..5e8fb10 100644
40481--- a/drivers/platform/x86/thinkpad_acpi.c
40482+++ b/drivers/platform/x86/thinkpad_acpi.c
40483@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40484 return 0;
40485 }
40486
40487-void static hotkey_mask_warn_incomplete_mask(void)
40488+static void hotkey_mask_warn_incomplete_mask(void)
40489 {
40490 /* log only what the user can fix... */
40491 const u32 wantedmask = hotkey_driver_mask &
40492@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40493 BACKLIGHT_UPDATE_HOTKEY);
40494 }
40495
40496-static struct backlight_ops ibm_backlight_data = {
40497+static const struct backlight_ops ibm_backlight_data = {
40498 .get_brightness = brightness_get,
40499 .update_status = brightness_update_status,
40500 };
40501diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40502index 51c0a8b..0786629 100644
40503--- a/drivers/platform/x86/toshiba_acpi.c
40504+++ b/drivers/platform/x86/toshiba_acpi.c
40505@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40506 return AE_OK;
40507 }
40508
40509-static struct backlight_ops toshiba_backlight_data = {
40510+static const struct backlight_ops toshiba_backlight_data = {
40511 .get_brightness = get_lcd,
40512 .update_status = set_lcd_status,
40513 };
40514diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40515index fc83783c..cf370d7 100644
40516--- a/drivers/pnp/pnpbios/bioscalls.c
40517+++ b/drivers/pnp/pnpbios/bioscalls.c
40518@@ -60,7 +60,7 @@ do { \
40519 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40520 } while(0)
40521
40522-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40523+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40524 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40525
40526 /*
40527@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40528
40529 cpu = get_cpu();
40530 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40531+
40532+ pax_open_kernel();
40533 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40534+ pax_close_kernel();
40535
40536 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40537 spin_lock_irqsave(&pnp_bios_lock, flags);
40538@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40539 :"memory");
40540 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40541
40542+ pax_open_kernel();
40543 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40544+ pax_close_kernel();
40545+
40546 put_cpu();
40547
40548 /* If we get here and this is set then the PnP BIOS faulted on us. */
40549@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40550 return status;
40551 }
40552
40553-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40554+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40555 {
40556 int i;
40557
40558@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40559 pnp_bios_callpoint.offset = header->fields.pm16offset;
40560 pnp_bios_callpoint.segment = PNP_CS16;
40561
40562+ pax_open_kernel();
40563+
40564 for_each_possible_cpu(i) {
40565 struct desc_struct *gdt = get_cpu_gdt_table(i);
40566 if (!gdt)
40567@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40568 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40569 (unsigned long)__va(header->fields.pm16dseg));
40570 }
40571+
40572+ pax_close_kernel();
40573 }
40574diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40575index ba97654..66b99d4 100644
40576--- a/drivers/pnp/resource.c
40577+++ b/drivers/pnp/resource.c
40578@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40579 return 1;
40580
40581 /* check if the resource is valid */
40582- if (*irq < 0 || *irq > 15)
40583+ if (*irq > 15)
40584 return 0;
40585
40586 /* check if the resource is reserved */
40587@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40588 return 1;
40589
40590 /* check if the resource is valid */
40591- if (*dma < 0 || *dma == 4 || *dma > 7)
40592+ if (*dma == 4 || *dma > 7)
40593 return 0;
40594
40595 /* check if the resource is reserved */
40596diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40597index 62bb981..24a2dc9 100644
40598--- a/drivers/power/bq27x00_battery.c
40599+++ b/drivers/power/bq27x00_battery.c
40600@@ -44,7 +44,7 @@ struct bq27x00_device_info;
40601 struct bq27x00_access_methods {
40602 int (*read)(u8 reg, int *rt_value, int b_single,
40603 struct bq27x00_device_info *di);
40604-};
40605+} __no_const;
40606
40607 struct bq27x00_device_info {
40608 struct device *dev;
40609diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40610index 62227cd..b5b538b 100644
40611--- a/drivers/rtc/rtc-dev.c
40612+++ b/drivers/rtc/rtc-dev.c
40613@@ -14,6 +14,7 @@
40614 #include <linux/module.h>
40615 #include <linux/rtc.h>
40616 #include <linux/sched.h>
40617+#include <linux/grsecurity.h>
40618 #include "rtc-core.h"
40619
40620 static dev_t rtc_devt;
40621@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40622 if (copy_from_user(&tm, uarg, sizeof(tm)))
40623 return -EFAULT;
40624
40625+ gr_log_timechange();
40626+
40627 return rtc_set_time(rtc, &tm);
40628
40629 case RTC_PIE_ON:
40630diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40631index 968e3c7..fbc637a 100644
40632--- a/drivers/s390/cio/qdio_perf.c
40633+++ b/drivers/s390/cio/qdio_perf.c
40634@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40635 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40636 {
40637 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40638- (long)atomic_long_read(&perf_stats.qdio_int));
40639+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40640 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40641- (long)atomic_long_read(&perf_stats.pci_int));
40642+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40643 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40644- (long)atomic_long_read(&perf_stats.thin_int));
40645+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40646 seq_printf(m, "\n");
40647 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40648- (long)atomic_long_read(&perf_stats.tasklet_inbound));
40649+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40650 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40651- (long)atomic_long_read(&perf_stats.tasklet_outbound));
40652+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40653 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40654- (long)atomic_long_read(&perf_stats.tasklet_thinint),
40655- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40656+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40657+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40658 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40659- (long)atomic_long_read(&perf_stats.thinint_inbound),
40660- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40661+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40662+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40663 seq_printf(m, "\n");
40664 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40665- (long)atomic_long_read(&perf_stats.siga_in));
40666+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40667 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40668- (long)atomic_long_read(&perf_stats.siga_out));
40669+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40670 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40671- (long)atomic_long_read(&perf_stats.siga_sync));
40672+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40673 seq_printf(m, "\n");
40674 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40675- (long)atomic_long_read(&perf_stats.inbound_handler));
40676+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40677 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40678- (long)atomic_long_read(&perf_stats.outbound_handler));
40679+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40680 seq_printf(m, "\n");
40681 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40682- (long)atomic_long_read(&perf_stats.fast_requeue));
40683+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40684 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40685- (long)atomic_long_read(&perf_stats.outbound_target_full));
40686+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40687 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40688- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40689+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40690 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40691- (long)atomic_long_read(&perf_stats.debug_stop_polling));
40692+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40693 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40694- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40695+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40696 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40697- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40698- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40699+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40700+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40701 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40702- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40703- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40704+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40705+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40706 seq_printf(m, "\n");
40707 return 0;
40708 }
40709diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40710index ff4504c..b3604c3 100644
40711--- a/drivers/s390/cio/qdio_perf.h
40712+++ b/drivers/s390/cio/qdio_perf.h
40713@@ -13,46 +13,46 @@
40714
40715 struct qdio_perf_stats {
40716 /* interrupt handler calls */
40717- atomic_long_t qdio_int;
40718- atomic_long_t pci_int;
40719- atomic_long_t thin_int;
40720+ atomic_long_unchecked_t qdio_int;
40721+ atomic_long_unchecked_t pci_int;
40722+ atomic_long_unchecked_t thin_int;
40723
40724 /* tasklet runs */
40725- atomic_long_t tasklet_inbound;
40726- atomic_long_t tasklet_outbound;
40727- atomic_long_t tasklet_thinint;
40728- atomic_long_t tasklet_thinint_loop;
40729- atomic_long_t thinint_inbound;
40730- atomic_long_t thinint_inbound_loop;
40731- atomic_long_t thinint_inbound_loop2;
40732+ atomic_long_unchecked_t tasklet_inbound;
40733+ atomic_long_unchecked_t tasklet_outbound;
40734+ atomic_long_unchecked_t tasklet_thinint;
40735+ atomic_long_unchecked_t tasklet_thinint_loop;
40736+ atomic_long_unchecked_t thinint_inbound;
40737+ atomic_long_unchecked_t thinint_inbound_loop;
40738+ atomic_long_unchecked_t thinint_inbound_loop2;
40739
40740 /* signal adapter calls */
40741- atomic_long_t siga_out;
40742- atomic_long_t siga_in;
40743- atomic_long_t siga_sync;
40744+ atomic_long_unchecked_t siga_out;
40745+ atomic_long_unchecked_t siga_in;
40746+ atomic_long_unchecked_t siga_sync;
40747
40748 /* misc */
40749- atomic_long_t inbound_handler;
40750- atomic_long_t outbound_handler;
40751- atomic_long_t fast_requeue;
40752- atomic_long_t outbound_target_full;
40753+ atomic_long_unchecked_t inbound_handler;
40754+ atomic_long_unchecked_t outbound_handler;
40755+ atomic_long_unchecked_t fast_requeue;
40756+ atomic_long_unchecked_t outbound_target_full;
40757
40758 /* for debugging */
40759- atomic_long_t debug_tl_out_timer;
40760- atomic_long_t debug_stop_polling;
40761- atomic_long_t debug_eqbs_all;
40762- atomic_long_t debug_eqbs_incomplete;
40763- atomic_long_t debug_sqbs_all;
40764- atomic_long_t debug_sqbs_incomplete;
40765+ atomic_long_unchecked_t debug_tl_out_timer;
40766+ atomic_long_unchecked_t debug_stop_polling;
40767+ atomic_long_unchecked_t debug_eqbs_all;
40768+ atomic_long_unchecked_t debug_eqbs_incomplete;
40769+ atomic_long_unchecked_t debug_sqbs_all;
40770+ atomic_long_unchecked_t debug_sqbs_incomplete;
40771 };
40772
40773 extern struct qdio_perf_stats perf_stats;
40774 extern int qdio_performance_stats;
40775
40776-static inline void qdio_perf_stat_inc(atomic_long_t *count)
40777+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40778 {
40779 if (qdio_performance_stats)
40780- atomic_long_inc(count);
40781+ atomic_long_inc_unchecked(count);
40782 }
40783
40784 int qdio_setup_perf_stats(void);
40785diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40786index 1ddcf40..a85f062 100644
40787--- a/drivers/scsi/BusLogic.c
40788+++ b/drivers/scsi/BusLogic.c
40789@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40790 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40791 *PrototypeHostAdapter)
40792 {
40793+ pax_track_stack();
40794+
40795 /*
40796 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40797 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40798diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40799index cdbdec9..b7d560b 100644
40800--- a/drivers/scsi/aacraid/aacraid.h
40801+++ b/drivers/scsi/aacraid/aacraid.h
40802@@ -471,7 +471,7 @@ struct adapter_ops
40803 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40804 /* Administrative operations */
40805 int (*adapter_comm)(struct aac_dev * dev, int comm);
40806-};
40807+} __no_const;
40808
40809 /*
40810 * Define which interrupt handler needs to be installed
40811diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40812index a5b8e7b..a6a0e43 100644
40813--- a/drivers/scsi/aacraid/commctrl.c
40814+++ b/drivers/scsi/aacraid/commctrl.c
40815@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40816 u32 actual_fibsize64, actual_fibsize = 0;
40817 int i;
40818
40819+ pax_track_stack();
40820
40821 if (dev->in_reset) {
40822 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40823diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40824index 9b97c3e..f099725 100644
40825--- a/drivers/scsi/aacraid/linit.c
40826+++ b/drivers/scsi/aacraid/linit.c
40827@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40828 #elif defined(__devinitconst)
40829 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40830 #else
40831-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40832+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40833 #endif
40834 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40835 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40836diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40837index 996f722..9127845 100644
40838--- a/drivers/scsi/aic94xx/aic94xx_init.c
40839+++ b/drivers/scsi/aic94xx/aic94xx_init.c
40840@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40841 flash_error_table[i].reason);
40842 }
40843
40844-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40845+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40846 asd_show_update_bios, asd_store_update_bios);
40847
40848 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40849@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40850 .lldd_control_phy = asd_control_phy,
40851 };
40852
40853-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40854+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40855 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40856 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40857 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40858diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40859index 58efd4b..cb48dc7 100644
40860--- a/drivers/scsi/bfa/bfa_ioc.h
40861+++ b/drivers/scsi/bfa/bfa_ioc.h
40862@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40863 bfa_ioc_disable_cbfn_t disable_cbfn;
40864 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40865 bfa_ioc_reset_cbfn_t reset_cbfn;
40866-};
40867+} __no_const;
40868
40869 /**
40870 * Heartbeat failure notification queue element.
40871diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40872index 7ad177e..5503586 100644
40873--- a/drivers/scsi/bfa/bfa_iocfc.h
40874+++ b/drivers/scsi/bfa/bfa_iocfc.h
40875@@ -61,7 +61,7 @@ struct bfa_hwif_s {
40876 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40877 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40878 u32 *nvecs, u32 *maxvec);
40879-};
40880+} __no_const;
40881 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40882
40883 struct bfa_iocfc_s {
40884diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40885index 4967643..cbec06b 100644
40886--- a/drivers/scsi/dpt_i2o.c
40887+++ b/drivers/scsi/dpt_i2o.c
40888@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40889 dma_addr_t addr;
40890 ulong flags = 0;
40891
40892+ pax_track_stack();
40893+
40894 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40895 // get user msg size in u32s
40896 if(get_user(size, &user_msg[0])){
40897@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40898 s32 rcode;
40899 dma_addr_t addr;
40900
40901+ pax_track_stack();
40902+
40903 memset(msg, 0 , sizeof(msg));
40904 len = scsi_bufflen(cmd);
40905 direction = 0x00000000;
40906diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40907index c7076ce..e20c67c 100644
40908--- a/drivers/scsi/eata.c
40909+++ b/drivers/scsi/eata.c
40910@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40911 struct hostdata *ha;
40912 char name[16];
40913
40914+ pax_track_stack();
40915+
40916 sprintf(name, "%s%d", driver_name, j);
40917
40918 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40919diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40920index 11ae5c9..891daec 100644
40921--- a/drivers/scsi/fcoe/libfcoe.c
40922+++ b/drivers/scsi/fcoe/libfcoe.c
40923@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40924 size_t rlen;
40925 size_t dlen;
40926
40927+ pax_track_stack();
40928+
40929 fiph = (struct fip_header *)skb->data;
40930 sub = fiph->fip_subcode;
40931 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40932diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40933index 71c7bbe..e93088a 100644
40934--- a/drivers/scsi/fnic/fnic_main.c
40935+++ b/drivers/scsi/fnic/fnic_main.c
40936@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40937 /* Start local port initiatialization */
40938
40939 lp->link_up = 0;
40940- lp->tt = fnic_transport_template;
40941+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40942
40943 lp->max_retry_count = fnic->config.flogi_retries;
40944 lp->max_rport_retry_count = fnic->config.plogi_retries;
40945diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40946index bb96d74..9ec3ce4 100644
40947--- a/drivers/scsi/gdth.c
40948+++ b/drivers/scsi/gdth.c
40949@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40950 ulong flags;
40951 gdth_ha_str *ha;
40952
40953+ pax_track_stack();
40954+
40955 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40956 return -EFAULT;
40957 ha = gdth_find_ha(ldrv.ionode);
40958@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40959 gdth_ha_str *ha;
40960 int rval;
40961
40962+ pax_track_stack();
40963+
40964 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40965 res.number >= MAX_HDRIVES)
40966 return -EFAULT;
40967@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40968 gdth_ha_str *ha;
40969 int rval;
40970
40971+ pax_track_stack();
40972+
40973 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40974 return -EFAULT;
40975 ha = gdth_find_ha(gen.ionode);
40976@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40977 int i;
40978 gdth_cmd_str gdtcmd;
40979 char cmnd[MAX_COMMAND_SIZE];
40980+
40981+ pax_track_stack();
40982+
40983 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40984
40985 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40986diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40987index 1258da3..20d8ae6 100644
40988--- a/drivers/scsi/gdth_proc.c
40989+++ b/drivers/scsi/gdth_proc.c
40990@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40991 ulong64 paddr;
40992
40993 char cmnd[MAX_COMMAND_SIZE];
40994+
40995+ pax_track_stack();
40996+
40997 memset(cmnd, 0xff, 12);
40998 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40999
41000@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
41001 gdth_hget_str *phg;
41002 char cmnd[MAX_COMMAND_SIZE];
41003
41004+ pax_track_stack();
41005+
41006 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
41007 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
41008 if (!gdtcmd || !estr)
41009diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41010index d03a926..f324286 100644
41011--- a/drivers/scsi/hosts.c
41012+++ b/drivers/scsi/hosts.c
41013@@ -40,7 +40,7 @@
41014 #include "scsi_logging.h"
41015
41016
41017-static atomic_t scsi_host_next_hn; /* host_no for next new host */
41018+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
41019
41020
41021 static void scsi_host_cls_release(struct device *dev)
41022@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41023 * subtract one because we increment first then return, but we need to
41024 * know what the next host number was before increment
41025 */
41026- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41027+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41028 shost->dma_channel = 0xff;
41029
41030 /* These three are default values which can be overridden */
41031diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
41032index a601159..55e19d2 100644
41033--- a/drivers/scsi/ipr.c
41034+++ b/drivers/scsi/ipr.c
41035@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
41036 return true;
41037 }
41038
41039-static struct ata_port_operations ipr_sata_ops = {
41040+static const struct ata_port_operations ipr_sata_ops = {
41041 .phy_reset = ipr_ata_phy_reset,
41042 .hardreset = ipr_sata_reset,
41043 .post_internal_cmd = ipr_ata_post_internal,
41044diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
41045index 4e49fbc..97907ff 100644
41046--- a/drivers/scsi/ips.h
41047+++ b/drivers/scsi/ips.h
41048@@ -1027,7 +1027,7 @@ typedef struct {
41049 int (*intr)(struct ips_ha *);
41050 void (*enableint)(struct ips_ha *);
41051 uint32_t (*statupd)(struct ips_ha *);
41052-} ips_hw_func_t;
41053+} __no_const ips_hw_func_t;
41054
41055 typedef struct ips_ha {
41056 uint8_t ha_id[IPS_MAX_CHANNELS+1];
41057diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41058index c1c1574..a9c9348 100644
41059--- a/drivers/scsi/libfc/fc_exch.c
41060+++ b/drivers/scsi/libfc/fc_exch.c
41061@@ -86,12 +86,12 @@ struct fc_exch_mgr {
41062 * all together if not used XXX
41063 */
41064 struct {
41065- atomic_t no_free_exch;
41066- atomic_t no_free_exch_xid;
41067- atomic_t xid_not_found;
41068- atomic_t xid_busy;
41069- atomic_t seq_not_found;
41070- atomic_t non_bls_resp;
41071+ atomic_unchecked_t no_free_exch;
41072+ atomic_unchecked_t no_free_exch_xid;
41073+ atomic_unchecked_t xid_not_found;
41074+ atomic_unchecked_t xid_busy;
41075+ atomic_unchecked_t seq_not_found;
41076+ atomic_unchecked_t non_bls_resp;
41077 } stats;
41078 };
41079 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
41080@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41081 /* allocate memory for exchange */
41082 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41083 if (!ep) {
41084- atomic_inc(&mp->stats.no_free_exch);
41085+ atomic_inc_unchecked(&mp->stats.no_free_exch);
41086 goto out;
41087 }
41088 memset(ep, 0, sizeof(*ep));
41089@@ -557,7 +557,7 @@ out:
41090 return ep;
41091 err:
41092 spin_unlock_bh(&pool->lock);
41093- atomic_inc(&mp->stats.no_free_exch_xid);
41094+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41095 mempool_free(ep, mp->ep_pool);
41096 return NULL;
41097 }
41098@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41099 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41100 ep = fc_exch_find(mp, xid);
41101 if (!ep) {
41102- atomic_inc(&mp->stats.xid_not_found);
41103+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41104 reject = FC_RJT_OX_ID;
41105 goto out;
41106 }
41107@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41108 ep = fc_exch_find(mp, xid);
41109 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41110 if (ep) {
41111- atomic_inc(&mp->stats.xid_busy);
41112+ atomic_inc_unchecked(&mp->stats.xid_busy);
41113 reject = FC_RJT_RX_ID;
41114 goto rel;
41115 }
41116@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41117 }
41118 xid = ep->xid; /* get our XID */
41119 } else if (!ep) {
41120- atomic_inc(&mp->stats.xid_not_found);
41121+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41122 reject = FC_RJT_RX_ID; /* XID not found */
41123 goto out;
41124 }
41125@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41126 } else {
41127 sp = &ep->seq;
41128 if (sp->id != fh->fh_seq_id) {
41129- atomic_inc(&mp->stats.seq_not_found);
41130+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41131 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41132 goto rel;
41133 }
41134@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41135
41136 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41137 if (!ep) {
41138- atomic_inc(&mp->stats.xid_not_found);
41139+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41140 goto out;
41141 }
41142 if (ep->esb_stat & ESB_ST_COMPLETE) {
41143- atomic_inc(&mp->stats.xid_not_found);
41144+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41145 goto out;
41146 }
41147 if (ep->rxid == FC_XID_UNKNOWN)
41148 ep->rxid = ntohs(fh->fh_rx_id);
41149 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41150- atomic_inc(&mp->stats.xid_not_found);
41151+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41152 goto rel;
41153 }
41154 if (ep->did != ntoh24(fh->fh_s_id) &&
41155 ep->did != FC_FID_FLOGI) {
41156- atomic_inc(&mp->stats.xid_not_found);
41157+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41158 goto rel;
41159 }
41160 sof = fr_sof(fp);
41161@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41162 } else {
41163 sp = &ep->seq;
41164 if (sp->id != fh->fh_seq_id) {
41165- atomic_inc(&mp->stats.seq_not_found);
41166+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41167 goto rel;
41168 }
41169 }
41170@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41171 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41172
41173 if (!sp)
41174- atomic_inc(&mp->stats.xid_not_found);
41175+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41176 else
41177- atomic_inc(&mp->stats.non_bls_resp);
41178+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
41179
41180 fc_frame_free(fp);
41181 }
41182diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41183index 0ee989f..a582241 100644
41184--- a/drivers/scsi/libsas/sas_ata.c
41185+++ b/drivers/scsi/libsas/sas_ata.c
41186@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41187 }
41188 }
41189
41190-static struct ata_port_operations sas_sata_ops = {
41191+static const struct ata_port_operations sas_sata_ops = {
41192 .phy_reset = sas_ata_phy_reset,
41193 .post_internal_cmd = sas_ata_post_internal,
41194 .qc_defer = ata_std_qc_defer,
41195diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41196index aa10f79..5cc79e4 100644
41197--- a/drivers/scsi/lpfc/lpfc.h
41198+++ b/drivers/scsi/lpfc/lpfc.h
41199@@ -400,7 +400,7 @@ struct lpfc_vport {
41200 struct dentry *debug_nodelist;
41201 struct dentry *vport_debugfs_root;
41202 struct lpfc_debugfs_trc *disc_trc;
41203- atomic_t disc_trc_cnt;
41204+ atomic_unchecked_t disc_trc_cnt;
41205 #endif
41206 uint8_t stat_data_enabled;
41207 uint8_t stat_data_blocked;
41208@@ -725,8 +725,8 @@ struct lpfc_hba {
41209 struct timer_list fabric_block_timer;
41210 unsigned long bit_flags;
41211 #define FABRIC_COMANDS_BLOCKED 0
41212- atomic_t num_rsrc_err;
41213- atomic_t num_cmd_success;
41214+ atomic_unchecked_t num_rsrc_err;
41215+ atomic_unchecked_t num_cmd_success;
41216 unsigned long last_rsrc_error_time;
41217 unsigned long last_ramp_down_time;
41218 unsigned long last_ramp_up_time;
41219@@ -740,7 +740,7 @@ struct lpfc_hba {
41220 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41221 struct dentry *debug_slow_ring_trc;
41222 struct lpfc_debugfs_trc *slow_ring_trc;
41223- atomic_t slow_ring_trc_cnt;
41224+ atomic_unchecked_t slow_ring_trc_cnt;
41225 #endif
41226
41227 /* Used for deferred freeing of ELS data buffers */
41228diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41229index 8d0f0de..7c77a62 100644
41230--- a/drivers/scsi/lpfc/lpfc_debugfs.c
41231+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41232@@ -124,7 +124,7 @@ struct lpfc_debug {
41233 int len;
41234 };
41235
41236-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41237+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41238 static unsigned long lpfc_debugfs_start_time = 0L;
41239
41240 /**
41241@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41242 lpfc_debugfs_enable = 0;
41243
41244 len = 0;
41245- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41246+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41247 (lpfc_debugfs_max_disc_trc - 1);
41248 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41249 dtp = vport->disc_trc + i;
41250@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41251 lpfc_debugfs_enable = 0;
41252
41253 len = 0;
41254- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41255+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41256 (lpfc_debugfs_max_slow_ring_trc - 1);
41257 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41258 dtp = phba->slow_ring_trc + i;
41259@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41260 uint32_t *ptr;
41261 char buffer[1024];
41262
41263+ pax_track_stack();
41264+
41265 off = 0;
41266 spin_lock_irq(&phba->hbalock);
41267
41268@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41269 !vport || !vport->disc_trc)
41270 return;
41271
41272- index = atomic_inc_return(&vport->disc_trc_cnt) &
41273+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41274 (lpfc_debugfs_max_disc_trc - 1);
41275 dtp = vport->disc_trc + index;
41276 dtp->fmt = fmt;
41277 dtp->data1 = data1;
41278 dtp->data2 = data2;
41279 dtp->data3 = data3;
41280- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41281+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41282 dtp->jif = jiffies;
41283 #endif
41284 return;
41285@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41286 !phba || !phba->slow_ring_trc)
41287 return;
41288
41289- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41290+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41291 (lpfc_debugfs_max_slow_ring_trc - 1);
41292 dtp = phba->slow_ring_trc + index;
41293 dtp->fmt = fmt;
41294 dtp->data1 = data1;
41295 dtp->data2 = data2;
41296 dtp->data3 = data3;
41297- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41298+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41299 dtp->jif = jiffies;
41300 #endif
41301 return;
41302@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41303 "slow_ring buffer\n");
41304 goto debug_failed;
41305 }
41306- atomic_set(&phba->slow_ring_trc_cnt, 0);
41307+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41308 memset(phba->slow_ring_trc, 0,
41309 (sizeof(struct lpfc_debugfs_trc) *
41310 lpfc_debugfs_max_slow_ring_trc));
41311@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41312 "buffer\n");
41313 goto debug_failed;
41314 }
41315- atomic_set(&vport->disc_trc_cnt, 0);
41316+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41317
41318 snprintf(name, sizeof(name), "discovery_trace");
41319 vport->debug_disc_trc =
41320diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41321index 549bc7d..8189dbb 100644
41322--- a/drivers/scsi/lpfc/lpfc_init.c
41323+++ b/drivers/scsi/lpfc/lpfc_init.c
41324@@ -8021,8 +8021,10 @@ lpfc_init(void)
41325 printk(LPFC_COPYRIGHT "\n");
41326
41327 if (lpfc_enable_npiv) {
41328- lpfc_transport_functions.vport_create = lpfc_vport_create;
41329- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41330+ pax_open_kernel();
41331+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41332+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41333+ pax_close_kernel();
41334 }
41335 lpfc_transport_template =
41336 fc_attach_transport(&lpfc_transport_functions);
41337diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41338index c88f59f..ff2a42f 100644
41339--- a/drivers/scsi/lpfc/lpfc_scsi.c
41340+++ b/drivers/scsi/lpfc/lpfc_scsi.c
41341@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41342 uint32_t evt_posted;
41343
41344 spin_lock_irqsave(&phba->hbalock, flags);
41345- atomic_inc(&phba->num_rsrc_err);
41346+ atomic_inc_unchecked(&phba->num_rsrc_err);
41347 phba->last_rsrc_error_time = jiffies;
41348
41349 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41350@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41351 unsigned long flags;
41352 struct lpfc_hba *phba = vport->phba;
41353 uint32_t evt_posted;
41354- atomic_inc(&phba->num_cmd_success);
41355+ atomic_inc_unchecked(&phba->num_cmd_success);
41356
41357 if (vport->cfg_lun_queue_depth <= queue_depth)
41358 return;
41359@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41360 int i;
41361 struct lpfc_rport_data *rdata;
41362
41363- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41364- num_cmd_success = atomic_read(&phba->num_cmd_success);
41365+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41366+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41367
41368 vports = lpfc_create_vport_work_array(phba);
41369 if (vports != NULL)
41370@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41371 }
41372 }
41373 lpfc_destroy_vport_work_array(phba, vports);
41374- atomic_set(&phba->num_rsrc_err, 0);
41375- atomic_set(&phba->num_cmd_success, 0);
41376+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41377+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41378 }
41379
41380 /**
41381@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41382 }
41383 }
41384 lpfc_destroy_vport_work_array(phba, vports);
41385- atomic_set(&phba->num_rsrc_err, 0);
41386- atomic_set(&phba->num_cmd_success, 0);
41387+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41388+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41389 }
41390
41391 /**
41392diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41393index 234f0b7..3020aea 100644
41394--- a/drivers/scsi/megaraid/megaraid_mbox.c
41395+++ b/drivers/scsi/megaraid/megaraid_mbox.c
41396@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41397 int rval;
41398 int i;
41399
41400+ pax_track_stack();
41401+
41402 // Allocate memory for the base list of scb for management module.
41403 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41404
41405diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41406index 7a117c1..ee01e9e 100644
41407--- a/drivers/scsi/osd/osd_initiator.c
41408+++ b/drivers/scsi/osd/osd_initiator.c
41409@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41410 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41411 int ret;
41412
41413+ pax_track_stack();
41414+
41415 or = osd_start_request(od, GFP_KERNEL);
41416 if (!or)
41417 return -ENOMEM;
41418diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41419index 9ab8c86..9425ad3 100644
41420--- a/drivers/scsi/pmcraid.c
41421+++ b/drivers/scsi/pmcraid.c
41422@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41423 res->scsi_dev = scsi_dev;
41424 scsi_dev->hostdata = res;
41425 res->change_detected = 0;
41426- atomic_set(&res->read_failures, 0);
41427- atomic_set(&res->write_failures, 0);
41428+ atomic_set_unchecked(&res->read_failures, 0);
41429+ atomic_set_unchecked(&res->write_failures, 0);
41430 rc = 0;
41431 }
41432 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41433@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41434
41435 /* If this was a SCSI read/write command keep count of errors */
41436 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41437- atomic_inc(&res->read_failures);
41438+ atomic_inc_unchecked(&res->read_failures);
41439 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41440- atomic_inc(&res->write_failures);
41441+ atomic_inc_unchecked(&res->write_failures);
41442
41443 if (!RES_IS_GSCSI(res->cfg_entry) &&
41444 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41445@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41446
41447 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41448 /* add resources only after host is added into system */
41449- if (!atomic_read(&pinstance->expose_resources))
41450+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41451 return;
41452
41453 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41454@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41455 init_waitqueue_head(&pinstance->reset_wait_q);
41456
41457 atomic_set(&pinstance->outstanding_cmds, 0);
41458- atomic_set(&pinstance->expose_resources, 0);
41459+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41460
41461 INIT_LIST_HEAD(&pinstance->free_res_q);
41462 INIT_LIST_HEAD(&pinstance->used_res_q);
41463@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41464 /* Schedule worker thread to handle CCN and take care of adding and
41465 * removing devices to OS
41466 */
41467- atomic_set(&pinstance->expose_resources, 1);
41468+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41469 schedule_work(&pinstance->worker_q);
41470 return rc;
41471
41472diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41473index 3441b3f..6cbe8f7 100644
41474--- a/drivers/scsi/pmcraid.h
41475+++ b/drivers/scsi/pmcraid.h
41476@@ -690,7 +690,7 @@ struct pmcraid_instance {
41477 atomic_t outstanding_cmds;
41478
41479 /* should add/delete resources to mid-layer now ?*/
41480- atomic_t expose_resources;
41481+ atomic_unchecked_t expose_resources;
41482
41483 /* Tasklet to handle deferred processing */
41484 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41485@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41486 struct list_head queue; /* link to "to be exposed" resources */
41487 struct pmcraid_config_table_entry cfg_entry;
41488 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41489- atomic_t read_failures; /* count of failed READ commands */
41490- atomic_t write_failures; /* count of failed WRITE commands */
41491+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41492+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41493
41494 /* To indicate add/delete/modify during CCN */
41495 u8 change_detected;
41496diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41497index 2150618..7034215 100644
41498--- a/drivers/scsi/qla2xxx/qla_def.h
41499+++ b/drivers/scsi/qla2xxx/qla_def.h
41500@@ -2089,7 +2089,7 @@ struct isp_operations {
41501
41502 int (*get_flash_version) (struct scsi_qla_host *, void *);
41503 int (*start_scsi) (srb_t *);
41504-};
41505+} __no_const;
41506
41507 /* MSI-X Support *************************************************************/
41508
41509diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41510index 81b5f29..2ae1fad 100644
41511--- a/drivers/scsi/qla4xxx/ql4_def.h
41512+++ b/drivers/scsi/qla4xxx/ql4_def.h
41513@@ -240,7 +240,7 @@ struct ddb_entry {
41514 atomic_t retry_relogin_timer; /* Min Time between relogins
41515 * (4000 only) */
41516 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41517- atomic_t relogin_retry_count; /* Num of times relogin has been
41518+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41519 * retried */
41520
41521 uint16_t port;
41522diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41523index af8c323..515dd51 100644
41524--- a/drivers/scsi/qla4xxx/ql4_init.c
41525+++ b/drivers/scsi/qla4xxx/ql4_init.c
41526@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41527 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41528 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41529 atomic_set(&ddb_entry->relogin_timer, 0);
41530- atomic_set(&ddb_entry->relogin_retry_count, 0);
41531+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41532 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41533 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41534 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41535@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41536 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41537 atomic_set(&ddb_entry->port_down_timer,
41538 ha->port_down_retry_count);
41539- atomic_set(&ddb_entry->relogin_retry_count, 0);
41540+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41541 atomic_set(&ddb_entry->relogin_timer, 0);
41542 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41543 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41544diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41545index 83c8b5e..a82b348 100644
41546--- a/drivers/scsi/qla4xxx/ql4_os.c
41547+++ b/drivers/scsi/qla4xxx/ql4_os.c
41548@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41549 ddb_entry->fw_ddb_device_state ==
41550 DDB_DS_SESSION_FAILED) {
41551 /* Reset retry relogin timer */
41552- atomic_inc(&ddb_entry->relogin_retry_count);
41553+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41554 DEBUG2(printk("scsi%ld: index[%d] relogin"
41555 " timed out-retrying"
41556 " relogin (%d)\n",
41557 ha->host_no,
41558 ddb_entry->fw_ddb_index,
41559- atomic_read(&ddb_entry->
41560+ atomic_read_unchecked(&ddb_entry->
41561 relogin_retry_count))
41562 );
41563 start_dpc++;
41564diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41565index dd098ca..686ce01 100644
41566--- a/drivers/scsi/scsi.c
41567+++ b/drivers/scsi/scsi.c
41568@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41569 unsigned long timeout;
41570 int rtn = 0;
41571
41572- atomic_inc(&cmd->device->iorequest_cnt);
41573+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41574
41575 /* check if the device is still usable */
41576 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41577diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41578index bc3e363..e1a8e50 100644
41579--- a/drivers/scsi/scsi_debug.c
41580+++ b/drivers/scsi/scsi_debug.c
41581@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41582 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41583 unsigned char *cmd = (unsigned char *)scp->cmnd;
41584
41585+ pax_track_stack();
41586+
41587 if ((errsts = check_readiness(scp, 1, devip)))
41588 return errsts;
41589 memset(arr, 0, sizeof(arr));
41590@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41591 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41592 unsigned char *cmd = (unsigned char *)scp->cmnd;
41593
41594+ pax_track_stack();
41595+
41596 if ((errsts = check_readiness(scp, 1, devip)))
41597 return errsts;
41598 memset(arr, 0, sizeof(arr));
41599diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41600index 8df12522..c4c1472 100644
41601--- a/drivers/scsi/scsi_lib.c
41602+++ b/drivers/scsi/scsi_lib.c
41603@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41604 shost = sdev->host;
41605 scsi_init_cmd_errh(cmd);
41606 cmd->result = DID_NO_CONNECT << 16;
41607- atomic_inc(&cmd->device->iorequest_cnt);
41608+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41609
41610 /*
41611 * SCSI request completion path will do scsi_device_unbusy(),
41612@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41613 */
41614 cmd->serial_number = 0;
41615
41616- atomic_inc(&cmd->device->iodone_cnt);
41617+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41618 if (cmd->result)
41619- atomic_inc(&cmd->device->ioerr_cnt);
41620+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41621
41622 disposition = scsi_decide_disposition(cmd);
41623 if (disposition != SUCCESS &&
41624diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41625index 91a93e0..eae0fe3 100644
41626--- a/drivers/scsi/scsi_sysfs.c
41627+++ b/drivers/scsi/scsi_sysfs.c
41628@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41629 char *buf) \
41630 { \
41631 struct scsi_device *sdev = to_scsi_device(dev); \
41632- unsigned long long count = atomic_read(&sdev->field); \
41633+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41634 return snprintf(buf, 20, "0x%llx\n", count); \
41635 } \
41636 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41637diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41638index 1030327..f91fd30 100644
41639--- a/drivers/scsi/scsi_tgt_lib.c
41640+++ b/drivers/scsi/scsi_tgt_lib.c
41641@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41642 int err;
41643
41644 dprintk("%lx %u\n", uaddr, len);
41645- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41646+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41647 if (err) {
41648 /*
41649 * TODO: need to fixup sg_tablesize, max_segment_size,
41650diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41651index db02e31..1b42ea9 100644
41652--- a/drivers/scsi/scsi_transport_fc.c
41653+++ b/drivers/scsi/scsi_transport_fc.c
41654@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41655 * Netlink Infrastructure
41656 */
41657
41658-static atomic_t fc_event_seq;
41659+static atomic_unchecked_t fc_event_seq;
41660
41661 /**
41662 * fc_get_event_number - Obtain the next sequential FC event number
41663@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41664 u32
41665 fc_get_event_number(void)
41666 {
41667- return atomic_add_return(1, &fc_event_seq);
41668+ return atomic_add_return_unchecked(1, &fc_event_seq);
41669 }
41670 EXPORT_SYMBOL(fc_get_event_number);
41671
41672@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41673 {
41674 int error;
41675
41676- atomic_set(&fc_event_seq, 0);
41677+ atomic_set_unchecked(&fc_event_seq, 0);
41678
41679 error = transport_class_register(&fc_host_class);
41680 if (error)
41681diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41682index de2f8c4..63c5278 100644
41683--- a/drivers/scsi/scsi_transport_iscsi.c
41684+++ b/drivers/scsi/scsi_transport_iscsi.c
41685@@ -81,7 +81,7 @@ struct iscsi_internal {
41686 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41687 };
41688
41689-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41690+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41691 static struct workqueue_struct *iscsi_eh_timer_workq;
41692
41693 /*
41694@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41695 int err;
41696
41697 ihost = shost->shost_data;
41698- session->sid = atomic_add_return(1, &iscsi_session_nr);
41699+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41700
41701 if (id == ISCSI_MAX_TARGET) {
41702 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41703@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41704 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41705 ISCSI_TRANSPORT_VERSION);
41706
41707- atomic_set(&iscsi_session_nr, 0);
41708+ atomic_set_unchecked(&iscsi_session_nr, 0);
41709
41710 err = class_register(&iscsi_transport_class);
41711 if (err)
41712diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41713index 21a045e..ec89e03 100644
41714--- a/drivers/scsi/scsi_transport_srp.c
41715+++ b/drivers/scsi/scsi_transport_srp.c
41716@@ -33,7 +33,7 @@
41717 #include "scsi_transport_srp_internal.h"
41718
41719 struct srp_host_attrs {
41720- atomic_t next_port_id;
41721+ atomic_unchecked_t next_port_id;
41722 };
41723 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41724
41725@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41726 struct Scsi_Host *shost = dev_to_shost(dev);
41727 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41728
41729- atomic_set(&srp_host->next_port_id, 0);
41730+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41731 return 0;
41732 }
41733
41734@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41735 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41736 rport->roles = ids->roles;
41737
41738- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41739+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41740 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41741
41742 transport_setup_device(&rport->dev);
41743diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41744index 040f751..98a5ed2 100644
41745--- a/drivers/scsi/sg.c
41746+++ b/drivers/scsi/sg.c
41747@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41748 sdp->disk->disk_name,
41749 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41750 NULL,
41751- (char *)arg);
41752+ (char __user *)arg);
41753 case BLKTRACESTART:
41754 return blk_trace_startstop(sdp->device->request_queue, 1);
41755 case BLKTRACESTOP:
41756@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41757 const struct file_operations * fops;
41758 };
41759
41760-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41761+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41762 {"allow_dio", &adio_fops},
41763 {"debug", &debug_fops},
41764 {"def_reserved_size", &dressz_fops},
41765@@ -2307,7 +2307,7 @@ sg_proc_init(void)
41766 {
41767 int k, mask;
41768 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41769- struct sg_proc_leaf * leaf;
41770+ const struct sg_proc_leaf * leaf;
41771
41772 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41773 if (!sg_proc_sgp)
41774diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41775index c19ca5e..3eb5959 100644
41776--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41777+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41778@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41779 int do_iounmap = 0;
41780 int do_disable_device = 1;
41781
41782+ pax_track_stack();
41783+
41784 memset(&sym_dev, 0, sizeof(sym_dev));
41785 memset(&nvram, 0, sizeof(nvram));
41786 sym_dev.pdev = pdev;
41787diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41788index eadc1ab..2d81457 100644
41789--- a/drivers/serial/kgdboc.c
41790+++ b/drivers/serial/kgdboc.c
41791@@ -18,7 +18,7 @@
41792
41793 #define MAX_CONFIG_LEN 40
41794
41795-static struct kgdb_io kgdboc_io_ops;
41796+static const struct kgdb_io kgdboc_io_ops;
41797
41798 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41799 static int configured = -1;
41800@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41801 module_put(THIS_MODULE);
41802 }
41803
41804-static struct kgdb_io kgdboc_io_ops = {
41805+static const struct kgdb_io kgdboc_io_ops = {
41806 .name = "kgdboc",
41807 .read_char = kgdboc_get_char,
41808 .write_char = kgdboc_put_char,
41809diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41810index b76f246..7f41af7 100644
41811--- a/drivers/spi/spi.c
41812+++ b/drivers/spi/spi.c
41813@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41814 EXPORT_SYMBOL_GPL(spi_sync);
41815
41816 /* portable code must never pass more than 32 bytes */
41817-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41818+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41819
41820 static u8 *buf;
41821
41822diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41823index b9b37ff..19dfa23 100644
41824--- a/drivers/staging/android/binder.c
41825+++ b/drivers/staging/android/binder.c
41826@@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41827 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41828 }
41829
41830-static struct vm_operations_struct binder_vm_ops = {
41831+static const struct vm_operations_struct binder_vm_ops = {
41832 .open = binder_vma_open,
41833 .close = binder_vma_close,
41834 };
41835diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41836index cda26bb..39fed3f 100644
41837--- a/drivers/staging/b3dfg/b3dfg.c
41838+++ b/drivers/staging/b3dfg/b3dfg.c
41839@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41840 return VM_FAULT_NOPAGE;
41841 }
41842
41843-static struct vm_operations_struct b3dfg_vm_ops = {
41844+static const struct vm_operations_struct b3dfg_vm_ops = {
41845 .fault = b3dfg_vma_fault,
41846 };
41847
41848@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41849 return r;
41850 }
41851
41852-static struct file_operations b3dfg_fops = {
41853+static const struct file_operations b3dfg_fops = {
41854 .owner = THIS_MODULE,
41855 .open = b3dfg_open,
41856 .release = b3dfg_release,
41857diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41858index 908f25a..c9a579b 100644
41859--- a/drivers/staging/comedi/comedi_fops.c
41860+++ b/drivers/staging/comedi/comedi_fops.c
41861@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41862 mutex_unlock(&dev->mutex);
41863 }
41864
41865-static struct vm_operations_struct comedi_vm_ops = {
41866+static const struct vm_operations_struct comedi_vm_ops = {
41867 .close = comedi_unmap,
41868 };
41869
41870diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41871index e55a0db..577b776 100644
41872--- a/drivers/staging/dream/qdsp5/adsp_driver.c
41873+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41874@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41875 static dev_t adsp_devno;
41876 static struct class *adsp_class;
41877
41878-static struct file_operations adsp_fops = {
41879+static const struct file_operations adsp_fops = {
41880 .owner = THIS_MODULE,
41881 .open = adsp_open,
41882 .unlocked_ioctl = adsp_ioctl,
41883diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41884index ad2390f..4116ee8 100644
41885--- a/drivers/staging/dream/qdsp5/audio_aac.c
41886+++ b/drivers/staging/dream/qdsp5/audio_aac.c
41887@@ -1022,7 +1022,7 @@ done:
41888 return rc;
41889 }
41890
41891-static struct file_operations audio_aac_fops = {
41892+static const struct file_operations audio_aac_fops = {
41893 .owner = THIS_MODULE,
41894 .open = audio_open,
41895 .release = audio_release,
41896diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41897index cd818a5..870b37b 100644
41898--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41899+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41900@@ -833,7 +833,7 @@ done:
41901 return rc;
41902 }
41903
41904-static struct file_operations audio_amrnb_fops = {
41905+static const struct file_operations audio_amrnb_fops = {
41906 .owner = THIS_MODULE,
41907 .open = audamrnb_open,
41908 .release = audamrnb_release,
41909diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41910index 4b43e18..cedafda 100644
41911--- a/drivers/staging/dream/qdsp5/audio_evrc.c
41912+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41913@@ -805,7 +805,7 @@ dma_fail:
41914 return rc;
41915 }
41916
41917-static struct file_operations audio_evrc_fops = {
41918+static const struct file_operations audio_evrc_fops = {
41919 .owner = THIS_MODULE,
41920 .open = audevrc_open,
41921 .release = audevrc_release,
41922diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41923index 3d950a2..9431118 100644
41924--- a/drivers/staging/dream/qdsp5/audio_in.c
41925+++ b/drivers/staging/dream/qdsp5/audio_in.c
41926@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41927 return 0;
41928 }
41929
41930-static struct file_operations audio_fops = {
41931+static const struct file_operations audio_fops = {
41932 .owner = THIS_MODULE,
41933 .open = audio_in_open,
41934 .release = audio_in_release,
41935@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41936 .unlocked_ioctl = audio_in_ioctl,
41937 };
41938
41939-static struct file_operations audpre_fops = {
41940+static const struct file_operations audpre_fops = {
41941 .owner = THIS_MODULE,
41942 .open = audpre_open,
41943 .unlocked_ioctl = audpre_ioctl,
41944diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41945index b95574f..286c2f4 100644
41946--- a/drivers/staging/dream/qdsp5/audio_mp3.c
41947+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41948@@ -941,7 +941,7 @@ done:
41949 return rc;
41950 }
41951
41952-static struct file_operations audio_mp3_fops = {
41953+static const struct file_operations audio_mp3_fops = {
41954 .owner = THIS_MODULE,
41955 .open = audio_open,
41956 .release = audio_release,
41957diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41958index d1adcf6..f8f9833 100644
41959--- a/drivers/staging/dream/qdsp5/audio_out.c
41960+++ b/drivers/staging/dream/qdsp5/audio_out.c
41961@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41962 return 0;
41963 }
41964
41965-static struct file_operations audio_fops = {
41966+static const struct file_operations audio_fops = {
41967 .owner = THIS_MODULE,
41968 .open = audio_open,
41969 .release = audio_release,
41970@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41971 .unlocked_ioctl = audio_ioctl,
41972 };
41973
41974-static struct file_operations audpp_fops = {
41975+static const struct file_operations audpp_fops = {
41976 .owner = THIS_MODULE,
41977 .open = audpp_open,
41978 .unlocked_ioctl = audpp_ioctl,
41979diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41980index f0f50e3..f6b9dbc 100644
41981--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41982+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41983@@ -816,7 +816,7 @@ err:
41984 return rc;
41985 }
41986
41987-static struct file_operations audio_qcelp_fops = {
41988+static const struct file_operations audio_qcelp_fops = {
41989 .owner = THIS_MODULE,
41990 .open = audqcelp_open,
41991 .release = audqcelp_release,
41992diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41993index 037d7ff..5469ec3 100644
41994--- a/drivers/staging/dream/qdsp5/snd.c
41995+++ b/drivers/staging/dream/qdsp5/snd.c
41996@@ -242,7 +242,7 @@ err:
41997 return rc;
41998 }
41999
42000-static struct file_operations snd_fops = {
42001+static const struct file_operations snd_fops = {
42002 .owner = THIS_MODULE,
42003 .open = snd_open,
42004 .release = snd_release,
42005diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
42006index d4e7d88..0ea632a 100644
42007--- a/drivers/staging/dream/smd/smd_qmi.c
42008+++ b/drivers/staging/dream/smd/smd_qmi.c
42009@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
42010 return 0;
42011 }
42012
42013-static struct file_operations qmi_fops = {
42014+static const struct file_operations qmi_fops = {
42015 .owner = THIS_MODULE,
42016 .read = qmi_read,
42017 .write = qmi_write,
42018diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42019index cd3910b..ff053d3 100644
42020--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
42021+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42022@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
42023 return rc;
42024 }
42025
42026-static struct file_operations rpcrouter_server_fops = {
42027+static const struct file_operations rpcrouter_server_fops = {
42028 .owner = THIS_MODULE,
42029 .open = rpcrouter_open,
42030 .release = rpcrouter_release,
42031@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
42032 .unlocked_ioctl = rpcrouter_ioctl,
42033 };
42034
42035-static struct file_operations rpcrouter_router_fops = {
42036+static const struct file_operations rpcrouter_router_fops = {
42037 .owner = THIS_MODULE,
42038 .open = rpcrouter_open,
42039 .release = rpcrouter_release,
42040diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
42041index c24e4e0..07665be 100644
42042--- a/drivers/staging/dst/dcore.c
42043+++ b/drivers/staging/dst/dcore.c
42044@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
42045 return 0;
42046 }
42047
42048-static struct block_device_operations dst_blk_ops = {
42049+static const struct block_device_operations dst_blk_ops = {
42050 .open = dst_bdev_open,
42051 .release = dst_bdev_release,
42052 .owner = THIS_MODULE,
42053@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
42054 n->size = ctl->size;
42055
42056 atomic_set(&n->refcnt, 1);
42057- atomic_long_set(&n->gen, 0);
42058+ atomic_long_set_unchecked(&n->gen, 0);
42059 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
42060
42061 err = dst_node_sysfs_init(n);
42062diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
42063index 557d372..8d84422 100644
42064--- a/drivers/staging/dst/trans.c
42065+++ b/drivers/staging/dst/trans.c
42066@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
42067 t->error = 0;
42068 t->retries = 0;
42069 atomic_set(&t->refcnt, 1);
42070- t->gen = atomic_long_inc_return(&n->gen);
42071+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
42072
42073 t->enc = bio_data_dir(bio);
42074 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
42075diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
42076index 94f7752..d051514 100644
42077--- a/drivers/staging/et131x/et1310_tx.c
42078+++ b/drivers/staging/et131x/et1310_tx.c
42079@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
42080 struct net_device_stats *stats = &etdev->net_stats;
42081
42082 if (pMpTcb->Flags & fMP_DEST_BROAD)
42083- atomic_inc(&etdev->Stats.brdcstxmt);
42084+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42085 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42086- atomic_inc(&etdev->Stats.multixmt);
42087+ atomic_inc_unchecked(&etdev->Stats.multixmt);
42088 else
42089- atomic_inc(&etdev->Stats.unixmt);
42090+ atomic_inc_unchecked(&etdev->Stats.unixmt);
42091
42092 if (pMpTcb->Packet) {
42093 stats->tx_bytes += pMpTcb->Packet->len;
42094diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42095index 1dfe06f..f469b4d 100644
42096--- a/drivers/staging/et131x/et131x_adapter.h
42097+++ b/drivers/staging/et131x/et131x_adapter.h
42098@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42099 * operations
42100 */
42101 u32 unircv; /* # multicast packets received */
42102- atomic_t unixmt; /* # multicast packets for Tx */
42103+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42104 u32 multircv; /* # multicast packets received */
42105- atomic_t multixmt; /* # multicast packets for Tx */
42106+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42107 u32 brdcstrcv; /* # broadcast packets received */
42108- atomic_t brdcstxmt; /* # broadcast packets for Tx */
42109+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42110 u32 norcvbuf; /* # Rx packets discarded */
42111 u32 noxmtbuf; /* # Tx packets discarded */
42112
42113diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42114index 4bd353a..e28f455 100644
42115--- a/drivers/staging/go7007/go7007-v4l2.c
42116+++ b/drivers/staging/go7007/go7007-v4l2.c
42117@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42118 return 0;
42119 }
42120
42121-static struct vm_operations_struct go7007_vm_ops = {
42122+static const struct vm_operations_struct go7007_vm_ops = {
42123 .open = go7007_vm_open,
42124 .close = go7007_vm_close,
42125 .fault = go7007_vm_fault,
42126diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42127index 366dc95..b974d87 100644
42128--- a/drivers/staging/hv/Channel.c
42129+++ b/drivers/staging/hv/Channel.c
42130@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42131
42132 DPRINT_ENTER(VMBUS);
42133
42134- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42135- atomic_inc(&gVmbusConnection.NextGpadlHandle);
42136+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42137+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42138
42139 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42140 ASSERT(msgInfo != NULL);
42141diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42142index b12237f..01ae28a 100644
42143--- a/drivers/staging/hv/Hv.c
42144+++ b/drivers/staging/hv/Hv.c
42145@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42146 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42147 u32 outputAddressHi = outputAddress >> 32;
42148 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42149- volatile void *hypercallPage = gHvContext.HypercallPage;
42150+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42151
42152 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42153 Control, Input, Output);
42154diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42155index d089bb1..2ebc158 100644
42156--- a/drivers/staging/hv/VmbusApi.h
42157+++ b/drivers/staging/hv/VmbusApi.h
42158@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42159 u32 *GpadlHandle);
42160 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42161 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42162-};
42163+} __no_const;
42164
42165 /* Base driver object */
42166 struct hv_driver {
42167diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42168index 5a37cce..6ecc88c 100644
42169--- a/drivers/staging/hv/VmbusPrivate.h
42170+++ b/drivers/staging/hv/VmbusPrivate.h
42171@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42172 struct VMBUS_CONNECTION {
42173 enum VMBUS_CONNECT_STATE ConnectState;
42174
42175- atomic_t NextGpadlHandle;
42176+ atomic_unchecked_t NextGpadlHandle;
42177
42178 /*
42179 * Represents channel interrupts. Each bit position represents a
42180diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42181index 871a202..ca50ddf 100644
42182--- a/drivers/staging/hv/blkvsc_drv.c
42183+++ b/drivers/staging/hv/blkvsc_drv.c
42184@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42185 /* The one and only one */
42186 static struct blkvsc_driver_context g_blkvsc_drv;
42187
42188-static struct block_device_operations block_ops = {
42189+static const struct block_device_operations block_ops = {
42190 .owner = THIS_MODULE,
42191 .open = blkvsc_open,
42192 .release = blkvsc_release,
42193diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42194index 6acc49a..fbc8d46 100644
42195--- a/drivers/staging/hv/vmbus_drv.c
42196+++ b/drivers/staging/hv/vmbus_drv.c
42197@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42198 to_device_context(root_device_obj);
42199 struct device_context *child_device_ctx =
42200 to_device_context(child_device_obj);
42201- static atomic_t device_num = ATOMIC_INIT(0);
42202+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42203
42204 DPRINT_ENTER(VMBUS_DRV);
42205
42206@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42207
42208 /* Set the device name. Otherwise, device_register() will fail. */
42209 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42210- atomic_inc_return(&device_num));
42211+ atomic_inc_return_unchecked(&device_num));
42212
42213 /* The new device belongs to this bus */
42214 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42215diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42216index d926189..17b19fd 100644
42217--- a/drivers/staging/iio/ring_generic.h
42218+++ b/drivers/staging/iio/ring_generic.h
42219@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42220
42221 int (*is_enabled)(struct iio_ring_buffer *ring);
42222 int (*enable)(struct iio_ring_buffer *ring);
42223-};
42224+} __no_const;
42225
42226 /**
42227 * struct iio_ring_buffer - general ring buffer structure
42228diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42229index 1b237b7..88c624e 100644
42230--- a/drivers/staging/octeon/ethernet-rx.c
42231+++ b/drivers/staging/octeon/ethernet-rx.c
42232@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42233 /* Increment RX stats for virtual ports */
42234 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42235 #ifdef CONFIG_64BIT
42236- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42237- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42238+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42239+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42240 #else
42241- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42242- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42243+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42244+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42245 #endif
42246 }
42247 netif_receive_skb(skb);
42248@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42249 dev->name);
42250 */
42251 #ifdef CONFIG_64BIT
42252- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42253+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42254 #else
42255- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42256+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42257 #endif
42258 dev_kfree_skb_irq(skb);
42259 }
42260diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42261index 492c502..d9909f1 100644
42262--- a/drivers/staging/octeon/ethernet.c
42263+++ b/drivers/staging/octeon/ethernet.c
42264@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42265 * since the RX tasklet also increments it.
42266 */
42267 #ifdef CONFIG_64BIT
42268- atomic64_add(rx_status.dropped_packets,
42269- (atomic64_t *)&priv->stats.rx_dropped);
42270+ atomic64_add_unchecked(rx_status.dropped_packets,
42271+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42272 #else
42273- atomic_add(rx_status.dropped_packets,
42274- (atomic_t *)&priv->stats.rx_dropped);
42275+ atomic_add_unchecked(rx_status.dropped_packets,
42276+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42277 #endif
42278 }
42279
42280diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42281index a35bd5d..28fff45 100644
42282--- a/drivers/staging/otus/80211core/pub_zfi.h
42283+++ b/drivers/staging/otus/80211core/pub_zfi.h
42284@@ -531,7 +531,7 @@ struct zsCbFuncTbl
42285 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42286
42287 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42288-};
42289+} __no_const;
42290
42291 extern void zfZeroMemory(u8_t* va, u16_t length);
42292 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42293diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42294index c39a25f..696f5aa 100644
42295--- a/drivers/staging/panel/panel.c
42296+++ b/drivers/staging/panel/panel.c
42297@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42298 return 0;
42299 }
42300
42301-static struct file_operations lcd_fops = {
42302+static const struct file_operations lcd_fops = {
42303 .write = lcd_write,
42304 .open = lcd_open,
42305 .release = lcd_release,
42306@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42307 return 0;
42308 }
42309
42310-static struct file_operations keypad_fops = {
42311+static const struct file_operations keypad_fops = {
42312 .read = keypad_read, /* read */
42313 .open = keypad_open, /* open */
42314 .release = keypad_release, /* close */
42315diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42316index 270ebcb..37e46af 100644
42317--- a/drivers/staging/phison/phison.c
42318+++ b/drivers/staging/phison/phison.c
42319@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42320 ATA_BMDMA_SHT(DRV_NAME),
42321 };
42322
42323-static struct ata_port_operations phison_ops = {
42324+static const struct ata_port_operations phison_ops = {
42325 .inherits = &ata_bmdma_port_ops,
42326 .prereset = phison_pre_reset,
42327 };
42328diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42329index 2eb8e3d..57616a7 100644
42330--- a/drivers/staging/poch/poch.c
42331+++ b/drivers/staging/poch/poch.c
42332@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42333 return 0;
42334 }
42335
42336-static struct file_operations poch_fops = {
42337+static const struct file_operations poch_fops = {
42338 .owner = THIS_MODULE,
42339 .open = poch_open,
42340 .release = poch_release,
42341diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42342index c94de31..19402bc 100644
42343--- a/drivers/staging/pohmelfs/inode.c
42344+++ b/drivers/staging/pohmelfs/inode.c
42345@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42346 mutex_init(&psb->mcache_lock);
42347 psb->mcache_root = RB_ROOT;
42348 psb->mcache_timeout = msecs_to_jiffies(5000);
42349- atomic_long_set(&psb->mcache_gen, 0);
42350+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
42351
42352 psb->trans_max_pages = 100;
42353
42354@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42355 INIT_LIST_HEAD(&psb->crypto_ready_list);
42356 INIT_LIST_HEAD(&psb->crypto_active_list);
42357
42358- atomic_set(&psb->trans_gen, 1);
42359+ atomic_set_unchecked(&psb->trans_gen, 1);
42360 atomic_long_set(&psb->total_inodes, 0);
42361
42362 mutex_init(&psb->state_lock);
42363diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42364index e22665c..a2a9390 100644
42365--- a/drivers/staging/pohmelfs/mcache.c
42366+++ b/drivers/staging/pohmelfs/mcache.c
42367@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42368 m->data = data;
42369 m->start = start;
42370 m->size = size;
42371- m->gen = atomic_long_inc_return(&psb->mcache_gen);
42372+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42373
42374 mutex_lock(&psb->mcache_lock);
42375 err = pohmelfs_mcache_insert(psb, m);
42376diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42377index 623a07d..4035c19 100644
42378--- a/drivers/staging/pohmelfs/netfs.h
42379+++ b/drivers/staging/pohmelfs/netfs.h
42380@@ -570,14 +570,14 @@ struct pohmelfs_config;
42381 struct pohmelfs_sb {
42382 struct rb_root mcache_root;
42383 struct mutex mcache_lock;
42384- atomic_long_t mcache_gen;
42385+ atomic_long_unchecked_t mcache_gen;
42386 unsigned long mcache_timeout;
42387
42388 unsigned int idx;
42389
42390 unsigned int trans_retries;
42391
42392- atomic_t trans_gen;
42393+ atomic_unchecked_t trans_gen;
42394
42395 unsigned int crypto_attached_size;
42396 unsigned int crypto_align_size;
42397diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42398index 36a2535..0591bf4 100644
42399--- a/drivers/staging/pohmelfs/trans.c
42400+++ b/drivers/staging/pohmelfs/trans.c
42401@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42402 int err;
42403 struct netfs_cmd *cmd = t->iovec.iov_base;
42404
42405- t->gen = atomic_inc_return(&psb->trans_gen);
42406+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42407
42408 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42409 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42410diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42411index f890a16..509ece8 100644
42412--- a/drivers/staging/sep/sep_driver.c
42413+++ b/drivers/staging/sep/sep_driver.c
42414@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42415 static dev_t sep_devno;
42416
42417 /* the files operations structure of the driver */
42418-static struct file_operations sep_file_operations = {
42419+static const struct file_operations sep_file_operations = {
42420 .owner = THIS_MODULE,
42421 .ioctl = sep_ioctl,
42422 .poll = sep_poll,
42423diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42424index 5e16bc3..7655b10 100644
42425--- a/drivers/staging/usbip/usbip_common.h
42426+++ b/drivers/staging/usbip/usbip_common.h
42427@@ -374,7 +374,7 @@ struct usbip_device {
42428 void (*shutdown)(struct usbip_device *);
42429 void (*reset)(struct usbip_device *);
42430 void (*unusable)(struct usbip_device *);
42431- } eh_ops;
42432+ } __no_const eh_ops;
42433 };
42434
42435
42436diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42437index 57f7946..d9df23d 100644
42438--- a/drivers/staging/usbip/vhci.h
42439+++ b/drivers/staging/usbip/vhci.h
42440@@ -92,7 +92,7 @@ struct vhci_hcd {
42441 unsigned resuming:1;
42442 unsigned long re_timeout;
42443
42444- atomic_t seqnum;
42445+ atomic_unchecked_t seqnum;
42446
42447 /*
42448 * NOTE:
42449diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42450index 20cd7db..c2693ff 100644
42451--- a/drivers/staging/usbip/vhci_hcd.c
42452+++ b/drivers/staging/usbip/vhci_hcd.c
42453@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42454 return;
42455 }
42456
42457- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42458+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42459 if (priv->seqnum == 0xffff)
42460 usbip_uinfo("seqnum max\n");
42461
42462@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42463 return -ENOMEM;
42464 }
42465
42466- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42467+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42468 if (unlink->seqnum == 0xffff)
42469 usbip_uinfo("seqnum max\n");
42470
42471@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42472 vdev->rhport = rhport;
42473 }
42474
42475- atomic_set(&vhci->seqnum, 0);
42476+ atomic_set_unchecked(&vhci->seqnum, 0);
42477 spin_lock_init(&vhci->lock);
42478
42479
42480diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42481index 7fd76fe..673695a 100644
42482--- a/drivers/staging/usbip/vhci_rx.c
42483+++ b/drivers/staging/usbip/vhci_rx.c
42484@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42485 usbip_uerr("cannot find a urb of seqnum %u\n",
42486 pdu->base.seqnum);
42487 usbip_uinfo("max seqnum %d\n",
42488- atomic_read(&the_controller->seqnum));
42489+ atomic_read_unchecked(&the_controller->seqnum));
42490 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42491 return;
42492 }
42493diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42494index 7891288..8e31300 100644
42495--- a/drivers/staging/vme/devices/vme_user.c
42496+++ b/drivers/staging/vme/devices/vme_user.c
42497@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42498 static int __init vme_user_probe(struct device *, int, int);
42499 static int __exit vme_user_remove(struct device *, int, int);
42500
42501-static struct file_operations vme_user_fops = {
42502+static const struct file_operations vme_user_fops = {
42503 .open = vme_user_open,
42504 .release = vme_user_release,
42505 .read = vme_user_read,
42506diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42507index 58abf44..00c1fc8 100644
42508--- a/drivers/staging/vt6655/hostap.c
42509+++ b/drivers/staging/vt6655/hostap.c
42510@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42511 PSDevice apdev_priv;
42512 struct net_device *dev = pDevice->dev;
42513 int ret;
42514- const struct net_device_ops apdev_netdev_ops = {
42515+ net_device_ops_no_const apdev_netdev_ops = {
42516 .ndo_start_xmit = pDevice->tx_80211,
42517 };
42518
42519diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42520index 0c8267a..db1f363 100644
42521--- a/drivers/staging/vt6656/hostap.c
42522+++ b/drivers/staging/vt6656/hostap.c
42523@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42524 PSDevice apdev_priv;
42525 struct net_device *dev = pDevice->dev;
42526 int ret;
42527- const struct net_device_ops apdev_netdev_ops = {
42528+ net_device_ops_no_const apdev_netdev_ops = {
42529 .ndo_start_xmit = pDevice->tx_80211,
42530 };
42531
42532diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42533index 925678b..da7f5ed 100644
42534--- a/drivers/staging/wlan-ng/hfa384x_usb.c
42535+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42536@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42537
42538 struct usbctlx_completor {
42539 int (*complete) (struct usbctlx_completor *);
42540-};
42541+} __no_const;
42542 typedef struct usbctlx_completor usbctlx_completor_t;
42543
42544 static int
42545diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42546index 40de151..924f268 100644
42547--- a/drivers/telephony/ixj.c
42548+++ b/drivers/telephony/ixj.c
42549@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42550 bool mContinue;
42551 char *pIn, *pOut;
42552
42553+ pax_track_stack();
42554+
42555 if (!SCI_Prepare(j))
42556 return 0;
42557
42558diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42559index e941367..b631f5a 100644
42560--- a/drivers/uio/uio.c
42561+++ b/drivers/uio/uio.c
42562@@ -23,6 +23,7 @@
42563 #include <linux/string.h>
42564 #include <linux/kobject.h>
42565 #include <linux/uio_driver.h>
42566+#include <asm/local.h>
42567
42568 #define UIO_MAX_DEVICES 255
42569
42570@@ -30,10 +31,10 @@ struct uio_device {
42571 struct module *owner;
42572 struct device *dev;
42573 int minor;
42574- atomic_t event;
42575+ atomic_unchecked_t event;
42576 struct fasync_struct *async_queue;
42577 wait_queue_head_t wait;
42578- int vma_count;
42579+ local_t vma_count;
42580 struct uio_info *info;
42581 struct kobject *map_dir;
42582 struct kobject *portio_dir;
42583@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42584 return entry->show(mem, buf);
42585 }
42586
42587-static struct sysfs_ops map_sysfs_ops = {
42588+static const struct sysfs_ops map_sysfs_ops = {
42589 .show = map_type_show,
42590 };
42591
42592@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42593 return entry->show(port, buf);
42594 }
42595
42596-static struct sysfs_ops portio_sysfs_ops = {
42597+static const struct sysfs_ops portio_sysfs_ops = {
42598 .show = portio_type_show,
42599 };
42600
42601@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42602 struct uio_device *idev = dev_get_drvdata(dev);
42603 if (idev)
42604 return sprintf(buf, "%u\n",
42605- (unsigned int)atomic_read(&idev->event));
42606+ (unsigned int)atomic_read_unchecked(&idev->event));
42607 else
42608 return -ENODEV;
42609 }
42610@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42611 {
42612 struct uio_device *idev = info->uio_dev;
42613
42614- atomic_inc(&idev->event);
42615+ atomic_inc_unchecked(&idev->event);
42616 wake_up_interruptible(&idev->wait);
42617 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42618 }
42619@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42620 }
42621
42622 listener->dev = idev;
42623- listener->event_count = atomic_read(&idev->event);
42624+ listener->event_count = atomic_read_unchecked(&idev->event);
42625 filep->private_data = listener;
42626
42627 if (idev->info->open) {
42628@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42629 return -EIO;
42630
42631 poll_wait(filep, &idev->wait, wait);
42632- if (listener->event_count != atomic_read(&idev->event))
42633+ if (listener->event_count != atomic_read_unchecked(&idev->event))
42634 return POLLIN | POLLRDNORM;
42635 return 0;
42636 }
42637@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42638 do {
42639 set_current_state(TASK_INTERRUPTIBLE);
42640
42641- event_count = atomic_read(&idev->event);
42642+ event_count = atomic_read_unchecked(&idev->event);
42643 if (event_count != listener->event_count) {
42644 if (copy_to_user(buf, &event_count, count))
42645 retval = -EFAULT;
42646@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42647 static void uio_vma_open(struct vm_area_struct *vma)
42648 {
42649 struct uio_device *idev = vma->vm_private_data;
42650- idev->vma_count++;
42651+ local_inc(&idev->vma_count);
42652 }
42653
42654 static void uio_vma_close(struct vm_area_struct *vma)
42655 {
42656 struct uio_device *idev = vma->vm_private_data;
42657- idev->vma_count--;
42658+ local_dec(&idev->vma_count);
42659 }
42660
42661 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42662@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42663 idev->owner = owner;
42664 idev->info = info;
42665 init_waitqueue_head(&idev->wait);
42666- atomic_set(&idev->event, 0);
42667+ atomic_set_unchecked(&idev->event, 0);
42668
42669 ret = uio_get_minor(idev);
42670 if (ret)
42671diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42672index fbea856..06efea6 100644
42673--- a/drivers/usb/atm/usbatm.c
42674+++ b/drivers/usb/atm/usbatm.c
42675@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42676 if (printk_ratelimit())
42677 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42678 __func__, vpi, vci);
42679- atomic_inc(&vcc->stats->rx_err);
42680+ atomic_inc_unchecked(&vcc->stats->rx_err);
42681 return;
42682 }
42683
42684@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42685 if (length > ATM_MAX_AAL5_PDU) {
42686 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42687 __func__, length, vcc);
42688- atomic_inc(&vcc->stats->rx_err);
42689+ atomic_inc_unchecked(&vcc->stats->rx_err);
42690 goto out;
42691 }
42692
42693@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42694 if (sarb->len < pdu_length) {
42695 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42696 __func__, pdu_length, sarb->len, vcc);
42697- atomic_inc(&vcc->stats->rx_err);
42698+ atomic_inc_unchecked(&vcc->stats->rx_err);
42699 goto out;
42700 }
42701
42702 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42703 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42704 __func__, vcc);
42705- atomic_inc(&vcc->stats->rx_err);
42706+ atomic_inc_unchecked(&vcc->stats->rx_err);
42707 goto out;
42708 }
42709
42710@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42711 if (printk_ratelimit())
42712 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42713 __func__, length);
42714- atomic_inc(&vcc->stats->rx_drop);
42715+ atomic_inc_unchecked(&vcc->stats->rx_drop);
42716 goto out;
42717 }
42718
42719@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42720
42721 vcc->push(vcc, skb);
42722
42723- atomic_inc(&vcc->stats->rx);
42724+ atomic_inc_unchecked(&vcc->stats->rx);
42725 out:
42726 skb_trim(sarb, 0);
42727 }
42728@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42729 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42730
42731 usbatm_pop(vcc, skb);
42732- atomic_inc(&vcc->stats->tx);
42733+ atomic_inc_unchecked(&vcc->stats->tx);
42734
42735 skb = skb_dequeue(&instance->sndqueue);
42736 }
42737@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42738 if (!left--)
42739 return sprintf(page,
42740 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42741- atomic_read(&atm_dev->stats.aal5.tx),
42742- atomic_read(&atm_dev->stats.aal5.tx_err),
42743- atomic_read(&atm_dev->stats.aal5.rx),
42744- atomic_read(&atm_dev->stats.aal5.rx_err),
42745- atomic_read(&atm_dev->stats.aal5.rx_drop));
42746+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42747+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42748+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42749+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42750+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42751
42752 if (!left--) {
42753 if (instance->disconnected)
42754diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42755index 24e6205..fe5a5d4 100644
42756--- a/drivers/usb/core/hcd.c
42757+++ b/drivers/usb/core/hcd.c
42758@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42759
42760 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42761
42762-struct usb_mon_operations *mon_ops;
42763+const struct usb_mon_operations *mon_ops;
42764
42765 /*
42766 * The registration is unlocked.
42767@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42768 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42769 */
42770
42771-int usb_mon_register (struct usb_mon_operations *ops)
42772+int usb_mon_register (const struct usb_mon_operations *ops)
42773 {
42774
42775 if (mon_ops)
42776diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42777index bcbe104..9cfd1c6 100644
42778--- a/drivers/usb/core/hcd.h
42779+++ b/drivers/usb/core/hcd.h
42780@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42781 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42782
42783 struct usb_mon_operations {
42784- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42785- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42786- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42787+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42788+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42789+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42790 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42791 };
42792
42793-extern struct usb_mon_operations *mon_ops;
42794+extern const struct usb_mon_operations *mon_ops;
42795
42796 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42797 {
42798@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42799 (*mon_ops->urb_complete)(bus, urb, status);
42800 }
42801
42802-int usb_mon_register(struct usb_mon_operations *ops);
42803+int usb_mon_register(const struct usb_mon_operations *ops);
42804 void usb_mon_deregister(void);
42805
42806 #else
42807diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42808index 409cc94..a673bad 100644
42809--- a/drivers/usb/core/message.c
42810+++ b/drivers/usb/core/message.c
42811@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42812 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42813 if (buf) {
42814 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42815- if (len > 0) {
42816- smallbuf = kmalloc(++len, GFP_NOIO);
42817+ if (len++ > 0) {
42818+ smallbuf = kmalloc(len, GFP_NOIO);
42819 if (!smallbuf)
42820 return buf;
42821 memcpy(smallbuf, buf, len);
42822diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42823index 62ff5e7..530b74e 100644
42824--- a/drivers/usb/misc/appledisplay.c
42825+++ b/drivers/usb/misc/appledisplay.c
42826@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42827 return pdata->msgdata[1];
42828 }
42829
42830-static struct backlight_ops appledisplay_bl_data = {
42831+static const struct backlight_ops appledisplay_bl_data = {
42832 .get_brightness = appledisplay_bl_get_brightness,
42833 .update_status = appledisplay_bl_update_status,
42834 };
42835diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42836index e0c2db3..bd8cb66 100644
42837--- a/drivers/usb/mon/mon_main.c
42838+++ b/drivers/usb/mon/mon_main.c
42839@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42840 /*
42841 * Ops
42842 */
42843-static struct usb_mon_operations mon_ops_0 = {
42844+static const struct usb_mon_operations mon_ops_0 = {
42845 .urb_submit = mon_submit,
42846 .urb_submit_error = mon_submit_error,
42847 .urb_complete = mon_complete,
42848diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42849index d6bea3e..60b250e 100644
42850--- a/drivers/usb/wusbcore/wa-hc.h
42851+++ b/drivers/usb/wusbcore/wa-hc.h
42852@@ -192,7 +192,7 @@ struct wahc {
42853 struct list_head xfer_delayed_list;
42854 spinlock_t xfer_list_lock;
42855 struct work_struct xfer_work;
42856- atomic_t xfer_id_count;
42857+ atomic_unchecked_t xfer_id_count;
42858 };
42859
42860
42861@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42862 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42863 spin_lock_init(&wa->xfer_list_lock);
42864 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42865- atomic_set(&wa->xfer_id_count, 1);
42866+ atomic_set_unchecked(&wa->xfer_id_count, 1);
42867 }
42868
42869 /**
42870diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42871index 613a5fc..3174865 100644
42872--- a/drivers/usb/wusbcore/wa-xfer.c
42873+++ b/drivers/usb/wusbcore/wa-xfer.c
42874@@ -293,7 +293,7 @@ out:
42875 */
42876 static void wa_xfer_id_init(struct wa_xfer *xfer)
42877 {
42878- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42879+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42880 }
42881
42882 /*
42883diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42884index aa42fce..f8a828c 100644
42885--- a/drivers/uwb/wlp/messages.c
42886+++ b/drivers/uwb/wlp/messages.c
42887@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42888 size_t len = skb->len;
42889 size_t used;
42890 ssize_t result;
42891- struct wlp_nonce enonce, rnonce;
42892+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42893 enum wlp_assc_error assc_err;
42894 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42895 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42896diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42897index 0370399..6627c94 100644
42898--- a/drivers/uwb/wlp/sysfs.c
42899+++ b/drivers/uwb/wlp/sysfs.c
42900@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42901 return ret;
42902 }
42903
42904-static
42905-struct sysfs_ops wss_sysfs_ops = {
42906+static const struct sysfs_ops wss_sysfs_ops = {
42907 .show = wlp_wss_attr_show,
42908 .store = wlp_wss_attr_store,
42909 };
42910diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42911index 8c5e432..5ee90ea 100644
42912--- a/drivers/video/atmel_lcdfb.c
42913+++ b/drivers/video/atmel_lcdfb.c
42914@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42915 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42916 }
42917
42918-static struct backlight_ops atmel_lcdc_bl_ops = {
42919+static const struct backlight_ops atmel_lcdc_bl_ops = {
42920 .update_status = atmel_bl_update_status,
42921 .get_brightness = atmel_bl_get_brightness,
42922 };
42923diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42924index e4e4d43..66bcbcc 100644
42925--- a/drivers/video/aty/aty128fb.c
42926+++ b/drivers/video/aty/aty128fb.c
42927@@ -149,7 +149,7 @@ enum {
42928 };
42929
42930 /* Must match above enum */
42931-static const char *r128_family[] __devinitdata = {
42932+static const char *r128_family[] __devinitconst = {
42933 "AGP",
42934 "PCI",
42935 "PRO AGP",
42936@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42937 return bd->props.brightness;
42938 }
42939
42940-static struct backlight_ops aty128_bl_data = {
42941+static const struct backlight_ops aty128_bl_data = {
42942 .get_brightness = aty128_bl_get_brightness,
42943 .update_status = aty128_bl_update_status,
42944 };
42945diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42946index 913b4a4..9295a38 100644
42947--- a/drivers/video/aty/atyfb_base.c
42948+++ b/drivers/video/aty/atyfb_base.c
42949@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42950 return bd->props.brightness;
42951 }
42952
42953-static struct backlight_ops aty_bl_data = {
42954+static const struct backlight_ops aty_bl_data = {
42955 .get_brightness = aty_bl_get_brightness,
42956 .update_status = aty_bl_update_status,
42957 };
42958diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42959index 1a056ad..221bd6a 100644
42960--- a/drivers/video/aty/radeon_backlight.c
42961+++ b/drivers/video/aty/radeon_backlight.c
42962@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42963 return bd->props.brightness;
42964 }
42965
42966-static struct backlight_ops radeon_bl_data = {
42967+static const struct backlight_ops radeon_bl_data = {
42968 .get_brightness = radeon_bl_get_brightness,
42969 .update_status = radeon_bl_update_status,
42970 };
42971diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42972index ad05da5..3cb2cb9 100644
42973--- a/drivers/video/backlight/adp5520_bl.c
42974+++ b/drivers/video/backlight/adp5520_bl.c
42975@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42976 return error ? data->current_brightness : reg_val;
42977 }
42978
42979-static struct backlight_ops adp5520_bl_ops = {
42980+static const struct backlight_ops adp5520_bl_ops = {
42981 .update_status = adp5520_bl_update_status,
42982 .get_brightness = adp5520_bl_get_brightness,
42983 };
42984diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42985index 2c3bdfc..d769b0b 100644
42986--- a/drivers/video/backlight/adx_bl.c
42987+++ b/drivers/video/backlight/adx_bl.c
42988@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42989 return 1;
42990 }
42991
42992-static struct backlight_ops adx_backlight_ops = {
42993+static const struct backlight_ops adx_backlight_ops = {
42994 .options = 0,
42995 .update_status = adx_backlight_update_status,
42996 .get_brightness = adx_backlight_get_brightness,
42997diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42998index 505c082..6b6b3cc 100644
42999--- a/drivers/video/backlight/atmel-pwm-bl.c
43000+++ b/drivers/video/backlight/atmel-pwm-bl.c
43001@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
43002 return pwm_channel_enable(&pwmbl->pwmc);
43003 }
43004
43005-static struct backlight_ops atmel_pwm_bl_ops = {
43006+static const struct backlight_ops atmel_pwm_bl_ops = {
43007 .get_brightness = atmel_pwm_bl_get_intensity,
43008 .update_status = atmel_pwm_bl_set_intensity,
43009 };
43010diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
43011index 5e20e6e..89025e6 100644
43012--- a/drivers/video/backlight/backlight.c
43013+++ b/drivers/video/backlight/backlight.c
43014@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
43015 * ERR_PTR() or a pointer to the newly allocated device.
43016 */
43017 struct backlight_device *backlight_device_register(const char *name,
43018- struct device *parent, void *devdata, struct backlight_ops *ops)
43019+ struct device *parent, void *devdata, const struct backlight_ops *ops)
43020 {
43021 struct backlight_device *new_bd;
43022 int rc;
43023diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
43024index 9677494..b4bcf80 100644
43025--- a/drivers/video/backlight/corgi_lcd.c
43026+++ b/drivers/video/backlight/corgi_lcd.c
43027@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
43028 }
43029 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
43030
43031-static struct backlight_ops corgi_bl_ops = {
43032+static const struct backlight_ops corgi_bl_ops = {
43033 .get_brightness = corgi_bl_get_intensity,
43034 .update_status = corgi_bl_update_status,
43035 };
43036diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
43037index b9fe62b..2914bf1 100644
43038--- a/drivers/video/backlight/cr_bllcd.c
43039+++ b/drivers/video/backlight/cr_bllcd.c
43040@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
43041 return intensity;
43042 }
43043
43044-static struct backlight_ops cr_backlight_ops = {
43045+static const struct backlight_ops cr_backlight_ops = {
43046 .get_brightness = cr_backlight_get_intensity,
43047 .update_status = cr_backlight_set_intensity,
43048 };
43049diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
43050index 701a108..feacfd5 100644
43051--- a/drivers/video/backlight/da903x_bl.c
43052+++ b/drivers/video/backlight/da903x_bl.c
43053@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
43054 return data->current_brightness;
43055 }
43056
43057-static struct backlight_ops da903x_backlight_ops = {
43058+static const struct backlight_ops da903x_backlight_ops = {
43059 .update_status = da903x_backlight_update_status,
43060 .get_brightness = da903x_backlight_get_brightness,
43061 };
43062diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
43063index 6d27f62..e6d348e 100644
43064--- a/drivers/video/backlight/generic_bl.c
43065+++ b/drivers/video/backlight/generic_bl.c
43066@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
43067 }
43068 EXPORT_SYMBOL(corgibl_limit_intensity);
43069
43070-static struct backlight_ops genericbl_ops = {
43071+static const struct backlight_ops genericbl_ops = {
43072 .options = BL_CORE_SUSPENDRESUME,
43073 .get_brightness = genericbl_get_intensity,
43074 .update_status = genericbl_send_intensity,
43075diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
43076index 7fb4eef..f7cc528 100644
43077--- a/drivers/video/backlight/hp680_bl.c
43078+++ b/drivers/video/backlight/hp680_bl.c
43079@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43080 return current_intensity;
43081 }
43082
43083-static struct backlight_ops hp680bl_ops = {
43084+static const struct backlight_ops hp680bl_ops = {
43085 .get_brightness = hp680bl_get_intensity,
43086 .update_status = hp680bl_set_intensity,
43087 };
43088diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43089index 7aed256..db9071f 100644
43090--- a/drivers/video/backlight/jornada720_bl.c
43091+++ b/drivers/video/backlight/jornada720_bl.c
43092@@ -93,7 +93,7 @@ out:
43093 return ret;
43094 }
43095
43096-static struct backlight_ops jornada_bl_ops = {
43097+static const struct backlight_ops jornada_bl_ops = {
43098 .get_brightness = jornada_bl_get_brightness,
43099 .update_status = jornada_bl_update_status,
43100 .options = BL_CORE_SUSPENDRESUME,
43101diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43102index a38fda1..939e7b8 100644
43103--- a/drivers/video/backlight/kb3886_bl.c
43104+++ b/drivers/video/backlight/kb3886_bl.c
43105@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43106 return kb3886bl_intensity;
43107 }
43108
43109-static struct backlight_ops kb3886bl_ops = {
43110+static const struct backlight_ops kb3886bl_ops = {
43111 .get_brightness = kb3886bl_get_intensity,
43112 .update_status = kb3886bl_send_intensity,
43113 };
43114diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43115index 6b488b8..00a9591 100644
43116--- a/drivers/video/backlight/locomolcd.c
43117+++ b/drivers/video/backlight/locomolcd.c
43118@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43119 return current_intensity;
43120 }
43121
43122-static struct backlight_ops locomobl_data = {
43123+static const struct backlight_ops locomobl_data = {
43124 .get_brightness = locomolcd_get_intensity,
43125 .update_status = locomolcd_set_intensity,
43126 };
43127diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43128index 99bdfa8..3dac448 100644
43129--- a/drivers/video/backlight/mbp_nvidia_bl.c
43130+++ b/drivers/video/backlight/mbp_nvidia_bl.c
43131@@ -33,7 +33,7 @@ struct dmi_match_data {
43132 unsigned long iostart;
43133 unsigned long iolen;
43134 /* Backlight operations structure. */
43135- struct backlight_ops backlight_ops;
43136+ const struct backlight_ops backlight_ops;
43137 };
43138
43139 /* Module parameters. */
43140diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43141index cbad67e..3cf900e 100644
43142--- a/drivers/video/backlight/omap1_bl.c
43143+++ b/drivers/video/backlight/omap1_bl.c
43144@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43145 return bl->current_intensity;
43146 }
43147
43148-static struct backlight_ops omapbl_ops = {
43149+static const struct backlight_ops omapbl_ops = {
43150 .get_brightness = omapbl_get_intensity,
43151 .update_status = omapbl_update_status,
43152 };
43153diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43154index 9edaf24..075786e 100644
43155--- a/drivers/video/backlight/progear_bl.c
43156+++ b/drivers/video/backlight/progear_bl.c
43157@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43158 return intensity - HW_LEVEL_MIN;
43159 }
43160
43161-static struct backlight_ops progearbl_ops = {
43162+static const struct backlight_ops progearbl_ops = {
43163 .get_brightness = progearbl_get_intensity,
43164 .update_status = progearbl_set_intensity,
43165 };
43166diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43167index 8871662..df9e0b3 100644
43168--- a/drivers/video/backlight/pwm_bl.c
43169+++ b/drivers/video/backlight/pwm_bl.c
43170@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43171 return bl->props.brightness;
43172 }
43173
43174-static struct backlight_ops pwm_backlight_ops = {
43175+static const struct backlight_ops pwm_backlight_ops = {
43176 .update_status = pwm_backlight_update_status,
43177 .get_brightness = pwm_backlight_get_brightness,
43178 };
43179diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43180index 43edbad..e14ce4d 100644
43181--- a/drivers/video/backlight/tosa_bl.c
43182+++ b/drivers/video/backlight/tosa_bl.c
43183@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43184 return props->brightness;
43185 }
43186
43187-static struct backlight_ops bl_ops = {
43188+static const struct backlight_ops bl_ops = {
43189 .get_brightness = tosa_bl_get_brightness,
43190 .update_status = tosa_bl_update_status,
43191 };
43192diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43193index 467bdb7..e32add3 100644
43194--- a/drivers/video/backlight/wm831x_bl.c
43195+++ b/drivers/video/backlight/wm831x_bl.c
43196@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43197 return data->current_brightness;
43198 }
43199
43200-static struct backlight_ops wm831x_backlight_ops = {
43201+static const struct backlight_ops wm831x_backlight_ops = {
43202 .options = BL_CORE_SUSPENDRESUME,
43203 .update_status = wm831x_backlight_update_status,
43204 .get_brightness = wm831x_backlight_get_brightness,
43205diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43206index e49ae5e..db4e6f7 100644
43207--- a/drivers/video/bf54x-lq043fb.c
43208+++ b/drivers/video/bf54x-lq043fb.c
43209@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43210 return 0;
43211 }
43212
43213-static struct backlight_ops bfin_lq043fb_bl_ops = {
43214+static const struct backlight_ops bfin_lq043fb_bl_ops = {
43215 .get_brightness = bl_get_brightness,
43216 };
43217
43218diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43219index 2c72a7c..d523e52 100644
43220--- a/drivers/video/bfin-t350mcqb-fb.c
43221+++ b/drivers/video/bfin-t350mcqb-fb.c
43222@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43223 return 0;
43224 }
43225
43226-static struct backlight_ops bfin_lq043fb_bl_ops = {
43227+static const struct backlight_ops bfin_lq043fb_bl_ops = {
43228 .get_brightness = bl_get_brightness,
43229 };
43230
43231diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43232index f53b9f1..958bf4e 100644
43233--- a/drivers/video/fbcmap.c
43234+++ b/drivers/video/fbcmap.c
43235@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43236 rc = -ENODEV;
43237 goto out;
43238 }
43239- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43240- !info->fbops->fb_setcmap)) {
43241+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43242 rc = -EINVAL;
43243 goto out1;
43244 }
43245diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43246index 99bbd28..ad3829e 100644
43247--- a/drivers/video/fbmem.c
43248+++ b/drivers/video/fbmem.c
43249@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43250 image->dx += image->width + 8;
43251 }
43252 } else if (rotate == FB_ROTATE_UD) {
43253- for (x = 0; x < num && image->dx >= 0; x++) {
43254+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43255 info->fbops->fb_imageblit(info, image);
43256 image->dx -= image->width + 8;
43257 }
43258@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43259 image->dy += image->height + 8;
43260 }
43261 } else if (rotate == FB_ROTATE_CCW) {
43262- for (x = 0; x < num && image->dy >= 0; x++) {
43263+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43264 info->fbops->fb_imageblit(info, image);
43265 image->dy -= image->height + 8;
43266 }
43267@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43268 int flags = info->flags;
43269 int ret = 0;
43270
43271+ pax_track_stack();
43272+
43273 if (var->activate & FB_ACTIVATE_INV_MODE) {
43274 struct fb_videomode mode1, mode2;
43275
43276@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43277 void __user *argp = (void __user *)arg;
43278 long ret = 0;
43279
43280+ pax_track_stack();
43281+
43282 switch (cmd) {
43283 case FBIOGET_VSCREENINFO:
43284 if (!lock_fb_info(info))
43285@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43286 return -EFAULT;
43287 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43288 return -EINVAL;
43289- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43290+ if (con2fb.framebuffer >= FB_MAX)
43291 return -EINVAL;
43292 if (!registered_fb[con2fb.framebuffer])
43293 request_module("fb%d", con2fb.framebuffer);
43294diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43295index f20eff8..3e4f622 100644
43296--- a/drivers/video/geode/gx1fb_core.c
43297+++ b/drivers/video/geode/gx1fb_core.c
43298@@ -30,7 +30,7 @@ static int crt_option = 1;
43299 static char panel_option[32] = "";
43300
43301 /* Modes relevant to the GX1 (taken from modedb.c) */
43302-static const struct fb_videomode __initdata gx1_modedb[] = {
43303+static const struct fb_videomode __initconst gx1_modedb[] = {
43304 /* 640x480-60 VESA */
43305 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43306 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43307diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43308index 896e53d..4d87d0b 100644
43309--- a/drivers/video/gxt4500.c
43310+++ b/drivers/video/gxt4500.c
43311@@ -156,7 +156,7 @@ struct gxt4500_par {
43312 static char *mode_option;
43313
43314 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43315-static const struct fb_videomode defaultmode __devinitdata = {
43316+static const struct fb_videomode defaultmode __devinitconst = {
43317 .refresh = 60,
43318 .xres = 1280,
43319 .yres = 1024,
43320@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43321 return 0;
43322 }
43323
43324-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43325+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43326 .id = "IBM GXT4500P",
43327 .type = FB_TYPE_PACKED_PIXELS,
43328 .visual = FB_VISUAL_PSEUDOCOLOR,
43329diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43330index f5bedee..28c6028 100644
43331--- a/drivers/video/i810/i810_accel.c
43332+++ b/drivers/video/i810/i810_accel.c
43333@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43334 }
43335 }
43336 printk("ringbuffer lockup!!!\n");
43337+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43338 i810_report_error(mmio);
43339 par->dev_flags |= LOCKUP;
43340 info->pixmap.scan_align = 1;
43341diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43342index 5743ea2..457f82c 100644
43343--- a/drivers/video/i810/i810_main.c
43344+++ b/drivers/video/i810/i810_main.c
43345@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43346 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43347
43348 /* PCI */
43349-static const char *i810_pci_list[] __devinitdata = {
43350+static const char *i810_pci_list[] __devinitconst = {
43351 "Intel(R) 810 Framebuffer Device" ,
43352 "Intel(R) 810-DC100 Framebuffer Device" ,
43353 "Intel(R) 810E Framebuffer Device" ,
43354diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43355index 3c14e43..eafa544 100644
43356--- a/drivers/video/logo/logo_linux_clut224.ppm
43357+++ b/drivers/video/logo/logo_linux_clut224.ppm
43358@@ -1,1604 +1,1123 @@
43359 P3
43360-# Standard 224-color Linux logo
43361 80 80
43362 255
43363- 0 0 0 0 0 0 0 0 0 0 0 0
43364- 0 0 0 0 0 0 0 0 0 0 0 0
43365- 0 0 0 0 0 0 0 0 0 0 0 0
43366- 0 0 0 0 0 0 0 0 0 0 0 0
43367- 0 0 0 0 0 0 0 0 0 0 0 0
43368- 0 0 0 0 0 0 0 0 0 0 0 0
43369- 0 0 0 0 0 0 0 0 0 0 0 0
43370- 0 0 0 0 0 0 0 0 0 0 0 0
43371- 0 0 0 0 0 0 0 0 0 0 0 0
43372- 6 6 6 6 6 6 10 10 10 10 10 10
43373- 10 10 10 6 6 6 6 6 6 6 6 6
43374- 0 0 0 0 0 0 0 0 0 0 0 0
43375- 0 0 0 0 0 0 0 0 0 0 0 0
43376- 0 0 0 0 0 0 0 0 0 0 0 0
43377- 0 0 0 0 0 0 0 0 0 0 0 0
43378- 0 0 0 0 0 0 0 0 0 0 0 0
43379- 0 0 0 0 0 0 0 0 0 0 0 0
43380- 0 0 0 0 0 0 0 0 0 0 0 0
43381- 0 0 0 0 0 0 0 0 0 0 0 0
43382- 0 0 0 0 0 0 0 0 0 0 0 0
43383- 0 0 0 0 0 0 0 0 0 0 0 0
43384- 0 0 0 0 0 0 0 0 0 0 0 0
43385- 0 0 0 0 0 0 0 0 0 0 0 0
43386- 0 0 0 0 0 0 0 0 0 0 0 0
43387- 0 0 0 0 0 0 0 0 0 0 0 0
43388- 0 0 0 0 0 0 0 0 0 0 0 0
43389- 0 0 0 0 0 0 0 0 0 0 0 0
43390- 0 0 0 0 0 0 0 0 0 0 0 0
43391- 0 0 0 6 6 6 10 10 10 14 14 14
43392- 22 22 22 26 26 26 30 30 30 34 34 34
43393- 30 30 30 30 30 30 26 26 26 18 18 18
43394- 14 14 14 10 10 10 6 6 6 0 0 0
43395- 0 0 0 0 0 0 0 0 0 0 0 0
43396- 0 0 0 0 0 0 0 0 0 0 0 0
43397- 0 0 0 0 0 0 0 0 0 0 0 0
43398- 0 0 0 0 0 0 0 0 0 0 0 0
43399- 0 0 0 0 0 0 0 0 0 0 0 0
43400- 0 0 0 0 0 0 0 0 0 0 0 0
43401- 0 0 0 0 0 0 0 0 0 0 0 0
43402- 0 0 0 0 0 0 0 0 0 0 0 0
43403- 0 0 0 0 0 0 0 0 0 0 0 0
43404- 0 0 0 0 0 1 0 0 1 0 0 0
43405- 0 0 0 0 0 0 0 0 0 0 0 0
43406- 0 0 0 0 0 0 0 0 0 0 0 0
43407- 0 0 0 0 0 0 0 0 0 0 0 0
43408- 0 0 0 0 0 0 0 0 0 0 0 0
43409- 0 0 0 0 0 0 0 0 0 0 0 0
43410- 0 0 0 0 0 0 0 0 0 0 0 0
43411- 6 6 6 14 14 14 26 26 26 42 42 42
43412- 54 54 54 66 66 66 78 78 78 78 78 78
43413- 78 78 78 74 74 74 66 66 66 54 54 54
43414- 42 42 42 26 26 26 18 18 18 10 10 10
43415- 6 6 6 0 0 0 0 0 0 0 0 0
43416- 0 0 0 0 0 0 0 0 0 0 0 0
43417- 0 0 0 0 0 0 0 0 0 0 0 0
43418- 0 0 0 0 0 0 0 0 0 0 0 0
43419- 0 0 0 0 0 0 0 0 0 0 0 0
43420- 0 0 0 0 0 0 0 0 0 0 0 0
43421- 0 0 0 0 0 0 0 0 0 0 0 0
43422- 0 0 0 0 0 0 0 0 0 0 0 0
43423- 0 0 0 0 0 0 0 0 0 0 0 0
43424- 0 0 1 0 0 0 0 0 0 0 0 0
43425- 0 0 0 0 0 0 0 0 0 0 0 0
43426- 0 0 0 0 0 0 0 0 0 0 0 0
43427- 0 0 0 0 0 0 0 0 0 0 0 0
43428- 0 0 0 0 0 0 0 0 0 0 0 0
43429- 0 0 0 0 0 0 0 0 0 0 0 0
43430- 0 0 0 0 0 0 0 0 0 10 10 10
43431- 22 22 22 42 42 42 66 66 66 86 86 86
43432- 66 66 66 38 38 38 38 38 38 22 22 22
43433- 26 26 26 34 34 34 54 54 54 66 66 66
43434- 86 86 86 70 70 70 46 46 46 26 26 26
43435- 14 14 14 6 6 6 0 0 0 0 0 0
43436- 0 0 0 0 0 0 0 0 0 0 0 0
43437- 0 0 0 0 0 0 0 0 0 0 0 0
43438- 0 0 0 0 0 0 0 0 0 0 0 0
43439- 0 0 0 0 0 0 0 0 0 0 0 0
43440- 0 0 0 0 0 0 0 0 0 0 0 0
43441- 0 0 0 0 0 0 0 0 0 0 0 0
43442- 0 0 0 0 0 0 0 0 0 0 0 0
43443- 0 0 0 0 0 0 0 0 0 0 0 0
43444- 0 0 1 0 0 1 0 0 1 0 0 0
43445- 0 0 0 0 0 0 0 0 0 0 0 0
43446- 0 0 0 0 0 0 0 0 0 0 0 0
43447- 0 0 0 0 0 0 0 0 0 0 0 0
43448- 0 0 0 0 0 0 0 0 0 0 0 0
43449- 0 0 0 0 0 0 0 0 0 0 0 0
43450- 0 0 0 0 0 0 10 10 10 26 26 26
43451- 50 50 50 82 82 82 58 58 58 6 6 6
43452- 2 2 6 2 2 6 2 2 6 2 2 6
43453- 2 2 6 2 2 6 2 2 6 2 2 6
43454- 6 6 6 54 54 54 86 86 86 66 66 66
43455- 38 38 38 18 18 18 6 6 6 0 0 0
43456- 0 0 0 0 0 0 0 0 0 0 0 0
43457- 0 0 0 0 0 0 0 0 0 0 0 0
43458- 0 0 0 0 0 0 0 0 0 0 0 0
43459- 0 0 0 0 0 0 0 0 0 0 0 0
43460- 0 0 0 0 0 0 0 0 0 0 0 0
43461- 0 0 0 0 0 0 0 0 0 0 0 0
43462- 0 0 0 0 0 0 0 0 0 0 0 0
43463- 0 0 0 0 0 0 0 0 0 0 0 0
43464- 0 0 0 0 0 0 0 0 0 0 0 0
43465- 0 0 0 0 0 0 0 0 0 0 0 0
43466- 0 0 0 0 0 0 0 0 0 0 0 0
43467- 0 0 0 0 0 0 0 0 0 0 0 0
43468- 0 0 0 0 0 0 0 0 0 0 0 0
43469- 0 0 0 0 0 0 0 0 0 0 0 0
43470- 0 0 0 6 6 6 22 22 22 50 50 50
43471- 78 78 78 34 34 34 2 2 6 2 2 6
43472- 2 2 6 2 2 6 2 2 6 2 2 6
43473- 2 2 6 2 2 6 2 2 6 2 2 6
43474- 2 2 6 2 2 6 6 6 6 70 70 70
43475- 78 78 78 46 46 46 22 22 22 6 6 6
43476- 0 0 0 0 0 0 0 0 0 0 0 0
43477- 0 0 0 0 0 0 0 0 0 0 0 0
43478- 0 0 0 0 0 0 0 0 0 0 0 0
43479- 0 0 0 0 0 0 0 0 0 0 0 0
43480- 0 0 0 0 0 0 0 0 0 0 0 0
43481- 0 0 0 0 0 0 0 0 0 0 0 0
43482- 0 0 0 0 0 0 0 0 0 0 0 0
43483- 0 0 0 0 0 0 0 0 0 0 0 0
43484- 0 0 1 0 0 1 0 0 1 0 0 0
43485- 0 0 0 0 0 0 0 0 0 0 0 0
43486- 0 0 0 0 0 0 0 0 0 0 0 0
43487- 0 0 0 0 0 0 0 0 0 0 0 0
43488- 0 0 0 0 0 0 0 0 0 0 0 0
43489- 0 0 0 0 0 0 0 0 0 0 0 0
43490- 6 6 6 18 18 18 42 42 42 82 82 82
43491- 26 26 26 2 2 6 2 2 6 2 2 6
43492- 2 2 6 2 2 6 2 2 6 2 2 6
43493- 2 2 6 2 2 6 2 2 6 14 14 14
43494- 46 46 46 34 34 34 6 6 6 2 2 6
43495- 42 42 42 78 78 78 42 42 42 18 18 18
43496- 6 6 6 0 0 0 0 0 0 0 0 0
43497- 0 0 0 0 0 0 0 0 0 0 0 0
43498- 0 0 0 0 0 0 0 0 0 0 0 0
43499- 0 0 0 0 0 0 0 0 0 0 0 0
43500- 0 0 0 0 0 0 0 0 0 0 0 0
43501- 0 0 0 0 0 0 0 0 0 0 0 0
43502- 0 0 0 0 0 0 0 0 0 0 0 0
43503- 0 0 0 0 0 0 0 0 0 0 0 0
43504- 0 0 1 0 0 0 0 0 1 0 0 0
43505- 0 0 0 0 0 0 0 0 0 0 0 0
43506- 0 0 0 0 0 0 0 0 0 0 0 0
43507- 0 0 0 0 0 0 0 0 0 0 0 0
43508- 0 0 0 0 0 0 0 0 0 0 0 0
43509- 0 0 0 0 0 0 0 0 0 0 0 0
43510- 10 10 10 30 30 30 66 66 66 58 58 58
43511- 2 2 6 2 2 6 2 2 6 2 2 6
43512- 2 2 6 2 2 6 2 2 6 2 2 6
43513- 2 2 6 2 2 6 2 2 6 26 26 26
43514- 86 86 86 101 101 101 46 46 46 10 10 10
43515- 2 2 6 58 58 58 70 70 70 34 34 34
43516- 10 10 10 0 0 0 0 0 0 0 0 0
43517- 0 0 0 0 0 0 0 0 0 0 0 0
43518- 0 0 0 0 0 0 0 0 0 0 0 0
43519- 0 0 0 0 0 0 0 0 0 0 0 0
43520- 0 0 0 0 0 0 0 0 0 0 0 0
43521- 0 0 0 0 0 0 0 0 0 0 0 0
43522- 0 0 0 0 0 0 0 0 0 0 0 0
43523- 0 0 0 0 0 0 0 0 0 0 0 0
43524- 0 0 1 0 0 1 0 0 1 0 0 0
43525- 0 0 0 0 0 0 0 0 0 0 0 0
43526- 0 0 0 0 0 0 0 0 0 0 0 0
43527- 0 0 0 0 0 0 0 0 0 0 0 0
43528- 0 0 0 0 0 0 0 0 0 0 0 0
43529- 0 0 0 0 0 0 0 0 0 0 0 0
43530- 14 14 14 42 42 42 86 86 86 10 10 10
43531- 2 2 6 2 2 6 2 2 6 2 2 6
43532- 2 2 6 2 2 6 2 2 6 2 2 6
43533- 2 2 6 2 2 6 2 2 6 30 30 30
43534- 94 94 94 94 94 94 58 58 58 26 26 26
43535- 2 2 6 6 6 6 78 78 78 54 54 54
43536- 22 22 22 6 6 6 0 0 0 0 0 0
43537- 0 0 0 0 0 0 0 0 0 0 0 0
43538- 0 0 0 0 0 0 0 0 0 0 0 0
43539- 0 0 0 0 0 0 0 0 0 0 0 0
43540- 0 0 0 0 0 0 0 0 0 0 0 0
43541- 0 0 0 0 0 0 0 0 0 0 0 0
43542- 0 0 0 0 0 0 0 0 0 0 0 0
43543- 0 0 0 0 0 0 0 0 0 0 0 0
43544- 0 0 0 0 0 0 0 0 0 0 0 0
43545- 0 0 0 0 0 0 0 0 0 0 0 0
43546- 0 0 0 0 0 0 0 0 0 0 0 0
43547- 0 0 0 0 0 0 0 0 0 0 0 0
43548- 0 0 0 0 0 0 0 0 0 0 0 0
43549- 0 0 0 0 0 0 0 0 0 6 6 6
43550- 22 22 22 62 62 62 62 62 62 2 2 6
43551- 2 2 6 2 2 6 2 2 6 2 2 6
43552- 2 2 6 2 2 6 2 2 6 2 2 6
43553- 2 2 6 2 2 6 2 2 6 26 26 26
43554- 54 54 54 38 38 38 18 18 18 10 10 10
43555- 2 2 6 2 2 6 34 34 34 82 82 82
43556- 38 38 38 14 14 14 0 0 0 0 0 0
43557- 0 0 0 0 0 0 0 0 0 0 0 0
43558- 0 0 0 0 0 0 0 0 0 0 0 0
43559- 0 0 0 0 0 0 0 0 0 0 0 0
43560- 0 0 0 0 0 0 0 0 0 0 0 0
43561- 0 0 0 0 0 0 0 0 0 0 0 0
43562- 0 0 0 0 0 0 0 0 0 0 0 0
43563- 0 0 0 0 0 0 0 0 0 0 0 0
43564- 0 0 0 0 0 1 0 0 1 0 0 0
43565- 0 0 0 0 0 0 0 0 0 0 0 0
43566- 0 0 0 0 0 0 0 0 0 0 0 0
43567- 0 0 0 0 0 0 0 0 0 0 0 0
43568- 0 0 0 0 0 0 0 0 0 0 0 0
43569- 0 0 0 0 0 0 0 0 0 6 6 6
43570- 30 30 30 78 78 78 30 30 30 2 2 6
43571- 2 2 6 2 2 6 2 2 6 2 2 6
43572- 2 2 6 2 2 6 2 2 6 2 2 6
43573- 2 2 6 2 2 6 2 2 6 10 10 10
43574- 10 10 10 2 2 6 2 2 6 2 2 6
43575- 2 2 6 2 2 6 2 2 6 78 78 78
43576- 50 50 50 18 18 18 6 6 6 0 0 0
43577- 0 0 0 0 0 0 0 0 0 0 0 0
43578- 0 0 0 0 0 0 0 0 0 0 0 0
43579- 0 0 0 0 0 0 0 0 0 0 0 0
43580- 0 0 0 0 0 0 0 0 0 0 0 0
43581- 0 0 0 0 0 0 0 0 0 0 0 0
43582- 0 0 0 0 0 0 0 0 0 0 0 0
43583- 0 0 0 0 0 0 0 0 0 0 0 0
43584- 0 0 1 0 0 0 0 0 0 0 0 0
43585- 0 0 0 0 0 0 0 0 0 0 0 0
43586- 0 0 0 0 0 0 0 0 0 0 0 0
43587- 0 0 0 0 0 0 0 0 0 0 0 0
43588- 0 0 0 0 0 0 0 0 0 0 0 0
43589- 0 0 0 0 0 0 0 0 0 10 10 10
43590- 38 38 38 86 86 86 14 14 14 2 2 6
43591- 2 2 6 2 2 6 2 2 6 2 2 6
43592- 2 2 6 2 2 6 2 2 6 2 2 6
43593- 2 2 6 2 2 6 2 2 6 2 2 6
43594- 2 2 6 2 2 6 2 2 6 2 2 6
43595- 2 2 6 2 2 6 2 2 6 54 54 54
43596- 66 66 66 26 26 26 6 6 6 0 0 0
43597- 0 0 0 0 0 0 0 0 0 0 0 0
43598- 0 0 0 0 0 0 0 0 0 0 0 0
43599- 0 0 0 0 0 0 0 0 0 0 0 0
43600- 0 0 0 0 0 0 0 0 0 0 0 0
43601- 0 0 0 0 0 0 0 0 0 0 0 0
43602- 0 0 0 0 0 0 0 0 0 0 0 0
43603- 0 0 0 0 0 0 0 0 0 0 0 0
43604- 0 0 0 0 0 1 0 0 1 0 0 0
43605- 0 0 0 0 0 0 0 0 0 0 0 0
43606- 0 0 0 0 0 0 0 0 0 0 0 0
43607- 0 0 0 0 0 0 0 0 0 0 0 0
43608- 0 0 0 0 0 0 0 0 0 0 0 0
43609- 0 0 0 0 0 0 0 0 0 14 14 14
43610- 42 42 42 82 82 82 2 2 6 2 2 6
43611- 2 2 6 6 6 6 10 10 10 2 2 6
43612- 2 2 6 2 2 6 2 2 6 2 2 6
43613- 2 2 6 2 2 6 2 2 6 6 6 6
43614- 14 14 14 10 10 10 2 2 6 2 2 6
43615- 2 2 6 2 2 6 2 2 6 18 18 18
43616- 82 82 82 34 34 34 10 10 10 0 0 0
43617- 0 0 0 0 0 0 0 0 0 0 0 0
43618- 0 0 0 0 0 0 0 0 0 0 0 0
43619- 0 0 0 0 0 0 0 0 0 0 0 0
43620- 0 0 0 0 0 0 0 0 0 0 0 0
43621- 0 0 0 0 0 0 0 0 0 0 0 0
43622- 0 0 0 0 0 0 0 0 0 0 0 0
43623- 0 0 0 0 0 0 0 0 0 0 0 0
43624- 0 0 1 0 0 0 0 0 0 0 0 0
43625- 0 0 0 0 0 0 0 0 0 0 0 0
43626- 0 0 0 0 0 0 0 0 0 0 0 0
43627- 0 0 0 0 0 0 0 0 0 0 0 0
43628- 0 0 0 0 0 0 0 0 0 0 0 0
43629- 0 0 0 0 0 0 0 0 0 14 14 14
43630- 46 46 46 86 86 86 2 2 6 2 2 6
43631- 6 6 6 6 6 6 22 22 22 34 34 34
43632- 6 6 6 2 2 6 2 2 6 2 2 6
43633- 2 2 6 2 2 6 18 18 18 34 34 34
43634- 10 10 10 50 50 50 22 22 22 2 2 6
43635- 2 2 6 2 2 6 2 2 6 10 10 10
43636- 86 86 86 42 42 42 14 14 14 0 0 0
43637- 0 0 0 0 0 0 0 0 0 0 0 0
43638- 0 0 0 0 0 0 0 0 0 0 0 0
43639- 0 0 0 0 0 0 0 0 0 0 0 0
43640- 0 0 0 0 0 0 0 0 0 0 0 0
43641- 0 0 0 0 0 0 0 0 0 0 0 0
43642- 0 0 0 0 0 0 0 0 0 0 0 0
43643- 0 0 0 0 0 0 0 0 0 0 0 0
43644- 0 0 1 0 0 1 0 0 1 0 0 0
43645- 0 0 0 0 0 0 0 0 0 0 0 0
43646- 0 0 0 0 0 0 0 0 0 0 0 0
43647- 0 0 0 0 0 0 0 0 0 0 0 0
43648- 0 0 0 0 0 0 0 0 0 0 0 0
43649- 0 0 0 0 0 0 0 0 0 14 14 14
43650- 46 46 46 86 86 86 2 2 6 2 2 6
43651- 38 38 38 116 116 116 94 94 94 22 22 22
43652- 22 22 22 2 2 6 2 2 6 2 2 6
43653- 14 14 14 86 86 86 138 138 138 162 162 162
43654-154 154 154 38 38 38 26 26 26 6 6 6
43655- 2 2 6 2 2 6 2 2 6 2 2 6
43656- 86 86 86 46 46 46 14 14 14 0 0 0
43657- 0 0 0 0 0 0 0 0 0 0 0 0
43658- 0 0 0 0 0 0 0 0 0 0 0 0
43659- 0 0 0 0 0 0 0 0 0 0 0 0
43660- 0 0 0 0 0 0 0 0 0 0 0 0
43661- 0 0 0 0 0 0 0 0 0 0 0 0
43662- 0 0 0 0 0 0 0 0 0 0 0 0
43663- 0 0 0 0 0 0 0 0 0 0 0 0
43664- 0 0 0 0 0 0 0 0 0 0 0 0
43665- 0 0 0 0 0 0 0 0 0 0 0 0
43666- 0 0 0 0 0 0 0 0 0 0 0 0
43667- 0 0 0 0 0 0 0 0 0 0 0 0
43668- 0 0 0 0 0 0 0 0 0 0 0 0
43669- 0 0 0 0 0 0 0 0 0 14 14 14
43670- 46 46 46 86 86 86 2 2 6 14 14 14
43671-134 134 134 198 198 198 195 195 195 116 116 116
43672- 10 10 10 2 2 6 2 2 6 6 6 6
43673-101 98 89 187 187 187 210 210 210 218 218 218
43674-214 214 214 134 134 134 14 14 14 6 6 6
43675- 2 2 6 2 2 6 2 2 6 2 2 6
43676- 86 86 86 50 50 50 18 18 18 6 6 6
43677- 0 0 0 0 0 0 0 0 0 0 0 0
43678- 0 0 0 0 0 0 0 0 0 0 0 0
43679- 0 0 0 0 0 0 0 0 0 0 0 0
43680- 0 0 0 0 0 0 0 0 0 0 0 0
43681- 0 0 0 0 0 0 0 0 0 0 0 0
43682- 0 0 0 0 0 0 0 0 0 0 0 0
43683- 0 0 0 0 0 0 0 0 1 0 0 0
43684- 0 0 1 0 0 1 0 0 1 0 0 0
43685- 0 0 0 0 0 0 0 0 0 0 0 0
43686- 0 0 0 0 0 0 0 0 0 0 0 0
43687- 0 0 0 0 0 0 0 0 0 0 0 0
43688- 0 0 0 0 0 0 0 0 0 0 0 0
43689- 0 0 0 0 0 0 0 0 0 14 14 14
43690- 46 46 46 86 86 86 2 2 6 54 54 54
43691-218 218 218 195 195 195 226 226 226 246 246 246
43692- 58 58 58 2 2 6 2 2 6 30 30 30
43693-210 210 210 253 253 253 174 174 174 123 123 123
43694-221 221 221 234 234 234 74 74 74 2 2 6
43695- 2 2 6 2 2 6 2 2 6 2 2 6
43696- 70 70 70 58 58 58 22 22 22 6 6 6
43697- 0 0 0 0 0 0 0 0 0 0 0 0
43698- 0 0 0 0 0 0 0 0 0 0 0 0
43699- 0 0 0 0 0 0 0 0 0 0 0 0
43700- 0 0 0 0 0 0 0 0 0 0 0 0
43701- 0 0 0 0 0 0 0 0 0 0 0 0
43702- 0 0 0 0 0 0 0 0 0 0 0 0
43703- 0 0 0 0 0 0 0 0 0 0 0 0
43704- 0 0 0 0 0 0 0 0 0 0 0 0
43705- 0 0 0 0 0 0 0 0 0 0 0 0
43706- 0 0 0 0 0 0 0 0 0 0 0 0
43707- 0 0 0 0 0 0 0 0 0 0 0 0
43708- 0 0 0 0 0 0 0 0 0 0 0 0
43709- 0 0 0 0 0 0 0 0 0 14 14 14
43710- 46 46 46 82 82 82 2 2 6 106 106 106
43711-170 170 170 26 26 26 86 86 86 226 226 226
43712-123 123 123 10 10 10 14 14 14 46 46 46
43713-231 231 231 190 190 190 6 6 6 70 70 70
43714- 90 90 90 238 238 238 158 158 158 2 2 6
43715- 2 2 6 2 2 6 2 2 6 2 2 6
43716- 70 70 70 58 58 58 22 22 22 6 6 6
43717- 0 0 0 0 0 0 0 0 0 0 0 0
43718- 0 0 0 0 0 0 0 0 0 0 0 0
43719- 0 0 0 0 0 0 0 0 0 0 0 0
43720- 0 0 0 0 0 0 0 0 0 0 0 0
43721- 0 0 0 0 0 0 0 0 0 0 0 0
43722- 0 0 0 0 0 0 0 0 0 0 0 0
43723- 0 0 0 0 0 0 0 0 1 0 0 0
43724- 0 0 1 0 0 1 0 0 1 0 0 0
43725- 0 0 0 0 0 0 0 0 0 0 0 0
43726- 0 0 0 0 0 0 0 0 0 0 0 0
43727- 0 0 0 0 0 0 0 0 0 0 0 0
43728- 0 0 0 0 0 0 0 0 0 0 0 0
43729- 0 0 0 0 0 0 0 0 0 14 14 14
43730- 42 42 42 86 86 86 6 6 6 116 116 116
43731-106 106 106 6 6 6 70 70 70 149 149 149
43732-128 128 128 18 18 18 38 38 38 54 54 54
43733-221 221 221 106 106 106 2 2 6 14 14 14
43734- 46 46 46 190 190 190 198 198 198 2 2 6
43735- 2 2 6 2 2 6 2 2 6 2 2 6
43736- 74 74 74 62 62 62 22 22 22 6 6 6
43737- 0 0 0 0 0 0 0 0 0 0 0 0
43738- 0 0 0 0 0 0 0 0 0 0 0 0
43739- 0 0 0 0 0 0 0 0 0 0 0 0
43740- 0 0 0 0 0 0 0 0 0 0 0 0
43741- 0 0 0 0 0 0 0 0 0 0 0 0
43742- 0 0 0 0 0 0 0 0 0 0 0 0
43743- 0 0 0 0 0 0 0 0 1 0 0 0
43744- 0 0 1 0 0 0 0 0 1 0 0 0
43745- 0 0 0 0 0 0 0 0 0 0 0 0
43746- 0 0 0 0 0 0 0 0 0 0 0 0
43747- 0 0 0 0 0 0 0 0 0 0 0 0
43748- 0 0 0 0 0 0 0 0 0 0 0 0
43749- 0 0 0 0 0 0 0 0 0 14 14 14
43750- 42 42 42 94 94 94 14 14 14 101 101 101
43751-128 128 128 2 2 6 18 18 18 116 116 116
43752-118 98 46 121 92 8 121 92 8 98 78 10
43753-162 162 162 106 106 106 2 2 6 2 2 6
43754- 2 2 6 195 195 195 195 195 195 6 6 6
43755- 2 2 6 2 2 6 2 2 6 2 2 6
43756- 74 74 74 62 62 62 22 22 22 6 6 6
43757- 0 0 0 0 0 0 0 0 0 0 0 0
43758- 0 0 0 0 0 0 0 0 0 0 0 0
43759- 0 0 0 0 0 0 0 0 0 0 0 0
43760- 0 0 0 0 0 0 0 0 0 0 0 0
43761- 0 0 0 0 0 0 0 0 0 0 0 0
43762- 0 0 0 0 0 0 0 0 0 0 0 0
43763- 0 0 0 0 0 0 0 0 1 0 0 1
43764- 0 0 1 0 0 0 0 0 1 0 0 0
43765- 0 0 0 0 0 0 0 0 0 0 0 0
43766- 0 0 0 0 0 0 0 0 0 0 0 0
43767- 0 0 0 0 0 0 0 0 0 0 0 0
43768- 0 0 0 0 0 0 0 0 0 0 0 0
43769- 0 0 0 0 0 0 0 0 0 10 10 10
43770- 38 38 38 90 90 90 14 14 14 58 58 58
43771-210 210 210 26 26 26 54 38 6 154 114 10
43772-226 170 11 236 186 11 225 175 15 184 144 12
43773-215 174 15 175 146 61 37 26 9 2 2 6
43774- 70 70 70 246 246 246 138 138 138 2 2 6
43775- 2 2 6 2 2 6 2 2 6 2 2 6
43776- 70 70 70 66 66 66 26 26 26 6 6 6
43777- 0 0 0 0 0 0 0 0 0 0 0 0
43778- 0 0 0 0 0 0 0 0 0 0 0 0
43779- 0 0 0 0 0 0 0 0 0 0 0 0
43780- 0 0 0 0 0 0 0 0 0 0 0 0
43781- 0 0 0 0 0 0 0 0 0 0 0 0
43782- 0 0 0 0 0 0 0 0 0 0 0 0
43783- 0 0 0 0 0 0 0 0 0 0 0 0
43784- 0 0 0 0 0 0 0 0 0 0 0 0
43785- 0 0 0 0 0 0 0 0 0 0 0 0
43786- 0 0 0 0 0 0 0 0 0 0 0 0
43787- 0 0 0 0 0 0 0 0 0 0 0 0
43788- 0 0 0 0 0 0 0 0 0 0 0 0
43789- 0 0 0 0 0 0 0 0 0 10 10 10
43790- 38 38 38 86 86 86 14 14 14 10 10 10
43791-195 195 195 188 164 115 192 133 9 225 175 15
43792-239 182 13 234 190 10 232 195 16 232 200 30
43793-245 207 45 241 208 19 232 195 16 184 144 12
43794-218 194 134 211 206 186 42 42 42 2 2 6
43795- 2 2 6 2 2 6 2 2 6 2 2 6
43796- 50 50 50 74 74 74 30 30 30 6 6 6
43797- 0 0 0 0 0 0 0 0 0 0 0 0
43798- 0 0 0 0 0 0 0 0 0 0 0 0
43799- 0 0 0 0 0 0 0 0 0 0 0 0
43800- 0 0 0 0 0 0 0 0 0 0 0 0
43801- 0 0 0 0 0 0 0 0 0 0 0 0
43802- 0 0 0 0 0 0 0 0 0 0 0 0
43803- 0 0 0 0 0 0 0 0 0 0 0 0
43804- 0 0 0 0 0 0 0 0 0 0 0 0
43805- 0 0 0 0 0 0 0 0 0 0 0 0
43806- 0 0 0 0 0 0 0 0 0 0 0 0
43807- 0 0 0 0 0 0 0 0 0 0 0 0
43808- 0 0 0 0 0 0 0 0 0 0 0 0
43809- 0 0 0 0 0 0 0 0 0 10 10 10
43810- 34 34 34 86 86 86 14 14 14 2 2 6
43811-121 87 25 192 133 9 219 162 10 239 182 13
43812-236 186 11 232 195 16 241 208 19 244 214 54
43813-246 218 60 246 218 38 246 215 20 241 208 19
43814-241 208 19 226 184 13 121 87 25 2 2 6
43815- 2 2 6 2 2 6 2 2 6 2 2 6
43816- 50 50 50 82 82 82 34 34 34 10 10 10
43817- 0 0 0 0 0 0 0 0 0 0 0 0
43818- 0 0 0 0 0 0 0 0 0 0 0 0
43819- 0 0 0 0 0 0 0 0 0 0 0 0
43820- 0 0 0 0 0 0 0 0 0 0 0 0
43821- 0 0 0 0 0 0 0 0 0 0 0 0
43822- 0 0 0 0 0 0 0 0 0 0 0 0
43823- 0 0 0 0 0 0 0 0 0 0 0 0
43824- 0 0 0 0 0 0 0 0 0 0 0 0
43825- 0 0 0 0 0 0 0 0 0 0 0 0
43826- 0 0 0 0 0 0 0 0 0 0 0 0
43827- 0 0 0 0 0 0 0 0 0 0 0 0
43828- 0 0 0 0 0 0 0 0 0 0 0 0
43829- 0 0 0 0 0 0 0 0 0 10 10 10
43830- 34 34 34 82 82 82 30 30 30 61 42 6
43831-180 123 7 206 145 10 230 174 11 239 182 13
43832-234 190 10 238 202 15 241 208 19 246 218 74
43833-246 218 38 246 215 20 246 215 20 246 215 20
43834-226 184 13 215 174 15 184 144 12 6 6 6
43835- 2 2 6 2 2 6 2 2 6 2 2 6
43836- 26 26 26 94 94 94 42 42 42 14 14 14
43837- 0 0 0 0 0 0 0 0 0 0 0 0
43838- 0 0 0 0 0 0 0 0 0 0 0 0
43839- 0 0 0 0 0 0 0 0 0 0 0 0
43840- 0 0 0 0 0 0 0 0 0 0 0 0
43841- 0 0 0 0 0 0 0 0 0 0 0 0
43842- 0 0 0 0 0 0 0 0 0 0 0 0
43843- 0 0 0 0 0 0 0 0 0 0 0 0
43844- 0 0 0 0 0 0 0 0 0 0 0 0
43845- 0 0 0 0 0 0 0 0 0 0 0 0
43846- 0 0 0 0 0 0 0 0 0 0 0 0
43847- 0 0 0 0 0 0 0 0 0 0 0 0
43848- 0 0 0 0 0 0 0 0 0 0 0 0
43849- 0 0 0 0 0 0 0 0 0 10 10 10
43850- 30 30 30 78 78 78 50 50 50 104 69 6
43851-192 133 9 216 158 10 236 178 12 236 186 11
43852-232 195 16 241 208 19 244 214 54 245 215 43
43853-246 215 20 246 215 20 241 208 19 198 155 10
43854-200 144 11 216 158 10 156 118 10 2 2 6
43855- 2 2 6 2 2 6 2 2 6 2 2 6
43856- 6 6 6 90 90 90 54 54 54 18 18 18
43857- 6 6 6 0 0 0 0 0 0 0 0 0
43858- 0 0 0 0 0 0 0 0 0 0 0 0
43859- 0 0 0 0 0 0 0 0 0 0 0 0
43860- 0 0 0 0 0 0 0 0 0 0 0 0
43861- 0 0 0 0 0 0 0 0 0 0 0 0
43862- 0 0 0 0 0 0 0 0 0 0 0 0
43863- 0 0 0 0 0 0 0 0 0 0 0 0
43864- 0 0 0 0 0 0 0 0 0 0 0 0
43865- 0 0 0 0 0 0 0 0 0 0 0 0
43866- 0 0 0 0 0 0 0 0 0 0 0 0
43867- 0 0 0 0 0 0 0 0 0 0 0 0
43868- 0 0 0 0 0 0 0 0 0 0 0 0
43869- 0 0 0 0 0 0 0 0 0 10 10 10
43870- 30 30 30 78 78 78 46 46 46 22 22 22
43871-137 92 6 210 162 10 239 182 13 238 190 10
43872-238 202 15 241 208 19 246 215 20 246 215 20
43873-241 208 19 203 166 17 185 133 11 210 150 10
43874-216 158 10 210 150 10 102 78 10 2 2 6
43875- 6 6 6 54 54 54 14 14 14 2 2 6
43876- 2 2 6 62 62 62 74 74 74 30 30 30
43877- 10 10 10 0 0 0 0 0 0 0 0 0
43878- 0 0 0 0 0 0 0 0 0 0 0 0
43879- 0 0 0 0 0 0 0 0 0 0 0 0
43880- 0 0 0 0 0 0 0 0 0 0 0 0
43881- 0 0 0 0 0 0 0 0 0 0 0 0
43882- 0 0 0 0 0 0 0 0 0 0 0 0
43883- 0 0 0 0 0 0 0 0 0 0 0 0
43884- 0 0 0 0 0 0 0 0 0 0 0 0
43885- 0 0 0 0 0 0 0 0 0 0 0 0
43886- 0 0 0 0 0 0 0 0 0 0 0 0
43887- 0 0 0 0 0 0 0 0 0 0 0 0
43888- 0 0 0 0 0 0 0 0 0 0 0 0
43889- 0 0 0 0 0 0 0 0 0 10 10 10
43890- 34 34 34 78 78 78 50 50 50 6 6 6
43891- 94 70 30 139 102 15 190 146 13 226 184 13
43892-232 200 30 232 195 16 215 174 15 190 146 13
43893-168 122 10 192 133 9 210 150 10 213 154 11
43894-202 150 34 182 157 106 101 98 89 2 2 6
43895- 2 2 6 78 78 78 116 116 116 58 58 58
43896- 2 2 6 22 22 22 90 90 90 46 46 46
43897- 18 18 18 6 6 6 0 0 0 0 0 0
43898- 0 0 0 0 0 0 0 0 0 0 0 0
43899- 0 0 0 0 0 0 0 0 0 0 0 0
43900- 0 0 0 0 0 0 0 0 0 0 0 0
43901- 0 0 0 0 0 0 0 0 0 0 0 0
43902- 0 0 0 0 0 0 0 0 0 0 0 0
43903- 0 0 0 0 0 0 0 0 0 0 0 0
43904- 0 0 0 0 0 0 0 0 0 0 0 0
43905- 0 0 0 0 0 0 0 0 0 0 0 0
43906- 0 0 0 0 0 0 0 0 0 0 0 0
43907- 0 0 0 0 0 0 0 0 0 0 0 0
43908- 0 0 0 0 0 0 0 0 0 0 0 0
43909- 0 0 0 0 0 0 0 0 0 10 10 10
43910- 38 38 38 86 86 86 50 50 50 6 6 6
43911-128 128 128 174 154 114 156 107 11 168 122 10
43912-198 155 10 184 144 12 197 138 11 200 144 11
43913-206 145 10 206 145 10 197 138 11 188 164 115
43914-195 195 195 198 198 198 174 174 174 14 14 14
43915- 2 2 6 22 22 22 116 116 116 116 116 116
43916- 22 22 22 2 2 6 74 74 74 70 70 70
43917- 30 30 30 10 10 10 0 0 0 0 0 0
43918- 0 0 0 0 0 0 0 0 0 0 0 0
43919- 0 0 0 0 0 0 0 0 0 0 0 0
43920- 0 0 0 0 0 0 0 0 0 0 0 0
43921- 0 0 0 0 0 0 0 0 0 0 0 0
43922- 0 0 0 0 0 0 0 0 0 0 0 0
43923- 0 0 0 0 0 0 0 0 0 0 0 0
43924- 0 0 0 0 0 0 0 0 0 0 0 0
43925- 0 0 0 0 0 0 0 0 0 0 0 0
43926- 0 0 0 0 0 0 0 0 0 0 0 0
43927- 0 0 0 0 0 0 0 0 0 0 0 0
43928- 0 0 0 0 0 0 0 0 0 0 0 0
43929- 0 0 0 0 0 0 6 6 6 18 18 18
43930- 50 50 50 101 101 101 26 26 26 10 10 10
43931-138 138 138 190 190 190 174 154 114 156 107 11
43932-197 138 11 200 144 11 197 138 11 192 133 9
43933-180 123 7 190 142 34 190 178 144 187 187 187
43934-202 202 202 221 221 221 214 214 214 66 66 66
43935- 2 2 6 2 2 6 50 50 50 62 62 62
43936- 6 6 6 2 2 6 10 10 10 90 90 90
43937- 50 50 50 18 18 18 6 6 6 0 0 0
43938- 0 0 0 0 0 0 0 0 0 0 0 0
43939- 0 0 0 0 0 0 0 0 0 0 0 0
43940- 0 0 0 0 0 0 0 0 0 0 0 0
43941- 0 0 0 0 0 0 0 0 0 0 0 0
43942- 0 0 0 0 0 0 0 0 0 0 0 0
43943- 0 0 0 0 0 0 0 0 0 0 0 0
43944- 0 0 0 0 0 0 0 0 0 0 0 0
43945- 0 0 0 0 0 0 0 0 0 0 0 0
43946- 0 0 0 0 0 0 0 0 0 0 0 0
43947- 0 0 0 0 0 0 0 0 0 0 0 0
43948- 0 0 0 0 0 0 0 0 0 0 0 0
43949- 0 0 0 0 0 0 10 10 10 34 34 34
43950- 74 74 74 74 74 74 2 2 6 6 6 6
43951-144 144 144 198 198 198 190 190 190 178 166 146
43952-154 121 60 156 107 11 156 107 11 168 124 44
43953-174 154 114 187 187 187 190 190 190 210 210 210
43954-246 246 246 253 253 253 253 253 253 182 182 182
43955- 6 6 6 2 2 6 2 2 6 2 2 6
43956- 2 2 6 2 2 6 2 2 6 62 62 62
43957- 74 74 74 34 34 34 14 14 14 0 0 0
43958- 0 0 0 0 0 0 0 0 0 0 0 0
43959- 0 0 0 0 0 0 0 0 0 0 0 0
43960- 0 0 0 0 0 0 0 0 0 0 0 0
43961- 0 0 0 0 0 0 0 0 0 0 0 0
43962- 0 0 0 0 0 0 0 0 0 0 0 0
43963- 0 0 0 0 0 0 0 0 0 0 0 0
43964- 0 0 0 0 0 0 0 0 0 0 0 0
43965- 0 0 0 0 0 0 0 0 0 0 0 0
43966- 0 0 0 0 0 0 0 0 0 0 0 0
43967- 0 0 0 0 0 0 0 0 0 0 0 0
43968- 0 0 0 0 0 0 0 0 0 0 0 0
43969- 0 0 0 10 10 10 22 22 22 54 54 54
43970- 94 94 94 18 18 18 2 2 6 46 46 46
43971-234 234 234 221 221 221 190 190 190 190 190 190
43972-190 190 190 187 187 187 187 187 187 190 190 190
43973-190 190 190 195 195 195 214 214 214 242 242 242
43974-253 253 253 253 253 253 253 253 253 253 253 253
43975- 82 82 82 2 2 6 2 2 6 2 2 6
43976- 2 2 6 2 2 6 2 2 6 14 14 14
43977- 86 86 86 54 54 54 22 22 22 6 6 6
43978- 0 0 0 0 0 0 0 0 0 0 0 0
43979- 0 0 0 0 0 0 0 0 0 0 0 0
43980- 0 0 0 0 0 0 0 0 0 0 0 0
43981- 0 0 0 0 0 0 0 0 0 0 0 0
43982- 0 0 0 0 0 0 0 0 0 0 0 0
43983- 0 0 0 0 0 0 0 0 0 0 0 0
43984- 0 0 0 0 0 0 0 0 0 0 0 0
43985- 0 0 0 0 0 0 0 0 0 0 0 0
43986- 0 0 0 0 0 0 0 0 0 0 0 0
43987- 0 0 0 0 0 0 0 0 0 0 0 0
43988- 0 0 0 0 0 0 0 0 0 0 0 0
43989- 6 6 6 18 18 18 46 46 46 90 90 90
43990- 46 46 46 18 18 18 6 6 6 182 182 182
43991-253 253 253 246 246 246 206 206 206 190 190 190
43992-190 190 190 190 190 190 190 190 190 190 190 190
43993-206 206 206 231 231 231 250 250 250 253 253 253
43994-253 253 253 253 253 253 253 253 253 253 253 253
43995-202 202 202 14 14 14 2 2 6 2 2 6
43996- 2 2 6 2 2 6 2 2 6 2 2 6
43997- 42 42 42 86 86 86 42 42 42 18 18 18
43998- 6 6 6 0 0 0 0 0 0 0 0 0
43999- 0 0 0 0 0 0 0 0 0 0 0 0
44000- 0 0 0 0 0 0 0 0 0 0 0 0
44001- 0 0 0 0 0 0 0 0 0 0 0 0
44002- 0 0 0 0 0 0 0 0 0 0 0 0
44003- 0 0 0 0 0 0 0 0 0 0 0 0
44004- 0 0 0 0 0 0 0 0 0 0 0 0
44005- 0 0 0 0 0 0 0 0 0 0 0 0
44006- 0 0 0 0 0 0 0 0 0 0 0 0
44007- 0 0 0 0 0 0 0 0 0 0 0 0
44008- 0 0 0 0 0 0 0 0 0 6 6 6
44009- 14 14 14 38 38 38 74 74 74 66 66 66
44010- 2 2 6 6 6 6 90 90 90 250 250 250
44011-253 253 253 253 253 253 238 238 238 198 198 198
44012-190 190 190 190 190 190 195 195 195 221 221 221
44013-246 246 246 253 253 253 253 253 253 253 253 253
44014-253 253 253 253 253 253 253 253 253 253 253 253
44015-253 253 253 82 82 82 2 2 6 2 2 6
44016- 2 2 6 2 2 6 2 2 6 2 2 6
44017- 2 2 6 78 78 78 70 70 70 34 34 34
44018- 14 14 14 6 6 6 0 0 0 0 0 0
44019- 0 0 0 0 0 0 0 0 0 0 0 0
44020- 0 0 0 0 0 0 0 0 0 0 0 0
44021- 0 0 0 0 0 0 0 0 0 0 0 0
44022- 0 0 0 0 0 0 0 0 0 0 0 0
44023- 0 0 0 0 0 0 0 0 0 0 0 0
44024- 0 0 0 0 0 0 0 0 0 0 0 0
44025- 0 0 0 0 0 0 0 0 0 0 0 0
44026- 0 0 0 0 0 0 0 0 0 0 0 0
44027- 0 0 0 0 0 0 0 0 0 0 0 0
44028- 0 0 0 0 0 0 0 0 0 14 14 14
44029- 34 34 34 66 66 66 78 78 78 6 6 6
44030- 2 2 6 18 18 18 218 218 218 253 253 253
44031-253 253 253 253 253 253 253 253 253 246 246 246
44032-226 226 226 231 231 231 246 246 246 253 253 253
44033-253 253 253 253 253 253 253 253 253 253 253 253
44034-253 253 253 253 253 253 253 253 253 253 253 253
44035-253 253 253 178 178 178 2 2 6 2 2 6
44036- 2 2 6 2 2 6 2 2 6 2 2 6
44037- 2 2 6 18 18 18 90 90 90 62 62 62
44038- 30 30 30 10 10 10 0 0 0 0 0 0
44039- 0 0 0 0 0 0 0 0 0 0 0 0
44040- 0 0 0 0 0 0 0 0 0 0 0 0
44041- 0 0 0 0 0 0 0 0 0 0 0 0
44042- 0 0 0 0 0 0 0 0 0 0 0 0
44043- 0 0 0 0 0 0 0 0 0 0 0 0
44044- 0 0 0 0 0 0 0 0 0 0 0 0
44045- 0 0 0 0 0 0 0 0 0 0 0 0
44046- 0 0 0 0 0 0 0 0 0 0 0 0
44047- 0 0 0 0 0 0 0 0 0 0 0 0
44048- 0 0 0 0 0 0 10 10 10 26 26 26
44049- 58 58 58 90 90 90 18 18 18 2 2 6
44050- 2 2 6 110 110 110 253 253 253 253 253 253
44051-253 253 253 253 253 253 253 253 253 253 253 253
44052-250 250 250 253 253 253 253 253 253 253 253 253
44053-253 253 253 253 253 253 253 253 253 253 253 253
44054-253 253 253 253 253 253 253 253 253 253 253 253
44055-253 253 253 231 231 231 18 18 18 2 2 6
44056- 2 2 6 2 2 6 2 2 6 2 2 6
44057- 2 2 6 2 2 6 18 18 18 94 94 94
44058- 54 54 54 26 26 26 10 10 10 0 0 0
44059- 0 0 0 0 0 0 0 0 0 0 0 0
44060- 0 0 0 0 0 0 0 0 0 0 0 0
44061- 0 0 0 0 0 0 0 0 0 0 0 0
44062- 0 0 0 0 0 0 0 0 0 0 0 0
44063- 0 0 0 0 0 0 0 0 0 0 0 0
44064- 0 0 0 0 0 0 0 0 0 0 0 0
44065- 0 0 0 0 0 0 0 0 0 0 0 0
44066- 0 0 0 0 0 0 0 0 0 0 0 0
44067- 0 0 0 0 0 0 0 0 0 0 0 0
44068- 0 0 0 6 6 6 22 22 22 50 50 50
44069- 90 90 90 26 26 26 2 2 6 2 2 6
44070- 14 14 14 195 195 195 250 250 250 253 253 253
44071-253 253 253 253 253 253 253 253 253 253 253 253
44072-253 253 253 253 253 253 253 253 253 253 253 253
44073-253 253 253 253 253 253 253 253 253 253 253 253
44074-253 253 253 253 253 253 253 253 253 253 253 253
44075-250 250 250 242 242 242 54 54 54 2 2 6
44076- 2 2 6 2 2 6 2 2 6 2 2 6
44077- 2 2 6 2 2 6 2 2 6 38 38 38
44078- 86 86 86 50 50 50 22 22 22 6 6 6
44079- 0 0 0 0 0 0 0 0 0 0 0 0
44080- 0 0 0 0 0 0 0 0 0 0 0 0
44081- 0 0 0 0 0 0 0 0 0 0 0 0
44082- 0 0 0 0 0 0 0 0 0 0 0 0
44083- 0 0 0 0 0 0 0 0 0 0 0 0
44084- 0 0 0 0 0 0 0 0 0 0 0 0
44085- 0 0 0 0 0 0 0 0 0 0 0 0
44086- 0 0 0 0 0 0 0 0 0 0 0 0
44087- 0 0 0 0 0 0 0 0 0 0 0 0
44088- 6 6 6 14 14 14 38 38 38 82 82 82
44089- 34 34 34 2 2 6 2 2 6 2 2 6
44090- 42 42 42 195 195 195 246 246 246 253 253 253
44091-253 253 253 253 253 253 253 253 253 250 250 250
44092-242 242 242 242 242 242 250 250 250 253 253 253
44093-253 253 253 253 253 253 253 253 253 253 253 253
44094-253 253 253 250 250 250 246 246 246 238 238 238
44095-226 226 226 231 231 231 101 101 101 6 6 6
44096- 2 2 6 2 2 6 2 2 6 2 2 6
44097- 2 2 6 2 2 6 2 2 6 2 2 6
44098- 38 38 38 82 82 82 42 42 42 14 14 14
44099- 6 6 6 0 0 0 0 0 0 0 0 0
44100- 0 0 0 0 0 0 0 0 0 0 0 0
44101- 0 0 0 0 0 0 0 0 0 0 0 0
44102- 0 0 0 0 0 0 0 0 0 0 0 0
44103- 0 0 0 0 0 0 0 0 0 0 0 0
44104- 0 0 0 0 0 0 0 0 0 0 0 0
44105- 0 0 0 0 0 0 0 0 0 0 0 0
44106- 0 0 0 0 0 0 0 0 0 0 0 0
44107- 0 0 0 0 0 0 0 0 0 0 0 0
44108- 10 10 10 26 26 26 62 62 62 66 66 66
44109- 2 2 6 2 2 6 2 2 6 6 6 6
44110- 70 70 70 170 170 170 206 206 206 234 234 234
44111-246 246 246 250 250 250 250 250 250 238 238 238
44112-226 226 226 231 231 231 238 238 238 250 250 250
44113-250 250 250 250 250 250 246 246 246 231 231 231
44114-214 214 214 206 206 206 202 202 202 202 202 202
44115-198 198 198 202 202 202 182 182 182 18 18 18
44116- 2 2 6 2 2 6 2 2 6 2 2 6
44117- 2 2 6 2 2 6 2 2 6 2 2 6
44118- 2 2 6 62 62 62 66 66 66 30 30 30
44119- 10 10 10 0 0 0 0 0 0 0 0 0
44120- 0 0 0 0 0 0 0 0 0 0 0 0
44121- 0 0 0 0 0 0 0 0 0 0 0 0
44122- 0 0 0 0 0 0 0 0 0 0 0 0
44123- 0 0 0 0 0 0 0 0 0 0 0 0
44124- 0 0 0 0 0 0 0 0 0 0 0 0
44125- 0 0 0 0 0 0 0 0 0 0 0 0
44126- 0 0 0 0 0 0 0 0 0 0 0 0
44127- 0 0 0 0 0 0 0 0 0 0 0 0
44128- 14 14 14 42 42 42 82 82 82 18 18 18
44129- 2 2 6 2 2 6 2 2 6 10 10 10
44130- 94 94 94 182 182 182 218 218 218 242 242 242
44131-250 250 250 253 253 253 253 253 253 250 250 250
44132-234 234 234 253 253 253 253 253 253 253 253 253
44133-253 253 253 253 253 253 253 253 253 246 246 246
44134-238 238 238 226 226 226 210 210 210 202 202 202
44135-195 195 195 195 195 195 210 210 210 158 158 158
44136- 6 6 6 14 14 14 50 50 50 14 14 14
44137- 2 2 6 2 2 6 2 2 6 2 2 6
44138- 2 2 6 6 6 6 86 86 86 46 46 46
44139- 18 18 18 6 6 6 0 0 0 0 0 0
44140- 0 0 0 0 0 0 0 0 0 0 0 0
44141- 0 0 0 0 0 0 0 0 0 0 0 0
44142- 0 0 0 0 0 0 0 0 0 0 0 0
44143- 0 0 0 0 0 0 0 0 0 0 0 0
44144- 0 0 0 0 0 0 0 0 0 0 0 0
44145- 0 0 0 0 0 0 0 0 0 0 0 0
44146- 0 0 0 0 0 0 0 0 0 0 0 0
44147- 0 0 0 0 0 0 0 0 0 6 6 6
44148- 22 22 22 54 54 54 70 70 70 2 2 6
44149- 2 2 6 10 10 10 2 2 6 22 22 22
44150-166 166 166 231 231 231 250 250 250 253 253 253
44151-253 253 253 253 253 253 253 253 253 250 250 250
44152-242 242 242 253 253 253 253 253 253 253 253 253
44153-253 253 253 253 253 253 253 253 253 253 253 253
44154-253 253 253 253 253 253 253 253 253 246 246 246
44155-231 231 231 206 206 206 198 198 198 226 226 226
44156- 94 94 94 2 2 6 6 6 6 38 38 38
44157- 30 30 30 2 2 6 2 2 6 2 2 6
44158- 2 2 6 2 2 6 62 62 62 66 66 66
44159- 26 26 26 10 10 10 0 0 0 0 0 0
44160- 0 0 0 0 0 0 0 0 0 0 0 0
44161- 0 0 0 0 0 0 0 0 0 0 0 0
44162- 0 0 0 0 0 0 0 0 0 0 0 0
44163- 0 0 0 0 0 0 0 0 0 0 0 0
44164- 0 0 0 0 0 0 0 0 0 0 0 0
44165- 0 0 0 0 0 0 0 0 0 0 0 0
44166- 0 0 0 0 0 0 0 0 0 0 0 0
44167- 0 0 0 0 0 0 0 0 0 10 10 10
44168- 30 30 30 74 74 74 50 50 50 2 2 6
44169- 26 26 26 26 26 26 2 2 6 106 106 106
44170-238 238 238 253 253 253 253 253 253 253 253 253
44171-253 253 253 253 253 253 253 253 253 253 253 253
44172-253 253 253 253 253 253 253 253 253 253 253 253
44173-253 253 253 253 253 253 253 253 253 253 253 253
44174-253 253 253 253 253 253 253 253 253 253 253 253
44175-253 253 253 246 246 246 218 218 218 202 202 202
44176-210 210 210 14 14 14 2 2 6 2 2 6
44177- 30 30 30 22 22 22 2 2 6 2 2 6
44178- 2 2 6 2 2 6 18 18 18 86 86 86
44179- 42 42 42 14 14 14 0 0 0 0 0 0
44180- 0 0 0 0 0 0 0 0 0 0 0 0
44181- 0 0 0 0 0 0 0 0 0 0 0 0
44182- 0 0 0 0 0 0 0 0 0 0 0 0
44183- 0 0 0 0 0 0 0 0 0 0 0 0
44184- 0 0 0 0 0 0 0 0 0 0 0 0
44185- 0 0 0 0 0 0 0 0 0 0 0 0
44186- 0 0 0 0 0 0 0 0 0 0 0 0
44187- 0 0 0 0 0 0 0 0 0 14 14 14
44188- 42 42 42 90 90 90 22 22 22 2 2 6
44189- 42 42 42 2 2 6 18 18 18 218 218 218
44190-253 253 253 253 253 253 253 253 253 253 253 253
44191-253 253 253 253 253 253 253 253 253 253 253 253
44192-253 253 253 253 253 253 253 253 253 253 253 253
44193-253 253 253 253 253 253 253 253 253 253 253 253
44194-253 253 253 253 253 253 253 253 253 253 253 253
44195-253 253 253 253 253 253 250 250 250 221 221 221
44196-218 218 218 101 101 101 2 2 6 14 14 14
44197- 18 18 18 38 38 38 10 10 10 2 2 6
44198- 2 2 6 2 2 6 2 2 6 78 78 78
44199- 58 58 58 22 22 22 6 6 6 0 0 0
44200- 0 0 0 0 0 0 0 0 0 0 0 0
44201- 0 0 0 0 0 0 0 0 0 0 0 0
44202- 0 0 0 0 0 0 0 0 0 0 0 0
44203- 0 0 0 0 0 0 0 0 0 0 0 0
44204- 0 0 0 0 0 0 0 0 0 0 0 0
44205- 0 0 0 0 0 0 0 0 0 0 0 0
44206- 0 0 0 0 0 0 0 0 0 0 0 0
44207- 0 0 0 0 0 0 6 6 6 18 18 18
44208- 54 54 54 82 82 82 2 2 6 26 26 26
44209- 22 22 22 2 2 6 123 123 123 253 253 253
44210-253 253 253 253 253 253 253 253 253 253 253 253
44211-253 253 253 253 253 253 253 253 253 253 253 253
44212-253 253 253 253 253 253 253 253 253 253 253 253
44213-253 253 253 253 253 253 253 253 253 253 253 253
44214-253 253 253 253 253 253 253 253 253 253 253 253
44215-253 253 253 253 253 253 253 253 253 250 250 250
44216-238 238 238 198 198 198 6 6 6 38 38 38
44217- 58 58 58 26 26 26 38 38 38 2 2 6
44218- 2 2 6 2 2 6 2 2 6 46 46 46
44219- 78 78 78 30 30 30 10 10 10 0 0 0
44220- 0 0 0 0 0 0 0 0 0 0 0 0
44221- 0 0 0 0 0 0 0 0 0 0 0 0
44222- 0 0 0 0 0 0 0 0 0 0 0 0
44223- 0 0 0 0 0 0 0 0 0 0 0 0
44224- 0 0 0 0 0 0 0 0 0 0 0 0
44225- 0 0 0 0 0 0 0 0 0 0 0 0
44226- 0 0 0 0 0 0 0 0 0 0 0 0
44227- 0 0 0 0 0 0 10 10 10 30 30 30
44228- 74 74 74 58 58 58 2 2 6 42 42 42
44229- 2 2 6 22 22 22 231 231 231 253 253 253
44230-253 253 253 253 253 253 253 253 253 253 253 253
44231-253 253 253 253 253 253 253 253 253 250 250 250
44232-253 253 253 253 253 253 253 253 253 253 253 253
44233-253 253 253 253 253 253 253 253 253 253 253 253
44234-253 253 253 253 253 253 253 253 253 253 253 253
44235-253 253 253 253 253 253 253 253 253 253 253 253
44236-253 253 253 246 246 246 46 46 46 38 38 38
44237- 42 42 42 14 14 14 38 38 38 14 14 14
44238- 2 2 6 2 2 6 2 2 6 6 6 6
44239- 86 86 86 46 46 46 14 14 14 0 0 0
44240- 0 0 0 0 0 0 0 0 0 0 0 0
44241- 0 0 0 0 0 0 0 0 0 0 0 0
44242- 0 0 0 0 0 0 0 0 0 0 0 0
44243- 0 0 0 0 0 0 0 0 0 0 0 0
44244- 0 0 0 0 0 0 0 0 0 0 0 0
44245- 0 0 0 0 0 0 0 0 0 0 0 0
44246- 0 0 0 0 0 0 0 0 0 0 0 0
44247- 0 0 0 6 6 6 14 14 14 42 42 42
44248- 90 90 90 18 18 18 18 18 18 26 26 26
44249- 2 2 6 116 116 116 253 253 253 253 253 253
44250-253 253 253 253 253 253 253 253 253 253 253 253
44251-253 253 253 253 253 253 250 250 250 238 238 238
44252-253 253 253 253 253 253 253 253 253 253 253 253
44253-253 253 253 253 253 253 253 253 253 253 253 253
44254-253 253 253 253 253 253 253 253 253 253 253 253
44255-253 253 253 253 253 253 253 253 253 253 253 253
44256-253 253 253 253 253 253 94 94 94 6 6 6
44257- 2 2 6 2 2 6 10 10 10 34 34 34
44258- 2 2 6 2 2 6 2 2 6 2 2 6
44259- 74 74 74 58 58 58 22 22 22 6 6 6
44260- 0 0 0 0 0 0 0 0 0 0 0 0
44261- 0 0 0 0 0 0 0 0 0 0 0 0
44262- 0 0 0 0 0 0 0 0 0 0 0 0
44263- 0 0 0 0 0 0 0 0 0 0 0 0
44264- 0 0 0 0 0 0 0 0 0 0 0 0
44265- 0 0 0 0 0 0 0 0 0 0 0 0
44266- 0 0 0 0 0 0 0 0 0 0 0 0
44267- 0 0 0 10 10 10 26 26 26 66 66 66
44268- 82 82 82 2 2 6 38 38 38 6 6 6
44269- 14 14 14 210 210 210 253 253 253 253 253 253
44270-253 253 253 253 253 253 253 253 253 253 253 253
44271-253 253 253 253 253 253 246 246 246 242 242 242
44272-253 253 253 253 253 253 253 253 253 253 253 253
44273-253 253 253 253 253 253 253 253 253 253 253 253
44274-253 253 253 253 253 253 253 253 253 253 253 253
44275-253 253 253 253 253 253 253 253 253 253 253 253
44276-253 253 253 253 253 253 144 144 144 2 2 6
44277- 2 2 6 2 2 6 2 2 6 46 46 46
44278- 2 2 6 2 2 6 2 2 6 2 2 6
44279- 42 42 42 74 74 74 30 30 30 10 10 10
44280- 0 0 0 0 0 0 0 0 0 0 0 0
44281- 0 0 0 0 0 0 0 0 0 0 0 0
44282- 0 0 0 0 0 0 0 0 0 0 0 0
44283- 0 0 0 0 0 0 0 0 0 0 0 0
44284- 0 0 0 0 0 0 0 0 0 0 0 0
44285- 0 0 0 0 0 0 0 0 0 0 0 0
44286- 0 0 0 0 0 0 0 0 0 0 0 0
44287- 6 6 6 14 14 14 42 42 42 90 90 90
44288- 26 26 26 6 6 6 42 42 42 2 2 6
44289- 74 74 74 250 250 250 253 253 253 253 253 253
44290-253 253 253 253 253 253 253 253 253 253 253 253
44291-253 253 253 253 253 253 242 242 242 242 242 242
44292-253 253 253 253 253 253 253 253 253 253 253 253
44293-253 253 253 253 253 253 253 253 253 253 253 253
44294-253 253 253 253 253 253 253 253 253 253 253 253
44295-253 253 253 253 253 253 253 253 253 253 253 253
44296-253 253 253 253 253 253 182 182 182 2 2 6
44297- 2 2 6 2 2 6 2 2 6 46 46 46
44298- 2 2 6 2 2 6 2 2 6 2 2 6
44299- 10 10 10 86 86 86 38 38 38 10 10 10
44300- 0 0 0 0 0 0 0 0 0 0 0 0
44301- 0 0 0 0 0 0 0 0 0 0 0 0
44302- 0 0 0 0 0 0 0 0 0 0 0 0
44303- 0 0 0 0 0 0 0 0 0 0 0 0
44304- 0 0 0 0 0 0 0 0 0 0 0 0
44305- 0 0 0 0 0 0 0 0 0 0 0 0
44306- 0 0 0 0 0 0 0 0 0 0 0 0
44307- 10 10 10 26 26 26 66 66 66 82 82 82
44308- 2 2 6 22 22 22 18 18 18 2 2 6
44309-149 149 149 253 253 253 253 253 253 253 253 253
44310-253 253 253 253 253 253 253 253 253 253 253 253
44311-253 253 253 253 253 253 234 234 234 242 242 242
44312-253 253 253 253 253 253 253 253 253 253 253 253
44313-253 253 253 253 253 253 253 253 253 253 253 253
44314-253 253 253 253 253 253 253 253 253 253 253 253
44315-253 253 253 253 253 253 253 253 253 253 253 253
44316-253 253 253 253 253 253 206 206 206 2 2 6
44317- 2 2 6 2 2 6 2 2 6 38 38 38
44318- 2 2 6 2 2 6 2 2 6 2 2 6
44319- 6 6 6 86 86 86 46 46 46 14 14 14
44320- 0 0 0 0 0 0 0 0 0 0 0 0
44321- 0 0 0 0 0 0 0 0 0 0 0 0
44322- 0 0 0 0 0 0 0 0 0 0 0 0
44323- 0 0 0 0 0 0 0 0 0 0 0 0
44324- 0 0 0 0 0 0 0 0 0 0 0 0
44325- 0 0 0 0 0 0 0 0 0 0 0 0
44326- 0 0 0 0 0 0 0 0 0 6 6 6
44327- 18 18 18 46 46 46 86 86 86 18 18 18
44328- 2 2 6 34 34 34 10 10 10 6 6 6
44329-210 210 210 253 253 253 253 253 253 253 253 253
44330-253 253 253 253 253 253 253 253 253 253 253 253
44331-253 253 253 253 253 253 234 234 234 242 242 242
44332-253 253 253 253 253 253 253 253 253 253 253 253
44333-253 253 253 253 253 253 253 253 253 253 253 253
44334-253 253 253 253 253 253 253 253 253 253 253 253
44335-253 253 253 253 253 253 253 253 253 253 253 253
44336-253 253 253 253 253 253 221 221 221 6 6 6
44337- 2 2 6 2 2 6 6 6 6 30 30 30
44338- 2 2 6 2 2 6 2 2 6 2 2 6
44339- 2 2 6 82 82 82 54 54 54 18 18 18
44340- 6 6 6 0 0 0 0 0 0 0 0 0
44341- 0 0 0 0 0 0 0 0 0 0 0 0
44342- 0 0 0 0 0 0 0 0 0 0 0 0
44343- 0 0 0 0 0 0 0 0 0 0 0 0
44344- 0 0 0 0 0 0 0 0 0 0 0 0
44345- 0 0 0 0 0 0 0 0 0 0 0 0
44346- 0 0 0 0 0 0 0 0 0 10 10 10
44347- 26 26 26 66 66 66 62 62 62 2 2 6
44348- 2 2 6 38 38 38 10 10 10 26 26 26
44349-238 238 238 253 253 253 253 253 253 253 253 253
44350-253 253 253 253 253 253 253 253 253 253 253 253
44351-253 253 253 253 253 253 231 231 231 238 238 238
44352-253 253 253 253 253 253 253 253 253 253 253 253
44353-253 253 253 253 253 253 253 253 253 253 253 253
44354-253 253 253 253 253 253 253 253 253 253 253 253
44355-253 253 253 253 253 253 253 253 253 253 253 253
44356-253 253 253 253 253 253 231 231 231 6 6 6
44357- 2 2 6 2 2 6 10 10 10 30 30 30
44358- 2 2 6 2 2 6 2 2 6 2 2 6
44359- 2 2 6 66 66 66 58 58 58 22 22 22
44360- 6 6 6 0 0 0 0 0 0 0 0 0
44361- 0 0 0 0 0 0 0 0 0 0 0 0
44362- 0 0 0 0 0 0 0 0 0 0 0 0
44363- 0 0 0 0 0 0 0 0 0 0 0 0
44364- 0 0 0 0 0 0 0 0 0 0 0 0
44365- 0 0 0 0 0 0 0 0 0 0 0 0
44366- 0 0 0 0 0 0 0 0 0 10 10 10
44367- 38 38 38 78 78 78 6 6 6 2 2 6
44368- 2 2 6 46 46 46 14 14 14 42 42 42
44369-246 246 246 253 253 253 253 253 253 253 253 253
44370-253 253 253 253 253 253 253 253 253 253 253 253
44371-253 253 253 253 253 253 231 231 231 242 242 242
44372-253 253 253 253 253 253 253 253 253 253 253 253
44373-253 253 253 253 253 253 253 253 253 253 253 253
44374-253 253 253 253 253 253 253 253 253 253 253 253
44375-253 253 253 253 253 253 253 253 253 253 253 253
44376-253 253 253 253 253 253 234 234 234 10 10 10
44377- 2 2 6 2 2 6 22 22 22 14 14 14
44378- 2 2 6 2 2 6 2 2 6 2 2 6
44379- 2 2 6 66 66 66 62 62 62 22 22 22
44380- 6 6 6 0 0 0 0 0 0 0 0 0
44381- 0 0 0 0 0 0 0 0 0 0 0 0
44382- 0 0 0 0 0 0 0 0 0 0 0 0
44383- 0 0 0 0 0 0 0 0 0 0 0 0
44384- 0 0 0 0 0 0 0 0 0 0 0 0
44385- 0 0 0 0 0 0 0 0 0 0 0 0
44386- 0 0 0 0 0 0 6 6 6 18 18 18
44387- 50 50 50 74 74 74 2 2 6 2 2 6
44388- 14 14 14 70 70 70 34 34 34 62 62 62
44389-250 250 250 253 253 253 253 253 253 253 253 253
44390-253 253 253 253 253 253 253 253 253 253 253 253
44391-253 253 253 253 253 253 231 231 231 246 246 246
44392-253 253 253 253 253 253 253 253 253 253 253 253
44393-253 253 253 253 253 253 253 253 253 253 253 253
44394-253 253 253 253 253 253 253 253 253 253 253 253
44395-253 253 253 253 253 253 253 253 253 253 253 253
44396-253 253 253 253 253 253 234 234 234 14 14 14
44397- 2 2 6 2 2 6 30 30 30 2 2 6
44398- 2 2 6 2 2 6 2 2 6 2 2 6
44399- 2 2 6 66 66 66 62 62 62 22 22 22
44400- 6 6 6 0 0 0 0 0 0 0 0 0
44401- 0 0 0 0 0 0 0 0 0 0 0 0
44402- 0 0 0 0 0 0 0 0 0 0 0 0
44403- 0 0 0 0 0 0 0 0 0 0 0 0
44404- 0 0 0 0 0 0 0 0 0 0 0 0
44405- 0 0 0 0 0 0 0 0 0 0 0 0
44406- 0 0 0 0 0 0 6 6 6 18 18 18
44407- 54 54 54 62 62 62 2 2 6 2 2 6
44408- 2 2 6 30 30 30 46 46 46 70 70 70
44409-250 250 250 253 253 253 253 253 253 253 253 253
44410-253 253 253 253 253 253 253 253 253 253 253 253
44411-253 253 253 253 253 253 231 231 231 246 246 246
44412-253 253 253 253 253 253 253 253 253 253 253 253
44413-253 253 253 253 253 253 253 253 253 253 253 253
44414-253 253 253 253 253 253 253 253 253 253 253 253
44415-253 253 253 253 253 253 253 253 253 253 253 253
44416-253 253 253 253 253 253 226 226 226 10 10 10
44417- 2 2 6 6 6 6 30 30 30 2 2 6
44418- 2 2 6 2 2 6 2 2 6 2 2 6
44419- 2 2 6 66 66 66 58 58 58 22 22 22
44420- 6 6 6 0 0 0 0 0 0 0 0 0
44421- 0 0 0 0 0 0 0 0 0 0 0 0
44422- 0 0 0 0 0 0 0 0 0 0 0 0
44423- 0 0 0 0 0 0 0 0 0 0 0 0
44424- 0 0 0 0 0 0 0 0 0 0 0 0
44425- 0 0 0 0 0 0 0 0 0 0 0 0
44426- 0 0 0 0 0 0 6 6 6 22 22 22
44427- 58 58 58 62 62 62 2 2 6 2 2 6
44428- 2 2 6 2 2 6 30 30 30 78 78 78
44429-250 250 250 253 253 253 253 253 253 253 253 253
44430-253 253 253 253 253 253 253 253 253 253 253 253
44431-253 253 253 253 253 253 231 231 231 246 246 246
44432-253 253 253 253 253 253 253 253 253 253 253 253
44433-253 253 253 253 253 253 253 253 253 253 253 253
44434-253 253 253 253 253 253 253 253 253 253 253 253
44435-253 253 253 253 253 253 253 253 253 253 253 253
44436-253 253 253 253 253 253 206 206 206 2 2 6
44437- 22 22 22 34 34 34 18 14 6 22 22 22
44438- 26 26 26 18 18 18 6 6 6 2 2 6
44439- 2 2 6 82 82 82 54 54 54 18 18 18
44440- 6 6 6 0 0 0 0 0 0 0 0 0
44441- 0 0 0 0 0 0 0 0 0 0 0 0
44442- 0 0 0 0 0 0 0 0 0 0 0 0
44443- 0 0 0 0 0 0 0 0 0 0 0 0
44444- 0 0 0 0 0 0 0 0 0 0 0 0
44445- 0 0 0 0 0 0 0 0 0 0 0 0
44446- 0 0 0 0 0 0 6 6 6 26 26 26
44447- 62 62 62 106 106 106 74 54 14 185 133 11
44448-210 162 10 121 92 8 6 6 6 62 62 62
44449-238 238 238 253 253 253 253 253 253 253 253 253
44450-253 253 253 253 253 253 253 253 253 253 253 253
44451-253 253 253 253 253 253 231 231 231 246 246 246
44452-253 253 253 253 253 253 253 253 253 253 253 253
44453-253 253 253 253 253 253 253 253 253 253 253 253
44454-253 253 253 253 253 253 253 253 253 253 253 253
44455-253 253 253 253 253 253 253 253 253 253 253 253
44456-253 253 253 253 253 253 158 158 158 18 18 18
44457- 14 14 14 2 2 6 2 2 6 2 2 6
44458- 6 6 6 18 18 18 66 66 66 38 38 38
44459- 6 6 6 94 94 94 50 50 50 18 18 18
44460- 6 6 6 0 0 0 0 0 0 0 0 0
44461- 0 0 0 0 0 0 0 0 0 0 0 0
44462- 0 0 0 0 0 0 0 0 0 0 0 0
44463- 0 0 0 0 0 0 0 0 0 0 0 0
44464- 0 0 0 0 0 0 0 0 0 0 0 0
44465- 0 0 0 0 0 0 0 0 0 6 6 6
44466- 10 10 10 10 10 10 18 18 18 38 38 38
44467- 78 78 78 142 134 106 216 158 10 242 186 14
44468-246 190 14 246 190 14 156 118 10 10 10 10
44469- 90 90 90 238 238 238 253 253 253 253 253 253
44470-253 253 253 253 253 253 253 253 253 253 253 253
44471-253 253 253 253 253 253 231 231 231 250 250 250
44472-253 253 253 253 253 253 253 253 253 253 253 253
44473-253 253 253 253 253 253 253 253 253 253 253 253
44474-253 253 253 253 253 253 253 253 253 253 253 253
44475-253 253 253 253 253 253 253 253 253 246 230 190
44476-238 204 91 238 204 91 181 142 44 37 26 9
44477- 2 2 6 2 2 6 2 2 6 2 2 6
44478- 2 2 6 2 2 6 38 38 38 46 46 46
44479- 26 26 26 106 106 106 54 54 54 18 18 18
44480- 6 6 6 0 0 0 0 0 0 0 0 0
44481- 0 0 0 0 0 0 0 0 0 0 0 0
44482- 0 0 0 0 0 0 0 0 0 0 0 0
44483- 0 0 0 0 0 0 0 0 0 0 0 0
44484- 0 0 0 0 0 0 0 0 0 0 0 0
44485- 0 0 0 6 6 6 14 14 14 22 22 22
44486- 30 30 30 38 38 38 50 50 50 70 70 70
44487-106 106 106 190 142 34 226 170 11 242 186 14
44488-246 190 14 246 190 14 246 190 14 154 114 10
44489- 6 6 6 74 74 74 226 226 226 253 253 253
44490-253 253 253 253 253 253 253 253 253 253 253 253
44491-253 253 253 253 253 253 231 231 231 250 250 250
44492-253 253 253 253 253 253 253 253 253 253 253 253
44493-253 253 253 253 253 253 253 253 253 253 253 253
44494-253 253 253 253 253 253 253 253 253 253 253 253
44495-253 253 253 253 253 253 253 253 253 228 184 62
44496-241 196 14 241 208 19 232 195 16 38 30 10
44497- 2 2 6 2 2 6 2 2 6 2 2 6
44498- 2 2 6 6 6 6 30 30 30 26 26 26
44499-203 166 17 154 142 90 66 66 66 26 26 26
44500- 6 6 6 0 0 0 0 0 0 0 0 0
44501- 0 0 0 0 0 0 0 0 0 0 0 0
44502- 0 0 0 0 0 0 0 0 0 0 0 0
44503- 0 0 0 0 0 0 0 0 0 0 0 0
44504- 0 0 0 0 0 0 0 0 0 0 0 0
44505- 6 6 6 18 18 18 38 38 38 58 58 58
44506- 78 78 78 86 86 86 101 101 101 123 123 123
44507-175 146 61 210 150 10 234 174 13 246 186 14
44508-246 190 14 246 190 14 246 190 14 238 190 10
44509-102 78 10 2 2 6 46 46 46 198 198 198
44510-253 253 253 253 253 253 253 253 253 253 253 253
44511-253 253 253 253 253 253 234 234 234 242 242 242
44512-253 253 253 253 253 253 253 253 253 253 253 253
44513-253 253 253 253 253 253 253 253 253 253 253 253
44514-253 253 253 253 253 253 253 253 253 253 253 253
44515-253 253 253 253 253 253 253 253 253 224 178 62
44516-242 186 14 241 196 14 210 166 10 22 18 6
44517- 2 2 6 2 2 6 2 2 6 2 2 6
44518- 2 2 6 2 2 6 6 6 6 121 92 8
44519-238 202 15 232 195 16 82 82 82 34 34 34
44520- 10 10 10 0 0 0 0 0 0 0 0 0
44521- 0 0 0 0 0 0 0 0 0 0 0 0
44522- 0 0 0 0 0 0 0 0 0 0 0 0
44523- 0 0 0 0 0 0 0 0 0 0 0 0
44524- 0 0 0 0 0 0 0 0 0 0 0 0
44525- 14 14 14 38 38 38 70 70 70 154 122 46
44526-190 142 34 200 144 11 197 138 11 197 138 11
44527-213 154 11 226 170 11 242 186 14 246 190 14
44528-246 190 14 246 190 14 246 190 14 246 190 14
44529-225 175 15 46 32 6 2 2 6 22 22 22
44530-158 158 158 250 250 250 253 253 253 253 253 253
44531-253 253 253 253 253 253 253 253 253 253 253 253
44532-253 253 253 253 253 253 253 253 253 253 253 253
44533-253 253 253 253 253 253 253 253 253 253 253 253
44534-253 253 253 253 253 253 253 253 253 253 253 253
44535-253 253 253 250 250 250 242 242 242 224 178 62
44536-239 182 13 236 186 11 213 154 11 46 32 6
44537- 2 2 6 2 2 6 2 2 6 2 2 6
44538- 2 2 6 2 2 6 61 42 6 225 175 15
44539-238 190 10 236 186 11 112 100 78 42 42 42
44540- 14 14 14 0 0 0 0 0 0 0 0 0
44541- 0 0 0 0 0 0 0 0 0 0 0 0
44542- 0 0 0 0 0 0 0 0 0 0 0 0
44543- 0 0 0 0 0 0 0 0 0 0 0 0
44544- 0 0 0 0 0 0 0 0 0 6 6 6
44545- 22 22 22 54 54 54 154 122 46 213 154 11
44546-226 170 11 230 174 11 226 170 11 226 170 11
44547-236 178 12 242 186 14 246 190 14 246 190 14
44548-246 190 14 246 190 14 246 190 14 246 190 14
44549-241 196 14 184 144 12 10 10 10 2 2 6
44550- 6 6 6 116 116 116 242 242 242 253 253 253
44551-253 253 253 253 253 253 253 253 253 253 253 253
44552-253 253 253 253 253 253 253 253 253 253 253 253
44553-253 253 253 253 253 253 253 253 253 253 253 253
44554-253 253 253 253 253 253 253 253 253 253 253 253
44555-253 253 253 231 231 231 198 198 198 214 170 54
44556-236 178 12 236 178 12 210 150 10 137 92 6
44557- 18 14 6 2 2 6 2 2 6 2 2 6
44558- 6 6 6 70 47 6 200 144 11 236 178 12
44559-239 182 13 239 182 13 124 112 88 58 58 58
44560- 22 22 22 6 6 6 0 0 0 0 0 0
44561- 0 0 0 0 0 0 0 0 0 0 0 0
44562- 0 0 0 0 0 0 0 0 0 0 0 0
44563- 0 0 0 0 0 0 0 0 0 0 0 0
44564- 0 0 0 0 0 0 0 0 0 10 10 10
44565- 30 30 30 70 70 70 180 133 36 226 170 11
44566-239 182 13 242 186 14 242 186 14 246 186 14
44567-246 190 14 246 190 14 246 190 14 246 190 14
44568-246 190 14 246 190 14 246 190 14 246 190 14
44569-246 190 14 232 195 16 98 70 6 2 2 6
44570- 2 2 6 2 2 6 66 66 66 221 221 221
44571-253 253 253 253 253 253 253 253 253 253 253 253
44572-253 253 253 253 253 253 253 253 253 253 253 253
44573-253 253 253 253 253 253 253 253 253 253 253 253
44574-253 253 253 253 253 253 253 253 253 253 253 253
44575-253 253 253 206 206 206 198 198 198 214 166 58
44576-230 174 11 230 174 11 216 158 10 192 133 9
44577-163 110 8 116 81 8 102 78 10 116 81 8
44578-167 114 7 197 138 11 226 170 11 239 182 13
44579-242 186 14 242 186 14 162 146 94 78 78 78
44580- 34 34 34 14 14 14 6 6 6 0 0 0
44581- 0 0 0 0 0 0 0 0 0 0 0 0
44582- 0 0 0 0 0 0 0 0 0 0 0 0
44583- 0 0 0 0 0 0 0 0 0 0 0 0
44584- 0 0 0 0 0 0 0 0 0 6 6 6
44585- 30 30 30 78 78 78 190 142 34 226 170 11
44586-239 182 13 246 190 14 246 190 14 246 190 14
44587-246 190 14 246 190 14 246 190 14 246 190 14
44588-246 190 14 246 190 14 246 190 14 246 190 14
44589-246 190 14 241 196 14 203 166 17 22 18 6
44590- 2 2 6 2 2 6 2 2 6 38 38 38
44591-218 218 218 253 253 253 253 253 253 253 253 253
44592-253 253 253 253 253 253 253 253 253 253 253 253
44593-253 253 253 253 253 253 253 253 253 253 253 253
44594-253 253 253 253 253 253 253 253 253 253 253 253
44595-250 250 250 206 206 206 198 198 198 202 162 69
44596-226 170 11 236 178 12 224 166 10 210 150 10
44597-200 144 11 197 138 11 192 133 9 197 138 11
44598-210 150 10 226 170 11 242 186 14 246 190 14
44599-246 190 14 246 186 14 225 175 15 124 112 88
44600- 62 62 62 30 30 30 14 14 14 6 6 6
44601- 0 0 0 0 0 0 0 0 0 0 0 0
44602- 0 0 0 0 0 0 0 0 0 0 0 0
44603- 0 0 0 0 0 0 0 0 0 0 0 0
44604- 0 0 0 0 0 0 0 0 0 10 10 10
44605- 30 30 30 78 78 78 174 135 50 224 166 10
44606-239 182 13 246 190 14 246 190 14 246 190 14
44607-246 190 14 246 190 14 246 190 14 246 190 14
44608-246 190 14 246 190 14 246 190 14 246 190 14
44609-246 190 14 246 190 14 241 196 14 139 102 15
44610- 2 2 6 2 2 6 2 2 6 2 2 6
44611- 78 78 78 250 250 250 253 253 253 253 253 253
44612-253 253 253 253 253 253 253 253 253 253 253 253
44613-253 253 253 253 253 253 253 253 253 253 253 253
44614-253 253 253 253 253 253 253 253 253 253 253 253
44615-250 250 250 214 214 214 198 198 198 190 150 46
44616-219 162 10 236 178 12 234 174 13 224 166 10
44617-216 158 10 213 154 11 213 154 11 216 158 10
44618-226 170 11 239 182 13 246 190 14 246 190 14
44619-246 190 14 246 190 14 242 186 14 206 162 42
44620-101 101 101 58 58 58 30 30 30 14 14 14
44621- 6 6 6 0 0 0 0 0 0 0 0 0
44622- 0 0 0 0 0 0 0 0 0 0 0 0
44623- 0 0 0 0 0 0 0 0 0 0 0 0
44624- 0 0 0 0 0 0 0 0 0 10 10 10
44625- 30 30 30 74 74 74 174 135 50 216 158 10
44626-236 178 12 246 190 14 246 190 14 246 190 14
44627-246 190 14 246 190 14 246 190 14 246 190 14
44628-246 190 14 246 190 14 246 190 14 246 190 14
44629-246 190 14 246 190 14 241 196 14 226 184 13
44630- 61 42 6 2 2 6 2 2 6 2 2 6
44631- 22 22 22 238 238 238 253 253 253 253 253 253
44632-253 253 253 253 253 253 253 253 253 253 253 253
44633-253 253 253 253 253 253 253 253 253 253 253 253
44634-253 253 253 253 253 253 253 253 253 253 253 253
44635-253 253 253 226 226 226 187 187 187 180 133 36
44636-216 158 10 236 178 12 239 182 13 236 178 12
44637-230 174 11 226 170 11 226 170 11 230 174 11
44638-236 178 12 242 186 14 246 190 14 246 190 14
44639-246 190 14 246 190 14 246 186 14 239 182 13
44640-206 162 42 106 106 106 66 66 66 34 34 34
44641- 14 14 14 6 6 6 0 0 0 0 0 0
44642- 0 0 0 0 0 0 0 0 0 0 0 0
44643- 0 0 0 0 0 0 0 0 0 0 0 0
44644- 0 0 0 0 0 0 0 0 0 6 6 6
44645- 26 26 26 70 70 70 163 133 67 213 154 11
44646-236 178 12 246 190 14 246 190 14 246 190 14
44647-246 190 14 246 190 14 246 190 14 246 190 14
44648-246 190 14 246 190 14 246 190 14 246 190 14
44649-246 190 14 246 190 14 246 190 14 241 196 14
44650-190 146 13 18 14 6 2 2 6 2 2 6
44651- 46 46 46 246 246 246 253 253 253 253 253 253
44652-253 253 253 253 253 253 253 253 253 253 253 253
44653-253 253 253 253 253 253 253 253 253 253 253 253
44654-253 253 253 253 253 253 253 253 253 253 253 253
44655-253 253 253 221 221 221 86 86 86 156 107 11
44656-216 158 10 236 178 12 242 186 14 246 186 14
44657-242 186 14 239 182 13 239 182 13 242 186 14
44658-242 186 14 246 186 14 246 190 14 246 190 14
44659-246 190 14 246 190 14 246 190 14 246 190 14
44660-242 186 14 225 175 15 142 122 72 66 66 66
44661- 30 30 30 10 10 10 0 0 0 0 0 0
44662- 0 0 0 0 0 0 0 0 0 0 0 0
44663- 0 0 0 0 0 0 0 0 0 0 0 0
44664- 0 0 0 0 0 0 0 0 0 6 6 6
44665- 26 26 26 70 70 70 163 133 67 210 150 10
44666-236 178 12 246 190 14 246 190 14 246 190 14
44667-246 190 14 246 190 14 246 190 14 246 190 14
44668-246 190 14 246 190 14 246 190 14 246 190 14
44669-246 190 14 246 190 14 246 190 14 246 190 14
44670-232 195 16 121 92 8 34 34 34 106 106 106
44671-221 221 221 253 253 253 253 253 253 253 253 253
44672-253 253 253 253 253 253 253 253 253 253 253 253
44673-253 253 253 253 253 253 253 253 253 253 253 253
44674-253 253 253 253 253 253 253 253 253 253 253 253
44675-242 242 242 82 82 82 18 14 6 163 110 8
44676-216 158 10 236 178 12 242 186 14 246 190 14
44677-246 190 14 246 190 14 246 190 14 246 190 14
44678-246 190 14 246 190 14 246 190 14 246 190 14
44679-246 190 14 246 190 14 246 190 14 246 190 14
44680-246 190 14 246 190 14 242 186 14 163 133 67
44681- 46 46 46 18 18 18 6 6 6 0 0 0
44682- 0 0 0 0 0 0 0 0 0 0 0 0
44683- 0 0 0 0 0 0 0 0 0 0 0 0
44684- 0 0 0 0 0 0 0 0 0 10 10 10
44685- 30 30 30 78 78 78 163 133 67 210 150 10
44686-236 178 12 246 186 14 246 190 14 246 190 14
44687-246 190 14 246 190 14 246 190 14 246 190 14
44688-246 190 14 246 190 14 246 190 14 246 190 14
44689-246 190 14 246 190 14 246 190 14 246 190 14
44690-241 196 14 215 174 15 190 178 144 253 253 253
44691-253 253 253 253 253 253 253 253 253 253 253 253
44692-253 253 253 253 253 253 253 253 253 253 253 253
44693-253 253 253 253 253 253 253 253 253 253 253 253
44694-253 253 253 253 253 253 253 253 253 218 218 218
44695- 58 58 58 2 2 6 22 18 6 167 114 7
44696-216 158 10 236 178 12 246 186 14 246 190 14
44697-246 190 14 246 190 14 246 190 14 246 190 14
44698-246 190 14 246 190 14 246 190 14 246 190 14
44699-246 190 14 246 190 14 246 190 14 246 190 14
44700-246 190 14 246 186 14 242 186 14 190 150 46
44701- 54 54 54 22 22 22 6 6 6 0 0 0
44702- 0 0 0 0 0 0 0 0 0 0 0 0
44703- 0 0 0 0 0 0 0 0 0 0 0 0
44704- 0 0 0 0 0 0 0 0 0 14 14 14
44705- 38 38 38 86 86 86 180 133 36 213 154 11
44706-236 178 12 246 186 14 246 190 14 246 190 14
44707-246 190 14 246 190 14 246 190 14 246 190 14
44708-246 190 14 246 190 14 246 190 14 246 190 14
44709-246 190 14 246 190 14 246 190 14 246 190 14
44710-246 190 14 232 195 16 190 146 13 214 214 214
44711-253 253 253 253 253 253 253 253 253 253 253 253
44712-253 253 253 253 253 253 253 253 253 253 253 253
44713-253 253 253 253 253 253 253 253 253 253 253 253
44714-253 253 253 250 250 250 170 170 170 26 26 26
44715- 2 2 6 2 2 6 37 26 9 163 110 8
44716-219 162 10 239 182 13 246 186 14 246 190 14
44717-246 190 14 246 190 14 246 190 14 246 190 14
44718-246 190 14 246 190 14 246 190 14 246 190 14
44719-246 190 14 246 190 14 246 190 14 246 190 14
44720-246 186 14 236 178 12 224 166 10 142 122 72
44721- 46 46 46 18 18 18 6 6 6 0 0 0
44722- 0 0 0 0 0 0 0 0 0 0 0 0
44723- 0 0 0 0 0 0 0 0 0 0 0 0
44724- 0 0 0 0 0 0 6 6 6 18 18 18
44725- 50 50 50 109 106 95 192 133 9 224 166 10
44726-242 186 14 246 190 14 246 190 14 246 190 14
44727-246 190 14 246 190 14 246 190 14 246 190 14
44728-246 190 14 246 190 14 246 190 14 246 190 14
44729-246 190 14 246 190 14 246 190 14 246 190 14
44730-242 186 14 226 184 13 210 162 10 142 110 46
44731-226 226 226 253 253 253 253 253 253 253 253 253
44732-253 253 253 253 253 253 253 253 253 253 253 253
44733-253 253 253 253 253 253 253 253 253 253 253 253
44734-198 198 198 66 66 66 2 2 6 2 2 6
44735- 2 2 6 2 2 6 50 34 6 156 107 11
44736-219 162 10 239 182 13 246 186 14 246 190 14
44737-246 190 14 246 190 14 246 190 14 246 190 14
44738-246 190 14 246 190 14 246 190 14 246 190 14
44739-246 190 14 246 190 14 246 190 14 242 186 14
44740-234 174 13 213 154 11 154 122 46 66 66 66
44741- 30 30 30 10 10 10 0 0 0 0 0 0
44742- 0 0 0 0 0 0 0 0 0 0 0 0
44743- 0 0 0 0 0 0 0 0 0 0 0 0
44744- 0 0 0 0 0 0 6 6 6 22 22 22
44745- 58 58 58 154 121 60 206 145 10 234 174 13
44746-242 186 14 246 186 14 246 190 14 246 190 14
44747-246 190 14 246 190 14 246 190 14 246 190 14
44748-246 190 14 246 190 14 246 190 14 246 190 14
44749-246 190 14 246 190 14 246 190 14 246 190 14
44750-246 186 14 236 178 12 210 162 10 163 110 8
44751- 61 42 6 138 138 138 218 218 218 250 250 250
44752-253 253 253 253 253 253 253 253 253 250 250 250
44753-242 242 242 210 210 210 144 144 144 66 66 66
44754- 6 6 6 2 2 6 2 2 6 2 2 6
44755- 2 2 6 2 2 6 61 42 6 163 110 8
44756-216 158 10 236 178 12 246 190 14 246 190 14
44757-246 190 14 246 190 14 246 190 14 246 190 14
44758-246 190 14 246 190 14 246 190 14 246 190 14
44759-246 190 14 239 182 13 230 174 11 216 158 10
44760-190 142 34 124 112 88 70 70 70 38 38 38
44761- 18 18 18 6 6 6 0 0 0 0 0 0
44762- 0 0 0 0 0 0 0 0 0 0 0 0
44763- 0 0 0 0 0 0 0 0 0 0 0 0
44764- 0 0 0 0 0 0 6 6 6 22 22 22
44765- 62 62 62 168 124 44 206 145 10 224 166 10
44766-236 178 12 239 182 13 242 186 14 242 186 14
44767-246 186 14 246 190 14 246 190 14 246 190 14
44768-246 190 14 246 190 14 246 190 14 246 190 14
44769-246 190 14 246 190 14 246 190 14 246 190 14
44770-246 190 14 236 178 12 216 158 10 175 118 6
44771- 80 54 7 2 2 6 6 6 6 30 30 30
44772- 54 54 54 62 62 62 50 50 50 38 38 38
44773- 14 14 14 2 2 6 2 2 6 2 2 6
44774- 2 2 6 2 2 6 2 2 6 2 2 6
44775- 2 2 6 6 6 6 80 54 7 167 114 7
44776-213 154 11 236 178 12 246 190 14 246 190 14
44777-246 190 14 246 190 14 246 190 14 246 190 14
44778-246 190 14 242 186 14 239 182 13 239 182 13
44779-230 174 11 210 150 10 174 135 50 124 112 88
44780- 82 82 82 54 54 54 34 34 34 18 18 18
44781- 6 6 6 0 0 0 0 0 0 0 0 0
44782- 0 0 0 0 0 0 0 0 0 0 0 0
44783- 0 0 0 0 0 0 0 0 0 0 0 0
44784- 0 0 0 0 0 0 6 6 6 18 18 18
44785- 50 50 50 158 118 36 192 133 9 200 144 11
44786-216 158 10 219 162 10 224 166 10 226 170 11
44787-230 174 11 236 178 12 239 182 13 239 182 13
44788-242 186 14 246 186 14 246 190 14 246 190 14
44789-246 190 14 246 190 14 246 190 14 246 190 14
44790-246 186 14 230 174 11 210 150 10 163 110 8
44791-104 69 6 10 10 10 2 2 6 2 2 6
44792- 2 2 6 2 2 6 2 2 6 2 2 6
44793- 2 2 6 2 2 6 2 2 6 2 2 6
44794- 2 2 6 2 2 6 2 2 6 2 2 6
44795- 2 2 6 6 6 6 91 60 6 167 114 7
44796-206 145 10 230 174 11 242 186 14 246 190 14
44797-246 190 14 246 190 14 246 186 14 242 186 14
44798-239 182 13 230 174 11 224 166 10 213 154 11
44799-180 133 36 124 112 88 86 86 86 58 58 58
44800- 38 38 38 22 22 22 10 10 10 6 6 6
44801- 0 0 0 0 0 0 0 0 0 0 0 0
44802- 0 0 0 0 0 0 0 0 0 0 0 0
44803- 0 0 0 0 0 0 0 0 0 0 0 0
44804- 0 0 0 0 0 0 0 0 0 14 14 14
44805- 34 34 34 70 70 70 138 110 50 158 118 36
44806-167 114 7 180 123 7 192 133 9 197 138 11
44807-200 144 11 206 145 10 213 154 11 219 162 10
44808-224 166 10 230 174 11 239 182 13 242 186 14
44809-246 186 14 246 186 14 246 186 14 246 186 14
44810-239 182 13 216 158 10 185 133 11 152 99 6
44811-104 69 6 18 14 6 2 2 6 2 2 6
44812- 2 2 6 2 2 6 2 2 6 2 2 6
44813- 2 2 6 2 2 6 2 2 6 2 2 6
44814- 2 2 6 2 2 6 2 2 6 2 2 6
44815- 2 2 6 6 6 6 80 54 7 152 99 6
44816-192 133 9 219 162 10 236 178 12 239 182 13
44817-246 186 14 242 186 14 239 182 13 236 178 12
44818-224 166 10 206 145 10 192 133 9 154 121 60
44819- 94 94 94 62 62 62 42 42 42 22 22 22
44820- 14 14 14 6 6 6 0 0 0 0 0 0
44821- 0 0 0 0 0 0 0 0 0 0 0 0
44822- 0 0 0 0 0 0 0 0 0 0 0 0
44823- 0 0 0 0 0 0 0 0 0 0 0 0
44824- 0 0 0 0 0 0 0 0 0 6 6 6
44825- 18 18 18 34 34 34 58 58 58 78 78 78
44826-101 98 89 124 112 88 142 110 46 156 107 11
44827-163 110 8 167 114 7 175 118 6 180 123 7
44828-185 133 11 197 138 11 210 150 10 219 162 10
44829-226 170 11 236 178 12 236 178 12 234 174 13
44830-219 162 10 197 138 11 163 110 8 130 83 6
44831- 91 60 6 10 10 10 2 2 6 2 2 6
44832- 18 18 18 38 38 38 38 38 38 38 38 38
44833- 38 38 38 38 38 38 38 38 38 38 38 38
44834- 38 38 38 38 38 38 26 26 26 2 2 6
44835- 2 2 6 6 6 6 70 47 6 137 92 6
44836-175 118 6 200 144 11 219 162 10 230 174 11
44837-234 174 13 230 174 11 219 162 10 210 150 10
44838-192 133 9 163 110 8 124 112 88 82 82 82
44839- 50 50 50 30 30 30 14 14 14 6 6 6
44840- 0 0 0 0 0 0 0 0 0 0 0 0
44841- 0 0 0 0 0 0 0 0 0 0 0 0
44842- 0 0 0 0 0 0 0 0 0 0 0 0
44843- 0 0 0 0 0 0 0 0 0 0 0 0
44844- 0 0 0 0 0 0 0 0 0 0 0 0
44845- 6 6 6 14 14 14 22 22 22 34 34 34
44846- 42 42 42 58 58 58 74 74 74 86 86 86
44847-101 98 89 122 102 70 130 98 46 121 87 25
44848-137 92 6 152 99 6 163 110 8 180 123 7
44849-185 133 11 197 138 11 206 145 10 200 144 11
44850-180 123 7 156 107 11 130 83 6 104 69 6
44851- 50 34 6 54 54 54 110 110 110 101 98 89
44852- 86 86 86 82 82 82 78 78 78 78 78 78
44853- 78 78 78 78 78 78 78 78 78 78 78 78
44854- 78 78 78 82 82 82 86 86 86 94 94 94
44855-106 106 106 101 101 101 86 66 34 124 80 6
44856-156 107 11 180 123 7 192 133 9 200 144 11
44857-206 145 10 200 144 11 192 133 9 175 118 6
44858-139 102 15 109 106 95 70 70 70 42 42 42
44859- 22 22 22 10 10 10 0 0 0 0 0 0
44860- 0 0 0 0 0 0 0 0 0 0 0 0
44861- 0 0 0 0 0 0 0 0 0 0 0 0
44862- 0 0 0 0 0 0 0 0 0 0 0 0
44863- 0 0 0 0 0 0 0 0 0 0 0 0
44864- 0 0 0 0 0 0 0 0 0 0 0 0
44865- 0 0 0 0 0 0 6 6 6 10 10 10
44866- 14 14 14 22 22 22 30 30 30 38 38 38
44867- 50 50 50 62 62 62 74 74 74 90 90 90
44868-101 98 89 112 100 78 121 87 25 124 80 6
44869-137 92 6 152 99 6 152 99 6 152 99 6
44870-138 86 6 124 80 6 98 70 6 86 66 30
44871-101 98 89 82 82 82 58 58 58 46 46 46
44872- 38 38 38 34 34 34 34 34 34 34 34 34
44873- 34 34 34 34 34 34 34 34 34 34 34 34
44874- 34 34 34 34 34 34 38 38 38 42 42 42
44875- 54 54 54 82 82 82 94 86 76 91 60 6
44876-134 86 6 156 107 11 167 114 7 175 118 6
44877-175 118 6 167 114 7 152 99 6 121 87 25
44878-101 98 89 62 62 62 34 34 34 18 18 18
44879- 6 6 6 0 0 0 0 0 0 0 0 0
44880- 0 0 0 0 0 0 0 0 0 0 0 0
44881- 0 0 0 0 0 0 0 0 0 0 0 0
44882- 0 0 0 0 0 0 0 0 0 0 0 0
44883- 0 0 0 0 0 0 0 0 0 0 0 0
44884- 0 0 0 0 0 0 0 0 0 0 0 0
44885- 0 0 0 0 0 0 0 0 0 0 0 0
44886- 0 0 0 6 6 6 6 6 6 10 10 10
44887- 18 18 18 22 22 22 30 30 30 42 42 42
44888- 50 50 50 66 66 66 86 86 86 101 98 89
44889-106 86 58 98 70 6 104 69 6 104 69 6
44890-104 69 6 91 60 6 82 62 34 90 90 90
44891- 62 62 62 38 38 38 22 22 22 14 14 14
44892- 10 10 10 10 10 10 10 10 10 10 10 10
44893- 10 10 10 10 10 10 6 6 6 10 10 10
44894- 10 10 10 10 10 10 10 10 10 14 14 14
44895- 22 22 22 42 42 42 70 70 70 89 81 66
44896- 80 54 7 104 69 6 124 80 6 137 92 6
44897-134 86 6 116 81 8 100 82 52 86 86 86
44898- 58 58 58 30 30 30 14 14 14 6 6 6
44899- 0 0 0 0 0 0 0 0 0 0 0 0
44900- 0 0 0 0 0 0 0 0 0 0 0 0
44901- 0 0 0 0 0 0 0 0 0 0 0 0
44902- 0 0 0 0 0 0 0 0 0 0 0 0
44903- 0 0 0 0 0 0 0 0 0 0 0 0
44904- 0 0 0 0 0 0 0 0 0 0 0 0
44905- 0 0 0 0 0 0 0 0 0 0 0 0
44906- 0 0 0 0 0 0 0 0 0 0 0 0
44907- 0 0 0 6 6 6 10 10 10 14 14 14
44908- 18 18 18 26 26 26 38 38 38 54 54 54
44909- 70 70 70 86 86 86 94 86 76 89 81 66
44910- 89 81 66 86 86 86 74 74 74 50 50 50
44911- 30 30 30 14 14 14 6 6 6 0 0 0
44912- 0 0 0 0 0 0 0 0 0 0 0 0
44913- 0 0 0 0 0 0 0 0 0 0 0 0
44914- 0 0 0 0 0 0 0 0 0 0 0 0
44915- 6 6 6 18 18 18 34 34 34 58 58 58
44916- 82 82 82 89 81 66 89 81 66 89 81 66
44917- 94 86 66 94 86 76 74 74 74 50 50 50
44918- 26 26 26 14 14 14 6 6 6 0 0 0
44919- 0 0 0 0 0 0 0 0 0 0 0 0
44920- 0 0 0 0 0 0 0 0 0 0 0 0
44921- 0 0 0 0 0 0 0 0 0 0 0 0
44922- 0 0 0 0 0 0 0 0 0 0 0 0
44923- 0 0 0 0 0 0 0 0 0 0 0 0
44924- 0 0 0 0 0 0 0 0 0 0 0 0
44925- 0 0 0 0 0 0 0 0 0 0 0 0
44926- 0 0 0 0 0 0 0 0 0 0 0 0
44927- 0 0 0 0 0 0 0 0 0 0 0 0
44928- 6 6 6 6 6 6 14 14 14 18 18 18
44929- 30 30 30 38 38 38 46 46 46 54 54 54
44930- 50 50 50 42 42 42 30 30 30 18 18 18
44931- 10 10 10 0 0 0 0 0 0 0 0 0
44932- 0 0 0 0 0 0 0 0 0 0 0 0
44933- 0 0 0 0 0 0 0 0 0 0 0 0
44934- 0 0 0 0 0 0 0 0 0 0 0 0
44935- 0 0 0 6 6 6 14 14 14 26 26 26
44936- 38 38 38 50 50 50 58 58 58 58 58 58
44937- 54 54 54 42 42 42 30 30 30 18 18 18
44938- 10 10 10 0 0 0 0 0 0 0 0 0
44939- 0 0 0 0 0 0 0 0 0 0 0 0
44940- 0 0 0 0 0 0 0 0 0 0 0 0
44941- 0 0 0 0 0 0 0 0 0 0 0 0
44942- 0 0 0 0 0 0 0 0 0 0 0 0
44943- 0 0 0 0 0 0 0 0 0 0 0 0
44944- 0 0 0 0 0 0 0 0 0 0 0 0
44945- 0 0 0 0 0 0 0 0 0 0 0 0
44946- 0 0 0 0 0 0 0 0 0 0 0 0
44947- 0 0 0 0 0 0 0 0 0 0 0 0
44948- 0 0 0 0 0 0 0 0 0 6 6 6
44949- 6 6 6 10 10 10 14 14 14 18 18 18
44950- 18 18 18 14 14 14 10 10 10 6 6 6
44951- 0 0 0 0 0 0 0 0 0 0 0 0
44952- 0 0 0 0 0 0 0 0 0 0 0 0
44953- 0 0 0 0 0 0 0 0 0 0 0 0
44954- 0 0 0 0 0 0 0 0 0 0 0 0
44955- 0 0 0 0 0 0 0 0 0 6 6 6
44956- 14 14 14 18 18 18 22 22 22 22 22 22
44957- 18 18 18 14 14 14 10 10 10 6 6 6
44958- 0 0 0 0 0 0 0 0 0 0 0 0
44959- 0 0 0 0 0 0 0 0 0 0 0 0
44960- 0 0 0 0 0 0 0 0 0 0 0 0
44961- 0 0 0 0 0 0 0 0 0 0 0 0
44962- 0 0 0 0 0 0 0 0 0 0 0 0
44963+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44974+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44975+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44976+4 4 4 4 4 4
44977+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44988+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44989+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44990+4 4 4 4 4 4
44991+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45002+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45003+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45004+4 4 4 4 4 4
45005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45018+4 4 4 4 4 4
45019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45032+4 4 4 4 4 4
45033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45046+4 4 4 4 4 4
45047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45051+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45052+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45056+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45057+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45058+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45060+4 4 4 4 4 4
45061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45065+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45066+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45067+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45070+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45071+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45072+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45073+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45074+4 4 4 4 4 4
45075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45079+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45080+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45081+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45084+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45085+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45086+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45087+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45088+4 4 4 4 4 4
45089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45092+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45093+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45094+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45095+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45097+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45098+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45099+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45100+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45101+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45102+4 4 4 4 4 4
45103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45106+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45107+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45108+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45109+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45110+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45111+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45112+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45113+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45114+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45115+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45116+4 4 4 4 4 4
45117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45120+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45121+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45122+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45123+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45124+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45125+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45126+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45127+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45128+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45129+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45130+4 4 4 4 4 4
45131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45133+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45134+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45135+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45136+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45137+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45138+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45139+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45140+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45141+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45142+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45143+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45144+4 4 4 4 4 4
45145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45147+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45148+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45149+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45150+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45151+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45152+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45153+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45154+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45155+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45156+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45157+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45158+4 4 4 4 4 4
45159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45161+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45162+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45163+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45164+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45165+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45166+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45167+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45168+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45169+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45170+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45171+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45172+4 4 4 4 4 4
45173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45175+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45176+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45177+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45178+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45179+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45180+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45181+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45182+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45183+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45184+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45185+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45186+4 4 4 4 4 4
45187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45188+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45189+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45190+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45191+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45192+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45193+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45194+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45195+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45196+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45197+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45198+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45199+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45200+4 4 4 4 4 4
45201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45202+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45203+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45204+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45205+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45206+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45207+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45208+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45209+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45210+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45211+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45212+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45213+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45214+0 0 0 4 4 4
45215+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45216+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45217+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45218+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45219+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45220+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45221+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45222+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45223+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45224+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45225+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45226+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45227+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45228+2 0 0 0 0 0
45229+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45230+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45231+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45232+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45233+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45234+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45235+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45236+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45237+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45238+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45239+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45240+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45241+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45242+37 38 37 0 0 0
45243+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45244+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45245+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45246+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45247+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45248+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45249+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45250+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45251+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45252+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45253+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45254+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45255+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45256+85 115 134 4 0 0
45257+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45258+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45259+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45260+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45261+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45262+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45263+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45264+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45265+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45266+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45267+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45268+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45269+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45270+60 73 81 4 0 0
45271+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45272+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45273+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45274+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45275+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45276+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45277+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45278+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45279+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45280+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45281+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45282+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45283+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45284+16 19 21 4 0 0
45285+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45286+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45287+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45288+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45289+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45290+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45291+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45292+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45293+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45294+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45295+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45296+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45297+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45298+4 0 0 4 3 3
45299+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45300+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45301+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45303+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45304+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45305+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45306+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45307+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45308+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45309+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45310+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45311+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45312+3 2 2 4 4 4
45313+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45314+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45315+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45316+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45317+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45318+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45319+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45320+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45321+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45322+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45323+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45324+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45325+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45326+4 4 4 4 4 4
45327+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45328+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45329+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45330+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45331+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45332+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45333+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45334+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45335+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45336+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45337+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45338+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45339+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45340+4 4 4 4 4 4
45341+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45342+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45343+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45344+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45345+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45346+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45347+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45348+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45349+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45350+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45351+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45352+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45353+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45354+5 5 5 5 5 5
45355+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45356+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45357+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45358+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45359+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45360+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45361+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45362+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45363+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45364+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45365+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45366+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45367+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45368+5 5 5 4 4 4
45369+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45370+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45371+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45372+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45373+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45374+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45375+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45376+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45377+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45378+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45379+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45380+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45381+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45382+4 4 4 4 4 4
45383+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45384+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45385+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45386+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45387+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45388+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45389+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45390+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45391+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45392+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45393+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45394+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45395+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45396+4 4 4 4 4 4
45397+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45398+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45399+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45400+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45401+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45402+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45403+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45404+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45405+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45406+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45407+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45409+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45410+4 4 4 4 4 4
45411+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45412+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45413+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45414+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45415+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45416+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45417+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45418+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45419+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45420+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45421+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45424+4 4 4 4 4 4
45425+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45426+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45427+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45428+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45429+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45430+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45431+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45432+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45433+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45434+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45435+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45438+4 4 4 4 4 4
45439+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45440+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45441+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45442+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45443+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45444+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45445+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45446+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45447+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45448+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45449+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45452+4 4 4 4 4 4
45453+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45454+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45455+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45456+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45457+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45458+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45459+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45460+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45461+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45462+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45463+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45466+4 4 4 4 4 4
45467+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45468+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45469+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45470+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45471+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45472+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45473+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45474+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45475+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45476+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45477+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45480+4 4 4 4 4 4
45481+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45482+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45483+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45484+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45485+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45486+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45487+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45488+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45489+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45490+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45491+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45494+4 4 4 4 4 4
45495+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45496+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45497+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45498+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45499+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45500+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45501+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45502+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45503+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45504+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45505+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45508+4 4 4 4 4 4
45509+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45510+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45511+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45512+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45513+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45514+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45515+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45516+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45517+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45518+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45519+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45522+4 4 4 4 4 4
45523+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45524+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45525+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45526+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45527+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45528+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45529+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45530+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45531+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45532+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45533+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45536+4 4 4 4 4 4
45537+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45538+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45539+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45540+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45541+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45542+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45543+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45544+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45545+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45546+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45547+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550+4 4 4 4 4 4
45551+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45552+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45553+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45554+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45555+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45556+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45557+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45558+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45559+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45560+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45561+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564+4 4 4 4 4 4
45565+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45566+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45567+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45568+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45569+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45570+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45571+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45572+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45573+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45574+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45575+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578+4 4 4 4 4 4
45579+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45580+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45581+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45582+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45583+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45584+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45585+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45586+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45587+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45588+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45589+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592+4 4 4 4 4 4
45593+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45594+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45595+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45596+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45597+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45598+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45599+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45600+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45601+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45602+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45603+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606+4 4 4 4 4 4
45607+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45608+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45609+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45610+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45611+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45612+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45613+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45614+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45615+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45616+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45617+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620+4 4 4 4 4 4
45621+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45622+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45623+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45624+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45625+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45626+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45627+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45628+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45629+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45630+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45631+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634+4 4 4 4 4 4
45635+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45636+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45637+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45638+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45639+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45640+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45641+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45642+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45643+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45644+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45645+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648+4 4 4 4 4 4
45649+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45650+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45651+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45652+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45653+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45654+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45655+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45656+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45657+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45658+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45659+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662+4 4 4 4 4 4
45663+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45664+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45665+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45666+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45667+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45668+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45669+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45670+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45671+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45672+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45673+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676+4 4 4 4 4 4
45677+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45678+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45679+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45680+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45681+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45682+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45683+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45684+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45685+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45686+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45687+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690+4 4 4 4 4 4
45691+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45692+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45693+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45694+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45695+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45696+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45697+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45698+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45699+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45700+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45701+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704+4 4 4 4 4 4
45705+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45706+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45707+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45708+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45709+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45710+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45711+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45712+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45713+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45714+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45715+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45718+4 4 4 4 4 4
45719+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45720+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45721+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45722+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45723+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45724+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45725+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45726+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45727+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45728+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45729+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45732+4 4 4 4 4 4
45733+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45734+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45735+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45736+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45737+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45738+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45739+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45740+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45741+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45742+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45743+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45746+4 4 4 4 4 4
45747+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45748+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45749+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45750+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45751+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45752+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45753+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45754+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45755+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45756+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45757+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45760+4 4 4 4 4 4
45761+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45762+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45763+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45764+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45765+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45766+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45767+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45768+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45769+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45770+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45771+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45774+4 4 4 4 4 4
45775+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45776+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45777+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45778+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45779+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45780+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45781+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45782+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45783+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45784+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45788+4 4 4 4 4 4
45789+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45790+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45791+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45792+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45793+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45794+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45795+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45796+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45797+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45798+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45802+4 4 4 4 4 4
45803+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45804+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45805+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45806+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45807+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45808+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45809+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45810+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45811+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45812+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45816+4 4 4 4 4 4
45817+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45818+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45819+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45820+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45821+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45822+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45823+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45824+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45825+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45826+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45830+4 4 4 4 4 4
45831+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45832+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45833+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45834+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45835+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45836+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45837+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45838+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45839+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45844+4 4 4 4 4 4
45845+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45846+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45847+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45848+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45849+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45850+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45851+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45852+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45853+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45858+4 4 4 4 4 4
45859+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45860+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45861+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45862+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45863+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45864+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45865+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45866+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45867+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45872+4 4 4 4 4 4
45873+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45874+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45875+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45876+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45877+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45878+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45879+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45880+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45886+4 4 4 4 4 4
45887+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45888+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45889+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45890+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45891+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45892+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45893+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45894+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45895+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45900+4 4 4 4 4 4
45901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45902+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45903+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45904+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45905+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45906+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45907+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45908+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45909+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45914+4 4 4 4 4 4
45915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45916+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45917+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45918+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45919+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45920+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45921+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45922+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45928+4 4 4 4 4 4
45929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45930+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45931+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45932+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45933+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45934+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45935+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45936+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45942+4 4 4 4 4 4
45943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45944+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45945+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45946+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45947+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45948+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45949+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45950+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45956+4 4 4 4 4 4
45957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45958+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45959+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45960+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45961+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45962+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45963+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45964+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45970+4 4 4 4 4 4
45971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45972+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45973+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45974+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45975+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45976+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45977+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45978+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45984+4 4 4 4 4 4
45985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45987+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45988+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45989+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45990+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45991+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45992+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45998+4 4 4 4 4 4
45999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46002+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46003+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46004+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46005+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46012+4 4 4 4 4 4
46013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46017+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46018+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46019+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46026+4 4 4 4 4 4
46027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46031+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
46032+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
46033+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46040+4 4 4 4 4 4
46041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46045+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
46046+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
46047+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46054+4 4 4 4 4 4
46055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46059+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
46060+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
46061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46068+4 4 4 4 4 4
46069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46073+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46074+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46082+4 4 4 4 4 4
46083diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46084index 443e3c8..c443d6a 100644
46085--- a/drivers/video/nvidia/nv_backlight.c
46086+++ b/drivers/video/nvidia/nv_backlight.c
46087@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46088 return bd->props.brightness;
46089 }
46090
46091-static struct backlight_ops nvidia_bl_ops = {
46092+static const struct backlight_ops nvidia_bl_ops = {
46093 .get_brightness = nvidia_bl_get_brightness,
46094 .update_status = nvidia_bl_update_status,
46095 };
46096diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46097index d94c57f..912984c 100644
46098--- a/drivers/video/riva/fbdev.c
46099+++ b/drivers/video/riva/fbdev.c
46100@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46101 return bd->props.brightness;
46102 }
46103
46104-static struct backlight_ops riva_bl_ops = {
46105+static const struct backlight_ops riva_bl_ops = {
46106 .get_brightness = riva_bl_get_brightness,
46107 .update_status = riva_bl_update_status,
46108 };
46109diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46110index 54fbb29..2c108fc 100644
46111--- a/drivers/video/uvesafb.c
46112+++ b/drivers/video/uvesafb.c
46113@@ -18,6 +18,7 @@
46114 #include <linux/fb.h>
46115 #include <linux/io.h>
46116 #include <linux/mutex.h>
46117+#include <linux/moduleloader.h>
46118 #include <video/edid.h>
46119 #include <video/uvesafb.h>
46120 #ifdef CONFIG_X86
46121@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46122 NULL,
46123 };
46124
46125- return call_usermodehelper(v86d_path, argv, envp, 1);
46126+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46127 }
46128
46129 /*
46130@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46131 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46132 par->pmi_setpal = par->ypan = 0;
46133 } else {
46134+
46135+#ifdef CONFIG_PAX_KERNEXEC
46136+#ifdef CONFIG_MODULES
46137+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46138+#endif
46139+ if (!par->pmi_code) {
46140+ par->pmi_setpal = par->ypan = 0;
46141+ return 0;
46142+ }
46143+#endif
46144+
46145 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46146 + task->t.regs.edi);
46147+
46148+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46149+ pax_open_kernel();
46150+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46151+ pax_close_kernel();
46152+
46153+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46154+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46155+#else
46156 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46157 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46158+#endif
46159+
46160 printk(KERN_INFO "uvesafb: protected mode interface info at "
46161 "%04x:%04x\n",
46162 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46163@@ -1799,6 +1822,11 @@ out:
46164 if (par->vbe_modes)
46165 kfree(par->vbe_modes);
46166
46167+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46168+ if (par->pmi_code)
46169+ module_free_exec(NULL, par->pmi_code);
46170+#endif
46171+
46172 framebuffer_release(info);
46173 return err;
46174 }
46175@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46176 kfree(par->vbe_state_orig);
46177 if (par->vbe_state_saved)
46178 kfree(par->vbe_state_saved);
46179+
46180+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46181+ if (par->pmi_code)
46182+ module_free_exec(NULL, par->pmi_code);
46183+#endif
46184+
46185 }
46186
46187 framebuffer_release(info);
46188diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46189index bd37ee1..cb827e8 100644
46190--- a/drivers/video/vesafb.c
46191+++ b/drivers/video/vesafb.c
46192@@ -9,6 +9,7 @@
46193 */
46194
46195 #include <linux/module.h>
46196+#include <linux/moduleloader.h>
46197 #include <linux/kernel.h>
46198 #include <linux/errno.h>
46199 #include <linux/string.h>
46200@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46201 static int vram_total __initdata; /* Set total amount of memory */
46202 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46203 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46204-static void (*pmi_start)(void) __read_mostly;
46205-static void (*pmi_pal) (void) __read_mostly;
46206+static void (*pmi_start)(void) __read_only;
46207+static void (*pmi_pal) (void) __read_only;
46208 static int depth __read_mostly;
46209 static int vga_compat __read_mostly;
46210 /* --------------------------------------------------------------------- */
46211@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46212 unsigned int size_vmode;
46213 unsigned int size_remap;
46214 unsigned int size_total;
46215+ void *pmi_code = NULL;
46216
46217 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46218 return -ENODEV;
46219@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46220 size_remap = size_total;
46221 vesafb_fix.smem_len = size_remap;
46222
46223-#ifndef __i386__
46224- screen_info.vesapm_seg = 0;
46225-#endif
46226-
46227 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46228 printk(KERN_WARNING
46229 "vesafb: cannot reserve video memory at 0x%lx\n",
46230@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46231 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46232 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46233
46234+#ifdef __i386__
46235+
46236+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46237+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
46238+ if (!pmi_code)
46239+#elif !defined(CONFIG_PAX_KERNEXEC)
46240+ if (0)
46241+#endif
46242+
46243+#endif
46244+ screen_info.vesapm_seg = 0;
46245+
46246 if (screen_info.vesapm_seg) {
46247- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46248- screen_info.vesapm_seg,screen_info.vesapm_off);
46249+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46250+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46251 }
46252
46253 if (screen_info.vesapm_seg < 0xc000)
46254@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46255
46256 if (ypan || pmi_setpal) {
46257 unsigned short *pmi_base;
46258+
46259 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46260- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46261- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46262+
46263+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46264+ pax_open_kernel();
46265+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46266+#else
46267+ pmi_code = pmi_base;
46268+#endif
46269+
46270+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46271+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46272+
46273+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46274+ pmi_start = ktva_ktla(pmi_start);
46275+ pmi_pal = ktva_ktla(pmi_pal);
46276+ pax_close_kernel();
46277+#endif
46278+
46279 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46280 if (pmi_base[3]) {
46281 printk(KERN_INFO "vesafb: pmi: ports = ");
46282@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46283 info->node, info->fix.id);
46284 return 0;
46285 err:
46286+
46287+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46288+ module_free_exec(NULL, pmi_code);
46289+#endif
46290+
46291 if (info->screen_base)
46292 iounmap(info->screen_base);
46293 framebuffer_release(info);
46294diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46295index 88a60e0..6783cc2 100644
46296--- a/drivers/xen/sys-hypervisor.c
46297+++ b/drivers/xen/sys-hypervisor.c
46298@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46299 return 0;
46300 }
46301
46302-static struct sysfs_ops hyp_sysfs_ops = {
46303+static const struct sysfs_ops hyp_sysfs_ops = {
46304 .show = hyp_sysfs_show,
46305 .store = hyp_sysfs_store,
46306 };
46307diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46308index 18f74ec..3227009 100644
46309--- a/fs/9p/vfs_inode.c
46310+++ b/fs/9p/vfs_inode.c
46311@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46312 static void
46313 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46314 {
46315- char *s = nd_get_link(nd);
46316+ const char *s = nd_get_link(nd);
46317
46318 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46319 IS_ERR(s) ? "<error>" : s);
46320diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46321index bb4cc5b..df5eaa0 100644
46322--- a/fs/Kconfig.binfmt
46323+++ b/fs/Kconfig.binfmt
46324@@ -86,7 +86,7 @@ config HAVE_AOUT
46325
46326 config BINFMT_AOUT
46327 tristate "Kernel support for a.out and ECOFF binaries"
46328- depends on HAVE_AOUT
46329+ depends on HAVE_AOUT && BROKEN
46330 ---help---
46331 A.out (Assembler.OUTput) is a set of formats for libraries and
46332 executables used in the earliest versions of UNIX. Linux used
46333diff --git a/fs/aio.c b/fs/aio.c
46334index 22a19ad..d484e5b 100644
46335--- a/fs/aio.c
46336+++ b/fs/aio.c
46337@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46338 size += sizeof(struct io_event) * nr_events;
46339 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46340
46341- if (nr_pages < 0)
46342+ if (nr_pages <= 0)
46343 return -EINVAL;
46344
46345 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46346@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46347 struct aio_timeout to;
46348 int retry = 0;
46349
46350+ pax_track_stack();
46351+
46352 /* needed to zero any padding within an entry (there shouldn't be
46353 * any, but C is fun!
46354 */
46355@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46356 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46357 {
46358 ssize_t ret;
46359+ struct iovec iovstack;
46360
46361 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46362 kiocb->ki_nbytes, 1,
46363- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46364+ &iovstack, &kiocb->ki_iovec);
46365 if (ret < 0)
46366 goto out;
46367
46368+ if (kiocb->ki_iovec == &iovstack) {
46369+ kiocb->ki_inline_vec = iovstack;
46370+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
46371+ }
46372 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46373 kiocb->ki_cur_seg = 0;
46374 /* ki_nbytes/left now reflect bytes instead of segs */
46375diff --git a/fs/attr.c b/fs/attr.c
46376index 96d394b..33cf5b4 100644
46377--- a/fs/attr.c
46378+++ b/fs/attr.c
46379@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46380 unsigned long limit;
46381
46382 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46383+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46384 if (limit != RLIM_INFINITY && offset > limit)
46385 goto out_sig;
46386 if (offset > inode->i_sb->s_maxbytes)
46387diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46388index 4a1401c..05eb5ca 100644
46389--- a/fs/autofs/root.c
46390+++ b/fs/autofs/root.c
46391@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46392 set_bit(n,sbi->symlink_bitmap);
46393 sl = &sbi->symlink[n];
46394 sl->len = strlen(symname);
46395- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46396+ slsize = sl->len+1;
46397+ sl->data = kmalloc(slsize, GFP_KERNEL);
46398 if (!sl->data) {
46399 clear_bit(n,sbi->symlink_bitmap);
46400 unlock_kernel();
46401diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46402index b4ea829..e63ef18 100644
46403--- a/fs/autofs4/symlink.c
46404+++ b/fs/autofs4/symlink.c
46405@@ -15,7 +15,7 @@
46406 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46407 {
46408 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46409- nd_set_link(nd, (char *)ino->u.symlink);
46410+ nd_set_link(nd, ino->u.symlink);
46411 return NULL;
46412 }
46413
46414diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46415index 2341375..df9d1c2 100644
46416--- a/fs/autofs4/waitq.c
46417+++ b/fs/autofs4/waitq.c
46418@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46419 {
46420 unsigned long sigpipe, flags;
46421 mm_segment_t fs;
46422- const char *data = (const char *)addr;
46423+ const char __user *data = (const char __force_user *)addr;
46424 ssize_t wr = 0;
46425
46426 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46427diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46428index 9158c07..3f06659 100644
46429--- a/fs/befs/linuxvfs.c
46430+++ b/fs/befs/linuxvfs.c
46431@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46432 {
46433 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46434 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46435- char *link = nd_get_link(nd);
46436+ const char *link = nd_get_link(nd);
46437 if (!IS_ERR(link))
46438 kfree(link);
46439 }
46440diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46441index 0133b5a..b3baa9f 100644
46442--- a/fs/binfmt_aout.c
46443+++ b/fs/binfmt_aout.c
46444@@ -16,6 +16,7 @@
46445 #include <linux/string.h>
46446 #include <linux/fs.h>
46447 #include <linux/file.h>
46448+#include <linux/security.h>
46449 #include <linux/stat.h>
46450 #include <linux/fcntl.h>
46451 #include <linux/ptrace.h>
46452@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46453 #endif
46454 # define START_STACK(u) (u.start_stack)
46455
46456+ memset(&dump, 0, sizeof(dump));
46457+
46458 fs = get_fs();
46459 set_fs(KERNEL_DS);
46460 has_dumped = 1;
46461@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46462
46463 /* If the size of the dump file exceeds the rlimit, then see what would happen
46464 if we wrote the stack, but not the data area. */
46465+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46466 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46467 dump.u_dsize = 0;
46468
46469 /* Make sure we have enough room to write the stack and data areas. */
46470+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46471 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46472 dump.u_ssize = 0;
46473
46474@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46475 dump_size = dump.u_ssize << PAGE_SHIFT;
46476 DUMP_WRITE(dump_start,dump_size);
46477 }
46478-/* Finally dump the task struct. Not be used by gdb, but could be useful */
46479- set_fs(KERNEL_DS);
46480- DUMP_WRITE(current,sizeof(*current));
46481+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46482 end_coredump:
46483 set_fs(fs);
46484 return has_dumped;
46485@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46486 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46487 if (rlim >= RLIM_INFINITY)
46488 rlim = ~0;
46489+
46490+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46491 if (ex.a_data + ex.a_bss > rlim)
46492 return -ENOMEM;
46493
46494@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46495 install_exec_creds(bprm);
46496 current->flags &= ~PF_FORKNOEXEC;
46497
46498+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46499+ current->mm->pax_flags = 0UL;
46500+#endif
46501+
46502+#ifdef CONFIG_PAX_PAGEEXEC
46503+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46504+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46505+
46506+#ifdef CONFIG_PAX_EMUTRAMP
46507+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46508+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46509+#endif
46510+
46511+#ifdef CONFIG_PAX_MPROTECT
46512+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46513+ current->mm->pax_flags |= MF_PAX_MPROTECT;
46514+#endif
46515+
46516+ }
46517+#endif
46518+
46519 if (N_MAGIC(ex) == OMAGIC) {
46520 unsigned long text_addr, map_size;
46521 loff_t pos;
46522@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46523
46524 down_write(&current->mm->mmap_sem);
46525 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46526- PROT_READ | PROT_WRITE | PROT_EXEC,
46527+ PROT_READ | PROT_WRITE,
46528 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46529 fd_offset + ex.a_text);
46530 up_write(&current->mm->mmap_sem);
46531diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46532index 1ed37ba..de82ab7 100644
46533--- a/fs/binfmt_elf.c
46534+++ b/fs/binfmt_elf.c
46535@@ -31,6 +31,7 @@
46536 #include <linux/random.h>
46537 #include <linux/elf.h>
46538 #include <linux/utsname.h>
46539+#include <linux/xattr.h>
46540 #include <asm/uaccess.h>
46541 #include <asm/param.h>
46542 #include <asm/page.h>
46543@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46544 #define elf_core_dump NULL
46545 #endif
46546
46547+#ifdef CONFIG_PAX_MPROTECT
46548+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46549+#endif
46550+
46551 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46552 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46553 #else
46554@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46555 .load_binary = load_elf_binary,
46556 .load_shlib = load_elf_library,
46557 .core_dump = elf_core_dump,
46558+
46559+#ifdef CONFIG_PAX_MPROTECT
46560+ .handle_mprotect= elf_handle_mprotect,
46561+#endif
46562+
46563 .min_coredump = ELF_EXEC_PAGESIZE,
46564 .hasvdso = 1
46565 };
46566@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46567
46568 static int set_brk(unsigned long start, unsigned long end)
46569 {
46570+ unsigned long e = end;
46571+
46572 start = ELF_PAGEALIGN(start);
46573 end = ELF_PAGEALIGN(end);
46574 if (end > start) {
46575@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46576 if (BAD_ADDR(addr))
46577 return addr;
46578 }
46579- current->mm->start_brk = current->mm->brk = end;
46580+ current->mm->start_brk = current->mm->brk = e;
46581 return 0;
46582 }
46583
46584@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46585 elf_addr_t __user *u_rand_bytes;
46586 const char *k_platform = ELF_PLATFORM;
46587 const char *k_base_platform = ELF_BASE_PLATFORM;
46588- unsigned char k_rand_bytes[16];
46589+ u32 k_rand_bytes[4];
46590 int items;
46591 elf_addr_t *elf_info;
46592 int ei_index = 0;
46593 const struct cred *cred = current_cred();
46594 struct vm_area_struct *vma;
46595+ unsigned long saved_auxv[AT_VECTOR_SIZE];
46596+
46597+ pax_track_stack();
46598
46599 /*
46600 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46601@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46602 * Generate 16 random bytes for userspace PRNG seeding.
46603 */
46604 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46605- u_rand_bytes = (elf_addr_t __user *)
46606- STACK_ALLOC(p, sizeof(k_rand_bytes));
46607+ srandom32(k_rand_bytes[0] ^ random32());
46608+ srandom32(k_rand_bytes[1] ^ random32());
46609+ srandom32(k_rand_bytes[2] ^ random32());
46610+ srandom32(k_rand_bytes[3] ^ random32());
46611+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
46612+ u_rand_bytes = (elf_addr_t __user *) p;
46613 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46614 return -EFAULT;
46615
46616@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46617 return -EFAULT;
46618 current->mm->env_end = p;
46619
46620+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46621+
46622 /* Put the elf_info on the stack in the right place. */
46623 sp = (elf_addr_t __user *)envp + 1;
46624- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46625+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46626 return -EFAULT;
46627 return 0;
46628 }
46629@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46630 {
46631 struct elf_phdr *elf_phdata;
46632 struct elf_phdr *eppnt;
46633- unsigned long load_addr = 0;
46634+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46635 int load_addr_set = 0;
46636 unsigned long last_bss = 0, elf_bss = 0;
46637- unsigned long error = ~0UL;
46638+ unsigned long error = -EINVAL;
46639 unsigned long total_size;
46640 int retval, i, size;
46641
46642@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46643 goto out_close;
46644 }
46645
46646+#ifdef CONFIG_PAX_SEGMEXEC
46647+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46648+ pax_task_size = SEGMEXEC_TASK_SIZE;
46649+#endif
46650+
46651 eppnt = elf_phdata;
46652 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46653 if (eppnt->p_type == PT_LOAD) {
46654@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46655 k = load_addr + eppnt->p_vaddr;
46656 if (BAD_ADDR(k) ||
46657 eppnt->p_filesz > eppnt->p_memsz ||
46658- eppnt->p_memsz > TASK_SIZE ||
46659- TASK_SIZE - eppnt->p_memsz < k) {
46660+ eppnt->p_memsz > pax_task_size ||
46661+ pax_task_size - eppnt->p_memsz < k) {
46662 error = -ENOMEM;
46663 goto out_close;
46664 }
46665@@ -532,6 +558,351 @@ out:
46666 return error;
46667 }
46668
46669+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46670+{
46671+ unsigned long pax_flags = 0UL;
46672+
46673+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46674+
46675+#ifdef CONFIG_PAX_PAGEEXEC
46676+ if (elf_phdata->p_flags & PF_PAGEEXEC)
46677+ pax_flags |= MF_PAX_PAGEEXEC;
46678+#endif
46679+
46680+#ifdef CONFIG_PAX_SEGMEXEC
46681+ if (elf_phdata->p_flags & PF_SEGMEXEC)
46682+ pax_flags |= MF_PAX_SEGMEXEC;
46683+#endif
46684+
46685+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46686+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46687+ if (nx_enabled)
46688+ pax_flags &= ~MF_PAX_SEGMEXEC;
46689+ else
46690+ pax_flags &= ~MF_PAX_PAGEEXEC;
46691+ }
46692+#endif
46693+
46694+#ifdef CONFIG_PAX_EMUTRAMP
46695+ if (elf_phdata->p_flags & PF_EMUTRAMP)
46696+ pax_flags |= MF_PAX_EMUTRAMP;
46697+#endif
46698+
46699+#ifdef CONFIG_PAX_MPROTECT
46700+ if (elf_phdata->p_flags & PF_MPROTECT)
46701+ pax_flags |= MF_PAX_MPROTECT;
46702+#endif
46703+
46704+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46705+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46706+ pax_flags |= MF_PAX_RANDMMAP;
46707+#endif
46708+
46709+#endif
46710+
46711+ return pax_flags;
46712+}
46713+
46714+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46715+{
46716+ unsigned long pax_flags = 0UL;
46717+
46718+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46719+
46720+#ifdef CONFIG_PAX_PAGEEXEC
46721+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46722+ pax_flags |= MF_PAX_PAGEEXEC;
46723+#endif
46724+
46725+#ifdef CONFIG_PAX_SEGMEXEC
46726+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46727+ pax_flags |= MF_PAX_SEGMEXEC;
46728+#endif
46729+
46730+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46731+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46732+ if (nx_enabled)
46733+ pax_flags &= ~MF_PAX_SEGMEXEC;
46734+ else
46735+ pax_flags &= ~MF_PAX_PAGEEXEC;
46736+ }
46737+#endif
46738+
46739+#ifdef CONFIG_PAX_EMUTRAMP
46740+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46741+ pax_flags |= MF_PAX_EMUTRAMP;
46742+#endif
46743+
46744+#ifdef CONFIG_PAX_MPROTECT
46745+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46746+ pax_flags |= MF_PAX_MPROTECT;
46747+#endif
46748+
46749+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46750+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46751+ pax_flags |= MF_PAX_RANDMMAP;
46752+#endif
46753+
46754+#endif
46755+
46756+ return pax_flags;
46757+}
46758+
46759+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46760+{
46761+ unsigned long pax_flags = 0UL;
46762+
46763+#ifdef CONFIG_PAX_EI_PAX
46764+
46765+#ifdef CONFIG_PAX_PAGEEXEC
46766+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46767+ pax_flags |= MF_PAX_PAGEEXEC;
46768+#endif
46769+
46770+#ifdef CONFIG_PAX_SEGMEXEC
46771+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46772+ pax_flags |= MF_PAX_SEGMEXEC;
46773+#endif
46774+
46775+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46776+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46777+ if (nx_enabled)
46778+ pax_flags &= ~MF_PAX_SEGMEXEC;
46779+ else
46780+ pax_flags &= ~MF_PAX_PAGEEXEC;
46781+ }
46782+#endif
46783+
46784+#ifdef CONFIG_PAX_EMUTRAMP
46785+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46786+ pax_flags |= MF_PAX_EMUTRAMP;
46787+#endif
46788+
46789+#ifdef CONFIG_PAX_MPROTECT
46790+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46791+ pax_flags |= MF_PAX_MPROTECT;
46792+#endif
46793+
46794+#ifdef CONFIG_PAX_ASLR
46795+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46796+ pax_flags |= MF_PAX_RANDMMAP;
46797+#endif
46798+
46799+#else
46800+
46801+#ifdef CONFIG_PAX_PAGEEXEC
46802+ pax_flags |= MF_PAX_PAGEEXEC;
46803+#endif
46804+
46805+#ifdef CONFIG_PAX_MPROTECT
46806+ pax_flags |= MF_PAX_MPROTECT;
46807+#endif
46808+
46809+#ifdef CONFIG_PAX_RANDMMAP
46810+ pax_flags |= MF_PAX_RANDMMAP;
46811+#endif
46812+
46813+#ifdef CONFIG_PAX_SEGMEXEC
46814+ if (!(__supported_pte_mask & _PAGE_NX)) {
46815+ pax_flags &= ~MF_PAX_PAGEEXEC;
46816+ pax_flags |= MF_PAX_SEGMEXEC;
46817+ }
46818+#endif
46819+
46820+#endif
46821+
46822+ return pax_flags;
46823+}
46824+
46825+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46826+{
46827+
46828+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46829+ unsigned long i;
46830+
46831+ for (i = 0UL; i < elf_ex->e_phnum; i++)
46832+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46833+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46834+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46835+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46836+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46837+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46838+ return ~0UL;
46839+
46840+#ifdef CONFIG_PAX_SOFTMODE
46841+ if (pax_softmode)
46842+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46843+ else
46844+#endif
46845+
46846+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46847+ break;
46848+ }
46849+#endif
46850+
46851+ return ~0UL;
46852+}
46853+
46854+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46855+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46856+{
46857+ unsigned long pax_flags = 0UL;
46858+
46859+#ifdef CONFIG_PAX_PAGEEXEC
46860+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46861+ pax_flags |= MF_PAX_PAGEEXEC;
46862+#endif
46863+
46864+#ifdef CONFIG_PAX_SEGMEXEC
46865+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46866+ pax_flags |= MF_PAX_SEGMEXEC;
46867+#endif
46868+
46869+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46870+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46871+ if ((__supported_pte_mask & _PAGE_NX))
46872+ pax_flags &= ~MF_PAX_SEGMEXEC;
46873+ else
46874+ pax_flags &= ~MF_PAX_PAGEEXEC;
46875+ }
46876+#endif
46877+
46878+#ifdef CONFIG_PAX_EMUTRAMP
46879+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46880+ pax_flags |= MF_PAX_EMUTRAMP;
46881+#endif
46882+
46883+#ifdef CONFIG_PAX_MPROTECT
46884+ if (pax_flags_softmode & MF_PAX_MPROTECT)
46885+ pax_flags |= MF_PAX_MPROTECT;
46886+#endif
46887+
46888+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46889+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46890+ pax_flags |= MF_PAX_RANDMMAP;
46891+#endif
46892+
46893+ return pax_flags;
46894+}
46895+
46896+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46897+{
46898+ unsigned long pax_flags = 0UL;
46899+
46900+#ifdef CONFIG_PAX_PAGEEXEC
46901+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46902+ pax_flags |= MF_PAX_PAGEEXEC;
46903+#endif
46904+
46905+#ifdef CONFIG_PAX_SEGMEXEC
46906+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46907+ pax_flags |= MF_PAX_SEGMEXEC;
46908+#endif
46909+
46910+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46911+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46912+ if ((__supported_pte_mask & _PAGE_NX))
46913+ pax_flags &= ~MF_PAX_SEGMEXEC;
46914+ else
46915+ pax_flags &= ~MF_PAX_PAGEEXEC;
46916+ }
46917+#endif
46918+
46919+#ifdef CONFIG_PAX_EMUTRAMP
46920+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46921+ pax_flags |= MF_PAX_EMUTRAMP;
46922+#endif
46923+
46924+#ifdef CONFIG_PAX_MPROTECT
46925+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46926+ pax_flags |= MF_PAX_MPROTECT;
46927+#endif
46928+
46929+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46930+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46931+ pax_flags |= MF_PAX_RANDMMAP;
46932+#endif
46933+
46934+ return pax_flags;
46935+}
46936+#endif
46937+
46938+static unsigned long pax_parse_xattr_pax(struct file * const file)
46939+{
46940+
46941+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46942+ ssize_t xattr_size, i;
46943+ unsigned char xattr_value[5];
46944+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46945+
46946+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46947+ if (xattr_size <= 0)
46948+ return ~0UL;
46949+
46950+ for (i = 0; i < xattr_size; i++)
46951+ switch (xattr_value[i]) {
46952+ default:
46953+ return ~0UL;
46954+
46955+#define parse_flag(option1, option2, flag) \
46956+ case option1: \
46957+ pax_flags_hardmode |= MF_PAX_##flag; \
46958+ break; \
46959+ case option2: \
46960+ pax_flags_softmode |= MF_PAX_##flag; \
46961+ break;
46962+
46963+ parse_flag('p', 'P', PAGEEXEC);
46964+ parse_flag('e', 'E', EMUTRAMP);
46965+ parse_flag('m', 'M', MPROTECT);
46966+ parse_flag('r', 'R', RANDMMAP);
46967+ parse_flag('s', 'S', SEGMEXEC);
46968+
46969+#undef parse_flag
46970+ }
46971+
46972+ if (pax_flags_hardmode & pax_flags_softmode)
46973+ return ~0UL;
46974+
46975+#ifdef CONFIG_PAX_SOFTMODE
46976+ if (pax_softmode)
46977+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
46978+ else
46979+#endif
46980+
46981+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
46982+#else
46983+ return ~0UL;
46984+#endif
46985+
46986+}
46987+
46988+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46989+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
46990+{
46991+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
46992+
46993+ pax_flags = pax_parse_ei_pax(elf_ex);
46994+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
46995+ xattr_pax_flags = pax_parse_xattr_pax(file);
46996+
46997+ if (pt_pax_flags == ~0UL)
46998+ pt_pax_flags = xattr_pax_flags;
46999+ else if (xattr_pax_flags == ~0UL)
47000+ xattr_pax_flags = pt_pax_flags;
47001+ if (pt_pax_flags != xattr_pax_flags)
47002+ return -EINVAL;
47003+ if (pt_pax_flags != ~0UL)
47004+ pax_flags = pt_pax_flags;
47005+
47006+ if (0 > pax_check_flags(&pax_flags))
47007+ return -EINVAL;
47008+
47009+ current->mm->pax_flags = pax_flags;
47010+ return 0;
47011+}
47012+#endif
47013+
47014 /*
47015 * These are the functions used to load ELF style executables and shared
47016 * libraries. There is no binary dependent code anywhere else.
47017@@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47018 {
47019 unsigned int random_variable = 0;
47020
47021+#ifdef CONFIG_PAX_RANDUSTACK
47022+ if (randomize_va_space)
47023+ return stack_top - current->mm->delta_stack;
47024+#endif
47025+
47026 if ((current->flags & PF_RANDOMIZE) &&
47027 !(current->personality & ADDR_NO_RANDOMIZE)) {
47028 random_variable = get_random_int() & STACK_RND_MASK;
47029@@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47030 unsigned long load_addr = 0, load_bias = 0;
47031 int load_addr_set = 0;
47032 char * elf_interpreter = NULL;
47033- unsigned long error;
47034+ unsigned long error = 0;
47035 struct elf_phdr *elf_ppnt, *elf_phdata;
47036 unsigned long elf_bss, elf_brk;
47037 int retval, i;
47038@@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47039 unsigned long start_code, end_code, start_data, end_data;
47040 unsigned long reloc_func_desc = 0;
47041 int executable_stack = EXSTACK_DEFAULT;
47042- unsigned long def_flags = 0;
47043 struct {
47044 struct elfhdr elf_ex;
47045 struct elfhdr interp_elf_ex;
47046 } *loc;
47047+ unsigned long pax_task_size = TASK_SIZE;
47048
47049 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47050 if (!loc) {
47051@@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47052
47053 /* OK, This is the point of no return */
47054 current->flags &= ~PF_FORKNOEXEC;
47055- current->mm->def_flags = def_flags;
47056+
47057+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47058+ current->mm->pax_flags = 0UL;
47059+#endif
47060+
47061+#ifdef CONFIG_PAX_DLRESOLVE
47062+ current->mm->call_dl_resolve = 0UL;
47063+#endif
47064+
47065+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47066+ current->mm->call_syscall = 0UL;
47067+#endif
47068+
47069+#ifdef CONFIG_PAX_ASLR
47070+ current->mm->delta_mmap = 0UL;
47071+ current->mm->delta_stack = 0UL;
47072+#endif
47073+
47074+ current->mm->def_flags = 0;
47075+
47076+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47077+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47078+ send_sig(SIGKILL, current, 0);
47079+ goto out_free_dentry;
47080+ }
47081+#endif
47082+
47083+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47084+ pax_set_initial_flags(bprm);
47085+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47086+ if (pax_set_initial_flags_func)
47087+ (pax_set_initial_flags_func)(bprm);
47088+#endif
47089+
47090+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47091+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47092+ current->mm->context.user_cs_limit = PAGE_SIZE;
47093+ current->mm->def_flags |= VM_PAGEEXEC;
47094+ }
47095+#endif
47096+
47097+#ifdef CONFIG_PAX_SEGMEXEC
47098+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47099+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47100+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47101+ pax_task_size = SEGMEXEC_TASK_SIZE;
47102+ }
47103+#endif
47104+
47105+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47106+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47107+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47108+ put_cpu();
47109+ }
47110+#endif
47111
47112 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47113 may depend on the personality. */
47114 SET_PERSONALITY(loc->elf_ex);
47115+
47116+#ifdef CONFIG_PAX_ASLR
47117+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47118+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47119+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47120+ }
47121+#endif
47122+
47123+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47124+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47125+ executable_stack = EXSTACK_DISABLE_X;
47126+ current->personality &= ~READ_IMPLIES_EXEC;
47127+ } else
47128+#endif
47129+
47130 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47131 current->personality |= READ_IMPLIES_EXEC;
47132
47133@@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47134 * might try to exec. This is because the brk will
47135 * follow the loader, and is not movable. */
47136 #ifdef CONFIG_X86
47137- load_bias = 0;
47138+ if (current->flags & PF_RANDOMIZE)
47139+ load_bias = 0;
47140+ else
47141+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47142 #else
47143 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47144 #endif
47145+
47146+#ifdef CONFIG_PAX_RANDMMAP
47147+ /* PaX: randomize base address at the default exe base if requested */
47148+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47149+#ifdef CONFIG_SPARC64
47150+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47151+#else
47152+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47153+#endif
47154+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47155+ elf_flags |= MAP_FIXED;
47156+ }
47157+#endif
47158+
47159 }
47160
47161 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47162@@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47163 * allowed task size. Note that p_filesz must always be
47164 * <= p_memsz so it is only necessary to check p_memsz.
47165 */
47166- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47167- elf_ppnt->p_memsz > TASK_SIZE ||
47168- TASK_SIZE - elf_ppnt->p_memsz < k) {
47169+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47170+ elf_ppnt->p_memsz > pax_task_size ||
47171+ pax_task_size - elf_ppnt->p_memsz < k) {
47172 /* set_brk can never work. Avoid overflows. */
47173 send_sig(SIGKILL, current, 0);
47174 retval = -EINVAL;
47175@@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47176 start_data += load_bias;
47177 end_data += load_bias;
47178
47179+#ifdef CONFIG_PAX_RANDMMAP
47180+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47181+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47182+#endif
47183+
47184 /* Calling set_brk effectively mmaps the pages that we need
47185 * for the bss and break sections. We must do this before
47186 * mapping in the interpreter, to make sure it doesn't wind
47187@@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47188 goto out_free_dentry;
47189 }
47190 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47191- send_sig(SIGSEGV, current, 0);
47192- retval = -EFAULT; /* Nobody gets to see this, but.. */
47193- goto out_free_dentry;
47194+ /*
47195+ * This bss-zeroing can fail if the ELF
47196+ * file specifies odd protections. So
47197+ * we don't check the return value
47198+ */
47199 }
47200
47201 if (elf_interpreter) {
47202@@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
47203 unsigned long n = off;
47204 if (n > PAGE_SIZE)
47205 n = PAGE_SIZE;
47206- if (!dump_write(file, buf, n))
47207+ if (!dump_write(file, buf, n)) {
47208+ free_page((unsigned long)buf);
47209 return 0;
47210+ }
47211 off -= n;
47212 }
47213 free_page((unsigned long)buf);
47214@@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
47215 * Decide what to dump of a segment, part, all or none.
47216 */
47217 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47218- unsigned long mm_flags)
47219+ unsigned long mm_flags, long signr)
47220 {
47221 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47222
47223@@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47224 if (vma->vm_file == NULL)
47225 return 0;
47226
47227- if (FILTER(MAPPED_PRIVATE))
47228+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47229 goto whole;
47230
47231 /*
47232@@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47233 #undef DUMP_WRITE
47234
47235 #define DUMP_WRITE(addr, nr) \
47236+ do { \
47237+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47238 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47239- goto end_coredump;
47240+ goto end_coredump; \
47241+ } while (0);
47242
47243 static void fill_elf_header(struct elfhdr *elf, int segs,
47244 u16 machine, u32 flags, u8 osabi)
47245@@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47246 {
47247 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47248 int i = 0;
47249- do
47250+ do {
47251 i += 2;
47252- while (auxv[i - 2] != AT_NULL);
47253+ } while (auxv[i - 2] != AT_NULL);
47254 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47255 }
47256
47257@@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47258 phdr.p_offset = offset;
47259 phdr.p_vaddr = vma->vm_start;
47260 phdr.p_paddr = 0;
47261- phdr.p_filesz = vma_dump_size(vma, mm_flags);
47262+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47263 phdr.p_memsz = vma->vm_end - vma->vm_start;
47264 offset += phdr.p_filesz;
47265 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47266@@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47267 unsigned long addr;
47268 unsigned long end;
47269
47270- end = vma->vm_start + vma_dump_size(vma, mm_flags);
47271+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47272
47273 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47274 struct page *page;
47275@@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47276 page = get_dump_page(addr);
47277 if (page) {
47278 void *kaddr = kmap(page);
47279+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47280 stop = ((size += PAGE_SIZE) > limit) ||
47281 !dump_write(file, kaddr, PAGE_SIZE);
47282 kunmap(page);
47283@@ -2042,6 +2517,97 @@ out:
47284
47285 #endif /* USE_ELF_CORE_DUMP */
47286
47287+#ifdef CONFIG_PAX_MPROTECT
47288+/* PaX: non-PIC ELF libraries need relocations on their executable segments
47289+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47290+ * we'll remove VM_MAYWRITE for good on RELRO segments.
47291+ *
47292+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47293+ * basis because we want to allow the common case and not the special ones.
47294+ */
47295+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47296+{
47297+ struct elfhdr elf_h;
47298+ struct elf_phdr elf_p;
47299+ unsigned long i;
47300+ unsigned long oldflags;
47301+ bool is_textrel_rw, is_textrel_rx, is_relro;
47302+
47303+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47304+ return;
47305+
47306+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47307+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47308+
47309+#ifdef CONFIG_PAX_ELFRELOCS
47310+ /* possible TEXTREL */
47311+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47312+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47313+#else
47314+ is_textrel_rw = false;
47315+ is_textrel_rx = false;
47316+#endif
47317+
47318+ /* possible RELRO */
47319+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47320+
47321+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47322+ return;
47323+
47324+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47325+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47326+
47327+#ifdef CONFIG_PAX_ETEXECRELOCS
47328+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47329+#else
47330+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47331+#endif
47332+
47333+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47334+ !elf_check_arch(&elf_h) ||
47335+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47336+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47337+ return;
47338+
47339+ for (i = 0UL; i < elf_h.e_phnum; i++) {
47340+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47341+ return;
47342+ switch (elf_p.p_type) {
47343+ case PT_DYNAMIC:
47344+ if (!is_textrel_rw && !is_textrel_rx)
47345+ continue;
47346+ i = 0UL;
47347+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47348+ elf_dyn dyn;
47349+
47350+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47351+ return;
47352+ if (dyn.d_tag == DT_NULL)
47353+ return;
47354+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47355+ gr_log_textrel(vma);
47356+ if (is_textrel_rw)
47357+ vma->vm_flags |= VM_MAYWRITE;
47358+ else
47359+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47360+ vma->vm_flags &= ~VM_MAYWRITE;
47361+ return;
47362+ }
47363+ i++;
47364+ }
47365+ return;
47366+
47367+ case PT_GNU_RELRO:
47368+ if (!is_relro)
47369+ continue;
47370+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47371+ vma->vm_flags &= ~VM_MAYWRITE;
47372+ return;
47373+ }
47374+ }
47375+}
47376+#endif
47377+
47378 static int __init init_elf_binfmt(void)
47379 {
47380 return register_binfmt(&elf_format);
47381diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47382index ca88c46..f155a60 100644
47383--- a/fs/binfmt_flat.c
47384+++ b/fs/binfmt_flat.c
47385@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47386 realdatastart = (unsigned long) -ENOMEM;
47387 printk("Unable to allocate RAM for process data, errno %d\n",
47388 (int)-realdatastart);
47389+ down_write(&current->mm->mmap_sem);
47390 do_munmap(current->mm, textpos, text_len);
47391+ up_write(&current->mm->mmap_sem);
47392 ret = realdatastart;
47393 goto err;
47394 }
47395@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47396 }
47397 if (IS_ERR_VALUE(result)) {
47398 printk("Unable to read data+bss, errno %d\n", (int)-result);
47399+ down_write(&current->mm->mmap_sem);
47400 do_munmap(current->mm, textpos, text_len);
47401 do_munmap(current->mm, realdatastart, data_len + extra);
47402+ up_write(&current->mm->mmap_sem);
47403 ret = result;
47404 goto err;
47405 }
47406@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47407 }
47408 if (IS_ERR_VALUE(result)) {
47409 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47410+ down_write(&current->mm->mmap_sem);
47411 do_munmap(current->mm, textpos, text_len + data_len + extra +
47412 MAX_SHARED_LIBS * sizeof(unsigned long));
47413+ up_write(&current->mm->mmap_sem);
47414 ret = result;
47415 goto err;
47416 }
47417diff --git a/fs/bio.c b/fs/bio.c
47418index e696713..83de133 100644
47419--- a/fs/bio.c
47420+++ b/fs/bio.c
47421@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47422
47423 i = 0;
47424 while (i < bio_slab_nr) {
47425- struct bio_slab *bslab = &bio_slabs[i];
47426+ bslab = &bio_slabs[i];
47427
47428 if (!bslab->slab && entry == -1)
47429 entry = i;
47430@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47431 const int read = bio_data_dir(bio) == READ;
47432 struct bio_map_data *bmd = bio->bi_private;
47433 int i;
47434- char *p = bmd->sgvecs[0].iov_base;
47435+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47436
47437 __bio_for_each_segment(bvec, bio, i, 0) {
47438 char *addr = page_address(bvec->bv_page);
47439diff --git a/fs/block_dev.c b/fs/block_dev.c
47440index e65efa2..04fae57 100644
47441--- a/fs/block_dev.c
47442+++ b/fs/block_dev.c
47443@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47444 else if (bdev->bd_contains == bdev)
47445 res = 0; /* is a whole device which isn't held */
47446
47447- else if (bdev->bd_contains->bd_holder == bd_claim)
47448+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47449 res = 0; /* is a partition of a device that is being partitioned */
47450 else if (bdev->bd_contains->bd_holder != NULL)
47451 res = -EBUSY; /* is a partition of a held device */
47452diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47453index c4bc570..42acd8d 100644
47454--- a/fs/btrfs/ctree.c
47455+++ b/fs/btrfs/ctree.c
47456@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47457 free_extent_buffer(buf);
47458 add_root_to_dirty_list(root);
47459 } else {
47460- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47461- parent_start = parent->start;
47462- else
47463+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47464+ if (parent)
47465+ parent_start = parent->start;
47466+ else
47467+ parent_start = 0;
47468+ } else
47469 parent_start = 0;
47470
47471 WARN_ON(trans->transid != btrfs_header_generation(parent));
47472@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47473
47474 ret = 0;
47475 if (slot == 0) {
47476- struct btrfs_disk_key disk_key;
47477 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47478 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47479 }
47480diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47481index f447188..59c17c5 100644
47482--- a/fs/btrfs/disk-io.c
47483+++ b/fs/btrfs/disk-io.c
47484@@ -39,7 +39,7 @@
47485 #include "tree-log.h"
47486 #include "free-space-cache.h"
47487
47488-static struct extent_io_ops btree_extent_io_ops;
47489+static const struct extent_io_ops btree_extent_io_ops;
47490 static void end_workqueue_fn(struct btrfs_work *work);
47491 static void free_fs_root(struct btrfs_root *root);
47492
47493@@ -2607,7 +2607,7 @@ out:
47494 return 0;
47495 }
47496
47497-static struct extent_io_ops btree_extent_io_ops = {
47498+static const struct extent_io_ops btree_extent_io_ops = {
47499 .write_cache_pages_lock_hook = btree_lock_page_hook,
47500 .readpage_end_io_hook = btree_readpage_end_io_hook,
47501 .submit_bio_hook = btree_submit_bio_hook,
47502diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47503index 559f724..a026171 100644
47504--- a/fs/btrfs/extent-tree.c
47505+++ b/fs/btrfs/extent-tree.c
47506@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47507 u64 group_start = group->key.objectid;
47508 new_extents = kmalloc(sizeof(*new_extents),
47509 GFP_NOFS);
47510+ if (!new_extents) {
47511+ ret = -ENOMEM;
47512+ goto out;
47513+ }
47514 nr_extents = 1;
47515 ret = get_new_locations(reloc_inode,
47516 extent_key,
47517diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47518index 36de250..7ec75c7 100644
47519--- a/fs/btrfs/extent_io.h
47520+++ b/fs/btrfs/extent_io.h
47521@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47522 struct bio *bio, int mirror_num,
47523 unsigned long bio_flags);
47524 struct extent_io_ops {
47525- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47526+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47527 u64 start, u64 end, int *page_started,
47528 unsigned long *nr_written);
47529- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47530- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47531+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47532+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47533 extent_submit_bio_hook_t *submit_bio_hook;
47534- int (*merge_bio_hook)(struct page *page, unsigned long offset,
47535+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47536 size_t size, struct bio *bio,
47537 unsigned long bio_flags);
47538- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47539- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47540+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47541+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47542 u64 start, u64 end,
47543 struct extent_state *state);
47544- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47545+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47546 u64 start, u64 end,
47547 struct extent_state *state);
47548- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47549+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47550 struct extent_state *state);
47551- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47552+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47553 struct extent_state *state, int uptodate);
47554- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47555+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47556 unsigned long old, unsigned long bits);
47557- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47558+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47559 unsigned long bits);
47560- int (*merge_extent_hook)(struct inode *inode,
47561+ int (* const merge_extent_hook)(struct inode *inode,
47562 struct extent_state *new,
47563 struct extent_state *other);
47564- int (*split_extent_hook)(struct inode *inode,
47565+ int (* const split_extent_hook)(struct inode *inode,
47566 struct extent_state *orig, u64 split);
47567- int (*write_cache_pages_lock_hook)(struct page *page);
47568+ int (* const write_cache_pages_lock_hook)(struct page *page);
47569 };
47570
47571 struct extent_io_tree {
47572@@ -88,7 +88,7 @@ struct extent_io_tree {
47573 u64 dirty_bytes;
47574 spinlock_t lock;
47575 spinlock_t buffer_lock;
47576- struct extent_io_ops *ops;
47577+ const struct extent_io_ops *ops;
47578 };
47579
47580 struct extent_state {
47581diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47582index cb2849f..3718fb4 100644
47583--- a/fs/btrfs/free-space-cache.c
47584+++ b/fs/btrfs/free-space-cache.c
47585@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47586
47587 while(1) {
47588 if (entry->bytes < bytes || entry->offset < min_start) {
47589- struct rb_node *node;
47590-
47591 node = rb_next(&entry->offset_index);
47592 if (!node)
47593 break;
47594@@ -1226,7 +1224,7 @@ again:
47595 */
47596 while (entry->bitmap || found_bitmap ||
47597 (!entry->bitmap && entry->bytes < min_bytes)) {
47598- struct rb_node *node = rb_next(&entry->offset_index);
47599+ node = rb_next(&entry->offset_index);
47600
47601 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47602 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47603diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47604index e03a836..323837e 100644
47605--- a/fs/btrfs/inode.c
47606+++ b/fs/btrfs/inode.c
47607@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47608 static const struct address_space_operations btrfs_aops;
47609 static const struct address_space_operations btrfs_symlink_aops;
47610 static const struct file_operations btrfs_dir_file_operations;
47611-static struct extent_io_ops btrfs_extent_io_ops;
47612+static const struct extent_io_ops btrfs_extent_io_ops;
47613
47614 static struct kmem_cache *btrfs_inode_cachep;
47615 struct kmem_cache *btrfs_trans_handle_cachep;
47616@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47617 1, 0, NULL, GFP_NOFS);
47618 while (start < end) {
47619 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47620+ BUG_ON(!async_cow);
47621 async_cow->inode = inode;
47622 async_cow->root = root;
47623 async_cow->locked_page = locked_page;
47624@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47625 inline_size = btrfs_file_extent_inline_item_len(leaf,
47626 btrfs_item_nr(leaf, path->slots[0]));
47627 tmp = kmalloc(inline_size, GFP_NOFS);
47628+ if (!tmp)
47629+ return -ENOMEM;
47630 ptr = btrfs_file_extent_inline_start(item);
47631
47632 read_extent_buffer(leaf, tmp, ptr, inline_size);
47633@@ -5410,7 +5413,7 @@ fail:
47634 return -ENOMEM;
47635 }
47636
47637-static int btrfs_getattr(struct vfsmount *mnt,
47638+int btrfs_getattr(struct vfsmount *mnt,
47639 struct dentry *dentry, struct kstat *stat)
47640 {
47641 struct inode *inode = dentry->d_inode;
47642@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47643 return 0;
47644 }
47645
47646+EXPORT_SYMBOL(btrfs_getattr);
47647+
47648+dev_t get_btrfs_dev_from_inode(struct inode *inode)
47649+{
47650+ return BTRFS_I(inode)->root->anon_super.s_dev;
47651+}
47652+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47653+
47654 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47655 struct inode *new_dir, struct dentry *new_dentry)
47656 {
47657@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47658 .fsync = btrfs_sync_file,
47659 };
47660
47661-static struct extent_io_ops btrfs_extent_io_ops = {
47662+static const struct extent_io_ops btrfs_extent_io_ops = {
47663 .fill_delalloc = run_delalloc_range,
47664 .submit_bio_hook = btrfs_submit_bio_hook,
47665 .merge_bio_hook = btrfs_merge_bio_hook,
47666diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47667index ab7ab53..94e0781 100644
47668--- a/fs/btrfs/relocation.c
47669+++ b/fs/btrfs/relocation.c
47670@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47671 }
47672 spin_unlock(&rc->reloc_root_tree.lock);
47673
47674- BUG_ON((struct btrfs_root *)node->data != root);
47675+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
47676
47677 if (!del) {
47678 spin_lock(&rc->reloc_root_tree.lock);
47679diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47680index a240b6f..4ce16ef 100644
47681--- a/fs/btrfs/sysfs.c
47682+++ b/fs/btrfs/sysfs.c
47683@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47684 complete(&root->kobj_unregister);
47685 }
47686
47687-static struct sysfs_ops btrfs_super_attr_ops = {
47688+static const struct sysfs_ops btrfs_super_attr_ops = {
47689 .show = btrfs_super_attr_show,
47690 .store = btrfs_super_attr_store,
47691 };
47692
47693-static struct sysfs_ops btrfs_root_attr_ops = {
47694+static const struct sysfs_ops btrfs_root_attr_ops = {
47695 .show = btrfs_root_attr_show,
47696 .store = btrfs_root_attr_store,
47697 };
47698diff --git a/fs/buffer.c b/fs/buffer.c
47699index 6fa5302..395d9f6 100644
47700--- a/fs/buffer.c
47701+++ b/fs/buffer.c
47702@@ -25,6 +25,7 @@
47703 #include <linux/percpu.h>
47704 #include <linux/slab.h>
47705 #include <linux/capability.h>
47706+#include <linux/security.h>
47707 #include <linux/blkdev.h>
47708 #include <linux/file.h>
47709 #include <linux/quotaops.h>
47710diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47711index 3797e00..ce776f6 100644
47712--- a/fs/cachefiles/bind.c
47713+++ b/fs/cachefiles/bind.c
47714@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47715 args);
47716
47717 /* start by checking things over */
47718- ASSERT(cache->fstop_percent >= 0 &&
47719- cache->fstop_percent < cache->fcull_percent &&
47720+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
47721 cache->fcull_percent < cache->frun_percent &&
47722 cache->frun_percent < 100);
47723
47724- ASSERT(cache->bstop_percent >= 0 &&
47725- cache->bstop_percent < cache->bcull_percent &&
47726+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
47727 cache->bcull_percent < cache->brun_percent &&
47728 cache->brun_percent < 100);
47729
47730diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47731index 4618516..bb30d01 100644
47732--- a/fs/cachefiles/daemon.c
47733+++ b/fs/cachefiles/daemon.c
47734@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47735 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47736 return -EIO;
47737
47738- if (datalen < 0 || datalen > PAGE_SIZE - 1)
47739+ if (datalen > PAGE_SIZE - 1)
47740 return -EOPNOTSUPP;
47741
47742 /* drag the command string into the kernel so we can parse it */
47743@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47744 if (args[0] != '%' || args[1] != '\0')
47745 return -EINVAL;
47746
47747- if (fstop < 0 || fstop >= cache->fcull_percent)
47748+ if (fstop >= cache->fcull_percent)
47749 return cachefiles_daemon_range_error(cache, args);
47750
47751 cache->fstop_percent = fstop;
47752@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47753 if (args[0] != '%' || args[1] != '\0')
47754 return -EINVAL;
47755
47756- if (bstop < 0 || bstop >= cache->bcull_percent)
47757+ if (bstop >= cache->bcull_percent)
47758 return cachefiles_daemon_range_error(cache, args);
47759
47760 cache->bstop_percent = bstop;
47761diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47762index f7c255f..fcd61de 100644
47763--- a/fs/cachefiles/internal.h
47764+++ b/fs/cachefiles/internal.h
47765@@ -56,7 +56,7 @@ struct cachefiles_cache {
47766 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47767 struct rb_root active_nodes; /* active nodes (can't be culled) */
47768 rwlock_t active_lock; /* lock for active_nodes */
47769- atomic_t gravecounter; /* graveyard uniquifier */
47770+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47771 unsigned frun_percent; /* when to stop culling (% files) */
47772 unsigned fcull_percent; /* when to start culling (% files) */
47773 unsigned fstop_percent; /* when to stop allocating (% files) */
47774@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47775 * proc.c
47776 */
47777 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47778-extern atomic_t cachefiles_lookup_histogram[HZ];
47779-extern atomic_t cachefiles_mkdir_histogram[HZ];
47780-extern atomic_t cachefiles_create_histogram[HZ];
47781+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47782+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47783+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47784
47785 extern int __init cachefiles_proc_init(void);
47786 extern void cachefiles_proc_cleanup(void);
47787 static inline
47788-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47789+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47790 {
47791 unsigned long jif = jiffies - start_jif;
47792 if (jif >= HZ)
47793 jif = HZ - 1;
47794- atomic_inc(&histogram[jif]);
47795+ atomic_inc_unchecked(&histogram[jif]);
47796 }
47797
47798 #else
47799diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47800index 14ac480..a62766c 100644
47801--- a/fs/cachefiles/namei.c
47802+++ b/fs/cachefiles/namei.c
47803@@ -250,7 +250,7 @@ try_again:
47804 /* first step is to make up a grave dentry in the graveyard */
47805 sprintf(nbuffer, "%08x%08x",
47806 (uint32_t) get_seconds(),
47807- (uint32_t) atomic_inc_return(&cache->gravecounter));
47808+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47809
47810 /* do the multiway lock magic */
47811 trap = lock_rename(cache->graveyard, dir);
47812diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47813index eccd339..4c1d995 100644
47814--- a/fs/cachefiles/proc.c
47815+++ b/fs/cachefiles/proc.c
47816@@ -14,9 +14,9 @@
47817 #include <linux/seq_file.h>
47818 #include "internal.h"
47819
47820-atomic_t cachefiles_lookup_histogram[HZ];
47821-atomic_t cachefiles_mkdir_histogram[HZ];
47822-atomic_t cachefiles_create_histogram[HZ];
47823+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47824+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47825+atomic_unchecked_t cachefiles_create_histogram[HZ];
47826
47827 /*
47828 * display the latency histogram
47829@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47830 return 0;
47831 default:
47832 index = (unsigned long) v - 3;
47833- x = atomic_read(&cachefiles_lookup_histogram[index]);
47834- y = atomic_read(&cachefiles_mkdir_histogram[index]);
47835- z = atomic_read(&cachefiles_create_histogram[index]);
47836+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47837+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47838+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47839 if (x == 0 && y == 0 && z == 0)
47840 return 0;
47841
47842diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47843index a6c8c6f..5cf8517 100644
47844--- a/fs/cachefiles/rdwr.c
47845+++ b/fs/cachefiles/rdwr.c
47846@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47847 old_fs = get_fs();
47848 set_fs(KERNEL_DS);
47849 ret = file->f_op->write(
47850- file, (const void __user *) data, len, &pos);
47851+ file, (const void __force_user *) data, len, &pos);
47852 set_fs(old_fs);
47853 kunmap(page);
47854 if (ret != len)
47855diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47856index 42cec2a..2aba466 100644
47857--- a/fs/cifs/cifs_debug.c
47858+++ b/fs/cifs/cifs_debug.c
47859@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47860 tcon = list_entry(tmp3,
47861 struct cifsTconInfo,
47862 tcon_list);
47863- atomic_set(&tcon->num_smbs_sent, 0);
47864- atomic_set(&tcon->num_writes, 0);
47865- atomic_set(&tcon->num_reads, 0);
47866- atomic_set(&tcon->num_oplock_brks, 0);
47867- atomic_set(&tcon->num_opens, 0);
47868- atomic_set(&tcon->num_posixopens, 0);
47869- atomic_set(&tcon->num_posixmkdirs, 0);
47870- atomic_set(&tcon->num_closes, 0);
47871- atomic_set(&tcon->num_deletes, 0);
47872- atomic_set(&tcon->num_mkdirs, 0);
47873- atomic_set(&tcon->num_rmdirs, 0);
47874- atomic_set(&tcon->num_renames, 0);
47875- atomic_set(&tcon->num_t2renames, 0);
47876- atomic_set(&tcon->num_ffirst, 0);
47877- atomic_set(&tcon->num_fnext, 0);
47878- atomic_set(&tcon->num_fclose, 0);
47879- atomic_set(&tcon->num_hardlinks, 0);
47880- atomic_set(&tcon->num_symlinks, 0);
47881- atomic_set(&tcon->num_locks, 0);
47882+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47883+ atomic_set_unchecked(&tcon->num_writes, 0);
47884+ atomic_set_unchecked(&tcon->num_reads, 0);
47885+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47886+ atomic_set_unchecked(&tcon->num_opens, 0);
47887+ atomic_set_unchecked(&tcon->num_posixopens, 0);
47888+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47889+ atomic_set_unchecked(&tcon->num_closes, 0);
47890+ atomic_set_unchecked(&tcon->num_deletes, 0);
47891+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
47892+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
47893+ atomic_set_unchecked(&tcon->num_renames, 0);
47894+ atomic_set_unchecked(&tcon->num_t2renames, 0);
47895+ atomic_set_unchecked(&tcon->num_ffirst, 0);
47896+ atomic_set_unchecked(&tcon->num_fnext, 0);
47897+ atomic_set_unchecked(&tcon->num_fclose, 0);
47898+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
47899+ atomic_set_unchecked(&tcon->num_symlinks, 0);
47900+ atomic_set_unchecked(&tcon->num_locks, 0);
47901 }
47902 }
47903 }
47904@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47905 if (tcon->need_reconnect)
47906 seq_puts(m, "\tDISCONNECTED ");
47907 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47908- atomic_read(&tcon->num_smbs_sent),
47909- atomic_read(&tcon->num_oplock_brks));
47910+ atomic_read_unchecked(&tcon->num_smbs_sent),
47911+ atomic_read_unchecked(&tcon->num_oplock_brks));
47912 seq_printf(m, "\nReads: %d Bytes: %lld",
47913- atomic_read(&tcon->num_reads),
47914+ atomic_read_unchecked(&tcon->num_reads),
47915 (long long)(tcon->bytes_read));
47916 seq_printf(m, "\nWrites: %d Bytes: %lld",
47917- atomic_read(&tcon->num_writes),
47918+ atomic_read_unchecked(&tcon->num_writes),
47919 (long long)(tcon->bytes_written));
47920 seq_printf(m, "\nFlushes: %d",
47921- atomic_read(&tcon->num_flushes));
47922+ atomic_read_unchecked(&tcon->num_flushes));
47923 seq_printf(m, "\nLocks: %d HardLinks: %d "
47924 "Symlinks: %d",
47925- atomic_read(&tcon->num_locks),
47926- atomic_read(&tcon->num_hardlinks),
47927- atomic_read(&tcon->num_symlinks));
47928+ atomic_read_unchecked(&tcon->num_locks),
47929+ atomic_read_unchecked(&tcon->num_hardlinks),
47930+ atomic_read_unchecked(&tcon->num_symlinks));
47931 seq_printf(m, "\nOpens: %d Closes: %d "
47932 "Deletes: %d",
47933- atomic_read(&tcon->num_opens),
47934- atomic_read(&tcon->num_closes),
47935- atomic_read(&tcon->num_deletes));
47936+ atomic_read_unchecked(&tcon->num_opens),
47937+ atomic_read_unchecked(&tcon->num_closes),
47938+ atomic_read_unchecked(&tcon->num_deletes));
47939 seq_printf(m, "\nPosix Opens: %d "
47940 "Posix Mkdirs: %d",
47941- atomic_read(&tcon->num_posixopens),
47942- atomic_read(&tcon->num_posixmkdirs));
47943+ atomic_read_unchecked(&tcon->num_posixopens),
47944+ atomic_read_unchecked(&tcon->num_posixmkdirs));
47945 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47946- atomic_read(&tcon->num_mkdirs),
47947- atomic_read(&tcon->num_rmdirs));
47948+ atomic_read_unchecked(&tcon->num_mkdirs),
47949+ atomic_read_unchecked(&tcon->num_rmdirs));
47950 seq_printf(m, "\nRenames: %d T2 Renames %d",
47951- atomic_read(&tcon->num_renames),
47952- atomic_read(&tcon->num_t2renames));
47953+ atomic_read_unchecked(&tcon->num_renames),
47954+ atomic_read_unchecked(&tcon->num_t2renames));
47955 seq_printf(m, "\nFindFirst: %d FNext %d "
47956 "FClose %d",
47957- atomic_read(&tcon->num_ffirst),
47958- atomic_read(&tcon->num_fnext),
47959- atomic_read(&tcon->num_fclose));
47960+ atomic_read_unchecked(&tcon->num_ffirst),
47961+ atomic_read_unchecked(&tcon->num_fnext),
47962+ atomic_read_unchecked(&tcon->num_fclose));
47963 }
47964 }
47965 }
47966diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47967index 1445407..68cb0dc 100644
47968--- a/fs/cifs/cifsfs.c
47969+++ b/fs/cifs/cifsfs.c
47970@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47971 cifs_req_cachep = kmem_cache_create("cifs_request",
47972 CIFSMaxBufSize +
47973 MAX_CIFS_HDR_SIZE, 0,
47974- SLAB_HWCACHE_ALIGN, NULL);
47975+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47976 if (cifs_req_cachep == NULL)
47977 return -ENOMEM;
47978
47979@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47980 efficient to alloc 1 per page off the slab compared to 17K (5page)
47981 alloc of large cifs buffers even when page debugging is on */
47982 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47983- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47984+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47985 NULL);
47986 if (cifs_sm_req_cachep == NULL) {
47987 mempool_destroy(cifs_req_poolp);
47988@@ -991,8 +991,8 @@ init_cifs(void)
47989 atomic_set(&bufAllocCount, 0);
47990 atomic_set(&smBufAllocCount, 0);
47991 #ifdef CONFIG_CIFS_STATS2
47992- atomic_set(&totBufAllocCount, 0);
47993- atomic_set(&totSmBufAllocCount, 0);
47994+ atomic_set_unchecked(&totBufAllocCount, 0);
47995+ atomic_set_unchecked(&totSmBufAllocCount, 0);
47996 #endif /* CONFIG_CIFS_STATS2 */
47997
47998 atomic_set(&midCount, 0);
47999diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48000index e29581e..1c22bab 100644
48001--- a/fs/cifs/cifsglob.h
48002+++ b/fs/cifs/cifsglob.h
48003@@ -252,28 +252,28 @@ struct cifsTconInfo {
48004 __u16 Flags; /* optional support bits */
48005 enum statusEnum tidStatus;
48006 #ifdef CONFIG_CIFS_STATS
48007- atomic_t num_smbs_sent;
48008- atomic_t num_writes;
48009- atomic_t num_reads;
48010- atomic_t num_flushes;
48011- atomic_t num_oplock_brks;
48012- atomic_t num_opens;
48013- atomic_t num_closes;
48014- atomic_t num_deletes;
48015- atomic_t num_mkdirs;
48016- atomic_t num_posixopens;
48017- atomic_t num_posixmkdirs;
48018- atomic_t num_rmdirs;
48019- atomic_t num_renames;
48020- atomic_t num_t2renames;
48021- atomic_t num_ffirst;
48022- atomic_t num_fnext;
48023- atomic_t num_fclose;
48024- atomic_t num_hardlinks;
48025- atomic_t num_symlinks;
48026- atomic_t num_locks;
48027- atomic_t num_acl_get;
48028- atomic_t num_acl_set;
48029+ atomic_unchecked_t num_smbs_sent;
48030+ atomic_unchecked_t num_writes;
48031+ atomic_unchecked_t num_reads;
48032+ atomic_unchecked_t num_flushes;
48033+ atomic_unchecked_t num_oplock_brks;
48034+ atomic_unchecked_t num_opens;
48035+ atomic_unchecked_t num_closes;
48036+ atomic_unchecked_t num_deletes;
48037+ atomic_unchecked_t num_mkdirs;
48038+ atomic_unchecked_t num_posixopens;
48039+ atomic_unchecked_t num_posixmkdirs;
48040+ atomic_unchecked_t num_rmdirs;
48041+ atomic_unchecked_t num_renames;
48042+ atomic_unchecked_t num_t2renames;
48043+ atomic_unchecked_t num_ffirst;
48044+ atomic_unchecked_t num_fnext;
48045+ atomic_unchecked_t num_fclose;
48046+ atomic_unchecked_t num_hardlinks;
48047+ atomic_unchecked_t num_symlinks;
48048+ atomic_unchecked_t num_locks;
48049+ atomic_unchecked_t num_acl_get;
48050+ atomic_unchecked_t num_acl_set;
48051 #ifdef CONFIG_CIFS_STATS2
48052 unsigned long long time_writes;
48053 unsigned long long time_reads;
48054@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
48055 }
48056
48057 #ifdef CONFIG_CIFS_STATS
48058-#define cifs_stats_inc atomic_inc
48059+#define cifs_stats_inc atomic_inc_unchecked
48060
48061 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
48062 unsigned int bytes)
48063@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48064 /* Various Debug counters */
48065 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48066 #ifdef CONFIG_CIFS_STATS2
48067-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48068-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48069+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48070+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48071 #endif
48072 GLOBAL_EXTERN atomic_t smBufAllocCount;
48073 GLOBAL_EXTERN atomic_t midCount;
48074diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48075index fc1e048..28b3441 100644
48076--- a/fs/cifs/link.c
48077+++ b/fs/cifs/link.c
48078@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
48079
48080 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48081 {
48082- char *p = nd_get_link(nd);
48083+ const char *p = nd_get_link(nd);
48084 if (!IS_ERR(p))
48085 kfree(p);
48086 }
48087diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48088index 95b82e8..12a538d 100644
48089--- a/fs/cifs/misc.c
48090+++ b/fs/cifs/misc.c
48091@@ -155,7 +155,7 @@ cifs_buf_get(void)
48092 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48093 atomic_inc(&bufAllocCount);
48094 #ifdef CONFIG_CIFS_STATS2
48095- atomic_inc(&totBufAllocCount);
48096+ atomic_inc_unchecked(&totBufAllocCount);
48097 #endif /* CONFIG_CIFS_STATS2 */
48098 }
48099
48100@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48101 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48102 atomic_inc(&smBufAllocCount);
48103 #ifdef CONFIG_CIFS_STATS2
48104- atomic_inc(&totSmBufAllocCount);
48105+ atomic_inc_unchecked(&totSmBufAllocCount);
48106 #endif /* CONFIG_CIFS_STATS2 */
48107
48108 }
48109diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48110index a5bf577..6d19845 100644
48111--- a/fs/coda/cache.c
48112+++ b/fs/coda/cache.c
48113@@ -24,14 +24,14 @@
48114 #include <linux/coda_fs_i.h>
48115 #include <linux/coda_cache.h>
48116
48117-static atomic_t permission_epoch = ATOMIC_INIT(0);
48118+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48119
48120 /* replace or extend an acl cache hit */
48121 void coda_cache_enter(struct inode *inode, int mask)
48122 {
48123 struct coda_inode_info *cii = ITOC(inode);
48124
48125- cii->c_cached_epoch = atomic_read(&permission_epoch);
48126+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48127 if (cii->c_uid != current_fsuid()) {
48128 cii->c_uid = current_fsuid();
48129 cii->c_cached_perm = mask;
48130@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48131 void coda_cache_clear_inode(struct inode *inode)
48132 {
48133 struct coda_inode_info *cii = ITOC(inode);
48134- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48135+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48136 }
48137
48138 /* remove all acl caches */
48139 void coda_cache_clear_all(struct super_block *sb)
48140 {
48141- atomic_inc(&permission_epoch);
48142+ atomic_inc_unchecked(&permission_epoch);
48143 }
48144
48145
48146@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48147
48148 hit = (mask & cii->c_cached_perm) == mask &&
48149 cii->c_uid == current_fsuid() &&
48150- cii->c_cached_epoch == atomic_read(&permission_epoch);
48151+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48152
48153 return hit;
48154 }
48155diff --git a/fs/compat.c b/fs/compat.c
48156index d1e2411..c2ef8ed 100644
48157--- a/fs/compat.c
48158+++ b/fs/compat.c
48159@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48160 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48161 {
48162 compat_ino_t ino = stat->ino;
48163- typeof(ubuf->st_uid) uid = 0;
48164- typeof(ubuf->st_gid) gid = 0;
48165+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48166+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48167 int err;
48168
48169 SET_UID(uid, stat->uid);
48170@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48171
48172 set_fs(KERNEL_DS);
48173 /* The __user pointer cast is valid because of the set_fs() */
48174- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48175+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48176 set_fs(oldfs);
48177 /* truncating is ok because it's a user address */
48178 if (!ret)
48179@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48180
48181 struct compat_readdir_callback {
48182 struct compat_old_linux_dirent __user *dirent;
48183+ struct file * file;
48184 int result;
48185 };
48186
48187@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48188 buf->result = -EOVERFLOW;
48189 return -EOVERFLOW;
48190 }
48191+
48192+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48193+ return 0;
48194+
48195 buf->result++;
48196 dirent = buf->dirent;
48197 if (!access_ok(VERIFY_WRITE, dirent,
48198@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48199
48200 buf.result = 0;
48201 buf.dirent = dirent;
48202+ buf.file = file;
48203
48204 error = vfs_readdir(file, compat_fillonedir, &buf);
48205 if (buf.result)
48206@@ -899,6 +905,7 @@ struct compat_linux_dirent {
48207 struct compat_getdents_callback {
48208 struct compat_linux_dirent __user *current_dir;
48209 struct compat_linux_dirent __user *previous;
48210+ struct file * file;
48211 int count;
48212 int error;
48213 };
48214@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48215 buf->error = -EOVERFLOW;
48216 return -EOVERFLOW;
48217 }
48218+
48219+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48220+ return 0;
48221+
48222 dirent = buf->previous;
48223 if (dirent) {
48224 if (__put_user(offset, &dirent->d_off))
48225@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48226 buf.previous = NULL;
48227 buf.count = count;
48228 buf.error = 0;
48229+ buf.file = file;
48230
48231 error = vfs_readdir(file, compat_filldir, &buf);
48232 if (error >= 0)
48233@@ -987,6 +999,7 @@ out:
48234 struct compat_getdents_callback64 {
48235 struct linux_dirent64 __user *current_dir;
48236 struct linux_dirent64 __user *previous;
48237+ struct file * file;
48238 int count;
48239 int error;
48240 };
48241@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48242 buf->error = -EINVAL; /* only used if we fail.. */
48243 if (reclen > buf->count)
48244 return -EINVAL;
48245+
48246+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48247+ return 0;
48248+
48249 dirent = buf->previous;
48250
48251 if (dirent) {
48252@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48253 buf.previous = NULL;
48254 buf.count = count;
48255 buf.error = 0;
48256+ buf.file = file;
48257
48258 error = vfs_readdir(file, compat_filldir64, &buf);
48259 if (error >= 0)
48260 error = buf.error;
48261 lastdirent = buf.previous;
48262 if (lastdirent) {
48263- typeof(lastdirent->d_off) d_off = file->f_pos;
48264+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48265 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48266 error = -EFAULT;
48267 else
48268@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48269 * verify all the pointers
48270 */
48271 ret = -EINVAL;
48272- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48273+ if (nr_segs > UIO_MAXIOV)
48274 goto out;
48275 if (!file->f_op)
48276 goto out;
48277@@ -1454,6 +1472,10 @@ out:
48278 return ret;
48279 }
48280
48281+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48282+extern atomic64_unchecked_t global_exec_counter;
48283+#endif
48284+
48285 /*
48286 * compat_do_execve() is mostly a copy of do_execve(), with the exception
48287 * that it processes 32 bit argv and envp pointers.
48288@@ -1463,11 +1485,35 @@ int compat_do_execve(char * filename,
48289 compat_uptr_t __user *envp,
48290 struct pt_regs * regs)
48291 {
48292+#ifdef CONFIG_GRKERNSEC
48293+ struct file *old_exec_file;
48294+ struct acl_subject_label *old_acl;
48295+ struct rlimit old_rlim[RLIM_NLIMITS];
48296+#endif
48297 struct linux_binprm *bprm;
48298 struct file *file;
48299 struct files_struct *displaced;
48300 bool clear_in_exec;
48301 int retval;
48302+ const struct cred *cred = current_cred();
48303+
48304+ /*
48305+ * We move the actual failure in case of RLIMIT_NPROC excess from
48306+ * set*uid() to execve() because too many poorly written programs
48307+ * don't check setuid() return code. Here we additionally recheck
48308+ * whether NPROC limit is still exceeded.
48309+ */
48310+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48311+
48312+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48313+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48314+ retval = -EAGAIN;
48315+ goto out_ret;
48316+ }
48317+
48318+ /* We're below the limit (still or again), so we don't want to make
48319+ * further execve() calls fail. */
48320+ current->flags &= ~PF_NPROC_EXCEEDED;
48321
48322 retval = unshare_files(&displaced);
48323 if (retval)
48324@@ -1493,12 +1539,26 @@ int compat_do_execve(char * filename,
48325 if (IS_ERR(file))
48326 goto out_unmark;
48327
48328+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48329+ retval = -EPERM;
48330+ goto out_file;
48331+ }
48332+
48333 sched_exec();
48334
48335 bprm->file = file;
48336 bprm->filename = filename;
48337 bprm->interp = filename;
48338
48339+ if (gr_process_user_ban()) {
48340+ retval = -EPERM;
48341+ goto out_file;
48342+ }
48343+
48344+ retval = -EACCES;
48345+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48346+ goto out_file;
48347+
48348 retval = bprm_mm_init(bprm);
48349 if (retval)
48350 goto out_file;
48351@@ -1528,11 +1588,45 @@ int compat_do_execve(char * filename,
48352 if (retval < 0)
48353 goto out;
48354
48355+ if (!gr_tpe_allow(file)) {
48356+ retval = -EACCES;
48357+ goto out;
48358+ }
48359+
48360+ if (gr_check_crash_exec(file)) {
48361+ retval = -EACCES;
48362+ goto out;
48363+ }
48364+
48365+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48366+
48367+ gr_handle_exec_args_compat(bprm, argv);
48368+
48369+#ifdef CONFIG_GRKERNSEC
48370+ old_acl = current->acl;
48371+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48372+ old_exec_file = current->exec_file;
48373+ get_file(file);
48374+ current->exec_file = file;
48375+#endif
48376+
48377+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48378+ bprm->unsafe);
48379+ if (retval < 0)
48380+ goto out_fail;
48381+
48382 retval = search_binary_handler(bprm, regs);
48383 if (retval < 0)
48384- goto out;
48385+ goto out_fail;
48386+#ifdef CONFIG_GRKERNSEC
48387+ if (old_exec_file)
48388+ fput(old_exec_file);
48389+#endif
48390
48391 /* execve succeeded */
48392+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48393+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
48394+#endif
48395 current->fs->in_exec = 0;
48396 current->in_execve = 0;
48397 acct_update_integrals(current);
48398@@ -1541,6 +1635,14 @@ int compat_do_execve(char * filename,
48399 put_files_struct(displaced);
48400 return retval;
48401
48402+out_fail:
48403+#ifdef CONFIG_GRKERNSEC
48404+ current->acl = old_acl;
48405+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48406+ fput(current->exec_file);
48407+ current->exec_file = old_exec_file;
48408+#endif
48409+
48410 out:
48411 if (bprm->mm) {
48412 acct_arg_size(bprm, 0);
48413@@ -1711,6 +1813,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48414 struct fdtable *fdt;
48415 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48416
48417+ pax_track_stack();
48418+
48419 if (n < 0)
48420 goto out_nofds;
48421
48422@@ -2151,7 +2255,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48423 oldfs = get_fs();
48424 set_fs(KERNEL_DS);
48425 /* The __user pointer casts are valid because of the set_fs() */
48426- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48427+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48428 set_fs(oldfs);
48429
48430 if (err)
48431diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48432index 0adced2..bbb1b0d 100644
48433--- a/fs/compat_binfmt_elf.c
48434+++ b/fs/compat_binfmt_elf.c
48435@@ -29,10 +29,12 @@
48436 #undef elfhdr
48437 #undef elf_phdr
48438 #undef elf_note
48439+#undef elf_dyn
48440 #undef elf_addr_t
48441 #define elfhdr elf32_hdr
48442 #define elf_phdr elf32_phdr
48443 #define elf_note elf32_note
48444+#define elf_dyn Elf32_Dyn
48445 #define elf_addr_t Elf32_Addr
48446
48447 /*
48448diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48449index d84e705..d8c364c 100644
48450--- a/fs/compat_ioctl.c
48451+++ b/fs/compat_ioctl.c
48452@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48453 up = (struct compat_video_spu_palette __user *) arg;
48454 err = get_user(palp, &up->palette);
48455 err |= get_user(length, &up->length);
48456+ if (err)
48457+ return -EFAULT;
48458
48459 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48460 err = put_user(compat_ptr(palp), &up_native->palette);
48461@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48462 return -EFAULT;
48463 if (__get_user(udata, &ss32->iomem_base))
48464 return -EFAULT;
48465- ss.iomem_base = compat_ptr(udata);
48466+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48467 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48468 __get_user(ss.port_high, &ss32->port_high))
48469 return -EFAULT;
48470@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48471 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48472 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48473 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48474- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48475+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48476 return -EFAULT;
48477
48478 return ioctl_preallocate(file, p);
48479diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48480index 8e48b52..f01ed91 100644
48481--- a/fs/configfs/dir.c
48482+++ b/fs/configfs/dir.c
48483@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48484 }
48485 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48486 struct configfs_dirent *next;
48487- const char * name;
48488+ const unsigned char * name;
48489+ char d_name[sizeof(next->s_dentry->d_iname)];
48490 int len;
48491
48492 next = list_entry(p, struct configfs_dirent,
48493@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48494 continue;
48495
48496 name = configfs_get_name(next);
48497- len = strlen(name);
48498+ if (next->s_dentry && name == next->s_dentry->d_iname) {
48499+ len = next->s_dentry->d_name.len;
48500+ memcpy(d_name, name, len);
48501+ name = d_name;
48502+ } else
48503+ len = strlen(name);
48504 if (next->s_dentry)
48505 ino = next->s_dentry->d_inode->i_ino;
48506 else
48507diff --git a/fs/dcache.c b/fs/dcache.c
48508index 44c0aea..2529092 100644
48509--- a/fs/dcache.c
48510+++ b/fs/dcache.c
48511@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48512
48513 static struct kmem_cache *dentry_cache __read_mostly;
48514
48515-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48516-
48517 /*
48518 * This is the single most critical data structure when it comes
48519 * to the dcache: the hashtable for lookups. Somebody should try
48520@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48521 mempages -= reserve;
48522
48523 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48524- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48525+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48526
48527 dcache_init();
48528 inode_init();
48529diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48530index 39c6ee8..dcee0f1 100644
48531--- a/fs/debugfs/inode.c
48532+++ b/fs/debugfs/inode.c
48533@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48534 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48535 {
48536 return debugfs_create_file(name,
48537+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48538+ S_IFDIR | S_IRWXU,
48539+#else
48540 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48541+#endif
48542 parent, NULL, NULL);
48543 }
48544 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48545diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48546index c010ecf..a8d8c59 100644
48547--- a/fs/dlm/lockspace.c
48548+++ b/fs/dlm/lockspace.c
48549@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48550 kfree(ls);
48551 }
48552
48553-static struct sysfs_ops dlm_attr_ops = {
48554+static const struct sysfs_ops dlm_attr_ops = {
48555 .show = dlm_attr_show,
48556 .store = dlm_attr_store,
48557 };
48558diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48559index 7a5f1ac..205b034 100644
48560--- a/fs/ecryptfs/crypto.c
48561+++ b/fs/ecryptfs/crypto.c
48562@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48563 rc);
48564 goto out;
48565 }
48566- if (unlikely(ecryptfs_verbosity > 0)) {
48567- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48568- "with iv:\n");
48569- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48570- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48571- "encryption:\n");
48572- ecryptfs_dump_hex((char *)
48573- (page_address(page)
48574- + (extent_offset * crypt_stat->extent_size)),
48575- 8);
48576- }
48577 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48578 page, (extent_offset
48579 * crypt_stat->extent_size),
48580@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48581 goto out;
48582 }
48583 rc = 0;
48584- if (unlikely(ecryptfs_verbosity > 0)) {
48585- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48586- "rc = [%d]\n", (extent_base + extent_offset),
48587- rc);
48588- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48589- "encryption:\n");
48590- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48591- }
48592 out:
48593 return rc;
48594 }
48595@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48596 rc);
48597 goto out;
48598 }
48599- if (unlikely(ecryptfs_verbosity > 0)) {
48600- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48601- "with iv:\n");
48602- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48603- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48604- "decryption:\n");
48605- ecryptfs_dump_hex((char *)
48606- (page_address(enc_extent_page)
48607- + (extent_offset * crypt_stat->extent_size)),
48608- 8);
48609- }
48610 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48611 (extent_offset
48612 * crypt_stat->extent_size),
48613@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48614 goto out;
48615 }
48616 rc = 0;
48617- if (unlikely(ecryptfs_verbosity > 0)) {
48618- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48619- "rc = [%d]\n", (extent_base + extent_offset),
48620- rc);
48621- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48622- "decryption:\n");
48623- ecryptfs_dump_hex((char *)(page_address(page)
48624- + (extent_offset
48625- * crypt_stat->extent_size)), 8);
48626- }
48627 out:
48628 return rc;
48629 }
48630diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48631index 88ba4d4..073f003 100644
48632--- a/fs/ecryptfs/inode.c
48633+++ b/fs/ecryptfs/inode.c
48634@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48635 old_fs = get_fs();
48636 set_fs(get_ds());
48637 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48638- (char __user *)lower_buf,
48639+ (char __force_user *)lower_buf,
48640 lower_bufsiz);
48641 set_fs(old_fs);
48642 if (rc < 0)
48643@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48644 }
48645 old_fs = get_fs();
48646 set_fs(get_ds());
48647- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48648+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48649 set_fs(old_fs);
48650 if (rc < 0)
48651 goto out_free;
48652diff --git a/fs/exec.c b/fs/exec.c
48653index 86fafc6..6272c0e 100644
48654--- a/fs/exec.c
48655+++ b/fs/exec.c
48656@@ -56,12 +56,28 @@
48657 #include <linux/fsnotify.h>
48658 #include <linux/fs_struct.h>
48659 #include <linux/pipe_fs_i.h>
48660+#include <linux/random.h>
48661+#include <linux/seq_file.h>
48662+
48663+#ifdef CONFIG_PAX_REFCOUNT
48664+#include <linux/kallsyms.h>
48665+#include <linux/kdebug.h>
48666+#endif
48667
48668 #include <asm/uaccess.h>
48669 #include <asm/mmu_context.h>
48670 #include <asm/tlb.h>
48671 #include "internal.h"
48672
48673+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48674+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48675+#endif
48676+
48677+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48678+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48679+EXPORT_SYMBOL(pax_set_initial_flags_func);
48680+#endif
48681+
48682 int core_uses_pid;
48683 char core_pattern[CORENAME_MAX_SIZE] = "core";
48684 unsigned int core_pipe_limit;
48685@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48686 int write)
48687 {
48688 struct page *page;
48689- int ret;
48690
48691-#ifdef CONFIG_STACK_GROWSUP
48692- if (write) {
48693- ret = expand_stack_downwards(bprm->vma, pos);
48694- if (ret < 0)
48695- return NULL;
48696- }
48697-#endif
48698- ret = get_user_pages(current, bprm->mm, pos,
48699- 1, write, 1, &page, NULL);
48700- if (ret <= 0)
48701+ if (0 > expand_stack_downwards(bprm->vma, pos))
48702+ return NULL;
48703+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48704 return NULL;
48705
48706 if (write) {
48707@@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48708 if (size <= ARG_MAX)
48709 return page;
48710
48711+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48712+ // only allow 1MB for argv+env on suid/sgid binaries
48713+ // to prevent easy ASLR exhaustion
48714+ if (((bprm->cred->euid != current_euid()) ||
48715+ (bprm->cred->egid != current_egid())) &&
48716+ (size > (1024 * 1024))) {
48717+ put_page(page);
48718+ return NULL;
48719+ }
48720+#endif
48721+
48722 /*
48723 * Limit to 1/4-th the stack size for the argv+env strings.
48724 * This ensures that:
48725@@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48726 vma->vm_end = STACK_TOP_MAX;
48727 vma->vm_start = vma->vm_end - PAGE_SIZE;
48728 vma->vm_flags = VM_STACK_FLAGS;
48729+
48730+#ifdef CONFIG_PAX_SEGMEXEC
48731+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48732+#endif
48733+
48734 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48735
48736 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48737@@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48738 mm->stack_vm = mm->total_vm = 1;
48739 up_write(&mm->mmap_sem);
48740 bprm->p = vma->vm_end - sizeof(void *);
48741+
48742+#ifdef CONFIG_PAX_RANDUSTACK
48743+ if (randomize_va_space)
48744+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48745+#endif
48746+
48747 return 0;
48748 err:
48749 up_write(&mm->mmap_sem);
48750@@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48751 int r;
48752 mm_segment_t oldfs = get_fs();
48753 set_fs(KERNEL_DS);
48754- r = copy_strings(argc, (char __user * __user *)argv, bprm);
48755+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48756 set_fs(oldfs);
48757 return r;
48758 }
48759@@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48760 unsigned long new_end = old_end - shift;
48761 struct mmu_gather *tlb;
48762
48763- BUG_ON(new_start > new_end);
48764+ if (new_start >= new_end || new_start < mmap_min_addr)
48765+ return -ENOMEM;
48766
48767 /*
48768 * ensure there are no vmas between where we want to go
48769@@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48770 if (vma != find_vma(mm, new_start))
48771 return -EFAULT;
48772
48773+#ifdef CONFIG_PAX_SEGMEXEC
48774+ BUG_ON(pax_find_mirror_vma(vma));
48775+#endif
48776+
48777 /*
48778 * cover the whole range: [new_start, old_end)
48779 */
48780@@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48781 stack_top = arch_align_stack(stack_top);
48782 stack_top = PAGE_ALIGN(stack_top);
48783
48784- if (unlikely(stack_top < mmap_min_addr) ||
48785- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48786- return -ENOMEM;
48787-
48788 stack_shift = vma->vm_end - stack_top;
48789
48790 bprm->p -= stack_shift;
48791@@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48792 bprm->exec -= stack_shift;
48793
48794 down_write(&mm->mmap_sem);
48795+
48796+ /* Move stack pages down in memory. */
48797+ if (stack_shift) {
48798+ ret = shift_arg_pages(vma, stack_shift);
48799+ if (ret)
48800+ goto out_unlock;
48801+ }
48802+
48803 vm_flags = VM_STACK_FLAGS;
48804
48805 /*
48806@@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48807 vm_flags &= ~VM_EXEC;
48808 vm_flags |= mm->def_flags;
48809
48810+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48811+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48812+ vm_flags &= ~VM_EXEC;
48813+
48814+#ifdef CONFIG_PAX_MPROTECT
48815+ if (mm->pax_flags & MF_PAX_MPROTECT)
48816+ vm_flags &= ~VM_MAYEXEC;
48817+#endif
48818+
48819+ }
48820+#endif
48821+
48822 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48823 vm_flags);
48824 if (ret)
48825 goto out_unlock;
48826 BUG_ON(prev != vma);
48827
48828- /* Move stack pages down in memory. */
48829- if (stack_shift) {
48830- ret = shift_arg_pages(vma, stack_shift);
48831- if (ret)
48832- goto out_unlock;
48833- }
48834-
48835 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48836 stack_size = vma->vm_end - vma->vm_start;
48837 /*
48838@@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
48839 old_fs = get_fs();
48840 set_fs(get_ds());
48841 /* The cast to a user pointer is valid due to the set_fs() */
48842- result = vfs_read(file, (void __user *)addr, count, &pos);
48843+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
48844 set_fs(old_fs);
48845 return result;
48846 }
48847@@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
48848 perf_event_comm(tsk);
48849 }
48850
48851+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
48852+{
48853+ int i, ch;
48854+
48855+ /* Copies the binary name from after last slash */
48856+ for (i = 0; (ch = *(fn++)) != '\0';) {
48857+ if (ch == '/')
48858+ i = 0; /* overwrite what we wrote */
48859+ else
48860+ if (i < len - 1)
48861+ tcomm[i++] = ch;
48862+ }
48863+ tcomm[i] = '\0';
48864+}
48865+
48866 int flush_old_exec(struct linux_binprm * bprm)
48867 {
48868 int retval;
48869@@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
48870
48871 set_mm_exe_file(bprm->mm, bprm->file);
48872
48873+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
48874 /*
48875 * Release all of the old mmap stuff
48876 */
48877@@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
48878
48879 void setup_new_exec(struct linux_binprm * bprm)
48880 {
48881- int i, ch;
48882- char * name;
48883- char tcomm[sizeof(current->comm)];
48884-
48885 arch_pick_mmap_layout(current->mm);
48886
48887 /* This is the point of no return */
48888@@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
48889 else
48890 set_dumpable(current->mm, suid_dumpable);
48891
48892- name = bprm->filename;
48893-
48894- /* Copies the binary name from after last slash */
48895- for (i=0; (ch = *(name++)) != '\0';) {
48896- if (ch == '/')
48897- i = 0; /* overwrite what we wrote */
48898- else
48899- if (i < (sizeof(tcomm) - 1))
48900- tcomm[i++] = ch;
48901- }
48902- tcomm[i] = '\0';
48903- set_task_comm(current, tcomm);
48904+ set_task_comm(current, bprm->tcomm);
48905
48906 /* Set the new mm task size. We have to do that late because it may
48907 * depend on TIF_32BIT which is only updated in flush_thread() on
48908@@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48909 }
48910 rcu_read_unlock();
48911
48912- if (p->fs->users > n_fs) {
48913+ if (atomic_read(&p->fs->users) > n_fs) {
48914 bprm->unsafe |= LSM_UNSAFE_SHARE;
48915 } else {
48916 res = -EAGAIN;
48917@@ -1339,6 +1384,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
48918
48919 EXPORT_SYMBOL(search_binary_handler);
48920
48921+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48922+atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
48923+#endif
48924+
48925 /*
48926 * sys_execve() executes a new program.
48927 */
48928@@ -1347,11 +1396,35 @@ int do_execve(char * filename,
48929 char __user *__user *envp,
48930 struct pt_regs * regs)
48931 {
48932+#ifdef CONFIG_GRKERNSEC
48933+ struct file *old_exec_file;
48934+ struct acl_subject_label *old_acl;
48935+ struct rlimit old_rlim[RLIM_NLIMITS];
48936+#endif
48937 struct linux_binprm *bprm;
48938 struct file *file;
48939 struct files_struct *displaced;
48940 bool clear_in_exec;
48941 int retval;
48942+ const struct cred *cred = current_cred();
48943+
48944+ /*
48945+ * We move the actual failure in case of RLIMIT_NPROC excess from
48946+ * set*uid() to execve() because too many poorly written programs
48947+ * don't check setuid() return code. Here we additionally recheck
48948+ * whether NPROC limit is still exceeded.
48949+ */
48950+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48951+
48952+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48953+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48954+ retval = -EAGAIN;
48955+ goto out_ret;
48956+ }
48957+
48958+ /* We're below the limit (still or again), so we don't want to make
48959+ * further execve() calls fail. */
48960+ current->flags &= ~PF_NPROC_EXCEEDED;
48961
48962 retval = unshare_files(&displaced);
48963 if (retval)
48964@@ -1377,12 +1450,27 @@ int do_execve(char * filename,
48965 if (IS_ERR(file))
48966 goto out_unmark;
48967
48968+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48969+ retval = -EPERM;
48970+ goto out_file;
48971+ }
48972+
48973 sched_exec();
48974
48975 bprm->file = file;
48976 bprm->filename = filename;
48977 bprm->interp = filename;
48978
48979+ if (gr_process_user_ban()) {
48980+ retval = -EPERM;
48981+ goto out_file;
48982+ }
48983+
48984+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48985+ retval = -EACCES;
48986+ goto out_file;
48987+ }
48988+
48989 retval = bprm_mm_init(bprm);
48990 if (retval)
48991 goto out_file;
48992@@ -1412,12 +1500,47 @@ int do_execve(char * filename,
48993 if (retval < 0)
48994 goto out;
48995
48996+ if (!gr_tpe_allow(file)) {
48997+ retval = -EACCES;
48998+ goto out;
48999+ }
49000+
49001+ if (gr_check_crash_exec(file)) {
49002+ retval = -EACCES;
49003+ goto out;
49004+ }
49005+
49006+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49007+
49008+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
49009+
49010+#ifdef CONFIG_GRKERNSEC
49011+ old_acl = current->acl;
49012+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49013+ old_exec_file = current->exec_file;
49014+ get_file(file);
49015+ current->exec_file = file;
49016+#endif
49017+
49018+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49019+ bprm->unsafe);
49020+ if (retval < 0)
49021+ goto out_fail;
49022+
49023 current->flags &= ~PF_KTHREAD;
49024 retval = search_binary_handler(bprm,regs);
49025 if (retval < 0)
49026- goto out;
49027+ goto out_fail;
49028+#ifdef CONFIG_GRKERNSEC
49029+ if (old_exec_file)
49030+ fput(old_exec_file);
49031+#endif
49032
49033 /* execve succeeded */
49034+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49035+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
49036+#endif
49037+
49038 current->fs->in_exec = 0;
49039 current->in_execve = 0;
49040 acct_update_integrals(current);
49041@@ -1426,6 +1549,14 @@ int do_execve(char * filename,
49042 put_files_struct(displaced);
49043 return retval;
49044
49045+out_fail:
49046+#ifdef CONFIG_GRKERNSEC
49047+ current->acl = old_acl;
49048+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49049+ fput(current->exec_file);
49050+ current->exec_file = old_exec_file;
49051+#endif
49052+
49053 out:
49054 if (bprm->mm) {
49055 acct_arg_size(bprm, 0);
49056@@ -1591,6 +1722,220 @@ out:
49057 return ispipe;
49058 }
49059
49060+int pax_check_flags(unsigned long *flags)
49061+{
49062+ int retval = 0;
49063+
49064+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
49065+ if (*flags & MF_PAX_SEGMEXEC)
49066+ {
49067+ *flags &= ~MF_PAX_SEGMEXEC;
49068+ retval = -EINVAL;
49069+ }
49070+#endif
49071+
49072+ if ((*flags & MF_PAX_PAGEEXEC)
49073+
49074+#ifdef CONFIG_PAX_PAGEEXEC
49075+ && (*flags & MF_PAX_SEGMEXEC)
49076+#endif
49077+
49078+ )
49079+ {
49080+ *flags &= ~MF_PAX_PAGEEXEC;
49081+ retval = -EINVAL;
49082+ }
49083+
49084+ if ((*flags & MF_PAX_MPROTECT)
49085+
49086+#ifdef CONFIG_PAX_MPROTECT
49087+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49088+#endif
49089+
49090+ )
49091+ {
49092+ *flags &= ~MF_PAX_MPROTECT;
49093+ retval = -EINVAL;
49094+ }
49095+
49096+ if ((*flags & MF_PAX_EMUTRAMP)
49097+
49098+#ifdef CONFIG_PAX_EMUTRAMP
49099+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49100+#endif
49101+
49102+ )
49103+ {
49104+ *flags &= ~MF_PAX_EMUTRAMP;
49105+ retval = -EINVAL;
49106+ }
49107+
49108+ return retval;
49109+}
49110+
49111+EXPORT_SYMBOL(pax_check_flags);
49112+
49113+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49114+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
49115+{
49116+ struct task_struct *tsk = current;
49117+ struct mm_struct *mm = current->mm;
49118+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
49119+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
49120+ char *path_exec = NULL;
49121+ char *path_fault = NULL;
49122+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
49123+
49124+ if (buffer_exec && buffer_fault) {
49125+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
49126+
49127+ down_read(&mm->mmap_sem);
49128+ vma = mm->mmap;
49129+ while (vma && (!vma_exec || !vma_fault)) {
49130+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
49131+ vma_exec = vma;
49132+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
49133+ vma_fault = vma;
49134+ vma = vma->vm_next;
49135+ }
49136+ if (vma_exec) {
49137+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
49138+ if (IS_ERR(path_exec))
49139+ path_exec = "<path too long>";
49140+ else {
49141+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
49142+ if (path_exec) {
49143+ *path_exec = 0;
49144+ path_exec = buffer_exec;
49145+ } else
49146+ path_exec = "<path too long>";
49147+ }
49148+ }
49149+ if (vma_fault) {
49150+ start = vma_fault->vm_start;
49151+ end = vma_fault->vm_end;
49152+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
49153+ if (vma_fault->vm_file) {
49154+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
49155+ if (IS_ERR(path_fault))
49156+ path_fault = "<path too long>";
49157+ else {
49158+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
49159+ if (path_fault) {
49160+ *path_fault = 0;
49161+ path_fault = buffer_fault;
49162+ } else
49163+ path_fault = "<path too long>";
49164+ }
49165+ } else
49166+ path_fault = "<anonymous mapping>";
49167+ }
49168+ up_read(&mm->mmap_sem);
49169+ }
49170+ if (tsk->signal->curr_ip)
49171+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
49172+ else
49173+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
49174+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
49175+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
49176+ task_uid(tsk), task_euid(tsk), pc, sp);
49177+ free_page((unsigned long)buffer_exec);
49178+ free_page((unsigned long)buffer_fault);
49179+ pax_report_insns(regs, pc, sp);
49180+ do_coredump(SIGKILL, SIGKILL, regs);
49181+}
49182+#endif
49183+
49184+#ifdef CONFIG_PAX_REFCOUNT
49185+void pax_report_refcount_overflow(struct pt_regs *regs)
49186+{
49187+ if (current->signal->curr_ip)
49188+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49189+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
49190+ else
49191+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49192+ current->comm, task_pid_nr(current), current_uid(), current_euid());
49193+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
49194+ show_regs(regs);
49195+ force_sig_specific(SIGKILL, current);
49196+}
49197+#endif
49198+
49199+#ifdef CONFIG_PAX_USERCOPY
49200+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
49201+int object_is_on_stack(const void *obj, unsigned long len)
49202+{
49203+ const void * const stack = task_stack_page(current);
49204+ const void * const stackend = stack + THREAD_SIZE;
49205+
49206+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49207+ const void *frame = NULL;
49208+ const void *oldframe;
49209+#endif
49210+
49211+ if (obj + len < obj)
49212+ return -1;
49213+
49214+ if (obj + len <= stack || stackend <= obj)
49215+ return 0;
49216+
49217+ if (obj < stack || stackend < obj + len)
49218+ return -1;
49219+
49220+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49221+ oldframe = __builtin_frame_address(1);
49222+ if (oldframe)
49223+ frame = __builtin_frame_address(2);
49224+ /*
49225+ low ----------------------------------------------> high
49226+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
49227+ ^----------------^
49228+ allow copies only within here
49229+ */
49230+ while (stack <= frame && frame < stackend) {
49231+ /* if obj + len extends past the last frame, this
49232+ check won't pass and the next frame will be 0,
49233+ causing us to bail out and correctly report
49234+ the copy as invalid
49235+ */
49236+ if (obj + len <= frame)
49237+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
49238+ oldframe = frame;
49239+ frame = *(const void * const *)frame;
49240+ }
49241+ return -1;
49242+#else
49243+ return 1;
49244+#endif
49245+}
49246+
49247+
49248+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
49249+{
49250+ if (current->signal->curr_ip)
49251+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49252+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49253+ else
49254+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49255+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49256+
49257+ dump_stack();
49258+ gr_handle_kernel_exploit();
49259+ do_group_exit(SIGKILL);
49260+}
49261+#endif
49262+
49263+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
49264+void pax_track_stack(void)
49265+{
49266+ unsigned long sp = (unsigned long)&sp;
49267+ if (sp < current_thread_info()->lowest_stack &&
49268+ sp > (unsigned long)task_stack_page(current))
49269+ current_thread_info()->lowest_stack = sp;
49270+}
49271+EXPORT_SYMBOL(pax_track_stack);
49272+#endif
49273+
49274 static int zap_process(struct task_struct *start)
49275 {
49276 struct task_struct *t;
49277@@ -1793,17 +2138,17 @@ static void wait_for_dump_helpers(struct file *file)
49278 pipe = file->f_path.dentry->d_inode->i_pipe;
49279
49280 pipe_lock(pipe);
49281- pipe->readers++;
49282- pipe->writers--;
49283+ atomic_inc(&pipe->readers);
49284+ atomic_dec(&pipe->writers);
49285
49286- while ((pipe->readers > 1) && (!signal_pending(current))) {
49287+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49288 wake_up_interruptible_sync(&pipe->wait);
49289 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49290 pipe_wait(pipe);
49291 }
49292
49293- pipe->readers--;
49294- pipe->writers++;
49295+ atomic_dec(&pipe->readers);
49296+ atomic_inc(&pipe->writers);
49297 pipe_unlock(pipe);
49298
49299 }
49300@@ -1826,10 +2171,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49301 char **helper_argv = NULL;
49302 int helper_argc = 0;
49303 int dump_count = 0;
49304- static atomic_t core_dump_count = ATOMIC_INIT(0);
49305+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49306
49307 audit_core_dumps(signr);
49308
49309+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49310+ gr_handle_brute_attach(current, mm->flags);
49311+
49312 binfmt = mm->binfmt;
49313 if (!binfmt || !binfmt->core_dump)
49314 goto fail;
49315@@ -1874,6 +2222,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49316 */
49317 clear_thread_flag(TIF_SIGPENDING);
49318
49319+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49320+
49321 /*
49322 * lock_kernel() because format_corename() is controlled by sysctl, which
49323 * uses lock_kernel()
49324@@ -1908,7 +2258,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49325 goto fail_unlock;
49326 }
49327
49328- dump_count = atomic_inc_return(&core_dump_count);
49329+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
49330 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49331 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49332 task_tgid_vnr(current), current->comm);
49333@@ -1972,7 +2322,7 @@ close_fail:
49334 filp_close(file, NULL);
49335 fail_dropcount:
49336 if (dump_count)
49337- atomic_dec(&core_dump_count);
49338+ atomic_dec_unchecked(&core_dump_count);
49339 fail_unlock:
49340 if (helper_argv)
49341 argv_free(helper_argv);
49342diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49343index 7f8d2e5..a1abdbb 100644
49344--- a/fs/ext2/balloc.c
49345+++ b/fs/ext2/balloc.c
49346@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49347
49348 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49349 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49350- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49351+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49352 sbi->s_resuid != current_fsuid() &&
49353 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49354 return 0;
49355diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49356index 27967f9..9f2a5fb 100644
49357--- a/fs/ext3/balloc.c
49358+++ b/fs/ext3/balloc.c
49359@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49360
49361 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49362 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49363- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49364+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49365 sbi->s_resuid != current_fsuid() &&
49366 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49367 return 0;
49368diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49369index e85b63c..80398e6 100644
49370--- a/fs/ext4/balloc.c
49371+++ b/fs/ext4/balloc.c
49372@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49373 /* Hm, nope. Are (enough) root reserved blocks available? */
49374 if (sbi->s_resuid == current_fsuid() ||
49375 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49376- capable(CAP_SYS_RESOURCE)) {
49377+ capable_nolog(CAP_SYS_RESOURCE)) {
49378 if (free_blocks >= (nblocks + dirty_blocks))
49379 return 1;
49380 }
49381diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49382index 67c46ed..1f237e5 100644
49383--- a/fs/ext4/ext4.h
49384+++ b/fs/ext4/ext4.h
49385@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49386
49387 /* stats for buddy allocator */
49388 spinlock_t s_mb_pa_lock;
49389- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49390- atomic_t s_bal_success; /* we found long enough chunks */
49391- atomic_t s_bal_allocated; /* in blocks */
49392- atomic_t s_bal_ex_scanned; /* total extents scanned */
49393- atomic_t s_bal_goals; /* goal hits */
49394- atomic_t s_bal_breaks; /* too long searches */
49395- atomic_t s_bal_2orders; /* 2^order hits */
49396+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49397+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49398+ atomic_unchecked_t s_bal_allocated; /* in blocks */
49399+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49400+ atomic_unchecked_t s_bal_goals; /* goal hits */
49401+ atomic_unchecked_t s_bal_breaks; /* too long searches */
49402+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49403 spinlock_t s_bal_lock;
49404 unsigned long s_mb_buddies_generated;
49405 unsigned long long s_mb_generation_time;
49406- atomic_t s_mb_lost_chunks;
49407- atomic_t s_mb_preallocated;
49408- atomic_t s_mb_discarded;
49409+ atomic_unchecked_t s_mb_lost_chunks;
49410+ atomic_unchecked_t s_mb_preallocated;
49411+ atomic_unchecked_t s_mb_discarded;
49412 atomic_t s_lock_busy;
49413
49414 /* locality groups */
49415diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49416index 2a60541..7439d61 100644
49417--- a/fs/ext4/file.c
49418+++ b/fs/ext4/file.c
49419@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49420 cp = d_path(&path, buf, sizeof(buf));
49421 path_put(&path);
49422 if (!IS_ERR(cp)) {
49423- memcpy(sbi->s_es->s_last_mounted, cp,
49424- sizeof(sbi->s_es->s_last_mounted));
49425+ strlcpy(sbi->s_es->s_last_mounted, cp,
49426+ sizeof(sbi->s_es->s_last_mounted));
49427 sb->s_dirt = 1;
49428 }
49429 }
49430diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49431index 42bac1b..0aab9d8 100644
49432--- a/fs/ext4/mballoc.c
49433+++ b/fs/ext4/mballoc.c
49434@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49435 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49436
49437 if (EXT4_SB(sb)->s_mb_stats)
49438- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49439+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49440
49441 break;
49442 }
49443@@ -2131,7 +2131,7 @@ repeat:
49444 ac->ac_status = AC_STATUS_CONTINUE;
49445 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49446 cr = 3;
49447- atomic_inc(&sbi->s_mb_lost_chunks);
49448+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49449 goto repeat;
49450 }
49451 }
49452@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49453 ext4_grpblk_t counters[16];
49454 } sg;
49455
49456+ pax_track_stack();
49457+
49458 group--;
49459 if (group == 0)
49460 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49461@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49462 if (sbi->s_mb_stats) {
49463 printk(KERN_INFO
49464 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49465- atomic_read(&sbi->s_bal_allocated),
49466- atomic_read(&sbi->s_bal_reqs),
49467- atomic_read(&sbi->s_bal_success));
49468+ atomic_read_unchecked(&sbi->s_bal_allocated),
49469+ atomic_read_unchecked(&sbi->s_bal_reqs),
49470+ atomic_read_unchecked(&sbi->s_bal_success));
49471 printk(KERN_INFO
49472 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49473 "%u 2^N hits, %u breaks, %u lost\n",
49474- atomic_read(&sbi->s_bal_ex_scanned),
49475- atomic_read(&sbi->s_bal_goals),
49476- atomic_read(&sbi->s_bal_2orders),
49477- atomic_read(&sbi->s_bal_breaks),
49478- atomic_read(&sbi->s_mb_lost_chunks));
49479+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49480+ atomic_read_unchecked(&sbi->s_bal_goals),
49481+ atomic_read_unchecked(&sbi->s_bal_2orders),
49482+ atomic_read_unchecked(&sbi->s_bal_breaks),
49483+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49484 printk(KERN_INFO
49485 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49486 sbi->s_mb_buddies_generated++,
49487 sbi->s_mb_generation_time);
49488 printk(KERN_INFO
49489 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49490- atomic_read(&sbi->s_mb_preallocated),
49491- atomic_read(&sbi->s_mb_discarded));
49492+ atomic_read_unchecked(&sbi->s_mb_preallocated),
49493+ atomic_read_unchecked(&sbi->s_mb_discarded));
49494 }
49495
49496 free_percpu(sbi->s_locality_groups);
49497@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49498 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49499
49500 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49501- atomic_inc(&sbi->s_bal_reqs);
49502- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49503+ atomic_inc_unchecked(&sbi->s_bal_reqs);
49504+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49505 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49506- atomic_inc(&sbi->s_bal_success);
49507- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49508+ atomic_inc_unchecked(&sbi->s_bal_success);
49509+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49510 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49511 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49512- atomic_inc(&sbi->s_bal_goals);
49513+ atomic_inc_unchecked(&sbi->s_bal_goals);
49514 if (ac->ac_found > sbi->s_mb_max_to_scan)
49515- atomic_inc(&sbi->s_bal_breaks);
49516+ atomic_inc_unchecked(&sbi->s_bal_breaks);
49517 }
49518
49519 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49520@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49521 trace_ext4_mb_new_inode_pa(ac, pa);
49522
49523 ext4_mb_use_inode_pa(ac, pa);
49524- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49525+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49526
49527 ei = EXT4_I(ac->ac_inode);
49528 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49529@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49530 trace_ext4_mb_new_group_pa(ac, pa);
49531
49532 ext4_mb_use_group_pa(ac, pa);
49533- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49534+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49535
49536 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49537 lg = ac->ac_lg;
49538@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49539 * from the bitmap and continue.
49540 */
49541 }
49542- atomic_add(free, &sbi->s_mb_discarded);
49543+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
49544
49545 return err;
49546 }
49547@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49548 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49549 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49550 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49551- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49552+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49553
49554 if (ac) {
49555 ac->ac_sb = sb;
49556diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49557index f1e7077..edd86b2 100644
49558--- a/fs/ext4/super.c
49559+++ b/fs/ext4/super.c
49560@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49561 }
49562
49563
49564-static struct sysfs_ops ext4_attr_ops = {
49565+static const struct sysfs_ops ext4_attr_ops = {
49566 .show = ext4_attr_show,
49567 .store = ext4_attr_store,
49568 };
49569diff --git a/fs/fcntl.c b/fs/fcntl.c
49570index 97e01dc..e9aab2d 100644
49571--- a/fs/fcntl.c
49572+++ b/fs/fcntl.c
49573@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49574 if (err)
49575 return err;
49576
49577+ if (gr_handle_chroot_fowner(pid, type))
49578+ return -ENOENT;
49579+ if (gr_check_protected_task_fowner(pid, type))
49580+ return -EACCES;
49581+
49582 f_modown(filp, pid, type, force);
49583 return 0;
49584 }
49585@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49586
49587 static int f_setown_ex(struct file *filp, unsigned long arg)
49588 {
49589- struct f_owner_ex * __user owner_p = (void * __user)arg;
49590+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49591 struct f_owner_ex owner;
49592 struct pid *pid;
49593 int type;
49594@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49595
49596 static int f_getown_ex(struct file *filp, unsigned long arg)
49597 {
49598- struct f_owner_ex * __user owner_p = (void * __user)arg;
49599+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49600 struct f_owner_ex owner;
49601 int ret = 0;
49602
49603@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49604 switch (cmd) {
49605 case F_DUPFD:
49606 case F_DUPFD_CLOEXEC:
49607+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49608 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49609 break;
49610 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49611diff --git a/fs/fifo.c b/fs/fifo.c
49612index f8f97b8..b1f2259 100644
49613--- a/fs/fifo.c
49614+++ b/fs/fifo.c
49615@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49616 */
49617 filp->f_op = &read_pipefifo_fops;
49618 pipe->r_counter++;
49619- if (pipe->readers++ == 0)
49620+ if (atomic_inc_return(&pipe->readers) == 1)
49621 wake_up_partner(inode);
49622
49623- if (!pipe->writers) {
49624+ if (!atomic_read(&pipe->writers)) {
49625 if ((filp->f_flags & O_NONBLOCK)) {
49626 /* suppress POLLHUP until we have
49627 * seen a writer */
49628@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49629 * errno=ENXIO when there is no process reading the FIFO.
49630 */
49631 ret = -ENXIO;
49632- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49633+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49634 goto err;
49635
49636 filp->f_op = &write_pipefifo_fops;
49637 pipe->w_counter++;
49638- if (!pipe->writers++)
49639+ if (atomic_inc_return(&pipe->writers) == 1)
49640 wake_up_partner(inode);
49641
49642- if (!pipe->readers) {
49643+ if (!atomic_read(&pipe->readers)) {
49644 wait_for_partner(inode, &pipe->r_counter);
49645 if (signal_pending(current))
49646 goto err_wr;
49647@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49648 */
49649 filp->f_op = &rdwr_pipefifo_fops;
49650
49651- pipe->readers++;
49652- pipe->writers++;
49653+ atomic_inc(&pipe->readers);
49654+ atomic_inc(&pipe->writers);
49655 pipe->r_counter++;
49656 pipe->w_counter++;
49657- if (pipe->readers == 1 || pipe->writers == 1)
49658+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49659 wake_up_partner(inode);
49660 break;
49661
49662@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49663 return 0;
49664
49665 err_rd:
49666- if (!--pipe->readers)
49667+ if (atomic_dec_and_test(&pipe->readers))
49668 wake_up_interruptible(&pipe->wait);
49669 ret = -ERESTARTSYS;
49670 goto err;
49671
49672 err_wr:
49673- if (!--pipe->writers)
49674+ if (atomic_dec_and_test(&pipe->writers))
49675 wake_up_interruptible(&pipe->wait);
49676 ret = -ERESTARTSYS;
49677 goto err;
49678
49679 err:
49680- if (!pipe->readers && !pipe->writers)
49681+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49682 free_pipe_info(inode);
49683
49684 err_nocleanup:
49685diff --git a/fs/file.c b/fs/file.c
49686index 87e1290..a930cc4 100644
49687--- a/fs/file.c
49688+++ b/fs/file.c
49689@@ -14,6 +14,7 @@
49690 #include <linux/slab.h>
49691 #include <linux/vmalloc.h>
49692 #include <linux/file.h>
49693+#include <linux/security.h>
49694 #include <linux/fdtable.h>
49695 #include <linux/bitops.h>
49696 #include <linux/interrupt.h>
49697@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49698 * N.B. For clone tasks sharing a files structure, this test
49699 * will limit the total number of files that can be opened.
49700 */
49701+
49702+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49703 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49704 return -EMFILE;
49705
49706diff --git a/fs/filesystems.c b/fs/filesystems.c
49707index a24c58e..53f91ee 100644
49708--- a/fs/filesystems.c
49709+++ b/fs/filesystems.c
49710@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49711 int len = dot ? dot - name : strlen(name);
49712
49713 fs = __get_fs_type(name, len);
49714+
49715+#ifdef CONFIG_GRKERNSEC_MODHARDEN
49716+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49717+#else
49718 if (!fs && (request_module("%.*s", len, name) == 0))
49719+#endif
49720 fs = __get_fs_type(name, len);
49721
49722 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49723diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49724index eee0590..1181166 100644
49725--- a/fs/fs_struct.c
49726+++ b/fs/fs_struct.c
49727@@ -4,6 +4,7 @@
49728 #include <linux/path.h>
49729 #include <linux/slab.h>
49730 #include <linux/fs_struct.h>
49731+#include <linux/grsecurity.h>
49732
49733 /*
49734 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49735@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49736 old_root = fs->root;
49737 fs->root = *path;
49738 path_get(path);
49739+ gr_set_chroot_entries(current, path);
49740 write_unlock(&fs->lock);
49741 if (old_root.dentry)
49742 path_put(&old_root);
49743@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49744 && fs->root.mnt == old_root->mnt) {
49745 path_get(new_root);
49746 fs->root = *new_root;
49747+ gr_set_chroot_entries(p, new_root);
49748 count++;
49749 }
49750 if (fs->pwd.dentry == old_root->dentry
49751@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49752 task_lock(tsk);
49753 write_lock(&fs->lock);
49754 tsk->fs = NULL;
49755- kill = !--fs->users;
49756+ gr_clear_chroot_entries(tsk);
49757+ kill = !atomic_dec_return(&fs->users);
49758 write_unlock(&fs->lock);
49759 task_unlock(tsk);
49760 if (kill)
49761@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49762 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49763 /* We don't need to lock fs - think why ;-) */
49764 if (fs) {
49765- fs->users = 1;
49766+ atomic_set(&fs->users, 1);
49767 fs->in_exec = 0;
49768 rwlock_init(&fs->lock);
49769 fs->umask = old->umask;
49770@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49771
49772 task_lock(current);
49773 write_lock(&fs->lock);
49774- kill = !--fs->users;
49775+ kill = !atomic_dec_return(&fs->users);
49776 current->fs = new_fs;
49777+ gr_set_chroot_entries(current, &new_fs->root);
49778 write_unlock(&fs->lock);
49779 task_unlock(current);
49780
49781@@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
49782
49783 int current_umask(void)
49784 {
49785- return current->fs->umask;
49786+ return current->fs->umask | gr_acl_umask();
49787 }
49788 EXPORT_SYMBOL(current_umask);
49789
49790 /* to be mentioned only in INIT_TASK */
49791 struct fs_struct init_fs = {
49792- .users = 1,
49793+ .users = ATOMIC_INIT(1),
49794 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49795 .umask = 0022,
49796 };
49797@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49798 task_lock(current);
49799
49800 write_lock(&init_fs.lock);
49801- init_fs.users++;
49802+ atomic_inc(&init_fs.users);
49803 write_unlock(&init_fs.lock);
49804
49805 write_lock(&fs->lock);
49806 current->fs = &init_fs;
49807- kill = !--fs->users;
49808+ gr_set_chroot_entries(current, &current->fs->root);
49809+ kill = !atomic_dec_return(&fs->users);
49810 write_unlock(&fs->lock);
49811
49812 task_unlock(current);
49813diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49814index 9905350..02eaec4 100644
49815--- a/fs/fscache/cookie.c
49816+++ b/fs/fscache/cookie.c
49817@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49818 parent ? (char *) parent->def->name : "<no-parent>",
49819 def->name, netfs_data);
49820
49821- fscache_stat(&fscache_n_acquires);
49822+ fscache_stat_unchecked(&fscache_n_acquires);
49823
49824 /* if there's no parent cookie, then we don't create one here either */
49825 if (!parent) {
49826- fscache_stat(&fscache_n_acquires_null);
49827+ fscache_stat_unchecked(&fscache_n_acquires_null);
49828 _leave(" [no parent]");
49829 return NULL;
49830 }
49831@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49832 /* allocate and initialise a cookie */
49833 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49834 if (!cookie) {
49835- fscache_stat(&fscache_n_acquires_oom);
49836+ fscache_stat_unchecked(&fscache_n_acquires_oom);
49837 _leave(" [ENOMEM]");
49838 return NULL;
49839 }
49840@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49841
49842 switch (cookie->def->type) {
49843 case FSCACHE_COOKIE_TYPE_INDEX:
49844- fscache_stat(&fscache_n_cookie_index);
49845+ fscache_stat_unchecked(&fscache_n_cookie_index);
49846 break;
49847 case FSCACHE_COOKIE_TYPE_DATAFILE:
49848- fscache_stat(&fscache_n_cookie_data);
49849+ fscache_stat_unchecked(&fscache_n_cookie_data);
49850 break;
49851 default:
49852- fscache_stat(&fscache_n_cookie_special);
49853+ fscache_stat_unchecked(&fscache_n_cookie_special);
49854 break;
49855 }
49856
49857@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49858 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49859 atomic_dec(&parent->n_children);
49860 __fscache_cookie_put(cookie);
49861- fscache_stat(&fscache_n_acquires_nobufs);
49862+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49863 _leave(" = NULL");
49864 return NULL;
49865 }
49866 }
49867
49868- fscache_stat(&fscache_n_acquires_ok);
49869+ fscache_stat_unchecked(&fscache_n_acquires_ok);
49870 _leave(" = %p", cookie);
49871 return cookie;
49872 }
49873@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49874 cache = fscache_select_cache_for_object(cookie->parent);
49875 if (!cache) {
49876 up_read(&fscache_addremove_sem);
49877- fscache_stat(&fscache_n_acquires_no_cache);
49878+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49879 _leave(" = -ENOMEDIUM [no cache]");
49880 return -ENOMEDIUM;
49881 }
49882@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49883 object = cache->ops->alloc_object(cache, cookie);
49884 fscache_stat_d(&fscache_n_cop_alloc_object);
49885 if (IS_ERR(object)) {
49886- fscache_stat(&fscache_n_object_no_alloc);
49887+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
49888 ret = PTR_ERR(object);
49889 goto error;
49890 }
49891
49892- fscache_stat(&fscache_n_object_alloc);
49893+ fscache_stat_unchecked(&fscache_n_object_alloc);
49894
49895 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49896
49897@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49898 struct fscache_object *object;
49899 struct hlist_node *_p;
49900
49901- fscache_stat(&fscache_n_updates);
49902+ fscache_stat_unchecked(&fscache_n_updates);
49903
49904 if (!cookie) {
49905- fscache_stat(&fscache_n_updates_null);
49906+ fscache_stat_unchecked(&fscache_n_updates_null);
49907 _leave(" [no cookie]");
49908 return;
49909 }
49910@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49911 struct fscache_object *object;
49912 unsigned long event;
49913
49914- fscache_stat(&fscache_n_relinquishes);
49915+ fscache_stat_unchecked(&fscache_n_relinquishes);
49916 if (retire)
49917- fscache_stat(&fscache_n_relinquishes_retire);
49918+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49919
49920 if (!cookie) {
49921- fscache_stat(&fscache_n_relinquishes_null);
49922+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
49923 _leave(" [no cookie]");
49924 return;
49925 }
49926@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49927
49928 /* wait for the cookie to finish being instantiated (or to fail) */
49929 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49930- fscache_stat(&fscache_n_relinquishes_waitcrt);
49931+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49932 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49933 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49934 }
49935diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49936index edd7434..0725e66 100644
49937--- a/fs/fscache/internal.h
49938+++ b/fs/fscache/internal.h
49939@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49940 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49941 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49942
49943-extern atomic_t fscache_n_op_pend;
49944-extern atomic_t fscache_n_op_run;
49945-extern atomic_t fscache_n_op_enqueue;
49946-extern atomic_t fscache_n_op_deferred_release;
49947-extern atomic_t fscache_n_op_release;
49948-extern atomic_t fscache_n_op_gc;
49949-extern atomic_t fscache_n_op_cancelled;
49950-extern atomic_t fscache_n_op_rejected;
49951+extern atomic_unchecked_t fscache_n_op_pend;
49952+extern atomic_unchecked_t fscache_n_op_run;
49953+extern atomic_unchecked_t fscache_n_op_enqueue;
49954+extern atomic_unchecked_t fscache_n_op_deferred_release;
49955+extern atomic_unchecked_t fscache_n_op_release;
49956+extern atomic_unchecked_t fscache_n_op_gc;
49957+extern atomic_unchecked_t fscache_n_op_cancelled;
49958+extern atomic_unchecked_t fscache_n_op_rejected;
49959
49960-extern atomic_t fscache_n_attr_changed;
49961-extern atomic_t fscache_n_attr_changed_ok;
49962-extern atomic_t fscache_n_attr_changed_nobufs;
49963-extern atomic_t fscache_n_attr_changed_nomem;
49964-extern atomic_t fscache_n_attr_changed_calls;
49965+extern atomic_unchecked_t fscache_n_attr_changed;
49966+extern atomic_unchecked_t fscache_n_attr_changed_ok;
49967+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49968+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49969+extern atomic_unchecked_t fscache_n_attr_changed_calls;
49970
49971-extern atomic_t fscache_n_allocs;
49972-extern atomic_t fscache_n_allocs_ok;
49973-extern atomic_t fscache_n_allocs_wait;
49974-extern atomic_t fscache_n_allocs_nobufs;
49975-extern atomic_t fscache_n_allocs_intr;
49976-extern atomic_t fscache_n_allocs_object_dead;
49977-extern atomic_t fscache_n_alloc_ops;
49978-extern atomic_t fscache_n_alloc_op_waits;
49979+extern atomic_unchecked_t fscache_n_allocs;
49980+extern atomic_unchecked_t fscache_n_allocs_ok;
49981+extern atomic_unchecked_t fscache_n_allocs_wait;
49982+extern atomic_unchecked_t fscache_n_allocs_nobufs;
49983+extern atomic_unchecked_t fscache_n_allocs_intr;
49984+extern atomic_unchecked_t fscache_n_allocs_object_dead;
49985+extern atomic_unchecked_t fscache_n_alloc_ops;
49986+extern atomic_unchecked_t fscache_n_alloc_op_waits;
49987
49988-extern atomic_t fscache_n_retrievals;
49989-extern atomic_t fscache_n_retrievals_ok;
49990-extern atomic_t fscache_n_retrievals_wait;
49991-extern atomic_t fscache_n_retrievals_nodata;
49992-extern atomic_t fscache_n_retrievals_nobufs;
49993-extern atomic_t fscache_n_retrievals_intr;
49994-extern atomic_t fscache_n_retrievals_nomem;
49995-extern atomic_t fscache_n_retrievals_object_dead;
49996-extern atomic_t fscache_n_retrieval_ops;
49997-extern atomic_t fscache_n_retrieval_op_waits;
49998+extern atomic_unchecked_t fscache_n_retrievals;
49999+extern atomic_unchecked_t fscache_n_retrievals_ok;
50000+extern atomic_unchecked_t fscache_n_retrievals_wait;
50001+extern atomic_unchecked_t fscache_n_retrievals_nodata;
50002+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50003+extern atomic_unchecked_t fscache_n_retrievals_intr;
50004+extern atomic_unchecked_t fscache_n_retrievals_nomem;
50005+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50006+extern atomic_unchecked_t fscache_n_retrieval_ops;
50007+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50008
50009-extern atomic_t fscache_n_stores;
50010-extern atomic_t fscache_n_stores_ok;
50011-extern atomic_t fscache_n_stores_again;
50012-extern atomic_t fscache_n_stores_nobufs;
50013-extern atomic_t fscache_n_stores_oom;
50014-extern atomic_t fscache_n_store_ops;
50015-extern atomic_t fscache_n_store_calls;
50016-extern atomic_t fscache_n_store_pages;
50017-extern atomic_t fscache_n_store_radix_deletes;
50018-extern atomic_t fscache_n_store_pages_over_limit;
50019+extern atomic_unchecked_t fscache_n_stores;
50020+extern atomic_unchecked_t fscache_n_stores_ok;
50021+extern atomic_unchecked_t fscache_n_stores_again;
50022+extern atomic_unchecked_t fscache_n_stores_nobufs;
50023+extern atomic_unchecked_t fscache_n_stores_oom;
50024+extern atomic_unchecked_t fscache_n_store_ops;
50025+extern atomic_unchecked_t fscache_n_store_calls;
50026+extern atomic_unchecked_t fscache_n_store_pages;
50027+extern atomic_unchecked_t fscache_n_store_radix_deletes;
50028+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50029
50030-extern atomic_t fscache_n_store_vmscan_not_storing;
50031-extern atomic_t fscache_n_store_vmscan_gone;
50032-extern atomic_t fscache_n_store_vmscan_busy;
50033-extern atomic_t fscache_n_store_vmscan_cancelled;
50034+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50035+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50036+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50037+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50038
50039-extern atomic_t fscache_n_marks;
50040-extern atomic_t fscache_n_uncaches;
50041+extern atomic_unchecked_t fscache_n_marks;
50042+extern atomic_unchecked_t fscache_n_uncaches;
50043
50044-extern atomic_t fscache_n_acquires;
50045-extern atomic_t fscache_n_acquires_null;
50046-extern atomic_t fscache_n_acquires_no_cache;
50047-extern atomic_t fscache_n_acquires_ok;
50048-extern atomic_t fscache_n_acquires_nobufs;
50049-extern atomic_t fscache_n_acquires_oom;
50050+extern atomic_unchecked_t fscache_n_acquires;
50051+extern atomic_unchecked_t fscache_n_acquires_null;
50052+extern atomic_unchecked_t fscache_n_acquires_no_cache;
50053+extern atomic_unchecked_t fscache_n_acquires_ok;
50054+extern atomic_unchecked_t fscache_n_acquires_nobufs;
50055+extern atomic_unchecked_t fscache_n_acquires_oom;
50056
50057-extern atomic_t fscache_n_updates;
50058-extern atomic_t fscache_n_updates_null;
50059-extern atomic_t fscache_n_updates_run;
50060+extern atomic_unchecked_t fscache_n_updates;
50061+extern atomic_unchecked_t fscache_n_updates_null;
50062+extern atomic_unchecked_t fscache_n_updates_run;
50063
50064-extern atomic_t fscache_n_relinquishes;
50065-extern atomic_t fscache_n_relinquishes_null;
50066-extern atomic_t fscache_n_relinquishes_waitcrt;
50067-extern atomic_t fscache_n_relinquishes_retire;
50068+extern atomic_unchecked_t fscache_n_relinquishes;
50069+extern atomic_unchecked_t fscache_n_relinquishes_null;
50070+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50071+extern atomic_unchecked_t fscache_n_relinquishes_retire;
50072
50073-extern atomic_t fscache_n_cookie_index;
50074-extern atomic_t fscache_n_cookie_data;
50075-extern atomic_t fscache_n_cookie_special;
50076+extern atomic_unchecked_t fscache_n_cookie_index;
50077+extern atomic_unchecked_t fscache_n_cookie_data;
50078+extern atomic_unchecked_t fscache_n_cookie_special;
50079
50080-extern atomic_t fscache_n_object_alloc;
50081-extern atomic_t fscache_n_object_no_alloc;
50082-extern atomic_t fscache_n_object_lookups;
50083-extern atomic_t fscache_n_object_lookups_negative;
50084-extern atomic_t fscache_n_object_lookups_positive;
50085-extern atomic_t fscache_n_object_lookups_timed_out;
50086-extern atomic_t fscache_n_object_created;
50087-extern atomic_t fscache_n_object_avail;
50088-extern atomic_t fscache_n_object_dead;
50089+extern atomic_unchecked_t fscache_n_object_alloc;
50090+extern atomic_unchecked_t fscache_n_object_no_alloc;
50091+extern atomic_unchecked_t fscache_n_object_lookups;
50092+extern atomic_unchecked_t fscache_n_object_lookups_negative;
50093+extern atomic_unchecked_t fscache_n_object_lookups_positive;
50094+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
50095+extern atomic_unchecked_t fscache_n_object_created;
50096+extern atomic_unchecked_t fscache_n_object_avail;
50097+extern atomic_unchecked_t fscache_n_object_dead;
50098
50099-extern atomic_t fscache_n_checkaux_none;
50100-extern atomic_t fscache_n_checkaux_okay;
50101-extern atomic_t fscache_n_checkaux_update;
50102-extern atomic_t fscache_n_checkaux_obsolete;
50103+extern atomic_unchecked_t fscache_n_checkaux_none;
50104+extern atomic_unchecked_t fscache_n_checkaux_okay;
50105+extern atomic_unchecked_t fscache_n_checkaux_update;
50106+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
50107
50108 extern atomic_t fscache_n_cop_alloc_object;
50109 extern atomic_t fscache_n_cop_lookup_object;
50110@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
50111 atomic_inc(stat);
50112 }
50113
50114+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
50115+{
50116+ atomic_inc_unchecked(stat);
50117+}
50118+
50119 static inline void fscache_stat_d(atomic_t *stat)
50120 {
50121 atomic_dec(stat);
50122@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
50123
50124 #define __fscache_stat(stat) (NULL)
50125 #define fscache_stat(stat) do {} while (0)
50126+#define fscache_stat_unchecked(stat) do {} while (0)
50127 #define fscache_stat_d(stat) do {} while (0)
50128 #endif
50129
50130diff --git a/fs/fscache/object.c b/fs/fscache/object.c
50131index e513ac5..e888d34 100644
50132--- a/fs/fscache/object.c
50133+++ b/fs/fscache/object.c
50134@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50135 /* update the object metadata on disk */
50136 case FSCACHE_OBJECT_UPDATING:
50137 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
50138- fscache_stat(&fscache_n_updates_run);
50139+ fscache_stat_unchecked(&fscache_n_updates_run);
50140 fscache_stat(&fscache_n_cop_update_object);
50141 object->cache->ops->update_object(object);
50142 fscache_stat_d(&fscache_n_cop_update_object);
50143@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50144 spin_lock(&object->lock);
50145 object->state = FSCACHE_OBJECT_DEAD;
50146 spin_unlock(&object->lock);
50147- fscache_stat(&fscache_n_object_dead);
50148+ fscache_stat_unchecked(&fscache_n_object_dead);
50149 goto terminal_transit;
50150
50151 /* handle the parent cache of this object being withdrawn from
50152@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50153 spin_lock(&object->lock);
50154 object->state = FSCACHE_OBJECT_DEAD;
50155 spin_unlock(&object->lock);
50156- fscache_stat(&fscache_n_object_dead);
50157+ fscache_stat_unchecked(&fscache_n_object_dead);
50158 goto terminal_transit;
50159
50160 /* complain about the object being woken up once it is
50161@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50162 parent->cookie->def->name, cookie->def->name,
50163 object->cache->tag->name);
50164
50165- fscache_stat(&fscache_n_object_lookups);
50166+ fscache_stat_unchecked(&fscache_n_object_lookups);
50167 fscache_stat(&fscache_n_cop_lookup_object);
50168 ret = object->cache->ops->lookup_object(object);
50169 fscache_stat_d(&fscache_n_cop_lookup_object);
50170@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50171 if (ret == -ETIMEDOUT) {
50172 /* probably stuck behind another object, so move this one to
50173 * the back of the queue */
50174- fscache_stat(&fscache_n_object_lookups_timed_out);
50175+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
50176 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50177 }
50178
50179@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
50180
50181 spin_lock(&object->lock);
50182 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50183- fscache_stat(&fscache_n_object_lookups_negative);
50184+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
50185
50186 /* transit here to allow write requests to begin stacking up
50187 * and read requests to begin returning ENODATA */
50188@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
50189 * result, in which case there may be data available */
50190 spin_lock(&object->lock);
50191 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50192- fscache_stat(&fscache_n_object_lookups_positive);
50193+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
50194
50195 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
50196
50197@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
50198 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50199 } else {
50200 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
50201- fscache_stat(&fscache_n_object_created);
50202+ fscache_stat_unchecked(&fscache_n_object_created);
50203
50204 object->state = FSCACHE_OBJECT_AVAILABLE;
50205 spin_unlock(&object->lock);
50206@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
50207 fscache_enqueue_dependents(object);
50208
50209 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
50210- fscache_stat(&fscache_n_object_avail);
50211+ fscache_stat_unchecked(&fscache_n_object_avail);
50212
50213 _leave("");
50214 }
50215@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50216 enum fscache_checkaux result;
50217
50218 if (!object->cookie->def->check_aux) {
50219- fscache_stat(&fscache_n_checkaux_none);
50220+ fscache_stat_unchecked(&fscache_n_checkaux_none);
50221 return FSCACHE_CHECKAUX_OKAY;
50222 }
50223
50224@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50225 switch (result) {
50226 /* entry okay as is */
50227 case FSCACHE_CHECKAUX_OKAY:
50228- fscache_stat(&fscache_n_checkaux_okay);
50229+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
50230 break;
50231
50232 /* entry requires update */
50233 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
50234- fscache_stat(&fscache_n_checkaux_update);
50235+ fscache_stat_unchecked(&fscache_n_checkaux_update);
50236 break;
50237
50238 /* entry requires deletion */
50239 case FSCACHE_CHECKAUX_OBSOLETE:
50240- fscache_stat(&fscache_n_checkaux_obsolete);
50241+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
50242 break;
50243
50244 default:
50245diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
50246index 313e79a..775240f 100644
50247--- a/fs/fscache/operation.c
50248+++ b/fs/fscache/operation.c
50249@@ -16,7 +16,7 @@
50250 #include <linux/seq_file.h>
50251 #include "internal.h"
50252
50253-atomic_t fscache_op_debug_id;
50254+atomic_unchecked_t fscache_op_debug_id;
50255 EXPORT_SYMBOL(fscache_op_debug_id);
50256
50257 /**
50258@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
50259 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
50260 ASSERTCMP(atomic_read(&op->usage), >, 0);
50261
50262- fscache_stat(&fscache_n_op_enqueue);
50263+ fscache_stat_unchecked(&fscache_n_op_enqueue);
50264 switch (op->flags & FSCACHE_OP_TYPE) {
50265 case FSCACHE_OP_FAST:
50266 _debug("queue fast");
50267@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
50268 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
50269 if (op->processor)
50270 fscache_enqueue_operation(op);
50271- fscache_stat(&fscache_n_op_run);
50272+ fscache_stat_unchecked(&fscache_n_op_run);
50273 }
50274
50275 /*
50276@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50277 if (object->n_ops > 0) {
50278 atomic_inc(&op->usage);
50279 list_add_tail(&op->pend_link, &object->pending_ops);
50280- fscache_stat(&fscache_n_op_pend);
50281+ fscache_stat_unchecked(&fscache_n_op_pend);
50282 } else if (!list_empty(&object->pending_ops)) {
50283 atomic_inc(&op->usage);
50284 list_add_tail(&op->pend_link, &object->pending_ops);
50285- fscache_stat(&fscache_n_op_pend);
50286+ fscache_stat_unchecked(&fscache_n_op_pend);
50287 fscache_start_operations(object);
50288 } else {
50289 ASSERTCMP(object->n_in_progress, ==, 0);
50290@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50291 object->n_exclusive++; /* reads and writes must wait */
50292 atomic_inc(&op->usage);
50293 list_add_tail(&op->pend_link, &object->pending_ops);
50294- fscache_stat(&fscache_n_op_pend);
50295+ fscache_stat_unchecked(&fscache_n_op_pend);
50296 ret = 0;
50297 } else {
50298 /* not allowed to submit ops in any other state */
50299@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
50300 if (object->n_exclusive > 0) {
50301 atomic_inc(&op->usage);
50302 list_add_tail(&op->pend_link, &object->pending_ops);
50303- fscache_stat(&fscache_n_op_pend);
50304+ fscache_stat_unchecked(&fscache_n_op_pend);
50305 } else if (!list_empty(&object->pending_ops)) {
50306 atomic_inc(&op->usage);
50307 list_add_tail(&op->pend_link, &object->pending_ops);
50308- fscache_stat(&fscache_n_op_pend);
50309+ fscache_stat_unchecked(&fscache_n_op_pend);
50310 fscache_start_operations(object);
50311 } else {
50312 ASSERTCMP(object->n_exclusive, ==, 0);
50313@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50314 object->n_ops++;
50315 atomic_inc(&op->usage);
50316 list_add_tail(&op->pend_link, &object->pending_ops);
50317- fscache_stat(&fscache_n_op_pend);
50318+ fscache_stat_unchecked(&fscache_n_op_pend);
50319 ret = 0;
50320 } else if (object->state == FSCACHE_OBJECT_DYING ||
50321 object->state == FSCACHE_OBJECT_LC_DYING ||
50322 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50323- fscache_stat(&fscache_n_op_rejected);
50324+ fscache_stat_unchecked(&fscache_n_op_rejected);
50325 ret = -ENOBUFS;
50326 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50327 fscache_report_unexpected_submission(object, op, ostate);
50328@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50329
50330 ret = -EBUSY;
50331 if (!list_empty(&op->pend_link)) {
50332- fscache_stat(&fscache_n_op_cancelled);
50333+ fscache_stat_unchecked(&fscache_n_op_cancelled);
50334 list_del_init(&op->pend_link);
50335 object->n_ops--;
50336 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50337@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50338 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50339 BUG();
50340
50341- fscache_stat(&fscache_n_op_release);
50342+ fscache_stat_unchecked(&fscache_n_op_release);
50343
50344 if (op->release) {
50345 op->release(op);
50346@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50347 * lock, and defer it otherwise */
50348 if (!spin_trylock(&object->lock)) {
50349 _debug("defer put");
50350- fscache_stat(&fscache_n_op_deferred_release);
50351+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
50352
50353 cache = object->cache;
50354 spin_lock(&cache->op_gc_list_lock);
50355@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50356
50357 _debug("GC DEFERRED REL OBJ%x OP%x",
50358 object->debug_id, op->debug_id);
50359- fscache_stat(&fscache_n_op_gc);
50360+ fscache_stat_unchecked(&fscache_n_op_gc);
50361
50362 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50363
50364diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50365index c598ea4..6aac13e 100644
50366--- a/fs/fscache/page.c
50367+++ b/fs/fscache/page.c
50368@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50369 val = radix_tree_lookup(&cookie->stores, page->index);
50370 if (!val) {
50371 rcu_read_unlock();
50372- fscache_stat(&fscache_n_store_vmscan_not_storing);
50373+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50374 __fscache_uncache_page(cookie, page);
50375 return true;
50376 }
50377@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50378 spin_unlock(&cookie->stores_lock);
50379
50380 if (xpage) {
50381- fscache_stat(&fscache_n_store_vmscan_cancelled);
50382- fscache_stat(&fscache_n_store_radix_deletes);
50383+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50384+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50385 ASSERTCMP(xpage, ==, page);
50386 } else {
50387- fscache_stat(&fscache_n_store_vmscan_gone);
50388+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50389 }
50390
50391 wake_up_bit(&cookie->flags, 0);
50392@@ -106,7 +106,7 @@ page_busy:
50393 /* we might want to wait here, but that could deadlock the allocator as
50394 * the slow-work threads writing to the cache may all end up sleeping
50395 * on memory allocation */
50396- fscache_stat(&fscache_n_store_vmscan_busy);
50397+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50398 return false;
50399 }
50400 EXPORT_SYMBOL(__fscache_maybe_release_page);
50401@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50402 FSCACHE_COOKIE_STORING_TAG);
50403 if (!radix_tree_tag_get(&cookie->stores, page->index,
50404 FSCACHE_COOKIE_PENDING_TAG)) {
50405- fscache_stat(&fscache_n_store_radix_deletes);
50406+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50407 xpage = radix_tree_delete(&cookie->stores, page->index);
50408 }
50409 spin_unlock(&cookie->stores_lock);
50410@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50411
50412 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50413
50414- fscache_stat(&fscache_n_attr_changed_calls);
50415+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50416
50417 if (fscache_object_is_active(object)) {
50418 fscache_set_op_state(op, "CallFS");
50419@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50420
50421 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50422
50423- fscache_stat(&fscache_n_attr_changed);
50424+ fscache_stat_unchecked(&fscache_n_attr_changed);
50425
50426 op = kzalloc(sizeof(*op), GFP_KERNEL);
50427 if (!op) {
50428- fscache_stat(&fscache_n_attr_changed_nomem);
50429+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50430 _leave(" = -ENOMEM");
50431 return -ENOMEM;
50432 }
50433@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50434 if (fscache_submit_exclusive_op(object, op) < 0)
50435 goto nobufs;
50436 spin_unlock(&cookie->lock);
50437- fscache_stat(&fscache_n_attr_changed_ok);
50438+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50439 fscache_put_operation(op);
50440 _leave(" = 0");
50441 return 0;
50442@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50443 nobufs:
50444 spin_unlock(&cookie->lock);
50445 kfree(op);
50446- fscache_stat(&fscache_n_attr_changed_nobufs);
50447+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50448 _leave(" = %d", -ENOBUFS);
50449 return -ENOBUFS;
50450 }
50451@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50452 /* allocate a retrieval operation and attempt to submit it */
50453 op = kzalloc(sizeof(*op), GFP_NOIO);
50454 if (!op) {
50455- fscache_stat(&fscache_n_retrievals_nomem);
50456+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50457 return NULL;
50458 }
50459
50460@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50461 return 0;
50462 }
50463
50464- fscache_stat(&fscache_n_retrievals_wait);
50465+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
50466
50467 jif = jiffies;
50468 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50469 fscache_wait_bit_interruptible,
50470 TASK_INTERRUPTIBLE) != 0) {
50471- fscache_stat(&fscache_n_retrievals_intr);
50472+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50473 _leave(" = -ERESTARTSYS");
50474 return -ERESTARTSYS;
50475 }
50476@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50477 */
50478 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50479 struct fscache_retrieval *op,
50480- atomic_t *stat_op_waits,
50481- atomic_t *stat_object_dead)
50482+ atomic_unchecked_t *stat_op_waits,
50483+ atomic_unchecked_t *stat_object_dead)
50484 {
50485 int ret;
50486
50487@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50488 goto check_if_dead;
50489
50490 _debug(">>> WT");
50491- fscache_stat(stat_op_waits);
50492+ fscache_stat_unchecked(stat_op_waits);
50493 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50494 fscache_wait_bit_interruptible,
50495 TASK_INTERRUPTIBLE) < 0) {
50496@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50497
50498 check_if_dead:
50499 if (unlikely(fscache_object_is_dead(object))) {
50500- fscache_stat(stat_object_dead);
50501+ fscache_stat_unchecked(stat_object_dead);
50502 return -ENOBUFS;
50503 }
50504 return 0;
50505@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50506
50507 _enter("%p,%p,,,", cookie, page);
50508
50509- fscache_stat(&fscache_n_retrievals);
50510+ fscache_stat_unchecked(&fscache_n_retrievals);
50511
50512 if (hlist_empty(&cookie->backing_objects))
50513 goto nobufs;
50514@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50515 goto nobufs_unlock;
50516 spin_unlock(&cookie->lock);
50517
50518- fscache_stat(&fscache_n_retrieval_ops);
50519+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50520
50521 /* pin the netfs read context in case we need to do the actual netfs
50522 * read because we've encountered a cache read failure */
50523@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50524
50525 error:
50526 if (ret == -ENOMEM)
50527- fscache_stat(&fscache_n_retrievals_nomem);
50528+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50529 else if (ret == -ERESTARTSYS)
50530- fscache_stat(&fscache_n_retrievals_intr);
50531+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50532 else if (ret == -ENODATA)
50533- fscache_stat(&fscache_n_retrievals_nodata);
50534+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50535 else if (ret < 0)
50536- fscache_stat(&fscache_n_retrievals_nobufs);
50537+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50538 else
50539- fscache_stat(&fscache_n_retrievals_ok);
50540+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50541
50542 fscache_put_retrieval(op);
50543 _leave(" = %d", ret);
50544@@ -453,7 +453,7 @@ nobufs_unlock:
50545 spin_unlock(&cookie->lock);
50546 kfree(op);
50547 nobufs:
50548- fscache_stat(&fscache_n_retrievals_nobufs);
50549+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50550 _leave(" = -ENOBUFS");
50551 return -ENOBUFS;
50552 }
50553@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50554
50555 _enter("%p,,%d,,,", cookie, *nr_pages);
50556
50557- fscache_stat(&fscache_n_retrievals);
50558+ fscache_stat_unchecked(&fscache_n_retrievals);
50559
50560 if (hlist_empty(&cookie->backing_objects))
50561 goto nobufs;
50562@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50563 goto nobufs_unlock;
50564 spin_unlock(&cookie->lock);
50565
50566- fscache_stat(&fscache_n_retrieval_ops);
50567+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50568
50569 /* pin the netfs read context in case we need to do the actual netfs
50570 * read because we've encountered a cache read failure */
50571@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50572
50573 error:
50574 if (ret == -ENOMEM)
50575- fscache_stat(&fscache_n_retrievals_nomem);
50576+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50577 else if (ret == -ERESTARTSYS)
50578- fscache_stat(&fscache_n_retrievals_intr);
50579+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50580 else if (ret == -ENODATA)
50581- fscache_stat(&fscache_n_retrievals_nodata);
50582+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50583 else if (ret < 0)
50584- fscache_stat(&fscache_n_retrievals_nobufs);
50585+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50586 else
50587- fscache_stat(&fscache_n_retrievals_ok);
50588+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50589
50590 fscache_put_retrieval(op);
50591 _leave(" = %d", ret);
50592@@ -570,7 +570,7 @@ nobufs_unlock:
50593 spin_unlock(&cookie->lock);
50594 kfree(op);
50595 nobufs:
50596- fscache_stat(&fscache_n_retrievals_nobufs);
50597+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50598 _leave(" = -ENOBUFS");
50599 return -ENOBUFS;
50600 }
50601@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50602
50603 _enter("%p,%p,,,", cookie, page);
50604
50605- fscache_stat(&fscache_n_allocs);
50606+ fscache_stat_unchecked(&fscache_n_allocs);
50607
50608 if (hlist_empty(&cookie->backing_objects))
50609 goto nobufs;
50610@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50611 goto nobufs_unlock;
50612 spin_unlock(&cookie->lock);
50613
50614- fscache_stat(&fscache_n_alloc_ops);
50615+ fscache_stat_unchecked(&fscache_n_alloc_ops);
50616
50617 ret = fscache_wait_for_retrieval_activation(
50618 object, op,
50619@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50620
50621 error:
50622 if (ret == -ERESTARTSYS)
50623- fscache_stat(&fscache_n_allocs_intr);
50624+ fscache_stat_unchecked(&fscache_n_allocs_intr);
50625 else if (ret < 0)
50626- fscache_stat(&fscache_n_allocs_nobufs);
50627+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50628 else
50629- fscache_stat(&fscache_n_allocs_ok);
50630+ fscache_stat_unchecked(&fscache_n_allocs_ok);
50631
50632 fscache_put_retrieval(op);
50633 _leave(" = %d", ret);
50634@@ -651,7 +651,7 @@ nobufs_unlock:
50635 spin_unlock(&cookie->lock);
50636 kfree(op);
50637 nobufs:
50638- fscache_stat(&fscache_n_allocs_nobufs);
50639+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50640 _leave(" = -ENOBUFS");
50641 return -ENOBUFS;
50642 }
50643@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50644
50645 spin_lock(&cookie->stores_lock);
50646
50647- fscache_stat(&fscache_n_store_calls);
50648+ fscache_stat_unchecked(&fscache_n_store_calls);
50649
50650 /* find a page to store */
50651 page = NULL;
50652@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50653 page = results[0];
50654 _debug("gang %d [%lx]", n, page->index);
50655 if (page->index > op->store_limit) {
50656- fscache_stat(&fscache_n_store_pages_over_limit);
50657+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50658 goto superseded;
50659 }
50660
50661@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50662
50663 if (page) {
50664 fscache_set_op_state(&op->op, "Store");
50665- fscache_stat(&fscache_n_store_pages);
50666+ fscache_stat_unchecked(&fscache_n_store_pages);
50667 fscache_stat(&fscache_n_cop_write_page);
50668 ret = object->cache->ops->write_page(op, page);
50669 fscache_stat_d(&fscache_n_cop_write_page);
50670@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50671 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50672 ASSERT(PageFsCache(page));
50673
50674- fscache_stat(&fscache_n_stores);
50675+ fscache_stat_unchecked(&fscache_n_stores);
50676
50677 op = kzalloc(sizeof(*op), GFP_NOIO);
50678 if (!op)
50679@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50680 spin_unlock(&cookie->stores_lock);
50681 spin_unlock(&object->lock);
50682
50683- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50684+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50685 op->store_limit = object->store_limit;
50686
50687 if (fscache_submit_op(object, &op->op) < 0)
50688@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50689
50690 spin_unlock(&cookie->lock);
50691 radix_tree_preload_end();
50692- fscache_stat(&fscache_n_store_ops);
50693- fscache_stat(&fscache_n_stores_ok);
50694+ fscache_stat_unchecked(&fscache_n_store_ops);
50695+ fscache_stat_unchecked(&fscache_n_stores_ok);
50696
50697 /* the slow work queue now carries its own ref on the object */
50698 fscache_put_operation(&op->op);
50699@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50700 return 0;
50701
50702 already_queued:
50703- fscache_stat(&fscache_n_stores_again);
50704+ fscache_stat_unchecked(&fscache_n_stores_again);
50705 already_pending:
50706 spin_unlock(&cookie->stores_lock);
50707 spin_unlock(&object->lock);
50708 spin_unlock(&cookie->lock);
50709 radix_tree_preload_end();
50710 kfree(op);
50711- fscache_stat(&fscache_n_stores_ok);
50712+ fscache_stat_unchecked(&fscache_n_stores_ok);
50713 _leave(" = 0");
50714 return 0;
50715
50716@@ -886,14 +886,14 @@ nobufs:
50717 spin_unlock(&cookie->lock);
50718 radix_tree_preload_end();
50719 kfree(op);
50720- fscache_stat(&fscache_n_stores_nobufs);
50721+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
50722 _leave(" = -ENOBUFS");
50723 return -ENOBUFS;
50724
50725 nomem_free:
50726 kfree(op);
50727 nomem:
50728- fscache_stat(&fscache_n_stores_oom);
50729+ fscache_stat_unchecked(&fscache_n_stores_oom);
50730 _leave(" = -ENOMEM");
50731 return -ENOMEM;
50732 }
50733@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50734 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50735 ASSERTCMP(page, !=, NULL);
50736
50737- fscache_stat(&fscache_n_uncaches);
50738+ fscache_stat_unchecked(&fscache_n_uncaches);
50739
50740 /* cache withdrawal may beat us to it */
50741 if (!PageFsCache(page))
50742@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50743 unsigned long loop;
50744
50745 #ifdef CONFIG_FSCACHE_STATS
50746- atomic_add(pagevec->nr, &fscache_n_marks);
50747+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50748 #endif
50749
50750 for (loop = 0; loop < pagevec->nr; loop++) {
50751diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50752index 46435f3..8cddf18 100644
50753--- a/fs/fscache/stats.c
50754+++ b/fs/fscache/stats.c
50755@@ -18,95 +18,95 @@
50756 /*
50757 * operation counters
50758 */
50759-atomic_t fscache_n_op_pend;
50760-atomic_t fscache_n_op_run;
50761-atomic_t fscache_n_op_enqueue;
50762-atomic_t fscache_n_op_requeue;
50763-atomic_t fscache_n_op_deferred_release;
50764-atomic_t fscache_n_op_release;
50765-atomic_t fscache_n_op_gc;
50766-atomic_t fscache_n_op_cancelled;
50767-atomic_t fscache_n_op_rejected;
50768+atomic_unchecked_t fscache_n_op_pend;
50769+atomic_unchecked_t fscache_n_op_run;
50770+atomic_unchecked_t fscache_n_op_enqueue;
50771+atomic_unchecked_t fscache_n_op_requeue;
50772+atomic_unchecked_t fscache_n_op_deferred_release;
50773+atomic_unchecked_t fscache_n_op_release;
50774+atomic_unchecked_t fscache_n_op_gc;
50775+atomic_unchecked_t fscache_n_op_cancelled;
50776+atomic_unchecked_t fscache_n_op_rejected;
50777
50778-atomic_t fscache_n_attr_changed;
50779-atomic_t fscache_n_attr_changed_ok;
50780-atomic_t fscache_n_attr_changed_nobufs;
50781-atomic_t fscache_n_attr_changed_nomem;
50782-atomic_t fscache_n_attr_changed_calls;
50783+atomic_unchecked_t fscache_n_attr_changed;
50784+atomic_unchecked_t fscache_n_attr_changed_ok;
50785+atomic_unchecked_t fscache_n_attr_changed_nobufs;
50786+atomic_unchecked_t fscache_n_attr_changed_nomem;
50787+atomic_unchecked_t fscache_n_attr_changed_calls;
50788
50789-atomic_t fscache_n_allocs;
50790-atomic_t fscache_n_allocs_ok;
50791-atomic_t fscache_n_allocs_wait;
50792-atomic_t fscache_n_allocs_nobufs;
50793-atomic_t fscache_n_allocs_intr;
50794-atomic_t fscache_n_allocs_object_dead;
50795-atomic_t fscache_n_alloc_ops;
50796-atomic_t fscache_n_alloc_op_waits;
50797+atomic_unchecked_t fscache_n_allocs;
50798+atomic_unchecked_t fscache_n_allocs_ok;
50799+atomic_unchecked_t fscache_n_allocs_wait;
50800+atomic_unchecked_t fscache_n_allocs_nobufs;
50801+atomic_unchecked_t fscache_n_allocs_intr;
50802+atomic_unchecked_t fscache_n_allocs_object_dead;
50803+atomic_unchecked_t fscache_n_alloc_ops;
50804+atomic_unchecked_t fscache_n_alloc_op_waits;
50805
50806-atomic_t fscache_n_retrievals;
50807-atomic_t fscache_n_retrievals_ok;
50808-atomic_t fscache_n_retrievals_wait;
50809-atomic_t fscache_n_retrievals_nodata;
50810-atomic_t fscache_n_retrievals_nobufs;
50811-atomic_t fscache_n_retrievals_intr;
50812-atomic_t fscache_n_retrievals_nomem;
50813-atomic_t fscache_n_retrievals_object_dead;
50814-atomic_t fscache_n_retrieval_ops;
50815-atomic_t fscache_n_retrieval_op_waits;
50816+atomic_unchecked_t fscache_n_retrievals;
50817+atomic_unchecked_t fscache_n_retrievals_ok;
50818+atomic_unchecked_t fscache_n_retrievals_wait;
50819+atomic_unchecked_t fscache_n_retrievals_nodata;
50820+atomic_unchecked_t fscache_n_retrievals_nobufs;
50821+atomic_unchecked_t fscache_n_retrievals_intr;
50822+atomic_unchecked_t fscache_n_retrievals_nomem;
50823+atomic_unchecked_t fscache_n_retrievals_object_dead;
50824+atomic_unchecked_t fscache_n_retrieval_ops;
50825+atomic_unchecked_t fscache_n_retrieval_op_waits;
50826
50827-atomic_t fscache_n_stores;
50828-atomic_t fscache_n_stores_ok;
50829-atomic_t fscache_n_stores_again;
50830-atomic_t fscache_n_stores_nobufs;
50831-atomic_t fscache_n_stores_oom;
50832-atomic_t fscache_n_store_ops;
50833-atomic_t fscache_n_store_calls;
50834-atomic_t fscache_n_store_pages;
50835-atomic_t fscache_n_store_radix_deletes;
50836-atomic_t fscache_n_store_pages_over_limit;
50837+atomic_unchecked_t fscache_n_stores;
50838+atomic_unchecked_t fscache_n_stores_ok;
50839+atomic_unchecked_t fscache_n_stores_again;
50840+atomic_unchecked_t fscache_n_stores_nobufs;
50841+atomic_unchecked_t fscache_n_stores_oom;
50842+atomic_unchecked_t fscache_n_store_ops;
50843+atomic_unchecked_t fscache_n_store_calls;
50844+atomic_unchecked_t fscache_n_store_pages;
50845+atomic_unchecked_t fscache_n_store_radix_deletes;
50846+atomic_unchecked_t fscache_n_store_pages_over_limit;
50847
50848-atomic_t fscache_n_store_vmscan_not_storing;
50849-atomic_t fscache_n_store_vmscan_gone;
50850-atomic_t fscache_n_store_vmscan_busy;
50851-atomic_t fscache_n_store_vmscan_cancelled;
50852+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50853+atomic_unchecked_t fscache_n_store_vmscan_gone;
50854+atomic_unchecked_t fscache_n_store_vmscan_busy;
50855+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50856
50857-atomic_t fscache_n_marks;
50858-atomic_t fscache_n_uncaches;
50859+atomic_unchecked_t fscache_n_marks;
50860+atomic_unchecked_t fscache_n_uncaches;
50861
50862-atomic_t fscache_n_acquires;
50863-atomic_t fscache_n_acquires_null;
50864-atomic_t fscache_n_acquires_no_cache;
50865-atomic_t fscache_n_acquires_ok;
50866-atomic_t fscache_n_acquires_nobufs;
50867-atomic_t fscache_n_acquires_oom;
50868+atomic_unchecked_t fscache_n_acquires;
50869+atomic_unchecked_t fscache_n_acquires_null;
50870+atomic_unchecked_t fscache_n_acquires_no_cache;
50871+atomic_unchecked_t fscache_n_acquires_ok;
50872+atomic_unchecked_t fscache_n_acquires_nobufs;
50873+atomic_unchecked_t fscache_n_acquires_oom;
50874
50875-atomic_t fscache_n_updates;
50876-atomic_t fscache_n_updates_null;
50877-atomic_t fscache_n_updates_run;
50878+atomic_unchecked_t fscache_n_updates;
50879+atomic_unchecked_t fscache_n_updates_null;
50880+atomic_unchecked_t fscache_n_updates_run;
50881
50882-atomic_t fscache_n_relinquishes;
50883-atomic_t fscache_n_relinquishes_null;
50884-atomic_t fscache_n_relinquishes_waitcrt;
50885-atomic_t fscache_n_relinquishes_retire;
50886+atomic_unchecked_t fscache_n_relinquishes;
50887+atomic_unchecked_t fscache_n_relinquishes_null;
50888+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50889+atomic_unchecked_t fscache_n_relinquishes_retire;
50890
50891-atomic_t fscache_n_cookie_index;
50892-atomic_t fscache_n_cookie_data;
50893-atomic_t fscache_n_cookie_special;
50894+atomic_unchecked_t fscache_n_cookie_index;
50895+atomic_unchecked_t fscache_n_cookie_data;
50896+atomic_unchecked_t fscache_n_cookie_special;
50897
50898-atomic_t fscache_n_object_alloc;
50899-atomic_t fscache_n_object_no_alloc;
50900-atomic_t fscache_n_object_lookups;
50901-atomic_t fscache_n_object_lookups_negative;
50902-atomic_t fscache_n_object_lookups_positive;
50903-atomic_t fscache_n_object_lookups_timed_out;
50904-atomic_t fscache_n_object_created;
50905-atomic_t fscache_n_object_avail;
50906-atomic_t fscache_n_object_dead;
50907+atomic_unchecked_t fscache_n_object_alloc;
50908+atomic_unchecked_t fscache_n_object_no_alloc;
50909+atomic_unchecked_t fscache_n_object_lookups;
50910+atomic_unchecked_t fscache_n_object_lookups_negative;
50911+atomic_unchecked_t fscache_n_object_lookups_positive;
50912+atomic_unchecked_t fscache_n_object_lookups_timed_out;
50913+atomic_unchecked_t fscache_n_object_created;
50914+atomic_unchecked_t fscache_n_object_avail;
50915+atomic_unchecked_t fscache_n_object_dead;
50916
50917-atomic_t fscache_n_checkaux_none;
50918-atomic_t fscache_n_checkaux_okay;
50919-atomic_t fscache_n_checkaux_update;
50920-atomic_t fscache_n_checkaux_obsolete;
50921+atomic_unchecked_t fscache_n_checkaux_none;
50922+atomic_unchecked_t fscache_n_checkaux_okay;
50923+atomic_unchecked_t fscache_n_checkaux_update;
50924+atomic_unchecked_t fscache_n_checkaux_obsolete;
50925
50926 atomic_t fscache_n_cop_alloc_object;
50927 atomic_t fscache_n_cop_lookup_object;
50928@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50929 seq_puts(m, "FS-Cache statistics\n");
50930
50931 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50932- atomic_read(&fscache_n_cookie_index),
50933- atomic_read(&fscache_n_cookie_data),
50934- atomic_read(&fscache_n_cookie_special));
50935+ atomic_read_unchecked(&fscache_n_cookie_index),
50936+ atomic_read_unchecked(&fscache_n_cookie_data),
50937+ atomic_read_unchecked(&fscache_n_cookie_special));
50938
50939 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50940- atomic_read(&fscache_n_object_alloc),
50941- atomic_read(&fscache_n_object_no_alloc),
50942- atomic_read(&fscache_n_object_avail),
50943- atomic_read(&fscache_n_object_dead));
50944+ atomic_read_unchecked(&fscache_n_object_alloc),
50945+ atomic_read_unchecked(&fscache_n_object_no_alloc),
50946+ atomic_read_unchecked(&fscache_n_object_avail),
50947+ atomic_read_unchecked(&fscache_n_object_dead));
50948 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50949- atomic_read(&fscache_n_checkaux_none),
50950- atomic_read(&fscache_n_checkaux_okay),
50951- atomic_read(&fscache_n_checkaux_update),
50952- atomic_read(&fscache_n_checkaux_obsolete));
50953+ atomic_read_unchecked(&fscache_n_checkaux_none),
50954+ atomic_read_unchecked(&fscache_n_checkaux_okay),
50955+ atomic_read_unchecked(&fscache_n_checkaux_update),
50956+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50957
50958 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50959- atomic_read(&fscache_n_marks),
50960- atomic_read(&fscache_n_uncaches));
50961+ atomic_read_unchecked(&fscache_n_marks),
50962+ atomic_read_unchecked(&fscache_n_uncaches));
50963
50964 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50965 " oom=%u\n",
50966- atomic_read(&fscache_n_acquires),
50967- atomic_read(&fscache_n_acquires_null),
50968- atomic_read(&fscache_n_acquires_no_cache),
50969- atomic_read(&fscache_n_acquires_ok),
50970- atomic_read(&fscache_n_acquires_nobufs),
50971- atomic_read(&fscache_n_acquires_oom));
50972+ atomic_read_unchecked(&fscache_n_acquires),
50973+ atomic_read_unchecked(&fscache_n_acquires_null),
50974+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
50975+ atomic_read_unchecked(&fscache_n_acquires_ok),
50976+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
50977+ atomic_read_unchecked(&fscache_n_acquires_oom));
50978
50979 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50980- atomic_read(&fscache_n_object_lookups),
50981- atomic_read(&fscache_n_object_lookups_negative),
50982- atomic_read(&fscache_n_object_lookups_positive),
50983- atomic_read(&fscache_n_object_lookups_timed_out),
50984- atomic_read(&fscache_n_object_created));
50985+ atomic_read_unchecked(&fscache_n_object_lookups),
50986+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
50987+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
50988+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50989+ atomic_read_unchecked(&fscache_n_object_created));
50990
50991 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50992- atomic_read(&fscache_n_updates),
50993- atomic_read(&fscache_n_updates_null),
50994- atomic_read(&fscache_n_updates_run));
50995+ atomic_read_unchecked(&fscache_n_updates),
50996+ atomic_read_unchecked(&fscache_n_updates_null),
50997+ atomic_read_unchecked(&fscache_n_updates_run));
50998
50999 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51000- atomic_read(&fscache_n_relinquishes),
51001- atomic_read(&fscache_n_relinquishes_null),
51002- atomic_read(&fscache_n_relinquishes_waitcrt),
51003- atomic_read(&fscache_n_relinquishes_retire));
51004+ atomic_read_unchecked(&fscache_n_relinquishes),
51005+ atomic_read_unchecked(&fscache_n_relinquishes_null),
51006+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51007+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
51008
51009 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
51010- atomic_read(&fscache_n_attr_changed),
51011- atomic_read(&fscache_n_attr_changed_ok),
51012- atomic_read(&fscache_n_attr_changed_nobufs),
51013- atomic_read(&fscache_n_attr_changed_nomem),
51014- atomic_read(&fscache_n_attr_changed_calls));
51015+ atomic_read_unchecked(&fscache_n_attr_changed),
51016+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
51017+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
51018+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
51019+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
51020
51021 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
51022- atomic_read(&fscache_n_allocs),
51023- atomic_read(&fscache_n_allocs_ok),
51024- atomic_read(&fscache_n_allocs_wait),
51025- atomic_read(&fscache_n_allocs_nobufs),
51026- atomic_read(&fscache_n_allocs_intr));
51027+ atomic_read_unchecked(&fscache_n_allocs),
51028+ atomic_read_unchecked(&fscache_n_allocs_ok),
51029+ atomic_read_unchecked(&fscache_n_allocs_wait),
51030+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
51031+ atomic_read_unchecked(&fscache_n_allocs_intr));
51032 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
51033- atomic_read(&fscache_n_alloc_ops),
51034- atomic_read(&fscache_n_alloc_op_waits),
51035- atomic_read(&fscache_n_allocs_object_dead));
51036+ atomic_read_unchecked(&fscache_n_alloc_ops),
51037+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
51038+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
51039
51040 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
51041 " int=%u oom=%u\n",
51042- atomic_read(&fscache_n_retrievals),
51043- atomic_read(&fscache_n_retrievals_ok),
51044- atomic_read(&fscache_n_retrievals_wait),
51045- atomic_read(&fscache_n_retrievals_nodata),
51046- atomic_read(&fscache_n_retrievals_nobufs),
51047- atomic_read(&fscache_n_retrievals_intr),
51048- atomic_read(&fscache_n_retrievals_nomem));
51049+ atomic_read_unchecked(&fscache_n_retrievals),
51050+ atomic_read_unchecked(&fscache_n_retrievals_ok),
51051+ atomic_read_unchecked(&fscache_n_retrievals_wait),
51052+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
51053+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
51054+ atomic_read_unchecked(&fscache_n_retrievals_intr),
51055+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
51056 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
51057- atomic_read(&fscache_n_retrieval_ops),
51058- atomic_read(&fscache_n_retrieval_op_waits),
51059- atomic_read(&fscache_n_retrievals_object_dead));
51060+ atomic_read_unchecked(&fscache_n_retrieval_ops),
51061+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
51062+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
51063
51064 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
51065- atomic_read(&fscache_n_stores),
51066- atomic_read(&fscache_n_stores_ok),
51067- atomic_read(&fscache_n_stores_again),
51068- atomic_read(&fscache_n_stores_nobufs),
51069- atomic_read(&fscache_n_stores_oom));
51070+ atomic_read_unchecked(&fscache_n_stores),
51071+ atomic_read_unchecked(&fscache_n_stores_ok),
51072+ atomic_read_unchecked(&fscache_n_stores_again),
51073+ atomic_read_unchecked(&fscache_n_stores_nobufs),
51074+ atomic_read_unchecked(&fscache_n_stores_oom));
51075 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
51076- atomic_read(&fscache_n_store_ops),
51077- atomic_read(&fscache_n_store_calls),
51078- atomic_read(&fscache_n_store_pages),
51079- atomic_read(&fscache_n_store_radix_deletes),
51080- atomic_read(&fscache_n_store_pages_over_limit));
51081+ atomic_read_unchecked(&fscache_n_store_ops),
51082+ atomic_read_unchecked(&fscache_n_store_calls),
51083+ atomic_read_unchecked(&fscache_n_store_pages),
51084+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
51085+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
51086
51087 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
51088- atomic_read(&fscache_n_store_vmscan_not_storing),
51089- atomic_read(&fscache_n_store_vmscan_gone),
51090- atomic_read(&fscache_n_store_vmscan_busy),
51091- atomic_read(&fscache_n_store_vmscan_cancelled));
51092+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
51093+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
51094+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
51095+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
51096
51097 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
51098- atomic_read(&fscache_n_op_pend),
51099- atomic_read(&fscache_n_op_run),
51100- atomic_read(&fscache_n_op_enqueue),
51101- atomic_read(&fscache_n_op_cancelled),
51102- atomic_read(&fscache_n_op_rejected));
51103+ atomic_read_unchecked(&fscache_n_op_pend),
51104+ atomic_read_unchecked(&fscache_n_op_run),
51105+ atomic_read_unchecked(&fscache_n_op_enqueue),
51106+ atomic_read_unchecked(&fscache_n_op_cancelled),
51107+ atomic_read_unchecked(&fscache_n_op_rejected));
51108 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
51109- atomic_read(&fscache_n_op_deferred_release),
51110- atomic_read(&fscache_n_op_release),
51111- atomic_read(&fscache_n_op_gc));
51112+ atomic_read_unchecked(&fscache_n_op_deferred_release),
51113+ atomic_read_unchecked(&fscache_n_op_release),
51114+ atomic_read_unchecked(&fscache_n_op_gc));
51115
51116 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
51117 atomic_read(&fscache_n_cop_alloc_object),
51118diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
51119index de792dc..448b532 100644
51120--- a/fs/fuse/cuse.c
51121+++ b/fs/fuse/cuse.c
51122@@ -576,10 +576,12 @@ static int __init cuse_init(void)
51123 INIT_LIST_HEAD(&cuse_conntbl[i]);
51124
51125 /* inherit and extend fuse_dev_operations */
51126- cuse_channel_fops = fuse_dev_operations;
51127- cuse_channel_fops.owner = THIS_MODULE;
51128- cuse_channel_fops.open = cuse_channel_open;
51129- cuse_channel_fops.release = cuse_channel_release;
51130+ pax_open_kernel();
51131+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
51132+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
51133+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
51134+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
51135+ pax_close_kernel();
51136
51137 cuse_class = class_create(THIS_MODULE, "cuse");
51138 if (IS_ERR(cuse_class))
51139diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
51140index 1facb39..7f48557 100644
51141--- a/fs/fuse/dev.c
51142+++ b/fs/fuse/dev.c
51143@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51144 {
51145 struct fuse_notify_inval_entry_out outarg;
51146 int err = -EINVAL;
51147- char buf[FUSE_NAME_MAX+1];
51148+ char *buf = NULL;
51149 struct qstr name;
51150
51151 if (size < sizeof(outarg))
51152@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51153 if (outarg.namelen > FUSE_NAME_MAX)
51154 goto err;
51155
51156+ err = -ENOMEM;
51157+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
51158+ if (!buf)
51159+ goto err;
51160+
51161 err = -EINVAL;
51162 if (size != sizeof(outarg) + outarg.namelen + 1)
51163 goto err;
51164@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51165
51166 down_read(&fc->killsb);
51167 err = -ENOENT;
51168- if (!fc->sb)
51169- goto err_unlock;
51170-
51171- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51172-
51173-err_unlock:
51174+ if (fc->sb)
51175+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51176 up_read(&fc->killsb);
51177+ kfree(buf);
51178 return err;
51179
51180 err:
51181 fuse_copy_finish(cs);
51182+ kfree(buf);
51183 return err;
51184 }
51185
51186diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
51187index 4787ae6..73efff7 100644
51188--- a/fs/fuse/dir.c
51189+++ b/fs/fuse/dir.c
51190@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
51191 return link;
51192 }
51193
51194-static void free_link(char *link)
51195+static void free_link(const char *link)
51196 {
51197 if (!IS_ERR(link))
51198 free_page((unsigned long) link);
51199diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
51200index 247436c..e650ccb 100644
51201--- a/fs/gfs2/ops_inode.c
51202+++ b/fs/gfs2/ops_inode.c
51203@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
51204 unsigned int x;
51205 int error;
51206
51207+ pax_track_stack();
51208+
51209 if (ndentry->d_inode) {
51210 nip = GFS2_I(ndentry->d_inode);
51211 if (ip == nip)
51212diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
51213index 4463297..4fed53b 100644
51214--- a/fs/gfs2/sys.c
51215+++ b/fs/gfs2/sys.c
51216@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
51217 return a->store ? a->store(sdp, buf, len) : len;
51218 }
51219
51220-static struct sysfs_ops gfs2_attr_ops = {
51221+static const struct sysfs_ops gfs2_attr_ops = {
51222 .show = gfs2_attr_show,
51223 .store = gfs2_attr_store,
51224 };
51225@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
51226 return 0;
51227 }
51228
51229-static struct kset_uevent_ops gfs2_uevent_ops = {
51230+static const struct kset_uevent_ops gfs2_uevent_ops = {
51231 .uevent = gfs2_uevent,
51232 };
51233
51234diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
51235index f6874ac..7cd98a8 100644
51236--- a/fs/hfsplus/catalog.c
51237+++ b/fs/hfsplus/catalog.c
51238@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
51239 int err;
51240 u16 type;
51241
51242+ pax_track_stack();
51243+
51244 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
51245 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
51246 if (err)
51247@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
51248 int entry_size;
51249 int err;
51250
51251+ pax_track_stack();
51252+
51253 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
51254 sb = dir->i_sb;
51255 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
51256@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
51257 int entry_size, type;
51258 int err = 0;
51259
51260+ pax_track_stack();
51261+
51262 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
51263 dst_dir->i_ino, dst_name->name);
51264 sb = src_dir->i_sb;
51265diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
51266index 5f40236..dac3421 100644
51267--- a/fs/hfsplus/dir.c
51268+++ b/fs/hfsplus/dir.c
51269@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
51270 struct hfsplus_readdir_data *rd;
51271 u16 type;
51272
51273+ pax_track_stack();
51274+
51275 if (filp->f_pos >= inode->i_size)
51276 return 0;
51277
51278diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
51279index 1bcf597..905a251 100644
51280--- a/fs/hfsplus/inode.c
51281+++ b/fs/hfsplus/inode.c
51282@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
51283 int res = 0;
51284 u16 type;
51285
51286+ pax_track_stack();
51287+
51288 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
51289
51290 HFSPLUS_I(inode).dev = 0;
51291@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
51292 struct hfs_find_data fd;
51293 hfsplus_cat_entry entry;
51294
51295+ pax_track_stack();
51296+
51297 if (HFSPLUS_IS_RSRC(inode))
51298 main_inode = HFSPLUS_I(inode).rsrc_inode;
51299
51300diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
51301index f457d2c..7ef4ad5 100644
51302--- a/fs/hfsplus/ioctl.c
51303+++ b/fs/hfsplus/ioctl.c
51304@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51305 struct hfsplus_cat_file *file;
51306 int res;
51307
51308+ pax_track_stack();
51309+
51310 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51311 return -EOPNOTSUPP;
51312
51313@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51314 struct hfsplus_cat_file *file;
51315 ssize_t res = 0;
51316
51317+ pax_track_stack();
51318+
51319 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51320 return -EOPNOTSUPP;
51321
51322diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51323index 43022f3..7298079 100644
51324--- a/fs/hfsplus/super.c
51325+++ b/fs/hfsplus/super.c
51326@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51327 struct nls_table *nls = NULL;
51328 int err = -EINVAL;
51329
51330+ pax_track_stack();
51331+
51332 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51333 if (!sbi)
51334 return -ENOMEM;
51335diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51336index 87a1258..5694d91 100644
51337--- a/fs/hugetlbfs/inode.c
51338+++ b/fs/hugetlbfs/inode.c
51339@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51340 .kill_sb = kill_litter_super,
51341 };
51342
51343-static struct vfsmount *hugetlbfs_vfsmount;
51344+struct vfsmount *hugetlbfs_vfsmount;
51345
51346 static int can_do_hugetlb_shm(void)
51347 {
51348diff --git a/fs/ioctl.c b/fs/ioctl.c
51349index 6c75110..19d2c3c 100644
51350--- a/fs/ioctl.c
51351+++ b/fs/ioctl.c
51352@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51353 u64 phys, u64 len, u32 flags)
51354 {
51355 struct fiemap_extent extent;
51356- struct fiemap_extent *dest = fieinfo->fi_extents_start;
51357+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51358
51359 /* only count the extents */
51360 if (fieinfo->fi_extents_max == 0) {
51361@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51362
51363 fieinfo.fi_flags = fiemap.fm_flags;
51364 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51365- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51366+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51367
51368 if (fiemap.fm_extent_count != 0 &&
51369 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51370@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51371 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51372 fiemap.fm_flags = fieinfo.fi_flags;
51373 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51374- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51375+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51376 error = -EFAULT;
51377
51378 return error;
51379diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51380index b0435dd..81ee0be 100644
51381--- a/fs/jbd/checkpoint.c
51382+++ b/fs/jbd/checkpoint.c
51383@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51384 tid_t this_tid;
51385 int result;
51386
51387+ pax_track_stack();
51388+
51389 jbd_debug(1, "Start checkpoint\n");
51390
51391 /*
51392diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51393index 546d153..736896c 100644
51394--- a/fs/jffs2/compr_rtime.c
51395+++ b/fs/jffs2/compr_rtime.c
51396@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51397 int outpos = 0;
51398 int pos=0;
51399
51400+ pax_track_stack();
51401+
51402 memset(positions,0,sizeof(positions));
51403
51404 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51405@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51406 int outpos = 0;
51407 int pos=0;
51408
51409+ pax_track_stack();
51410+
51411 memset(positions,0,sizeof(positions));
51412
51413 while (outpos<destlen) {
51414diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51415index 170d289..3254b98 100644
51416--- a/fs/jffs2/compr_rubin.c
51417+++ b/fs/jffs2/compr_rubin.c
51418@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51419 int ret;
51420 uint32_t mysrclen, mydstlen;
51421
51422+ pax_track_stack();
51423+
51424 mysrclen = *sourcelen;
51425 mydstlen = *dstlen - 8;
51426
51427diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51428index b47679b..00d65d3 100644
51429--- a/fs/jffs2/erase.c
51430+++ b/fs/jffs2/erase.c
51431@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51432 struct jffs2_unknown_node marker = {
51433 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51434 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51435- .totlen = cpu_to_je32(c->cleanmarker_size)
51436+ .totlen = cpu_to_je32(c->cleanmarker_size),
51437+ .hdr_crc = cpu_to_je32(0)
51438 };
51439
51440 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51441diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51442index 5ef7bac..4fd1e3c 100644
51443--- a/fs/jffs2/wbuf.c
51444+++ b/fs/jffs2/wbuf.c
51445@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51446 {
51447 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51448 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51449- .totlen = constant_cpu_to_je32(8)
51450+ .totlen = constant_cpu_to_je32(8),
51451+ .hdr_crc = constant_cpu_to_je32(0)
51452 };
51453
51454 /*
51455diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51456index 082e844..52012a1 100644
51457--- a/fs/jffs2/xattr.c
51458+++ b/fs/jffs2/xattr.c
51459@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51460
51461 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51462
51463+ pax_track_stack();
51464+
51465 /* Phase.1 : Merge same xref */
51466 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51467 xref_tmphash[i] = NULL;
51468diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51469index 2234c73..f6e6e6b 100644
51470--- a/fs/jfs/super.c
51471+++ b/fs/jfs/super.c
51472@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51473
51474 jfs_inode_cachep =
51475 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51476- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51477+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51478 init_once);
51479 if (jfs_inode_cachep == NULL)
51480 return -ENOMEM;
51481diff --git a/fs/libfs.c b/fs/libfs.c
51482index ba36e93..3153fce 100644
51483--- a/fs/libfs.c
51484+++ b/fs/libfs.c
51485@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51486
51487 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51488 struct dentry *next;
51489+ char d_name[sizeof(next->d_iname)];
51490+ const unsigned char *name;
51491+
51492 next = list_entry(p, struct dentry, d_u.d_child);
51493 if (d_unhashed(next) || !next->d_inode)
51494 continue;
51495
51496 spin_unlock(&dcache_lock);
51497- if (filldir(dirent, next->d_name.name,
51498+ name = next->d_name.name;
51499+ if (name == next->d_iname) {
51500+ memcpy(d_name, name, next->d_name.len);
51501+ name = d_name;
51502+ }
51503+ if (filldir(dirent, name,
51504 next->d_name.len, filp->f_pos,
51505 next->d_inode->i_ino,
51506 dt_type(next->d_inode)) < 0)
51507diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51508index c325a83..d15b07b 100644
51509--- a/fs/lockd/clntproc.c
51510+++ b/fs/lockd/clntproc.c
51511@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51512 /*
51513 * Cookie counter for NLM requests
51514 */
51515-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51516+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51517
51518 void nlmclnt_next_cookie(struct nlm_cookie *c)
51519 {
51520- u32 cookie = atomic_inc_return(&nlm_cookie);
51521+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51522
51523 memcpy(c->data, &cookie, 4);
51524 c->len=4;
51525@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51526 struct nlm_rqst reqst, *req;
51527 int status;
51528
51529+ pax_track_stack();
51530+
51531 req = &reqst;
51532 memset(req, 0, sizeof(*req));
51533 locks_init_lock(&req->a_args.lock.fl);
51534diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51535index 1a54ae1..6a16c27 100644
51536--- a/fs/lockd/svc.c
51537+++ b/fs/lockd/svc.c
51538@@ -43,7 +43,7 @@
51539
51540 static struct svc_program nlmsvc_program;
51541
51542-struct nlmsvc_binding * nlmsvc_ops;
51543+const struct nlmsvc_binding * nlmsvc_ops;
51544 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51545
51546 static DEFINE_MUTEX(nlmsvc_mutex);
51547diff --git a/fs/locks.c b/fs/locks.c
51548index a8794f2..4041e55 100644
51549--- a/fs/locks.c
51550+++ b/fs/locks.c
51551@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51552
51553 static struct kmem_cache *filelock_cache __read_mostly;
51554
51555+static void locks_init_lock_always(struct file_lock *fl)
51556+{
51557+ fl->fl_next = NULL;
51558+ fl->fl_fasync = NULL;
51559+ fl->fl_owner = NULL;
51560+ fl->fl_pid = 0;
51561+ fl->fl_nspid = NULL;
51562+ fl->fl_file = NULL;
51563+ fl->fl_flags = 0;
51564+ fl->fl_type = 0;
51565+ fl->fl_start = fl->fl_end = 0;
51566+}
51567+
51568 /* Allocate an empty lock structure. */
51569 static struct file_lock *locks_alloc_lock(void)
51570 {
51571- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51572+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51573+
51574+ if (fl)
51575+ locks_init_lock_always(fl);
51576+
51577+ return fl;
51578 }
51579
51580 void locks_release_private(struct file_lock *fl)
51581@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51582 INIT_LIST_HEAD(&fl->fl_link);
51583 INIT_LIST_HEAD(&fl->fl_block);
51584 init_waitqueue_head(&fl->fl_wait);
51585- fl->fl_next = NULL;
51586- fl->fl_fasync = NULL;
51587- fl->fl_owner = NULL;
51588- fl->fl_pid = 0;
51589- fl->fl_nspid = NULL;
51590- fl->fl_file = NULL;
51591- fl->fl_flags = 0;
51592- fl->fl_type = 0;
51593- fl->fl_start = fl->fl_end = 0;
51594 fl->fl_ops = NULL;
51595 fl->fl_lmops = NULL;
51596+ locks_init_lock_always(fl);
51597 }
51598
51599 EXPORT_SYMBOL(locks_init_lock);
51600@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51601 return;
51602
51603 if (filp->f_op && filp->f_op->flock) {
51604- struct file_lock fl = {
51605+ struct file_lock flock = {
51606 .fl_pid = current->tgid,
51607 .fl_file = filp,
51608 .fl_flags = FL_FLOCK,
51609 .fl_type = F_UNLCK,
51610 .fl_end = OFFSET_MAX,
51611 };
51612- filp->f_op->flock(filp, F_SETLKW, &fl);
51613- if (fl.fl_ops && fl.fl_ops->fl_release_private)
51614- fl.fl_ops->fl_release_private(&fl);
51615+ filp->f_op->flock(filp, F_SETLKW, &flock);
51616+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
51617+ flock.fl_ops->fl_release_private(&flock);
51618 }
51619
51620 lock_kernel();
51621diff --git a/fs/mbcache.c b/fs/mbcache.c
51622index ec88ff3..b843a82 100644
51623--- a/fs/mbcache.c
51624+++ b/fs/mbcache.c
51625@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51626 if (!cache)
51627 goto fail;
51628 cache->c_name = name;
51629- cache->c_op.free = NULL;
51630+ *(void **)&cache->c_op.free = NULL;
51631 if (cache_op)
51632- cache->c_op.free = cache_op->free;
51633+ *(void **)&cache->c_op.free = cache_op->free;
51634 atomic_set(&cache->c_entry_count, 0);
51635 cache->c_bucket_bits = bucket_bits;
51636 #ifdef MB_CACHE_INDEXES_COUNT
51637diff --git a/fs/namei.c b/fs/namei.c
51638index b0afbd4..8d065a1 100644
51639--- a/fs/namei.c
51640+++ b/fs/namei.c
51641@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51642 return ret;
51643
51644 /*
51645+ * Searching includes executable on directories, else just read.
51646+ */
51647+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51648+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51649+ if (capable(CAP_DAC_READ_SEARCH))
51650+ return 0;
51651+
51652+ /*
51653 * Read/write DACs are always overridable.
51654 * Executable DACs are overridable if at least one exec bit is set.
51655 */
51656@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51657 if (capable(CAP_DAC_OVERRIDE))
51658 return 0;
51659
51660- /*
51661- * Searching includes executable on directories, else just read.
51662- */
51663- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51664- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51665- if (capable(CAP_DAC_READ_SEARCH))
51666- return 0;
51667-
51668 return -EACCES;
51669 }
51670
51671@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51672 if (!ret)
51673 goto ok;
51674
51675- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51676+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51677+ capable(CAP_DAC_OVERRIDE))
51678 goto ok;
51679
51680 return ret;
51681@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51682 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51683 error = PTR_ERR(cookie);
51684 if (!IS_ERR(cookie)) {
51685- char *s = nd_get_link(nd);
51686+ const char *s = nd_get_link(nd);
51687 error = 0;
51688 if (s)
51689 error = __vfs_follow_link(nd, s);
51690@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51691 err = security_inode_follow_link(path->dentry, nd);
51692 if (err)
51693 goto loop;
51694+
51695+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51696+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51697+ err = -EACCES;
51698+ goto loop;
51699+ }
51700+
51701 current->link_count++;
51702 current->total_link_count++;
51703 nd->depth++;
51704@@ -1016,11 +1024,19 @@ return_reval:
51705 break;
51706 }
51707 return_base:
51708+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51709+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51710+ path_put(&nd->path);
51711+ return -ENOENT;
51712+ }
51713 return 0;
51714 out_dput:
51715 path_put_conditional(&next, nd);
51716 break;
51717 }
51718+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51719+ err = -ENOENT;
51720+
51721 path_put(&nd->path);
51722 return_err:
51723 return err;
51724@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51725 int retval = path_init(dfd, name, flags, nd);
51726 if (!retval)
51727 retval = path_walk(name, nd);
51728- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51729- nd->path.dentry->d_inode))
51730- audit_inode(name, nd->path.dentry);
51731+
51732+ if (likely(!retval)) {
51733+ if (nd->path.dentry && nd->path.dentry->d_inode) {
51734+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51735+ retval = -ENOENT;
51736+ if (!audit_dummy_context())
51737+ audit_inode(name, nd->path.dentry);
51738+ }
51739+ }
51740 if (nd->root.mnt) {
51741 path_put(&nd->root);
51742 nd->root.mnt = NULL;
51743 }
51744+
51745 return retval;
51746 }
51747
51748@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51749 if (error)
51750 goto err_out;
51751
51752+
51753+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51754+ error = -EPERM;
51755+ goto err_out;
51756+ }
51757+ if (gr_handle_rawio(inode)) {
51758+ error = -EPERM;
51759+ goto err_out;
51760+ }
51761+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51762+ error = -EACCES;
51763+ goto err_out;
51764+ }
51765+
51766 if (flag & O_TRUNC) {
51767 error = get_write_access(inode);
51768 if (error)
51769@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51770 {
51771 int error;
51772 struct dentry *dir = nd->path.dentry;
51773+ int acc_mode = ACC_MODE(flag);
51774+
51775+ if (flag & O_TRUNC)
51776+ acc_mode |= MAY_WRITE;
51777+ if (flag & O_APPEND)
51778+ acc_mode |= MAY_APPEND;
51779+
51780+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51781+ error = -EACCES;
51782+ goto out_unlock;
51783+ }
51784
51785 if (!IS_POSIXACL(dir->d_inode))
51786 mode &= ~current_umask();
51787@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51788 if (error)
51789 goto out_unlock;
51790 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51791+ if (!error)
51792+ gr_handle_create(path->dentry, nd->path.mnt);
51793 out_unlock:
51794 mutex_unlock(&dir->d_inode->i_mutex);
51795 dput(nd->path.dentry);
51796@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51797 &nd, flag);
51798 if (error)
51799 return ERR_PTR(error);
51800+
51801+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51802+ error = -EPERM;
51803+ goto exit;
51804+ }
51805+
51806+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51807+ error = -EPERM;
51808+ goto exit;
51809+ }
51810+
51811+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51812+ error = -EACCES;
51813+ goto exit;
51814+ }
51815+
51816 goto ok;
51817 }
51818
51819@@ -1795,6 +1861,19 @@ do_last:
51820 /*
51821 * It already exists.
51822 */
51823+
51824+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51825+ error = -ENOENT;
51826+ goto exit_mutex_unlock;
51827+ }
51828+
51829+ /* only check if O_CREAT is specified, all other checks need
51830+ to go into may_open */
51831+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51832+ error = -EACCES;
51833+ goto exit_mutex_unlock;
51834+ }
51835+
51836 mutex_unlock(&dir->d_inode->i_mutex);
51837 audit_inode(pathname, path.dentry);
51838
51839@@ -1887,6 +1966,13 @@ do_link:
51840 error = security_inode_follow_link(path.dentry, &nd);
51841 if (error)
51842 goto exit_dput;
51843+
51844+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51845+ path.dentry, nd.path.mnt)) {
51846+ error = -EACCES;
51847+ goto exit_dput;
51848+ }
51849+
51850 error = __do_follow_link(&path, &nd);
51851 if (error) {
51852 /* Does someone understand code flow here? Or it is only
51853@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51854 }
51855 return dentry;
51856 eexist:
51857+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51858+ dput(dentry);
51859+ return ERR_PTR(-ENOENT);
51860+ }
51861 dput(dentry);
51862 dentry = ERR_PTR(-EEXIST);
51863 fail:
51864@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51865 error = may_mknod(mode);
51866 if (error)
51867 goto out_dput;
51868+
51869+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51870+ error = -EPERM;
51871+ goto out_dput;
51872+ }
51873+
51874+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51875+ error = -EACCES;
51876+ goto out_dput;
51877+ }
51878+
51879 error = mnt_want_write(nd.path.mnt);
51880 if (error)
51881 goto out_dput;
51882@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51883 }
51884 out_drop_write:
51885 mnt_drop_write(nd.path.mnt);
51886+
51887+ if (!error)
51888+ gr_handle_create(dentry, nd.path.mnt);
51889 out_dput:
51890 dput(dentry);
51891 out_unlock:
51892@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51893 if (IS_ERR(dentry))
51894 goto out_unlock;
51895
51896+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51897+ error = -EACCES;
51898+ goto out_dput;
51899+ }
51900+
51901 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51902 mode &= ~current_umask();
51903 error = mnt_want_write(nd.path.mnt);
51904@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51905 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51906 out_drop_write:
51907 mnt_drop_write(nd.path.mnt);
51908+
51909+ if (!error)
51910+ gr_handle_create(dentry, nd.path.mnt);
51911+
51912 out_dput:
51913 dput(dentry);
51914 out_unlock:
51915@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51916 char * name;
51917 struct dentry *dentry;
51918 struct nameidata nd;
51919+ ino_t saved_ino = 0;
51920+ dev_t saved_dev = 0;
51921
51922 error = user_path_parent(dfd, pathname, &nd, &name);
51923 if (error)
51924@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51925 error = PTR_ERR(dentry);
51926 if (IS_ERR(dentry))
51927 goto exit2;
51928+
51929+ if (dentry->d_inode != NULL) {
51930+ saved_ino = dentry->d_inode->i_ino;
51931+ saved_dev = gr_get_dev_from_dentry(dentry);
51932+
51933+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51934+ error = -EACCES;
51935+ goto exit3;
51936+ }
51937+ }
51938+
51939 error = mnt_want_write(nd.path.mnt);
51940 if (error)
51941 goto exit3;
51942@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51943 if (error)
51944 goto exit4;
51945 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51946+ if (!error && (saved_dev || saved_ino))
51947+ gr_handle_delete(saved_ino, saved_dev);
51948 exit4:
51949 mnt_drop_write(nd.path.mnt);
51950 exit3:
51951@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51952 struct dentry *dentry;
51953 struct nameidata nd;
51954 struct inode *inode = NULL;
51955+ ino_t saved_ino = 0;
51956+ dev_t saved_dev = 0;
51957
51958 error = user_path_parent(dfd, pathname, &nd, &name);
51959 if (error)
51960@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51961 if (nd.last.name[nd.last.len])
51962 goto slashes;
51963 inode = dentry->d_inode;
51964- if (inode)
51965+ if (inode) {
51966+ if (inode->i_nlink <= 1) {
51967+ saved_ino = inode->i_ino;
51968+ saved_dev = gr_get_dev_from_dentry(dentry);
51969+ }
51970+
51971 atomic_inc(&inode->i_count);
51972+
51973+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51974+ error = -EACCES;
51975+ goto exit2;
51976+ }
51977+ }
51978 error = mnt_want_write(nd.path.mnt);
51979 if (error)
51980 goto exit2;
51981@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51982 if (error)
51983 goto exit3;
51984 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51985+ if (!error && (saved_ino || saved_dev))
51986+ gr_handle_delete(saved_ino, saved_dev);
51987 exit3:
51988 mnt_drop_write(nd.path.mnt);
51989 exit2:
51990@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51991 if (IS_ERR(dentry))
51992 goto out_unlock;
51993
51994+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51995+ error = -EACCES;
51996+ goto out_dput;
51997+ }
51998+
51999 error = mnt_want_write(nd.path.mnt);
52000 if (error)
52001 goto out_dput;
52002@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52003 if (error)
52004 goto out_drop_write;
52005 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
52006+ if (!error)
52007+ gr_handle_create(dentry, nd.path.mnt);
52008 out_drop_write:
52009 mnt_drop_write(nd.path.mnt);
52010 out_dput:
52011@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52012 error = PTR_ERR(new_dentry);
52013 if (IS_ERR(new_dentry))
52014 goto out_unlock;
52015+
52016+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52017+ old_path.dentry->d_inode,
52018+ old_path.dentry->d_inode->i_mode, to)) {
52019+ error = -EACCES;
52020+ goto out_dput;
52021+ }
52022+
52023+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
52024+ old_path.dentry, old_path.mnt, to)) {
52025+ error = -EACCES;
52026+ goto out_dput;
52027+ }
52028+
52029 error = mnt_want_write(nd.path.mnt);
52030 if (error)
52031 goto out_dput;
52032@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52033 if (error)
52034 goto out_drop_write;
52035 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
52036+ if (!error)
52037+ gr_handle_create(new_dentry, nd.path.mnt);
52038 out_drop_write:
52039 mnt_drop_write(nd.path.mnt);
52040 out_dput:
52041@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52042 char *to;
52043 int error;
52044
52045+ pax_track_stack();
52046+
52047 error = user_path_parent(olddfd, oldname, &oldnd, &from);
52048 if (error)
52049 goto exit;
52050@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52051 if (new_dentry == trap)
52052 goto exit5;
52053
52054+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52055+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
52056+ to);
52057+ if (error)
52058+ goto exit5;
52059+
52060 error = mnt_want_write(oldnd.path.mnt);
52061 if (error)
52062 goto exit5;
52063@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52064 goto exit6;
52065 error = vfs_rename(old_dir->d_inode, old_dentry,
52066 new_dir->d_inode, new_dentry);
52067+ if (!error)
52068+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52069+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52070 exit6:
52071 mnt_drop_write(oldnd.path.mnt);
52072 exit5:
52073@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52074
52075 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52076 {
52077+ char tmpbuf[64];
52078+ const char *newlink;
52079 int len;
52080
52081 len = PTR_ERR(link);
52082@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52083 len = strlen(link);
52084 if (len > (unsigned) buflen)
52085 len = buflen;
52086- if (copy_to_user(buffer, link, len))
52087+
52088+ if (len < sizeof(tmpbuf)) {
52089+ memcpy(tmpbuf, link, len);
52090+ newlink = tmpbuf;
52091+ } else
52092+ newlink = link;
52093+
52094+ if (copy_to_user(buffer, newlink, len))
52095 len = -EFAULT;
52096 out:
52097 return len;
52098diff --git a/fs/namespace.c b/fs/namespace.c
52099index 2beb0fb..11a95a5 100644
52100--- a/fs/namespace.c
52101+++ b/fs/namespace.c
52102@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52103 if (!(sb->s_flags & MS_RDONLY))
52104 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52105 up_write(&sb->s_umount);
52106+
52107+ gr_log_remount(mnt->mnt_devname, retval);
52108+
52109 return retval;
52110 }
52111
52112@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52113 security_sb_umount_busy(mnt);
52114 up_write(&namespace_sem);
52115 release_mounts(&umount_list);
52116+
52117+ gr_log_unmount(mnt->mnt_devname, retval);
52118+
52119 return retval;
52120 }
52121
52122@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52123 if (retval)
52124 goto dput_out;
52125
52126+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52127+ retval = -EPERM;
52128+ goto dput_out;
52129+ }
52130+
52131+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52132+ retval = -EPERM;
52133+ goto dput_out;
52134+ }
52135+
52136 if (flags & MS_REMOUNT)
52137 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52138 data_page);
52139@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52140 dev_name, data_page);
52141 dput_out:
52142 path_put(&path);
52143+
52144+ gr_log_mount(dev_name, dir_name, retval);
52145+
52146 return retval;
52147 }
52148
52149@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52150 goto out1;
52151 }
52152
52153+ if (gr_handle_chroot_pivot()) {
52154+ error = -EPERM;
52155+ path_put(&old);
52156+ goto out1;
52157+ }
52158+
52159 read_lock(&current->fs->lock);
52160 root = current->fs->root;
52161 path_get(&current->fs->root);
52162diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
52163index b8b5b30..2bd9ccb 100644
52164--- a/fs/ncpfs/dir.c
52165+++ b/fs/ncpfs/dir.c
52166@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
52167 int res, val = 0, len;
52168 __u8 __name[NCP_MAXPATHLEN + 1];
52169
52170+ pax_track_stack();
52171+
52172 parent = dget_parent(dentry);
52173 dir = parent->d_inode;
52174
52175@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
52176 int error, res, len;
52177 __u8 __name[NCP_MAXPATHLEN + 1];
52178
52179+ pax_track_stack();
52180+
52181 lock_kernel();
52182 error = -EIO;
52183 if (!ncp_conn_valid(server))
52184@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
52185 int error, result, len;
52186 int opmode;
52187 __u8 __name[NCP_MAXPATHLEN + 1];
52188-
52189+
52190 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
52191 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
52192
52193+ pax_track_stack();
52194+
52195 error = -EIO;
52196 lock_kernel();
52197 if (!ncp_conn_valid(server))
52198@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52199 int error, len;
52200 __u8 __name[NCP_MAXPATHLEN + 1];
52201
52202+ pax_track_stack();
52203+
52204 DPRINTK("ncp_mkdir: making %s/%s\n",
52205 dentry->d_parent->d_name.name, dentry->d_name.name);
52206
52207@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52208 if (!ncp_conn_valid(server))
52209 goto out;
52210
52211+ pax_track_stack();
52212+
52213 ncp_age_dentry(server, dentry);
52214 len = sizeof(__name);
52215 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
52216@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
52217 int old_len, new_len;
52218 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
52219
52220+ pax_track_stack();
52221+
52222 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
52223 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
52224 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
52225diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
52226index cf98da1..da890a9 100644
52227--- a/fs/ncpfs/inode.c
52228+++ b/fs/ncpfs/inode.c
52229@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
52230 #endif
52231 struct ncp_entry_info finfo;
52232
52233+ pax_track_stack();
52234+
52235 data.wdog_pid = NULL;
52236 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
52237 if (!server)
52238diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
52239index bfaef7b..e9d03ca 100644
52240--- a/fs/nfs/inode.c
52241+++ b/fs/nfs/inode.c
52242@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
52243 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
52244 nfsi->attrtimeo_timestamp = jiffies;
52245
52246- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
52247+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
52248 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
52249 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
52250 else
52251@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
52252 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
52253 }
52254
52255-static atomic_long_t nfs_attr_generation_counter;
52256+static atomic_long_unchecked_t nfs_attr_generation_counter;
52257
52258 static unsigned long nfs_read_attr_generation_counter(void)
52259 {
52260- return atomic_long_read(&nfs_attr_generation_counter);
52261+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
52262 }
52263
52264 unsigned long nfs_inc_attr_generation_counter(void)
52265 {
52266- return atomic_long_inc_return(&nfs_attr_generation_counter);
52267+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
52268 }
52269
52270 void nfs_fattr_init(struct nfs_fattr *fattr)
52271diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
52272index cc2f505..f6a236f 100644
52273--- a/fs/nfsd/lockd.c
52274+++ b/fs/nfsd/lockd.c
52275@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
52276 fput(filp);
52277 }
52278
52279-static struct nlmsvc_binding nfsd_nlm_ops = {
52280+static const struct nlmsvc_binding nfsd_nlm_ops = {
52281 .fopen = nlm_fopen, /* open file for locking */
52282 .fclose = nlm_fclose, /* close file */
52283 };
52284diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
52285index cfc3391..dcc083a 100644
52286--- a/fs/nfsd/nfs4state.c
52287+++ b/fs/nfsd/nfs4state.c
52288@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
52289 unsigned int cmd;
52290 int err;
52291
52292+ pax_track_stack();
52293+
52294 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
52295 (long long) lock->lk_offset,
52296 (long long) lock->lk_length);
52297diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
52298index 4a82a96..0d5fb49 100644
52299--- a/fs/nfsd/nfs4xdr.c
52300+++ b/fs/nfsd/nfs4xdr.c
52301@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52302 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52303 u32 minorversion = resp->cstate.minorversion;
52304
52305+ pax_track_stack();
52306+
52307 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52308 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52309 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52310diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52311index 2e09588..596421d 100644
52312--- a/fs/nfsd/vfs.c
52313+++ b/fs/nfsd/vfs.c
52314@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52315 } else {
52316 oldfs = get_fs();
52317 set_fs(KERNEL_DS);
52318- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52319+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52320 set_fs(oldfs);
52321 }
52322
52323@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52324
52325 /* Write the data. */
52326 oldfs = get_fs(); set_fs(KERNEL_DS);
52327- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52328+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52329 set_fs(oldfs);
52330 if (host_err < 0)
52331 goto out_nfserr;
52332@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52333 */
52334
52335 oldfs = get_fs(); set_fs(KERNEL_DS);
52336- host_err = inode->i_op->readlink(dentry, buf, *lenp);
52337+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52338 set_fs(oldfs);
52339
52340 if (host_err < 0)
52341diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52342index f6af760..d0adf34 100644
52343--- a/fs/nilfs2/ioctl.c
52344+++ b/fs/nilfs2/ioctl.c
52345@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52346 unsigned int cmd, void __user *argp)
52347 {
52348 struct nilfs_argv argv[5];
52349- const static size_t argsz[5] = {
52350+ static const size_t argsz[5] = {
52351 sizeof(struct nilfs_vdesc),
52352 sizeof(struct nilfs_period),
52353 sizeof(__u64),
52354@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52355 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52356 goto out_free;
52357
52358+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52359+ goto out_free;
52360+
52361 len = argv[n].v_size * argv[n].v_nmembs;
52362 base = (void __user *)(unsigned long)argv[n].v_base;
52363 if (len == 0) {
52364diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52365index 7e54e52..9337248 100644
52366--- a/fs/notify/dnotify/dnotify.c
52367+++ b/fs/notify/dnotify/dnotify.c
52368@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52369 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52370 }
52371
52372-static struct fsnotify_ops dnotify_fsnotify_ops = {
52373+static const struct fsnotify_ops dnotify_fsnotify_ops = {
52374 .handle_event = dnotify_handle_event,
52375 .should_send_event = dnotify_should_send_event,
52376 .free_group_priv = NULL,
52377diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52378index b8bf53b..c518688 100644
52379--- a/fs/notify/notification.c
52380+++ b/fs/notify/notification.c
52381@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52382 * get set to 0 so it will never get 'freed'
52383 */
52384 static struct fsnotify_event q_overflow_event;
52385-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52386+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52387
52388 /**
52389 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52390@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52391 */
52392 u32 fsnotify_get_cookie(void)
52393 {
52394- return atomic_inc_return(&fsnotify_sync_cookie);
52395+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52396 }
52397 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52398
52399diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52400index 5a9e344..0f8cd28 100644
52401--- a/fs/ntfs/dir.c
52402+++ b/fs/ntfs/dir.c
52403@@ -1328,7 +1328,7 @@ find_next_index_buffer:
52404 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52405 ~(s64)(ndir->itype.index.block_size - 1)));
52406 /* Bounds checks. */
52407- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52408+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52409 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52410 "inode 0x%lx or driver bug.", vdir->i_ino);
52411 goto err_out;
52412diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52413index 663c0e3..b6868e9 100644
52414--- a/fs/ntfs/file.c
52415+++ b/fs/ntfs/file.c
52416@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52417 #endif /* NTFS_RW */
52418 };
52419
52420-const struct file_operations ntfs_empty_file_ops = {};
52421+const struct file_operations ntfs_empty_file_ops __read_only;
52422
52423-const struct inode_operations ntfs_empty_inode_ops = {};
52424+const struct inode_operations ntfs_empty_inode_ops __read_only;
52425diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52426index 1cd2934..880b5d2 100644
52427--- a/fs/ocfs2/cluster/masklog.c
52428+++ b/fs/ocfs2/cluster/masklog.c
52429@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52430 return mlog_mask_store(mlog_attr->mask, buf, count);
52431 }
52432
52433-static struct sysfs_ops mlog_attr_ops = {
52434+static const struct sysfs_ops mlog_attr_ops = {
52435 .show = mlog_show,
52436 .store = mlog_store,
52437 };
52438diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52439index ac10f83..2cd2607 100644
52440--- a/fs/ocfs2/localalloc.c
52441+++ b/fs/ocfs2/localalloc.c
52442@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52443 goto bail;
52444 }
52445
52446- atomic_inc(&osb->alloc_stats.moves);
52447+ atomic_inc_unchecked(&osb->alloc_stats.moves);
52448
52449 status = 0;
52450 bail:
52451diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52452index f010b22..9f9ed34 100644
52453--- a/fs/ocfs2/namei.c
52454+++ b/fs/ocfs2/namei.c
52455@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52456 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52457 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52458
52459+ pax_track_stack();
52460+
52461 /* At some point it might be nice to break this function up a
52462 * bit. */
52463
52464diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52465index d963d86..914cfbd 100644
52466--- a/fs/ocfs2/ocfs2.h
52467+++ b/fs/ocfs2/ocfs2.h
52468@@ -217,11 +217,11 @@ enum ocfs2_vol_state
52469
52470 struct ocfs2_alloc_stats
52471 {
52472- atomic_t moves;
52473- atomic_t local_data;
52474- atomic_t bitmap_data;
52475- atomic_t bg_allocs;
52476- atomic_t bg_extends;
52477+ atomic_unchecked_t moves;
52478+ atomic_unchecked_t local_data;
52479+ atomic_unchecked_t bitmap_data;
52480+ atomic_unchecked_t bg_allocs;
52481+ atomic_unchecked_t bg_extends;
52482 };
52483
52484 enum ocfs2_local_alloc_state
52485diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52486index 79b5dac..d322952 100644
52487--- a/fs/ocfs2/suballoc.c
52488+++ b/fs/ocfs2/suballoc.c
52489@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52490 mlog_errno(status);
52491 goto bail;
52492 }
52493- atomic_inc(&osb->alloc_stats.bg_extends);
52494+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52495
52496 /* You should never ask for this much metadata */
52497 BUG_ON(bits_wanted >
52498@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52499 mlog_errno(status);
52500 goto bail;
52501 }
52502- atomic_inc(&osb->alloc_stats.bg_allocs);
52503+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52504
52505 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52506 ac->ac_bits_given += (*num_bits);
52507@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52508 mlog_errno(status);
52509 goto bail;
52510 }
52511- atomic_inc(&osb->alloc_stats.bg_allocs);
52512+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52513
52514 BUG_ON(num_bits != 1);
52515
52516@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52517 cluster_start,
52518 num_clusters);
52519 if (!status)
52520- atomic_inc(&osb->alloc_stats.local_data);
52521+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
52522 } else {
52523 if (min_clusters > (osb->bitmap_cpg - 1)) {
52524 /* The only paths asking for contiguousness
52525@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52526 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52527 bg_blkno,
52528 bg_bit_off);
52529- atomic_inc(&osb->alloc_stats.bitmap_data);
52530+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52531 }
52532 }
52533 if (status < 0) {
52534diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52535index 9f55be4..a3f8048 100644
52536--- a/fs/ocfs2/super.c
52537+++ b/fs/ocfs2/super.c
52538@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52539 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52540 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52541 "Stats",
52542- atomic_read(&osb->alloc_stats.bitmap_data),
52543- atomic_read(&osb->alloc_stats.local_data),
52544- atomic_read(&osb->alloc_stats.bg_allocs),
52545- atomic_read(&osb->alloc_stats.moves),
52546- atomic_read(&osb->alloc_stats.bg_extends));
52547+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52548+ atomic_read_unchecked(&osb->alloc_stats.local_data),
52549+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52550+ atomic_read_unchecked(&osb->alloc_stats.moves),
52551+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52552
52553 out += snprintf(buf + out, len - out,
52554 "%10s => State: %u Descriptor: %llu Size: %u bits "
52555@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52556 spin_lock_init(&osb->osb_xattr_lock);
52557 ocfs2_init_inode_steal_slot(osb);
52558
52559- atomic_set(&osb->alloc_stats.moves, 0);
52560- atomic_set(&osb->alloc_stats.local_data, 0);
52561- atomic_set(&osb->alloc_stats.bitmap_data, 0);
52562- atomic_set(&osb->alloc_stats.bg_allocs, 0);
52563- atomic_set(&osb->alloc_stats.bg_extends, 0);
52564+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52565+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52566+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52567+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52568+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52569
52570 /* Copy the blockcheck stats from the superblock probe */
52571 osb->osb_ecc_stats = *stats;
52572diff --git a/fs/open.c b/fs/open.c
52573index 4f01e06..2a8057a 100644
52574--- a/fs/open.c
52575+++ b/fs/open.c
52576@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52577 error = locks_verify_truncate(inode, NULL, length);
52578 if (!error)
52579 error = security_path_truncate(&path, length, 0);
52580+
52581+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52582+ error = -EACCES;
52583+
52584 if (!error) {
52585 vfs_dq_init(inode);
52586 error = do_truncate(path.dentry, length, 0, NULL);
52587@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52588 if (__mnt_is_readonly(path.mnt))
52589 res = -EROFS;
52590
52591+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52592+ res = -EACCES;
52593+
52594 out_path_release:
52595 path_put(&path);
52596 out:
52597@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52598 if (error)
52599 goto dput_and_out;
52600
52601+ gr_log_chdir(path.dentry, path.mnt);
52602+
52603 set_fs_pwd(current->fs, &path);
52604
52605 dput_and_out:
52606@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52607 goto out_putf;
52608
52609 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52610+
52611+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52612+ error = -EPERM;
52613+
52614+ if (!error)
52615+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52616+
52617 if (!error)
52618 set_fs_pwd(current->fs, &file->f_path);
52619 out_putf:
52620@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52621 if (!capable(CAP_SYS_CHROOT))
52622 goto dput_and_out;
52623
52624+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52625+ goto dput_and_out;
52626+
52627 set_fs_root(current->fs, &path);
52628+
52629+ gr_handle_chroot_chdir(&path);
52630+
52631 error = 0;
52632 dput_and_out:
52633 path_put(&path);
52634@@ -596,66 +618,57 @@ out:
52635 return error;
52636 }
52637
52638+static int chmod_common(struct path *path, umode_t mode)
52639+{
52640+ struct inode *inode = path->dentry->d_inode;
52641+ struct iattr newattrs;
52642+ int error;
52643+
52644+ error = mnt_want_write(path->mnt);
52645+ if (error)
52646+ return error;
52647+ mutex_lock(&inode->i_mutex);
52648+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
52649+ error = -EACCES;
52650+ goto out_unlock;
52651+ }
52652+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
52653+ error = -EPERM;
52654+ goto out_unlock;
52655+ }
52656+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52657+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52658+ error = notify_change(path->dentry, &newattrs);
52659+out_unlock:
52660+ mutex_unlock(&inode->i_mutex);
52661+ mnt_drop_write(path->mnt);
52662+ return error;
52663+}
52664+
52665 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52666 {
52667- struct inode * inode;
52668- struct dentry * dentry;
52669 struct file * file;
52670 int err = -EBADF;
52671- struct iattr newattrs;
52672
52673 file = fget(fd);
52674- if (!file)
52675- goto out;
52676-
52677- dentry = file->f_path.dentry;
52678- inode = dentry->d_inode;
52679-
52680- audit_inode(NULL, dentry);
52681-
52682- err = mnt_want_write_file(file);
52683- if (err)
52684- goto out_putf;
52685- mutex_lock(&inode->i_mutex);
52686- if (mode == (mode_t) -1)
52687- mode = inode->i_mode;
52688- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52689- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52690- err = notify_change(dentry, &newattrs);
52691- mutex_unlock(&inode->i_mutex);
52692- mnt_drop_write(file->f_path.mnt);
52693-out_putf:
52694- fput(file);
52695-out:
52696+ if (file) {
52697+ audit_inode(NULL, file->f_path.dentry);
52698+ err = chmod_common(&file->f_path, mode);
52699+ fput(file);
52700+ }
52701 return err;
52702 }
52703
52704 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52705 {
52706 struct path path;
52707- struct inode *inode;
52708 int error;
52709- struct iattr newattrs;
52710
52711 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
52712- if (error)
52713- goto out;
52714- inode = path.dentry->d_inode;
52715-
52716- error = mnt_want_write(path.mnt);
52717- if (error)
52718- goto dput_and_out;
52719- mutex_lock(&inode->i_mutex);
52720- if (mode == (mode_t) -1)
52721- mode = inode->i_mode;
52722- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52723- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52724- error = notify_change(path.dentry, &newattrs);
52725- mutex_unlock(&inode->i_mutex);
52726- mnt_drop_write(path.mnt);
52727-dput_and_out:
52728- path_put(&path);
52729-out:
52730+ if (!error) {
52731+ error = chmod_common(&path, mode);
52732+ path_put(&path);
52733+ }
52734 return error;
52735 }
52736
52737@@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52738 return sys_fchmodat(AT_FDCWD, filename, mode);
52739 }
52740
52741-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52742+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52743 {
52744 struct inode *inode = dentry->d_inode;
52745 int error;
52746 struct iattr newattrs;
52747
52748+ if (!gr_acl_handle_chown(dentry, mnt))
52749+ return -EACCES;
52750+
52751 newattrs.ia_valid = ATTR_CTIME;
52752 if (user != (uid_t) -1) {
52753 newattrs.ia_valid |= ATTR_UID;
52754@@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52755 error = mnt_want_write(path.mnt);
52756 if (error)
52757 goto out_release;
52758- error = chown_common(path.dentry, user, group);
52759+ error = chown_common(path.dentry, user, group, path.mnt);
52760 mnt_drop_write(path.mnt);
52761 out_release:
52762 path_put(&path);
52763@@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52764 error = mnt_want_write(path.mnt);
52765 if (error)
52766 goto out_release;
52767- error = chown_common(path.dentry, user, group);
52768+ error = chown_common(path.dentry, user, group, path.mnt);
52769 mnt_drop_write(path.mnt);
52770 out_release:
52771 path_put(&path);
52772@@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52773 error = mnt_want_write(path.mnt);
52774 if (error)
52775 goto out_release;
52776- error = chown_common(path.dentry, user, group);
52777+ error = chown_common(path.dentry, user, group, path.mnt);
52778 mnt_drop_write(path.mnt);
52779 out_release:
52780 path_put(&path);
52781@@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52782 goto out_fput;
52783 dentry = file->f_path.dentry;
52784 audit_inode(NULL, dentry);
52785- error = chown_common(dentry, user, group);
52786+ error = chown_common(dentry, user, group, file->f_path.mnt);
52787 mnt_drop_write(file->f_path.mnt);
52788 out_fput:
52789 fput(file);
52790@@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52791 if (!IS_ERR(tmp)) {
52792 fd = get_unused_fd_flags(flags);
52793 if (fd >= 0) {
52794- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52795+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52796 if (IS_ERR(f)) {
52797 put_unused_fd(fd);
52798 fd = PTR_ERR(f);
52799diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52800index 6ab70f4..f4103d1 100644
52801--- a/fs/partitions/efi.c
52802+++ b/fs/partitions/efi.c
52803@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52804 if (!bdev || !gpt)
52805 return NULL;
52806
52807+ if (!le32_to_cpu(gpt->num_partition_entries))
52808+ return NULL;
52809+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52810+ if (!pte)
52811+ return NULL;
52812+
52813 count = le32_to_cpu(gpt->num_partition_entries) *
52814 le32_to_cpu(gpt->sizeof_partition_entry);
52815- if (!count)
52816- return NULL;
52817- pte = kzalloc(count, GFP_KERNEL);
52818- if (!pte)
52819- return NULL;
52820-
52821 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52822 (u8 *) pte,
52823 count) < count) {
52824diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52825index dd6efdb..3babc6c 100644
52826--- a/fs/partitions/ldm.c
52827+++ b/fs/partitions/ldm.c
52828@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52829 ldm_error ("A VBLK claims to have %d parts.", num);
52830 return false;
52831 }
52832+
52833 if (rec >= num) {
52834 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52835 return false;
52836@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52837 goto found;
52838 }
52839
52840- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52841+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52842 if (!f) {
52843 ldm_crit ("Out of memory.");
52844 return false;
52845diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52846index 5765198..7f8e9e0 100644
52847--- a/fs/partitions/mac.c
52848+++ b/fs/partitions/mac.c
52849@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52850 return 0; /* not a MacOS disk */
52851 }
52852 blocks_in_map = be32_to_cpu(part->map_count);
52853- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52854- put_dev_sector(sect);
52855- return 0;
52856- }
52857 printk(" [mac]");
52858+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52859+ put_dev_sector(sect);
52860+ return 0;
52861+ }
52862 for (slot = 1; slot <= blocks_in_map; ++slot) {
52863 int pos = slot * secsize;
52864 put_dev_sector(sect);
52865diff --git a/fs/pipe.c b/fs/pipe.c
52866index d0cc080..8a6f211 100644
52867--- a/fs/pipe.c
52868+++ b/fs/pipe.c
52869@@ -401,9 +401,9 @@ redo:
52870 }
52871 if (bufs) /* More to do? */
52872 continue;
52873- if (!pipe->writers)
52874+ if (!atomic_read(&pipe->writers))
52875 break;
52876- if (!pipe->waiting_writers) {
52877+ if (!atomic_read(&pipe->waiting_writers)) {
52878 /* syscall merging: Usually we must not sleep
52879 * if O_NONBLOCK is set, or if we got some data.
52880 * But if a writer sleeps in kernel space, then
52881@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52882 mutex_lock(&inode->i_mutex);
52883 pipe = inode->i_pipe;
52884
52885- if (!pipe->readers) {
52886+ if (!atomic_read(&pipe->readers)) {
52887 send_sig(SIGPIPE, current, 0);
52888 ret = -EPIPE;
52889 goto out;
52890@@ -511,7 +511,7 @@ redo1:
52891 for (;;) {
52892 int bufs;
52893
52894- if (!pipe->readers) {
52895+ if (!atomic_read(&pipe->readers)) {
52896 send_sig(SIGPIPE, current, 0);
52897 if (!ret)
52898 ret = -EPIPE;
52899@@ -597,9 +597,9 @@ redo2:
52900 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52901 do_wakeup = 0;
52902 }
52903- pipe->waiting_writers++;
52904+ atomic_inc(&pipe->waiting_writers);
52905 pipe_wait(pipe);
52906- pipe->waiting_writers--;
52907+ atomic_dec(&pipe->waiting_writers);
52908 }
52909 out:
52910 mutex_unlock(&inode->i_mutex);
52911@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52912 mask = 0;
52913 if (filp->f_mode & FMODE_READ) {
52914 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52915- if (!pipe->writers && filp->f_version != pipe->w_counter)
52916+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52917 mask |= POLLHUP;
52918 }
52919
52920@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52921 * Most Unices do not set POLLERR for FIFOs but on Linux they
52922 * behave exactly like pipes for poll().
52923 */
52924- if (!pipe->readers)
52925+ if (!atomic_read(&pipe->readers))
52926 mask |= POLLERR;
52927 }
52928
52929@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52930
52931 mutex_lock(&inode->i_mutex);
52932 pipe = inode->i_pipe;
52933- pipe->readers -= decr;
52934- pipe->writers -= decw;
52935+ atomic_sub(decr, &pipe->readers);
52936+ atomic_sub(decw, &pipe->writers);
52937
52938- if (!pipe->readers && !pipe->writers) {
52939+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52940 free_pipe_info(inode);
52941 } else {
52942 wake_up_interruptible_sync(&pipe->wait);
52943@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52944
52945 if (inode->i_pipe) {
52946 ret = 0;
52947- inode->i_pipe->readers++;
52948+ atomic_inc(&inode->i_pipe->readers);
52949 }
52950
52951 mutex_unlock(&inode->i_mutex);
52952@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52953
52954 if (inode->i_pipe) {
52955 ret = 0;
52956- inode->i_pipe->writers++;
52957+ atomic_inc(&inode->i_pipe->writers);
52958 }
52959
52960 mutex_unlock(&inode->i_mutex);
52961@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52962 if (inode->i_pipe) {
52963 ret = 0;
52964 if (filp->f_mode & FMODE_READ)
52965- inode->i_pipe->readers++;
52966+ atomic_inc(&inode->i_pipe->readers);
52967 if (filp->f_mode & FMODE_WRITE)
52968- inode->i_pipe->writers++;
52969+ atomic_inc(&inode->i_pipe->writers);
52970 }
52971
52972 mutex_unlock(&inode->i_mutex);
52973@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52974 inode->i_pipe = NULL;
52975 }
52976
52977-static struct vfsmount *pipe_mnt __read_mostly;
52978+struct vfsmount *pipe_mnt __read_mostly;
52979 static int pipefs_delete_dentry(struct dentry *dentry)
52980 {
52981 /*
52982@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52983 goto fail_iput;
52984 inode->i_pipe = pipe;
52985
52986- pipe->readers = pipe->writers = 1;
52987+ atomic_set(&pipe->readers, 1);
52988+ atomic_set(&pipe->writers, 1);
52989 inode->i_fop = &rdwr_pipefifo_fops;
52990
52991 /*
52992diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52993index 50f8f06..c5755df 100644
52994--- a/fs/proc/Kconfig
52995+++ b/fs/proc/Kconfig
52996@@ -30,12 +30,12 @@ config PROC_FS
52997
52998 config PROC_KCORE
52999 bool "/proc/kcore support" if !ARM
53000- depends on PROC_FS && MMU
53001+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53002
53003 config PROC_VMCORE
53004 bool "/proc/vmcore support (EXPERIMENTAL)"
53005- depends on PROC_FS && CRASH_DUMP
53006- default y
53007+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53008+ default n
53009 help
53010 Exports the dump image of crashed kernel in ELF format.
53011
53012@@ -59,8 +59,8 @@ config PROC_SYSCTL
53013 limited in memory.
53014
53015 config PROC_PAGE_MONITOR
53016- default y
53017- depends on PROC_FS && MMU
53018+ default n
53019+ depends on PROC_FS && MMU && !GRKERNSEC
53020 bool "Enable /proc page monitoring" if EMBEDDED
53021 help
53022 Various /proc files exist to monitor process memory utilization:
53023diff --git a/fs/proc/array.c b/fs/proc/array.c
53024index c5ef152..24a1b87 100644
53025--- a/fs/proc/array.c
53026+++ b/fs/proc/array.c
53027@@ -60,6 +60,7 @@
53028 #include <linux/tty.h>
53029 #include <linux/string.h>
53030 #include <linux/mman.h>
53031+#include <linux/grsecurity.h>
53032 #include <linux/proc_fs.h>
53033 #include <linux/ioport.h>
53034 #include <linux/uaccess.h>
53035@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
53036 p->nivcsw);
53037 }
53038
53039+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53040+static inline void task_pax(struct seq_file *m, struct task_struct *p)
53041+{
53042+ if (p->mm)
53043+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53044+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53045+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53046+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53047+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53048+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53049+ else
53050+ seq_printf(m, "PaX:\t-----\n");
53051+}
53052+#endif
53053+
53054 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53055 struct pid *pid, struct task_struct *task)
53056 {
53057@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53058 task_cap(m, task);
53059 cpuset_task_status_allowed(m, task);
53060 task_context_switch_counts(m, task);
53061+
53062+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53063+ task_pax(m, task);
53064+#endif
53065+
53066+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53067+ task_grsec_rbac(m, task);
53068+#endif
53069+
53070 return 0;
53071 }
53072
53073+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53074+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53075+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53076+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53077+#endif
53078+
53079 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53080 struct pid *pid, struct task_struct *task, int whole)
53081 {
53082@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53083 cputime_t cutime, cstime, utime, stime;
53084 cputime_t cgtime, gtime;
53085 unsigned long rsslim = 0;
53086- char tcomm[sizeof(task->comm)];
53087+ char tcomm[sizeof(task->comm)] = { 0 };
53088 unsigned long flags;
53089
53090+ pax_track_stack();
53091+
53092+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53093+ if (current->exec_id != m->exec_id) {
53094+ gr_log_badprocpid("stat");
53095+ return 0;
53096+ }
53097+#endif
53098+
53099 state = *get_task_state(task);
53100 vsize = eip = esp = 0;
53101 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
53102@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53103 gtime = task_gtime(task);
53104 }
53105
53106+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53107+ if (PAX_RAND_FLAGS(mm)) {
53108+ eip = 0;
53109+ esp = 0;
53110+ wchan = 0;
53111+ }
53112+#endif
53113+#ifdef CONFIG_GRKERNSEC_HIDESYM
53114+ wchan = 0;
53115+ eip =0;
53116+ esp =0;
53117+#endif
53118+
53119 /* scale priority and nice values from timeslices to -20..20 */
53120 /* to make it look like a "normal" Unix priority/nice value */
53121 priority = task_prio(task);
53122@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53123 vsize,
53124 mm ? get_mm_rss(mm) : 0,
53125 rsslim,
53126+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53127+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
53128+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
53129+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
53130+#else
53131 mm ? (permitted ? mm->start_code : 1) : 0,
53132 mm ? (permitted ? mm->end_code : 1) : 0,
53133 (permitted && mm) ? mm->start_stack : 0,
53134+#endif
53135 esp,
53136 eip,
53137 /* The signal information here is obsolete.
53138@@ -519,6 +578,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53139 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
53140 struct mm_struct *mm = get_task_mm(task);
53141
53142+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53143+ if (current->exec_id != m->exec_id) {
53144+ gr_log_badprocpid("statm");
53145+ return 0;
53146+ }
53147+#endif
53148+
53149 if (mm) {
53150 size = task_statm(mm, &shared, &text, &data, &resident);
53151 mmput(mm);
53152@@ -528,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53153
53154 return 0;
53155 }
53156+
53157+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53158+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53159+{
53160+ u32 curr_ip = 0;
53161+ unsigned long flags;
53162+
53163+ if (lock_task_sighand(task, &flags)) {
53164+ curr_ip = task->signal->curr_ip;
53165+ unlock_task_sighand(task, &flags);
53166+ }
53167+
53168+ return sprintf(buffer, "%pI4\n", &curr_ip);
53169+}
53170+#endif
53171diff --git a/fs/proc/base.c b/fs/proc/base.c
53172index 67f7dc0..a86ad9a 100644
53173--- a/fs/proc/base.c
53174+++ b/fs/proc/base.c
53175@@ -102,6 +102,22 @@ struct pid_entry {
53176 union proc_op op;
53177 };
53178
53179+struct getdents_callback {
53180+ struct linux_dirent __user * current_dir;
53181+ struct linux_dirent __user * previous;
53182+ struct file * file;
53183+ int count;
53184+ int error;
53185+};
53186+
53187+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
53188+ loff_t offset, u64 ino, unsigned int d_type)
53189+{
53190+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
53191+ buf->error = -EINVAL;
53192+ return 0;
53193+}
53194+
53195 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53196 .name = (NAME), \
53197 .len = sizeof(NAME) - 1, \
53198@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
53199 if (task == current)
53200 return 0;
53201
53202+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
53203+ return -EPERM;
53204+
53205 /*
53206 * If current is actively ptrace'ing, and would also be
53207 * permitted to freshly attach with ptrace now, permit it.
53208@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53209 if (!mm->arg_end)
53210 goto out_mm; /* Shh! No looking before we're done */
53211
53212+ if (gr_acl_handle_procpidmem(task))
53213+ goto out_mm;
53214+
53215 len = mm->arg_end - mm->arg_start;
53216
53217 if (len > PAGE_SIZE)
53218@@ -287,12 +309,28 @@ out:
53219 return res;
53220 }
53221
53222+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53223+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53224+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53225+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53226+#endif
53227+
53228 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53229 {
53230 int res = 0;
53231 struct mm_struct *mm = get_task_mm(task);
53232 if (mm) {
53233 unsigned int nwords = 0;
53234+
53235+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53236+ /* allow if we're currently ptracing this task */
53237+ if (PAX_RAND_FLAGS(mm) &&
53238+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53239+ mmput(mm);
53240+ return 0;
53241+ }
53242+#endif
53243+
53244 do {
53245 nwords += 2;
53246 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53247@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53248 }
53249
53250
53251-#ifdef CONFIG_KALLSYMS
53252+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53253 /*
53254 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53255 * Returns the resolved symbol. If that fails, simply return the address.
53256@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
53257 mutex_unlock(&task->cred_guard_mutex);
53258 }
53259
53260-#ifdef CONFIG_STACKTRACE
53261+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53262
53263 #define MAX_STACK_TRACE_DEPTH 64
53264
53265@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53266 return count;
53267 }
53268
53269-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53270+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53271 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53272 {
53273 long nr;
53274@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53275 /************************************************************************/
53276
53277 /* permission checks */
53278-static int proc_fd_access_allowed(struct inode *inode)
53279+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53280 {
53281 struct task_struct *task;
53282 int allowed = 0;
53283@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53284 */
53285 task = get_proc_task(inode);
53286 if (task) {
53287- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53288+ if (log)
53289+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
53290+ else
53291+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53292 put_task_struct(task);
53293 }
53294 return allowed;
53295@@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
53296 static int mem_open(struct inode* inode, struct file* file)
53297 {
53298 file->private_data = (void*)((long)current->self_exec_id);
53299+
53300+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53301+ file->f_version = current->exec_id;
53302+#endif
53303+
53304 return 0;
53305 }
53306
53307+static int task_dumpable(struct task_struct *task);
53308+
53309 static ssize_t mem_read(struct file * file, char __user * buf,
53310 size_t count, loff_t *ppos)
53311 {
53312@@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
53313 int ret = -ESRCH;
53314 struct mm_struct *mm;
53315
53316+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53317+ if (file->f_version != current->exec_id) {
53318+ gr_log_badprocpid("mem");
53319+ return 0;
53320+ }
53321+#endif
53322+
53323 if (!task)
53324 goto out_no_task;
53325
53326@@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
53327 if (!task)
53328 goto out_no_task;
53329
53330+ if (gr_acl_handle_procpidmem(task))
53331+ goto out;
53332+
53333 if (!ptrace_may_access(task, PTRACE_MODE_READ))
53334 goto out;
53335
53336@@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
53337 path_put(&nd->path);
53338
53339 /* Are we allowed to snoop on the tasks file descriptors? */
53340- if (!proc_fd_access_allowed(inode))
53341+ if (!proc_fd_access_allowed(inode,0))
53342 goto out;
53343
53344 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
53345@@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
53346 struct path path;
53347
53348 /* Are we allowed to snoop on the tasks file descriptors? */
53349- if (!proc_fd_access_allowed(inode))
53350- goto out;
53351+ /* logging this is needed for learning on chromium to work properly,
53352+ but we don't want to flood the logs from 'ps' which does a readlink
53353+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
53354+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
53355+ */
53356+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
53357+ if (!proc_fd_access_allowed(inode,0))
53358+ goto out;
53359+ } else {
53360+ if (!proc_fd_access_allowed(inode,1))
53361+ goto out;
53362+ }
53363
53364 error = PROC_I(inode)->op.proc_get_link(inode, &path);
53365 if (error)
53366@@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
53367 rcu_read_lock();
53368 cred = __task_cred(task);
53369 inode->i_uid = cred->euid;
53370+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53371+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53372+#else
53373 inode->i_gid = cred->egid;
53374+#endif
53375 rcu_read_unlock();
53376 }
53377 security_task_to_inode(task, inode);
53378@@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53379 struct inode *inode = dentry->d_inode;
53380 struct task_struct *task;
53381 const struct cred *cred;
53382+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53383+ const struct cred *tmpcred = current_cred();
53384+#endif
53385
53386 generic_fillattr(inode, stat);
53387
53388@@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53389 stat->uid = 0;
53390 stat->gid = 0;
53391 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53392+
53393+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53394+ rcu_read_unlock();
53395+ return -ENOENT;
53396+ }
53397+
53398 if (task) {
53399+ cred = __task_cred(task);
53400+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53401+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53402+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53403+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53404+#endif
53405+ ) {
53406+#endif
53407 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53408+#ifdef CONFIG_GRKERNSEC_PROC_USER
53409+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53410+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53411+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53412+#endif
53413 task_dumpable(task)) {
53414- cred = __task_cred(task);
53415 stat->uid = cred->euid;
53416+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53417+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53418+#else
53419 stat->gid = cred->egid;
53420+#endif
53421 }
53422+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53423+ } else {
53424+ rcu_read_unlock();
53425+ return -ENOENT;
53426+ }
53427+#endif
53428 }
53429 rcu_read_unlock();
53430 return 0;
53431@@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53432
53433 if (task) {
53434 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53435+#ifdef CONFIG_GRKERNSEC_PROC_USER
53436+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53437+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53438+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53439+#endif
53440 task_dumpable(task)) {
53441 rcu_read_lock();
53442 cred = __task_cred(task);
53443 inode->i_uid = cred->euid;
53444+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53445+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53446+#else
53447 inode->i_gid = cred->egid;
53448+#endif
53449 rcu_read_unlock();
53450 } else {
53451 inode->i_uid = 0;
53452@@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53453 int fd = proc_fd(inode);
53454
53455 if (task) {
53456- files = get_files_struct(task);
53457+ if (!gr_acl_handle_procpidmem(task))
53458+ files = get_files_struct(task);
53459 put_task_struct(task);
53460 }
53461 if (files) {
53462@@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
53463 static int proc_fd_permission(struct inode *inode, int mask)
53464 {
53465 int rv;
53466+ struct task_struct *task;
53467
53468 rv = generic_permission(inode, mask, NULL);
53469- if (rv == 0)
53470- return 0;
53471+
53472 if (task_pid(current) == proc_pid(inode))
53473 rv = 0;
53474+
53475+ task = get_proc_task(inode);
53476+ if (task == NULL)
53477+ return rv;
53478+
53479+ if (gr_acl_handle_procpidmem(task))
53480+ rv = -EACCES;
53481+
53482+ put_task_struct(task);
53483+
53484 return rv;
53485 }
53486
53487@@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53488 if (!task)
53489 goto out_no_task;
53490
53491+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53492+ goto out;
53493+
53494 /*
53495 * Yes, it does not scale. And it should not. Don't add
53496 * new entries into /proc/<tgid>/ without very good reasons.
53497@@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
53498 if (!task)
53499 goto out_no_task;
53500
53501+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53502+ goto out;
53503+
53504 ret = 0;
53505 i = filp->f_pos;
53506 switch (i) {
53507@@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53508 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53509 void *cookie)
53510 {
53511- char *s = nd_get_link(nd);
53512+ const char *s = nd_get_link(nd);
53513 if (!IS_ERR(s))
53514 __putname(s);
53515 }
53516@@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53517 #ifdef CONFIG_SCHED_DEBUG
53518 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53519 #endif
53520-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53521+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53522 INF("syscall", S_IRUGO, proc_pid_syscall),
53523 #endif
53524 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53525@@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53526 #ifdef CONFIG_SECURITY
53527 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53528 #endif
53529-#ifdef CONFIG_KALLSYMS
53530+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53531 INF("wchan", S_IRUGO, proc_pid_wchan),
53532 #endif
53533-#ifdef CONFIG_STACKTRACE
53534+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53535 ONE("stack", S_IRUGO, proc_pid_stack),
53536 #endif
53537 #ifdef CONFIG_SCHEDSTATS
53538@@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53539 #ifdef CONFIG_TASK_IO_ACCOUNTING
53540 INF("io", S_IRUSR, proc_tgid_io_accounting),
53541 #endif
53542+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53543+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53544+#endif
53545 };
53546
53547 static int proc_tgid_base_readdir(struct file * filp,
53548@@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53549 if (!inode)
53550 goto out;
53551
53552+#ifdef CONFIG_GRKERNSEC_PROC_USER
53553+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53554+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53555+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53556+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53557+#else
53558 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53559+#endif
53560 inode->i_op = &proc_tgid_base_inode_operations;
53561 inode->i_fop = &proc_tgid_base_operations;
53562 inode->i_flags|=S_IMMUTABLE;
53563@@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53564 if (!task)
53565 goto out;
53566
53567+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53568+ goto out_put_task;
53569+
53570 result = proc_pid_instantiate(dir, dentry, task, NULL);
53571+out_put_task:
53572 put_task_struct(task);
53573 out:
53574 return result;
53575@@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53576 {
53577 unsigned int nr;
53578 struct task_struct *reaper;
53579+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53580+ const struct cred *tmpcred = current_cred();
53581+ const struct cred *itercred;
53582+#endif
53583+ filldir_t __filldir = filldir;
53584 struct tgid_iter iter;
53585 struct pid_namespace *ns;
53586
53587@@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53588 for (iter = next_tgid(ns, iter);
53589 iter.task;
53590 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53591+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53592+ rcu_read_lock();
53593+ itercred = __task_cred(iter.task);
53594+#endif
53595+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53596+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53597+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53598+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53599+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53600+#endif
53601+ )
53602+#endif
53603+ )
53604+ __filldir = &gr_fake_filldir;
53605+ else
53606+ __filldir = filldir;
53607+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53608+ rcu_read_unlock();
53609+#endif
53610 filp->f_pos = iter.tgid + TGID_OFFSET;
53611- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53612+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53613 put_task_struct(iter.task);
53614 goto out;
53615 }
53616@@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
53617 #ifdef CONFIG_SCHED_DEBUG
53618 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53619 #endif
53620-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53621+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53622 INF("syscall", S_IRUGO, proc_pid_syscall),
53623 #endif
53624 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53625@@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
53626 #ifdef CONFIG_SECURITY
53627 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53628 #endif
53629-#ifdef CONFIG_KALLSYMS
53630+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53631 INF("wchan", S_IRUGO, proc_pid_wchan),
53632 #endif
53633-#ifdef CONFIG_STACKTRACE
53634+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53635 ONE("stack", S_IRUGO, proc_pid_stack),
53636 #endif
53637 #ifdef CONFIG_SCHEDSTATS
53638diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53639index 82676e3..5f8518a 100644
53640--- a/fs/proc/cmdline.c
53641+++ b/fs/proc/cmdline.c
53642@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53643
53644 static int __init proc_cmdline_init(void)
53645 {
53646+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53647+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53648+#else
53649 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53650+#endif
53651 return 0;
53652 }
53653 module_init(proc_cmdline_init);
53654diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53655index 59ee7da..469b4b6 100644
53656--- a/fs/proc/devices.c
53657+++ b/fs/proc/devices.c
53658@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53659
53660 static int __init proc_devices_init(void)
53661 {
53662+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53663+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53664+#else
53665 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53666+#endif
53667 return 0;
53668 }
53669 module_init(proc_devices_init);
53670diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53671index d78ade3..81767f9 100644
53672--- a/fs/proc/inode.c
53673+++ b/fs/proc/inode.c
53674@@ -18,12 +18,19 @@
53675 #include <linux/module.h>
53676 #include <linux/smp_lock.h>
53677 #include <linux/sysctl.h>
53678+#include <linux/grsecurity.h>
53679
53680 #include <asm/system.h>
53681 #include <asm/uaccess.h>
53682
53683 #include "internal.h"
53684
53685+#ifdef CONFIG_PROC_SYSCTL
53686+extern const struct inode_operations proc_sys_inode_operations;
53687+extern const struct inode_operations proc_sys_dir_operations;
53688+#endif
53689+
53690+
53691 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53692 {
53693 atomic_inc(&de->count);
53694@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53695 de_put(de);
53696 if (PROC_I(inode)->sysctl)
53697 sysctl_head_put(PROC_I(inode)->sysctl);
53698+
53699+#ifdef CONFIG_PROC_SYSCTL
53700+ if (inode->i_op == &proc_sys_inode_operations ||
53701+ inode->i_op == &proc_sys_dir_operations)
53702+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53703+#endif
53704+
53705 clear_inode(inode);
53706 }
53707
53708@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53709 if (de->mode) {
53710 inode->i_mode = de->mode;
53711 inode->i_uid = de->uid;
53712+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53713+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53714+#else
53715 inode->i_gid = de->gid;
53716+#endif
53717 }
53718 if (de->size)
53719 inode->i_size = de->size;
53720diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53721index 753ca37..26bcf3b 100644
53722--- a/fs/proc/internal.h
53723+++ b/fs/proc/internal.h
53724@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53725 struct pid *pid, struct task_struct *task);
53726 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53727 struct pid *pid, struct task_struct *task);
53728+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53729+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53730+#endif
53731 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53732
53733 extern const struct file_operations proc_maps_operations;
53734diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53735index b442dac..aab29cb 100644
53736--- a/fs/proc/kcore.c
53737+++ b/fs/proc/kcore.c
53738@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53739 off_t offset = 0;
53740 struct kcore_list *m;
53741
53742+ pax_track_stack();
53743+
53744 /* setup ELF header */
53745 elf = (struct elfhdr *) bufp;
53746 bufp += sizeof(struct elfhdr);
53747@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53748 * the addresses in the elf_phdr on our list.
53749 */
53750 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53751- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53752+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53753+ if (tsz > buflen)
53754 tsz = buflen;
53755-
53756+
53757 while (buflen) {
53758 struct kcore_list *m;
53759
53760@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53761 kfree(elf_buf);
53762 } else {
53763 if (kern_addr_valid(start)) {
53764- unsigned long n;
53765+ char *elf_buf;
53766+ mm_segment_t oldfs;
53767
53768- n = copy_to_user(buffer, (char *)start, tsz);
53769- /*
53770- * We cannot distingush between fault on source
53771- * and fault on destination. When this happens
53772- * we clear too and hope it will trigger the
53773- * EFAULT again.
53774- */
53775- if (n) {
53776- if (clear_user(buffer + tsz - n,
53777- n))
53778+ elf_buf = kmalloc(tsz, GFP_KERNEL);
53779+ if (!elf_buf)
53780+ return -ENOMEM;
53781+ oldfs = get_fs();
53782+ set_fs(KERNEL_DS);
53783+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53784+ set_fs(oldfs);
53785+ if (copy_to_user(buffer, elf_buf, tsz)) {
53786+ kfree(elf_buf);
53787 return -EFAULT;
53788+ }
53789 }
53790+ set_fs(oldfs);
53791+ kfree(elf_buf);
53792 } else {
53793 if (clear_user(buffer, tsz))
53794 return -EFAULT;
53795@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53796
53797 static int open_kcore(struct inode *inode, struct file *filp)
53798 {
53799+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53800+ return -EPERM;
53801+#endif
53802 if (!capable(CAP_SYS_RAWIO))
53803 return -EPERM;
53804 if (kcore_need_update)
53805diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53806index 7ca7834..cfe90a4 100644
53807--- a/fs/proc/kmsg.c
53808+++ b/fs/proc/kmsg.c
53809@@ -12,37 +12,37 @@
53810 #include <linux/poll.h>
53811 #include <linux/proc_fs.h>
53812 #include <linux/fs.h>
53813+#include <linux/syslog.h>
53814
53815 #include <asm/uaccess.h>
53816 #include <asm/io.h>
53817
53818 extern wait_queue_head_t log_wait;
53819
53820-extern int do_syslog(int type, char __user *bug, int count);
53821-
53822 static int kmsg_open(struct inode * inode, struct file * file)
53823 {
53824- return do_syslog(1,NULL,0);
53825+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53826 }
53827
53828 static int kmsg_release(struct inode * inode, struct file * file)
53829 {
53830- (void) do_syslog(0,NULL,0);
53831+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53832 return 0;
53833 }
53834
53835 static ssize_t kmsg_read(struct file *file, char __user *buf,
53836 size_t count, loff_t *ppos)
53837 {
53838- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53839+ if ((file->f_flags & O_NONBLOCK) &&
53840+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53841 return -EAGAIN;
53842- return do_syslog(2, buf, count);
53843+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53844 }
53845
53846 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53847 {
53848 poll_wait(file, &log_wait, wait);
53849- if (do_syslog(9, NULL, 0))
53850+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53851 return POLLIN | POLLRDNORM;
53852 return 0;
53853 }
53854diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53855index a65239c..ad1182a 100644
53856--- a/fs/proc/meminfo.c
53857+++ b/fs/proc/meminfo.c
53858@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53859 unsigned long pages[NR_LRU_LISTS];
53860 int lru;
53861
53862+ pax_track_stack();
53863+
53864 /*
53865 * display in kilobytes.
53866 */
53867@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53868 vmi.used >> 10,
53869 vmi.largest_chunk >> 10
53870 #ifdef CONFIG_MEMORY_FAILURE
53871- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53872+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53873 #endif
53874 );
53875
53876diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53877index 9fe7d7e..cdb62c9 100644
53878--- a/fs/proc/nommu.c
53879+++ b/fs/proc/nommu.c
53880@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53881 if (len < 1)
53882 len = 1;
53883 seq_printf(m, "%*c", len, ' ');
53884- seq_path(m, &file->f_path, "");
53885+ seq_path(m, &file->f_path, "\n\\");
53886 }
53887
53888 seq_putc(m, '\n');
53889diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53890index 04d1270..25e1173 100644
53891--- a/fs/proc/proc_net.c
53892+++ b/fs/proc/proc_net.c
53893@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53894 struct task_struct *task;
53895 struct nsproxy *ns;
53896 struct net *net = NULL;
53897+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53898+ const struct cred *cred = current_cred();
53899+#endif
53900+
53901+#ifdef CONFIG_GRKERNSEC_PROC_USER
53902+ if (cred->fsuid)
53903+ return net;
53904+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53905+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53906+ return net;
53907+#endif
53908
53909 rcu_read_lock();
53910 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53911diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53912index f667e8a..55f4d96 100644
53913--- a/fs/proc/proc_sysctl.c
53914+++ b/fs/proc/proc_sysctl.c
53915@@ -7,11 +7,13 @@
53916 #include <linux/security.h>
53917 #include "internal.h"
53918
53919+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53920+
53921 static const struct dentry_operations proc_sys_dentry_operations;
53922 static const struct file_operations proc_sys_file_operations;
53923-static const struct inode_operations proc_sys_inode_operations;
53924+const struct inode_operations proc_sys_inode_operations;
53925 static const struct file_operations proc_sys_dir_file_operations;
53926-static const struct inode_operations proc_sys_dir_operations;
53927+const struct inode_operations proc_sys_dir_operations;
53928
53929 static struct inode *proc_sys_make_inode(struct super_block *sb,
53930 struct ctl_table_header *head, struct ctl_table *table)
53931@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53932 if (!p)
53933 goto out;
53934
53935+ if (gr_handle_sysctl(p, MAY_EXEC))
53936+ goto out;
53937+
53938 err = ERR_PTR(-ENOMEM);
53939 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53940 if (h)
53941@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53942
53943 err = NULL;
53944 dentry->d_op = &proc_sys_dentry_operations;
53945+
53946+ gr_handle_proc_create(dentry, inode);
53947+
53948 d_add(dentry, inode);
53949
53950 out:
53951@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53952 return -ENOMEM;
53953 } else {
53954 child->d_op = &proc_sys_dentry_operations;
53955+
53956+ gr_handle_proc_create(child, inode);
53957+
53958 d_add(child, inode);
53959 }
53960 } else {
53961@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53962 if (*pos < file->f_pos)
53963 continue;
53964
53965+ if (gr_handle_sysctl(table, 0))
53966+ continue;
53967+
53968 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53969 if (res)
53970 return res;
53971@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53972 if (IS_ERR(head))
53973 return PTR_ERR(head);
53974
53975+ if (table && gr_handle_sysctl(table, MAY_EXEC))
53976+ return -ENOENT;
53977+
53978 generic_fillattr(inode, stat);
53979 if (table)
53980 stat->mode = (stat->mode & S_IFMT) | table->mode;
53981@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53982 };
53983
53984 static const struct file_operations proc_sys_dir_file_operations = {
53985+ .read = generic_read_dir,
53986 .readdir = proc_sys_readdir,
53987 .llseek = generic_file_llseek,
53988 };
53989
53990-static const struct inode_operations proc_sys_inode_operations = {
53991+const struct inode_operations proc_sys_inode_operations = {
53992 .permission = proc_sys_permission,
53993 .setattr = proc_sys_setattr,
53994 .getattr = proc_sys_getattr,
53995 };
53996
53997-static const struct inode_operations proc_sys_dir_operations = {
53998+const struct inode_operations proc_sys_dir_operations = {
53999 .lookup = proc_sys_lookup,
54000 .permission = proc_sys_permission,
54001 .setattr = proc_sys_setattr,
54002diff --git a/fs/proc/root.c b/fs/proc/root.c
54003index b080b79..d957e63 100644
54004--- a/fs/proc/root.c
54005+++ b/fs/proc/root.c
54006@@ -134,7 +134,15 @@ void __init proc_root_init(void)
54007 #ifdef CONFIG_PROC_DEVICETREE
54008 proc_device_tree_init();
54009 #endif
54010+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54011+#ifdef CONFIG_GRKERNSEC_PROC_USER
54012+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54013+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54014+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54015+#endif
54016+#else
54017 proc_mkdir("bus", NULL);
54018+#endif
54019 proc_sys_init();
54020 }
54021
54022diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54023index 3b7b82a..4b420b0 100644
54024--- a/fs/proc/task_mmu.c
54025+++ b/fs/proc/task_mmu.c
54026@@ -8,6 +8,7 @@
54027 #include <linux/mempolicy.h>
54028 #include <linux/swap.h>
54029 #include <linux/swapops.h>
54030+#include <linux/grsecurity.h>
54031
54032 #include <asm/elf.h>
54033 #include <asm/uaccess.h>
54034@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54035 "VmStk:\t%8lu kB\n"
54036 "VmExe:\t%8lu kB\n"
54037 "VmLib:\t%8lu kB\n"
54038- "VmPTE:\t%8lu kB\n",
54039- hiwater_vm << (PAGE_SHIFT-10),
54040+ "VmPTE:\t%8lu kB\n"
54041+
54042+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54043+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54044+#endif
54045+
54046+ ,hiwater_vm << (PAGE_SHIFT-10),
54047 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
54048 mm->locked_vm << (PAGE_SHIFT-10),
54049 hiwater_rss << (PAGE_SHIFT-10),
54050 total_rss << (PAGE_SHIFT-10),
54051 data << (PAGE_SHIFT-10),
54052 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54053- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
54054+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
54055+
54056+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54057+ , mm->context.user_cs_base, mm->context.user_cs_limit
54058+#endif
54059+
54060+ );
54061 }
54062
54063 unsigned long task_vsize(struct mm_struct *mm)
54064@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
54065 struct proc_maps_private *priv = m->private;
54066 struct vm_area_struct *vma = v;
54067
54068- vma_stop(priv, vma);
54069+ if (!IS_ERR(vma))
54070+ vma_stop(priv, vma);
54071 if (priv->task)
54072 put_task_struct(priv->task);
54073 }
54074@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
54075 return ret;
54076 }
54077
54078+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54079+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54080+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54081+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54082+#endif
54083+
54084 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54085 {
54086 struct mm_struct *mm = vma->vm_mm;
54087@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54088 int flags = vma->vm_flags;
54089 unsigned long ino = 0;
54090 unsigned long long pgoff = 0;
54091- unsigned long start;
54092 dev_t dev = 0;
54093 int len;
54094
54095@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54096 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54097 }
54098
54099- /* We don't show the stack guard page in /proc/maps */
54100- start = vma->vm_start;
54101- if (vma->vm_flags & VM_GROWSDOWN)
54102- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
54103- start += PAGE_SIZE;
54104-
54105 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54106- start,
54107+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54108+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
54109+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
54110+#else
54111+ vma->vm_start,
54112 vma->vm_end,
54113+#endif
54114 flags & VM_READ ? 'r' : '-',
54115 flags & VM_WRITE ? 'w' : '-',
54116 flags & VM_EXEC ? 'x' : '-',
54117 flags & VM_MAYSHARE ? 's' : 'p',
54118+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54119+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54120+#else
54121 pgoff,
54122+#endif
54123 MAJOR(dev), MINOR(dev), ino, &len);
54124
54125 /*
54126@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54127 */
54128 if (file) {
54129 pad_len_spaces(m, len);
54130- seq_path(m, &file->f_path, "\n");
54131+ seq_path(m, &file->f_path, "\n\\");
54132 } else {
54133 const char *name = arch_vma_name(vma);
54134 if (!name) {
54135@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54136 if (vma->vm_start <= mm->brk &&
54137 vma->vm_end >= mm->start_brk) {
54138 name = "[heap]";
54139- } else if (vma->vm_start <= mm->start_stack &&
54140- vma->vm_end >= mm->start_stack) {
54141+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54142+ (vma->vm_start <= mm->start_stack &&
54143+ vma->vm_end >= mm->start_stack)) {
54144 name = "[stack]";
54145 }
54146 } else {
54147@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
54148 struct proc_maps_private *priv = m->private;
54149 struct task_struct *task = priv->task;
54150
54151+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54152+ if (current->exec_id != m->exec_id) {
54153+ gr_log_badprocpid("maps");
54154+ return 0;
54155+ }
54156+#endif
54157+
54158 show_map_vma(m, vma);
54159
54160 if (m->count < m->size) /* vma is copied successfully */
54161@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
54162 .private = &mss,
54163 };
54164
54165+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54166+ if (current->exec_id != m->exec_id) {
54167+ gr_log_badprocpid("smaps");
54168+ return 0;
54169+ }
54170+#endif
54171 memset(&mss, 0, sizeof mss);
54172- mss.vma = vma;
54173- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54174- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54175+
54176+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54177+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54178+#endif
54179+ mss.vma = vma;
54180+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54181+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54182+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54183+ }
54184+#endif
54185
54186 show_map_vma(m, vma);
54187
54188@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
54189 "Swap: %8lu kB\n"
54190 "KernelPageSize: %8lu kB\n"
54191 "MMUPageSize: %8lu kB\n",
54192+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54193+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54194+#else
54195 (vma->vm_end - vma->vm_start) >> 10,
54196+#endif
54197 mss.resident >> 10,
54198 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54199 mss.shared_clean >> 10,
54200diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54201index 8f5c05d..c99c76d 100644
54202--- a/fs/proc/task_nommu.c
54203+++ b/fs/proc/task_nommu.c
54204@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54205 else
54206 bytes += kobjsize(mm);
54207
54208- if (current->fs && current->fs->users > 1)
54209+ if (current->fs && atomic_read(&current->fs->users) > 1)
54210 sbytes += kobjsize(current->fs);
54211 else
54212 bytes += kobjsize(current->fs);
54213@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
54214 if (len < 1)
54215 len = 1;
54216 seq_printf(m, "%*c", len, ' ');
54217- seq_path(m, &file->f_path, "");
54218+ seq_path(m, &file->f_path, "\n\\");
54219 }
54220
54221 seq_putc(m, '\n');
54222diff --git a/fs/readdir.c b/fs/readdir.c
54223index 7723401..30059a6 100644
54224--- a/fs/readdir.c
54225+++ b/fs/readdir.c
54226@@ -16,6 +16,7 @@
54227 #include <linux/security.h>
54228 #include <linux/syscalls.h>
54229 #include <linux/unistd.h>
54230+#include <linux/namei.h>
54231
54232 #include <asm/uaccess.h>
54233
54234@@ -67,6 +68,7 @@ struct old_linux_dirent {
54235
54236 struct readdir_callback {
54237 struct old_linux_dirent __user * dirent;
54238+ struct file * file;
54239 int result;
54240 };
54241
54242@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54243 buf->result = -EOVERFLOW;
54244 return -EOVERFLOW;
54245 }
54246+
54247+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54248+ return 0;
54249+
54250 buf->result++;
54251 dirent = buf->dirent;
54252 if (!access_ok(VERIFY_WRITE, dirent,
54253@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54254
54255 buf.result = 0;
54256 buf.dirent = dirent;
54257+ buf.file = file;
54258
54259 error = vfs_readdir(file, fillonedir, &buf);
54260 if (buf.result)
54261@@ -142,6 +149,7 @@ struct linux_dirent {
54262 struct getdents_callback {
54263 struct linux_dirent __user * current_dir;
54264 struct linux_dirent __user * previous;
54265+ struct file * file;
54266 int count;
54267 int error;
54268 };
54269@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
54270 buf->error = -EOVERFLOW;
54271 return -EOVERFLOW;
54272 }
54273+
54274+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54275+ return 0;
54276+
54277 dirent = buf->previous;
54278 if (dirent) {
54279 if (__put_user(offset, &dirent->d_off))
54280@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
54281 buf.previous = NULL;
54282 buf.count = count;
54283 buf.error = 0;
54284+ buf.file = file;
54285
54286 error = vfs_readdir(file, filldir, &buf);
54287 if (error >= 0)
54288@@ -228,6 +241,7 @@ out:
54289 struct getdents_callback64 {
54290 struct linux_dirent64 __user * current_dir;
54291 struct linux_dirent64 __user * previous;
54292+ struct file *file;
54293 int count;
54294 int error;
54295 };
54296@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
54297 buf->error = -EINVAL; /* only used if we fail.. */
54298 if (reclen > buf->count)
54299 return -EINVAL;
54300+
54301+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54302+ return 0;
54303+
54304 dirent = buf->previous;
54305 if (dirent) {
54306 if (__put_user(offset, &dirent->d_off))
54307@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54308
54309 buf.current_dir = dirent;
54310 buf.previous = NULL;
54311+ buf.file = file;
54312 buf.count = count;
54313 buf.error = 0;
54314
54315@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54316 error = buf.error;
54317 lastdirent = buf.previous;
54318 if (lastdirent) {
54319- typeof(lastdirent->d_off) d_off = file->f_pos;
54320+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
54321 if (__put_user(d_off, &lastdirent->d_off))
54322 error = -EFAULT;
54323 else
54324diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
54325index d42c30c..4fd8718 100644
54326--- a/fs/reiserfs/dir.c
54327+++ b/fs/reiserfs/dir.c
54328@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
54329 struct reiserfs_dir_entry de;
54330 int ret = 0;
54331
54332+ pax_track_stack();
54333+
54334 reiserfs_write_lock(inode->i_sb);
54335
54336 reiserfs_check_lock_depth(inode->i_sb, "readdir");
54337diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
54338index 128d3f7..8840d44 100644
54339--- a/fs/reiserfs/do_balan.c
54340+++ b/fs/reiserfs/do_balan.c
54341@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
54342 return;
54343 }
54344
54345- atomic_inc(&(fs_generation(tb->tb_sb)));
54346+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
54347 do_balance_starts(tb);
54348
54349 /* balance leaf returns 0 except if combining L R and S into
54350diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
54351index 72cb1cc..d0e3181 100644
54352--- a/fs/reiserfs/item_ops.c
54353+++ b/fs/reiserfs/item_ops.c
54354@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
54355 vi->vi_index, vi->vi_type, vi->vi_ih);
54356 }
54357
54358-static struct item_operations stat_data_ops = {
54359+static const struct item_operations stat_data_ops = {
54360 .bytes_number = sd_bytes_number,
54361 .decrement_key = sd_decrement_key,
54362 .is_left_mergeable = sd_is_left_mergeable,
54363@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
54364 vi->vi_index, vi->vi_type, vi->vi_ih);
54365 }
54366
54367-static struct item_operations direct_ops = {
54368+static const struct item_operations direct_ops = {
54369 .bytes_number = direct_bytes_number,
54370 .decrement_key = direct_decrement_key,
54371 .is_left_mergeable = direct_is_left_mergeable,
54372@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
54373 vi->vi_index, vi->vi_type, vi->vi_ih);
54374 }
54375
54376-static struct item_operations indirect_ops = {
54377+static const struct item_operations indirect_ops = {
54378 .bytes_number = indirect_bytes_number,
54379 .decrement_key = indirect_decrement_key,
54380 .is_left_mergeable = indirect_is_left_mergeable,
54381@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
54382 printk("\n");
54383 }
54384
54385-static struct item_operations direntry_ops = {
54386+static const struct item_operations direntry_ops = {
54387 .bytes_number = direntry_bytes_number,
54388 .decrement_key = direntry_decrement_key,
54389 .is_left_mergeable = direntry_is_left_mergeable,
54390@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54391 "Invalid item type observed, run fsck ASAP");
54392 }
54393
54394-static struct item_operations errcatch_ops = {
54395+static const struct item_operations errcatch_ops = {
54396 errcatch_bytes_number,
54397 errcatch_decrement_key,
54398 errcatch_is_left_mergeable,
54399@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54400 #error Item types must use disk-format assigned values.
54401 #endif
54402
54403-struct item_operations *item_ops[TYPE_ANY + 1] = {
54404+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54405 &stat_data_ops,
54406 &indirect_ops,
54407 &direct_ops,
54408diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54409index b5fe0aa..e0e25c4 100644
54410--- a/fs/reiserfs/journal.c
54411+++ b/fs/reiserfs/journal.c
54412@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54413 struct buffer_head *bh;
54414 int i, j;
54415
54416+ pax_track_stack();
54417+
54418 bh = __getblk(dev, block, bufsize);
54419 if (buffer_uptodate(bh))
54420 return (bh);
54421diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54422index 2715791..b8996db 100644
54423--- a/fs/reiserfs/namei.c
54424+++ b/fs/reiserfs/namei.c
54425@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54426 unsigned long savelink = 1;
54427 struct timespec ctime;
54428
54429+ pax_track_stack();
54430+
54431 /* three balancings: (1) old name removal, (2) new name insertion
54432 and (3) maybe "save" link insertion
54433 stat data updates: (1) old directory,
54434diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54435index 9229e55..3d2e3b7 100644
54436--- a/fs/reiserfs/procfs.c
54437+++ b/fs/reiserfs/procfs.c
54438@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54439 "SMALL_TAILS " : "NO_TAILS ",
54440 replay_only(sb) ? "REPLAY_ONLY " : "",
54441 convert_reiserfs(sb) ? "CONV " : "",
54442- atomic_read(&r->s_generation_counter),
54443+ atomic_read_unchecked(&r->s_generation_counter),
54444 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54445 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54446 SF(s_good_search_by_key_reada), SF(s_bmaps),
54447@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54448 struct journal_params *jp = &rs->s_v1.s_journal;
54449 char b[BDEVNAME_SIZE];
54450
54451+ pax_track_stack();
54452+
54453 seq_printf(m, /* on-disk fields */
54454 "jp_journal_1st_block: \t%i\n"
54455 "jp_journal_dev: \t%s[%x]\n"
54456diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54457index d036ee5..4c7dca1 100644
54458--- a/fs/reiserfs/stree.c
54459+++ b/fs/reiserfs/stree.c
54460@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54461 int iter = 0;
54462 #endif
54463
54464+ pax_track_stack();
54465+
54466 BUG_ON(!th->t_trans_id);
54467
54468 init_tb_struct(th, &s_del_balance, sb, path,
54469@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54470 int retval;
54471 int quota_cut_bytes = 0;
54472
54473+ pax_track_stack();
54474+
54475 BUG_ON(!th->t_trans_id);
54476
54477 le_key2cpu_key(&cpu_key, key);
54478@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54479 int quota_cut_bytes;
54480 loff_t tail_pos = 0;
54481
54482+ pax_track_stack();
54483+
54484 BUG_ON(!th->t_trans_id);
54485
54486 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54487@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54488 int retval;
54489 int fs_gen;
54490
54491+ pax_track_stack();
54492+
54493 BUG_ON(!th->t_trans_id);
54494
54495 fs_gen = get_generation(inode->i_sb);
54496@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54497 int fs_gen = 0;
54498 int quota_bytes = 0;
54499
54500+ pax_track_stack();
54501+
54502 BUG_ON(!th->t_trans_id);
54503
54504 if (inode) { /* Do we count quotas for item? */
54505diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54506index 7cb1285..c726cd0 100644
54507--- a/fs/reiserfs/super.c
54508+++ b/fs/reiserfs/super.c
54509@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54510 {.option_name = NULL}
54511 };
54512
54513+ pax_track_stack();
54514+
54515 *blocks = 0;
54516 if (!options || !*options)
54517 /* use default configuration: create tails, journaling on, no
54518diff --git a/fs/select.c b/fs/select.c
54519index fd38ce2..f5381b8 100644
54520--- a/fs/select.c
54521+++ b/fs/select.c
54522@@ -20,6 +20,7 @@
54523 #include <linux/module.h>
54524 #include <linux/slab.h>
54525 #include <linux/poll.h>
54526+#include <linux/security.h>
54527 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54528 #include <linux/file.h>
54529 #include <linux/fdtable.h>
54530@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54531 int retval, i, timed_out = 0;
54532 unsigned long slack = 0;
54533
54534+ pax_track_stack();
54535+
54536 rcu_read_lock();
54537 retval = max_select_fd(n, fds);
54538 rcu_read_unlock();
54539@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54540 /* Allocate small arguments on the stack to save memory and be faster */
54541 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54542
54543+ pax_track_stack();
54544+
54545 ret = -EINVAL;
54546 if (n < 0)
54547 goto out_nofds;
54548@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54549 struct poll_list *walk = head;
54550 unsigned long todo = nfds;
54551
54552+ pax_track_stack();
54553+
54554+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54555 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54556 return -EINVAL;
54557
54558diff --git a/fs/seq_file.c b/fs/seq_file.c
54559index eae7d9d..4ddabe2 100644
54560--- a/fs/seq_file.c
54561+++ b/fs/seq_file.c
54562@@ -9,6 +9,7 @@
54563 #include <linux/module.h>
54564 #include <linux/seq_file.h>
54565 #include <linux/slab.h>
54566+#include <linux/sched.h>
54567
54568 #include <asm/uaccess.h>
54569 #include <asm/page.h>
54570@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
54571 memset(p, 0, sizeof(*p));
54572 mutex_init(&p->lock);
54573 p->op = op;
54574+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54575+ p->exec_id = current->exec_id;
54576+#endif
54577
54578 /*
54579 * Wrappers around seq_open(e.g. swaps_open) need to be
54580@@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54581 return 0;
54582 }
54583 if (!m->buf) {
54584- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54585+ m->size = PAGE_SIZE;
54586+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54587 if (!m->buf)
54588 return -ENOMEM;
54589 }
54590@@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54591 Eoverflow:
54592 m->op->stop(m, p);
54593 kfree(m->buf);
54594- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54595+ m->size <<= 1;
54596+ m->buf = kmalloc(m->size, GFP_KERNEL);
54597 return !m->buf ? -ENOMEM : -EAGAIN;
54598 }
54599
54600@@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54601 m->version = file->f_version;
54602 /* grab buffer if we didn't have one */
54603 if (!m->buf) {
54604- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54605+ m->size = PAGE_SIZE;
54606+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54607 if (!m->buf)
54608 goto Enomem;
54609 }
54610@@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54611 goto Fill;
54612 m->op->stop(m, p);
54613 kfree(m->buf);
54614- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54615+ m->size <<= 1;
54616+ m->buf = kmalloc(m->size, GFP_KERNEL);
54617 if (!m->buf)
54618 goto Enomem;
54619 m->count = 0;
54620@@ -551,7 +559,7 @@ static void single_stop(struct seq_file *p, void *v)
54621 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54622 void *data)
54623 {
54624- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54625+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54626 int res = -ENOMEM;
54627
54628 if (op) {
54629diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54630index 71c29b6..54694dd 100644
54631--- a/fs/smbfs/proc.c
54632+++ b/fs/smbfs/proc.c
54633@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54634
54635 out:
54636 if (server->local_nls != NULL && server->remote_nls != NULL)
54637- server->ops->convert = convert_cp;
54638+ *(void **)&server->ops->convert = convert_cp;
54639 else
54640- server->ops->convert = convert_memcpy;
54641+ *(void **)&server->ops->convert = convert_memcpy;
54642
54643 smb_unlock_server(server);
54644 return n;
54645@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54646
54647 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54648 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54649- server->ops->getattr = smb_proc_getattr_core;
54650+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
54651 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54652- server->ops->getattr = smb_proc_getattr_ff;
54653+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54654 }
54655
54656 /* Decode server capabilities */
54657@@ -3439,7 +3439,7 @@ out:
54658 static void
54659 install_ops(struct smb_ops *dst, struct smb_ops *src)
54660 {
54661- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54662+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54663 }
54664
54665 /* < LANMAN2 */
54666diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54667index 00b2909..2ace383 100644
54668--- a/fs/smbfs/symlink.c
54669+++ b/fs/smbfs/symlink.c
54670@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54671
54672 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54673 {
54674- char *s = nd_get_link(nd);
54675+ const char *s = nd_get_link(nd);
54676 if (!IS_ERR(s))
54677 __putname(s);
54678 }
54679diff --git a/fs/splice.c b/fs/splice.c
54680index bb92b7c..5aa72b0 100644
54681--- a/fs/splice.c
54682+++ b/fs/splice.c
54683@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54684 pipe_lock(pipe);
54685
54686 for (;;) {
54687- if (!pipe->readers) {
54688+ if (!atomic_read(&pipe->readers)) {
54689 send_sig(SIGPIPE, current, 0);
54690 if (!ret)
54691 ret = -EPIPE;
54692@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54693 do_wakeup = 0;
54694 }
54695
54696- pipe->waiting_writers++;
54697+ atomic_inc(&pipe->waiting_writers);
54698 pipe_wait(pipe);
54699- pipe->waiting_writers--;
54700+ atomic_dec(&pipe->waiting_writers);
54701 }
54702
54703 pipe_unlock(pipe);
54704@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54705 .spd_release = spd_release_page,
54706 };
54707
54708+ pax_track_stack();
54709+
54710 index = *ppos >> PAGE_CACHE_SHIFT;
54711 loff = *ppos & ~PAGE_CACHE_MASK;
54712 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54713@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54714 old_fs = get_fs();
54715 set_fs(get_ds());
54716 /* The cast to a user pointer is valid due to the set_fs() */
54717- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54718+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54719 set_fs(old_fs);
54720
54721 return res;
54722@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54723 old_fs = get_fs();
54724 set_fs(get_ds());
54725 /* The cast to a user pointer is valid due to the set_fs() */
54726- res = vfs_write(file, (const char __user *)buf, count, &pos);
54727+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54728 set_fs(old_fs);
54729
54730 return res;
54731@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54732 .spd_release = spd_release_page,
54733 };
54734
54735+ pax_track_stack();
54736+
54737 index = *ppos >> PAGE_CACHE_SHIFT;
54738 offset = *ppos & ~PAGE_CACHE_MASK;
54739 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54740@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54741 goto err;
54742
54743 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54744- vec[i].iov_base = (void __user *) page_address(page);
54745+ vec[i].iov_base = (__force void __user *) page_address(page);
54746 vec[i].iov_len = this_len;
54747 pages[i] = page;
54748 spd.nr_pages++;
54749@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54750 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54751 {
54752 while (!pipe->nrbufs) {
54753- if (!pipe->writers)
54754+ if (!atomic_read(&pipe->writers))
54755 return 0;
54756
54757- if (!pipe->waiting_writers && sd->num_spliced)
54758+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54759 return 0;
54760
54761 if (sd->flags & SPLICE_F_NONBLOCK)
54762@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54763 * out of the pipe right after the splice_to_pipe(). So set
54764 * PIPE_READERS appropriately.
54765 */
54766- pipe->readers = 1;
54767+ atomic_set(&pipe->readers, 1);
54768
54769 current->splice_pipe = pipe;
54770 }
54771@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54772 .spd_release = spd_release_page,
54773 };
54774
54775+ pax_track_stack();
54776+
54777 pipe = pipe_info(file->f_path.dentry->d_inode);
54778 if (!pipe)
54779 return -EBADF;
54780@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54781 ret = -ERESTARTSYS;
54782 break;
54783 }
54784- if (!pipe->writers)
54785+ if (!atomic_read(&pipe->writers))
54786 break;
54787- if (!pipe->waiting_writers) {
54788+ if (!atomic_read(&pipe->waiting_writers)) {
54789 if (flags & SPLICE_F_NONBLOCK) {
54790 ret = -EAGAIN;
54791 break;
54792@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54793 pipe_lock(pipe);
54794
54795 while (pipe->nrbufs >= PIPE_BUFFERS) {
54796- if (!pipe->readers) {
54797+ if (!atomic_read(&pipe->readers)) {
54798 send_sig(SIGPIPE, current, 0);
54799 ret = -EPIPE;
54800 break;
54801@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54802 ret = -ERESTARTSYS;
54803 break;
54804 }
54805- pipe->waiting_writers++;
54806+ atomic_inc(&pipe->waiting_writers);
54807 pipe_wait(pipe);
54808- pipe->waiting_writers--;
54809+ atomic_dec(&pipe->waiting_writers);
54810 }
54811
54812 pipe_unlock(pipe);
54813@@ -1786,14 +1792,14 @@ retry:
54814 pipe_double_lock(ipipe, opipe);
54815
54816 do {
54817- if (!opipe->readers) {
54818+ if (!atomic_read(&opipe->readers)) {
54819 send_sig(SIGPIPE, current, 0);
54820 if (!ret)
54821 ret = -EPIPE;
54822 break;
54823 }
54824
54825- if (!ipipe->nrbufs && !ipipe->writers)
54826+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54827 break;
54828
54829 /*
54830@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54831 pipe_double_lock(ipipe, opipe);
54832
54833 do {
54834- if (!opipe->readers) {
54835+ if (!atomic_read(&opipe->readers)) {
54836 send_sig(SIGPIPE, current, 0);
54837 if (!ret)
54838 ret = -EPIPE;
54839@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54840 * return EAGAIN if we have the potential of some data in the
54841 * future, otherwise just return 0
54842 */
54843- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54844+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54845 ret = -EAGAIN;
54846
54847 pipe_unlock(ipipe);
54848diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
54849index e020183..18d64b4 100644
54850--- a/fs/sysfs/dir.c
54851+++ b/fs/sysfs/dir.c
54852@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
54853 struct sysfs_dirent *sd;
54854 int rc;
54855
54856+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54857+ const char *parent_name = parent_sd->s_name;
54858+
54859+ mode = S_IFDIR | S_IRWXU;
54860+
54861+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
54862+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
54863+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
54864+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
54865+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
54866+#endif
54867+
54868 /* allocate */
54869 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
54870 if (!sd)
54871diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54872index 7118a38..70af853 100644
54873--- a/fs/sysfs/file.c
54874+++ b/fs/sysfs/file.c
54875@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54876
54877 struct sysfs_open_dirent {
54878 atomic_t refcnt;
54879- atomic_t event;
54880+ atomic_unchecked_t event;
54881 wait_queue_head_t poll;
54882 struct list_head buffers; /* goes through sysfs_buffer.list */
54883 };
54884@@ -53,7 +53,7 @@ struct sysfs_buffer {
54885 size_t count;
54886 loff_t pos;
54887 char * page;
54888- struct sysfs_ops * ops;
54889+ const struct sysfs_ops * ops;
54890 struct mutex mutex;
54891 int needs_read_fill;
54892 int event;
54893@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54894 {
54895 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54896 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54897- struct sysfs_ops * ops = buffer->ops;
54898+ const struct sysfs_ops * ops = buffer->ops;
54899 int ret = 0;
54900 ssize_t count;
54901
54902@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54903 if (!sysfs_get_active_two(attr_sd))
54904 return -ENODEV;
54905
54906- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54907+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54908 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54909
54910 sysfs_put_active_two(attr_sd);
54911@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54912 {
54913 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54914 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54915- struct sysfs_ops * ops = buffer->ops;
54916+ const struct sysfs_ops * ops = buffer->ops;
54917 int rc;
54918
54919 /* need attr_sd for attr and ops, its parent for kobj */
54920@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54921 return -ENOMEM;
54922
54923 atomic_set(&new_od->refcnt, 0);
54924- atomic_set(&new_od->event, 1);
54925+ atomic_set_unchecked(&new_od->event, 1);
54926 init_waitqueue_head(&new_od->poll);
54927 INIT_LIST_HEAD(&new_od->buffers);
54928 goto retry;
54929@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54930 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54931 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54932 struct sysfs_buffer *buffer;
54933- struct sysfs_ops *ops;
54934+ const struct sysfs_ops *ops;
54935 int error = -EACCES;
54936 char *p;
54937
54938@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54939
54940 sysfs_put_active_two(attr_sd);
54941
54942- if (buffer->event != atomic_read(&od->event))
54943+ if (buffer->event != atomic_read_unchecked(&od->event))
54944 goto trigger;
54945
54946 return DEFAULT_POLLMASK;
54947@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54948
54949 od = sd->s_attr.open;
54950 if (od) {
54951- atomic_inc(&od->event);
54952+ atomic_inc_unchecked(&od->event);
54953 wake_up_interruptible(&od->poll);
54954 }
54955
54956diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54957index c5081ad..342ea86 100644
54958--- a/fs/sysfs/symlink.c
54959+++ b/fs/sysfs/symlink.c
54960@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54961
54962 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54963 {
54964- char *page = nd_get_link(nd);
54965+ const char *page = nd_get_link(nd);
54966 if (!IS_ERR(page))
54967 free_page((unsigned long)page);
54968 }
54969diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54970index 1e06853..b06d325 100644
54971--- a/fs/udf/balloc.c
54972+++ b/fs/udf/balloc.c
54973@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54974
54975 mutex_lock(&sbi->s_alloc_mutex);
54976 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54977- if (bloc->logicalBlockNum < 0 ||
54978- (bloc->logicalBlockNum + count) >
54979- partmap->s_partition_len) {
54980+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54981 udf_debug("%d < %d || %d + %d > %d\n",
54982 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54983 count, partmap->s_partition_len);
54984@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54985
54986 mutex_lock(&sbi->s_alloc_mutex);
54987 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54988- if (bloc->logicalBlockNum < 0 ||
54989- (bloc->logicalBlockNum + count) >
54990- partmap->s_partition_len) {
54991+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54992 udf_debug("%d < %d || %d + %d > %d\n",
54993 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
54994 partmap->s_partition_len);
54995diff --git a/fs/udf/inode.c b/fs/udf/inode.c
54996index 6d24c2c..fff470f 100644
54997--- a/fs/udf/inode.c
54998+++ b/fs/udf/inode.c
54999@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
55000 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
55001 int lastblock = 0;
55002
55003+ pax_track_stack();
55004+
55005 prev_epos.offset = udf_file_entry_alloc_offset(inode);
55006 prev_epos.block = iinfo->i_location;
55007 prev_epos.bh = NULL;
55008diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55009index 9215700..bf1f68e 100644
55010--- a/fs/udf/misc.c
55011+++ b/fs/udf/misc.c
55012@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55013
55014 u8 udf_tag_checksum(const struct tag *t)
55015 {
55016- u8 *data = (u8 *)t;
55017+ const u8 *data = (const u8 *)t;
55018 u8 checksum = 0;
55019 int i;
55020 for (i = 0; i < sizeof(struct tag); ++i)
55021diff --git a/fs/utimes.c b/fs/utimes.c
55022index e4c75db..b4df0e0 100644
55023--- a/fs/utimes.c
55024+++ b/fs/utimes.c
55025@@ -1,6 +1,7 @@
55026 #include <linux/compiler.h>
55027 #include <linux/file.h>
55028 #include <linux/fs.h>
55029+#include <linux/security.h>
55030 #include <linux/linkage.h>
55031 #include <linux/mount.h>
55032 #include <linux/namei.h>
55033@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55034 goto mnt_drop_write_and_out;
55035 }
55036 }
55037+
55038+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55039+ error = -EACCES;
55040+ goto mnt_drop_write_and_out;
55041+ }
55042+
55043 mutex_lock(&inode->i_mutex);
55044 error = notify_change(path->dentry, &newattrs);
55045 mutex_unlock(&inode->i_mutex);
55046diff --git a/fs/xattr.c b/fs/xattr.c
55047index 6d4f6d3..cda3958 100644
55048--- a/fs/xattr.c
55049+++ b/fs/xattr.c
55050@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55051 * Extended attribute SET operations
55052 */
55053 static long
55054-setxattr(struct dentry *d, const char __user *name, const void __user *value,
55055+setxattr(struct path *path, const char __user *name, const void __user *value,
55056 size_t size, int flags)
55057 {
55058 int error;
55059@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55060 return PTR_ERR(kvalue);
55061 }
55062
55063- error = vfs_setxattr(d, kname, kvalue, size, flags);
55064+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55065+ error = -EACCES;
55066+ goto out;
55067+ }
55068+
55069+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55070+out:
55071 kfree(kvalue);
55072 return error;
55073 }
55074@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
55075 return error;
55076 error = mnt_want_write(path.mnt);
55077 if (!error) {
55078- error = setxattr(path.dentry, name, value, size, flags);
55079+ error = setxattr(&path, name, value, size, flags);
55080 mnt_drop_write(path.mnt);
55081 }
55082 path_put(&path);
55083@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
55084 return error;
55085 error = mnt_want_write(path.mnt);
55086 if (!error) {
55087- error = setxattr(path.dentry, name, value, size, flags);
55088+ error = setxattr(&path, name, value, size, flags);
55089 mnt_drop_write(path.mnt);
55090 }
55091 path_put(&path);
55092@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55093 const void __user *,value, size_t, size, int, flags)
55094 {
55095 struct file *f;
55096- struct dentry *dentry;
55097 int error = -EBADF;
55098
55099 f = fget(fd);
55100 if (!f)
55101 return error;
55102- dentry = f->f_path.dentry;
55103- audit_inode(NULL, dentry);
55104+ audit_inode(NULL, f->f_path.dentry);
55105 error = mnt_want_write_file(f);
55106 if (!error) {
55107- error = setxattr(dentry, name, value, size, flags);
55108+ error = setxattr(&f->f_path, name, value, size, flags);
55109 mnt_drop_write(f->f_path.mnt);
55110 }
55111 fput(f);
55112diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55113index c6ad7c7..f2847a7 100644
55114--- a/fs/xattr_acl.c
55115+++ b/fs/xattr_acl.c
55116@@ -17,8 +17,8 @@
55117 struct posix_acl *
55118 posix_acl_from_xattr(const void *value, size_t size)
55119 {
55120- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55121- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55122+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55123+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55124 int count;
55125 struct posix_acl *acl;
55126 struct posix_acl_entry *acl_e;
55127diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
55128index 942362f..88f96f5 100644
55129--- a/fs/xfs/linux-2.6/xfs_ioctl.c
55130+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
55131@@ -134,7 +134,7 @@ xfs_find_handle(
55132 }
55133
55134 error = -EFAULT;
55135- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55136+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55137 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55138 goto out_put;
55139
55140@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
55141 if (IS_ERR(dentry))
55142 return PTR_ERR(dentry);
55143
55144- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
55145+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
55146 if (!kbuf)
55147 goto out_dput;
55148
55149@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
55150 xfs_mount_t *mp,
55151 void __user *arg)
55152 {
55153- xfs_fsop_geom_t fsgeo;
55154+ xfs_fsop_geom_t fsgeo;
55155 int error;
55156
55157 error = xfs_fs_geometry(mp, &fsgeo, 3);
55158diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
55159index bad485a..479bd32 100644
55160--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
55161+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
55162@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
55163 xfs_fsop_geom_t fsgeo;
55164 int error;
55165
55166+ memset(&fsgeo, 0, sizeof(fsgeo));
55167 error = xfs_fs_geometry(mp, &fsgeo, 3);
55168 if (error)
55169 return -error;
55170diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
55171index 1f3b4b8..6102f6d 100644
55172--- a/fs/xfs/linux-2.6/xfs_iops.c
55173+++ b/fs/xfs/linux-2.6/xfs_iops.c
55174@@ -468,7 +468,7 @@ xfs_vn_put_link(
55175 struct nameidata *nd,
55176 void *p)
55177 {
55178- char *s = nd_get_link(nd);
55179+ const char *s = nd_get_link(nd);
55180
55181 if (!IS_ERR(s))
55182 kfree(s);
55183diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55184index 8971fb0..5fc1eb2 100644
55185--- a/fs/xfs/xfs_bmap.c
55186+++ b/fs/xfs/xfs_bmap.c
55187@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
55188 int nmap,
55189 int ret_nmap);
55190 #else
55191-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55192+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55193 #endif /* DEBUG */
55194
55195 #if defined(XFS_RW_TRACE)
55196diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55197index e89734e..5e84d8d 100644
55198--- a/fs/xfs/xfs_dir2_sf.c
55199+++ b/fs/xfs/xfs_dir2_sf.c
55200@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
55201 }
55202
55203 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
55204- if (filldir(dirent, sfep->name, sfep->namelen,
55205+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55206+ char name[sfep->namelen];
55207+ memcpy(name, sfep->name, sfep->namelen);
55208+ if (filldir(dirent, name, sfep->namelen,
55209+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
55210+ *offset = off & 0x7fffffff;
55211+ return 0;
55212+ }
55213+ } else if (filldir(dirent, sfep->name, sfep->namelen,
55214 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55215 *offset = off & 0x7fffffff;
55216 return 0;
55217diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
55218index 8f32f50..b6a41e8 100644
55219--- a/fs/xfs/xfs_vnodeops.c
55220+++ b/fs/xfs/xfs_vnodeops.c
55221@@ -564,13 +564,18 @@ xfs_readlink(
55222
55223 xfs_ilock(ip, XFS_ILOCK_SHARED);
55224
55225- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
55226- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
55227-
55228 pathlen = ip->i_d.di_size;
55229 if (!pathlen)
55230 goto out;
55231
55232+ if (pathlen > MAXPATHLEN) {
55233+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
55234+ __func__, (unsigned long long)ip->i_ino, pathlen);
55235+ ASSERT(0);
55236+ error = XFS_ERROR(EFSCORRUPTED);
55237+ goto out;
55238+ }
55239+
55240 if (ip->i_df.if_flags & XFS_IFINLINE) {
55241 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
55242 link[pathlen] = '\0';
55243diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55244new file mode 100644
55245index 0000000..7026cbd
55246--- /dev/null
55247+++ b/grsecurity/Kconfig
55248@@ -0,0 +1,1074 @@
55249+#
55250+# grecurity configuration
55251+#
55252+
55253+menu "Grsecurity"
55254+
55255+config GRKERNSEC
55256+ bool "Grsecurity"
55257+ select CRYPTO
55258+ select CRYPTO_SHA256
55259+ help
55260+ If you say Y here, you will be able to configure many features
55261+ that will enhance the security of your system. It is highly
55262+ recommended that you say Y here and read through the help
55263+ for each option so that you fully understand the features and
55264+ can evaluate their usefulness for your machine.
55265+
55266+choice
55267+ prompt "Security Level"
55268+ depends on GRKERNSEC
55269+ default GRKERNSEC_CUSTOM
55270+
55271+config GRKERNSEC_LOW
55272+ bool "Low"
55273+ select GRKERNSEC_LINK
55274+ select GRKERNSEC_FIFO
55275+ select GRKERNSEC_RANDNET
55276+ select GRKERNSEC_DMESG
55277+ select GRKERNSEC_CHROOT
55278+ select GRKERNSEC_CHROOT_CHDIR
55279+
55280+ help
55281+ If you choose this option, several of the grsecurity options will
55282+ be enabled that will give you greater protection against a number
55283+ of attacks, while assuring that none of your software will have any
55284+ conflicts with the additional security measures. If you run a lot
55285+ of unusual software, or you are having problems with the higher
55286+ security levels, you should say Y here. With this option, the
55287+ following features are enabled:
55288+
55289+ - Linking restrictions
55290+ - FIFO restrictions
55291+ - Restricted dmesg
55292+ - Enforced chdir("/") on chroot
55293+ - Runtime module disabling
55294+
55295+config GRKERNSEC_MEDIUM
55296+ bool "Medium"
55297+ select PAX
55298+ select PAX_EI_PAX
55299+ select PAX_PT_PAX_FLAGS
55300+ select PAX_HAVE_ACL_FLAGS
55301+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55302+ select GRKERNSEC_CHROOT
55303+ select GRKERNSEC_CHROOT_SYSCTL
55304+ select GRKERNSEC_LINK
55305+ select GRKERNSEC_FIFO
55306+ select GRKERNSEC_DMESG
55307+ select GRKERNSEC_RANDNET
55308+ select GRKERNSEC_FORKFAIL
55309+ select GRKERNSEC_TIME
55310+ select GRKERNSEC_SIGNAL
55311+ select GRKERNSEC_CHROOT
55312+ select GRKERNSEC_CHROOT_UNIX
55313+ select GRKERNSEC_CHROOT_MOUNT
55314+ select GRKERNSEC_CHROOT_PIVOT
55315+ select GRKERNSEC_CHROOT_DOUBLE
55316+ select GRKERNSEC_CHROOT_CHDIR
55317+ select GRKERNSEC_CHROOT_MKNOD
55318+ select GRKERNSEC_PROC
55319+ select GRKERNSEC_PROC_USERGROUP
55320+ select PAX_RANDUSTACK
55321+ select PAX_ASLR
55322+ select PAX_RANDMMAP
55323+ select PAX_REFCOUNT if (X86 || SPARC64)
55324+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55325+
55326+ help
55327+ If you say Y here, several features in addition to those included
55328+ in the low additional security level will be enabled. These
55329+ features provide even more security to your system, though in rare
55330+ cases they may be incompatible with very old or poorly written
55331+ software. If you enable this option, make sure that your auth
55332+ service (identd) is running as gid 1001. With this option,
55333+ the following features (in addition to those provided in the
55334+ low additional security level) will be enabled:
55335+
55336+ - Failed fork logging
55337+ - Time change logging
55338+ - Signal logging
55339+ - Deny mounts in chroot
55340+ - Deny double chrooting
55341+ - Deny sysctl writes in chroot
55342+ - Deny mknod in chroot
55343+ - Deny access to abstract AF_UNIX sockets out of chroot
55344+ - Deny pivot_root in chroot
55345+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55346+ - /proc restrictions with special GID set to 10 (usually wheel)
55347+ - Address Space Layout Randomization (ASLR)
55348+ - Prevent exploitation of most refcount overflows
55349+ - Bounds checking of copying between the kernel and userland
55350+
55351+config GRKERNSEC_HIGH
55352+ bool "High"
55353+ select GRKERNSEC_LINK
55354+ select GRKERNSEC_FIFO
55355+ select GRKERNSEC_DMESG
55356+ select GRKERNSEC_FORKFAIL
55357+ select GRKERNSEC_TIME
55358+ select GRKERNSEC_SIGNAL
55359+ select GRKERNSEC_CHROOT
55360+ select GRKERNSEC_CHROOT_SHMAT
55361+ select GRKERNSEC_CHROOT_UNIX
55362+ select GRKERNSEC_CHROOT_MOUNT
55363+ select GRKERNSEC_CHROOT_FCHDIR
55364+ select GRKERNSEC_CHROOT_PIVOT
55365+ select GRKERNSEC_CHROOT_DOUBLE
55366+ select GRKERNSEC_CHROOT_CHDIR
55367+ select GRKERNSEC_CHROOT_MKNOD
55368+ select GRKERNSEC_CHROOT_CAPS
55369+ select GRKERNSEC_CHROOT_SYSCTL
55370+ select GRKERNSEC_CHROOT_FINDTASK
55371+ select GRKERNSEC_SYSFS_RESTRICT
55372+ select GRKERNSEC_PROC
55373+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55374+ select GRKERNSEC_HIDESYM
55375+ select GRKERNSEC_BRUTE
55376+ select GRKERNSEC_PROC_USERGROUP
55377+ select GRKERNSEC_KMEM
55378+ select GRKERNSEC_RESLOG
55379+ select GRKERNSEC_RANDNET
55380+ select GRKERNSEC_PROC_ADD
55381+ select GRKERNSEC_CHROOT_CHMOD
55382+ select GRKERNSEC_CHROOT_NICE
55383+ select GRKERNSEC_SETXID
55384+ select GRKERNSEC_AUDIT_MOUNT
55385+ select GRKERNSEC_MODHARDEN if (MODULES)
55386+ select GRKERNSEC_HARDEN_PTRACE
55387+ select GRKERNSEC_PTRACE_READEXEC
55388+ select GRKERNSEC_VM86 if (X86_32)
55389+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55390+ select PAX
55391+ select PAX_RANDUSTACK
55392+ select PAX_ASLR
55393+ select PAX_RANDMMAP
55394+ select PAX_NOEXEC
55395+ select PAX_MPROTECT
55396+ select PAX_EI_PAX
55397+ select PAX_PT_PAX_FLAGS
55398+ select PAX_HAVE_ACL_FLAGS
55399+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55400+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55401+ select PAX_RANDKSTACK if (X86_TSC && X86)
55402+ select PAX_SEGMEXEC if (X86_32)
55403+ select PAX_PAGEEXEC
55404+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55405+ select PAX_EMUTRAMP if (PARISC)
55406+ select PAX_EMUSIGRT if (PARISC)
55407+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55408+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55409+ select PAX_REFCOUNT if (X86 || SPARC64)
55410+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55411+ help
55412+ If you say Y here, many of the features of grsecurity will be
55413+ enabled, which will protect you against many kinds of attacks
55414+ against your system. The heightened security comes at a cost
55415+ of an increased chance of incompatibilities with rare software
55416+ on your machine. Since this security level enables PaX, you should
55417+ view <http://pax.grsecurity.net> and read about the PaX
55418+ project. While you are there, download chpax and run it on
55419+ binaries that cause problems with PaX. Also remember that
55420+ since the /proc restrictions are enabled, you must run your
55421+ identd as gid 1001. This security level enables the following
55422+ features in addition to those listed in the low and medium
55423+ security levels:
55424+
55425+ - Additional /proc restrictions
55426+ - Chmod restrictions in chroot
55427+ - No signals, ptrace, or viewing of processes outside of chroot
55428+ - Capability restrictions in chroot
55429+ - Deny fchdir out of chroot
55430+ - Priority restrictions in chroot
55431+ - Segmentation-based implementation of PaX
55432+ - Mprotect restrictions
55433+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55434+ - Kernel stack randomization
55435+ - Mount/unmount/remount logging
55436+ - Kernel symbol hiding
55437+ - Hardening of module auto-loading
55438+ - Ptrace restrictions
55439+ - Restricted vm86 mode
55440+ - Restricted sysfs/debugfs
55441+ - Active kernel exploit response
55442+
55443+config GRKERNSEC_CUSTOM
55444+ bool "Custom"
55445+ help
55446+ If you say Y here, you will be able to configure every grsecurity
55447+ option, which allows you to enable many more features that aren't
55448+ covered in the basic security levels. These additional features
55449+ include TPE, socket restrictions, and the sysctl system for
55450+ grsecurity. It is advised that you read through the help for
55451+ each option to determine its usefulness in your situation.
55452+
55453+endchoice
55454+
55455+menu "Memory Protections"
55456+depends on GRKERNSEC
55457+
55458+config GRKERNSEC_KMEM
55459+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55460+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55461+ help
55462+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55463+ be written to or read from to modify or leak the contents of the running
55464+ kernel. /dev/port will also not be allowed to be opened. If you have module
55465+ support disabled, enabling this will close up four ways that are
55466+ currently used to insert malicious code into the running kernel.
55467+ Even with all these features enabled, we still highly recommend that
55468+ you use the RBAC system, as it is still possible for an attacker to
55469+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55470+ If you are not using XFree86, you may be able to stop this additional
55471+ case by enabling the 'Disable privileged I/O' option. Though nothing
55472+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55473+ but only to video memory, which is the only writing we allow in this
55474+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55475+ not be allowed to mprotect it with PROT_WRITE later.
55476+ It is highly recommended that you say Y here if you meet all the
55477+ conditions above.
55478+
55479+config GRKERNSEC_VM86
55480+ bool "Restrict VM86 mode"
55481+ depends on X86_32
55482+
55483+ help
55484+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55485+ make use of a special execution mode on 32bit x86 processors called
55486+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55487+ video cards and will still work with this option enabled. The purpose
55488+ of the option is to prevent exploitation of emulation errors in
55489+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55490+ Nearly all users should be able to enable this option.
55491+
55492+config GRKERNSEC_IO
55493+ bool "Disable privileged I/O"
55494+ depends on X86
55495+ select RTC_CLASS
55496+ select RTC_INTF_DEV
55497+ select RTC_DRV_CMOS
55498+
55499+ help
55500+ If you say Y here, all ioperm and iopl calls will return an error.
55501+ Ioperm and iopl can be used to modify the running kernel.
55502+ Unfortunately, some programs need this access to operate properly,
55503+ the most notable of which are XFree86 and hwclock. hwclock can be
55504+ remedied by having RTC support in the kernel, so real-time
55505+ clock support is enabled if this option is enabled, to ensure
55506+ that hwclock operates correctly. XFree86 still will not
55507+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55508+ IF YOU USE XFree86. If you use XFree86 and you still want to
55509+ protect your kernel against modification, use the RBAC system.
55510+
55511+config GRKERNSEC_PROC_MEMMAP
55512+ bool "Harden ASLR against information leaks and entropy reduction"
55513+ default y if (PAX_NOEXEC || PAX_ASLR)
55514+ depends on PAX_NOEXEC || PAX_ASLR
55515+ help
55516+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55517+ give no information about the addresses of its mappings if
55518+ PaX features that rely on random addresses are enabled on the task.
55519+ In addition to sanitizing this information and disabling other
55520+ dangerous sources of information, this option causes reads of sensitive
55521+ /proc/<pid> entries where the file descriptor was opened in a different
55522+ task than the one performing the read. Such attempts are logged.
55523+ Finally, this option limits argv/env strings for suid/sgid binaries
55524+ to 1MB to prevent a complete exhaustion of the stack entropy provided
55525+ by ASLR.
55526+ If you use PaX it is essential that you say Y here as it closes up
55527+ several holes that make full ASLR useless for suid/sgid binaries.
55528+
55529+config GRKERNSEC_BRUTE
55530+ bool "Deter exploit bruteforcing"
55531+ help
55532+ If you say Y here, attempts to bruteforce exploits against forking
55533+ daemons such as apache or sshd, as well as against suid/sgid binaries
55534+ will be deterred. When a child of a forking daemon is killed by PaX
55535+ or crashes due to an illegal instruction or other suspicious signal,
55536+ the parent process will be delayed 30 seconds upon every subsequent
55537+ fork until the administrator is able to assess the situation and
55538+ restart the daemon.
55539+ In the suid/sgid case, the attempt is logged, the user has all their
55540+ processes terminated, and they are prevented from executing any further
55541+ processes for 15 minutes.
55542+ It is recommended that you also enable signal logging in the auditing
55543+ section so that logs are generated when a process triggers a suspicious
55544+ signal.
55545+ If the sysctl option is enabled, a sysctl option with name
55546+ "deter_bruteforce" is created.
55547+
55548+config GRKERNSEC_MODHARDEN
55549+ bool "Harden module auto-loading"
55550+ depends on MODULES
55551+ help
55552+ If you say Y here, module auto-loading in response to use of some
55553+ feature implemented by an unloaded module will be restricted to
55554+ root users. Enabling this option helps defend against attacks
55555+ by unprivileged users who abuse the auto-loading behavior to
55556+ cause a vulnerable module to load that is then exploited.
55557+
55558+ If this option prevents a legitimate use of auto-loading for a
55559+ non-root user, the administrator can execute modprobe manually
55560+ with the exact name of the module mentioned in the alert log.
55561+ Alternatively, the administrator can add the module to the list
55562+ of modules loaded at boot by modifying init scripts.
55563+
55564+ Modification of init scripts will most likely be needed on
55565+ Ubuntu servers with encrypted home directory support enabled,
55566+ as the first non-root user logging in will cause the ecb(aes),
55567+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55568+
55569+config GRKERNSEC_HIDESYM
55570+ bool "Hide kernel symbols"
55571+ help
55572+ If you say Y here, getting information on loaded modules, and
55573+ displaying all kernel symbols through a syscall will be restricted
55574+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55575+ /proc/kallsyms will be restricted to the root user. The RBAC
55576+ system can hide that entry even from root.
55577+
55578+ This option also prevents leaking of kernel addresses through
55579+ several /proc entries.
55580+
55581+ Note that this option is only effective provided the following
55582+ conditions are met:
55583+ 1) The kernel using grsecurity is not precompiled by some distribution
55584+ 2) You have also enabled GRKERNSEC_DMESG
55585+ 3) You are using the RBAC system and hiding other files such as your
55586+ kernel image and System.map. Alternatively, enabling this option
55587+ causes the permissions on /boot, /lib/modules, and the kernel
55588+ source directory to change at compile time to prevent
55589+ reading by non-root users.
55590+ If the above conditions are met, this option will aid in providing a
55591+ useful protection against local kernel exploitation of overflows
55592+ and arbitrary read/write vulnerabilities.
55593+
55594+config GRKERNSEC_KERN_LOCKOUT
55595+ bool "Active kernel exploit response"
55596+ depends on X86 || ARM || PPC || SPARC
55597+ help
55598+ If you say Y here, when a PaX alert is triggered due to suspicious
55599+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55600+ or an OOPs occurs due to bad memory accesses, instead of just
55601+ terminating the offending process (and potentially allowing
55602+ a subsequent exploit from the same user), we will take one of two
55603+ actions:
55604+ If the user was root, we will panic the system
55605+ If the user was non-root, we will log the attempt, terminate
55606+ all processes owned by the user, then prevent them from creating
55607+ any new processes until the system is restarted
55608+ This deters repeated kernel exploitation/bruteforcing attempts
55609+ and is useful for later forensics.
55610+
55611+endmenu
55612+menu "Role Based Access Control Options"
55613+depends on GRKERNSEC
55614+
55615+config GRKERNSEC_RBAC_DEBUG
55616+ bool
55617+
55618+config GRKERNSEC_NO_RBAC
55619+ bool "Disable RBAC system"
55620+ help
55621+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55622+ preventing the RBAC system from being enabled. You should only say Y
55623+ here if you have no intention of using the RBAC system, so as to prevent
55624+ an attacker with root access from misusing the RBAC system to hide files
55625+ and processes when loadable module support and /dev/[k]mem have been
55626+ locked down.
55627+
55628+config GRKERNSEC_ACL_HIDEKERN
55629+ bool "Hide kernel processes"
55630+ help
55631+ If you say Y here, all kernel threads will be hidden to all
55632+ processes but those whose subject has the "view hidden processes"
55633+ flag.
55634+
55635+config GRKERNSEC_ACL_MAXTRIES
55636+ int "Maximum tries before password lockout"
55637+ default 3
55638+ help
55639+ This option enforces the maximum number of times a user can attempt
55640+ to authorize themselves with the grsecurity RBAC system before being
55641+ denied the ability to attempt authorization again for a specified time.
55642+ The lower the number, the harder it will be to brute-force a password.
55643+
55644+config GRKERNSEC_ACL_TIMEOUT
55645+ int "Time to wait after max password tries, in seconds"
55646+ default 30
55647+ help
55648+ This option specifies the time the user must wait after attempting to
55649+ authorize to the RBAC system with the maximum number of invalid
55650+ passwords. The higher the number, the harder it will be to brute-force
55651+ a password.
55652+
55653+endmenu
55654+menu "Filesystem Protections"
55655+depends on GRKERNSEC
55656+
55657+config GRKERNSEC_PROC
55658+ bool "Proc restrictions"
55659+ help
55660+ If you say Y here, the permissions of the /proc filesystem
55661+ will be altered to enhance system security and privacy. You MUST
55662+ choose either a user only restriction or a user and group restriction.
55663+ Depending upon the option you choose, you can either restrict users to
55664+ see only the processes they themselves run, or choose a group that can
55665+ view all processes and files normally restricted to root if you choose
55666+ the "restrict to user only" option. NOTE: If you're running identd as
55667+ a non-root user, you will have to run it as the group you specify here.
55668+
55669+config GRKERNSEC_PROC_USER
55670+ bool "Restrict /proc to user only"
55671+ depends on GRKERNSEC_PROC
55672+ help
55673+ If you say Y here, non-root users will only be able to view their own
55674+ processes, and will be restricted from viewing network-related information,
55675+ and viewing kernel symbol and module information.
55676+
55677+config GRKERNSEC_PROC_USERGROUP
55678+ bool "Allow special group"
55679+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55680+ help
55681+ If you say Y here, you will be able to select a group that will be
55682+ able to view all processes and network-related information. If you've
55683+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55684+ remain hidden. This option is useful if you want to run identd as
55685+ a non-root user.
55686+
55687+config GRKERNSEC_PROC_GID
55688+ int "GID for special group"
55689+ depends on GRKERNSEC_PROC_USERGROUP
55690+ default 1001
55691+
55692+config GRKERNSEC_PROC_ADD
55693+ bool "Additional restrictions"
55694+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55695+ help
55696+ If you say Y here, additional restrictions will be placed on
55697+ /proc that keep normal users from viewing device information and
55698+ slabinfo information that could be useful for exploits.
55699+
55700+config GRKERNSEC_LINK
55701+ bool "Linking restrictions"
55702+ help
55703+ If you say Y here, /tmp race exploits will be prevented, since users
55704+ will no longer be able to follow symlinks owned by other users in
55705+ world-writable +t directories (e.g. /tmp), unless the owner of the
55706+ symlink is the owner of the directory. Users will also not be
55707+ able to hardlink to files they do not own. If the sysctl option is
55708+ enabled, a sysctl option with name "linking_restrictions" is created.
55709+
55710+config GRKERNSEC_FIFO
55711+ bool "FIFO restrictions"
55712+ help
55713+ If you say Y here, users will not be able to write to FIFOs they don't
55714+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55715+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
55716+ option is enabled, a sysctl option with name "fifo_restrictions" is
55717+ created.
55718+
55719+config GRKERNSEC_SYSFS_RESTRICT
55720+ bool "Sysfs/debugfs restriction"
55721+ depends on SYSFS
55722+ help
55723+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55724+ any filesystem normally mounted under it (e.g. debugfs) will be
55725+ mostly accessible only by root. These filesystems generally provide access
55726+ to hardware and debug information that isn't appropriate for unprivileged
55727+ users of the system. Sysfs and debugfs have also become a large source
55728+ of new vulnerabilities, ranging from infoleaks to local compromise.
55729+ There has been very little oversight with an eye toward security involved
55730+ in adding new exporters of information to these filesystems, so their
55731+ use is discouraged.
55732+ For reasons of compatibility, a few directories have been whitelisted
55733+ for access by non-root users:
55734+ /sys/fs/selinux
55735+ /sys/fs/fuse
55736+ /sys/devices/system/cpu
55737+
55738+config GRKERNSEC_ROFS
55739+ bool "Runtime read-only mount protection"
55740+ help
55741+ If you say Y here, a sysctl option with name "romount_protect" will
55742+ be created. By setting this option to 1 at runtime, filesystems
55743+ will be protected in the following ways:
55744+ * No new writable mounts will be allowed
55745+ * Existing read-only mounts won't be able to be remounted read/write
55746+ * Write operations will be denied on all block devices
55747+ This option acts independently of grsec_lock: once it is set to 1,
55748+ it cannot be turned off. Therefore, please be mindful of the resulting
55749+ behavior if this option is enabled in an init script on a read-only
55750+ filesystem. This feature is mainly intended for secure embedded systems.
55751+
55752+config GRKERNSEC_CHROOT
55753+ bool "Chroot jail restrictions"
55754+ help
55755+ If you say Y here, you will be able to choose several options that will
55756+ make breaking out of a chrooted jail much more difficult. If you
55757+ encounter no software incompatibilities with the following options, it
55758+ is recommended that you enable each one.
55759+
55760+config GRKERNSEC_CHROOT_MOUNT
55761+ bool "Deny mounts"
55762+ depends on GRKERNSEC_CHROOT
55763+ help
55764+ If you say Y here, processes inside a chroot will not be able to
55765+ mount or remount filesystems. If the sysctl option is enabled, a
55766+ sysctl option with name "chroot_deny_mount" is created.
55767+
55768+config GRKERNSEC_CHROOT_DOUBLE
55769+ bool "Deny double-chroots"
55770+ depends on GRKERNSEC_CHROOT
55771+ help
55772+ If you say Y here, processes inside a chroot will not be able to chroot
55773+ again outside the chroot. This is a widely used method of breaking
55774+ out of a chroot jail and should not be allowed. If the sysctl
55775+ option is enabled, a sysctl option with name
55776+ "chroot_deny_chroot" is created.
55777+
55778+config GRKERNSEC_CHROOT_PIVOT
55779+ bool "Deny pivot_root in chroot"
55780+ depends on GRKERNSEC_CHROOT
55781+ help
55782+ If you say Y here, processes inside a chroot will not be able to use
55783+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55784+ works similar to chroot in that it changes the root filesystem. This
55785+ function could be misused in a chrooted process to attempt to break out
55786+ of the chroot, and therefore should not be allowed. If the sysctl
55787+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55788+ created.
55789+
55790+config GRKERNSEC_CHROOT_CHDIR
55791+ bool "Enforce chdir(\"/\") on all chroots"
55792+ depends on GRKERNSEC_CHROOT
55793+ help
55794+ If you say Y here, the current working directory of all newly-chrooted
55795+ applications will be set to the root directory of the chroot.
55796+ The man page on chroot(2) states:
55797+ Note that this call does not change the current working
55798+ directory, so that `.' can be outside the tree rooted at
55799+ `/'. In particular, the super-user can escape from a
55800+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55801+
55802+ It is recommended that you say Y here, since it's not known to break
55803+ any software. If the sysctl option is enabled, a sysctl option with
55804+ name "chroot_enforce_chdir" is created.
55805+
55806+config GRKERNSEC_CHROOT_CHMOD
55807+ bool "Deny (f)chmod +s"
55808+ depends on GRKERNSEC_CHROOT
55809+ help
55810+ If you say Y here, processes inside a chroot will not be able to chmod
55811+ or fchmod files to make them have suid or sgid bits. This protects
55812+ against another published method of breaking a chroot. If the sysctl
55813+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55814+ created.
55815+
55816+config GRKERNSEC_CHROOT_FCHDIR
55817+ bool "Deny fchdir out of chroot"
55818+ depends on GRKERNSEC_CHROOT
55819+ help
55820+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55821+ to a file descriptor of the chrooting process that points to a directory
55822+ outside the filesystem will be stopped. If the sysctl option
55823+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55824+
55825+config GRKERNSEC_CHROOT_MKNOD
55826+ bool "Deny mknod"
55827+ depends on GRKERNSEC_CHROOT
55828+ help
55829+ If you say Y here, processes inside a chroot will not be allowed to
55830+ mknod. The problem with using mknod inside a chroot is that it
55831+ would allow an attacker to create a device entry that is the same
55832+ as one on the physical root of your system, which could range from
55833+ anything from the console device to a device for your harddrive (which
55834+ they could then use to wipe the drive or steal data). It is recommended
55835+ that you say Y here, unless you run into software incompatibilities.
55836+ If the sysctl option is enabled, a sysctl option with name
55837+ "chroot_deny_mknod" is created.
55838+
55839+config GRKERNSEC_CHROOT_SHMAT
55840+ bool "Deny shmat() out of chroot"
55841+ depends on GRKERNSEC_CHROOT
55842+ help
55843+ If you say Y here, processes inside a chroot will not be able to attach
55844+ to shared memory segments that were created outside of the chroot jail.
55845+ It is recommended that you say Y here. If the sysctl option is enabled,
55846+ a sysctl option with name "chroot_deny_shmat" is created.
55847+
55848+config GRKERNSEC_CHROOT_UNIX
55849+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
55850+ depends on GRKERNSEC_CHROOT
55851+ help
55852+ If you say Y here, processes inside a chroot will not be able to
55853+ connect to abstract (meaning not belonging to a filesystem) Unix
55854+ domain sockets that were bound outside of a chroot. It is recommended
55855+ that you say Y here. If the sysctl option is enabled, a sysctl option
55856+ with name "chroot_deny_unix" is created.
55857+
55858+config GRKERNSEC_CHROOT_FINDTASK
55859+ bool "Protect outside processes"
55860+ depends on GRKERNSEC_CHROOT
55861+ help
55862+ If you say Y here, processes inside a chroot will not be able to
55863+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55864+ getsid, or view any process outside of the chroot. If the sysctl
55865+ option is enabled, a sysctl option with name "chroot_findtask" is
55866+ created.
55867+
55868+config GRKERNSEC_CHROOT_NICE
55869+ bool "Restrict priority changes"
55870+ depends on GRKERNSEC_CHROOT
55871+ help
55872+ If you say Y here, processes inside a chroot will not be able to raise
55873+ the priority of processes in the chroot, or alter the priority of
55874+ processes outside the chroot. This provides more security than simply
55875+ removing CAP_SYS_NICE from the process' capability set. If the
55876+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55877+ is created.
55878+
55879+config GRKERNSEC_CHROOT_SYSCTL
55880+ bool "Deny sysctl writes"
55881+ depends on GRKERNSEC_CHROOT
55882+ help
55883+ If you say Y here, an attacker in a chroot will not be able to
55884+ write to sysctl entries, either by sysctl(2) or through a /proc
55885+ interface. It is strongly recommended that you say Y here. If the
55886+ sysctl option is enabled, a sysctl option with name
55887+ "chroot_deny_sysctl" is created.
55888+
55889+config GRKERNSEC_CHROOT_CAPS
55890+ bool "Capability restrictions"
55891+ depends on GRKERNSEC_CHROOT
55892+ help
55893+ If you say Y here, the capabilities on all processes within a
55894+ chroot jail will be lowered to stop module insertion, raw i/o,
55895+ system and net admin tasks, rebooting the system, modifying immutable
55896+ files, modifying IPC owned by another, and changing the system time.
55897+ This is left an option because it can break some apps. Disable this
55898+ if your chrooted apps are having problems performing those kinds of
55899+ tasks. If the sysctl option is enabled, a sysctl option with
55900+ name "chroot_caps" is created.
55901+
55902+endmenu
55903+menu "Kernel Auditing"
55904+depends on GRKERNSEC
55905+
55906+config GRKERNSEC_AUDIT_GROUP
55907+ bool "Single group for auditing"
55908+ help
55909+ If you say Y here, the exec, chdir, and (un)mount logging features
55910+ will only operate on a group you specify. This option is recommended
55911+ if you only want to watch certain users instead of having a large
55912+ amount of logs from the entire system. If the sysctl option is enabled,
55913+ a sysctl option with name "audit_group" is created.
55914+
55915+config GRKERNSEC_AUDIT_GID
55916+ int "GID for auditing"
55917+ depends on GRKERNSEC_AUDIT_GROUP
55918+ default 1007
55919+
55920+config GRKERNSEC_EXECLOG
55921+ bool "Exec logging"
55922+ help
55923+ If you say Y here, all execve() calls will be logged (since the
55924+ other exec*() calls are frontends to execve(), all execution
55925+ will be logged). Useful for shell-servers that like to keep track
55926+ of their users. If the sysctl option is enabled, a sysctl option with
55927+ name "exec_logging" is created.
55928+ WARNING: This option when enabled will produce a LOT of logs, especially
55929+ on an active system.
55930+
55931+config GRKERNSEC_RESLOG
55932+ bool "Resource logging"
55933+ help
55934+ If you say Y here, all attempts to overstep resource limits will
55935+ be logged with the resource name, the requested size, and the current
55936+ limit. It is highly recommended that you say Y here. If the sysctl
55937+ option is enabled, a sysctl option with name "resource_logging" is
55938+ created. If the RBAC system is enabled, the sysctl value is ignored.
55939+
55940+config GRKERNSEC_CHROOT_EXECLOG
55941+ bool "Log execs within chroot"
55942+ help
55943+ If you say Y here, all executions inside a chroot jail will be logged
55944+ to syslog. This can cause a large amount of logs if certain
55945+ applications (eg. djb's daemontools) are installed on the system, and
55946+ is therefore left as an option. If the sysctl option is enabled, a
55947+ sysctl option with name "chroot_execlog" is created.
55948+
55949+config GRKERNSEC_AUDIT_PTRACE
55950+ bool "Ptrace logging"
55951+ help
55952+ If you say Y here, all attempts to attach to a process via ptrace
55953+ will be logged. If the sysctl option is enabled, a sysctl option
55954+ with name "audit_ptrace" is created.
55955+
55956+config GRKERNSEC_AUDIT_CHDIR
55957+ bool "Chdir logging"
55958+ help
55959+ If you say Y here, all chdir() calls will be logged. If the sysctl
55960+ option is enabled, a sysctl option with name "audit_chdir" is created.
55961+
55962+config GRKERNSEC_AUDIT_MOUNT
55963+ bool "(Un)Mount logging"
55964+ help
55965+ If you say Y here, all mounts and unmounts will be logged. If the
55966+ sysctl option is enabled, a sysctl option with name "audit_mount" is
55967+ created.
55968+
55969+config GRKERNSEC_SIGNAL
55970+ bool "Signal logging"
55971+ help
55972+ If you say Y here, certain important signals will be logged, such as
55973+ SIGSEGV, which will as a result inform you of when an error in a program
55974+ occurred, which in some cases could mean a possible exploit attempt.
55975+ If the sysctl option is enabled, a sysctl option with name
55976+ "signal_logging" is created.
55977+
55978+config GRKERNSEC_FORKFAIL
55979+ bool "Fork failure logging"
55980+ help
55981+ If you say Y here, all failed fork() attempts will be logged.
55982+ This could suggest a fork bomb, or someone attempting to overstep
55983+ their process limit. If the sysctl option is enabled, a sysctl option
55984+ with name "forkfail_logging" is created.
55985+
55986+config GRKERNSEC_TIME
55987+ bool "Time change logging"
55988+ help
55989+ If you say Y here, any changes of the system clock will be logged.
55990+ If the sysctl option is enabled, a sysctl option with name
55991+ "timechange_logging" is created.
55992+
55993+config GRKERNSEC_PROC_IPADDR
55994+ bool "/proc/<pid>/ipaddr support"
55995+ help
55996+ If you say Y here, a new entry will be added to each /proc/<pid>
55997+ directory that contains the IP address of the person using the task.
55998+ The IP is carried across local TCP and AF_UNIX stream sockets.
55999+ This information can be useful for IDS/IPSes to perform remote response
56000+ to a local attack. The entry is readable by only the owner of the
56001+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56002+ the RBAC system), and thus does not create privacy concerns.
56003+
56004+config GRKERNSEC_RWXMAP_LOG
56005+ bool 'Denied RWX mmap/mprotect logging'
56006+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56007+ help
56008+ If you say Y here, calls to mmap() and mprotect() with explicit
56009+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56010+ denied by the PAX_MPROTECT feature. If the sysctl option is
56011+ enabled, a sysctl option with name "rwxmap_logging" is created.
56012+
56013+config GRKERNSEC_AUDIT_TEXTREL
56014+ bool 'ELF text relocations logging (READ HELP)'
56015+ depends on PAX_MPROTECT
56016+ help
56017+ If you say Y here, text relocations will be logged with the filename
56018+ of the offending library or binary. The purpose of the feature is
56019+ to help Linux distribution developers get rid of libraries and
56020+ binaries that need text relocations which hinder the future progress
56021+ of PaX. Only Linux distribution developers should say Y here, and
56022+ never on a production machine, as this option creates an information
56023+ leak that could aid an attacker in defeating the randomization of
56024+ a single memory region. If the sysctl option is enabled, a sysctl
56025+ option with name "audit_textrel" is created.
56026+
56027+endmenu
56028+
56029+menu "Executable Protections"
56030+depends on GRKERNSEC
56031+
56032+config GRKERNSEC_DMESG
56033+ bool "Dmesg(8) restriction"
56034+ help
56035+ If you say Y here, non-root users will not be able to use dmesg(8)
56036+ to view up to the last 4kb of messages in the kernel's log buffer.
56037+ The kernel's log buffer often contains kernel addresses and other
56038+ identifying information useful to an attacker in fingerprinting a
56039+ system for a targeted exploit.
56040+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56041+ created.
56042+
56043+config GRKERNSEC_HARDEN_PTRACE
56044+ bool "Deter ptrace-based process snooping"
56045+ help
56046+ If you say Y here, TTY sniffers and other malicious monitoring
56047+ programs implemented through ptrace will be defeated. If you
56048+ have been using the RBAC system, this option has already been
56049+ enabled for several years for all users, with the ability to make
56050+ fine-grained exceptions.
56051+
56052+ This option only affects the ability of non-root users to ptrace
56053+ processes that are not a descendent of the ptracing process.
56054+ This means that strace ./binary and gdb ./binary will still work,
56055+ but attaching to arbitrary processes will not. If the sysctl
56056+ option is enabled, a sysctl option with name "harden_ptrace" is
56057+ created.
56058+
56059+config GRKERNSEC_PTRACE_READEXEC
56060+ bool "Require read access to ptrace sensitive binaries"
56061+ help
56062+ If you say Y here, unprivileged users will not be able to ptrace unreadable
56063+ binaries. This option is useful in environments that
56064+ remove the read bits (e.g. file mode 4711) from suid binaries to
56065+ prevent infoleaking of their contents. This option adds
56066+ consistency to the use of that file mode, as the binary could normally
56067+ be read out when run without privileges while ptracing.
56068+
56069+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56070+ is created.
56071+
56072+config GRKERNSEC_SETXID
56073+ bool "Enforce consistent multithreaded privileges"
56074+ help
56075+ If you say Y here, a change from a root uid to a non-root uid
56076+ in a multithreaded application will cause the resulting uids,
56077+ gids, supplementary groups, and capabilities in that thread
56078+ to be propagated to the other threads of the process. In most
56079+ cases this is unnecessary, as glibc will emulate this behavior
56080+ on behalf of the application. Other libcs do not act in the
56081+ same way, allowing the other threads of the process to continue
56082+ running with root privileges. If the sysctl option is enabled,
56083+ a sysctl option with name "consistent_setxid" is created.
56084+
56085+config GRKERNSEC_TPE
56086+ bool "Trusted Path Execution (TPE)"
56087+ help
56088+ If you say Y here, you will be able to choose a gid to add to the
56089+ supplementary groups of users you want to mark as "untrusted."
56090+ These users will not be able to execute any files that are not in
56091+ root-owned directories writable only by root. If the sysctl option
56092+ is enabled, a sysctl option with name "tpe" is created.
56093+
56094+config GRKERNSEC_TPE_ALL
56095+ bool "Partially restrict all non-root users"
56096+ depends on GRKERNSEC_TPE
56097+ help
56098+ If you say Y here, all non-root users will be covered under
56099+ a weaker TPE restriction. This is separate from, and in addition to,
56100+ the main TPE options that you have selected elsewhere. Thus, if a
56101+ "trusted" GID is chosen, this restriction applies to even that GID.
56102+ Under this restriction, all non-root users will only be allowed to
56103+ execute files in directories they own that are not group or
56104+ world-writable, or in directories owned by root and writable only by
56105+ root. If the sysctl option is enabled, a sysctl option with name
56106+ "tpe_restrict_all" is created.
56107+
56108+config GRKERNSEC_TPE_INVERT
56109+ bool "Invert GID option"
56110+ depends on GRKERNSEC_TPE
56111+ help
56112+ If you say Y here, the group you specify in the TPE configuration will
56113+ decide what group TPE restrictions will be *disabled* for. This
56114+ option is useful if you want TPE restrictions to be applied to most
56115+ users on the system. If the sysctl option is enabled, a sysctl option
56116+ with name "tpe_invert" is created. Unlike other sysctl options, this
56117+ entry will default to on for backward-compatibility.
56118+
56119+config GRKERNSEC_TPE_GID
56120+ int "GID for untrusted users"
56121+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56122+ default 1005
56123+ help
56124+ Setting this GID determines what group TPE restrictions will be
56125+ *enabled* for. If the sysctl option is enabled, a sysctl option
56126+ with name "tpe_gid" is created.
56127+
56128+config GRKERNSEC_TPE_GID
56129+ int "GID for trusted users"
56130+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56131+ default 1005
56132+ help
56133+ Setting this GID determines what group TPE restrictions will be
56134+ *disabled* for. If the sysctl option is enabled, a sysctl option
56135+ with name "tpe_gid" is created.
56136+
56137+endmenu
56138+menu "Network Protections"
56139+depends on GRKERNSEC
56140+
56141+config GRKERNSEC_RANDNET
56142+ bool "Larger entropy pools"
56143+ help
56144+ If you say Y here, the entropy pools used for many features of Linux
56145+ and grsecurity will be doubled in size. Since several grsecurity
56146+ features use additional randomness, it is recommended that you say Y
56147+ here. Saying Y here has a similar effect as modifying
56148+ /proc/sys/kernel/random/poolsize.
56149+
56150+config GRKERNSEC_BLACKHOLE
56151+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56152+ depends on NET
56153+ help
56154+ If you say Y here, neither TCP resets nor ICMP
56155+ destination-unreachable packets will be sent in response to packets
56156+ sent to ports for which no associated listening process exists.
56157+ This feature supports both IPV4 and IPV6 and exempts the
56158+ loopback interface from blackholing. Enabling this feature
56159+ makes a host more resilient to DoS attacks and reduces network
56160+ visibility against scanners.
56161+
56162+ The blackhole feature as-implemented is equivalent to the FreeBSD
56163+ blackhole feature, as it prevents RST responses to all packets, not
56164+ just SYNs. Under most application behavior this causes no
56165+ problems, but applications (like haproxy) may not close certain
56166+ connections in a way that cleanly terminates them on the remote
56167+ end, leaving the remote host in LAST_ACK state. Because of this
56168+ side-effect and to prevent intentional LAST_ACK DoSes, this
56169+ feature also adds automatic mitigation against such attacks.
56170+ The mitigation drastically reduces the amount of time a socket
56171+ can spend in LAST_ACK state. If you're using haproxy and not
56172+ all servers it connects to have this option enabled, consider
56173+ disabling this feature on the haproxy host.
56174+
56175+ If the sysctl option is enabled, two sysctl options with names
56176+ "ip_blackhole" and "lastack_retries" will be created.
56177+ While "ip_blackhole" takes the standard zero/non-zero on/off
56178+ toggle, "lastack_retries" uses the same kinds of values as
56179+ "tcp_retries1" and "tcp_retries2". The default value of 4
56180+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56181+ state.
56182+
56183+config GRKERNSEC_SOCKET
56184+ bool "Socket restrictions"
56185+ depends on NET
56186+ help
56187+ If you say Y here, you will be able to choose from several options.
56188+ If you assign a GID on your system and add it to the supplementary
56189+ groups of users you want to restrict socket access to, this patch
56190+ will perform up to three things, based on the option(s) you choose.
56191+
56192+config GRKERNSEC_SOCKET_ALL
56193+ bool "Deny any sockets to group"
56194+ depends on GRKERNSEC_SOCKET
56195+ help
56196+ If you say Y here, you will be able to choose a GID whose users will
56197+ be unable to connect to other hosts from your machine or run server
56198+ applications from your machine. If the sysctl option is enabled, a
56199+ sysctl option with name "socket_all" is created.
56200+
56201+config GRKERNSEC_SOCKET_ALL_GID
56202+ int "GID to deny all sockets for"
56203+ depends on GRKERNSEC_SOCKET_ALL
56204+ default 1004
56205+ help
56206+ Here you can choose the GID to disable socket access for. Remember to
56207+ add the users you want socket access disabled for to the GID
56208+ specified here. If the sysctl option is enabled, a sysctl option
56209+ with name "socket_all_gid" is created.
56210+
56211+config GRKERNSEC_SOCKET_CLIENT
56212+ bool "Deny client sockets to group"
56213+ depends on GRKERNSEC_SOCKET
56214+ help
56215+ If you say Y here, you will be able to choose a GID whose users will
56216+ be unable to connect to other hosts from your machine, but will be
56217+ able to run servers. If this option is enabled, all users in the group
56218+ you specify will have to use passive mode when initiating ftp transfers
56219+ from the shell on your machine. If the sysctl option is enabled, a
56220+ sysctl option with name "socket_client" is created.
56221+
56222+config GRKERNSEC_SOCKET_CLIENT_GID
56223+ int "GID to deny client sockets for"
56224+ depends on GRKERNSEC_SOCKET_CLIENT
56225+ default 1003
56226+ help
56227+ Here you can choose the GID to disable client socket access for.
56228+ Remember to add the users you want client socket access disabled for to
56229+ the GID specified here. If the sysctl option is enabled, a sysctl
56230+ option with name "socket_client_gid" is created.
56231+
56232+config GRKERNSEC_SOCKET_SERVER
56233+ bool "Deny server sockets to group"
56234+ depends on GRKERNSEC_SOCKET
56235+ help
56236+ If you say Y here, you will be able to choose a GID whose users will
56237+ be unable to run server applications from your machine. If the sysctl
56238+ option is enabled, a sysctl option with name "socket_server" is created.
56239+
56240+config GRKERNSEC_SOCKET_SERVER_GID
56241+ int "GID to deny server sockets for"
56242+ depends on GRKERNSEC_SOCKET_SERVER
56243+ default 1002
56244+ help
56245+ Here you can choose the GID to disable server socket access for.
56246+ Remember to add the users you want server socket access disabled for to
56247+ the GID specified here. If the sysctl option is enabled, a sysctl
56248+ option with name "socket_server_gid" is created.
56249+
56250+endmenu
56251+menu "Sysctl support"
56252+depends on GRKERNSEC && SYSCTL
56253+
56254+config GRKERNSEC_SYSCTL
56255+ bool "Sysctl support"
56256+ help
56257+ If you say Y here, you will be able to change the options that
56258+ grsecurity runs with at bootup, without having to recompile your
56259+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56260+ to enable (1) or disable (0) various features. All the sysctl entries
56261+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56262+ All features enabled in the kernel configuration are disabled at boot
56263+ if you do not say Y to the "Turn on features by default" option.
56264+ All options should be set at startup, and the grsec_lock entry should
56265+ be set to a non-zero value after all the options are set.
56266+ *THIS IS EXTREMELY IMPORTANT*
56267+
56268+config GRKERNSEC_SYSCTL_DISTRO
56269+ bool "Extra sysctl support for distro makers (READ HELP)"
56270+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56271+ help
56272+ If you say Y here, additional sysctl options will be created
56273+ for features that affect processes running as root. Therefore,
56274+ it is critical when using this option that the grsec_lock entry be
56275+ enabled after boot. Only distros with prebuilt kernel packages
56276+ with this option enabled that can ensure grsec_lock is enabled
56277+ after boot should use this option.
56278+ *Failure to set grsec_lock after boot makes all grsec features
56279+ this option covers useless*
56280+
56281+ Currently this option creates the following sysctl entries:
56282+ "Disable Privileged I/O": "disable_priv_io"
56283+
56284+config GRKERNSEC_SYSCTL_ON
56285+ bool "Turn on features by default"
56286+ depends on GRKERNSEC_SYSCTL
56287+ help
56288+ If you say Y here, instead of having all features enabled in the
56289+ kernel configuration disabled at boot time, the features will be
56290+ enabled at boot time. It is recommended you say Y here unless
56291+ there is some reason you would want all sysctl-tunable features to
56292+ be disabled by default. As mentioned elsewhere, it is important
56293+ to enable the grsec_lock entry once you have finished modifying
56294+ the sysctl entries.
56295+
56296+endmenu
56297+menu "Logging Options"
56298+depends on GRKERNSEC
56299+
56300+config GRKERNSEC_FLOODTIME
56301+ int "Seconds in between log messages (minimum)"
56302+ default 10
56303+ help
56304+ This option allows you to enforce the number of seconds between
56305+ grsecurity log messages. The default should be suitable for most
56306+ people, however, if you choose to change it, choose a value small enough
56307+ to allow informative logs to be produced, but large enough to
56308+ prevent flooding.
56309+
56310+config GRKERNSEC_FLOODBURST
56311+ int "Number of messages in a burst (maximum)"
56312+ default 6
56313+ help
56314+ This option allows you to choose the maximum number of messages allowed
56315+ within the flood time interval you chose in a separate option. The
56316+ default should be suitable for most people, however if you find that
56317+ many of your logs are being interpreted as flooding, you may want to
56318+ raise this value.
56319+
56320+endmenu
56321+
56322+endmenu
56323diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56324new file mode 100644
56325index 0000000..1b9afa9
56326--- /dev/null
56327+++ b/grsecurity/Makefile
56328@@ -0,0 +1,38 @@
56329+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56330+# during 2001-2009 it has been completely redesigned by Brad Spengler
56331+# into an RBAC system
56332+#
56333+# All code in this directory and various hooks inserted throughout the kernel
56334+# are copyright Brad Spengler - Open Source Security, Inc., and released
56335+# under the GPL v2 or higher
56336+
56337+KBUILD_CFLAGS += -Werror
56338+
56339+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56340+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56341+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56342+
56343+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56344+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56345+ gracl_learn.o grsec_log.o
56346+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56347+
56348+ifdef CONFIG_NET
56349+obj-y += grsec_sock.o
56350+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56351+endif
56352+
56353+ifndef CONFIG_GRKERNSEC
56354+obj-y += grsec_disabled.o
56355+endif
56356+
56357+ifdef CONFIG_GRKERNSEC_HIDESYM
56358+extra-y := grsec_hidesym.o
56359+$(obj)/grsec_hidesym.o:
56360+ @-chmod -f 500 /boot
56361+ @-chmod -f 500 /lib/modules
56362+ @-chmod -f 500 /lib64/modules
56363+ @-chmod -f 500 /lib32/modules
56364+ @-chmod -f 700 .
56365+ @echo ' grsec: protected kernel image paths'
56366+endif
56367diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56368new file mode 100644
56369index 0000000..b1c4f4d
56370--- /dev/null
56371+++ b/grsecurity/gracl.c
56372@@ -0,0 +1,4149 @@
56373+#include <linux/kernel.h>
56374+#include <linux/module.h>
56375+#include <linux/sched.h>
56376+#include <linux/mm.h>
56377+#include <linux/file.h>
56378+#include <linux/fs.h>
56379+#include <linux/namei.h>
56380+#include <linux/mount.h>
56381+#include <linux/tty.h>
56382+#include <linux/proc_fs.h>
56383+#include <linux/smp_lock.h>
56384+#include <linux/slab.h>
56385+#include <linux/vmalloc.h>
56386+#include <linux/types.h>
56387+#include <linux/sysctl.h>
56388+#include <linux/netdevice.h>
56389+#include <linux/ptrace.h>
56390+#include <linux/gracl.h>
56391+#include <linux/gralloc.h>
56392+#include <linux/security.h>
56393+#include <linux/grinternal.h>
56394+#include <linux/pid_namespace.h>
56395+#include <linux/fdtable.h>
56396+#include <linux/percpu.h>
56397+
56398+#include <asm/uaccess.h>
56399+#include <asm/errno.h>
56400+#include <asm/mman.h>
56401+
56402+static struct acl_role_db acl_role_set;
56403+static struct name_db name_set;
56404+static struct inodev_db inodev_set;
56405+
56406+/* for keeping track of userspace pointers used for subjects, so we
56407+ can share references in the kernel as well
56408+*/
56409+
56410+static struct dentry *real_root;
56411+static struct vfsmount *real_root_mnt;
56412+
56413+static struct acl_subj_map_db subj_map_set;
56414+
56415+static struct acl_role_label *default_role;
56416+
56417+static struct acl_role_label *role_list;
56418+
56419+static u16 acl_sp_role_value;
56420+
56421+extern char *gr_shared_page[4];
56422+static DEFINE_MUTEX(gr_dev_mutex);
56423+DEFINE_RWLOCK(gr_inode_lock);
56424+
56425+struct gr_arg *gr_usermode;
56426+
56427+static unsigned int gr_status __read_only = GR_STATUS_INIT;
56428+
56429+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56430+extern void gr_clear_learn_entries(void);
56431+
56432+#ifdef CONFIG_GRKERNSEC_RESLOG
56433+extern void gr_log_resource(const struct task_struct *task,
56434+ const int res, const unsigned long wanted, const int gt);
56435+#endif
56436+
56437+unsigned char *gr_system_salt;
56438+unsigned char *gr_system_sum;
56439+
56440+static struct sprole_pw **acl_special_roles = NULL;
56441+static __u16 num_sprole_pws = 0;
56442+
56443+static struct acl_role_label *kernel_role = NULL;
56444+
56445+static unsigned int gr_auth_attempts = 0;
56446+static unsigned long gr_auth_expires = 0UL;
56447+
56448+#ifdef CONFIG_NET
56449+extern struct vfsmount *sock_mnt;
56450+#endif
56451+extern struct vfsmount *pipe_mnt;
56452+extern struct vfsmount *shm_mnt;
56453+#ifdef CONFIG_HUGETLBFS
56454+extern struct vfsmount *hugetlbfs_vfsmount;
56455+#endif
56456+
56457+static struct acl_object_label *fakefs_obj_rw;
56458+static struct acl_object_label *fakefs_obj_rwx;
56459+
56460+extern int gr_init_uidset(void);
56461+extern void gr_free_uidset(void);
56462+extern void gr_remove_uid(uid_t uid);
56463+extern int gr_find_uid(uid_t uid);
56464+
56465+__inline__ int
56466+gr_acl_is_enabled(void)
56467+{
56468+ return (gr_status & GR_READY);
56469+}
56470+
56471+#ifdef CONFIG_BTRFS_FS
56472+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56473+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56474+#endif
56475+
56476+static inline dev_t __get_dev(const struct dentry *dentry)
56477+{
56478+#ifdef CONFIG_BTRFS_FS
56479+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56480+ return get_btrfs_dev_from_inode(dentry->d_inode);
56481+ else
56482+#endif
56483+ return dentry->d_inode->i_sb->s_dev;
56484+}
56485+
56486+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56487+{
56488+ return __get_dev(dentry);
56489+}
56490+
56491+static char gr_task_roletype_to_char(struct task_struct *task)
56492+{
56493+ switch (task->role->roletype &
56494+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56495+ GR_ROLE_SPECIAL)) {
56496+ case GR_ROLE_DEFAULT:
56497+ return 'D';
56498+ case GR_ROLE_USER:
56499+ return 'U';
56500+ case GR_ROLE_GROUP:
56501+ return 'G';
56502+ case GR_ROLE_SPECIAL:
56503+ return 'S';
56504+ }
56505+
56506+ return 'X';
56507+}
56508+
56509+char gr_roletype_to_char(void)
56510+{
56511+ return gr_task_roletype_to_char(current);
56512+}
56513+
56514+__inline__ int
56515+gr_acl_tpe_check(void)
56516+{
56517+ if (unlikely(!(gr_status & GR_READY)))
56518+ return 0;
56519+ if (current->role->roletype & GR_ROLE_TPE)
56520+ return 1;
56521+ else
56522+ return 0;
56523+}
56524+
56525+int
56526+gr_handle_rawio(const struct inode *inode)
56527+{
56528+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56529+ if (inode && S_ISBLK(inode->i_mode) &&
56530+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56531+ !capable(CAP_SYS_RAWIO))
56532+ return 1;
56533+#endif
56534+ return 0;
56535+}
56536+
56537+static int
56538+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56539+{
56540+ if (likely(lena != lenb))
56541+ return 0;
56542+
56543+ return !memcmp(a, b, lena);
56544+}
56545+
56546+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56547+{
56548+ *buflen -= namelen;
56549+ if (*buflen < 0)
56550+ return -ENAMETOOLONG;
56551+ *buffer -= namelen;
56552+ memcpy(*buffer, str, namelen);
56553+ return 0;
56554+}
56555+
56556+/* this must be called with vfsmount_lock and dcache_lock held */
56557+
56558+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56559+ struct dentry *root, struct vfsmount *rootmnt,
56560+ char *buffer, int buflen)
56561+{
56562+ char * end = buffer+buflen;
56563+ char * retval;
56564+ int namelen;
56565+
56566+ *--end = '\0';
56567+ buflen--;
56568+
56569+ if (buflen < 1)
56570+ goto Elong;
56571+ /* Get '/' right */
56572+ retval = end-1;
56573+ *retval = '/';
56574+
56575+ for (;;) {
56576+ struct dentry * parent;
56577+
56578+ if (dentry == root && vfsmnt == rootmnt)
56579+ break;
56580+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56581+ /* Global root? */
56582+ if (vfsmnt->mnt_parent == vfsmnt)
56583+ goto global_root;
56584+ dentry = vfsmnt->mnt_mountpoint;
56585+ vfsmnt = vfsmnt->mnt_parent;
56586+ continue;
56587+ }
56588+ parent = dentry->d_parent;
56589+ prefetch(parent);
56590+ namelen = dentry->d_name.len;
56591+ buflen -= namelen + 1;
56592+ if (buflen < 0)
56593+ goto Elong;
56594+ end -= namelen;
56595+ memcpy(end, dentry->d_name.name, namelen);
56596+ *--end = '/';
56597+ retval = end;
56598+ dentry = parent;
56599+ }
56600+
56601+out:
56602+ return retval;
56603+
56604+global_root:
56605+ namelen = dentry->d_name.len;
56606+ buflen -= namelen;
56607+ if (buflen < 0)
56608+ goto Elong;
56609+ retval -= namelen-1; /* hit the slash */
56610+ memcpy(retval, dentry->d_name.name, namelen);
56611+ goto out;
56612+Elong:
56613+ retval = ERR_PTR(-ENAMETOOLONG);
56614+ goto out;
56615+}
56616+
56617+static char *
56618+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56619+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56620+{
56621+ char *retval;
56622+
56623+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56624+ if (unlikely(IS_ERR(retval)))
56625+ retval = strcpy(buf, "<path too long>");
56626+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56627+ retval[1] = '\0';
56628+
56629+ return retval;
56630+}
56631+
56632+static char *
56633+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56634+ char *buf, int buflen)
56635+{
56636+ char *res;
56637+
56638+ /* we can use real_root, real_root_mnt, because this is only called
56639+ by the RBAC system */
56640+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56641+
56642+ return res;
56643+}
56644+
56645+static char *
56646+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56647+ char *buf, int buflen)
56648+{
56649+ char *res;
56650+ struct dentry *root;
56651+ struct vfsmount *rootmnt;
56652+ struct task_struct *reaper = &init_task;
56653+
56654+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56655+ read_lock(&reaper->fs->lock);
56656+ root = dget(reaper->fs->root.dentry);
56657+ rootmnt = mntget(reaper->fs->root.mnt);
56658+ read_unlock(&reaper->fs->lock);
56659+
56660+ spin_lock(&dcache_lock);
56661+ spin_lock(&vfsmount_lock);
56662+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56663+ spin_unlock(&vfsmount_lock);
56664+ spin_unlock(&dcache_lock);
56665+
56666+ dput(root);
56667+ mntput(rootmnt);
56668+ return res;
56669+}
56670+
56671+static char *
56672+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56673+{
56674+ char *ret;
56675+ spin_lock(&dcache_lock);
56676+ spin_lock(&vfsmount_lock);
56677+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56678+ PAGE_SIZE);
56679+ spin_unlock(&vfsmount_lock);
56680+ spin_unlock(&dcache_lock);
56681+ return ret;
56682+}
56683+
56684+static char *
56685+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56686+{
56687+ char *ret;
56688+ char *buf;
56689+ int buflen;
56690+
56691+ spin_lock(&dcache_lock);
56692+ spin_lock(&vfsmount_lock);
56693+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56694+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56695+ buflen = (int)(ret - buf);
56696+ if (buflen >= 5)
56697+ prepend(&ret, &buflen, "/proc", 5);
56698+ else
56699+ ret = strcpy(buf, "<path too long>");
56700+ spin_unlock(&vfsmount_lock);
56701+ spin_unlock(&dcache_lock);
56702+ return ret;
56703+}
56704+
56705+char *
56706+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56707+{
56708+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56709+ PAGE_SIZE);
56710+}
56711+
56712+char *
56713+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56714+{
56715+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56716+ PAGE_SIZE);
56717+}
56718+
56719+char *
56720+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56721+{
56722+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56723+ PAGE_SIZE);
56724+}
56725+
56726+char *
56727+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56728+{
56729+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56730+ PAGE_SIZE);
56731+}
56732+
56733+char *
56734+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56735+{
56736+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56737+ PAGE_SIZE);
56738+}
56739+
56740+__inline__ __u32
56741+to_gr_audit(const __u32 reqmode)
56742+{
56743+ /* masks off auditable permission flags, then shifts them to create
56744+ auditing flags, and adds the special case of append auditing if
56745+ we're requesting write */
56746+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56747+}
56748+
56749+struct acl_subject_label *
56750+lookup_subject_map(const struct acl_subject_label *userp)
56751+{
56752+ unsigned int index = shash(userp, subj_map_set.s_size);
56753+ struct subject_map *match;
56754+
56755+ match = subj_map_set.s_hash[index];
56756+
56757+ while (match && match->user != userp)
56758+ match = match->next;
56759+
56760+ if (match != NULL)
56761+ return match->kernel;
56762+ else
56763+ return NULL;
56764+}
56765+
56766+static void
56767+insert_subj_map_entry(struct subject_map *subjmap)
56768+{
56769+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56770+ struct subject_map **curr;
56771+
56772+ subjmap->prev = NULL;
56773+
56774+ curr = &subj_map_set.s_hash[index];
56775+ if (*curr != NULL)
56776+ (*curr)->prev = subjmap;
56777+
56778+ subjmap->next = *curr;
56779+ *curr = subjmap;
56780+
56781+ return;
56782+}
56783+
56784+static struct acl_role_label *
56785+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56786+ const gid_t gid)
56787+{
56788+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56789+ struct acl_role_label *match;
56790+ struct role_allowed_ip *ipp;
56791+ unsigned int x;
56792+ u32 curr_ip = task->signal->curr_ip;
56793+
56794+ task->signal->saved_ip = curr_ip;
56795+
56796+ match = acl_role_set.r_hash[index];
56797+
56798+ while (match) {
56799+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56800+ for (x = 0; x < match->domain_child_num; x++) {
56801+ if (match->domain_children[x] == uid)
56802+ goto found;
56803+ }
56804+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56805+ break;
56806+ match = match->next;
56807+ }
56808+found:
56809+ if (match == NULL) {
56810+ try_group:
56811+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56812+ match = acl_role_set.r_hash[index];
56813+
56814+ while (match) {
56815+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56816+ for (x = 0; x < match->domain_child_num; x++) {
56817+ if (match->domain_children[x] == gid)
56818+ goto found2;
56819+ }
56820+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56821+ break;
56822+ match = match->next;
56823+ }
56824+found2:
56825+ if (match == NULL)
56826+ match = default_role;
56827+ if (match->allowed_ips == NULL)
56828+ return match;
56829+ else {
56830+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56831+ if (likely
56832+ ((ntohl(curr_ip) & ipp->netmask) ==
56833+ (ntohl(ipp->addr) & ipp->netmask)))
56834+ return match;
56835+ }
56836+ match = default_role;
56837+ }
56838+ } else if (match->allowed_ips == NULL) {
56839+ return match;
56840+ } else {
56841+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56842+ if (likely
56843+ ((ntohl(curr_ip) & ipp->netmask) ==
56844+ (ntohl(ipp->addr) & ipp->netmask)))
56845+ return match;
56846+ }
56847+ goto try_group;
56848+ }
56849+
56850+ return match;
56851+}
56852+
56853+struct acl_subject_label *
56854+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56855+ const struct acl_role_label *role)
56856+{
56857+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
56858+ struct acl_subject_label *match;
56859+
56860+ match = role->subj_hash[index];
56861+
56862+ while (match && (match->inode != ino || match->device != dev ||
56863+ (match->mode & GR_DELETED))) {
56864+ match = match->next;
56865+ }
56866+
56867+ if (match && !(match->mode & GR_DELETED))
56868+ return match;
56869+ else
56870+ return NULL;
56871+}
56872+
56873+struct acl_subject_label *
56874+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56875+ const struct acl_role_label *role)
56876+{
56877+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
56878+ struct acl_subject_label *match;
56879+
56880+ match = role->subj_hash[index];
56881+
56882+ while (match && (match->inode != ino || match->device != dev ||
56883+ !(match->mode & GR_DELETED))) {
56884+ match = match->next;
56885+ }
56886+
56887+ if (match && (match->mode & GR_DELETED))
56888+ return match;
56889+ else
56890+ return NULL;
56891+}
56892+
56893+static struct acl_object_label *
56894+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56895+ const struct acl_subject_label *subj)
56896+{
56897+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56898+ struct acl_object_label *match;
56899+
56900+ match = subj->obj_hash[index];
56901+
56902+ while (match && (match->inode != ino || match->device != dev ||
56903+ (match->mode & GR_DELETED))) {
56904+ match = match->next;
56905+ }
56906+
56907+ if (match && !(match->mode & GR_DELETED))
56908+ return match;
56909+ else
56910+ return NULL;
56911+}
56912+
56913+static struct acl_object_label *
56914+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56915+ const struct acl_subject_label *subj)
56916+{
56917+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56918+ struct acl_object_label *match;
56919+
56920+ match = subj->obj_hash[index];
56921+
56922+ while (match && (match->inode != ino || match->device != dev ||
56923+ !(match->mode & GR_DELETED))) {
56924+ match = match->next;
56925+ }
56926+
56927+ if (match && (match->mode & GR_DELETED))
56928+ return match;
56929+
56930+ match = subj->obj_hash[index];
56931+
56932+ while (match && (match->inode != ino || match->device != dev ||
56933+ (match->mode & GR_DELETED))) {
56934+ match = match->next;
56935+ }
56936+
56937+ if (match && !(match->mode & GR_DELETED))
56938+ return match;
56939+ else
56940+ return NULL;
56941+}
56942+
56943+static struct name_entry *
56944+lookup_name_entry(const char *name)
56945+{
56946+ unsigned int len = strlen(name);
56947+ unsigned int key = full_name_hash(name, len);
56948+ unsigned int index = key % name_set.n_size;
56949+ struct name_entry *match;
56950+
56951+ match = name_set.n_hash[index];
56952+
56953+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56954+ match = match->next;
56955+
56956+ return match;
56957+}
56958+
56959+static struct name_entry *
56960+lookup_name_entry_create(const char *name)
56961+{
56962+ unsigned int len = strlen(name);
56963+ unsigned int key = full_name_hash(name, len);
56964+ unsigned int index = key % name_set.n_size;
56965+ struct name_entry *match;
56966+
56967+ match = name_set.n_hash[index];
56968+
56969+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56970+ !match->deleted))
56971+ match = match->next;
56972+
56973+ if (match && match->deleted)
56974+ return match;
56975+
56976+ match = name_set.n_hash[index];
56977+
56978+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56979+ match->deleted))
56980+ match = match->next;
56981+
56982+ if (match && !match->deleted)
56983+ return match;
56984+ else
56985+ return NULL;
56986+}
56987+
56988+static struct inodev_entry *
56989+lookup_inodev_entry(const ino_t ino, const dev_t dev)
56990+{
56991+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
56992+ struct inodev_entry *match;
56993+
56994+ match = inodev_set.i_hash[index];
56995+
56996+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
56997+ match = match->next;
56998+
56999+ return match;
57000+}
57001+
57002+static void
57003+insert_inodev_entry(struct inodev_entry *entry)
57004+{
57005+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
57006+ inodev_set.i_size);
57007+ struct inodev_entry **curr;
57008+
57009+ entry->prev = NULL;
57010+
57011+ curr = &inodev_set.i_hash[index];
57012+ if (*curr != NULL)
57013+ (*curr)->prev = entry;
57014+
57015+ entry->next = *curr;
57016+ *curr = entry;
57017+
57018+ return;
57019+}
57020+
57021+static void
57022+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57023+{
57024+ unsigned int index =
57025+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57026+ struct acl_role_label **curr;
57027+ struct acl_role_label *tmp;
57028+
57029+ curr = &acl_role_set.r_hash[index];
57030+
57031+ /* if role was already inserted due to domains and already has
57032+ a role in the same bucket as it attached, then we need to
57033+ combine these two buckets
57034+ */
57035+ if (role->next) {
57036+ tmp = role->next;
57037+ while (tmp->next)
57038+ tmp = tmp->next;
57039+ tmp->next = *curr;
57040+ } else
57041+ role->next = *curr;
57042+ *curr = role;
57043+
57044+ return;
57045+}
57046+
57047+static void
57048+insert_acl_role_label(struct acl_role_label *role)
57049+{
57050+ int i;
57051+
57052+ if (role_list == NULL) {
57053+ role_list = role;
57054+ role->prev = NULL;
57055+ } else {
57056+ role->prev = role_list;
57057+ role_list = role;
57058+ }
57059+
57060+ /* used for hash chains */
57061+ role->next = NULL;
57062+
57063+ if (role->roletype & GR_ROLE_DOMAIN) {
57064+ for (i = 0; i < role->domain_child_num; i++)
57065+ __insert_acl_role_label(role, role->domain_children[i]);
57066+ } else
57067+ __insert_acl_role_label(role, role->uidgid);
57068+}
57069+
57070+static int
57071+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57072+{
57073+ struct name_entry **curr, *nentry;
57074+ struct inodev_entry *ientry;
57075+ unsigned int len = strlen(name);
57076+ unsigned int key = full_name_hash(name, len);
57077+ unsigned int index = key % name_set.n_size;
57078+
57079+ curr = &name_set.n_hash[index];
57080+
57081+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57082+ curr = &((*curr)->next);
57083+
57084+ if (*curr != NULL)
57085+ return 1;
57086+
57087+ nentry = acl_alloc(sizeof (struct name_entry));
57088+ if (nentry == NULL)
57089+ return 0;
57090+ ientry = acl_alloc(sizeof (struct inodev_entry));
57091+ if (ientry == NULL)
57092+ return 0;
57093+ ientry->nentry = nentry;
57094+
57095+ nentry->key = key;
57096+ nentry->name = name;
57097+ nentry->inode = inode;
57098+ nentry->device = device;
57099+ nentry->len = len;
57100+ nentry->deleted = deleted;
57101+
57102+ nentry->prev = NULL;
57103+ curr = &name_set.n_hash[index];
57104+ if (*curr != NULL)
57105+ (*curr)->prev = nentry;
57106+ nentry->next = *curr;
57107+ *curr = nentry;
57108+
57109+ /* insert us into the table searchable by inode/dev */
57110+ insert_inodev_entry(ientry);
57111+
57112+ return 1;
57113+}
57114+
57115+static void
57116+insert_acl_obj_label(struct acl_object_label *obj,
57117+ struct acl_subject_label *subj)
57118+{
57119+ unsigned int index =
57120+ fhash(obj->inode, obj->device, subj->obj_hash_size);
57121+ struct acl_object_label **curr;
57122+
57123+
57124+ obj->prev = NULL;
57125+
57126+ curr = &subj->obj_hash[index];
57127+ if (*curr != NULL)
57128+ (*curr)->prev = obj;
57129+
57130+ obj->next = *curr;
57131+ *curr = obj;
57132+
57133+ return;
57134+}
57135+
57136+static void
57137+insert_acl_subj_label(struct acl_subject_label *obj,
57138+ struct acl_role_label *role)
57139+{
57140+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
57141+ struct acl_subject_label **curr;
57142+
57143+ obj->prev = NULL;
57144+
57145+ curr = &role->subj_hash[index];
57146+ if (*curr != NULL)
57147+ (*curr)->prev = obj;
57148+
57149+ obj->next = *curr;
57150+ *curr = obj;
57151+
57152+ return;
57153+}
57154+
57155+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57156+
57157+static void *
57158+create_table(__u32 * len, int elementsize)
57159+{
57160+ unsigned int table_sizes[] = {
57161+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57162+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57163+ 4194301, 8388593, 16777213, 33554393, 67108859
57164+ };
57165+ void *newtable = NULL;
57166+ unsigned int pwr = 0;
57167+
57168+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57169+ table_sizes[pwr] <= *len)
57170+ pwr++;
57171+
57172+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57173+ return newtable;
57174+
57175+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57176+ newtable =
57177+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57178+ else
57179+ newtable = vmalloc(table_sizes[pwr] * elementsize);
57180+
57181+ *len = table_sizes[pwr];
57182+
57183+ return newtable;
57184+}
57185+
57186+static int
57187+init_variables(const struct gr_arg *arg)
57188+{
57189+ struct task_struct *reaper = &init_task;
57190+ unsigned int stacksize;
57191+
57192+ subj_map_set.s_size = arg->role_db.num_subjects;
57193+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57194+ name_set.n_size = arg->role_db.num_objects;
57195+ inodev_set.i_size = arg->role_db.num_objects;
57196+
57197+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
57198+ !name_set.n_size || !inodev_set.i_size)
57199+ return 1;
57200+
57201+ if (!gr_init_uidset())
57202+ return 1;
57203+
57204+ /* set up the stack that holds allocation info */
57205+
57206+ stacksize = arg->role_db.num_pointers + 5;
57207+
57208+ if (!acl_alloc_stack_init(stacksize))
57209+ return 1;
57210+
57211+ /* grab reference for the real root dentry and vfsmount */
57212+ read_lock(&reaper->fs->lock);
57213+ real_root = dget(reaper->fs->root.dentry);
57214+ real_root_mnt = mntget(reaper->fs->root.mnt);
57215+ read_unlock(&reaper->fs->lock);
57216+
57217+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57218+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
57219+#endif
57220+
57221+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57222+ if (fakefs_obj_rw == NULL)
57223+ return 1;
57224+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57225+
57226+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57227+ if (fakefs_obj_rwx == NULL)
57228+ return 1;
57229+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57230+
57231+ subj_map_set.s_hash =
57232+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57233+ acl_role_set.r_hash =
57234+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57235+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57236+ inodev_set.i_hash =
57237+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57238+
57239+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57240+ !name_set.n_hash || !inodev_set.i_hash)
57241+ return 1;
57242+
57243+ memset(subj_map_set.s_hash, 0,
57244+ sizeof(struct subject_map *) * subj_map_set.s_size);
57245+ memset(acl_role_set.r_hash, 0,
57246+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
57247+ memset(name_set.n_hash, 0,
57248+ sizeof (struct name_entry *) * name_set.n_size);
57249+ memset(inodev_set.i_hash, 0,
57250+ sizeof (struct inodev_entry *) * inodev_set.i_size);
57251+
57252+ return 0;
57253+}
57254+
57255+/* free information not needed after startup
57256+ currently contains user->kernel pointer mappings for subjects
57257+*/
57258+
57259+static void
57260+free_init_variables(void)
57261+{
57262+ __u32 i;
57263+
57264+ if (subj_map_set.s_hash) {
57265+ for (i = 0; i < subj_map_set.s_size; i++) {
57266+ if (subj_map_set.s_hash[i]) {
57267+ kfree(subj_map_set.s_hash[i]);
57268+ subj_map_set.s_hash[i] = NULL;
57269+ }
57270+ }
57271+
57272+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57273+ PAGE_SIZE)
57274+ kfree(subj_map_set.s_hash);
57275+ else
57276+ vfree(subj_map_set.s_hash);
57277+ }
57278+
57279+ return;
57280+}
57281+
57282+static void
57283+free_variables(void)
57284+{
57285+ struct acl_subject_label *s;
57286+ struct acl_role_label *r;
57287+ struct task_struct *task, *task2;
57288+ unsigned int x;
57289+
57290+ gr_clear_learn_entries();
57291+
57292+ read_lock(&tasklist_lock);
57293+ do_each_thread(task2, task) {
57294+ task->acl_sp_role = 0;
57295+ task->acl_role_id = 0;
57296+ task->acl = NULL;
57297+ task->role = NULL;
57298+ } while_each_thread(task2, task);
57299+ read_unlock(&tasklist_lock);
57300+
57301+ /* release the reference to the real root dentry and vfsmount */
57302+ if (real_root)
57303+ dput(real_root);
57304+ real_root = NULL;
57305+ if (real_root_mnt)
57306+ mntput(real_root_mnt);
57307+ real_root_mnt = NULL;
57308+
57309+ /* free all object hash tables */
57310+
57311+ FOR_EACH_ROLE_START(r)
57312+ if (r->subj_hash == NULL)
57313+ goto next_role;
57314+ FOR_EACH_SUBJECT_START(r, s, x)
57315+ if (s->obj_hash == NULL)
57316+ break;
57317+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57318+ kfree(s->obj_hash);
57319+ else
57320+ vfree(s->obj_hash);
57321+ FOR_EACH_SUBJECT_END(s, x)
57322+ FOR_EACH_NESTED_SUBJECT_START(r, s)
57323+ if (s->obj_hash == NULL)
57324+ break;
57325+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57326+ kfree(s->obj_hash);
57327+ else
57328+ vfree(s->obj_hash);
57329+ FOR_EACH_NESTED_SUBJECT_END(s)
57330+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57331+ kfree(r->subj_hash);
57332+ else
57333+ vfree(r->subj_hash);
57334+ r->subj_hash = NULL;
57335+next_role:
57336+ FOR_EACH_ROLE_END(r)
57337+
57338+ acl_free_all();
57339+
57340+ if (acl_role_set.r_hash) {
57341+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57342+ PAGE_SIZE)
57343+ kfree(acl_role_set.r_hash);
57344+ else
57345+ vfree(acl_role_set.r_hash);
57346+ }
57347+ if (name_set.n_hash) {
57348+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
57349+ PAGE_SIZE)
57350+ kfree(name_set.n_hash);
57351+ else
57352+ vfree(name_set.n_hash);
57353+ }
57354+
57355+ if (inodev_set.i_hash) {
57356+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57357+ PAGE_SIZE)
57358+ kfree(inodev_set.i_hash);
57359+ else
57360+ vfree(inodev_set.i_hash);
57361+ }
57362+
57363+ gr_free_uidset();
57364+
57365+ memset(&name_set, 0, sizeof (struct name_db));
57366+ memset(&inodev_set, 0, sizeof (struct inodev_db));
57367+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57368+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57369+
57370+ default_role = NULL;
57371+ role_list = NULL;
57372+
57373+ return;
57374+}
57375+
57376+static __u32
57377+count_user_objs(struct acl_object_label *userp)
57378+{
57379+ struct acl_object_label o_tmp;
57380+ __u32 num = 0;
57381+
57382+ while (userp) {
57383+ if (copy_from_user(&o_tmp, userp,
57384+ sizeof (struct acl_object_label)))
57385+ break;
57386+
57387+ userp = o_tmp.prev;
57388+ num++;
57389+ }
57390+
57391+ return num;
57392+}
57393+
57394+static struct acl_subject_label *
57395+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57396+
57397+static int
57398+copy_user_glob(struct acl_object_label *obj)
57399+{
57400+ struct acl_object_label *g_tmp, **guser;
57401+ unsigned int len;
57402+ char *tmp;
57403+
57404+ if (obj->globbed == NULL)
57405+ return 0;
57406+
57407+ guser = &obj->globbed;
57408+ while (*guser) {
57409+ g_tmp = (struct acl_object_label *)
57410+ acl_alloc(sizeof (struct acl_object_label));
57411+ if (g_tmp == NULL)
57412+ return -ENOMEM;
57413+
57414+ if (copy_from_user(g_tmp, *guser,
57415+ sizeof (struct acl_object_label)))
57416+ return -EFAULT;
57417+
57418+ len = strnlen_user(g_tmp->filename, PATH_MAX);
57419+
57420+ if (!len || len >= PATH_MAX)
57421+ return -EINVAL;
57422+
57423+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57424+ return -ENOMEM;
57425+
57426+ if (copy_from_user(tmp, g_tmp->filename, len))
57427+ return -EFAULT;
57428+ tmp[len-1] = '\0';
57429+ g_tmp->filename = tmp;
57430+
57431+ *guser = g_tmp;
57432+ guser = &(g_tmp->next);
57433+ }
57434+
57435+ return 0;
57436+}
57437+
57438+static int
57439+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57440+ struct acl_role_label *role)
57441+{
57442+ struct acl_object_label *o_tmp;
57443+ unsigned int len;
57444+ int ret;
57445+ char *tmp;
57446+
57447+ while (userp) {
57448+ if ((o_tmp = (struct acl_object_label *)
57449+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
57450+ return -ENOMEM;
57451+
57452+ if (copy_from_user(o_tmp, userp,
57453+ sizeof (struct acl_object_label)))
57454+ return -EFAULT;
57455+
57456+ userp = o_tmp->prev;
57457+
57458+ len = strnlen_user(o_tmp->filename, PATH_MAX);
57459+
57460+ if (!len || len >= PATH_MAX)
57461+ return -EINVAL;
57462+
57463+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57464+ return -ENOMEM;
57465+
57466+ if (copy_from_user(tmp, o_tmp->filename, len))
57467+ return -EFAULT;
57468+ tmp[len-1] = '\0';
57469+ o_tmp->filename = tmp;
57470+
57471+ insert_acl_obj_label(o_tmp, subj);
57472+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57473+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57474+ return -ENOMEM;
57475+
57476+ ret = copy_user_glob(o_tmp);
57477+ if (ret)
57478+ return ret;
57479+
57480+ if (o_tmp->nested) {
57481+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57482+ if (IS_ERR(o_tmp->nested))
57483+ return PTR_ERR(o_tmp->nested);
57484+
57485+ /* insert into nested subject list */
57486+ o_tmp->nested->next = role->hash->first;
57487+ role->hash->first = o_tmp->nested;
57488+ }
57489+ }
57490+
57491+ return 0;
57492+}
57493+
57494+static __u32
57495+count_user_subjs(struct acl_subject_label *userp)
57496+{
57497+ struct acl_subject_label s_tmp;
57498+ __u32 num = 0;
57499+
57500+ while (userp) {
57501+ if (copy_from_user(&s_tmp, userp,
57502+ sizeof (struct acl_subject_label)))
57503+ break;
57504+
57505+ userp = s_tmp.prev;
57506+ /* do not count nested subjects against this count, since
57507+ they are not included in the hash table, but are
57508+ attached to objects. We have already counted
57509+ the subjects in userspace for the allocation
57510+ stack
57511+ */
57512+ if (!(s_tmp.mode & GR_NESTED))
57513+ num++;
57514+ }
57515+
57516+ return num;
57517+}
57518+
57519+static int
57520+copy_user_allowedips(struct acl_role_label *rolep)
57521+{
57522+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57523+
57524+ ruserip = rolep->allowed_ips;
57525+
57526+ while (ruserip) {
57527+ rlast = rtmp;
57528+
57529+ if ((rtmp = (struct role_allowed_ip *)
57530+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57531+ return -ENOMEM;
57532+
57533+ if (copy_from_user(rtmp, ruserip,
57534+ sizeof (struct role_allowed_ip)))
57535+ return -EFAULT;
57536+
57537+ ruserip = rtmp->prev;
57538+
57539+ if (!rlast) {
57540+ rtmp->prev = NULL;
57541+ rolep->allowed_ips = rtmp;
57542+ } else {
57543+ rlast->next = rtmp;
57544+ rtmp->prev = rlast;
57545+ }
57546+
57547+ if (!ruserip)
57548+ rtmp->next = NULL;
57549+ }
57550+
57551+ return 0;
57552+}
57553+
57554+static int
57555+copy_user_transitions(struct acl_role_label *rolep)
57556+{
57557+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
57558+
57559+ unsigned int len;
57560+ char *tmp;
57561+
57562+ rusertp = rolep->transitions;
57563+
57564+ while (rusertp) {
57565+ rlast = rtmp;
57566+
57567+ if ((rtmp = (struct role_transition *)
57568+ acl_alloc(sizeof (struct role_transition))) == NULL)
57569+ return -ENOMEM;
57570+
57571+ if (copy_from_user(rtmp, rusertp,
57572+ sizeof (struct role_transition)))
57573+ return -EFAULT;
57574+
57575+ rusertp = rtmp->prev;
57576+
57577+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57578+
57579+ if (!len || len >= GR_SPROLE_LEN)
57580+ return -EINVAL;
57581+
57582+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57583+ return -ENOMEM;
57584+
57585+ if (copy_from_user(tmp, rtmp->rolename, len))
57586+ return -EFAULT;
57587+ tmp[len-1] = '\0';
57588+ rtmp->rolename = tmp;
57589+
57590+ if (!rlast) {
57591+ rtmp->prev = NULL;
57592+ rolep->transitions = rtmp;
57593+ } else {
57594+ rlast->next = rtmp;
57595+ rtmp->prev = rlast;
57596+ }
57597+
57598+ if (!rusertp)
57599+ rtmp->next = NULL;
57600+ }
57601+
57602+ return 0;
57603+}
57604+
57605+static struct acl_subject_label *
57606+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57607+{
57608+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57609+ unsigned int len;
57610+ char *tmp;
57611+ __u32 num_objs;
57612+ struct acl_ip_label **i_tmp, *i_utmp2;
57613+ struct gr_hash_struct ghash;
57614+ struct subject_map *subjmap;
57615+ unsigned int i_num;
57616+ int err;
57617+
57618+ s_tmp = lookup_subject_map(userp);
57619+
57620+ /* we've already copied this subject into the kernel, just return
57621+ the reference to it, and don't copy it over again
57622+ */
57623+ if (s_tmp)
57624+ return(s_tmp);
57625+
57626+ if ((s_tmp = (struct acl_subject_label *)
57627+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57628+ return ERR_PTR(-ENOMEM);
57629+
57630+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57631+ if (subjmap == NULL)
57632+ return ERR_PTR(-ENOMEM);
57633+
57634+ subjmap->user = userp;
57635+ subjmap->kernel = s_tmp;
57636+ insert_subj_map_entry(subjmap);
57637+
57638+ if (copy_from_user(s_tmp, userp,
57639+ sizeof (struct acl_subject_label)))
57640+ return ERR_PTR(-EFAULT);
57641+
57642+ len = strnlen_user(s_tmp->filename, PATH_MAX);
57643+
57644+ if (!len || len >= PATH_MAX)
57645+ return ERR_PTR(-EINVAL);
57646+
57647+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57648+ return ERR_PTR(-ENOMEM);
57649+
57650+ if (copy_from_user(tmp, s_tmp->filename, len))
57651+ return ERR_PTR(-EFAULT);
57652+ tmp[len-1] = '\0';
57653+ s_tmp->filename = tmp;
57654+
57655+ if (!strcmp(s_tmp->filename, "/"))
57656+ role->root_label = s_tmp;
57657+
57658+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57659+ return ERR_PTR(-EFAULT);
57660+
57661+ /* copy user and group transition tables */
57662+
57663+ if (s_tmp->user_trans_num) {
57664+ uid_t *uidlist;
57665+
57666+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57667+ if (uidlist == NULL)
57668+ return ERR_PTR(-ENOMEM);
57669+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57670+ return ERR_PTR(-EFAULT);
57671+
57672+ s_tmp->user_transitions = uidlist;
57673+ }
57674+
57675+ if (s_tmp->group_trans_num) {
57676+ gid_t *gidlist;
57677+
57678+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57679+ if (gidlist == NULL)
57680+ return ERR_PTR(-ENOMEM);
57681+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57682+ return ERR_PTR(-EFAULT);
57683+
57684+ s_tmp->group_transitions = gidlist;
57685+ }
57686+
57687+ /* set up object hash table */
57688+ num_objs = count_user_objs(ghash.first);
57689+
57690+ s_tmp->obj_hash_size = num_objs;
57691+ s_tmp->obj_hash =
57692+ (struct acl_object_label **)
57693+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57694+
57695+ if (!s_tmp->obj_hash)
57696+ return ERR_PTR(-ENOMEM);
57697+
57698+ memset(s_tmp->obj_hash, 0,
57699+ s_tmp->obj_hash_size *
57700+ sizeof (struct acl_object_label *));
57701+
57702+ /* add in objects */
57703+ err = copy_user_objs(ghash.first, s_tmp, role);
57704+
57705+ if (err)
57706+ return ERR_PTR(err);
57707+
57708+ /* set pointer for parent subject */
57709+ if (s_tmp->parent_subject) {
57710+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57711+
57712+ if (IS_ERR(s_tmp2))
57713+ return s_tmp2;
57714+
57715+ s_tmp->parent_subject = s_tmp2;
57716+ }
57717+
57718+ /* add in ip acls */
57719+
57720+ if (!s_tmp->ip_num) {
57721+ s_tmp->ips = NULL;
57722+ goto insert;
57723+ }
57724+
57725+ i_tmp =
57726+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57727+ sizeof (struct acl_ip_label *));
57728+
57729+ if (!i_tmp)
57730+ return ERR_PTR(-ENOMEM);
57731+
57732+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57733+ *(i_tmp + i_num) =
57734+ (struct acl_ip_label *)
57735+ acl_alloc(sizeof (struct acl_ip_label));
57736+ if (!*(i_tmp + i_num))
57737+ return ERR_PTR(-ENOMEM);
57738+
57739+ if (copy_from_user
57740+ (&i_utmp2, s_tmp->ips + i_num,
57741+ sizeof (struct acl_ip_label *)))
57742+ return ERR_PTR(-EFAULT);
57743+
57744+ if (copy_from_user
57745+ (*(i_tmp + i_num), i_utmp2,
57746+ sizeof (struct acl_ip_label)))
57747+ return ERR_PTR(-EFAULT);
57748+
57749+ if ((*(i_tmp + i_num))->iface == NULL)
57750+ continue;
57751+
57752+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57753+ if (!len || len >= IFNAMSIZ)
57754+ return ERR_PTR(-EINVAL);
57755+ tmp = acl_alloc(len);
57756+ if (tmp == NULL)
57757+ return ERR_PTR(-ENOMEM);
57758+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57759+ return ERR_PTR(-EFAULT);
57760+ (*(i_tmp + i_num))->iface = tmp;
57761+ }
57762+
57763+ s_tmp->ips = i_tmp;
57764+
57765+insert:
57766+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57767+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57768+ return ERR_PTR(-ENOMEM);
57769+
57770+ return s_tmp;
57771+}
57772+
57773+static int
57774+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57775+{
57776+ struct acl_subject_label s_pre;
57777+ struct acl_subject_label * ret;
57778+ int err;
57779+
57780+ while (userp) {
57781+ if (copy_from_user(&s_pre, userp,
57782+ sizeof (struct acl_subject_label)))
57783+ return -EFAULT;
57784+
57785+ /* do not add nested subjects here, add
57786+ while parsing objects
57787+ */
57788+
57789+ if (s_pre.mode & GR_NESTED) {
57790+ userp = s_pre.prev;
57791+ continue;
57792+ }
57793+
57794+ ret = do_copy_user_subj(userp, role);
57795+
57796+ err = PTR_ERR(ret);
57797+ if (IS_ERR(ret))
57798+ return err;
57799+
57800+ insert_acl_subj_label(ret, role);
57801+
57802+ userp = s_pre.prev;
57803+ }
57804+
57805+ return 0;
57806+}
57807+
57808+static int
57809+copy_user_acl(struct gr_arg *arg)
57810+{
57811+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57812+ struct sprole_pw *sptmp;
57813+ struct gr_hash_struct *ghash;
57814+ uid_t *domainlist;
57815+ unsigned int r_num;
57816+ unsigned int len;
57817+ char *tmp;
57818+ int err = 0;
57819+ __u16 i;
57820+ __u32 num_subjs;
57821+
57822+ /* we need a default and kernel role */
57823+ if (arg->role_db.num_roles < 2)
57824+ return -EINVAL;
57825+
57826+ /* copy special role authentication info from userspace */
57827+
57828+ num_sprole_pws = arg->num_sprole_pws;
57829+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57830+
57831+ if (!acl_special_roles) {
57832+ err = -ENOMEM;
57833+ goto cleanup;
57834+ }
57835+
57836+ for (i = 0; i < num_sprole_pws; i++) {
57837+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57838+ if (!sptmp) {
57839+ err = -ENOMEM;
57840+ goto cleanup;
57841+ }
57842+ if (copy_from_user(sptmp, arg->sprole_pws + i,
57843+ sizeof (struct sprole_pw))) {
57844+ err = -EFAULT;
57845+ goto cleanup;
57846+ }
57847+
57848+ len =
57849+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57850+
57851+ if (!len || len >= GR_SPROLE_LEN) {
57852+ err = -EINVAL;
57853+ goto cleanup;
57854+ }
57855+
57856+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
57857+ err = -ENOMEM;
57858+ goto cleanup;
57859+ }
57860+
57861+ if (copy_from_user(tmp, sptmp->rolename, len)) {
57862+ err = -EFAULT;
57863+ goto cleanup;
57864+ }
57865+ tmp[len-1] = '\0';
57866+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57867+ printk(KERN_ALERT "Copying special role %s\n", tmp);
57868+#endif
57869+ sptmp->rolename = tmp;
57870+ acl_special_roles[i] = sptmp;
57871+ }
57872+
57873+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57874+
57875+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57876+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
57877+
57878+ if (!r_tmp) {
57879+ err = -ENOMEM;
57880+ goto cleanup;
57881+ }
57882+
57883+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
57884+ sizeof (struct acl_role_label *))) {
57885+ err = -EFAULT;
57886+ goto cleanup;
57887+ }
57888+
57889+ if (copy_from_user(r_tmp, r_utmp2,
57890+ sizeof (struct acl_role_label))) {
57891+ err = -EFAULT;
57892+ goto cleanup;
57893+ }
57894+
57895+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57896+
57897+ if (!len || len >= PATH_MAX) {
57898+ err = -EINVAL;
57899+ goto cleanup;
57900+ }
57901+
57902+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
57903+ err = -ENOMEM;
57904+ goto cleanup;
57905+ }
57906+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
57907+ err = -EFAULT;
57908+ goto cleanup;
57909+ }
57910+ tmp[len-1] = '\0';
57911+ r_tmp->rolename = tmp;
57912+
57913+ if (!strcmp(r_tmp->rolename, "default")
57914+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57915+ default_role = r_tmp;
57916+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57917+ kernel_role = r_tmp;
57918+ }
57919+
57920+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57921+ err = -ENOMEM;
57922+ goto cleanup;
57923+ }
57924+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57925+ err = -EFAULT;
57926+ goto cleanup;
57927+ }
57928+
57929+ r_tmp->hash = ghash;
57930+
57931+ num_subjs = count_user_subjs(r_tmp->hash->first);
57932+
57933+ r_tmp->subj_hash_size = num_subjs;
57934+ r_tmp->subj_hash =
57935+ (struct acl_subject_label **)
57936+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57937+
57938+ if (!r_tmp->subj_hash) {
57939+ err = -ENOMEM;
57940+ goto cleanup;
57941+ }
57942+
57943+ err = copy_user_allowedips(r_tmp);
57944+ if (err)
57945+ goto cleanup;
57946+
57947+ /* copy domain info */
57948+ if (r_tmp->domain_children != NULL) {
57949+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57950+ if (domainlist == NULL) {
57951+ err = -ENOMEM;
57952+ goto cleanup;
57953+ }
57954+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57955+ err = -EFAULT;
57956+ goto cleanup;
57957+ }
57958+ r_tmp->domain_children = domainlist;
57959+ }
57960+
57961+ err = copy_user_transitions(r_tmp);
57962+ if (err)
57963+ goto cleanup;
57964+
57965+ memset(r_tmp->subj_hash, 0,
57966+ r_tmp->subj_hash_size *
57967+ sizeof (struct acl_subject_label *));
57968+
57969+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
57970+
57971+ if (err)
57972+ goto cleanup;
57973+
57974+ /* set nested subject list to null */
57975+ r_tmp->hash->first = NULL;
57976+
57977+ insert_acl_role_label(r_tmp);
57978+ }
57979+
57980+ goto return_err;
57981+ cleanup:
57982+ free_variables();
57983+ return_err:
57984+ return err;
57985+
57986+}
57987+
57988+static int
57989+gracl_init(struct gr_arg *args)
57990+{
57991+ int error = 0;
57992+
57993+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
57994+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
57995+
57996+ if (init_variables(args)) {
57997+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
57998+ error = -ENOMEM;
57999+ free_variables();
58000+ goto out;
58001+ }
58002+
58003+ error = copy_user_acl(args);
58004+ free_init_variables();
58005+ if (error) {
58006+ free_variables();
58007+ goto out;
58008+ }
58009+
58010+ if ((error = gr_set_acls(0))) {
58011+ free_variables();
58012+ goto out;
58013+ }
58014+
58015+ pax_open_kernel();
58016+ gr_status |= GR_READY;
58017+ pax_close_kernel();
58018+
58019+ out:
58020+ return error;
58021+}
58022+
58023+/* derived from glibc fnmatch() 0: match, 1: no match*/
58024+
58025+static int
58026+glob_match(const char *p, const char *n)
58027+{
58028+ char c;
58029+
58030+ while ((c = *p++) != '\0') {
58031+ switch (c) {
58032+ case '?':
58033+ if (*n == '\0')
58034+ return 1;
58035+ else if (*n == '/')
58036+ return 1;
58037+ break;
58038+ case '\\':
58039+ if (*n != c)
58040+ return 1;
58041+ break;
58042+ case '*':
58043+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
58044+ if (*n == '/')
58045+ return 1;
58046+ else if (c == '?') {
58047+ if (*n == '\0')
58048+ return 1;
58049+ else
58050+ ++n;
58051+ }
58052+ }
58053+ if (c == '\0') {
58054+ return 0;
58055+ } else {
58056+ const char *endp;
58057+
58058+ if ((endp = strchr(n, '/')) == NULL)
58059+ endp = n + strlen(n);
58060+
58061+ if (c == '[') {
58062+ for (--p; n < endp; ++n)
58063+ if (!glob_match(p, n))
58064+ return 0;
58065+ } else if (c == '/') {
58066+ while (*n != '\0' && *n != '/')
58067+ ++n;
58068+ if (*n == '/' && !glob_match(p, n + 1))
58069+ return 0;
58070+ } else {
58071+ for (--p; n < endp; ++n)
58072+ if (*n == c && !glob_match(p, n))
58073+ return 0;
58074+ }
58075+
58076+ return 1;
58077+ }
58078+ case '[':
58079+ {
58080+ int not;
58081+ char cold;
58082+
58083+ if (*n == '\0' || *n == '/')
58084+ return 1;
58085+
58086+ not = (*p == '!' || *p == '^');
58087+ if (not)
58088+ ++p;
58089+
58090+ c = *p++;
58091+ for (;;) {
58092+ unsigned char fn = (unsigned char)*n;
58093+
58094+ if (c == '\0')
58095+ return 1;
58096+ else {
58097+ if (c == fn)
58098+ goto matched;
58099+ cold = c;
58100+ c = *p++;
58101+
58102+ if (c == '-' && *p != ']') {
58103+ unsigned char cend = *p++;
58104+
58105+ if (cend == '\0')
58106+ return 1;
58107+
58108+ if (cold <= fn && fn <= cend)
58109+ goto matched;
58110+
58111+ c = *p++;
58112+ }
58113+ }
58114+
58115+ if (c == ']')
58116+ break;
58117+ }
58118+ if (!not)
58119+ return 1;
58120+ break;
58121+ matched:
58122+ while (c != ']') {
58123+ if (c == '\0')
58124+ return 1;
58125+
58126+ c = *p++;
58127+ }
58128+ if (not)
58129+ return 1;
58130+ }
58131+ break;
58132+ default:
58133+ if (c != *n)
58134+ return 1;
58135+ }
58136+
58137+ ++n;
58138+ }
58139+
58140+ if (*n == '\0')
58141+ return 0;
58142+
58143+ if (*n == '/')
58144+ return 0;
58145+
58146+ return 1;
58147+}
58148+
58149+static struct acl_object_label *
58150+chk_glob_label(struct acl_object_label *globbed,
58151+ struct dentry *dentry, struct vfsmount *mnt, char **path)
58152+{
58153+ struct acl_object_label *tmp;
58154+
58155+ if (*path == NULL)
58156+ *path = gr_to_filename_nolock(dentry, mnt);
58157+
58158+ tmp = globbed;
58159+
58160+ while (tmp) {
58161+ if (!glob_match(tmp->filename, *path))
58162+ return tmp;
58163+ tmp = tmp->next;
58164+ }
58165+
58166+ return NULL;
58167+}
58168+
58169+static struct acl_object_label *
58170+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58171+ const ino_t curr_ino, const dev_t curr_dev,
58172+ const struct acl_subject_label *subj, char **path, const int checkglob)
58173+{
58174+ struct acl_subject_label *tmpsubj;
58175+ struct acl_object_label *retval;
58176+ struct acl_object_label *retval2;
58177+
58178+ tmpsubj = (struct acl_subject_label *) subj;
58179+ read_lock(&gr_inode_lock);
58180+ do {
58181+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58182+ if (retval) {
58183+ if (checkglob && retval->globbed) {
58184+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
58185+ (struct vfsmount *)orig_mnt, path);
58186+ if (retval2)
58187+ retval = retval2;
58188+ }
58189+ break;
58190+ }
58191+ } while ((tmpsubj = tmpsubj->parent_subject));
58192+ read_unlock(&gr_inode_lock);
58193+
58194+ return retval;
58195+}
58196+
58197+static __inline__ struct acl_object_label *
58198+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58199+ const struct dentry *curr_dentry,
58200+ const struct acl_subject_label *subj, char **path, const int checkglob)
58201+{
58202+ int newglob = checkglob;
58203+
58204+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58205+ as we don't want a / * rule to match instead of the / object
58206+ don't do this for create lookups that call this function though, since they're looking up
58207+ on the parent and thus need globbing checks on all paths
58208+ */
58209+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58210+ newglob = GR_NO_GLOB;
58211+
58212+ return __full_lookup(orig_dentry, orig_mnt,
58213+ curr_dentry->d_inode->i_ino,
58214+ __get_dev(curr_dentry), subj, path, newglob);
58215+}
58216+
58217+static struct acl_object_label *
58218+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58219+ const struct acl_subject_label *subj, char *path, const int checkglob)
58220+{
58221+ struct dentry *dentry = (struct dentry *) l_dentry;
58222+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58223+ struct acl_object_label *retval;
58224+
58225+ spin_lock(&dcache_lock);
58226+ spin_lock(&vfsmount_lock);
58227+
58228+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58229+#ifdef CONFIG_NET
58230+ mnt == sock_mnt ||
58231+#endif
58232+#ifdef CONFIG_HUGETLBFS
58233+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
58234+#endif
58235+ /* ignore Eric Biederman */
58236+ IS_PRIVATE(l_dentry->d_inode))) {
58237+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58238+ goto out;
58239+ }
58240+
58241+ for (;;) {
58242+ if (dentry == real_root && mnt == real_root_mnt)
58243+ break;
58244+
58245+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58246+ if (mnt->mnt_parent == mnt)
58247+ break;
58248+
58249+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58250+ if (retval != NULL)
58251+ goto out;
58252+
58253+ dentry = mnt->mnt_mountpoint;
58254+ mnt = mnt->mnt_parent;
58255+ continue;
58256+ }
58257+
58258+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58259+ if (retval != NULL)
58260+ goto out;
58261+
58262+ dentry = dentry->d_parent;
58263+ }
58264+
58265+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58266+
58267+ if (retval == NULL)
58268+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
58269+out:
58270+ spin_unlock(&vfsmount_lock);
58271+ spin_unlock(&dcache_lock);
58272+
58273+ BUG_ON(retval == NULL);
58274+
58275+ return retval;
58276+}
58277+
58278+static __inline__ struct acl_object_label *
58279+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58280+ const struct acl_subject_label *subj)
58281+{
58282+ char *path = NULL;
58283+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58284+}
58285+
58286+static __inline__ struct acl_object_label *
58287+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58288+ const struct acl_subject_label *subj)
58289+{
58290+ char *path = NULL;
58291+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58292+}
58293+
58294+static __inline__ struct acl_object_label *
58295+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58296+ const struct acl_subject_label *subj, char *path)
58297+{
58298+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58299+}
58300+
58301+static struct acl_subject_label *
58302+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58303+ const struct acl_role_label *role)
58304+{
58305+ struct dentry *dentry = (struct dentry *) l_dentry;
58306+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58307+ struct acl_subject_label *retval;
58308+
58309+ spin_lock(&dcache_lock);
58310+ spin_lock(&vfsmount_lock);
58311+
58312+ for (;;) {
58313+ if (dentry == real_root && mnt == real_root_mnt)
58314+ break;
58315+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58316+ if (mnt->mnt_parent == mnt)
58317+ break;
58318+
58319+ read_lock(&gr_inode_lock);
58320+ retval =
58321+ lookup_acl_subj_label(dentry->d_inode->i_ino,
58322+ __get_dev(dentry), role);
58323+ read_unlock(&gr_inode_lock);
58324+ if (retval != NULL)
58325+ goto out;
58326+
58327+ dentry = mnt->mnt_mountpoint;
58328+ mnt = mnt->mnt_parent;
58329+ continue;
58330+ }
58331+
58332+ read_lock(&gr_inode_lock);
58333+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58334+ __get_dev(dentry), role);
58335+ read_unlock(&gr_inode_lock);
58336+ if (retval != NULL)
58337+ goto out;
58338+
58339+ dentry = dentry->d_parent;
58340+ }
58341+
58342+ read_lock(&gr_inode_lock);
58343+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58344+ __get_dev(dentry), role);
58345+ read_unlock(&gr_inode_lock);
58346+
58347+ if (unlikely(retval == NULL)) {
58348+ read_lock(&gr_inode_lock);
58349+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
58350+ __get_dev(real_root), role);
58351+ read_unlock(&gr_inode_lock);
58352+ }
58353+out:
58354+ spin_unlock(&vfsmount_lock);
58355+ spin_unlock(&dcache_lock);
58356+
58357+ BUG_ON(retval == NULL);
58358+
58359+ return retval;
58360+}
58361+
58362+static void
58363+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58364+{
58365+ struct task_struct *task = current;
58366+ const struct cred *cred = current_cred();
58367+
58368+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58369+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58370+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58371+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58372+
58373+ return;
58374+}
58375+
58376+static void
58377+gr_log_learn_sysctl(const char *path, const __u32 mode)
58378+{
58379+ struct task_struct *task = current;
58380+ const struct cred *cred = current_cred();
58381+
58382+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58383+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58384+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58385+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58386+
58387+ return;
58388+}
58389+
58390+static void
58391+gr_log_learn_id_change(const char type, const unsigned int real,
58392+ const unsigned int effective, const unsigned int fs)
58393+{
58394+ struct task_struct *task = current;
58395+ const struct cred *cred = current_cred();
58396+
58397+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58398+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58399+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58400+ type, real, effective, fs, &task->signal->saved_ip);
58401+
58402+ return;
58403+}
58404+
58405+__u32
58406+gr_search_file(const struct dentry * dentry, const __u32 mode,
58407+ const struct vfsmount * mnt)
58408+{
58409+ __u32 retval = mode;
58410+ struct acl_subject_label *curracl;
58411+ struct acl_object_label *currobj;
58412+
58413+ if (unlikely(!(gr_status & GR_READY)))
58414+ return (mode & ~GR_AUDITS);
58415+
58416+ curracl = current->acl;
58417+
58418+ currobj = chk_obj_label(dentry, mnt, curracl);
58419+ retval = currobj->mode & mode;
58420+
58421+ /* if we're opening a specified transfer file for writing
58422+ (e.g. /dev/initctl), then transfer our role to init
58423+ */
58424+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58425+ current->role->roletype & GR_ROLE_PERSIST)) {
58426+ struct task_struct *task = init_pid_ns.child_reaper;
58427+
58428+ if (task->role != current->role) {
58429+ task->acl_sp_role = 0;
58430+ task->acl_role_id = current->acl_role_id;
58431+ task->role = current->role;
58432+ rcu_read_lock();
58433+ read_lock(&grsec_exec_file_lock);
58434+ gr_apply_subject_to_task(task);
58435+ read_unlock(&grsec_exec_file_lock);
58436+ rcu_read_unlock();
58437+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58438+ }
58439+ }
58440+
58441+ if (unlikely
58442+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58443+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58444+ __u32 new_mode = mode;
58445+
58446+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58447+
58448+ retval = new_mode;
58449+
58450+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58451+ new_mode |= GR_INHERIT;
58452+
58453+ if (!(mode & GR_NOLEARN))
58454+ gr_log_learn(dentry, mnt, new_mode);
58455+ }
58456+
58457+ return retval;
58458+}
58459+
58460+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58461+ const struct dentry *parent,
58462+ const struct vfsmount *mnt)
58463+{
58464+ struct name_entry *match;
58465+ struct acl_object_label *matchpo;
58466+ struct acl_subject_label *curracl;
58467+ char *path;
58468+
58469+ if (unlikely(!(gr_status & GR_READY)))
58470+ return NULL;
58471+
58472+ preempt_disable();
58473+ path = gr_to_filename_rbac(new_dentry, mnt);
58474+ match = lookup_name_entry_create(path);
58475+
58476+ curracl = current->acl;
58477+
58478+ if (match) {
58479+ read_lock(&gr_inode_lock);
58480+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58481+ read_unlock(&gr_inode_lock);
58482+
58483+ if (matchpo) {
58484+ preempt_enable();
58485+ return matchpo;
58486+ }
58487+ }
58488+
58489+ // lookup parent
58490+
58491+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58492+
58493+ preempt_enable();
58494+ return matchpo;
58495+}
58496+
58497+__u32
58498+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58499+ const struct vfsmount * mnt, const __u32 mode)
58500+{
58501+ struct acl_object_label *matchpo;
58502+ __u32 retval;
58503+
58504+ if (unlikely(!(gr_status & GR_READY)))
58505+ return (mode & ~GR_AUDITS);
58506+
58507+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
58508+
58509+ retval = matchpo->mode & mode;
58510+
58511+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58512+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58513+ __u32 new_mode = mode;
58514+
58515+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58516+
58517+ gr_log_learn(new_dentry, mnt, new_mode);
58518+ return new_mode;
58519+ }
58520+
58521+ return retval;
58522+}
58523+
58524+__u32
58525+gr_check_link(const struct dentry * new_dentry,
58526+ const struct dentry * parent_dentry,
58527+ const struct vfsmount * parent_mnt,
58528+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58529+{
58530+ struct acl_object_label *obj;
58531+ __u32 oldmode, newmode;
58532+ __u32 needmode;
58533+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58534+ GR_DELETE | GR_INHERIT;
58535+
58536+ if (unlikely(!(gr_status & GR_READY)))
58537+ return (GR_CREATE | GR_LINK);
58538+
58539+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58540+ oldmode = obj->mode;
58541+
58542+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58543+ newmode = obj->mode;
58544+
58545+ needmode = newmode & checkmodes;
58546+
58547+ // old name for hardlink must have at least the permissions of the new name
58548+ if ((oldmode & needmode) != needmode)
58549+ goto bad;
58550+
58551+ // if old name had restrictions/auditing, make sure the new name does as well
58552+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58553+
58554+ // don't allow hardlinking of suid/sgid files without permission
58555+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58556+ needmode |= GR_SETID;
58557+
58558+ if ((newmode & needmode) != needmode)
58559+ goto bad;
58560+
58561+ // enforce minimum permissions
58562+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58563+ return newmode;
58564+bad:
58565+ needmode = oldmode;
58566+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58567+ needmode |= GR_SETID;
58568+
58569+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58570+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58571+ return (GR_CREATE | GR_LINK);
58572+ } else if (newmode & GR_SUPPRESS)
58573+ return GR_SUPPRESS;
58574+ else
58575+ return 0;
58576+}
58577+
58578+int
58579+gr_check_hidden_task(const struct task_struct *task)
58580+{
58581+ if (unlikely(!(gr_status & GR_READY)))
58582+ return 0;
58583+
58584+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58585+ return 1;
58586+
58587+ return 0;
58588+}
58589+
58590+int
58591+gr_check_protected_task(const struct task_struct *task)
58592+{
58593+ if (unlikely(!(gr_status & GR_READY) || !task))
58594+ return 0;
58595+
58596+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58597+ task->acl != current->acl)
58598+ return 1;
58599+
58600+ return 0;
58601+}
58602+
58603+int
58604+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58605+{
58606+ struct task_struct *p;
58607+ int ret = 0;
58608+
58609+ if (unlikely(!(gr_status & GR_READY) || !pid))
58610+ return ret;
58611+
58612+ read_lock(&tasklist_lock);
58613+ do_each_pid_task(pid, type, p) {
58614+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58615+ p->acl != current->acl) {
58616+ ret = 1;
58617+ goto out;
58618+ }
58619+ } while_each_pid_task(pid, type, p);
58620+out:
58621+ read_unlock(&tasklist_lock);
58622+
58623+ return ret;
58624+}
58625+
58626+void
58627+gr_copy_label(struct task_struct *tsk)
58628+{
58629+ /* plain copying of fields is already done by dup_task_struct */
58630+ tsk->signal->used_accept = 0;
58631+ tsk->acl_sp_role = 0;
58632+ //tsk->acl_role_id = current->acl_role_id;
58633+ //tsk->acl = current->acl;
58634+ //tsk->role = current->role;
58635+ tsk->signal->curr_ip = current->signal->curr_ip;
58636+ tsk->signal->saved_ip = current->signal->saved_ip;
58637+ if (current->exec_file)
58638+ get_file(current->exec_file);
58639+ //tsk->exec_file = current->exec_file;
58640+ //tsk->is_writable = current->is_writable;
58641+ if (unlikely(current->signal->used_accept)) {
58642+ current->signal->curr_ip = 0;
58643+ current->signal->saved_ip = 0;
58644+ }
58645+
58646+ return;
58647+}
58648+
58649+static void
58650+gr_set_proc_res(struct task_struct *task)
58651+{
58652+ struct acl_subject_label *proc;
58653+ unsigned short i;
58654+
58655+ proc = task->acl;
58656+
58657+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58658+ return;
58659+
58660+ for (i = 0; i < RLIM_NLIMITS; i++) {
58661+ if (!(proc->resmask & (1 << i)))
58662+ continue;
58663+
58664+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58665+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58666+ }
58667+
58668+ return;
58669+}
58670+
58671+extern int __gr_process_user_ban(struct user_struct *user);
58672+
58673+int
58674+gr_check_user_change(int real, int effective, int fs)
58675+{
58676+ unsigned int i;
58677+ __u16 num;
58678+ uid_t *uidlist;
58679+ int curuid;
58680+ int realok = 0;
58681+ int effectiveok = 0;
58682+ int fsok = 0;
58683+
58684+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58685+ struct user_struct *user;
58686+
58687+ if (real == -1)
58688+ goto skipit;
58689+
58690+ user = find_user(real);
58691+ if (user == NULL)
58692+ goto skipit;
58693+
58694+ if (__gr_process_user_ban(user)) {
58695+ /* for find_user */
58696+ free_uid(user);
58697+ return 1;
58698+ }
58699+
58700+ /* for find_user */
58701+ free_uid(user);
58702+
58703+skipit:
58704+#endif
58705+
58706+ if (unlikely(!(gr_status & GR_READY)))
58707+ return 0;
58708+
58709+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58710+ gr_log_learn_id_change('u', real, effective, fs);
58711+
58712+ num = current->acl->user_trans_num;
58713+ uidlist = current->acl->user_transitions;
58714+
58715+ if (uidlist == NULL)
58716+ return 0;
58717+
58718+ if (real == -1)
58719+ realok = 1;
58720+ if (effective == -1)
58721+ effectiveok = 1;
58722+ if (fs == -1)
58723+ fsok = 1;
58724+
58725+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
58726+ for (i = 0; i < num; i++) {
58727+ curuid = (int)uidlist[i];
58728+ if (real == curuid)
58729+ realok = 1;
58730+ if (effective == curuid)
58731+ effectiveok = 1;
58732+ if (fs == curuid)
58733+ fsok = 1;
58734+ }
58735+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
58736+ for (i = 0; i < num; i++) {
58737+ curuid = (int)uidlist[i];
58738+ if (real == curuid)
58739+ break;
58740+ if (effective == curuid)
58741+ break;
58742+ if (fs == curuid)
58743+ break;
58744+ }
58745+ /* not in deny list */
58746+ if (i == num) {
58747+ realok = 1;
58748+ effectiveok = 1;
58749+ fsok = 1;
58750+ }
58751+ }
58752+
58753+ if (realok && effectiveok && fsok)
58754+ return 0;
58755+ else {
58756+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58757+ return 1;
58758+ }
58759+}
58760+
58761+int
58762+gr_check_group_change(int real, int effective, int fs)
58763+{
58764+ unsigned int i;
58765+ __u16 num;
58766+ gid_t *gidlist;
58767+ int curgid;
58768+ int realok = 0;
58769+ int effectiveok = 0;
58770+ int fsok = 0;
58771+
58772+ if (unlikely(!(gr_status & GR_READY)))
58773+ return 0;
58774+
58775+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58776+ gr_log_learn_id_change('g', real, effective, fs);
58777+
58778+ num = current->acl->group_trans_num;
58779+ gidlist = current->acl->group_transitions;
58780+
58781+ if (gidlist == NULL)
58782+ return 0;
58783+
58784+ if (real == -1)
58785+ realok = 1;
58786+ if (effective == -1)
58787+ effectiveok = 1;
58788+ if (fs == -1)
58789+ fsok = 1;
58790+
58791+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
58792+ for (i = 0; i < num; i++) {
58793+ curgid = (int)gidlist[i];
58794+ if (real == curgid)
58795+ realok = 1;
58796+ if (effective == curgid)
58797+ effectiveok = 1;
58798+ if (fs == curgid)
58799+ fsok = 1;
58800+ }
58801+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
58802+ for (i = 0; i < num; i++) {
58803+ curgid = (int)gidlist[i];
58804+ if (real == curgid)
58805+ break;
58806+ if (effective == curgid)
58807+ break;
58808+ if (fs == curgid)
58809+ break;
58810+ }
58811+ /* not in deny list */
58812+ if (i == num) {
58813+ realok = 1;
58814+ effectiveok = 1;
58815+ fsok = 1;
58816+ }
58817+ }
58818+
58819+ if (realok && effectiveok && fsok)
58820+ return 0;
58821+ else {
58822+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58823+ return 1;
58824+ }
58825+}
58826+
58827+extern int gr_acl_is_capable(const int cap);
58828+
58829+void
58830+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58831+{
58832+ struct acl_role_label *role = task->role;
58833+ struct acl_subject_label *subj = NULL;
58834+ struct acl_object_label *obj;
58835+ struct file *filp;
58836+
58837+ if (unlikely(!(gr_status & GR_READY)))
58838+ return;
58839+
58840+ filp = task->exec_file;
58841+
58842+ /* kernel process, we'll give them the kernel role */
58843+ if (unlikely(!filp)) {
58844+ task->role = kernel_role;
58845+ task->acl = kernel_role->root_label;
58846+ return;
58847+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58848+ role = lookup_acl_role_label(task, uid, gid);
58849+
58850+ /* don't change the role if we're not a privileged process */
58851+ if (role && task->role != role &&
58852+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
58853+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
58854+ return;
58855+
58856+ /* perform subject lookup in possibly new role
58857+ we can use this result below in the case where role == task->role
58858+ */
58859+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58860+
58861+ /* if we changed uid/gid, but result in the same role
58862+ and are using inheritance, don't lose the inherited subject
58863+ if current subject is other than what normal lookup
58864+ would result in, we arrived via inheritance, don't
58865+ lose subject
58866+ */
58867+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58868+ (subj == task->acl)))
58869+ task->acl = subj;
58870+
58871+ task->role = role;
58872+
58873+ task->is_writable = 0;
58874+
58875+ /* ignore additional mmap checks for processes that are writable
58876+ by the default ACL */
58877+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58878+ if (unlikely(obj->mode & GR_WRITE))
58879+ task->is_writable = 1;
58880+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58881+ if (unlikely(obj->mode & GR_WRITE))
58882+ task->is_writable = 1;
58883+
58884+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58885+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58886+#endif
58887+
58888+ gr_set_proc_res(task);
58889+
58890+ return;
58891+}
58892+
58893+int
58894+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58895+ const int unsafe_flags)
58896+{
58897+ struct task_struct *task = current;
58898+ struct acl_subject_label *newacl;
58899+ struct acl_object_label *obj;
58900+ __u32 retmode;
58901+
58902+ if (unlikely(!(gr_status & GR_READY)))
58903+ return 0;
58904+
58905+ newacl = chk_subj_label(dentry, mnt, task->role);
58906+
58907+ task_lock(task);
58908+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58909+ !(task->role->roletype & GR_ROLE_GOD) &&
58910+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58911+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58912+ task_unlock(task);
58913+ if (unsafe_flags & LSM_UNSAFE_SHARE)
58914+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58915+ else
58916+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58917+ return -EACCES;
58918+ }
58919+ task_unlock(task);
58920+
58921+ obj = chk_obj_label(dentry, mnt, task->acl);
58922+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58923+
58924+ if (!(task->acl->mode & GR_INHERITLEARN) &&
58925+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58926+ if (obj->nested)
58927+ task->acl = obj->nested;
58928+ else
58929+ task->acl = newacl;
58930+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58931+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58932+
58933+ task->is_writable = 0;
58934+
58935+ /* ignore additional mmap checks for processes that are writable
58936+ by the default ACL */
58937+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
58938+ if (unlikely(obj->mode & GR_WRITE))
58939+ task->is_writable = 1;
58940+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
58941+ if (unlikely(obj->mode & GR_WRITE))
58942+ task->is_writable = 1;
58943+
58944+ gr_set_proc_res(task);
58945+
58946+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58947+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58948+#endif
58949+ return 0;
58950+}
58951+
58952+/* always called with valid inodev ptr */
58953+static void
58954+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58955+{
58956+ struct acl_object_label *matchpo;
58957+ struct acl_subject_label *matchps;
58958+ struct acl_subject_label *subj;
58959+ struct acl_role_label *role;
58960+ unsigned int x;
58961+
58962+ FOR_EACH_ROLE_START(role)
58963+ FOR_EACH_SUBJECT_START(role, subj, x)
58964+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
58965+ matchpo->mode |= GR_DELETED;
58966+ FOR_EACH_SUBJECT_END(subj,x)
58967+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58968+ if (subj->inode == ino && subj->device == dev)
58969+ subj->mode |= GR_DELETED;
58970+ FOR_EACH_NESTED_SUBJECT_END(subj)
58971+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
58972+ matchps->mode |= GR_DELETED;
58973+ FOR_EACH_ROLE_END(role)
58974+
58975+ inodev->nentry->deleted = 1;
58976+
58977+ return;
58978+}
58979+
58980+void
58981+gr_handle_delete(const ino_t ino, const dev_t dev)
58982+{
58983+ struct inodev_entry *inodev;
58984+
58985+ if (unlikely(!(gr_status & GR_READY)))
58986+ return;
58987+
58988+ write_lock(&gr_inode_lock);
58989+ inodev = lookup_inodev_entry(ino, dev);
58990+ if (inodev != NULL)
58991+ do_handle_delete(inodev, ino, dev);
58992+ write_unlock(&gr_inode_lock);
58993+
58994+ return;
58995+}
58996+
58997+static void
58998+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
58999+ const ino_t newinode, const dev_t newdevice,
59000+ struct acl_subject_label *subj)
59001+{
59002+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
59003+ struct acl_object_label *match;
59004+
59005+ match = subj->obj_hash[index];
59006+
59007+ while (match && (match->inode != oldinode ||
59008+ match->device != olddevice ||
59009+ !(match->mode & GR_DELETED)))
59010+ match = match->next;
59011+
59012+ if (match && (match->inode == oldinode)
59013+ && (match->device == olddevice)
59014+ && (match->mode & GR_DELETED)) {
59015+ if (match->prev == NULL) {
59016+ subj->obj_hash[index] = match->next;
59017+ if (match->next != NULL)
59018+ match->next->prev = NULL;
59019+ } else {
59020+ match->prev->next = match->next;
59021+ if (match->next != NULL)
59022+ match->next->prev = match->prev;
59023+ }
59024+ match->prev = NULL;
59025+ match->next = NULL;
59026+ match->inode = newinode;
59027+ match->device = newdevice;
59028+ match->mode &= ~GR_DELETED;
59029+
59030+ insert_acl_obj_label(match, subj);
59031+ }
59032+
59033+ return;
59034+}
59035+
59036+static void
59037+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59038+ const ino_t newinode, const dev_t newdevice,
59039+ struct acl_role_label *role)
59040+{
59041+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
59042+ struct acl_subject_label *match;
59043+
59044+ match = role->subj_hash[index];
59045+
59046+ while (match && (match->inode != oldinode ||
59047+ match->device != olddevice ||
59048+ !(match->mode & GR_DELETED)))
59049+ match = match->next;
59050+
59051+ if (match && (match->inode == oldinode)
59052+ && (match->device == olddevice)
59053+ && (match->mode & GR_DELETED)) {
59054+ if (match->prev == NULL) {
59055+ role->subj_hash[index] = match->next;
59056+ if (match->next != NULL)
59057+ match->next->prev = NULL;
59058+ } else {
59059+ match->prev->next = match->next;
59060+ if (match->next != NULL)
59061+ match->next->prev = match->prev;
59062+ }
59063+ match->prev = NULL;
59064+ match->next = NULL;
59065+ match->inode = newinode;
59066+ match->device = newdevice;
59067+ match->mode &= ~GR_DELETED;
59068+
59069+ insert_acl_subj_label(match, role);
59070+ }
59071+
59072+ return;
59073+}
59074+
59075+static void
59076+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59077+ const ino_t newinode, const dev_t newdevice)
59078+{
59079+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
59080+ struct inodev_entry *match;
59081+
59082+ match = inodev_set.i_hash[index];
59083+
59084+ while (match && (match->nentry->inode != oldinode ||
59085+ match->nentry->device != olddevice || !match->nentry->deleted))
59086+ match = match->next;
59087+
59088+ if (match && (match->nentry->inode == oldinode)
59089+ && (match->nentry->device == olddevice) &&
59090+ match->nentry->deleted) {
59091+ if (match->prev == NULL) {
59092+ inodev_set.i_hash[index] = match->next;
59093+ if (match->next != NULL)
59094+ match->next->prev = NULL;
59095+ } else {
59096+ match->prev->next = match->next;
59097+ if (match->next != NULL)
59098+ match->next->prev = match->prev;
59099+ }
59100+ match->prev = NULL;
59101+ match->next = NULL;
59102+ match->nentry->inode = newinode;
59103+ match->nentry->device = newdevice;
59104+ match->nentry->deleted = 0;
59105+
59106+ insert_inodev_entry(match);
59107+ }
59108+
59109+ return;
59110+}
59111+
59112+static void
59113+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
59114+{
59115+ struct acl_subject_label *subj;
59116+ struct acl_role_label *role;
59117+ unsigned int x;
59118+
59119+ FOR_EACH_ROLE_START(role)
59120+ update_acl_subj_label(matchn->inode, matchn->device,
59121+ inode, dev, role);
59122+
59123+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
59124+ if ((subj->inode == inode) && (subj->device == dev)) {
59125+ subj->inode = inode;
59126+ subj->device = dev;
59127+ }
59128+ FOR_EACH_NESTED_SUBJECT_END(subj)
59129+ FOR_EACH_SUBJECT_START(role, subj, x)
59130+ update_acl_obj_label(matchn->inode, matchn->device,
59131+ inode, dev, subj);
59132+ FOR_EACH_SUBJECT_END(subj,x)
59133+ FOR_EACH_ROLE_END(role)
59134+
59135+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
59136+
59137+ return;
59138+}
59139+
59140+static void
59141+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59142+ const struct vfsmount *mnt)
59143+{
59144+ ino_t ino = dentry->d_inode->i_ino;
59145+ dev_t dev = __get_dev(dentry);
59146+
59147+ __do_handle_create(matchn, ino, dev);
59148+
59149+ return;
59150+}
59151+
59152+void
59153+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59154+{
59155+ struct name_entry *matchn;
59156+
59157+ if (unlikely(!(gr_status & GR_READY)))
59158+ return;
59159+
59160+ preempt_disable();
59161+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59162+
59163+ if (unlikely((unsigned long)matchn)) {
59164+ write_lock(&gr_inode_lock);
59165+ do_handle_create(matchn, dentry, mnt);
59166+ write_unlock(&gr_inode_lock);
59167+ }
59168+ preempt_enable();
59169+
59170+ return;
59171+}
59172+
59173+void
59174+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59175+{
59176+ struct name_entry *matchn;
59177+
59178+ if (unlikely(!(gr_status & GR_READY)))
59179+ return;
59180+
59181+ preempt_disable();
59182+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59183+
59184+ if (unlikely((unsigned long)matchn)) {
59185+ write_lock(&gr_inode_lock);
59186+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59187+ write_unlock(&gr_inode_lock);
59188+ }
59189+ preempt_enable();
59190+
59191+ return;
59192+}
59193+
59194+void
59195+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59196+ struct dentry *old_dentry,
59197+ struct dentry *new_dentry,
59198+ struct vfsmount *mnt, const __u8 replace)
59199+{
59200+ struct name_entry *matchn;
59201+ struct inodev_entry *inodev;
59202+ struct inode *inode = new_dentry->d_inode;
59203+ ino_t oldinode = old_dentry->d_inode->i_ino;
59204+ dev_t olddev = __get_dev(old_dentry);
59205+
59206+ /* vfs_rename swaps the name and parent link for old_dentry and
59207+ new_dentry
59208+ at this point, old_dentry has the new name, parent link, and inode
59209+ for the renamed file
59210+ if a file is being replaced by a rename, new_dentry has the inode
59211+ and name for the replaced file
59212+ */
59213+
59214+ if (unlikely(!(gr_status & GR_READY)))
59215+ return;
59216+
59217+ preempt_disable();
59218+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59219+
59220+ /* we wouldn't have to check d_inode if it weren't for
59221+ NFS silly-renaming
59222+ */
59223+
59224+ write_lock(&gr_inode_lock);
59225+ if (unlikely(replace && inode)) {
59226+ ino_t newinode = inode->i_ino;
59227+ dev_t newdev = __get_dev(new_dentry);
59228+ inodev = lookup_inodev_entry(newinode, newdev);
59229+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59230+ do_handle_delete(inodev, newinode, newdev);
59231+ }
59232+
59233+ inodev = lookup_inodev_entry(oldinode, olddev);
59234+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59235+ do_handle_delete(inodev, oldinode, olddev);
59236+
59237+ if (unlikely((unsigned long)matchn))
59238+ do_handle_create(matchn, old_dentry, mnt);
59239+
59240+ write_unlock(&gr_inode_lock);
59241+ preempt_enable();
59242+
59243+ return;
59244+}
59245+
59246+static int
59247+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59248+ unsigned char **sum)
59249+{
59250+ struct acl_role_label *r;
59251+ struct role_allowed_ip *ipp;
59252+ struct role_transition *trans;
59253+ unsigned int i;
59254+ int found = 0;
59255+ u32 curr_ip = current->signal->curr_ip;
59256+
59257+ current->signal->saved_ip = curr_ip;
59258+
59259+ /* check transition table */
59260+
59261+ for (trans = current->role->transitions; trans; trans = trans->next) {
59262+ if (!strcmp(rolename, trans->rolename)) {
59263+ found = 1;
59264+ break;
59265+ }
59266+ }
59267+
59268+ if (!found)
59269+ return 0;
59270+
59271+ /* handle special roles that do not require authentication
59272+ and check ip */
59273+
59274+ FOR_EACH_ROLE_START(r)
59275+ if (!strcmp(rolename, r->rolename) &&
59276+ (r->roletype & GR_ROLE_SPECIAL)) {
59277+ found = 0;
59278+ if (r->allowed_ips != NULL) {
59279+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59280+ if ((ntohl(curr_ip) & ipp->netmask) ==
59281+ (ntohl(ipp->addr) & ipp->netmask))
59282+ found = 1;
59283+ }
59284+ } else
59285+ found = 2;
59286+ if (!found)
59287+ return 0;
59288+
59289+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59290+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59291+ *salt = NULL;
59292+ *sum = NULL;
59293+ return 1;
59294+ }
59295+ }
59296+ FOR_EACH_ROLE_END(r)
59297+
59298+ for (i = 0; i < num_sprole_pws; i++) {
59299+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59300+ *salt = acl_special_roles[i]->salt;
59301+ *sum = acl_special_roles[i]->sum;
59302+ return 1;
59303+ }
59304+ }
59305+
59306+ return 0;
59307+}
59308+
59309+static void
59310+assign_special_role(char *rolename)
59311+{
59312+ struct acl_object_label *obj;
59313+ struct acl_role_label *r;
59314+ struct acl_role_label *assigned = NULL;
59315+ struct task_struct *tsk;
59316+ struct file *filp;
59317+
59318+ FOR_EACH_ROLE_START(r)
59319+ if (!strcmp(rolename, r->rolename) &&
59320+ (r->roletype & GR_ROLE_SPECIAL)) {
59321+ assigned = r;
59322+ break;
59323+ }
59324+ FOR_EACH_ROLE_END(r)
59325+
59326+ if (!assigned)
59327+ return;
59328+
59329+ read_lock(&tasklist_lock);
59330+ read_lock(&grsec_exec_file_lock);
59331+
59332+ tsk = current->real_parent;
59333+ if (tsk == NULL)
59334+ goto out_unlock;
59335+
59336+ filp = tsk->exec_file;
59337+ if (filp == NULL)
59338+ goto out_unlock;
59339+
59340+ tsk->is_writable = 0;
59341+
59342+ tsk->acl_sp_role = 1;
59343+ tsk->acl_role_id = ++acl_sp_role_value;
59344+ tsk->role = assigned;
59345+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59346+
59347+ /* ignore additional mmap checks for processes that are writable
59348+ by the default ACL */
59349+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59350+ if (unlikely(obj->mode & GR_WRITE))
59351+ tsk->is_writable = 1;
59352+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59353+ if (unlikely(obj->mode & GR_WRITE))
59354+ tsk->is_writable = 1;
59355+
59356+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59357+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
59358+#endif
59359+
59360+out_unlock:
59361+ read_unlock(&grsec_exec_file_lock);
59362+ read_unlock(&tasklist_lock);
59363+ return;
59364+}
59365+
59366+int gr_check_secure_terminal(struct task_struct *task)
59367+{
59368+ struct task_struct *p, *p2, *p3;
59369+ struct files_struct *files;
59370+ struct fdtable *fdt;
59371+ struct file *our_file = NULL, *file;
59372+ int i;
59373+
59374+ if (task->signal->tty == NULL)
59375+ return 1;
59376+
59377+ files = get_files_struct(task);
59378+ if (files != NULL) {
59379+ rcu_read_lock();
59380+ fdt = files_fdtable(files);
59381+ for (i=0; i < fdt->max_fds; i++) {
59382+ file = fcheck_files(files, i);
59383+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59384+ get_file(file);
59385+ our_file = file;
59386+ }
59387+ }
59388+ rcu_read_unlock();
59389+ put_files_struct(files);
59390+ }
59391+
59392+ if (our_file == NULL)
59393+ return 1;
59394+
59395+ read_lock(&tasklist_lock);
59396+ do_each_thread(p2, p) {
59397+ files = get_files_struct(p);
59398+ if (files == NULL ||
59399+ (p->signal && p->signal->tty == task->signal->tty)) {
59400+ if (files != NULL)
59401+ put_files_struct(files);
59402+ continue;
59403+ }
59404+ rcu_read_lock();
59405+ fdt = files_fdtable(files);
59406+ for (i=0; i < fdt->max_fds; i++) {
59407+ file = fcheck_files(files, i);
59408+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59409+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59410+ p3 = task;
59411+ while (p3->pid > 0) {
59412+ if (p3 == p)
59413+ break;
59414+ p3 = p3->real_parent;
59415+ }
59416+ if (p3 == p)
59417+ break;
59418+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59419+ gr_handle_alertkill(p);
59420+ rcu_read_unlock();
59421+ put_files_struct(files);
59422+ read_unlock(&tasklist_lock);
59423+ fput(our_file);
59424+ return 0;
59425+ }
59426+ }
59427+ rcu_read_unlock();
59428+ put_files_struct(files);
59429+ } while_each_thread(p2, p);
59430+ read_unlock(&tasklist_lock);
59431+
59432+ fput(our_file);
59433+ return 1;
59434+}
59435+
59436+ssize_t
59437+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59438+{
59439+ struct gr_arg_wrapper uwrap;
59440+ unsigned char *sprole_salt = NULL;
59441+ unsigned char *sprole_sum = NULL;
59442+ int error = sizeof (struct gr_arg_wrapper);
59443+ int error2 = 0;
59444+
59445+ mutex_lock(&gr_dev_mutex);
59446+
59447+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59448+ error = -EPERM;
59449+ goto out;
59450+ }
59451+
59452+ if (count != sizeof (struct gr_arg_wrapper)) {
59453+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59454+ error = -EINVAL;
59455+ goto out;
59456+ }
59457+
59458+
59459+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59460+ gr_auth_expires = 0;
59461+ gr_auth_attempts = 0;
59462+ }
59463+
59464+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59465+ error = -EFAULT;
59466+ goto out;
59467+ }
59468+
59469+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59470+ error = -EINVAL;
59471+ goto out;
59472+ }
59473+
59474+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59475+ error = -EFAULT;
59476+ goto out;
59477+ }
59478+
59479+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59480+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59481+ time_after(gr_auth_expires, get_seconds())) {
59482+ error = -EBUSY;
59483+ goto out;
59484+ }
59485+
59486+ /* if non-root trying to do anything other than use a special role,
59487+ do not attempt authentication, do not count towards authentication
59488+ locking
59489+ */
59490+
59491+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59492+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59493+ current_uid()) {
59494+ error = -EPERM;
59495+ goto out;
59496+ }
59497+
59498+ /* ensure pw and special role name are null terminated */
59499+
59500+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59501+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59502+
59503+ /* Okay.
59504+ * We have our enough of the argument structure..(we have yet
59505+ * to copy_from_user the tables themselves) . Copy the tables
59506+ * only if we need them, i.e. for loading operations. */
59507+
59508+ switch (gr_usermode->mode) {
59509+ case GR_STATUS:
59510+ if (gr_status & GR_READY) {
59511+ error = 1;
59512+ if (!gr_check_secure_terminal(current))
59513+ error = 3;
59514+ } else
59515+ error = 2;
59516+ goto out;
59517+ case GR_SHUTDOWN:
59518+ if ((gr_status & GR_READY)
59519+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59520+ pax_open_kernel();
59521+ gr_status &= ~GR_READY;
59522+ pax_close_kernel();
59523+
59524+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59525+ free_variables();
59526+ memset(gr_usermode, 0, sizeof (struct gr_arg));
59527+ memset(gr_system_salt, 0, GR_SALT_LEN);
59528+ memset(gr_system_sum, 0, GR_SHA_LEN);
59529+ } else if (gr_status & GR_READY) {
59530+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59531+ error = -EPERM;
59532+ } else {
59533+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59534+ error = -EAGAIN;
59535+ }
59536+ break;
59537+ case GR_ENABLE:
59538+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59539+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59540+ else {
59541+ if (gr_status & GR_READY)
59542+ error = -EAGAIN;
59543+ else
59544+ error = error2;
59545+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59546+ }
59547+ break;
59548+ case GR_RELOAD:
59549+ if (!(gr_status & GR_READY)) {
59550+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59551+ error = -EAGAIN;
59552+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59553+ lock_kernel();
59554+
59555+ pax_open_kernel();
59556+ gr_status &= ~GR_READY;
59557+ pax_close_kernel();
59558+
59559+ free_variables();
59560+ if (!(error2 = gracl_init(gr_usermode))) {
59561+ unlock_kernel();
59562+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59563+ } else {
59564+ unlock_kernel();
59565+ error = error2;
59566+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59567+ }
59568+ } else {
59569+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59570+ error = -EPERM;
59571+ }
59572+ break;
59573+ case GR_SEGVMOD:
59574+ if (unlikely(!(gr_status & GR_READY))) {
59575+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59576+ error = -EAGAIN;
59577+ break;
59578+ }
59579+
59580+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59581+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59582+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59583+ struct acl_subject_label *segvacl;
59584+ segvacl =
59585+ lookup_acl_subj_label(gr_usermode->segv_inode,
59586+ gr_usermode->segv_device,
59587+ current->role);
59588+ if (segvacl) {
59589+ segvacl->crashes = 0;
59590+ segvacl->expires = 0;
59591+ }
59592+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59593+ gr_remove_uid(gr_usermode->segv_uid);
59594+ }
59595+ } else {
59596+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59597+ error = -EPERM;
59598+ }
59599+ break;
59600+ case GR_SPROLE:
59601+ case GR_SPROLEPAM:
59602+ if (unlikely(!(gr_status & GR_READY))) {
59603+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59604+ error = -EAGAIN;
59605+ break;
59606+ }
59607+
59608+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59609+ current->role->expires = 0;
59610+ current->role->auth_attempts = 0;
59611+ }
59612+
59613+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59614+ time_after(current->role->expires, get_seconds())) {
59615+ error = -EBUSY;
59616+ goto out;
59617+ }
59618+
59619+ if (lookup_special_role_auth
59620+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59621+ && ((!sprole_salt && !sprole_sum)
59622+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59623+ char *p = "";
59624+ assign_special_role(gr_usermode->sp_role);
59625+ read_lock(&tasklist_lock);
59626+ if (current->real_parent)
59627+ p = current->real_parent->role->rolename;
59628+ read_unlock(&tasklist_lock);
59629+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59630+ p, acl_sp_role_value);
59631+ } else {
59632+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59633+ error = -EPERM;
59634+ if(!(current->role->auth_attempts++))
59635+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59636+
59637+ goto out;
59638+ }
59639+ break;
59640+ case GR_UNSPROLE:
59641+ if (unlikely(!(gr_status & GR_READY))) {
59642+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59643+ error = -EAGAIN;
59644+ break;
59645+ }
59646+
59647+ if (current->role->roletype & GR_ROLE_SPECIAL) {
59648+ char *p = "";
59649+ int i = 0;
59650+
59651+ read_lock(&tasklist_lock);
59652+ if (current->real_parent) {
59653+ p = current->real_parent->role->rolename;
59654+ i = current->real_parent->acl_role_id;
59655+ }
59656+ read_unlock(&tasklist_lock);
59657+
59658+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59659+ gr_set_acls(1);
59660+ } else {
59661+ error = -EPERM;
59662+ goto out;
59663+ }
59664+ break;
59665+ default:
59666+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59667+ error = -EINVAL;
59668+ break;
59669+ }
59670+
59671+ if (error != -EPERM)
59672+ goto out;
59673+
59674+ if(!(gr_auth_attempts++))
59675+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59676+
59677+ out:
59678+ mutex_unlock(&gr_dev_mutex);
59679+ return error;
59680+}
59681+
59682+/* must be called with
59683+ rcu_read_lock();
59684+ read_lock(&tasklist_lock);
59685+ read_lock(&grsec_exec_file_lock);
59686+*/
59687+int gr_apply_subject_to_task(struct task_struct *task)
59688+{
59689+ struct acl_object_label *obj;
59690+ char *tmpname;
59691+ struct acl_subject_label *tmpsubj;
59692+ struct file *filp;
59693+ struct name_entry *nmatch;
59694+
59695+ filp = task->exec_file;
59696+ if (filp == NULL)
59697+ return 0;
59698+
59699+ /* the following is to apply the correct subject
59700+ on binaries running when the RBAC system
59701+ is enabled, when the binaries have been
59702+ replaced or deleted since their execution
59703+ -----
59704+ when the RBAC system starts, the inode/dev
59705+ from exec_file will be one the RBAC system
59706+ is unaware of. It only knows the inode/dev
59707+ of the present file on disk, or the absence
59708+ of it.
59709+ */
59710+ preempt_disable();
59711+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59712+
59713+ nmatch = lookup_name_entry(tmpname);
59714+ preempt_enable();
59715+ tmpsubj = NULL;
59716+ if (nmatch) {
59717+ if (nmatch->deleted)
59718+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59719+ else
59720+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59721+ if (tmpsubj != NULL)
59722+ task->acl = tmpsubj;
59723+ }
59724+ if (tmpsubj == NULL)
59725+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59726+ task->role);
59727+ if (task->acl) {
59728+ task->is_writable = 0;
59729+ /* ignore additional mmap checks for processes that are writable
59730+ by the default ACL */
59731+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59732+ if (unlikely(obj->mode & GR_WRITE))
59733+ task->is_writable = 1;
59734+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59735+ if (unlikely(obj->mode & GR_WRITE))
59736+ task->is_writable = 1;
59737+
59738+ gr_set_proc_res(task);
59739+
59740+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59741+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59742+#endif
59743+ } else {
59744+ return 1;
59745+ }
59746+
59747+ return 0;
59748+}
59749+
59750+int
59751+gr_set_acls(const int type)
59752+{
59753+ struct task_struct *task, *task2;
59754+ struct acl_role_label *role = current->role;
59755+ __u16 acl_role_id = current->acl_role_id;
59756+ const struct cred *cred;
59757+ int ret;
59758+
59759+ rcu_read_lock();
59760+ read_lock(&tasklist_lock);
59761+ read_lock(&grsec_exec_file_lock);
59762+ do_each_thread(task2, task) {
59763+ /* check to see if we're called from the exit handler,
59764+ if so, only replace ACLs that have inherited the admin
59765+ ACL */
59766+
59767+ if (type && (task->role != role ||
59768+ task->acl_role_id != acl_role_id))
59769+ continue;
59770+
59771+ task->acl_role_id = 0;
59772+ task->acl_sp_role = 0;
59773+
59774+ if (task->exec_file) {
59775+ cred = __task_cred(task);
59776+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59777+
59778+ ret = gr_apply_subject_to_task(task);
59779+ if (ret) {
59780+ read_unlock(&grsec_exec_file_lock);
59781+ read_unlock(&tasklist_lock);
59782+ rcu_read_unlock();
59783+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59784+ return ret;
59785+ }
59786+ } else {
59787+ // it's a kernel process
59788+ task->role = kernel_role;
59789+ task->acl = kernel_role->root_label;
59790+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59791+ task->acl->mode &= ~GR_PROCFIND;
59792+#endif
59793+ }
59794+ } while_each_thread(task2, task);
59795+ read_unlock(&grsec_exec_file_lock);
59796+ read_unlock(&tasklist_lock);
59797+ rcu_read_unlock();
59798+
59799+ return 0;
59800+}
59801+
59802+void
59803+gr_learn_resource(const struct task_struct *task,
59804+ const int res, const unsigned long wanted, const int gt)
59805+{
59806+ struct acl_subject_label *acl;
59807+ const struct cred *cred;
59808+
59809+ if (unlikely((gr_status & GR_READY) &&
59810+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59811+ goto skip_reslog;
59812+
59813+#ifdef CONFIG_GRKERNSEC_RESLOG
59814+ gr_log_resource(task, res, wanted, gt);
59815+#endif
59816+ skip_reslog:
59817+
59818+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59819+ return;
59820+
59821+ acl = task->acl;
59822+
59823+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59824+ !(acl->resmask & (1 << (unsigned short) res))))
59825+ return;
59826+
59827+ if (wanted >= acl->res[res].rlim_cur) {
59828+ unsigned long res_add;
59829+
59830+ res_add = wanted;
59831+ switch (res) {
59832+ case RLIMIT_CPU:
59833+ res_add += GR_RLIM_CPU_BUMP;
59834+ break;
59835+ case RLIMIT_FSIZE:
59836+ res_add += GR_RLIM_FSIZE_BUMP;
59837+ break;
59838+ case RLIMIT_DATA:
59839+ res_add += GR_RLIM_DATA_BUMP;
59840+ break;
59841+ case RLIMIT_STACK:
59842+ res_add += GR_RLIM_STACK_BUMP;
59843+ break;
59844+ case RLIMIT_CORE:
59845+ res_add += GR_RLIM_CORE_BUMP;
59846+ break;
59847+ case RLIMIT_RSS:
59848+ res_add += GR_RLIM_RSS_BUMP;
59849+ break;
59850+ case RLIMIT_NPROC:
59851+ res_add += GR_RLIM_NPROC_BUMP;
59852+ break;
59853+ case RLIMIT_NOFILE:
59854+ res_add += GR_RLIM_NOFILE_BUMP;
59855+ break;
59856+ case RLIMIT_MEMLOCK:
59857+ res_add += GR_RLIM_MEMLOCK_BUMP;
59858+ break;
59859+ case RLIMIT_AS:
59860+ res_add += GR_RLIM_AS_BUMP;
59861+ break;
59862+ case RLIMIT_LOCKS:
59863+ res_add += GR_RLIM_LOCKS_BUMP;
59864+ break;
59865+ case RLIMIT_SIGPENDING:
59866+ res_add += GR_RLIM_SIGPENDING_BUMP;
59867+ break;
59868+ case RLIMIT_MSGQUEUE:
59869+ res_add += GR_RLIM_MSGQUEUE_BUMP;
59870+ break;
59871+ case RLIMIT_NICE:
59872+ res_add += GR_RLIM_NICE_BUMP;
59873+ break;
59874+ case RLIMIT_RTPRIO:
59875+ res_add += GR_RLIM_RTPRIO_BUMP;
59876+ break;
59877+ case RLIMIT_RTTIME:
59878+ res_add += GR_RLIM_RTTIME_BUMP;
59879+ break;
59880+ }
59881+
59882+ acl->res[res].rlim_cur = res_add;
59883+
59884+ if (wanted > acl->res[res].rlim_max)
59885+ acl->res[res].rlim_max = res_add;
59886+
59887+ /* only log the subject filename, since resource logging is supported for
59888+ single-subject learning only */
59889+ rcu_read_lock();
59890+ cred = __task_cred(task);
59891+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59892+ task->role->roletype, cred->uid, cred->gid, acl->filename,
59893+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59894+ "", (unsigned long) res, &task->signal->saved_ip);
59895+ rcu_read_unlock();
59896+ }
59897+
59898+ return;
59899+}
59900+
59901+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59902+void
59903+pax_set_initial_flags(struct linux_binprm *bprm)
59904+{
59905+ struct task_struct *task = current;
59906+ struct acl_subject_label *proc;
59907+ unsigned long flags;
59908+
59909+ if (unlikely(!(gr_status & GR_READY)))
59910+ return;
59911+
59912+ flags = pax_get_flags(task);
59913+
59914+ proc = task->acl;
59915+
59916+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59917+ flags &= ~MF_PAX_PAGEEXEC;
59918+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59919+ flags &= ~MF_PAX_SEGMEXEC;
59920+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59921+ flags &= ~MF_PAX_RANDMMAP;
59922+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59923+ flags &= ~MF_PAX_EMUTRAMP;
59924+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59925+ flags &= ~MF_PAX_MPROTECT;
59926+
59927+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59928+ flags |= MF_PAX_PAGEEXEC;
59929+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59930+ flags |= MF_PAX_SEGMEXEC;
59931+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59932+ flags |= MF_PAX_RANDMMAP;
59933+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59934+ flags |= MF_PAX_EMUTRAMP;
59935+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59936+ flags |= MF_PAX_MPROTECT;
59937+
59938+ pax_set_flags(task, flags);
59939+
59940+ return;
59941+}
59942+#endif
59943+
59944+#ifdef CONFIG_SYSCTL
59945+/* Eric Biederman likes breaking userland ABI and every inode-based security
59946+ system to save 35kb of memory */
59947+
59948+/* we modify the passed in filename, but adjust it back before returning */
59949+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59950+{
59951+ struct name_entry *nmatch;
59952+ char *p, *lastp = NULL;
59953+ struct acl_object_label *obj = NULL, *tmp;
59954+ struct acl_subject_label *tmpsubj;
59955+ char c = '\0';
59956+
59957+ read_lock(&gr_inode_lock);
59958+
59959+ p = name + len - 1;
59960+ do {
59961+ nmatch = lookup_name_entry(name);
59962+ if (lastp != NULL)
59963+ *lastp = c;
59964+
59965+ if (nmatch == NULL)
59966+ goto next_component;
59967+ tmpsubj = current->acl;
59968+ do {
59969+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
59970+ if (obj != NULL) {
59971+ tmp = obj->globbed;
59972+ while (tmp) {
59973+ if (!glob_match(tmp->filename, name)) {
59974+ obj = tmp;
59975+ goto found_obj;
59976+ }
59977+ tmp = tmp->next;
59978+ }
59979+ goto found_obj;
59980+ }
59981+ } while ((tmpsubj = tmpsubj->parent_subject));
59982+next_component:
59983+ /* end case */
59984+ if (p == name)
59985+ break;
59986+
59987+ while (*p != '/')
59988+ p--;
59989+ if (p == name)
59990+ lastp = p + 1;
59991+ else {
59992+ lastp = p;
59993+ p--;
59994+ }
59995+ c = *lastp;
59996+ *lastp = '\0';
59997+ } while (1);
59998+found_obj:
59999+ read_unlock(&gr_inode_lock);
60000+ /* obj returned will always be non-null */
60001+ return obj;
60002+}
60003+
60004+/* returns 0 when allowing, non-zero on error
60005+ op of 0 is used for readdir, so we don't log the names of hidden files
60006+*/
60007+__u32
60008+gr_handle_sysctl(const struct ctl_table *table, const int op)
60009+{
60010+ ctl_table *tmp;
60011+ const char *proc_sys = "/proc/sys";
60012+ char *path;
60013+ struct acl_object_label *obj;
60014+ unsigned short len = 0, pos = 0, depth = 0, i;
60015+ __u32 err = 0;
60016+ __u32 mode = 0;
60017+
60018+ if (unlikely(!(gr_status & GR_READY)))
60019+ return 0;
60020+
60021+ /* for now, ignore operations on non-sysctl entries if it's not a
60022+ readdir*/
60023+ if (table->child != NULL && op != 0)
60024+ return 0;
60025+
60026+ mode |= GR_FIND;
60027+ /* it's only a read if it's an entry, read on dirs is for readdir */
60028+ if (op & MAY_READ)
60029+ mode |= GR_READ;
60030+ if (op & MAY_WRITE)
60031+ mode |= GR_WRITE;
60032+
60033+ preempt_disable();
60034+
60035+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
60036+
60037+ /* it's only a read/write if it's an actual entry, not a dir
60038+ (which are opened for readdir)
60039+ */
60040+
60041+ /* convert the requested sysctl entry into a pathname */
60042+
60043+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60044+ len += strlen(tmp->procname);
60045+ len++;
60046+ depth++;
60047+ }
60048+
60049+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
60050+ /* deny */
60051+ goto out;
60052+ }
60053+
60054+ memset(path, 0, PAGE_SIZE);
60055+
60056+ memcpy(path, proc_sys, strlen(proc_sys));
60057+
60058+ pos += strlen(proc_sys);
60059+
60060+ for (; depth > 0; depth--) {
60061+ path[pos] = '/';
60062+ pos++;
60063+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60064+ if (depth == i) {
60065+ memcpy(path + pos, tmp->procname,
60066+ strlen(tmp->procname));
60067+ pos += strlen(tmp->procname);
60068+ }
60069+ i++;
60070+ }
60071+ }
60072+
60073+ obj = gr_lookup_by_name(path, pos);
60074+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
60075+
60076+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
60077+ ((err & mode) != mode))) {
60078+ __u32 new_mode = mode;
60079+
60080+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60081+
60082+ err = 0;
60083+ gr_log_learn_sysctl(path, new_mode);
60084+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
60085+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
60086+ err = -ENOENT;
60087+ } else if (!(err & GR_FIND)) {
60088+ err = -ENOENT;
60089+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
60090+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
60091+ path, (mode & GR_READ) ? " reading" : "",
60092+ (mode & GR_WRITE) ? " writing" : "");
60093+ err = -EACCES;
60094+ } else if ((err & mode) != mode) {
60095+ err = -EACCES;
60096+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
60097+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
60098+ path, (mode & GR_READ) ? " reading" : "",
60099+ (mode & GR_WRITE) ? " writing" : "");
60100+ err = 0;
60101+ } else
60102+ err = 0;
60103+
60104+ out:
60105+ preempt_enable();
60106+
60107+ return err;
60108+}
60109+#endif
60110+
60111+int
60112+gr_handle_proc_ptrace(struct task_struct *task)
60113+{
60114+ struct file *filp;
60115+ struct task_struct *tmp = task;
60116+ struct task_struct *curtemp = current;
60117+ __u32 retmode;
60118+
60119+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60120+ if (unlikely(!(gr_status & GR_READY)))
60121+ return 0;
60122+#endif
60123+
60124+ read_lock(&tasklist_lock);
60125+ read_lock(&grsec_exec_file_lock);
60126+ filp = task->exec_file;
60127+
60128+ while (tmp->pid > 0) {
60129+ if (tmp == curtemp)
60130+ break;
60131+ tmp = tmp->real_parent;
60132+ }
60133+
60134+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60135+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60136+ read_unlock(&grsec_exec_file_lock);
60137+ read_unlock(&tasklist_lock);
60138+ return 1;
60139+ }
60140+
60141+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60142+ if (!(gr_status & GR_READY)) {
60143+ read_unlock(&grsec_exec_file_lock);
60144+ read_unlock(&tasklist_lock);
60145+ return 0;
60146+ }
60147+#endif
60148+
60149+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60150+ read_unlock(&grsec_exec_file_lock);
60151+ read_unlock(&tasklist_lock);
60152+
60153+ if (retmode & GR_NOPTRACE)
60154+ return 1;
60155+
60156+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60157+ && (current->acl != task->acl || (current->acl != current->role->root_label
60158+ && current->pid != task->pid)))
60159+ return 1;
60160+
60161+ return 0;
60162+}
60163+
60164+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60165+{
60166+ if (unlikely(!(gr_status & GR_READY)))
60167+ return;
60168+
60169+ if (!(current->role->roletype & GR_ROLE_GOD))
60170+ return;
60171+
60172+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60173+ p->role->rolename, gr_task_roletype_to_char(p),
60174+ p->acl->filename);
60175+}
60176+
60177+int
60178+gr_handle_ptrace(struct task_struct *task, const long request)
60179+{
60180+ struct task_struct *tmp = task;
60181+ struct task_struct *curtemp = current;
60182+ __u32 retmode;
60183+
60184+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60185+ if (unlikely(!(gr_status & GR_READY)))
60186+ return 0;
60187+#endif
60188+
60189+ read_lock(&tasklist_lock);
60190+ while (tmp->pid > 0) {
60191+ if (tmp == curtemp)
60192+ break;
60193+ tmp = tmp->real_parent;
60194+ }
60195+
60196+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60197+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60198+ read_unlock(&tasklist_lock);
60199+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60200+ return 1;
60201+ }
60202+ read_unlock(&tasklist_lock);
60203+
60204+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60205+ if (!(gr_status & GR_READY))
60206+ return 0;
60207+#endif
60208+
60209+ read_lock(&grsec_exec_file_lock);
60210+ if (unlikely(!task->exec_file)) {
60211+ read_unlock(&grsec_exec_file_lock);
60212+ return 0;
60213+ }
60214+
60215+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60216+ read_unlock(&grsec_exec_file_lock);
60217+
60218+ if (retmode & GR_NOPTRACE) {
60219+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60220+ return 1;
60221+ }
60222+
60223+ if (retmode & GR_PTRACERD) {
60224+ switch (request) {
60225+ case PTRACE_POKETEXT:
60226+ case PTRACE_POKEDATA:
60227+ case PTRACE_POKEUSR:
60228+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60229+ case PTRACE_SETREGS:
60230+ case PTRACE_SETFPREGS:
60231+#endif
60232+#ifdef CONFIG_X86
60233+ case PTRACE_SETFPXREGS:
60234+#endif
60235+#ifdef CONFIG_ALTIVEC
60236+ case PTRACE_SETVRREGS:
60237+#endif
60238+ return 1;
60239+ default:
60240+ return 0;
60241+ }
60242+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
60243+ !(current->role->roletype & GR_ROLE_GOD) &&
60244+ (current->acl != task->acl)) {
60245+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60246+ return 1;
60247+ }
60248+
60249+ return 0;
60250+}
60251+
60252+static int is_writable_mmap(const struct file *filp)
60253+{
60254+ struct task_struct *task = current;
60255+ struct acl_object_label *obj, *obj2;
60256+
60257+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60258+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60259+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60260+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60261+ task->role->root_label);
60262+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60263+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60264+ return 1;
60265+ }
60266+ }
60267+ return 0;
60268+}
60269+
60270+int
60271+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60272+{
60273+ __u32 mode;
60274+
60275+ if (unlikely(!file || !(prot & PROT_EXEC)))
60276+ return 1;
60277+
60278+ if (is_writable_mmap(file))
60279+ return 0;
60280+
60281+ mode =
60282+ gr_search_file(file->f_path.dentry,
60283+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60284+ file->f_path.mnt);
60285+
60286+ if (!gr_tpe_allow(file))
60287+ return 0;
60288+
60289+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60290+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60291+ return 0;
60292+ } else if (unlikely(!(mode & GR_EXEC))) {
60293+ return 0;
60294+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60295+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60296+ return 1;
60297+ }
60298+
60299+ return 1;
60300+}
60301+
60302+int
60303+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60304+{
60305+ __u32 mode;
60306+
60307+ if (unlikely(!file || !(prot & PROT_EXEC)))
60308+ return 1;
60309+
60310+ if (is_writable_mmap(file))
60311+ return 0;
60312+
60313+ mode =
60314+ gr_search_file(file->f_path.dentry,
60315+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60316+ file->f_path.mnt);
60317+
60318+ if (!gr_tpe_allow(file))
60319+ return 0;
60320+
60321+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60322+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60323+ return 0;
60324+ } else if (unlikely(!(mode & GR_EXEC))) {
60325+ return 0;
60326+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60327+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60328+ return 1;
60329+ }
60330+
60331+ return 1;
60332+}
60333+
60334+void
60335+gr_acl_handle_psacct(struct task_struct *task, const long code)
60336+{
60337+ unsigned long runtime;
60338+ unsigned long cputime;
60339+ unsigned int wday, cday;
60340+ __u8 whr, chr;
60341+ __u8 wmin, cmin;
60342+ __u8 wsec, csec;
60343+ struct timespec timeval;
60344+
60345+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60346+ !(task->acl->mode & GR_PROCACCT)))
60347+ return;
60348+
60349+ do_posix_clock_monotonic_gettime(&timeval);
60350+ runtime = timeval.tv_sec - task->start_time.tv_sec;
60351+ wday = runtime / (3600 * 24);
60352+ runtime -= wday * (3600 * 24);
60353+ whr = runtime / 3600;
60354+ runtime -= whr * 3600;
60355+ wmin = runtime / 60;
60356+ runtime -= wmin * 60;
60357+ wsec = runtime;
60358+
60359+ cputime = (task->utime + task->stime) / HZ;
60360+ cday = cputime / (3600 * 24);
60361+ cputime -= cday * (3600 * 24);
60362+ chr = cputime / 3600;
60363+ cputime -= chr * 3600;
60364+ cmin = cputime / 60;
60365+ cputime -= cmin * 60;
60366+ csec = cputime;
60367+
60368+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60369+
60370+ return;
60371+}
60372+
60373+void gr_set_kernel_label(struct task_struct *task)
60374+{
60375+ if (gr_status & GR_READY) {
60376+ task->role = kernel_role;
60377+ task->acl = kernel_role->root_label;
60378+ }
60379+ return;
60380+}
60381+
60382+#ifdef CONFIG_TASKSTATS
60383+int gr_is_taskstats_denied(int pid)
60384+{
60385+ struct task_struct *task;
60386+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60387+ const struct cred *cred;
60388+#endif
60389+ int ret = 0;
60390+
60391+ /* restrict taskstats viewing to un-chrooted root users
60392+ who have the 'view' subject flag if the RBAC system is enabled
60393+ */
60394+
60395+ rcu_read_lock();
60396+ read_lock(&tasklist_lock);
60397+ task = find_task_by_vpid(pid);
60398+ if (task) {
60399+#ifdef CONFIG_GRKERNSEC_CHROOT
60400+ if (proc_is_chrooted(task))
60401+ ret = -EACCES;
60402+#endif
60403+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60404+ cred = __task_cred(task);
60405+#ifdef CONFIG_GRKERNSEC_PROC_USER
60406+ if (cred->uid != 0)
60407+ ret = -EACCES;
60408+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60409+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60410+ ret = -EACCES;
60411+#endif
60412+#endif
60413+ if (gr_status & GR_READY) {
60414+ if (!(task->acl->mode & GR_VIEW))
60415+ ret = -EACCES;
60416+ }
60417+ } else
60418+ ret = -ENOENT;
60419+
60420+ read_unlock(&tasklist_lock);
60421+ rcu_read_unlock();
60422+
60423+ return ret;
60424+}
60425+#endif
60426+
60427+/* AUXV entries are filled via a descendant of search_binary_handler
60428+ after we've already applied the subject for the target
60429+*/
60430+int gr_acl_enable_at_secure(void)
60431+{
60432+ if (unlikely(!(gr_status & GR_READY)))
60433+ return 0;
60434+
60435+ if (current->acl->mode & GR_ATSECURE)
60436+ return 1;
60437+
60438+ return 0;
60439+}
60440+
60441+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60442+{
60443+ struct task_struct *task = current;
60444+ struct dentry *dentry = file->f_path.dentry;
60445+ struct vfsmount *mnt = file->f_path.mnt;
60446+ struct acl_object_label *obj, *tmp;
60447+ struct acl_subject_label *subj;
60448+ unsigned int bufsize;
60449+ int is_not_root;
60450+ char *path;
60451+ dev_t dev = __get_dev(dentry);
60452+
60453+ if (unlikely(!(gr_status & GR_READY)))
60454+ return 1;
60455+
60456+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60457+ return 1;
60458+
60459+ /* ignore Eric Biederman */
60460+ if (IS_PRIVATE(dentry->d_inode))
60461+ return 1;
60462+
60463+ subj = task->acl;
60464+ do {
60465+ obj = lookup_acl_obj_label(ino, dev, subj);
60466+ if (obj != NULL)
60467+ return (obj->mode & GR_FIND) ? 1 : 0;
60468+ } while ((subj = subj->parent_subject));
60469+
60470+ /* this is purely an optimization since we're looking for an object
60471+ for the directory we're doing a readdir on
60472+ if it's possible for any globbed object to match the entry we're
60473+ filling into the directory, then the object we find here will be
60474+ an anchor point with attached globbed objects
60475+ */
60476+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60477+ if (obj->globbed == NULL)
60478+ return (obj->mode & GR_FIND) ? 1 : 0;
60479+
60480+ is_not_root = ((obj->filename[0] == '/') &&
60481+ (obj->filename[1] == '\0')) ? 0 : 1;
60482+ bufsize = PAGE_SIZE - namelen - is_not_root;
60483+
60484+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
60485+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60486+ return 1;
60487+
60488+ preempt_disable();
60489+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60490+ bufsize);
60491+
60492+ bufsize = strlen(path);
60493+
60494+ /* if base is "/", don't append an additional slash */
60495+ if (is_not_root)
60496+ *(path + bufsize) = '/';
60497+ memcpy(path + bufsize + is_not_root, name, namelen);
60498+ *(path + bufsize + namelen + is_not_root) = '\0';
60499+
60500+ tmp = obj->globbed;
60501+ while (tmp) {
60502+ if (!glob_match(tmp->filename, path)) {
60503+ preempt_enable();
60504+ return (tmp->mode & GR_FIND) ? 1 : 0;
60505+ }
60506+ tmp = tmp->next;
60507+ }
60508+ preempt_enable();
60509+ return (obj->mode & GR_FIND) ? 1 : 0;
60510+}
60511+
60512+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60513+EXPORT_SYMBOL(gr_acl_is_enabled);
60514+#endif
60515+EXPORT_SYMBOL(gr_learn_resource);
60516+EXPORT_SYMBOL(gr_set_kernel_label);
60517+#ifdef CONFIG_SECURITY
60518+EXPORT_SYMBOL(gr_check_user_change);
60519+EXPORT_SYMBOL(gr_check_group_change);
60520+#endif
60521+
60522diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60523new file mode 100644
60524index 0000000..34fefda
60525--- /dev/null
60526+++ b/grsecurity/gracl_alloc.c
60527@@ -0,0 +1,105 @@
60528+#include <linux/kernel.h>
60529+#include <linux/mm.h>
60530+#include <linux/slab.h>
60531+#include <linux/vmalloc.h>
60532+#include <linux/gracl.h>
60533+#include <linux/grsecurity.h>
60534+
60535+static unsigned long alloc_stack_next = 1;
60536+static unsigned long alloc_stack_size = 1;
60537+static void **alloc_stack;
60538+
60539+static __inline__ int
60540+alloc_pop(void)
60541+{
60542+ if (alloc_stack_next == 1)
60543+ return 0;
60544+
60545+ kfree(alloc_stack[alloc_stack_next - 2]);
60546+
60547+ alloc_stack_next--;
60548+
60549+ return 1;
60550+}
60551+
60552+static __inline__ int
60553+alloc_push(void *buf)
60554+{
60555+ if (alloc_stack_next >= alloc_stack_size)
60556+ return 1;
60557+
60558+ alloc_stack[alloc_stack_next - 1] = buf;
60559+
60560+ alloc_stack_next++;
60561+
60562+ return 0;
60563+}
60564+
60565+void *
60566+acl_alloc(unsigned long len)
60567+{
60568+ void *ret = NULL;
60569+
60570+ if (!len || len > PAGE_SIZE)
60571+ goto out;
60572+
60573+ ret = kmalloc(len, GFP_KERNEL);
60574+
60575+ if (ret) {
60576+ if (alloc_push(ret)) {
60577+ kfree(ret);
60578+ ret = NULL;
60579+ }
60580+ }
60581+
60582+out:
60583+ return ret;
60584+}
60585+
60586+void *
60587+acl_alloc_num(unsigned long num, unsigned long len)
60588+{
60589+ if (!len || (num > (PAGE_SIZE / len)))
60590+ return NULL;
60591+
60592+ return acl_alloc(num * len);
60593+}
60594+
60595+void
60596+acl_free_all(void)
60597+{
60598+ if (gr_acl_is_enabled() || !alloc_stack)
60599+ return;
60600+
60601+ while (alloc_pop()) ;
60602+
60603+ if (alloc_stack) {
60604+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60605+ kfree(alloc_stack);
60606+ else
60607+ vfree(alloc_stack);
60608+ }
60609+
60610+ alloc_stack = NULL;
60611+ alloc_stack_size = 1;
60612+ alloc_stack_next = 1;
60613+
60614+ return;
60615+}
60616+
60617+int
60618+acl_alloc_stack_init(unsigned long size)
60619+{
60620+ if ((size * sizeof (void *)) <= PAGE_SIZE)
60621+ alloc_stack =
60622+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60623+ else
60624+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
60625+
60626+ alloc_stack_size = size;
60627+
60628+ if (!alloc_stack)
60629+ return 0;
60630+ else
60631+ return 1;
60632+}
60633diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60634new file mode 100644
60635index 0000000..955ddfb
60636--- /dev/null
60637+++ b/grsecurity/gracl_cap.c
60638@@ -0,0 +1,101 @@
60639+#include <linux/kernel.h>
60640+#include <linux/module.h>
60641+#include <linux/sched.h>
60642+#include <linux/gracl.h>
60643+#include <linux/grsecurity.h>
60644+#include <linux/grinternal.h>
60645+
60646+extern const char *captab_log[];
60647+extern int captab_log_entries;
60648+
60649+int
60650+gr_acl_is_capable(const int cap)
60651+{
60652+ struct task_struct *task = current;
60653+ const struct cred *cred = current_cred();
60654+ struct acl_subject_label *curracl;
60655+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60656+ kernel_cap_t cap_audit = __cap_empty_set;
60657+
60658+ if (!gr_acl_is_enabled())
60659+ return 1;
60660+
60661+ curracl = task->acl;
60662+
60663+ cap_drop = curracl->cap_lower;
60664+ cap_mask = curracl->cap_mask;
60665+ cap_audit = curracl->cap_invert_audit;
60666+
60667+ while ((curracl = curracl->parent_subject)) {
60668+ /* if the cap isn't specified in the current computed mask but is specified in the
60669+ current level subject, and is lowered in the current level subject, then add
60670+ it to the set of dropped capabilities
60671+ otherwise, add the current level subject's mask to the current computed mask
60672+ */
60673+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60674+ cap_raise(cap_mask, cap);
60675+ if (cap_raised(curracl->cap_lower, cap))
60676+ cap_raise(cap_drop, cap);
60677+ if (cap_raised(curracl->cap_invert_audit, cap))
60678+ cap_raise(cap_audit, cap);
60679+ }
60680+ }
60681+
60682+ if (!cap_raised(cap_drop, cap)) {
60683+ if (cap_raised(cap_audit, cap))
60684+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60685+ return 1;
60686+ }
60687+
60688+ curracl = task->acl;
60689+
60690+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60691+ && cap_raised(cred->cap_effective, cap)) {
60692+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60693+ task->role->roletype, cred->uid,
60694+ cred->gid, task->exec_file ?
60695+ gr_to_filename(task->exec_file->f_path.dentry,
60696+ task->exec_file->f_path.mnt) : curracl->filename,
60697+ curracl->filename, 0UL,
60698+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60699+ return 1;
60700+ }
60701+
60702+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60703+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60704+ return 0;
60705+}
60706+
60707+int
60708+gr_acl_is_capable_nolog(const int cap)
60709+{
60710+ struct acl_subject_label *curracl;
60711+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60712+
60713+ if (!gr_acl_is_enabled())
60714+ return 1;
60715+
60716+ curracl = current->acl;
60717+
60718+ cap_drop = curracl->cap_lower;
60719+ cap_mask = curracl->cap_mask;
60720+
60721+ while ((curracl = curracl->parent_subject)) {
60722+ /* if the cap isn't specified in the current computed mask but is specified in the
60723+ current level subject, and is lowered in the current level subject, then add
60724+ it to the set of dropped capabilities
60725+ otherwise, add the current level subject's mask to the current computed mask
60726+ */
60727+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60728+ cap_raise(cap_mask, cap);
60729+ if (cap_raised(curracl->cap_lower, cap))
60730+ cap_raise(cap_drop, cap);
60731+ }
60732+ }
60733+
60734+ if (!cap_raised(cap_drop, cap))
60735+ return 1;
60736+
60737+ return 0;
60738+}
60739+
60740diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60741new file mode 100644
60742index 0000000..523e7e8
60743--- /dev/null
60744+++ b/grsecurity/gracl_fs.c
60745@@ -0,0 +1,435 @@
60746+#include <linux/kernel.h>
60747+#include <linux/sched.h>
60748+#include <linux/types.h>
60749+#include <linux/fs.h>
60750+#include <linux/file.h>
60751+#include <linux/stat.h>
60752+#include <linux/grsecurity.h>
60753+#include <linux/grinternal.h>
60754+#include <linux/gracl.h>
60755+
60756+umode_t
60757+gr_acl_umask(void)
60758+{
60759+ if (unlikely(!gr_acl_is_enabled()))
60760+ return 0;
60761+
60762+ return current->role->umask;
60763+}
60764+
60765+__u32
60766+gr_acl_handle_hidden_file(const struct dentry * dentry,
60767+ const struct vfsmount * mnt)
60768+{
60769+ __u32 mode;
60770+
60771+ if (unlikely(!dentry->d_inode))
60772+ return GR_FIND;
60773+
60774+ mode =
60775+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60776+
60777+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60778+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60779+ return mode;
60780+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60781+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60782+ return 0;
60783+ } else if (unlikely(!(mode & GR_FIND)))
60784+ return 0;
60785+
60786+ return GR_FIND;
60787+}
60788+
60789+__u32
60790+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60791+ int acc_mode)
60792+{
60793+ __u32 reqmode = GR_FIND;
60794+ __u32 mode;
60795+
60796+ if (unlikely(!dentry->d_inode))
60797+ return reqmode;
60798+
60799+ if (acc_mode & MAY_APPEND)
60800+ reqmode |= GR_APPEND;
60801+ else if (acc_mode & MAY_WRITE)
60802+ reqmode |= GR_WRITE;
60803+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60804+ reqmode |= GR_READ;
60805+
60806+ mode =
60807+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60808+ mnt);
60809+
60810+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60811+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60812+ reqmode & GR_READ ? " reading" : "",
60813+ reqmode & GR_WRITE ? " writing" : reqmode &
60814+ GR_APPEND ? " appending" : "");
60815+ return reqmode;
60816+ } else
60817+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60818+ {
60819+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60820+ reqmode & GR_READ ? " reading" : "",
60821+ reqmode & GR_WRITE ? " writing" : reqmode &
60822+ GR_APPEND ? " appending" : "");
60823+ return 0;
60824+ } else if (unlikely((mode & reqmode) != reqmode))
60825+ return 0;
60826+
60827+ return reqmode;
60828+}
60829+
60830+__u32
60831+gr_acl_handle_creat(const struct dentry * dentry,
60832+ const struct dentry * p_dentry,
60833+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60834+ const int imode)
60835+{
60836+ __u32 reqmode = GR_WRITE | GR_CREATE;
60837+ __u32 mode;
60838+
60839+ if (acc_mode & MAY_APPEND)
60840+ reqmode |= GR_APPEND;
60841+ // if a directory was required or the directory already exists, then
60842+ // don't count this open as a read
60843+ if ((acc_mode & MAY_READ) &&
60844+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60845+ reqmode |= GR_READ;
60846+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60847+ reqmode |= GR_SETID;
60848+
60849+ mode =
60850+ gr_check_create(dentry, p_dentry, p_mnt,
60851+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60852+
60853+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60854+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60855+ reqmode & GR_READ ? " reading" : "",
60856+ reqmode & GR_WRITE ? " writing" : reqmode &
60857+ GR_APPEND ? " appending" : "");
60858+ return reqmode;
60859+ } else
60860+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60861+ {
60862+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60863+ reqmode & GR_READ ? " reading" : "",
60864+ reqmode & GR_WRITE ? " writing" : reqmode &
60865+ GR_APPEND ? " appending" : "");
60866+ return 0;
60867+ } else if (unlikely((mode & reqmode) != reqmode))
60868+ return 0;
60869+
60870+ return reqmode;
60871+}
60872+
60873+__u32
60874+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60875+ const int fmode)
60876+{
60877+ __u32 mode, reqmode = GR_FIND;
60878+
60879+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60880+ reqmode |= GR_EXEC;
60881+ if (fmode & S_IWOTH)
60882+ reqmode |= GR_WRITE;
60883+ if (fmode & S_IROTH)
60884+ reqmode |= GR_READ;
60885+
60886+ mode =
60887+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60888+ mnt);
60889+
60890+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60891+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60892+ reqmode & GR_READ ? " reading" : "",
60893+ reqmode & GR_WRITE ? " writing" : "",
60894+ reqmode & GR_EXEC ? " executing" : "");
60895+ return reqmode;
60896+ } else
60897+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60898+ {
60899+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60900+ reqmode & GR_READ ? " reading" : "",
60901+ reqmode & GR_WRITE ? " writing" : "",
60902+ reqmode & GR_EXEC ? " executing" : "");
60903+ return 0;
60904+ } else if (unlikely((mode & reqmode) != reqmode))
60905+ return 0;
60906+
60907+ return reqmode;
60908+}
60909+
60910+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60911+{
60912+ __u32 mode;
60913+
60914+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60915+
60916+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60917+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60918+ return mode;
60919+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60920+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60921+ return 0;
60922+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
60923+ return 0;
60924+
60925+ return (reqmode);
60926+}
60927+
60928+__u32
60929+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60930+{
60931+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60932+}
60933+
60934+__u32
60935+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60936+{
60937+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60938+}
60939+
60940+__u32
60941+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60942+{
60943+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60944+}
60945+
60946+__u32
60947+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60948+{
60949+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60950+}
60951+
60952+__u32
60953+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60954+ umode_t *modeptr)
60955+{
60956+ mode_t mode;
60957+
60958+ *modeptr &= ~(mode_t)gr_acl_umask();
60959+ mode = *modeptr;
60960+
60961+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
60962+ return 1;
60963+
60964+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
60965+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60966+ GR_CHMOD_ACL_MSG);
60967+ } else {
60968+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
60969+ }
60970+}
60971+
60972+__u32
60973+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
60974+{
60975+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
60976+}
60977+
60978+__u32
60979+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
60980+{
60981+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
60982+}
60983+
60984+__u32
60985+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60986+{
60987+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60988+}
60989+
60990+__u32
60991+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
60992+{
60993+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
60994+ GR_UNIXCONNECT_ACL_MSG);
60995+}
60996+
60997+/* hardlinks require at minimum create and link permission,
60998+ any additional privilege required is based on the
60999+ privilege of the file being linked to
61000+*/
61001+__u32
61002+gr_acl_handle_link(const struct dentry * new_dentry,
61003+ const struct dentry * parent_dentry,
61004+ const struct vfsmount * parent_mnt,
61005+ const struct dentry * old_dentry,
61006+ const struct vfsmount * old_mnt, const char *to)
61007+{
61008+ __u32 mode;
61009+ __u32 needmode = GR_CREATE | GR_LINK;
61010+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61011+
61012+ mode =
61013+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61014+ old_mnt);
61015+
61016+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61017+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61018+ return mode;
61019+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61020+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61021+ return 0;
61022+ } else if (unlikely((mode & needmode) != needmode))
61023+ return 0;
61024+
61025+ return 1;
61026+}
61027+
61028+__u32
61029+gr_acl_handle_symlink(const struct dentry * new_dentry,
61030+ const struct dentry * parent_dentry,
61031+ const struct vfsmount * parent_mnt, const char *from)
61032+{
61033+ __u32 needmode = GR_WRITE | GR_CREATE;
61034+ __u32 mode;
61035+
61036+ mode =
61037+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
61038+ GR_CREATE | GR_AUDIT_CREATE |
61039+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61040+
61041+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61042+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61043+ return mode;
61044+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61045+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61046+ return 0;
61047+ } else if (unlikely((mode & needmode) != needmode))
61048+ return 0;
61049+
61050+ return (GR_WRITE | GR_CREATE);
61051+}
61052+
61053+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61054+{
61055+ __u32 mode;
61056+
61057+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61058+
61059+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61060+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61061+ return mode;
61062+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61063+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61064+ return 0;
61065+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
61066+ return 0;
61067+
61068+ return (reqmode);
61069+}
61070+
61071+__u32
61072+gr_acl_handle_mknod(const struct dentry * new_dentry,
61073+ const struct dentry * parent_dentry,
61074+ const struct vfsmount * parent_mnt,
61075+ const int mode)
61076+{
61077+ __u32 reqmode = GR_WRITE | GR_CREATE;
61078+ if (unlikely(mode & (S_ISUID | S_ISGID)))
61079+ reqmode |= GR_SETID;
61080+
61081+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61082+ reqmode, GR_MKNOD_ACL_MSG);
61083+}
61084+
61085+__u32
61086+gr_acl_handle_mkdir(const struct dentry *new_dentry,
61087+ const struct dentry *parent_dentry,
61088+ const struct vfsmount *parent_mnt)
61089+{
61090+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61091+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61092+}
61093+
61094+#define RENAME_CHECK_SUCCESS(old, new) \
61095+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61096+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61097+
61098+int
61099+gr_acl_handle_rename(struct dentry *new_dentry,
61100+ struct dentry *parent_dentry,
61101+ const struct vfsmount *parent_mnt,
61102+ struct dentry *old_dentry,
61103+ struct inode *old_parent_inode,
61104+ struct vfsmount *old_mnt, const char *newname)
61105+{
61106+ __u32 comp1, comp2;
61107+ int error = 0;
61108+
61109+ if (unlikely(!gr_acl_is_enabled()))
61110+ return 0;
61111+
61112+ if (!new_dentry->d_inode) {
61113+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61114+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61115+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61116+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61117+ GR_DELETE | GR_AUDIT_DELETE |
61118+ GR_AUDIT_READ | GR_AUDIT_WRITE |
61119+ GR_SUPPRESS, old_mnt);
61120+ } else {
61121+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61122+ GR_CREATE | GR_DELETE |
61123+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61124+ GR_AUDIT_READ | GR_AUDIT_WRITE |
61125+ GR_SUPPRESS, parent_mnt);
61126+ comp2 =
61127+ gr_search_file(old_dentry,
61128+ GR_READ | GR_WRITE | GR_AUDIT_READ |
61129+ GR_DELETE | GR_AUDIT_DELETE |
61130+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61131+ }
61132+
61133+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61134+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61135+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61136+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61137+ && !(comp2 & GR_SUPPRESS)) {
61138+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61139+ error = -EACCES;
61140+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61141+ error = -EACCES;
61142+
61143+ return error;
61144+}
61145+
61146+void
61147+gr_acl_handle_exit(void)
61148+{
61149+ u16 id;
61150+ char *rolename;
61151+ struct file *exec_file;
61152+
61153+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61154+ !(current->role->roletype & GR_ROLE_PERSIST))) {
61155+ id = current->acl_role_id;
61156+ rolename = current->role->rolename;
61157+ gr_set_acls(1);
61158+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61159+ }
61160+
61161+ write_lock(&grsec_exec_file_lock);
61162+ exec_file = current->exec_file;
61163+ current->exec_file = NULL;
61164+ write_unlock(&grsec_exec_file_lock);
61165+
61166+ if (exec_file)
61167+ fput(exec_file);
61168+}
61169+
61170+int
61171+gr_acl_handle_procpidmem(const struct task_struct *task)
61172+{
61173+ if (unlikely(!gr_acl_is_enabled()))
61174+ return 0;
61175+
61176+ if (task != current && task->acl->mode & GR_PROTPROCFD)
61177+ return -EACCES;
61178+
61179+ return 0;
61180+}
61181diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61182new file mode 100644
61183index 0000000..cd07b96
61184--- /dev/null
61185+++ b/grsecurity/gracl_ip.c
61186@@ -0,0 +1,382 @@
61187+#include <linux/kernel.h>
61188+#include <asm/uaccess.h>
61189+#include <asm/errno.h>
61190+#include <net/sock.h>
61191+#include <linux/file.h>
61192+#include <linux/fs.h>
61193+#include <linux/net.h>
61194+#include <linux/in.h>
61195+#include <linux/skbuff.h>
61196+#include <linux/ip.h>
61197+#include <linux/udp.h>
61198+#include <linux/smp_lock.h>
61199+#include <linux/types.h>
61200+#include <linux/sched.h>
61201+#include <linux/netdevice.h>
61202+#include <linux/inetdevice.h>
61203+#include <linux/gracl.h>
61204+#include <linux/grsecurity.h>
61205+#include <linux/grinternal.h>
61206+
61207+#define GR_BIND 0x01
61208+#define GR_CONNECT 0x02
61209+#define GR_INVERT 0x04
61210+#define GR_BINDOVERRIDE 0x08
61211+#define GR_CONNECTOVERRIDE 0x10
61212+#define GR_SOCK_FAMILY 0x20
61213+
61214+static const char * gr_protocols[IPPROTO_MAX] = {
61215+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61216+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61217+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61218+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61219+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61220+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61221+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61222+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61223+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61224+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61225+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61226+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61227+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61228+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61229+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61230+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61231+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61232+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61233+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61234+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61235+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61236+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61237+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61238+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61239+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61240+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61241+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61242+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61243+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61244+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61245+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61246+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61247+ };
61248+
61249+static const char * gr_socktypes[SOCK_MAX] = {
61250+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61251+ "unknown:7", "unknown:8", "unknown:9", "packet"
61252+ };
61253+
61254+static const char * gr_sockfamilies[AF_MAX+1] = {
61255+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61256+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61257+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61258+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
61259+ };
61260+
61261+const char *
61262+gr_proto_to_name(unsigned char proto)
61263+{
61264+ return gr_protocols[proto];
61265+}
61266+
61267+const char *
61268+gr_socktype_to_name(unsigned char type)
61269+{
61270+ return gr_socktypes[type];
61271+}
61272+
61273+const char *
61274+gr_sockfamily_to_name(unsigned char family)
61275+{
61276+ return gr_sockfamilies[family];
61277+}
61278+
61279+int
61280+gr_search_socket(const int domain, const int type, const int protocol)
61281+{
61282+ struct acl_subject_label *curr;
61283+ const struct cred *cred = current_cred();
61284+
61285+ if (unlikely(!gr_acl_is_enabled()))
61286+ goto exit;
61287+
61288+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
61289+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61290+ goto exit; // let the kernel handle it
61291+
61292+ curr = current->acl;
61293+
61294+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61295+ /* the family is allowed, if this is PF_INET allow it only if
61296+ the extra sock type/protocol checks pass */
61297+ if (domain == PF_INET)
61298+ goto inet_check;
61299+ goto exit;
61300+ } else {
61301+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61302+ __u32 fakeip = 0;
61303+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61304+ current->role->roletype, cred->uid,
61305+ cred->gid, current->exec_file ?
61306+ gr_to_filename(current->exec_file->f_path.dentry,
61307+ current->exec_file->f_path.mnt) :
61308+ curr->filename, curr->filename,
61309+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61310+ &current->signal->saved_ip);
61311+ goto exit;
61312+ }
61313+ goto exit_fail;
61314+ }
61315+
61316+inet_check:
61317+ /* the rest of this checking is for IPv4 only */
61318+ if (!curr->ips)
61319+ goto exit;
61320+
61321+ if ((curr->ip_type & (1 << type)) &&
61322+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61323+ goto exit;
61324+
61325+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61326+ /* we don't place acls on raw sockets , and sometimes
61327+ dgram/ip sockets are opened for ioctl and not
61328+ bind/connect, so we'll fake a bind learn log */
61329+ if (type == SOCK_RAW || type == SOCK_PACKET) {
61330+ __u32 fakeip = 0;
61331+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61332+ current->role->roletype, cred->uid,
61333+ cred->gid, current->exec_file ?
61334+ gr_to_filename(current->exec_file->f_path.dentry,
61335+ current->exec_file->f_path.mnt) :
61336+ curr->filename, curr->filename,
61337+ &fakeip, 0, type,
61338+ protocol, GR_CONNECT, &current->signal->saved_ip);
61339+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61340+ __u32 fakeip = 0;
61341+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61342+ current->role->roletype, cred->uid,
61343+ cred->gid, current->exec_file ?
61344+ gr_to_filename(current->exec_file->f_path.dentry,
61345+ current->exec_file->f_path.mnt) :
61346+ curr->filename, curr->filename,
61347+ &fakeip, 0, type,
61348+ protocol, GR_BIND, &current->signal->saved_ip);
61349+ }
61350+ /* we'll log when they use connect or bind */
61351+ goto exit;
61352+ }
61353+
61354+exit_fail:
61355+ if (domain == PF_INET)
61356+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61357+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
61358+ else
61359+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61360+ gr_socktype_to_name(type), protocol);
61361+
61362+ return 0;
61363+exit:
61364+ return 1;
61365+}
61366+
61367+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61368+{
61369+ if ((ip->mode & mode) &&
61370+ (ip_port >= ip->low) &&
61371+ (ip_port <= ip->high) &&
61372+ ((ntohl(ip_addr) & our_netmask) ==
61373+ (ntohl(our_addr) & our_netmask))
61374+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61375+ && (ip->type & (1 << type))) {
61376+ if (ip->mode & GR_INVERT)
61377+ return 2; // specifically denied
61378+ else
61379+ return 1; // allowed
61380+ }
61381+
61382+ return 0; // not specifically allowed, may continue parsing
61383+}
61384+
61385+static int
61386+gr_search_connectbind(const int full_mode, struct sock *sk,
61387+ struct sockaddr_in *addr, const int type)
61388+{
61389+ char iface[IFNAMSIZ] = {0};
61390+ struct acl_subject_label *curr;
61391+ struct acl_ip_label *ip;
61392+ struct inet_sock *isk;
61393+ struct net_device *dev;
61394+ struct in_device *idev;
61395+ unsigned long i;
61396+ int ret;
61397+ int mode = full_mode & (GR_BIND | GR_CONNECT);
61398+ __u32 ip_addr = 0;
61399+ __u32 our_addr;
61400+ __u32 our_netmask;
61401+ char *p;
61402+ __u16 ip_port = 0;
61403+ const struct cred *cred = current_cred();
61404+
61405+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61406+ return 0;
61407+
61408+ curr = current->acl;
61409+ isk = inet_sk(sk);
61410+
61411+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61412+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61413+ addr->sin_addr.s_addr = curr->inaddr_any_override;
61414+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61415+ struct sockaddr_in saddr;
61416+ int err;
61417+
61418+ saddr.sin_family = AF_INET;
61419+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
61420+ saddr.sin_port = isk->sport;
61421+
61422+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61423+ if (err)
61424+ return err;
61425+
61426+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61427+ if (err)
61428+ return err;
61429+ }
61430+
61431+ if (!curr->ips)
61432+ return 0;
61433+
61434+ ip_addr = addr->sin_addr.s_addr;
61435+ ip_port = ntohs(addr->sin_port);
61436+
61437+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61438+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61439+ current->role->roletype, cred->uid,
61440+ cred->gid, current->exec_file ?
61441+ gr_to_filename(current->exec_file->f_path.dentry,
61442+ current->exec_file->f_path.mnt) :
61443+ curr->filename, curr->filename,
61444+ &ip_addr, ip_port, type,
61445+ sk->sk_protocol, mode, &current->signal->saved_ip);
61446+ return 0;
61447+ }
61448+
61449+ for (i = 0; i < curr->ip_num; i++) {
61450+ ip = *(curr->ips + i);
61451+ if (ip->iface != NULL) {
61452+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
61453+ p = strchr(iface, ':');
61454+ if (p != NULL)
61455+ *p = '\0';
61456+ dev = dev_get_by_name(sock_net(sk), iface);
61457+ if (dev == NULL)
61458+ continue;
61459+ idev = in_dev_get(dev);
61460+ if (idev == NULL) {
61461+ dev_put(dev);
61462+ continue;
61463+ }
61464+ rcu_read_lock();
61465+ for_ifa(idev) {
61466+ if (!strcmp(ip->iface, ifa->ifa_label)) {
61467+ our_addr = ifa->ifa_address;
61468+ our_netmask = 0xffffffff;
61469+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61470+ if (ret == 1) {
61471+ rcu_read_unlock();
61472+ in_dev_put(idev);
61473+ dev_put(dev);
61474+ return 0;
61475+ } else if (ret == 2) {
61476+ rcu_read_unlock();
61477+ in_dev_put(idev);
61478+ dev_put(dev);
61479+ goto denied;
61480+ }
61481+ }
61482+ } endfor_ifa(idev);
61483+ rcu_read_unlock();
61484+ in_dev_put(idev);
61485+ dev_put(dev);
61486+ } else {
61487+ our_addr = ip->addr;
61488+ our_netmask = ip->netmask;
61489+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61490+ if (ret == 1)
61491+ return 0;
61492+ else if (ret == 2)
61493+ goto denied;
61494+ }
61495+ }
61496+
61497+denied:
61498+ if (mode == GR_BIND)
61499+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61500+ else if (mode == GR_CONNECT)
61501+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61502+
61503+ return -EACCES;
61504+}
61505+
61506+int
61507+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61508+{
61509+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61510+}
61511+
61512+int
61513+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61514+{
61515+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61516+}
61517+
61518+int gr_search_listen(struct socket *sock)
61519+{
61520+ struct sock *sk = sock->sk;
61521+ struct sockaddr_in addr;
61522+
61523+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61524+ addr.sin_port = inet_sk(sk)->sport;
61525+
61526+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61527+}
61528+
61529+int gr_search_accept(struct socket *sock)
61530+{
61531+ struct sock *sk = sock->sk;
61532+ struct sockaddr_in addr;
61533+
61534+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61535+ addr.sin_port = inet_sk(sk)->sport;
61536+
61537+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61538+}
61539+
61540+int
61541+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61542+{
61543+ if (addr)
61544+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61545+ else {
61546+ struct sockaddr_in sin;
61547+ const struct inet_sock *inet = inet_sk(sk);
61548+
61549+ sin.sin_addr.s_addr = inet->daddr;
61550+ sin.sin_port = inet->dport;
61551+
61552+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61553+ }
61554+}
61555+
61556+int
61557+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61558+{
61559+ struct sockaddr_in sin;
61560+
61561+ if (unlikely(skb->len < sizeof (struct udphdr)))
61562+ return 0; // skip this packet
61563+
61564+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61565+ sin.sin_port = udp_hdr(skb)->source;
61566+
61567+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61568+}
61569diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61570new file mode 100644
61571index 0000000..34bdd46
61572--- /dev/null
61573+++ b/grsecurity/gracl_learn.c
61574@@ -0,0 +1,208 @@
61575+#include <linux/kernel.h>
61576+#include <linux/mm.h>
61577+#include <linux/sched.h>
61578+#include <linux/poll.h>
61579+#include <linux/smp_lock.h>
61580+#include <linux/string.h>
61581+#include <linux/file.h>
61582+#include <linux/types.h>
61583+#include <linux/vmalloc.h>
61584+#include <linux/grinternal.h>
61585+
61586+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61587+ size_t count, loff_t *ppos);
61588+extern int gr_acl_is_enabled(void);
61589+
61590+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61591+static int gr_learn_attached;
61592+
61593+/* use a 512k buffer */
61594+#define LEARN_BUFFER_SIZE (512 * 1024)
61595+
61596+static DEFINE_SPINLOCK(gr_learn_lock);
61597+static DEFINE_MUTEX(gr_learn_user_mutex);
61598+
61599+/* we need to maintain two buffers, so that the kernel context of grlearn
61600+ uses a semaphore around the userspace copying, and the other kernel contexts
61601+ use a spinlock when copying into the buffer, since they cannot sleep
61602+*/
61603+static char *learn_buffer;
61604+static char *learn_buffer_user;
61605+static int learn_buffer_len;
61606+static int learn_buffer_user_len;
61607+
61608+static ssize_t
61609+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61610+{
61611+ DECLARE_WAITQUEUE(wait, current);
61612+ ssize_t retval = 0;
61613+
61614+ add_wait_queue(&learn_wait, &wait);
61615+ set_current_state(TASK_INTERRUPTIBLE);
61616+ do {
61617+ mutex_lock(&gr_learn_user_mutex);
61618+ spin_lock(&gr_learn_lock);
61619+ if (learn_buffer_len)
61620+ break;
61621+ spin_unlock(&gr_learn_lock);
61622+ mutex_unlock(&gr_learn_user_mutex);
61623+ if (file->f_flags & O_NONBLOCK) {
61624+ retval = -EAGAIN;
61625+ goto out;
61626+ }
61627+ if (signal_pending(current)) {
61628+ retval = -ERESTARTSYS;
61629+ goto out;
61630+ }
61631+
61632+ schedule();
61633+ } while (1);
61634+
61635+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61636+ learn_buffer_user_len = learn_buffer_len;
61637+ retval = learn_buffer_len;
61638+ learn_buffer_len = 0;
61639+
61640+ spin_unlock(&gr_learn_lock);
61641+
61642+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61643+ retval = -EFAULT;
61644+
61645+ mutex_unlock(&gr_learn_user_mutex);
61646+out:
61647+ set_current_state(TASK_RUNNING);
61648+ remove_wait_queue(&learn_wait, &wait);
61649+ return retval;
61650+}
61651+
61652+static unsigned int
61653+poll_learn(struct file * file, poll_table * wait)
61654+{
61655+ poll_wait(file, &learn_wait, wait);
61656+
61657+ if (learn_buffer_len)
61658+ return (POLLIN | POLLRDNORM);
61659+
61660+ return 0;
61661+}
61662+
61663+void
61664+gr_clear_learn_entries(void)
61665+{
61666+ char *tmp;
61667+
61668+ mutex_lock(&gr_learn_user_mutex);
61669+ spin_lock(&gr_learn_lock);
61670+ tmp = learn_buffer;
61671+ learn_buffer = NULL;
61672+ spin_unlock(&gr_learn_lock);
61673+ if (tmp)
61674+ vfree(tmp);
61675+ if (learn_buffer_user != NULL) {
61676+ vfree(learn_buffer_user);
61677+ learn_buffer_user = NULL;
61678+ }
61679+ learn_buffer_len = 0;
61680+ mutex_unlock(&gr_learn_user_mutex);
61681+
61682+ return;
61683+}
61684+
61685+void
61686+gr_add_learn_entry(const char *fmt, ...)
61687+{
61688+ va_list args;
61689+ unsigned int len;
61690+
61691+ if (!gr_learn_attached)
61692+ return;
61693+
61694+ spin_lock(&gr_learn_lock);
61695+
61696+ /* leave a gap at the end so we know when it's "full" but don't have to
61697+ compute the exact length of the string we're trying to append
61698+ */
61699+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61700+ spin_unlock(&gr_learn_lock);
61701+ wake_up_interruptible(&learn_wait);
61702+ return;
61703+ }
61704+ if (learn_buffer == NULL) {
61705+ spin_unlock(&gr_learn_lock);
61706+ return;
61707+ }
61708+
61709+ va_start(args, fmt);
61710+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61711+ va_end(args);
61712+
61713+ learn_buffer_len += len + 1;
61714+
61715+ spin_unlock(&gr_learn_lock);
61716+ wake_up_interruptible(&learn_wait);
61717+
61718+ return;
61719+}
61720+
61721+static int
61722+open_learn(struct inode *inode, struct file *file)
61723+{
61724+ if (file->f_mode & FMODE_READ && gr_learn_attached)
61725+ return -EBUSY;
61726+ if (file->f_mode & FMODE_READ) {
61727+ int retval = 0;
61728+ mutex_lock(&gr_learn_user_mutex);
61729+ if (learn_buffer == NULL)
61730+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61731+ if (learn_buffer_user == NULL)
61732+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61733+ if (learn_buffer == NULL) {
61734+ retval = -ENOMEM;
61735+ goto out_error;
61736+ }
61737+ if (learn_buffer_user == NULL) {
61738+ retval = -ENOMEM;
61739+ goto out_error;
61740+ }
61741+ learn_buffer_len = 0;
61742+ learn_buffer_user_len = 0;
61743+ gr_learn_attached = 1;
61744+out_error:
61745+ mutex_unlock(&gr_learn_user_mutex);
61746+ return retval;
61747+ }
61748+ return 0;
61749+}
61750+
61751+static int
61752+close_learn(struct inode *inode, struct file *file)
61753+{
61754+ if (file->f_mode & FMODE_READ) {
61755+ char *tmp = NULL;
61756+ mutex_lock(&gr_learn_user_mutex);
61757+ spin_lock(&gr_learn_lock);
61758+ tmp = learn_buffer;
61759+ learn_buffer = NULL;
61760+ spin_unlock(&gr_learn_lock);
61761+ if (tmp)
61762+ vfree(tmp);
61763+ if (learn_buffer_user != NULL) {
61764+ vfree(learn_buffer_user);
61765+ learn_buffer_user = NULL;
61766+ }
61767+ learn_buffer_len = 0;
61768+ learn_buffer_user_len = 0;
61769+ gr_learn_attached = 0;
61770+ mutex_unlock(&gr_learn_user_mutex);
61771+ }
61772+
61773+ return 0;
61774+}
61775+
61776+const struct file_operations grsec_fops = {
61777+ .read = read_learn,
61778+ .write = write_grsec_handler,
61779+ .open = open_learn,
61780+ .release = close_learn,
61781+ .poll = poll_learn,
61782+};
61783diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61784new file mode 100644
61785index 0000000..70b2179
61786--- /dev/null
61787+++ b/grsecurity/gracl_res.c
61788@@ -0,0 +1,67 @@
61789+#include <linux/kernel.h>
61790+#include <linux/sched.h>
61791+#include <linux/gracl.h>
61792+#include <linux/grinternal.h>
61793+
61794+static const char *restab_log[] = {
61795+ [RLIMIT_CPU] = "RLIMIT_CPU",
61796+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61797+ [RLIMIT_DATA] = "RLIMIT_DATA",
61798+ [RLIMIT_STACK] = "RLIMIT_STACK",
61799+ [RLIMIT_CORE] = "RLIMIT_CORE",
61800+ [RLIMIT_RSS] = "RLIMIT_RSS",
61801+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
61802+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61803+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61804+ [RLIMIT_AS] = "RLIMIT_AS",
61805+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61806+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61807+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61808+ [RLIMIT_NICE] = "RLIMIT_NICE",
61809+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61810+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61811+ [GR_CRASH_RES] = "RLIMIT_CRASH"
61812+};
61813+
61814+void
61815+gr_log_resource(const struct task_struct *task,
61816+ const int res, const unsigned long wanted, const int gt)
61817+{
61818+ const struct cred *cred;
61819+ unsigned long rlim;
61820+
61821+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
61822+ return;
61823+
61824+ // not yet supported resource
61825+ if (unlikely(!restab_log[res]))
61826+ return;
61827+
61828+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61829+ rlim = task->signal->rlim[res].rlim_max;
61830+ else
61831+ rlim = task->signal->rlim[res].rlim_cur;
61832+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61833+ return;
61834+
61835+ rcu_read_lock();
61836+ cred = __task_cred(task);
61837+
61838+ if (res == RLIMIT_NPROC &&
61839+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61840+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61841+ goto out_rcu_unlock;
61842+ else if (res == RLIMIT_MEMLOCK &&
61843+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61844+ goto out_rcu_unlock;
61845+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61846+ goto out_rcu_unlock;
61847+ rcu_read_unlock();
61848+
61849+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61850+
61851+ return;
61852+out_rcu_unlock:
61853+ rcu_read_unlock();
61854+ return;
61855+}
61856diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61857new file mode 100644
61858index 0000000..1d1b734
61859--- /dev/null
61860+++ b/grsecurity/gracl_segv.c
61861@@ -0,0 +1,284 @@
61862+#include <linux/kernel.h>
61863+#include <linux/mm.h>
61864+#include <asm/uaccess.h>
61865+#include <asm/errno.h>
61866+#include <asm/mman.h>
61867+#include <net/sock.h>
61868+#include <linux/file.h>
61869+#include <linux/fs.h>
61870+#include <linux/net.h>
61871+#include <linux/in.h>
61872+#include <linux/smp_lock.h>
61873+#include <linux/slab.h>
61874+#include <linux/types.h>
61875+#include <linux/sched.h>
61876+#include <linux/timer.h>
61877+#include <linux/gracl.h>
61878+#include <linux/grsecurity.h>
61879+#include <linux/grinternal.h>
61880+
61881+static struct crash_uid *uid_set;
61882+static unsigned short uid_used;
61883+static DEFINE_SPINLOCK(gr_uid_lock);
61884+extern rwlock_t gr_inode_lock;
61885+extern struct acl_subject_label *
61886+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61887+ struct acl_role_label *role);
61888+extern int gr_fake_force_sig(int sig, struct task_struct *t);
61889+
61890+int
61891+gr_init_uidset(void)
61892+{
61893+ uid_set =
61894+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61895+ uid_used = 0;
61896+
61897+ return uid_set ? 1 : 0;
61898+}
61899+
61900+void
61901+gr_free_uidset(void)
61902+{
61903+ if (uid_set)
61904+ kfree(uid_set);
61905+
61906+ return;
61907+}
61908+
61909+int
61910+gr_find_uid(const uid_t uid)
61911+{
61912+ struct crash_uid *tmp = uid_set;
61913+ uid_t buid;
61914+ int low = 0, high = uid_used - 1, mid;
61915+
61916+ while (high >= low) {
61917+ mid = (low + high) >> 1;
61918+ buid = tmp[mid].uid;
61919+ if (buid == uid)
61920+ return mid;
61921+ if (buid > uid)
61922+ high = mid - 1;
61923+ if (buid < uid)
61924+ low = mid + 1;
61925+ }
61926+
61927+ return -1;
61928+}
61929+
61930+static __inline__ void
61931+gr_insertsort(void)
61932+{
61933+ unsigned short i, j;
61934+ struct crash_uid index;
61935+
61936+ for (i = 1; i < uid_used; i++) {
61937+ index = uid_set[i];
61938+ j = i;
61939+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61940+ uid_set[j] = uid_set[j - 1];
61941+ j--;
61942+ }
61943+ uid_set[j] = index;
61944+ }
61945+
61946+ return;
61947+}
61948+
61949+static __inline__ void
61950+gr_insert_uid(const uid_t uid, const unsigned long expires)
61951+{
61952+ int loc;
61953+
61954+ if (uid_used == GR_UIDTABLE_MAX)
61955+ return;
61956+
61957+ loc = gr_find_uid(uid);
61958+
61959+ if (loc >= 0) {
61960+ uid_set[loc].expires = expires;
61961+ return;
61962+ }
61963+
61964+ uid_set[uid_used].uid = uid;
61965+ uid_set[uid_used].expires = expires;
61966+ uid_used++;
61967+
61968+ gr_insertsort();
61969+
61970+ return;
61971+}
61972+
61973+void
61974+gr_remove_uid(const unsigned short loc)
61975+{
61976+ unsigned short i;
61977+
61978+ for (i = loc + 1; i < uid_used; i++)
61979+ uid_set[i - 1] = uid_set[i];
61980+
61981+ uid_used--;
61982+
61983+ return;
61984+}
61985+
61986+int
61987+gr_check_crash_uid(const uid_t uid)
61988+{
61989+ int loc;
61990+ int ret = 0;
61991+
61992+ if (unlikely(!gr_acl_is_enabled()))
61993+ return 0;
61994+
61995+ spin_lock(&gr_uid_lock);
61996+ loc = gr_find_uid(uid);
61997+
61998+ if (loc < 0)
61999+ goto out_unlock;
62000+
62001+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
62002+ gr_remove_uid(loc);
62003+ else
62004+ ret = 1;
62005+
62006+out_unlock:
62007+ spin_unlock(&gr_uid_lock);
62008+ return ret;
62009+}
62010+
62011+static __inline__ int
62012+proc_is_setxid(const struct cred *cred)
62013+{
62014+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
62015+ cred->uid != cred->fsuid)
62016+ return 1;
62017+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
62018+ cred->gid != cred->fsgid)
62019+ return 1;
62020+
62021+ return 0;
62022+}
62023+
62024+void
62025+gr_handle_crash(struct task_struct *task, const int sig)
62026+{
62027+ struct acl_subject_label *curr;
62028+ struct task_struct *tsk, *tsk2;
62029+ const struct cred *cred;
62030+ const struct cred *cred2;
62031+
62032+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62033+ return;
62034+
62035+ if (unlikely(!gr_acl_is_enabled()))
62036+ return;
62037+
62038+ curr = task->acl;
62039+
62040+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
62041+ return;
62042+
62043+ if (time_before_eq(curr->expires, get_seconds())) {
62044+ curr->expires = 0;
62045+ curr->crashes = 0;
62046+ }
62047+
62048+ curr->crashes++;
62049+
62050+ if (!curr->expires)
62051+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62052+
62053+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62054+ time_after(curr->expires, get_seconds())) {
62055+ rcu_read_lock();
62056+ cred = __task_cred(task);
62057+ if (cred->uid && proc_is_setxid(cred)) {
62058+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62059+ spin_lock(&gr_uid_lock);
62060+ gr_insert_uid(cred->uid, curr->expires);
62061+ spin_unlock(&gr_uid_lock);
62062+ curr->expires = 0;
62063+ curr->crashes = 0;
62064+ read_lock(&tasklist_lock);
62065+ do_each_thread(tsk2, tsk) {
62066+ cred2 = __task_cred(tsk);
62067+ if (tsk != task && cred2->uid == cred->uid)
62068+ gr_fake_force_sig(SIGKILL, tsk);
62069+ } while_each_thread(tsk2, tsk);
62070+ read_unlock(&tasklist_lock);
62071+ } else {
62072+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62073+ read_lock(&tasklist_lock);
62074+ read_lock(&grsec_exec_file_lock);
62075+ do_each_thread(tsk2, tsk) {
62076+ if (likely(tsk != task)) {
62077+ // if this thread has the same subject as the one that triggered
62078+ // RES_CRASH and it's the same binary, kill it
62079+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62080+ gr_fake_force_sig(SIGKILL, tsk);
62081+ }
62082+ } while_each_thread(tsk2, tsk);
62083+ read_unlock(&grsec_exec_file_lock);
62084+ read_unlock(&tasklist_lock);
62085+ }
62086+ rcu_read_unlock();
62087+ }
62088+
62089+ return;
62090+}
62091+
62092+int
62093+gr_check_crash_exec(const struct file *filp)
62094+{
62095+ struct acl_subject_label *curr;
62096+
62097+ if (unlikely(!gr_acl_is_enabled()))
62098+ return 0;
62099+
62100+ read_lock(&gr_inode_lock);
62101+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62102+ filp->f_path.dentry->d_inode->i_sb->s_dev,
62103+ current->role);
62104+ read_unlock(&gr_inode_lock);
62105+
62106+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62107+ (!curr->crashes && !curr->expires))
62108+ return 0;
62109+
62110+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62111+ time_after(curr->expires, get_seconds()))
62112+ return 1;
62113+ else if (time_before_eq(curr->expires, get_seconds())) {
62114+ curr->crashes = 0;
62115+ curr->expires = 0;
62116+ }
62117+
62118+ return 0;
62119+}
62120+
62121+void
62122+gr_handle_alertkill(struct task_struct *task)
62123+{
62124+ struct acl_subject_label *curracl;
62125+ __u32 curr_ip;
62126+ struct task_struct *p, *p2;
62127+
62128+ if (unlikely(!gr_acl_is_enabled()))
62129+ return;
62130+
62131+ curracl = task->acl;
62132+ curr_ip = task->signal->curr_ip;
62133+
62134+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62135+ read_lock(&tasklist_lock);
62136+ do_each_thread(p2, p) {
62137+ if (p->signal->curr_ip == curr_ip)
62138+ gr_fake_force_sig(SIGKILL, p);
62139+ } while_each_thread(p2, p);
62140+ read_unlock(&tasklist_lock);
62141+ } else if (curracl->mode & GR_KILLPROC)
62142+ gr_fake_force_sig(SIGKILL, task);
62143+
62144+ return;
62145+}
62146diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62147new file mode 100644
62148index 0000000..9d83a69
62149--- /dev/null
62150+++ b/grsecurity/gracl_shm.c
62151@@ -0,0 +1,40 @@
62152+#include <linux/kernel.h>
62153+#include <linux/mm.h>
62154+#include <linux/sched.h>
62155+#include <linux/file.h>
62156+#include <linux/ipc.h>
62157+#include <linux/gracl.h>
62158+#include <linux/grsecurity.h>
62159+#include <linux/grinternal.h>
62160+
62161+int
62162+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62163+ const time_t shm_createtime, const uid_t cuid, const int shmid)
62164+{
62165+ struct task_struct *task;
62166+
62167+ if (!gr_acl_is_enabled())
62168+ return 1;
62169+
62170+ rcu_read_lock();
62171+ read_lock(&tasklist_lock);
62172+
62173+ task = find_task_by_vpid(shm_cprid);
62174+
62175+ if (unlikely(!task))
62176+ task = find_task_by_vpid(shm_lapid);
62177+
62178+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62179+ (task->pid == shm_lapid)) &&
62180+ (task->acl->mode & GR_PROTSHM) &&
62181+ (task->acl != current->acl))) {
62182+ read_unlock(&tasklist_lock);
62183+ rcu_read_unlock();
62184+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
62185+ return 0;
62186+ }
62187+ read_unlock(&tasklist_lock);
62188+ rcu_read_unlock();
62189+
62190+ return 1;
62191+}
62192diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62193new file mode 100644
62194index 0000000..bc0be01
62195--- /dev/null
62196+++ b/grsecurity/grsec_chdir.c
62197@@ -0,0 +1,19 @@
62198+#include <linux/kernel.h>
62199+#include <linux/sched.h>
62200+#include <linux/fs.h>
62201+#include <linux/file.h>
62202+#include <linux/grsecurity.h>
62203+#include <linux/grinternal.h>
62204+
62205+void
62206+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62207+{
62208+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62209+ if ((grsec_enable_chdir && grsec_enable_group &&
62210+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62211+ !grsec_enable_group)) {
62212+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62213+ }
62214+#endif
62215+ return;
62216+}
62217diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62218new file mode 100644
62219index 0000000..197bdd5
62220--- /dev/null
62221+++ b/grsecurity/grsec_chroot.c
62222@@ -0,0 +1,386 @@
62223+#include <linux/kernel.h>
62224+#include <linux/module.h>
62225+#include <linux/sched.h>
62226+#include <linux/file.h>
62227+#include <linux/fs.h>
62228+#include <linux/mount.h>
62229+#include <linux/types.h>
62230+#include <linux/pid_namespace.h>
62231+#include <linux/grsecurity.h>
62232+#include <linux/grinternal.h>
62233+
62234+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62235+{
62236+#ifdef CONFIG_GRKERNSEC
62237+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
62238+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
62239+ task->gr_is_chrooted = 1;
62240+ else
62241+ task->gr_is_chrooted = 0;
62242+
62243+ task->gr_chroot_dentry = path->dentry;
62244+#endif
62245+ return;
62246+}
62247+
62248+void gr_clear_chroot_entries(struct task_struct *task)
62249+{
62250+#ifdef CONFIG_GRKERNSEC
62251+ task->gr_is_chrooted = 0;
62252+ task->gr_chroot_dentry = NULL;
62253+#endif
62254+ return;
62255+}
62256+
62257+int
62258+gr_handle_chroot_unix(const pid_t pid)
62259+{
62260+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62261+ struct task_struct *p;
62262+
62263+ if (unlikely(!grsec_enable_chroot_unix))
62264+ return 1;
62265+
62266+ if (likely(!proc_is_chrooted(current)))
62267+ return 1;
62268+
62269+ rcu_read_lock();
62270+ read_lock(&tasklist_lock);
62271+
62272+ p = find_task_by_vpid_unrestricted(pid);
62273+ if (unlikely(p && !have_same_root(current, p))) {
62274+ read_unlock(&tasklist_lock);
62275+ rcu_read_unlock();
62276+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62277+ return 0;
62278+ }
62279+ read_unlock(&tasklist_lock);
62280+ rcu_read_unlock();
62281+#endif
62282+ return 1;
62283+}
62284+
62285+int
62286+gr_handle_chroot_nice(void)
62287+{
62288+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62289+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62290+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62291+ return -EPERM;
62292+ }
62293+#endif
62294+ return 0;
62295+}
62296+
62297+int
62298+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62299+{
62300+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62301+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62302+ && proc_is_chrooted(current)) {
62303+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
62304+ return -EACCES;
62305+ }
62306+#endif
62307+ return 0;
62308+}
62309+
62310+int
62311+gr_handle_chroot_rawio(const struct inode *inode)
62312+{
62313+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62314+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62315+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
62316+ return 1;
62317+#endif
62318+ return 0;
62319+}
62320+
62321+int
62322+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62323+{
62324+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62325+ struct task_struct *p;
62326+ int ret = 0;
62327+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62328+ return ret;
62329+
62330+ read_lock(&tasklist_lock);
62331+ do_each_pid_task(pid, type, p) {
62332+ if (!have_same_root(current, p)) {
62333+ ret = 1;
62334+ goto out;
62335+ }
62336+ } while_each_pid_task(pid, type, p);
62337+out:
62338+ read_unlock(&tasklist_lock);
62339+ return ret;
62340+#endif
62341+ return 0;
62342+}
62343+
62344+int
62345+gr_pid_is_chrooted(struct task_struct *p)
62346+{
62347+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62348+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62349+ return 0;
62350+
62351+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62352+ !have_same_root(current, p)) {
62353+ return 1;
62354+ }
62355+#endif
62356+ return 0;
62357+}
62358+
62359+EXPORT_SYMBOL(gr_pid_is_chrooted);
62360+
62361+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62362+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62363+{
62364+ struct dentry *dentry = (struct dentry *)u_dentry;
62365+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
62366+ struct dentry *realroot;
62367+ struct vfsmount *realrootmnt;
62368+ struct dentry *currentroot;
62369+ struct vfsmount *currentmnt;
62370+ struct task_struct *reaper = &init_task;
62371+ int ret = 1;
62372+
62373+ read_lock(&reaper->fs->lock);
62374+ realrootmnt = mntget(reaper->fs->root.mnt);
62375+ realroot = dget(reaper->fs->root.dentry);
62376+ read_unlock(&reaper->fs->lock);
62377+
62378+ read_lock(&current->fs->lock);
62379+ currentmnt = mntget(current->fs->root.mnt);
62380+ currentroot = dget(current->fs->root.dentry);
62381+ read_unlock(&current->fs->lock);
62382+
62383+ spin_lock(&dcache_lock);
62384+ for (;;) {
62385+ if (unlikely((dentry == realroot && mnt == realrootmnt)
62386+ || (dentry == currentroot && mnt == currentmnt)))
62387+ break;
62388+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
62389+ if (mnt->mnt_parent == mnt)
62390+ break;
62391+ dentry = mnt->mnt_mountpoint;
62392+ mnt = mnt->mnt_parent;
62393+ continue;
62394+ }
62395+ dentry = dentry->d_parent;
62396+ }
62397+ spin_unlock(&dcache_lock);
62398+
62399+ dput(currentroot);
62400+ mntput(currentmnt);
62401+
62402+ /* access is outside of chroot */
62403+ if (dentry == realroot && mnt == realrootmnt)
62404+ ret = 0;
62405+
62406+ dput(realroot);
62407+ mntput(realrootmnt);
62408+ return ret;
62409+}
62410+#endif
62411+
62412+int
62413+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62414+{
62415+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62416+ if (!grsec_enable_chroot_fchdir)
62417+ return 1;
62418+
62419+ if (!proc_is_chrooted(current))
62420+ return 1;
62421+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62422+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62423+ return 0;
62424+ }
62425+#endif
62426+ return 1;
62427+}
62428+
62429+int
62430+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62431+ const time_t shm_createtime)
62432+{
62433+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62434+ struct task_struct *p;
62435+ time_t starttime;
62436+
62437+ if (unlikely(!grsec_enable_chroot_shmat))
62438+ return 1;
62439+
62440+ if (likely(!proc_is_chrooted(current)))
62441+ return 1;
62442+
62443+ rcu_read_lock();
62444+ read_lock(&tasklist_lock);
62445+
62446+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62447+ starttime = p->start_time.tv_sec;
62448+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62449+ if (have_same_root(current, p)) {
62450+ goto allow;
62451+ } else {
62452+ read_unlock(&tasklist_lock);
62453+ rcu_read_unlock();
62454+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62455+ return 0;
62456+ }
62457+ }
62458+ /* creator exited, pid reuse, fall through to next check */
62459+ }
62460+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62461+ if (unlikely(!have_same_root(current, p))) {
62462+ read_unlock(&tasklist_lock);
62463+ rcu_read_unlock();
62464+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62465+ return 0;
62466+ }
62467+ }
62468+
62469+allow:
62470+ read_unlock(&tasklist_lock);
62471+ rcu_read_unlock();
62472+#endif
62473+ return 1;
62474+}
62475+
62476+void
62477+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62478+{
62479+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62480+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62481+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62482+#endif
62483+ return;
62484+}
62485+
62486+int
62487+gr_handle_chroot_mknod(const struct dentry *dentry,
62488+ const struct vfsmount *mnt, const int mode)
62489+{
62490+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62491+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62492+ proc_is_chrooted(current)) {
62493+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62494+ return -EPERM;
62495+ }
62496+#endif
62497+ return 0;
62498+}
62499+
62500+int
62501+gr_handle_chroot_mount(const struct dentry *dentry,
62502+ const struct vfsmount *mnt, const char *dev_name)
62503+{
62504+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62505+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62506+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62507+ return -EPERM;
62508+ }
62509+#endif
62510+ return 0;
62511+}
62512+
62513+int
62514+gr_handle_chroot_pivot(void)
62515+{
62516+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62517+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62518+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62519+ return -EPERM;
62520+ }
62521+#endif
62522+ return 0;
62523+}
62524+
62525+int
62526+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62527+{
62528+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62529+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62530+ !gr_is_outside_chroot(dentry, mnt)) {
62531+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62532+ return -EPERM;
62533+ }
62534+#endif
62535+ return 0;
62536+}
62537+
62538+extern const char *captab_log[];
62539+extern int captab_log_entries;
62540+
62541+int
62542+gr_chroot_is_capable(const int cap)
62543+{
62544+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62545+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62546+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62547+ if (cap_raised(chroot_caps, cap)) {
62548+ const struct cred *creds = current_cred();
62549+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62550+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62551+ }
62552+ return 0;
62553+ }
62554+ }
62555+#endif
62556+ return 1;
62557+}
62558+
62559+int
62560+gr_chroot_is_capable_nolog(const int cap)
62561+{
62562+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62563+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62564+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62565+ if (cap_raised(chroot_caps, cap)) {
62566+ return 0;
62567+ }
62568+ }
62569+#endif
62570+ return 1;
62571+}
62572+
62573+int
62574+gr_handle_chroot_sysctl(const int op)
62575+{
62576+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62577+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62578+ && (op & MAY_WRITE))
62579+ return -EACCES;
62580+#endif
62581+ return 0;
62582+}
62583+
62584+void
62585+gr_handle_chroot_chdir(struct path *path)
62586+{
62587+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62588+ if (grsec_enable_chroot_chdir)
62589+ set_fs_pwd(current->fs, path);
62590+#endif
62591+ return;
62592+}
62593+
62594+int
62595+gr_handle_chroot_chmod(const struct dentry *dentry,
62596+ const struct vfsmount *mnt, const int mode)
62597+{
62598+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62599+ /* allow chmod +s on directories, but not on files */
62600+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62601+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62602+ proc_is_chrooted(current)) {
62603+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62604+ return -EPERM;
62605+ }
62606+#endif
62607+ return 0;
62608+}
62609diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62610new file mode 100644
62611index 0000000..40545bf
62612--- /dev/null
62613+++ b/grsecurity/grsec_disabled.c
62614@@ -0,0 +1,437 @@
62615+#include <linux/kernel.h>
62616+#include <linux/module.h>
62617+#include <linux/sched.h>
62618+#include <linux/file.h>
62619+#include <linux/fs.h>
62620+#include <linux/kdev_t.h>
62621+#include <linux/net.h>
62622+#include <linux/in.h>
62623+#include <linux/ip.h>
62624+#include <linux/skbuff.h>
62625+#include <linux/sysctl.h>
62626+
62627+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62628+void
62629+pax_set_initial_flags(struct linux_binprm *bprm)
62630+{
62631+ return;
62632+}
62633+#endif
62634+
62635+#ifdef CONFIG_SYSCTL
62636+__u32
62637+gr_handle_sysctl(const struct ctl_table * table, const int op)
62638+{
62639+ return 0;
62640+}
62641+#endif
62642+
62643+#ifdef CONFIG_TASKSTATS
62644+int gr_is_taskstats_denied(int pid)
62645+{
62646+ return 0;
62647+}
62648+#endif
62649+
62650+int
62651+gr_acl_is_enabled(void)
62652+{
62653+ return 0;
62654+}
62655+
62656+void
62657+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62658+{
62659+ return;
62660+}
62661+
62662+int
62663+gr_handle_rawio(const struct inode *inode)
62664+{
62665+ return 0;
62666+}
62667+
62668+void
62669+gr_acl_handle_psacct(struct task_struct *task, const long code)
62670+{
62671+ return;
62672+}
62673+
62674+int
62675+gr_handle_ptrace(struct task_struct *task, const long request)
62676+{
62677+ return 0;
62678+}
62679+
62680+int
62681+gr_handle_proc_ptrace(struct task_struct *task)
62682+{
62683+ return 0;
62684+}
62685+
62686+void
62687+gr_learn_resource(const struct task_struct *task,
62688+ const int res, const unsigned long wanted, const int gt)
62689+{
62690+ return;
62691+}
62692+
62693+int
62694+gr_set_acls(const int type)
62695+{
62696+ return 0;
62697+}
62698+
62699+int
62700+gr_check_hidden_task(const struct task_struct *tsk)
62701+{
62702+ return 0;
62703+}
62704+
62705+int
62706+gr_check_protected_task(const struct task_struct *task)
62707+{
62708+ return 0;
62709+}
62710+
62711+int
62712+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62713+{
62714+ return 0;
62715+}
62716+
62717+void
62718+gr_copy_label(struct task_struct *tsk)
62719+{
62720+ return;
62721+}
62722+
62723+void
62724+gr_set_pax_flags(struct task_struct *task)
62725+{
62726+ return;
62727+}
62728+
62729+int
62730+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62731+ const int unsafe_share)
62732+{
62733+ return 0;
62734+}
62735+
62736+void
62737+gr_handle_delete(const ino_t ino, const dev_t dev)
62738+{
62739+ return;
62740+}
62741+
62742+void
62743+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62744+{
62745+ return;
62746+}
62747+
62748+void
62749+gr_handle_crash(struct task_struct *task, const int sig)
62750+{
62751+ return;
62752+}
62753+
62754+int
62755+gr_check_crash_exec(const struct file *filp)
62756+{
62757+ return 0;
62758+}
62759+
62760+int
62761+gr_check_crash_uid(const uid_t uid)
62762+{
62763+ return 0;
62764+}
62765+
62766+void
62767+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62768+ struct dentry *old_dentry,
62769+ struct dentry *new_dentry,
62770+ struct vfsmount *mnt, const __u8 replace)
62771+{
62772+ return;
62773+}
62774+
62775+int
62776+gr_search_socket(const int family, const int type, const int protocol)
62777+{
62778+ return 1;
62779+}
62780+
62781+int
62782+gr_search_connectbind(const int mode, const struct socket *sock,
62783+ const struct sockaddr_in *addr)
62784+{
62785+ return 0;
62786+}
62787+
62788+void
62789+gr_handle_alertkill(struct task_struct *task)
62790+{
62791+ return;
62792+}
62793+
62794+__u32
62795+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62796+{
62797+ return 1;
62798+}
62799+
62800+__u32
62801+gr_acl_handle_hidden_file(const struct dentry * dentry,
62802+ const struct vfsmount * mnt)
62803+{
62804+ return 1;
62805+}
62806+
62807+__u32
62808+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62809+ int acc_mode)
62810+{
62811+ return 1;
62812+}
62813+
62814+__u32
62815+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62816+{
62817+ return 1;
62818+}
62819+
62820+__u32
62821+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62822+{
62823+ return 1;
62824+}
62825+
62826+int
62827+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62828+ unsigned int *vm_flags)
62829+{
62830+ return 1;
62831+}
62832+
62833+__u32
62834+gr_acl_handle_truncate(const struct dentry * dentry,
62835+ const struct vfsmount * mnt)
62836+{
62837+ return 1;
62838+}
62839+
62840+__u32
62841+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62842+{
62843+ return 1;
62844+}
62845+
62846+__u32
62847+gr_acl_handle_access(const struct dentry * dentry,
62848+ const struct vfsmount * mnt, const int fmode)
62849+{
62850+ return 1;
62851+}
62852+
62853+__u32
62854+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62855+ umode_t *mode)
62856+{
62857+ return 1;
62858+}
62859+
62860+__u32
62861+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62862+{
62863+ return 1;
62864+}
62865+
62866+__u32
62867+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62868+{
62869+ return 1;
62870+}
62871+
62872+void
62873+grsecurity_init(void)
62874+{
62875+ return;
62876+}
62877+
62878+umode_t gr_acl_umask(void)
62879+{
62880+ return 0;
62881+}
62882+
62883+__u32
62884+gr_acl_handle_mknod(const struct dentry * new_dentry,
62885+ const struct dentry * parent_dentry,
62886+ const struct vfsmount * parent_mnt,
62887+ const int mode)
62888+{
62889+ return 1;
62890+}
62891+
62892+__u32
62893+gr_acl_handle_mkdir(const struct dentry * new_dentry,
62894+ const struct dentry * parent_dentry,
62895+ const struct vfsmount * parent_mnt)
62896+{
62897+ return 1;
62898+}
62899+
62900+__u32
62901+gr_acl_handle_symlink(const struct dentry * new_dentry,
62902+ const struct dentry * parent_dentry,
62903+ const struct vfsmount * parent_mnt, const char *from)
62904+{
62905+ return 1;
62906+}
62907+
62908+__u32
62909+gr_acl_handle_link(const struct dentry * new_dentry,
62910+ const struct dentry * parent_dentry,
62911+ const struct vfsmount * parent_mnt,
62912+ const struct dentry * old_dentry,
62913+ const struct vfsmount * old_mnt, const char *to)
62914+{
62915+ return 1;
62916+}
62917+
62918+int
62919+gr_acl_handle_rename(const struct dentry *new_dentry,
62920+ const struct dentry *parent_dentry,
62921+ const struct vfsmount *parent_mnt,
62922+ const struct dentry *old_dentry,
62923+ const struct inode *old_parent_inode,
62924+ const struct vfsmount *old_mnt, const char *newname)
62925+{
62926+ return 0;
62927+}
62928+
62929+int
62930+gr_acl_handle_filldir(const struct file *file, const char *name,
62931+ const int namelen, const ino_t ino)
62932+{
62933+ return 1;
62934+}
62935+
62936+int
62937+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62938+ const time_t shm_createtime, const uid_t cuid, const int shmid)
62939+{
62940+ return 1;
62941+}
62942+
62943+int
62944+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62945+{
62946+ return 0;
62947+}
62948+
62949+int
62950+gr_search_accept(const struct socket *sock)
62951+{
62952+ return 0;
62953+}
62954+
62955+int
62956+gr_search_listen(const struct socket *sock)
62957+{
62958+ return 0;
62959+}
62960+
62961+int
62962+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
62963+{
62964+ return 0;
62965+}
62966+
62967+__u32
62968+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
62969+{
62970+ return 1;
62971+}
62972+
62973+__u32
62974+gr_acl_handle_creat(const struct dentry * dentry,
62975+ const struct dentry * p_dentry,
62976+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62977+ const int imode)
62978+{
62979+ return 1;
62980+}
62981+
62982+void
62983+gr_acl_handle_exit(void)
62984+{
62985+ return;
62986+}
62987+
62988+int
62989+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62990+{
62991+ return 1;
62992+}
62993+
62994+void
62995+gr_set_role_label(const uid_t uid, const gid_t gid)
62996+{
62997+ return;
62998+}
62999+
63000+int
63001+gr_acl_handle_procpidmem(const struct task_struct *task)
63002+{
63003+ return 0;
63004+}
63005+
63006+int
63007+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63008+{
63009+ return 0;
63010+}
63011+
63012+int
63013+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63014+{
63015+ return 0;
63016+}
63017+
63018+void
63019+gr_set_kernel_label(struct task_struct *task)
63020+{
63021+ return;
63022+}
63023+
63024+int
63025+gr_check_user_change(int real, int effective, int fs)
63026+{
63027+ return 0;
63028+}
63029+
63030+int
63031+gr_check_group_change(int real, int effective, int fs)
63032+{
63033+ return 0;
63034+}
63035+
63036+int gr_acl_enable_at_secure(void)
63037+{
63038+ return 0;
63039+}
63040+
63041+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63042+{
63043+ return dentry->d_inode->i_sb->s_dev;
63044+}
63045+
63046+EXPORT_SYMBOL(gr_learn_resource);
63047+EXPORT_SYMBOL(gr_set_kernel_label);
63048+#ifdef CONFIG_SECURITY
63049+EXPORT_SYMBOL(gr_check_user_change);
63050+EXPORT_SYMBOL(gr_check_group_change);
63051+#endif
63052diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63053new file mode 100644
63054index 0000000..a96e155
63055--- /dev/null
63056+++ b/grsecurity/grsec_exec.c
63057@@ -0,0 +1,204 @@
63058+#include <linux/kernel.h>
63059+#include <linux/sched.h>
63060+#include <linux/file.h>
63061+#include <linux/binfmts.h>
63062+#include <linux/smp_lock.h>
63063+#include <linux/fs.h>
63064+#include <linux/types.h>
63065+#include <linux/grdefs.h>
63066+#include <linux/grinternal.h>
63067+#include <linux/capability.h>
63068+#include <linux/compat.h>
63069+#include <linux/module.h>
63070+
63071+#include <asm/uaccess.h>
63072+
63073+#ifdef CONFIG_GRKERNSEC_EXECLOG
63074+static char gr_exec_arg_buf[132];
63075+static DEFINE_MUTEX(gr_exec_arg_mutex);
63076+#endif
63077+
63078+void
63079+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
63080+{
63081+#ifdef CONFIG_GRKERNSEC_EXECLOG
63082+ char *grarg = gr_exec_arg_buf;
63083+ unsigned int i, x, execlen = 0;
63084+ char c;
63085+
63086+ if (!((grsec_enable_execlog && grsec_enable_group &&
63087+ in_group_p(grsec_audit_gid))
63088+ || (grsec_enable_execlog && !grsec_enable_group)))
63089+ return;
63090+
63091+ mutex_lock(&gr_exec_arg_mutex);
63092+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
63093+
63094+ if (unlikely(argv == NULL))
63095+ goto log;
63096+
63097+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
63098+ const char __user *p;
63099+ unsigned int len;
63100+
63101+ if (copy_from_user(&p, argv + i, sizeof(p)))
63102+ goto log;
63103+ if (!p)
63104+ goto log;
63105+ len = strnlen_user(p, 128 - execlen);
63106+ if (len > 128 - execlen)
63107+ len = 128 - execlen;
63108+ else if (len > 0)
63109+ len--;
63110+ if (copy_from_user(grarg + execlen, p, len))
63111+ goto log;
63112+
63113+ /* rewrite unprintable characters */
63114+ for (x = 0; x < len; x++) {
63115+ c = *(grarg + execlen + x);
63116+ if (c < 32 || c > 126)
63117+ *(grarg + execlen + x) = ' ';
63118+ }
63119+
63120+ execlen += len;
63121+ *(grarg + execlen) = ' ';
63122+ *(grarg + execlen + 1) = '\0';
63123+ execlen++;
63124+ }
63125+
63126+ log:
63127+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63128+ bprm->file->f_path.mnt, grarg);
63129+ mutex_unlock(&gr_exec_arg_mutex);
63130+#endif
63131+ return;
63132+}
63133+
63134+#ifdef CONFIG_COMPAT
63135+void
63136+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
63137+{
63138+#ifdef CONFIG_GRKERNSEC_EXECLOG
63139+ char *grarg = gr_exec_arg_buf;
63140+ unsigned int i, x, execlen = 0;
63141+ char c;
63142+
63143+ if (!((grsec_enable_execlog && grsec_enable_group &&
63144+ in_group_p(grsec_audit_gid))
63145+ || (grsec_enable_execlog && !grsec_enable_group)))
63146+ return;
63147+
63148+ mutex_lock(&gr_exec_arg_mutex);
63149+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
63150+
63151+ if (unlikely(argv == NULL))
63152+ goto log;
63153+
63154+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
63155+ compat_uptr_t p;
63156+ unsigned int len;
63157+
63158+ if (get_user(p, argv + i))
63159+ goto log;
63160+ len = strnlen_user(compat_ptr(p), 128 - execlen);
63161+ if (len > 128 - execlen)
63162+ len = 128 - execlen;
63163+ else if (len > 0)
63164+ len--;
63165+ else
63166+ goto log;
63167+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
63168+ goto log;
63169+
63170+ /* rewrite unprintable characters */
63171+ for (x = 0; x < len; x++) {
63172+ c = *(grarg + execlen + x);
63173+ if (c < 32 || c > 126)
63174+ *(grarg + execlen + x) = ' ';
63175+ }
63176+
63177+ execlen += len;
63178+ *(grarg + execlen) = ' ';
63179+ *(grarg + execlen + 1) = '\0';
63180+ execlen++;
63181+ }
63182+
63183+ log:
63184+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63185+ bprm->file->f_path.mnt, grarg);
63186+ mutex_unlock(&gr_exec_arg_mutex);
63187+#endif
63188+ return;
63189+}
63190+#endif
63191+
63192+#ifdef CONFIG_GRKERNSEC
63193+extern int gr_acl_is_capable(const int cap);
63194+extern int gr_acl_is_capable_nolog(const int cap);
63195+extern int gr_chroot_is_capable(const int cap);
63196+extern int gr_chroot_is_capable_nolog(const int cap);
63197+#endif
63198+
63199+const char *captab_log[] = {
63200+ "CAP_CHOWN",
63201+ "CAP_DAC_OVERRIDE",
63202+ "CAP_DAC_READ_SEARCH",
63203+ "CAP_FOWNER",
63204+ "CAP_FSETID",
63205+ "CAP_KILL",
63206+ "CAP_SETGID",
63207+ "CAP_SETUID",
63208+ "CAP_SETPCAP",
63209+ "CAP_LINUX_IMMUTABLE",
63210+ "CAP_NET_BIND_SERVICE",
63211+ "CAP_NET_BROADCAST",
63212+ "CAP_NET_ADMIN",
63213+ "CAP_NET_RAW",
63214+ "CAP_IPC_LOCK",
63215+ "CAP_IPC_OWNER",
63216+ "CAP_SYS_MODULE",
63217+ "CAP_SYS_RAWIO",
63218+ "CAP_SYS_CHROOT",
63219+ "CAP_SYS_PTRACE",
63220+ "CAP_SYS_PACCT",
63221+ "CAP_SYS_ADMIN",
63222+ "CAP_SYS_BOOT",
63223+ "CAP_SYS_NICE",
63224+ "CAP_SYS_RESOURCE",
63225+ "CAP_SYS_TIME",
63226+ "CAP_SYS_TTY_CONFIG",
63227+ "CAP_MKNOD",
63228+ "CAP_LEASE",
63229+ "CAP_AUDIT_WRITE",
63230+ "CAP_AUDIT_CONTROL",
63231+ "CAP_SETFCAP",
63232+ "CAP_MAC_OVERRIDE",
63233+ "CAP_MAC_ADMIN"
63234+};
63235+
63236+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63237+
63238+int gr_is_capable(const int cap)
63239+{
63240+#ifdef CONFIG_GRKERNSEC
63241+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63242+ return 1;
63243+ return 0;
63244+#else
63245+ return 1;
63246+#endif
63247+}
63248+
63249+int gr_is_capable_nolog(const int cap)
63250+{
63251+#ifdef CONFIG_GRKERNSEC
63252+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63253+ return 1;
63254+ return 0;
63255+#else
63256+ return 1;
63257+#endif
63258+}
63259+
63260+EXPORT_SYMBOL(gr_is_capable);
63261+EXPORT_SYMBOL(gr_is_capable_nolog);
63262diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63263new file mode 100644
63264index 0000000..d3ee748
63265--- /dev/null
63266+++ b/grsecurity/grsec_fifo.c
63267@@ -0,0 +1,24 @@
63268+#include <linux/kernel.h>
63269+#include <linux/sched.h>
63270+#include <linux/fs.h>
63271+#include <linux/file.h>
63272+#include <linux/grinternal.h>
63273+
63274+int
63275+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63276+ const struct dentry *dir, const int flag, const int acc_mode)
63277+{
63278+#ifdef CONFIG_GRKERNSEC_FIFO
63279+ const struct cred *cred = current_cred();
63280+
63281+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63282+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63283+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
63284+ (cred->fsuid != dentry->d_inode->i_uid)) {
63285+ if (!inode_permission(dentry->d_inode, acc_mode))
63286+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
63287+ return -EACCES;
63288+ }
63289+#endif
63290+ return 0;
63291+}
63292diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63293new file mode 100644
63294index 0000000..8ca18bf
63295--- /dev/null
63296+++ b/grsecurity/grsec_fork.c
63297@@ -0,0 +1,23 @@
63298+#include <linux/kernel.h>
63299+#include <linux/sched.h>
63300+#include <linux/grsecurity.h>
63301+#include <linux/grinternal.h>
63302+#include <linux/errno.h>
63303+
63304+void
63305+gr_log_forkfail(const int retval)
63306+{
63307+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63308+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63309+ switch (retval) {
63310+ case -EAGAIN:
63311+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63312+ break;
63313+ case -ENOMEM:
63314+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63315+ break;
63316+ }
63317+ }
63318+#endif
63319+ return;
63320+}
63321diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63322new file mode 100644
63323index 0000000..1e995d3
63324--- /dev/null
63325+++ b/grsecurity/grsec_init.c
63326@@ -0,0 +1,278 @@
63327+#include <linux/kernel.h>
63328+#include <linux/sched.h>
63329+#include <linux/mm.h>
63330+#include <linux/smp_lock.h>
63331+#include <linux/gracl.h>
63332+#include <linux/slab.h>
63333+#include <linux/vmalloc.h>
63334+#include <linux/percpu.h>
63335+#include <linux/module.h>
63336+
63337+int grsec_enable_ptrace_readexec;
63338+int grsec_enable_setxid;
63339+int grsec_enable_brute;
63340+int grsec_enable_link;
63341+int grsec_enable_dmesg;
63342+int grsec_enable_harden_ptrace;
63343+int grsec_enable_fifo;
63344+int grsec_enable_execlog;
63345+int grsec_enable_signal;
63346+int grsec_enable_forkfail;
63347+int grsec_enable_audit_ptrace;
63348+int grsec_enable_time;
63349+int grsec_enable_audit_textrel;
63350+int grsec_enable_group;
63351+int grsec_audit_gid;
63352+int grsec_enable_chdir;
63353+int grsec_enable_mount;
63354+int grsec_enable_rofs;
63355+int grsec_enable_chroot_findtask;
63356+int grsec_enable_chroot_mount;
63357+int grsec_enable_chroot_shmat;
63358+int grsec_enable_chroot_fchdir;
63359+int grsec_enable_chroot_double;
63360+int grsec_enable_chroot_pivot;
63361+int grsec_enable_chroot_chdir;
63362+int grsec_enable_chroot_chmod;
63363+int grsec_enable_chroot_mknod;
63364+int grsec_enable_chroot_nice;
63365+int grsec_enable_chroot_execlog;
63366+int grsec_enable_chroot_caps;
63367+int grsec_enable_chroot_sysctl;
63368+int grsec_enable_chroot_unix;
63369+int grsec_enable_tpe;
63370+int grsec_tpe_gid;
63371+int grsec_enable_blackhole;
63372+#ifdef CONFIG_IPV6_MODULE
63373+EXPORT_SYMBOL(grsec_enable_blackhole);
63374+#endif
63375+int grsec_lastack_retries;
63376+int grsec_enable_tpe_all;
63377+int grsec_enable_tpe_invert;
63378+int grsec_enable_socket_all;
63379+int grsec_socket_all_gid;
63380+int grsec_enable_socket_client;
63381+int grsec_socket_client_gid;
63382+int grsec_enable_socket_server;
63383+int grsec_socket_server_gid;
63384+int grsec_resource_logging;
63385+int grsec_disable_privio;
63386+int grsec_enable_log_rwxmaps;
63387+int grsec_lock;
63388+
63389+DEFINE_SPINLOCK(grsec_alert_lock);
63390+unsigned long grsec_alert_wtime = 0;
63391+unsigned long grsec_alert_fyet = 0;
63392+
63393+DEFINE_SPINLOCK(grsec_audit_lock);
63394+
63395+DEFINE_RWLOCK(grsec_exec_file_lock);
63396+
63397+char *gr_shared_page[4];
63398+
63399+char *gr_alert_log_fmt;
63400+char *gr_audit_log_fmt;
63401+char *gr_alert_log_buf;
63402+char *gr_audit_log_buf;
63403+
63404+extern struct gr_arg *gr_usermode;
63405+extern unsigned char *gr_system_salt;
63406+extern unsigned char *gr_system_sum;
63407+
63408+void __init
63409+grsecurity_init(void)
63410+{
63411+ int j;
63412+ /* create the per-cpu shared pages */
63413+
63414+#ifdef CONFIG_X86
63415+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63416+#endif
63417+
63418+ for (j = 0; j < 4; j++) {
63419+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63420+ if (gr_shared_page[j] == NULL) {
63421+ panic("Unable to allocate grsecurity shared page");
63422+ return;
63423+ }
63424+ }
63425+
63426+ /* allocate log buffers */
63427+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63428+ if (!gr_alert_log_fmt) {
63429+ panic("Unable to allocate grsecurity alert log format buffer");
63430+ return;
63431+ }
63432+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63433+ if (!gr_audit_log_fmt) {
63434+ panic("Unable to allocate grsecurity audit log format buffer");
63435+ return;
63436+ }
63437+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63438+ if (!gr_alert_log_buf) {
63439+ panic("Unable to allocate grsecurity alert log buffer");
63440+ return;
63441+ }
63442+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63443+ if (!gr_audit_log_buf) {
63444+ panic("Unable to allocate grsecurity audit log buffer");
63445+ return;
63446+ }
63447+
63448+ /* allocate memory for authentication structure */
63449+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63450+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63451+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63452+
63453+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63454+ panic("Unable to allocate grsecurity authentication structure");
63455+ return;
63456+ }
63457+
63458+
63459+#ifdef CONFIG_GRKERNSEC_IO
63460+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63461+ grsec_disable_privio = 1;
63462+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63463+ grsec_disable_privio = 1;
63464+#else
63465+ grsec_disable_privio = 0;
63466+#endif
63467+#endif
63468+
63469+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63470+ /* for backward compatibility, tpe_invert always defaults to on if
63471+ enabled in the kernel
63472+ */
63473+ grsec_enable_tpe_invert = 1;
63474+#endif
63475+
63476+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63477+#ifndef CONFIG_GRKERNSEC_SYSCTL
63478+ grsec_lock = 1;
63479+#endif
63480+
63481+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63482+ grsec_enable_audit_textrel = 1;
63483+#endif
63484+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63485+ grsec_enable_log_rwxmaps = 1;
63486+#endif
63487+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63488+ grsec_enable_group = 1;
63489+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63490+#endif
63491+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63492+ grsec_enable_chdir = 1;
63493+#endif
63494+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63495+ grsec_enable_harden_ptrace = 1;
63496+#endif
63497+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63498+ grsec_enable_mount = 1;
63499+#endif
63500+#ifdef CONFIG_GRKERNSEC_LINK
63501+ grsec_enable_link = 1;
63502+#endif
63503+#ifdef CONFIG_GRKERNSEC_BRUTE
63504+ grsec_enable_brute = 1;
63505+#endif
63506+#ifdef CONFIG_GRKERNSEC_DMESG
63507+ grsec_enable_dmesg = 1;
63508+#endif
63509+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63510+ grsec_enable_blackhole = 1;
63511+ grsec_lastack_retries = 4;
63512+#endif
63513+#ifdef CONFIG_GRKERNSEC_FIFO
63514+ grsec_enable_fifo = 1;
63515+#endif
63516+#ifdef CONFIG_GRKERNSEC_EXECLOG
63517+ grsec_enable_execlog = 1;
63518+#endif
63519+#ifdef CONFIG_GRKERNSEC_SETXID
63520+ grsec_enable_setxid = 1;
63521+#endif
63522+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63523+ grsec_enable_ptrace_readexec = 1;
63524+#endif
63525+#ifdef CONFIG_GRKERNSEC_SIGNAL
63526+ grsec_enable_signal = 1;
63527+#endif
63528+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63529+ grsec_enable_forkfail = 1;
63530+#endif
63531+#ifdef CONFIG_GRKERNSEC_TIME
63532+ grsec_enable_time = 1;
63533+#endif
63534+#ifdef CONFIG_GRKERNSEC_RESLOG
63535+ grsec_resource_logging = 1;
63536+#endif
63537+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63538+ grsec_enable_chroot_findtask = 1;
63539+#endif
63540+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63541+ grsec_enable_chroot_unix = 1;
63542+#endif
63543+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63544+ grsec_enable_chroot_mount = 1;
63545+#endif
63546+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63547+ grsec_enable_chroot_fchdir = 1;
63548+#endif
63549+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63550+ grsec_enable_chroot_shmat = 1;
63551+#endif
63552+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63553+ grsec_enable_audit_ptrace = 1;
63554+#endif
63555+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63556+ grsec_enable_chroot_double = 1;
63557+#endif
63558+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63559+ grsec_enable_chroot_pivot = 1;
63560+#endif
63561+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63562+ grsec_enable_chroot_chdir = 1;
63563+#endif
63564+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63565+ grsec_enable_chroot_chmod = 1;
63566+#endif
63567+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63568+ grsec_enable_chroot_mknod = 1;
63569+#endif
63570+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63571+ grsec_enable_chroot_nice = 1;
63572+#endif
63573+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63574+ grsec_enable_chroot_execlog = 1;
63575+#endif
63576+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63577+ grsec_enable_chroot_caps = 1;
63578+#endif
63579+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63580+ grsec_enable_chroot_sysctl = 1;
63581+#endif
63582+#ifdef CONFIG_GRKERNSEC_TPE
63583+ grsec_enable_tpe = 1;
63584+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63585+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63586+ grsec_enable_tpe_all = 1;
63587+#endif
63588+#endif
63589+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63590+ grsec_enable_socket_all = 1;
63591+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63592+#endif
63593+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63594+ grsec_enable_socket_client = 1;
63595+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63596+#endif
63597+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63598+ grsec_enable_socket_server = 1;
63599+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63600+#endif
63601+#endif
63602+
63603+ return;
63604+}
63605diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63606new file mode 100644
63607index 0000000..3efe141
63608--- /dev/null
63609+++ b/grsecurity/grsec_link.c
63610@@ -0,0 +1,43 @@
63611+#include <linux/kernel.h>
63612+#include <linux/sched.h>
63613+#include <linux/fs.h>
63614+#include <linux/file.h>
63615+#include <linux/grinternal.h>
63616+
63617+int
63618+gr_handle_follow_link(const struct inode *parent,
63619+ const struct inode *inode,
63620+ const struct dentry *dentry, const struct vfsmount *mnt)
63621+{
63622+#ifdef CONFIG_GRKERNSEC_LINK
63623+ const struct cred *cred = current_cred();
63624+
63625+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63626+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63627+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63628+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63629+ return -EACCES;
63630+ }
63631+#endif
63632+ return 0;
63633+}
63634+
63635+int
63636+gr_handle_hardlink(const struct dentry *dentry,
63637+ const struct vfsmount *mnt,
63638+ struct inode *inode, const int mode, const char *to)
63639+{
63640+#ifdef CONFIG_GRKERNSEC_LINK
63641+ const struct cred *cred = current_cred();
63642+
63643+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63644+ (!S_ISREG(mode) || (mode & S_ISUID) ||
63645+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63646+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63647+ !capable(CAP_FOWNER) && cred->uid) {
63648+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63649+ return -EPERM;
63650+ }
63651+#endif
63652+ return 0;
63653+}
63654diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63655new file mode 100644
63656index 0000000..a45d2e9
63657--- /dev/null
63658+++ b/grsecurity/grsec_log.c
63659@@ -0,0 +1,322 @@
63660+#include <linux/kernel.h>
63661+#include <linux/sched.h>
63662+#include <linux/file.h>
63663+#include <linux/tty.h>
63664+#include <linux/fs.h>
63665+#include <linux/grinternal.h>
63666+
63667+#ifdef CONFIG_TREE_PREEMPT_RCU
63668+#define DISABLE_PREEMPT() preempt_disable()
63669+#define ENABLE_PREEMPT() preempt_enable()
63670+#else
63671+#define DISABLE_PREEMPT()
63672+#define ENABLE_PREEMPT()
63673+#endif
63674+
63675+#define BEGIN_LOCKS(x) \
63676+ DISABLE_PREEMPT(); \
63677+ rcu_read_lock(); \
63678+ read_lock(&tasklist_lock); \
63679+ read_lock(&grsec_exec_file_lock); \
63680+ if (x != GR_DO_AUDIT) \
63681+ spin_lock(&grsec_alert_lock); \
63682+ else \
63683+ spin_lock(&grsec_audit_lock)
63684+
63685+#define END_LOCKS(x) \
63686+ if (x != GR_DO_AUDIT) \
63687+ spin_unlock(&grsec_alert_lock); \
63688+ else \
63689+ spin_unlock(&grsec_audit_lock); \
63690+ read_unlock(&grsec_exec_file_lock); \
63691+ read_unlock(&tasklist_lock); \
63692+ rcu_read_unlock(); \
63693+ ENABLE_PREEMPT(); \
63694+ if (x == GR_DONT_AUDIT) \
63695+ gr_handle_alertkill(current)
63696+
63697+enum {
63698+ FLOODING,
63699+ NO_FLOODING
63700+};
63701+
63702+extern char *gr_alert_log_fmt;
63703+extern char *gr_audit_log_fmt;
63704+extern char *gr_alert_log_buf;
63705+extern char *gr_audit_log_buf;
63706+
63707+static int gr_log_start(int audit)
63708+{
63709+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63710+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63711+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63712+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63713+ unsigned long curr_secs = get_seconds();
63714+
63715+ if (audit == GR_DO_AUDIT)
63716+ goto set_fmt;
63717+
63718+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63719+ grsec_alert_wtime = curr_secs;
63720+ grsec_alert_fyet = 0;
63721+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63722+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63723+ grsec_alert_fyet++;
63724+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63725+ grsec_alert_wtime = curr_secs;
63726+ grsec_alert_fyet++;
63727+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63728+ return FLOODING;
63729+ }
63730+ else return FLOODING;
63731+
63732+set_fmt:
63733+#endif
63734+ memset(buf, 0, PAGE_SIZE);
63735+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
63736+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63737+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63738+ } else if (current->signal->curr_ip) {
63739+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63740+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63741+ } else if (gr_acl_is_enabled()) {
63742+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63743+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63744+ } else {
63745+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
63746+ strcpy(buf, fmt);
63747+ }
63748+
63749+ return NO_FLOODING;
63750+}
63751+
63752+static void gr_log_middle(int audit, const char *msg, va_list ap)
63753+ __attribute__ ((format (printf, 2, 0)));
63754+
63755+static void gr_log_middle(int audit, const char *msg, va_list ap)
63756+{
63757+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63758+ unsigned int len = strlen(buf);
63759+
63760+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63761+
63762+ return;
63763+}
63764+
63765+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63766+ __attribute__ ((format (printf, 2, 3)));
63767+
63768+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63769+{
63770+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63771+ unsigned int len = strlen(buf);
63772+ va_list ap;
63773+
63774+ va_start(ap, msg);
63775+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63776+ va_end(ap);
63777+
63778+ return;
63779+}
63780+
63781+static void gr_log_end(int audit, int append_default)
63782+{
63783+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63784+
63785+ if (append_default) {
63786+ unsigned int len = strlen(buf);
63787+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63788+ }
63789+
63790+ printk("%s\n", buf);
63791+
63792+ return;
63793+}
63794+
63795+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63796+{
63797+ int logtype;
63798+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63799+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63800+ void *voidptr = NULL;
63801+ int num1 = 0, num2 = 0;
63802+ unsigned long ulong1 = 0, ulong2 = 0;
63803+ struct dentry *dentry = NULL;
63804+ struct vfsmount *mnt = NULL;
63805+ struct file *file = NULL;
63806+ struct task_struct *task = NULL;
63807+ const struct cred *cred, *pcred;
63808+ va_list ap;
63809+
63810+ BEGIN_LOCKS(audit);
63811+ logtype = gr_log_start(audit);
63812+ if (logtype == FLOODING) {
63813+ END_LOCKS(audit);
63814+ return;
63815+ }
63816+ va_start(ap, argtypes);
63817+ switch (argtypes) {
63818+ case GR_TTYSNIFF:
63819+ task = va_arg(ap, struct task_struct *);
63820+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63821+ break;
63822+ case GR_SYSCTL_HIDDEN:
63823+ str1 = va_arg(ap, char *);
63824+ gr_log_middle_varargs(audit, msg, result, str1);
63825+ break;
63826+ case GR_RBAC:
63827+ dentry = va_arg(ap, struct dentry *);
63828+ mnt = va_arg(ap, struct vfsmount *);
63829+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63830+ break;
63831+ case GR_RBAC_STR:
63832+ dentry = va_arg(ap, struct dentry *);
63833+ mnt = va_arg(ap, struct vfsmount *);
63834+ str1 = va_arg(ap, char *);
63835+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63836+ break;
63837+ case GR_STR_RBAC:
63838+ str1 = va_arg(ap, char *);
63839+ dentry = va_arg(ap, struct dentry *);
63840+ mnt = va_arg(ap, struct vfsmount *);
63841+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63842+ break;
63843+ case GR_RBAC_MODE2:
63844+ dentry = va_arg(ap, struct dentry *);
63845+ mnt = va_arg(ap, struct vfsmount *);
63846+ str1 = va_arg(ap, char *);
63847+ str2 = va_arg(ap, char *);
63848+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63849+ break;
63850+ case GR_RBAC_MODE3:
63851+ dentry = va_arg(ap, struct dentry *);
63852+ mnt = va_arg(ap, struct vfsmount *);
63853+ str1 = va_arg(ap, char *);
63854+ str2 = va_arg(ap, char *);
63855+ str3 = va_arg(ap, char *);
63856+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63857+ break;
63858+ case GR_FILENAME:
63859+ dentry = va_arg(ap, struct dentry *);
63860+ mnt = va_arg(ap, struct vfsmount *);
63861+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63862+ break;
63863+ case GR_STR_FILENAME:
63864+ str1 = va_arg(ap, char *);
63865+ dentry = va_arg(ap, struct dentry *);
63866+ mnt = va_arg(ap, struct vfsmount *);
63867+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63868+ break;
63869+ case GR_FILENAME_STR:
63870+ dentry = va_arg(ap, struct dentry *);
63871+ mnt = va_arg(ap, struct vfsmount *);
63872+ str1 = va_arg(ap, char *);
63873+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63874+ break;
63875+ case GR_FILENAME_TWO_INT:
63876+ dentry = va_arg(ap, struct dentry *);
63877+ mnt = va_arg(ap, struct vfsmount *);
63878+ num1 = va_arg(ap, int);
63879+ num2 = va_arg(ap, int);
63880+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63881+ break;
63882+ case GR_FILENAME_TWO_INT_STR:
63883+ dentry = va_arg(ap, struct dentry *);
63884+ mnt = va_arg(ap, struct vfsmount *);
63885+ num1 = va_arg(ap, int);
63886+ num2 = va_arg(ap, int);
63887+ str1 = va_arg(ap, char *);
63888+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63889+ break;
63890+ case GR_TEXTREL:
63891+ file = va_arg(ap, struct file *);
63892+ ulong1 = va_arg(ap, unsigned long);
63893+ ulong2 = va_arg(ap, unsigned long);
63894+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63895+ break;
63896+ case GR_PTRACE:
63897+ task = va_arg(ap, struct task_struct *);
63898+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63899+ break;
63900+ case GR_RESOURCE:
63901+ task = va_arg(ap, struct task_struct *);
63902+ cred = __task_cred(task);
63903+ pcred = __task_cred(task->real_parent);
63904+ ulong1 = va_arg(ap, unsigned long);
63905+ str1 = va_arg(ap, char *);
63906+ ulong2 = va_arg(ap, unsigned long);
63907+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63908+ break;
63909+ case GR_CAP:
63910+ task = va_arg(ap, struct task_struct *);
63911+ cred = __task_cred(task);
63912+ pcred = __task_cred(task->real_parent);
63913+ str1 = va_arg(ap, char *);
63914+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63915+ break;
63916+ case GR_SIG:
63917+ str1 = va_arg(ap, char *);
63918+ voidptr = va_arg(ap, void *);
63919+ gr_log_middle_varargs(audit, msg, str1, voidptr);
63920+ break;
63921+ case GR_SIG2:
63922+ task = va_arg(ap, struct task_struct *);
63923+ cred = __task_cred(task);
63924+ pcred = __task_cred(task->real_parent);
63925+ num1 = va_arg(ap, int);
63926+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63927+ break;
63928+ case GR_CRASH1:
63929+ task = va_arg(ap, struct task_struct *);
63930+ cred = __task_cred(task);
63931+ pcred = __task_cred(task->real_parent);
63932+ ulong1 = va_arg(ap, unsigned long);
63933+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63934+ break;
63935+ case GR_CRASH2:
63936+ task = va_arg(ap, struct task_struct *);
63937+ cred = __task_cred(task);
63938+ pcred = __task_cred(task->real_parent);
63939+ ulong1 = va_arg(ap, unsigned long);
63940+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63941+ break;
63942+ case GR_RWXMAP:
63943+ file = va_arg(ap, struct file *);
63944+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63945+ break;
63946+ case GR_PSACCT:
63947+ {
63948+ unsigned int wday, cday;
63949+ __u8 whr, chr;
63950+ __u8 wmin, cmin;
63951+ __u8 wsec, csec;
63952+ char cur_tty[64] = { 0 };
63953+ char parent_tty[64] = { 0 };
63954+
63955+ task = va_arg(ap, struct task_struct *);
63956+ wday = va_arg(ap, unsigned int);
63957+ cday = va_arg(ap, unsigned int);
63958+ whr = va_arg(ap, int);
63959+ chr = va_arg(ap, int);
63960+ wmin = va_arg(ap, int);
63961+ cmin = va_arg(ap, int);
63962+ wsec = va_arg(ap, int);
63963+ csec = va_arg(ap, int);
63964+ ulong1 = va_arg(ap, unsigned long);
63965+ cred = __task_cred(task);
63966+ pcred = __task_cred(task->real_parent);
63967+
63968+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63969+ }
63970+ break;
63971+ default:
63972+ gr_log_middle(audit, msg, ap);
63973+ }
63974+ va_end(ap);
63975+ // these don't need DEFAULTSECARGS printed on the end
63976+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
63977+ gr_log_end(audit, 0);
63978+ else
63979+ gr_log_end(audit, 1);
63980+ END_LOCKS(audit);
63981+}
63982diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
63983new file mode 100644
63984index 0000000..f536303
63985--- /dev/null
63986+++ b/grsecurity/grsec_mem.c
63987@@ -0,0 +1,40 @@
63988+#include <linux/kernel.h>
63989+#include <linux/sched.h>
63990+#include <linux/mm.h>
63991+#include <linux/mman.h>
63992+#include <linux/grinternal.h>
63993+
63994+void
63995+gr_handle_ioperm(void)
63996+{
63997+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
63998+ return;
63999+}
64000+
64001+void
64002+gr_handle_iopl(void)
64003+{
64004+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64005+ return;
64006+}
64007+
64008+void
64009+gr_handle_mem_readwrite(u64 from, u64 to)
64010+{
64011+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64012+ return;
64013+}
64014+
64015+void
64016+gr_handle_vm86(void)
64017+{
64018+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64019+ return;
64020+}
64021+
64022+void
64023+gr_log_badprocpid(const char *entry)
64024+{
64025+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64026+ return;
64027+}
64028diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64029new file mode 100644
64030index 0000000..2131422
64031--- /dev/null
64032+++ b/grsecurity/grsec_mount.c
64033@@ -0,0 +1,62 @@
64034+#include <linux/kernel.h>
64035+#include <linux/sched.h>
64036+#include <linux/mount.h>
64037+#include <linux/grsecurity.h>
64038+#include <linux/grinternal.h>
64039+
64040+void
64041+gr_log_remount(const char *devname, const int retval)
64042+{
64043+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64044+ if (grsec_enable_mount && (retval >= 0))
64045+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64046+#endif
64047+ return;
64048+}
64049+
64050+void
64051+gr_log_unmount(const char *devname, const int retval)
64052+{
64053+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64054+ if (grsec_enable_mount && (retval >= 0))
64055+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64056+#endif
64057+ return;
64058+}
64059+
64060+void
64061+gr_log_mount(const char *from, const char *to, const int retval)
64062+{
64063+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64064+ if (grsec_enable_mount && (retval >= 0))
64065+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64066+#endif
64067+ return;
64068+}
64069+
64070+int
64071+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64072+{
64073+#ifdef CONFIG_GRKERNSEC_ROFS
64074+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64075+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64076+ return -EPERM;
64077+ } else
64078+ return 0;
64079+#endif
64080+ return 0;
64081+}
64082+
64083+int
64084+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64085+{
64086+#ifdef CONFIG_GRKERNSEC_ROFS
64087+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64088+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64089+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64090+ return -EPERM;
64091+ } else
64092+ return 0;
64093+#endif
64094+ return 0;
64095+}
64096diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64097new file mode 100644
64098index 0000000..a3b12a0
64099--- /dev/null
64100+++ b/grsecurity/grsec_pax.c
64101@@ -0,0 +1,36 @@
64102+#include <linux/kernel.h>
64103+#include <linux/sched.h>
64104+#include <linux/mm.h>
64105+#include <linux/file.h>
64106+#include <linux/grinternal.h>
64107+#include <linux/grsecurity.h>
64108+
64109+void
64110+gr_log_textrel(struct vm_area_struct * vma)
64111+{
64112+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64113+ if (grsec_enable_audit_textrel)
64114+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64115+#endif
64116+ return;
64117+}
64118+
64119+void
64120+gr_log_rwxmmap(struct file *file)
64121+{
64122+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64123+ if (grsec_enable_log_rwxmaps)
64124+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64125+#endif
64126+ return;
64127+}
64128+
64129+void
64130+gr_log_rwxmprotect(struct file *file)
64131+{
64132+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64133+ if (grsec_enable_log_rwxmaps)
64134+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64135+#endif
64136+ return;
64137+}
64138diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64139new file mode 100644
64140index 0000000..78f8733
64141--- /dev/null
64142+++ b/grsecurity/grsec_ptrace.c
64143@@ -0,0 +1,30 @@
64144+#include <linux/kernel.h>
64145+#include <linux/sched.h>
64146+#include <linux/grinternal.h>
64147+#include <linux/security.h>
64148+
64149+void
64150+gr_audit_ptrace(struct task_struct *task)
64151+{
64152+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64153+ if (grsec_enable_audit_ptrace)
64154+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64155+#endif
64156+ return;
64157+}
64158+
64159+int
64160+gr_ptrace_readexec(struct file *file, int unsafe_flags)
64161+{
64162+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64163+ const struct dentry *dentry = file->f_path.dentry;
64164+ const struct vfsmount *mnt = file->f_path.mnt;
64165+
64166+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64167+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64168+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64169+ return -EACCES;
64170+ }
64171+#endif
64172+ return 0;
64173+}
64174diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64175new file mode 100644
64176index 0000000..c648492
64177--- /dev/null
64178+++ b/grsecurity/grsec_sig.c
64179@@ -0,0 +1,206 @@
64180+#include <linux/kernel.h>
64181+#include <linux/sched.h>
64182+#include <linux/delay.h>
64183+#include <linux/grsecurity.h>
64184+#include <linux/grinternal.h>
64185+#include <linux/hardirq.h>
64186+
64187+char *signames[] = {
64188+ [SIGSEGV] = "Segmentation fault",
64189+ [SIGILL] = "Illegal instruction",
64190+ [SIGABRT] = "Abort",
64191+ [SIGBUS] = "Invalid alignment/Bus error"
64192+};
64193+
64194+void
64195+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64196+{
64197+#ifdef CONFIG_GRKERNSEC_SIGNAL
64198+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64199+ (sig == SIGABRT) || (sig == SIGBUS))) {
64200+ if (t->pid == current->pid) {
64201+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64202+ } else {
64203+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64204+ }
64205+ }
64206+#endif
64207+ return;
64208+}
64209+
64210+int
64211+gr_handle_signal(const struct task_struct *p, const int sig)
64212+{
64213+#ifdef CONFIG_GRKERNSEC
64214+ /* ignore the 0 signal for protected task checks */
64215+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
64216+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64217+ return -EPERM;
64218+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64219+ return -EPERM;
64220+ }
64221+#endif
64222+ return 0;
64223+}
64224+
64225+#ifdef CONFIG_GRKERNSEC
64226+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64227+
64228+int gr_fake_force_sig(int sig, struct task_struct *t)
64229+{
64230+ unsigned long int flags;
64231+ int ret, blocked, ignored;
64232+ struct k_sigaction *action;
64233+
64234+ spin_lock_irqsave(&t->sighand->siglock, flags);
64235+ action = &t->sighand->action[sig-1];
64236+ ignored = action->sa.sa_handler == SIG_IGN;
64237+ blocked = sigismember(&t->blocked, sig);
64238+ if (blocked || ignored) {
64239+ action->sa.sa_handler = SIG_DFL;
64240+ if (blocked) {
64241+ sigdelset(&t->blocked, sig);
64242+ recalc_sigpending_and_wake(t);
64243+ }
64244+ }
64245+ if (action->sa.sa_handler == SIG_DFL)
64246+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
64247+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64248+
64249+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
64250+
64251+ return ret;
64252+}
64253+#endif
64254+
64255+#ifdef CONFIG_GRKERNSEC_BRUTE
64256+#define GR_USER_BAN_TIME (15 * 60)
64257+
64258+static int __get_dumpable(unsigned long mm_flags)
64259+{
64260+ int ret;
64261+
64262+ ret = mm_flags & MMF_DUMPABLE_MASK;
64263+ return (ret >= 2) ? 2 : ret;
64264+}
64265+#endif
64266+
64267+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
64268+{
64269+#ifdef CONFIG_GRKERNSEC_BRUTE
64270+ uid_t uid = 0;
64271+
64272+ if (!grsec_enable_brute)
64273+ return;
64274+
64275+ rcu_read_lock();
64276+ read_lock(&tasklist_lock);
64277+ read_lock(&grsec_exec_file_lock);
64278+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
64279+ p->real_parent->brute = 1;
64280+ else {
64281+ const struct cred *cred = __task_cred(p), *cred2;
64282+ struct task_struct *tsk, *tsk2;
64283+
64284+ if (!__get_dumpable(mm_flags) && cred->uid) {
64285+ struct user_struct *user;
64286+
64287+ uid = cred->uid;
64288+
64289+ /* this is put upon execution past expiration */
64290+ user = find_user(uid);
64291+ if (user == NULL)
64292+ goto unlock;
64293+ user->banned = 1;
64294+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64295+ if (user->ban_expires == ~0UL)
64296+ user->ban_expires--;
64297+
64298+ do_each_thread(tsk2, tsk) {
64299+ cred2 = __task_cred(tsk);
64300+ if (tsk != p && cred2->uid == uid)
64301+ gr_fake_force_sig(SIGKILL, tsk);
64302+ } while_each_thread(tsk2, tsk);
64303+ }
64304+ }
64305+unlock:
64306+ read_unlock(&grsec_exec_file_lock);
64307+ read_unlock(&tasklist_lock);
64308+ rcu_read_unlock();
64309+
64310+ if (uid)
64311+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
64312+#endif
64313+ return;
64314+}
64315+
64316+void gr_handle_brute_check(void)
64317+{
64318+#ifdef CONFIG_GRKERNSEC_BRUTE
64319+ if (current->brute)
64320+ msleep(30 * 1000);
64321+#endif
64322+ return;
64323+}
64324+
64325+void gr_handle_kernel_exploit(void)
64326+{
64327+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64328+ const struct cred *cred;
64329+ struct task_struct *tsk, *tsk2;
64330+ struct user_struct *user;
64331+ uid_t uid;
64332+
64333+ if (in_irq() || in_serving_softirq() || in_nmi())
64334+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64335+
64336+ uid = current_uid();
64337+
64338+ if (uid == 0)
64339+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
64340+ else {
64341+ /* kill all the processes of this user, hold a reference
64342+ to their creds struct, and prevent them from creating
64343+ another process until system reset
64344+ */
64345+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
64346+ /* we intentionally leak this ref */
64347+ user = get_uid(current->cred->user);
64348+ if (user) {
64349+ user->banned = 1;
64350+ user->ban_expires = ~0UL;
64351+ }
64352+
64353+ read_lock(&tasklist_lock);
64354+ do_each_thread(tsk2, tsk) {
64355+ cred = __task_cred(tsk);
64356+ if (cred->uid == uid)
64357+ gr_fake_force_sig(SIGKILL, tsk);
64358+ } while_each_thread(tsk2, tsk);
64359+ read_unlock(&tasklist_lock);
64360+ }
64361+#endif
64362+}
64363+
64364+int __gr_process_user_ban(struct user_struct *user)
64365+{
64366+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64367+ if (unlikely(user->banned)) {
64368+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64369+ user->banned = 0;
64370+ user->ban_expires = 0;
64371+ free_uid(user);
64372+ } else
64373+ return -EPERM;
64374+ }
64375+#endif
64376+ return 0;
64377+}
64378+
64379+int gr_process_user_ban(void)
64380+{
64381+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64382+ return __gr_process_user_ban(current->cred->user);
64383+#endif
64384+ return 0;
64385+}
64386diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64387new file mode 100644
64388index 0000000..7512ea9
64389--- /dev/null
64390+++ b/grsecurity/grsec_sock.c
64391@@ -0,0 +1,275 @@
64392+#include <linux/kernel.h>
64393+#include <linux/module.h>
64394+#include <linux/sched.h>
64395+#include <linux/file.h>
64396+#include <linux/net.h>
64397+#include <linux/in.h>
64398+#include <linux/ip.h>
64399+#include <net/sock.h>
64400+#include <net/inet_sock.h>
64401+#include <linux/grsecurity.h>
64402+#include <linux/grinternal.h>
64403+#include <linux/gracl.h>
64404+
64405+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64406+EXPORT_SYMBOL(gr_cap_rtnetlink);
64407+
64408+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64409+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64410+
64411+EXPORT_SYMBOL(gr_search_udp_recvmsg);
64412+EXPORT_SYMBOL(gr_search_udp_sendmsg);
64413+
64414+#ifdef CONFIG_UNIX_MODULE
64415+EXPORT_SYMBOL(gr_acl_handle_unix);
64416+EXPORT_SYMBOL(gr_acl_handle_mknod);
64417+EXPORT_SYMBOL(gr_handle_chroot_unix);
64418+EXPORT_SYMBOL(gr_handle_create);
64419+#endif
64420+
64421+#ifdef CONFIG_GRKERNSEC
64422+#define gr_conn_table_size 32749
64423+struct conn_table_entry {
64424+ struct conn_table_entry *next;
64425+ struct signal_struct *sig;
64426+};
64427+
64428+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64429+DEFINE_SPINLOCK(gr_conn_table_lock);
64430+
64431+extern const char * gr_socktype_to_name(unsigned char type);
64432+extern const char * gr_proto_to_name(unsigned char proto);
64433+extern const char * gr_sockfamily_to_name(unsigned char family);
64434+
64435+static __inline__ int
64436+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64437+{
64438+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64439+}
64440+
64441+static __inline__ int
64442+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64443+ __u16 sport, __u16 dport)
64444+{
64445+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64446+ sig->gr_sport == sport && sig->gr_dport == dport))
64447+ return 1;
64448+ else
64449+ return 0;
64450+}
64451+
64452+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64453+{
64454+ struct conn_table_entry **match;
64455+ unsigned int index;
64456+
64457+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64458+ sig->gr_sport, sig->gr_dport,
64459+ gr_conn_table_size);
64460+
64461+ newent->sig = sig;
64462+
64463+ match = &gr_conn_table[index];
64464+ newent->next = *match;
64465+ *match = newent;
64466+
64467+ return;
64468+}
64469+
64470+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64471+{
64472+ struct conn_table_entry *match, *last = NULL;
64473+ unsigned int index;
64474+
64475+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64476+ sig->gr_sport, sig->gr_dport,
64477+ gr_conn_table_size);
64478+
64479+ match = gr_conn_table[index];
64480+ while (match && !conn_match(match->sig,
64481+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64482+ sig->gr_dport)) {
64483+ last = match;
64484+ match = match->next;
64485+ }
64486+
64487+ if (match) {
64488+ if (last)
64489+ last->next = match->next;
64490+ else
64491+ gr_conn_table[index] = NULL;
64492+ kfree(match);
64493+ }
64494+
64495+ return;
64496+}
64497+
64498+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64499+ __u16 sport, __u16 dport)
64500+{
64501+ struct conn_table_entry *match;
64502+ unsigned int index;
64503+
64504+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64505+
64506+ match = gr_conn_table[index];
64507+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64508+ match = match->next;
64509+
64510+ if (match)
64511+ return match->sig;
64512+ else
64513+ return NULL;
64514+}
64515+
64516+#endif
64517+
64518+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64519+{
64520+#ifdef CONFIG_GRKERNSEC
64521+ struct signal_struct *sig = task->signal;
64522+ struct conn_table_entry *newent;
64523+
64524+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64525+ if (newent == NULL)
64526+ return;
64527+ /* no bh lock needed since we are called with bh disabled */
64528+ spin_lock(&gr_conn_table_lock);
64529+ gr_del_task_from_ip_table_nolock(sig);
64530+ sig->gr_saddr = inet->rcv_saddr;
64531+ sig->gr_daddr = inet->daddr;
64532+ sig->gr_sport = inet->sport;
64533+ sig->gr_dport = inet->dport;
64534+ gr_add_to_task_ip_table_nolock(sig, newent);
64535+ spin_unlock(&gr_conn_table_lock);
64536+#endif
64537+ return;
64538+}
64539+
64540+void gr_del_task_from_ip_table(struct task_struct *task)
64541+{
64542+#ifdef CONFIG_GRKERNSEC
64543+ spin_lock_bh(&gr_conn_table_lock);
64544+ gr_del_task_from_ip_table_nolock(task->signal);
64545+ spin_unlock_bh(&gr_conn_table_lock);
64546+#endif
64547+ return;
64548+}
64549+
64550+void
64551+gr_attach_curr_ip(const struct sock *sk)
64552+{
64553+#ifdef CONFIG_GRKERNSEC
64554+ struct signal_struct *p, *set;
64555+ const struct inet_sock *inet = inet_sk(sk);
64556+
64557+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64558+ return;
64559+
64560+ set = current->signal;
64561+
64562+ spin_lock_bh(&gr_conn_table_lock);
64563+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64564+ inet->dport, inet->sport);
64565+ if (unlikely(p != NULL)) {
64566+ set->curr_ip = p->curr_ip;
64567+ set->used_accept = 1;
64568+ gr_del_task_from_ip_table_nolock(p);
64569+ spin_unlock_bh(&gr_conn_table_lock);
64570+ return;
64571+ }
64572+ spin_unlock_bh(&gr_conn_table_lock);
64573+
64574+ set->curr_ip = inet->daddr;
64575+ set->used_accept = 1;
64576+#endif
64577+ return;
64578+}
64579+
64580+int
64581+gr_handle_sock_all(const int family, const int type, const int protocol)
64582+{
64583+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64584+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64585+ (family != AF_UNIX)) {
64586+ if (family == AF_INET)
64587+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64588+ else
64589+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64590+ return -EACCES;
64591+ }
64592+#endif
64593+ return 0;
64594+}
64595+
64596+int
64597+gr_handle_sock_server(const struct sockaddr *sck)
64598+{
64599+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64600+ if (grsec_enable_socket_server &&
64601+ in_group_p(grsec_socket_server_gid) &&
64602+ sck && (sck->sa_family != AF_UNIX) &&
64603+ (sck->sa_family != AF_LOCAL)) {
64604+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64605+ return -EACCES;
64606+ }
64607+#endif
64608+ return 0;
64609+}
64610+
64611+int
64612+gr_handle_sock_server_other(const struct sock *sck)
64613+{
64614+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64615+ if (grsec_enable_socket_server &&
64616+ in_group_p(grsec_socket_server_gid) &&
64617+ sck && (sck->sk_family != AF_UNIX) &&
64618+ (sck->sk_family != AF_LOCAL)) {
64619+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64620+ return -EACCES;
64621+ }
64622+#endif
64623+ return 0;
64624+}
64625+
64626+int
64627+gr_handle_sock_client(const struct sockaddr *sck)
64628+{
64629+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64630+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64631+ sck && (sck->sa_family != AF_UNIX) &&
64632+ (sck->sa_family != AF_LOCAL)) {
64633+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64634+ return -EACCES;
64635+ }
64636+#endif
64637+ return 0;
64638+}
64639+
64640+kernel_cap_t
64641+gr_cap_rtnetlink(struct sock *sock)
64642+{
64643+#ifdef CONFIG_GRKERNSEC
64644+ if (!gr_acl_is_enabled())
64645+ return current_cap();
64646+ else if (sock->sk_protocol == NETLINK_ISCSI &&
64647+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64648+ gr_is_capable(CAP_SYS_ADMIN))
64649+ return current_cap();
64650+ else if (sock->sk_protocol == NETLINK_AUDIT &&
64651+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64652+ gr_is_capable(CAP_AUDIT_WRITE) &&
64653+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64654+ gr_is_capable(CAP_AUDIT_CONTROL))
64655+ return current_cap();
64656+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64657+ ((sock->sk_protocol == NETLINK_ROUTE) ?
64658+ gr_is_capable_nolog(CAP_NET_ADMIN) :
64659+ gr_is_capable(CAP_NET_ADMIN)))
64660+ return current_cap();
64661+ else
64662+ return __cap_empty_set;
64663+#else
64664+ return current_cap();
64665+#endif
64666+}
64667diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64668new file mode 100644
64669index 0000000..31f3258
64670--- /dev/null
64671+++ b/grsecurity/grsec_sysctl.c
64672@@ -0,0 +1,499 @@
64673+#include <linux/kernel.h>
64674+#include <linux/sched.h>
64675+#include <linux/sysctl.h>
64676+#include <linux/grsecurity.h>
64677+#include <linux/grinternal.h>
64678+
64679+int
64680+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64681+{
64682+#ifdef CONFIG_GRKERNSEC_SYSCTL
64683+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64684+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64685+ return -EACCES;
64686+ }
64687+#endif
64688+ return 0;
64689+}
64690+
64691+#ifdef CONFIG_GRKERNSEC_ROFS
64692+static int __maybe_unused one = 1;
64693+#endif
64694+
64695+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64696+ctl_table grsecurity_table[] = {
64697+#ifdef CONFIG_GRKERNSEC_SYSCTL
64698+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64699+#ifdef CONFIG_GRKERNSEC_IO
64700+ {
64701+ .ctl_name = CTL_UNNUMBERED,
64702+ .procname = "disable_priv_io",
64703+ .data = &grsec_disable_privio,
64704+ .maxlen = sizeof(int),
64705+ .mode = 0600,
64706+ .proc_handler = &proc_dointvec,
64707+ },
64708+#endif
64709+#endif
64710+#ifdef CONFIG_GRKERNSEC_LINK
64711+ {
64712+ .ctl_name = CTL_UNNUMBERED,
64713+ .procname = "linking_restrictions",
64714+ .data = &grsec_enable_link,
64715+ .maxlen = sizeof(int),
64716+ .mode = 0600,
64717+ .proc_handler = &proc_dointvec,
64718+ },
64719+#endif
64720+#ifdef CONFIG_GRKERNSEC_BRUTE
64721+ {
64722+ .ctl_name = CTL_UNNUMBERED,
64723+ .procname = "deter_bruteforce",
64724+ .data = &grsec_enable_brute,
64725+ .maxlen = sizeof(int),
64726+ .mode = 0600,
64727+ .proc_handler = &proc_dointvec,
64728+ },
64729+#endif
64730+#ifdef CONFIG_GRKERNSEC_FIFO
64731+ {
64732+ .ctl_name = CTL_UNNUMBERED,
64733+ .procname = "fifo_restrictions",
64734+ .data = &grsec_enable_fifo,
64735+ .maxlen = sizeof(int),
64736+ .mode = 0600,
64737+ .proc_handler = &proc_dointvec,
64738+ },
64739+#endif
64740+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64741+ {
64742+ .ctl_name = CTL_UNNUMBERED,
64743+ .procname = "ptrace_readexec",
64744+ .data = &grsec_enable_ptrace_readexec,
64745+ .maxlen = sizeof(int),
64746+ .mode = 0600,
64747+ .proc_handler = &proc_dointvec,
64748+ },
64749+#endif
64750+#ifdef CONFIG_GRKERNSEC_SETXID
64751+ {
64752+ .ctl_name = CTL_UNNUMBERED,
64753+ .procname = "consistent_setxid",
64754+ .data = &grsec_enable_setxid,
64755+ .maxlen = sizeof(int),
64756+ .mode = 0600,
64757+ .proc_handler = &proc_dointvec,
64758+ },
64759+#endif
64760+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64761+ {
64762+ .ctl_name = CTL_UNNUMBERED,
64763+ .procname = "ip_blackhole",
64764+ .data = &grsec_enable_blackhole,
64765+ .maxlen = sizeof(int),
64766+ .mode = 0600,
64767+ .proc_handler = &proc_dointvec,
64768+ },
64769+ {
64770+ .ctl_name = CTL_UNNUMBERED,
64771+ .procname = "lastack_retries",
64772+ .data = &grsec_lastack_retries,
64773+ .maxlen = sizeof(int),
64774+ .mode = 0600,
64775+ .proc_handler = &proc_dointvec,
64776+ },
64777+#endif
64778+#ifdef CONFIG_GRKERNSEC_EXECLOG
64779+ {
64780+ .ctl_name = CTL_UNNUMBERED,
64781+ .procname = "exec_logging",
64782+ .data = &grsec_enable_execlog,
64783+ .maxlen = sizeof(int),
64784+ .mode = 0600,
64785+ .proc_handler = &proc_dointvec,
64786+ },
64787+#endif
64788+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64789+ {
64790+ .ctl_name = CTL_UNNUMBERED,
64791+ .procname = "rwxmap_logging",
64792+ .data = &grsec_enable_log_rwxmaps,
64793+ .maxlen = sizeof(int),
64794+ .mode = 0600,
64795+ .proc_handler = &proc_dointvec,
64796+ },
64797+#endif
64798+#ifdef CONFIG_GRKERNSEC_SIGNAL
64799+ {
64800+ .ctl_name = CTL_UNNUMBERED,
64801+ .procname = "signal_logging",
64802+ .data = &grsec_enable_signal,
64803+ .maxlen = sizeof(int),
64804+ .mode = 0600,
64805+ .proc_handler = &proc_dointvec,
64806+ },
64807+#endif
64808+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64809+ {
64810+ .ctl_name = CTL_UNNUMBERED,
64811+ .procname = "forkfail_logging",
64812+ .data = &grsec_enable_forkfail,
64813+ .maxlen = sizeof(int),
64814+ .mode = 0600,
64815+ .proc_handler = &proc_dointvec,
64816+ },
64817+#endif
64818+#ifdef CONFIG_GRKERNSEC_TIME
64819+ {
64820+ .ctl_name = CTL_UNNUMBERED,
64821+ .procname = "timechange_logging",
64822+ .data = &grsec_enable_time,
64823+ .maxlen = sizeof(int),
64824+ .mode = 0600,
64825+ .proc_handler = &proc_dointvec,
64826+ },
64827+#endif
64828+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64829+ {
64830+ .ctl_name = CTL_UNNUMBERED,
64831+ .procname = "chroot_deny_shmat",
64832+ .data = &grsec_enable_chroot_shmat,
64833+ .maxlen = sizeof(int),
64834+ .mode = 0600,
64835+ .proc_handler = &proc_dointvec,
64836+ },
64837+#endif
64838+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64839+ {
64840+ .ctl_name = CTL_UNNUMBERED,
64841+ .procname = "chroot_deny_unix",
64842+ .data = &grsec_enable_chroot_unix,
64843+ .maxlen = sizeof(int),
64844+ .mode = 0600,
64845+ .proc_handler = &proc_dointvec,
64846+ },
64847+#endif
64848+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64849+ {
64850+ .ctl_name = CTL_UNNUMBERED,
64851+ .procname = "chroot_deny_mount",
64852+ .data = &grsec_enable_chroot_mount,
64853+ .maxlen = sizeof(int),
64854+ .mode = 0600,
64855+ .proc_handler = &proc_dointvec,
64856+ },
64857+#endif
64858+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64859+ {
64860+ .ctl_name = CTL_UNNUMBERED,
64861+ .procname = "chroot_deny_fchdir",
64862+ .data = &grsec_enable_chroot_fchdir,
64863+ .maxlen = sizeof(int),
64864+ .mode = 0600,
64865+ .proc_handler = &proc_dointvec,
64866+ },
64867+#endif
64868+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64869+ {
64870+ .ctl_name = CTL_UNNUMBERED,
64871+ .procname = "chroot_deny_chroot",
64872+ .data = &grsec_enable_chroot_double,
64873+ .maxlen = sizeof(int),
64874+ .mode = 0600,
64875+ .proc_handler = &proc_dointvec,
64876+ },
64877+#endif
64878+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64879+ {
64880+ .ctl_name = CTL_UNNUMBERED,
64881+ .procname = "chroot_deny_pivot",
64882+ .data = &grsec_enable_chroot_pivot,
64883+ .maxlen = sizeof(int),
64884+ .mode = 0600,
64885+ .proc_handler = &proc_dointvec,
64886+ },
64887+#endif
64888+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64889+ {
64890+ .ctl_name = CTL_UNNUMBERED,
64891+ .procname = "chroot_enforce_chdir",
64892+ .data = &grsec_enable_chroot_chdir,
64893+ .maxlen = sizeof(int),
64894+ .mode = 0600,
64895+ .proc_handler = &proc_dointvec,
64896+ },
64897+#endif
64898+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64899+ {
64900+ .ctl_name = CTL_UNNUMBERED,
64901+ .procname = "chroot_deny_chmod",
64902+ .data = &grsec_enable_chroot_chmod,
64903+ .maxlen = sizeof(int),
64904+ .mode = 0600,
64905+ .proc_handler = &proc_dointvec,
64906+ },
64907+#endif
64908+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64909+ {
64910+ .ctl_name = CTL_UNNUMBERED,
64911+ .procname = "chroot_deny_mknod",
64912+ .data = &grsec_enable_chroot_mknod,
64913+ .maxlen = sizeof(int),
64914+ .mode = 0600,
64915+ .proc_handler = &proc_dointvec,
64916+ },
64917+#endif
64918+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64919+ {
64920+ .ctl_name = CTL_UNNUMBERED,
64921+ .procname = "chroot_restrict_nice",
64922+ .data = &grsec_enable_chroot_nice,
64923+ .maxlen = sizeof(int),
64924+ .mode = 0600,
64925+ .proc_handler = &proc_dointvec,
64926+ },
64927+#endif
64928+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64929+ {
64930+ .ctl_name = CTL_UNNUMBERED,
64931+ .procname = "chroot_execlog",
64932+ .data = &grsec_enable_chroot_execlog,
64933+ .maxlen = sizeof(int),
64934+ .mode = 0600,
64935+ .proc_handler = &proc_dointvec,
64936+ },
64937+#endif
64938+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64939+ {
64940+ .ctl_name = CTL_UNNUMBERED,
64941+ .procname = "chroot_caps",
64942+ .data = &grsec_enable_chroot_caps,
64943+ .maxlen = sizeof(int),
64944+ .mode = 0600,
64945+ .proc_handler = &proc_dointvec,
64946+ },
64947+#endif
64948+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64949+ {
64950+ .ctl_name = CTL_UNNUMBERED,
64951+ .procname = "chroot_deny_sysctl",
64952+ .data = &grsec_enable_chroot_sysctl,
64953+ .maxlen = sizeof(int),
64954+ .mode = 0600,
64955+ .proc_handler = &proc_dointvec,
64956+ },
64957+#endif
64958+#ifdef CONFIG_GRKERNSEC_TPE
64959+ {
64960+ .ctl_name = CTL_UNNUMBERED,
64961+ .procname = "tpe",
64962+ .data = &grsec_enable_tpe,
64963+ .maxlen = sizeof(int),
64964+ .mode = 0600,
64965+ .proc_handler = &proc_dointvec,
64966+ },
64967+ {
64968+ .ctl_name = CTL_UNNUMBERED,
64969+ .procname = "tpe_gid",
64970+ .data = &grsec_tpe_gid,
64971+ .maxlen = sizeof(int),
64972+ .mode = 0600,
64973+ .proc_handler = &proc_dointvec,
64974+ },
64975+#endif
64976+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64977+ {
64978+ .ctl_name = CTL_UNNUMBERED,
64979+ .procname = "tpe_invert",
64980+ .data = &grsec_enable_tpe_invert,
64981+ .maxlen = sizeof(int),
64982+ .mode = 0600,
64983+ .proc_handler = &proc_dointvec,
64984+ },
64985+#endif
64986+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64987+ {
64988+ .ctl_name = CTL_UNNUMBERED,
64989+ .procname = "tpe_restrict_all",
64990+ .data = &grsec_enable_tpe_all,
64991+ .maxlen = sizeof(int),
64992+ .mode = 0600,
64993+ .proc_handler = &proc_dointvec,
64994+ },
64995+#endif
64996+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64997+ {
64998+ .ctl_name = CTL_UNNUMBERED,
64999+ .procname = "socket_all",
65000+ .data = &grsec_enable_socket_all,
65001+ .maxlen = sizeof(int),
65002+ .mode = 0600,
65003+ .proc_handler = &proc_dointvec,
65004+ },
65005+ {
65006+ .ctl_name = CTL_UNNUMBERED,
65007+ .procname = "socket_all_gid",
65008+ .data = &grsec_socket_all_gid,
65009+ .maxlen = sizeof(int),
65010+ .mode = 0600,
65011+ .proc_handler = &proc_dointvec,
65012+ },
65013+#endif
65014+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65015+ {
65016+ .ctl_name = CTL_UNNUMBERED,
65017+ .procname = "socket_client",
65018+ .data = &grsec_enable_socket_client,
65019+ .maxlen = sizeof(int),
65020+ .mode = 0600,
65021+ .proc_handler = &proc_dointvec,
65022+ },
65023+ {
65024+ .ctl_name = CTL_UNNUMBERED,
65025+ .procname = "socket_client_gid",
65026+ .data = &grsec_socket_client_gid,
65027+ .maxlen = sizeof(int),
65028+ .mode = 0600,
65029+ .proc_handler = &proc_dointvec,
65030+ },
65031+#endif
65032+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65033+ {
65034+ .ctl_name = CTL_UNNUMBERED,
65035+ .procname = "socket_server",
65036+ .data = &grsec_enable_socket_server,
65037+ .maxlen = sizeof(int),
65038+ .mode = 0600,
65039+ .proc_handler = &proc_dointvec,
65040+ },
65041+ {
65042+ .ctl_name = CTL_UNNUMBERED,
65043+ .procname = "socket_server_gid",
65044+ .data = &grsec_socket_server_gid,
65045+ .maxlen = sizeof(int),
65046+ .mode = 0600,
65047+ .proc_handler = &proc_dointvec,
65048+ },
65049+#endif
65050+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65051+ {
65052+ .ctl_name = CTL_UNNUMBERED,
65053+ .procname = "audit_group",
65054+ .data = &grsec_enable_group,
65055+ .maxlen = sizeof(int),
65056+ .mode = 0600,
65057+ .proc_handler = &proc_dointvec,
65058+ },
65059+ {
65060+ .ctl_name = CTL_UNNUMBERED,
65061+ .procname = "audit_gid",
65062+ .data = &grsec_audit_gid,
65063+ .maxlen = sizeof(int),
65064+ .mode = 0600,
65065+ .proc_handler = &proc_dointvec,
65066+ },
65067+#endif
65068+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65069+ {
65070+ .ctl_name = CTL_UNNUMBERED,
65071+ .procname = "audit_chdir",
65072+ .data = &grsec_enable_chdir,
65073+ .maxlen = sizeof(int),
65074+ .mode = 0600,
65075+ .proc_handler = &proc_dointvec,
65076+ },
65077+#endif
65078+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65079+ {
65080+ .ctl_name = CTL_UNNUMBERED,
65081+ .procname = "audit_mount",
65082+ .data = &grsec_enable_mount,
65083+ .maxlen = sizeof(int),
65084+ .mode = 0600,
65085+ .proc_handler = &proc_dointvec,
65086+ },
65087+#endif
65088+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65089+ {
65090+ .ctl_name = CTL_UNNUMBERED,
65091+ .procname = "audit_textrel",
65092+ .data = &grsec_enable_audit_textrel,
65093+ .maxlen = sizeof(int),
65094+ .mode = 0600,
65095+ .proc_handler = &proc_dointvec,
65096+ },
65097+#endif
65098+#ifdef CONFIG_GRKERNSEC_DMESG
65099+ {
65100+ .ctl_name = CTL_UNNUMBERED,
65101+ .procname = "dmesg",
65102+ .data = &grsec_enable_dmesg,
65103+ .maxlen = sizeof(int),
65104+ .mode = 0600,
65105+ .proc_handler = &proc_dointvec,
65106+ },
65107+#endif
65108+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65109+ {
65110+ .ctl_name = CTL_UNNUMBERED,
65111+ .procname = "chroot_findtask",
65112+ .data = &grsec_enable_chroot_findtask,
65113+ .maxlen = sizeof(int),
65114+ .mode = 0600,
65115+ .proc_handler = &proc_dointvec,
65116+ },
65117+#endif
65118+#ifdef CONFIG_GRKERNSEC_RESLOG
65119+ {
65120+ .ctl_name = CTL_UNNUMBERED,
65121+ .procname = "resource_logging",
65122+ .data = &grsec_resource_logging,
65123+ .maxlen = sizeof(int),
65124+ .mode = 0600,
65125+ .proc_handler = &proc_dointvec,
65126+ },
65127+#endif
65128+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65129+ {
65130+ .ctl_name = CTL_UNNUMBERED,
65131+ .procname = "audit_ptrace",
65132+ .data = &grsec_enable_audit_ptrace,
65133+ .maxlen = sizeof(int),
65134+ .mode = 0600,
65135+ .proc_handler = &proc_dointvec,
65136+ },
65137+#endif
65138+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65139+ {
65140+ .ctl_name = CTL_UNNUMBERED,
65141+ .procname = "harden_ptrace",
65142+ .data = &grsec_enable_harden_ptrace,
65143+ .maxlen = sizeof(int),
65144+ .mode = 0600,
65145+ .proc_handler = &proc_dointvec,
65146+ },
65147+#endif
65148+ {
65149+ .ctl_name = CTL_UNNUMBERED,
65150+ .procname = "grsec_lock",
65151+ .data = &grsec_lock,
65152+ .maxlen = sizeof(int),
65153+ .mode = 0600,
65154+ .proc_handler = &proc_dointvec,
65155+ },
65156+#endif
65157+#ifdef CONFIG_GRKERNSEC_ROFS
65158+ {
65159+ .ctl_name = CTL_UNNUMBERED,
65160+ .procname = "romount_protect",
65161+ .data = &grsec_enable_rofs,
65162+ .maxlen = sizeof(int),
65163+ .mode = 0600,
65164+ .proc_handler = &proc_dointvec_minmax,
65165+ .extra1 = &one,
65166+ .extra2 = &one,
65167+ },
65168+#endif
65169+ { .ctl_name = 0 }
65170+};
65171+#endif
65172diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65173new file mode 100644
65174index 0000000..0dc13c3
65175--- /dev/null
65176+++ b/grsecurity/grsec_time.c
65177@@ -0,0 +1,16 @@
65178+#include <linux/kernel.h>
65179+#include <linux/sched.h>
65180+#include <linux/grinternal.h>
65181+#include <linux/module.h>
65182+
65183+void
65184+gr_log_timechange(void)
65185+{
65186+#ifdef CONFIG_GRKERNSEC_TIME
65187+ if (grsec_enable_time)
65188+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65189+#endif
65190+ return;
65191+}
65192+
65193+EXPORT_SYMBOL(gr_log_timechange);
65194diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65195new file mode 100644
65196index 0000000..07e0dc0
65197--- /dev/null
65198+++ b/grsecurity/grsec_tpe.c
65199@@ -0,0 +1,73 @@
65200+#include <linux/kernel.h>
65201+#include <linux/sched.h>
65202+#include <linux/file.h>
65203+#include <linux/fs.h>
65204+#include <linux/grinternal.h>
65205+
65206+extern int gr_acl_tpe_check(void);
65207+
65208+int
65209+gr_tpe_allow(const struct file *file)
65210+{
65211+#ifdef CONFIG_GRKERNSEC
65212+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65213+ const struct cred *cred = current_cred();
65214+ char *msg = NULL;
65215+ char *msg2 = NULL;
65216+
65217+ // never restrict root
65218+ if (!cred->uid)
65219+ return 1;
65220+
65221+ if (grsec_enable_tpe) {
65222+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65223+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65224+ msg = "not being in trusted group";
65225+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65226+ msg = "being in untrusted group";
65227+#else
65228+ if (in_group_p(grsec_tpe_gid))
65229+ msg = "being in untrusted group";
65230+#endif
65231+ }
65232+ if (!msg && gr_acl_tpe_check())
65233+ msg = "being in untrusted role";
65234+
65235+ // not in any affected group/role
65236+ if (!msg)
65237+ goto next_check;
65238+
65239+ if (inode->i_uid)
65240+ msg2 = "file in non-root-owned directory";
65241+ else if (inode->i_mode & S_IWOTH)
65242+ msg2 = "file in world-writable directory";
65243+ else if (inode->i_mode & S_IWGRP)
65244+ msg2 = "file in group-writable directory";
65245+
65246+ if (msg && msg2) {
65247+ char fullmsg[70] = {0};
65248+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65249+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65250+ return 0;
65251+ }
65252+ msg = NULL;
65253+next_check:
65254+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65255+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65256+ return 1;
65257+
65258+ if (inode->i_uid && (inode->i_uid != cred->uid))
65259+ msg = "directory not owned by user";
65260+ else if (inode->i_mode & S_IWOTH)
65261+ msg = "file in world-writable directory";
65262+ else if (inode->i_mode & S_IWGRP)
65263+ msg = "file in group-writable directory";
65264+
65265+ if (msg) {
65266+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65267+ return 0;
65268+ }
65269+#endif
65270+#endif
65271+ return 1;
65272+}
65273diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65274new file mode 100644
65275index 0000000..9f7b1ac
65276--- /dev/null
65277+++ b/grsecurity/grsum.c
65278@@ -0,0 +1,61 @@
65279+#include <linux/err.h>
65280+#include <linux/kernel.h>
65281+#include <linux/sched.h>
65282+#include <linux/mm.h>
65283+#include <linux/scatterlist.h>
65284+#include <linux/crypto.h>
65285+#include <linux/gracl.h>
65286+
65287+
65288+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65289+#error "crypto and sha256 must be built into the kernel"
65290+#endif
65291+
65292+int
65293+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65294+{
65295+ char *p;
65296+ struct crypto_hash *tfm;
65297+ struct hash_desc desc;
65298+ struct scatterlist sg;
65299+ unsigned char temp_sum[GR_SHA_LEN];
65300+ volatile int retval = 0;
65301+ volatile int dummy = 0;
65302+ unsigned int i;
65303+
65304+ sg_init_table(&sg, 1);
65305+
65306+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65307+ if (IS_ERR(tfm)) {
65308+ /* should never happen, since sha256 should be built in */
65309+ return 1;
65310+ }
65311+
65312+ desc.tfm = tfm;
65313+ desc.flags = 0;
65314+
65315+ crypto_hash_init(&desc);
65316+
65317+ p = salt;
65318+ sg_set_buf(&sg, p, GR_SALT_LEN);
65319+ crypto_hash_update(&desc, &sg, sg.length);
65320+
65321+ p = entry->pw;
65322+ sg_set_buf(&sg, p, strlen(p));
65323+
65324+ crypto_hash_update(&desc, &sg, sg.length);
65325+
65326+ crypto_hash_final(&desc, temp_sum);
65327+
65328+ memset(entry->pw, 0, GR_PW_LEN);
65329+
65330+ for (i = 0; i < GR_SHA_LEN; i++)
65331+ if (sum[i] != temp_sum[i])
65332+ retval = 1;
65333+ else
65334+ dummy = 1; // waste a cycle
65335+
65336+ crypto_free_hash(tfm);
65337+
65338+ return retval;
65339+}
65340diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
65341index 3cd9ccd..fe16d47 100644
65342--- a/include/acpi/acpi_bus.h
65343+++ b/include/acpi/acpi_bus.h
65344@@ -107,7 +107,7 @@ struct acpi_device_ops {
65345 acpi_op_bind bind;
65346 acpi_op_unbind unbind;
65347 acpi_op_notify notify;
65348-};
65349+} __no_const;
65350
65351 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
65352
65353diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
65354index f4906f6..71feb73 100644
65355--- a/include/acpi/acpi_drivers.h
65356+++ b/include/acpi/acpi_drivers.h
65357@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
65358 Dock Station
65359 -------------------------------------------------------------------------- */
65360 struct acpi_dock_ops {
65361- acpi_notify_handler handler;
65362- acpi_notify_handler uevent;
65363+ const acpi_notify_handler handler;
65364+ const acpi_notify_handler uevent;
65365 };
65366
65367 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
65368@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
65369 extern int register_dock_notifier(struct notifier_block *nb);
65370 extern void unregister_dock_notifier(struct notifier_block *nb);
65371 extern int register_hotplug_dock_device(acpi_handle handle,
65372- struct acpi_dock_ops *ops,
65373+ const struct acpi_dock_ops *ops,
65374 void *context);
65375 extern void unregister_hotplug_dock_device(acpi_handle handle);
65376 #else
65377@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
65378 {
65379 }
65380 static inline int register_hotplug_dock_device(acpi_handle handle,
65381- struct acpi_dock_ops *ops,
65382+ const struct acpi_dock_ops *ops,
65383 void *context)
65384 {
65385 return -ENODEV;
65386diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65387index b7babf0..a9ac9fc 100644
65388--- a/include/asm-generic/atomic-long.h
65389+++ b/include/asm-generic/atomic-long.h
65390@@ -22,6 +22,12 @@
65391
65392 typedef atomic64_t atomic_long_t;
65393
65394+#ifdef CONFIG_PAX_REFCOUNT
65395+typedef atomic64_unchecked_t atomic_long_unchecked_t;
65396+#else
65397+typedef atomic64_t atomic_long_unchecked_t;
65398+#endif
65399+
65400 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65401
65402 static inline long atomic_long_read(atomic_long_t *l)
65403@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65404 return (long)atomic64_read(v);
65405 }
65406
65407+#ifdef CONFIG_PAX_REFCOUNT
65408+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65409+{
65410+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65411+
65412+ return (long)atomic64_read_unchecked(v);
65413+}
65414+#endif
65415+
65416 static inline void atomic_long_set(atomic_long_t *l, long i)
65417 {
65418 atomic64_t *v = (atomic64_t *)l;
65419@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65420 atomic64_set(v, i);
65421 }
65422
65423+#ifdef CONFIG_PAX_REFCOUNT
65424+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65425+{
65426+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65427+
65428+ atomic64_set_unchecked(v, i);
65429+}
65430+#endif
65431+
65432 static inline void atomic_long_inc(atomic_long_t *l)
65433 {
65434 atomic64_t *v = (atomic64_t *)l;
65435@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65436 atomic64_inc(v);
65437 }
65438
65439+#ifdef CONFIG_PAX_REFCOUNT
65440+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65441+{
65442+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65443+
65444+ atomic64_inc_unchecked(v);
65445+}
65446+#endif
65447+
65448 static inline void atomic_long_dec(atomic_long_t *l)
65449 {
65450 atomic64_t *v = (atomic64_t *)l;
65451@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65452 atomic64_dec(v);
65453 }
65454
65455+#ifdef CONFIG_PAX_REFCOUNT
65456+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65457+{
65458+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65459+
65460+ atomic64_dec_unchecked(v);
65461+}
65462+#endif
65463+
65464 static inline void atomic_long_add(long i, atomic_long_t *l)
65465 {
65466 atomic64_t *v = (atomic64_t *)l;
65467@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65468 atomic64_add(i, v);
65469 }
65470
65471+#ifdef CONFIG_PAX_REFCOUNT
65472+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65473+{
65474+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65475+
65476+ atomic64_add_unchecked(i, v);
65477+}
65478+#endif
65479+
65480 static inline void atomic_long_sub(long i, atomic_long_t *l)
65481 {
65482 atomic64_t *v = (atomic64_t *)l;
65483@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65484 return (long)atomic64_inc_return(v);
65485 }
65486
65487+#ifdef CONFIG_PAX_REFCOUNT
65488+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65489+{
65490+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65491+
65492+ return (long)atomic64_inc_return_unchecked(v);
65493+}
65494+#endif
65495+
65496 static inline long atomic_long_dec_return(atomic_long_t *l)
65497 {
65498 atomic64_t *v = (atomic64_t *)l;
65499@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65500
65501 typedef atomic_t atomic_long_t;
65502
65503+#ifdef CONFIG_PAX_REFCOUNT
65504+typedef atomic_unchecked_t atomic_long_unchecked_t;
65505+#else
65506+typedef atomic_t atomic_long_unchecked_t;
65507+#endif
65508+
65509 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65510 static inline long atomic_long_read(atomic_long_t *l)
65511 {
65512@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65513 return (long)atomic_read(v);
65514 }
65515
65516+#ifdef CONFIG_PAX_REFCOUNT
65517+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65518+{
65519+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65520+
65521+ return (long)atomic_read_unchecked(v);
65522+}
65523+#endif
65524+
65525 static inline void atomic_long_set(atomic_long_t *l, long i)
65526 {
65527 atomic_t *v = (atomic_t *)l;
65528@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65529 atomic_set(v, i);
65530 }
65531
65532+#ifdef CONFIG_PAX_REFCOUNT
65533+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65534+{
65535+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65536+
65537+ atomic_set_unchecked(v, i);
65538+}
65539+#endif
65540+
65541 static inline void atomic_long_inc(atomic_long_t *l)
65542 {
65543 atomic_t *v = (atomic_t *)l;
65544@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65545 atomic_inc(v);
65546 }
65547
65548+#ifdef CONFIG_PAX_REFCOUNT
65549+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65550+{
65551+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65552+
65553+ atomic_inc_unchecked(v);
65554+}
65555+#endif
65556+
65557 static inline void atomic_long_dec(atomic_long_t *l)
65558 {
65559 atomic_t *v = (atomic_t *)l;
65560@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65561 atomic_dec(v);
65562 }
65563
65564+#ifdef CONFIG_PAX_REFCOUNT
65565+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65566+{
65567+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65568+
65569+ atomic_dec_unchecked(v);
65570+}
65571+#endif
65572+
65573 static inline void atomic_long_add(long i, atomic_long_t *l)
65574 {
65575 atomic_t *v = (atomic_t *)l;
65576@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65577 atomic_add(i, v);
65578 }
65579
65580+#ifdef CONFIG_PAX_REFCOUNT
65581+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65582+{
65583+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65584+
65585+ atomic_add_unchecked(i, v);
65586+}
65587+#endif
65588+
65589 static inline void atomic_long_sub(long i, atomic_long_t *l)
65590 {
65591 atomic_t *v = (atomic_t *)l;
65592@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65593 return (long)atomic_inc_return(v);
65594 }
65595
65596+#ifdef CONFIG_PAX_REFCOUNT
65597+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65598+{
65599+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65600+
65601+ return (long)atomic_inc_return_unchecked(v);
65602+}
65603+#endif
65604+
65605 static inline long atomic_long_dec_return(atomic_long_t *l)
65606 {
65607 atomic_t *v = (atomic_t *)l;
65608@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65609
65610 #endif /* BITS_PER_LONG == 64 */
65611
65612+#ifdef CONFIG_PAX_REFCOUNT
65613+static inline void pax_refcount_needs_these_functions(void)
65614+{
65615+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
65616+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65617+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65618+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65619+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65620+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65621+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65622+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65623+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65624+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65625+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65626+
65627+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65628+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65629+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65630+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65631+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65632+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65633+}
65634+#else
65635+#define atomic_read_unchecked(v) atomic_read(v)
65636+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65637+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65638+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65639+#define atomic_inc_unchecked(v) atomic_inc(v)
65640+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65641+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65642+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65643+#define atomic_dec_unchecked(v) atomic_dec(v)
65644+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65645+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65646+
65647+#define atomic_long_read_unchecked(v) atomic_long_read(v)
65648+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65649+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65650+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65651+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65652+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65653+#endif
65654+
65655 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65656diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65657index b18ce4f..2ee2843 100644
65658--- a/include/asm-generic/atomic64.h
65659+++ b/include/asm-generic/atomic64.h
65660@@ -16,6 +16,8 @@ typedef struct {
65661 long long counter;
65662 } atomic64_t;
65663
65664+typedef atomic64_t atomic64_unchecked_t;
65665+
65666 #define ATOMIC64_INIT(i) { (i) }
65667
65668 extern long long atomic64_read(const atomic64_t *v);
65669@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65670 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65671 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65672
65673+#define atomic64_read_unchecked(v) atomic64_read(v)
65674+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65675+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65676+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65677+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65678+#define atomic64_inc_unchecked(v) atomic64_inc(v)
65679+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65680+#define atomic64_dec_unchecked(v) atomic64_dec(v)
65681+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65682+
65683 #endif /* _ASM_GENERIC_ATOMIC64_H */
65684diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65685index d48ddf0..656a0ac 100644
65686--- a/include/asm-generic/bug.h
65687+++ b/include/asm-generic/bug.h
65688@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65689
65690 #else /* !CONFIG_BUG */
65691 #ifndef HAVE_ARCH_BUG
65692-#define BUG() do {} while(0)
65693+#define BUG() do { for (;;) ; } while(0)
65694 #endif
65695
65696 #ifndef HAVE_ARCH_BUG_ON
65697-#define BUG_ON(condition) do { if (condition) ; } while(0)
65698+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65699 #endif
65700
65701 #ifndef HAVE_ARCH_WARN_ON
65702diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65703index 1bfcfe5..e04c5c9 100644
65704--- a/include/asm-generic/cache.h
65705+++ b/include/asm-generic/cache.h
65706@@ -6,7 +6,7 @@
65707 * cache lines need to provide their own cache.h.
65708 */
65709
65710-#define L1_CACHE_SHIFT 5
65711-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65712+#define L1_CACHE_SHIFT 5UL
65713+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65714
65715 #endif /* __ASM_GENERIC_CACHE_H */
65716diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65717index 6920695..41038bc 100644
65718--- a/include/asm-generic/dma-mapping-common.h
65719+++ b/include/asm-generic/dma-mapping-common.h
65720@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65721 enum dma_data_direction dir,
65722 struct dma_attrs *attrs)
65723 {
65724- struct dma_map_ops *ops = get_dma_ops(dev);
65725+ const struct dma_map_ops *ops = get_dma_ops(dev);
65726 dma_addr_t addr;
65727
65728 kmemcheck_mark_initialized(ptr, size);
65729@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65730 enum dma_data_direction dir,
65731 struct dma_attrs *attrs)
65732 {
65733- struct dma_map_ops *ops = get_dma_ops(dev);
65734+ const struct dma_map_ops *ops = get_dma_ops(dev);
65735
65736 BUG_ON(!valid_dma_direction(dir));
65737 if (ops->unmap_page)
65738@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65739 int nents, enum dma_data_direction dir,
65740 struct dma_attrs *attrs)
65741 {
65742- struct dma_map_ops *ops = get_dma_ops(dev);
65743+ const struct dma_map_ops *ops = get_dma_ops(dev);
65744 int i, ents;
65745 struct scatterlist *s;
65746
65747@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65748 int nents, enum dma_data_direction dir,
65749 struct dma_attrs *attrs)
65750 {
65751- struct dma_map_ops *ops = get_dma_ops(dev);
65752+ const struct dma_map_ops *ops = get_dma_ops(dev);
65753
65754 BUG_ON(!valid_dma_direction(dir));
65755 debug_dma_unmap_sg(dev, sg, nents, dir);
65756@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65757 size_t offset, size_t size,
65758 enum dma_data_direction dir)
65759 {
65760- struct dma_map_ops *ops = get_dma_ops(dev);
65761+ const struct dma_map_ops *ops = get_dma_ops(dev);
65762 dma_addr_t addr;
65763
65764 kmemcheck_mark_initialized(page_address(page) + offset, size);
65765@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65766 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65767 size_t size, enum dma_data_direction dir)
65768 {
65769- struct dma_map_ops *ops = get_dma_ops(dev);
65770+ const struct dma_map_ops *ops = get_dma_ops(dev);
65771
65772 BUG_ON(!valid_dma_direction(dir));
65773 if (ops->unmap_page)
65774@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65775 size_t size,
65776 enum dma_data_direction dir)
65777 {
65778- struct dma_map_ops *ops = get_dma_ops(dev);
65779+ const struct dma_map_ops *ops = get_dma_ops(dev);
65780
65781 BUG_ON(!valid_dma_direction(dir));
65782 if (ops->sync_single_for_cpu)
65783@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65784 dma_addr_t addr, size_t size,
65785 enum dma_data_direction dir)
65786 {
65787- struct dma_map_ops *ops = get_dma_ops(dev);
65788+ const struct dma_map_ops *ops = get_dma_ops(dev);
65789
65790 BUG_ON(!valid_dma_direction(dir));
65791 if (ops->sync_single_for_device)
65792@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65793 size_t size,
65794 enum dma_data_direction dir)
65795 {
65796- struct dma_map_ops *ops = get_dma_ops(dev);
65797+ const struct dma_map_ops *ops = get_dma_ops(dev);
65798
65799 BUG_ON(!valid_dma_direction(dir));
65800 if (ops->sync_single_range_for_cpu) {
65801@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65802 size_t size,
65803 enum dma_data_direction dir)
65804 {
65805- struct dma_map_ops *ops = get_dma_ops(dev);
65806+ const struct dma_map_ops *ops = get_dma_ops(dev);
65807
65808 BUG_ON(!valid_dma_direction(dir));
65809 if (ops->sync_single_range_for_device) {
65810@@ -155,7 +155,7 @@ static inline void
65811 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65812 int nelems, enum dma_data_direction dir)
65813 {
65814- struct dma_map_ops *ops = get_dma_ops(dev);
65815+ const struct dma_map_ops *ops = get_dma_ops(dev);
65816
65817 BUG_ON(!valid_dma_direction(dir));
65818 if (ops->sync_sg_for_cpu)
65819@@ -167,7 +167,7 @@ static inline void
65820 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65821 int nelems, enum dma_data_direction dir)
65822 {
65823- struct dma_map_ops *ops = get_dma_ops(dev);
65824+ const struct dma_map_ops *ops = get_dma_ops(dev);
65825
65826 BUG_ON(!valid_dma_direction(dir));
65827 if (ops->sync_sg_for_device)
65828diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65829index 0d68a1e..b74a761 100644
65830--- a/include/asm-generic/emergency-restart.h
65831+++ b/include/asm-generic/emergency-restart.h
65832@@ -1,7 +1,7 @@
65833 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65834 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65835
65836-static inline void machine_emergency_restart(void)
65837+static inline __noreturn void machine_emergency_restart(void)
65838 {
65839 machine_restart(NULL);
65840 }
65841diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
65842index 3c2344f..4590a7d 100644
65843--- a/include/asm-generic/futex.h
65844+++ b/include/asm-generic/futex.h
65845@@ -6,7 +6,7 @@
65846 #include <asm/errno.h>
65847
65848 static inline int
65849-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65850+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
65851 {
65852 int op = (encoded_op >> 28) & 7;
65853 int cmp = (encoded_op >> 24) & 15;
65854@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65855 }
65856
65857 static inline int
65858-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
65859+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
65860 {
65861 return -ENOSYS;
65862 }
65863diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
65864index 1ca3efc..e3dc852 100644
65865--- a/include/asm-generic/int-l64.h
65866+++ b/include/asm-generic/int-l64.h
65867@@ -46,6 +46,8 @@ typedef unsigned int u32;
65868 typedef signed long s64;
65869 typedef unsigned long u64;
65870
65871+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
65872+
65873 #define S8_C(x) x
65874 #define U8_C(x) x ## U
65875 #define S16_C(x) x
65876diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
65877index f394147..b6152b9 100644
65878--- a/include/asm-generic/int-ll64.h
65879+++ b/include/asm-generic/int-ll64.h
65880@@ -51,6 +51,8 @@ typedef unsigned int u32;
65881 typedef signed long long s64;
65882 typedef unsigned long long u64;
65883
65884+typedef unsigned long long intoverflow_t;
65885+
65886 #define S8_C(x) x
65887 #define U8_C(x) x ## U
65888 #define S16_C(x) x
65889diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65890index e5f234a..cdb16b3 100644
65891--- a/include/asm-generic/kmap_types.h
65892+++ b/include/asm-generic/kmap_types.h
65893@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65894 KMAP_D(16) KM_IRQ_PTE,
65895 KMAP_D(17) KM_NMI,
65896 KMAP_D(18) KM_NMI_PTE,
65897-KMAP_D(19) KM_TYPE_NR
65898+KMAP_D(19) KM_CLEARPAGE,
65899+KMAP_D(20) KM_TYPE_NR
65900 };
65901
65902 #undef KMAP_D
65903diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65904index 725612b..9cc513a 100644
65905--- a/include/asm-generic/pgtable-nopmd.h
65906+++ b/include/asm-generic/pgtable-nopmd.h
65907@@ -1,14 +1,19 @@
65908 #ifndef _PGTABLE_NOPMD_H
65909 #define _PGTABLE_NOPMD_H
65910
65911-#ifndef __ASSEMBLY__
65912-
65913 #include <asm-generic/pgtable-nopud.h>
65914
65915-struct mm_struct;
65916-
65917 #define __PAGETABLE_PMD_FOLDED
65918
65919+#define PMD_SHIFT PUD_SHIFT
65920+#define PTRS_PER_PMD 1
65921+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65922+#define PMD_MASK (~(PMD_SIZE-1))
65923+
65924+#ifndef __ASSEMBLY__
65925+
65926+struct mm_struct;
65927+
65928 /*
65929 * Having the pmd type consist of a pud gets the size right, and allows
65930 * us to conceptually access the pud entry that this pmd is folded into
65931@@ -16,11 +21,6 @@ struct mm_struct;
65932 */
65933 typedef struct { pud_t pud; } pmd_t;
65934
65935-#define PMD_SHIFT PUD_SHIFT
65936-#define PTRS_PER_PMD 1
65937-#define PMD_SIZE (1UL << PMD_SHIFT)
65938-#define PMD_MASK (~(PMD_SIZE-1))
65939-
65940 /*
65941 * The "pud_xxx()" functions here are trivial for a folded two-level
65942 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65943diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65944index 810431d..ccc3638 100644
65945--- a/include/asm-generic/pgtable-nopud.h
65946+++ b/include/asm-generic/pgtable-nopud.h
65947@@ -1,10 +1,15 @@
65948 #ifndef _PGTABLE_NOPUD_H
65949 #define _PGTABLE_NOPUD_H
65950
65951-#ifndef __ASSEMBLY__
65952-
65953 #define __PAGETABLE_PUD_FOLDED
65954
65955+#define PUD_SHIFT PGDIR_SHIFT
65956+#define PTRS_PER_PUD 1
65957+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
65958+#define PUD_MASK (~(PUD_SIZE-1))
65959+
65960+#ifndef __ASSEMBLY__
65961+
65962 /*
65963 * Having the pud type consist of a pgd gets the size right, and allows
65964 * us to conceptually access the pgd entry that this pud is folded into
65965@@ -12,11 +17,6 @@
65966 */
65967 typedef struct { pgd_t pgd; } pud_t;
65968
65969-#define PUD_SHIFT PGDIR_SHIFT
65970-#define PTRS_PER_PUD 1
65971-#define PUD_SIZE (1UL << PUD_SHIFT)
65972-#define PUD_MASK (~(PUD_SIZE-1))
65973-
65974 /*
65975 * The "pgd_xxx()" functions here are trivial for a folded two-level
65976 * setup: the pud is never bad, and a pud always exists (as it's folded
65977diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
65978index e2bd73e..fea8ed3 100644
65979--- a/include/asm-generic/pgtable.h
65980+++ b/include/asm-generic/pgtable.h
65981@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
65982 unsigned long size);
65983 #endif
65984
65985+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
65986+static inline unsigned long pax_open_kernel(void) { return 0; }
65987+#endif
65988+
65989+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
65990+static inline unsigned long pax_close_kernel(void) { return 0; }
65991+#endif
65992+
65993 #endif /* !__ASSEMBLY__ */
65994
65995 #endif /* _ASM_GENERIC_PGTABLE_H */
65996diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
65997index b6e818f..21aa58a 100644
65998--- a/include/asm-generic/vmlinux.lds.h
65999+++ b/include/asm-generic/vmlinux.lds.h
66000@@ -199,6 +199,7 @@
66001 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66002 VMLINUX_SYMBOL(__start_rodata) = .; \
66003 *(.rodata) *(.rodata.*) \
66004+ *(.data.read_only) \
66005 *(__vermagic) /* Kernel version magic */ \
66006 *(__markers_strings) /* Markers: strings */ \
66007 *(__tracepoints_strings)/* Tracepoints: strings */ \
66008@@ -656,22 +657,24 @@
66009 * section in the linker script will go there too. @phdr should have
66010 * a leading colon.
66011 *
66012- * Note that this macros defines __per_cpu_load as an absolute symbol.
66013+ * Note that this macros defines per_cpu_load as an absolute symbol.
66014 * If there is no need to put the percpu section at a predetermined
66015 * address, use PERCPU().
66016 */
66017 #define PERCPU_VADDR(vaddr, phdr) \
66018- VMLINUX_SYMBOL(__per_cpu_load) = .; \
66019- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66020+ per_cpu_load = .; \
66021+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66022 - LOAD_OFFSET) { \
66023+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66024 VMLINUX_SYMBOL(__per_cpu_start) = .; \
66025 *(.data.percpu.first) \
66026- *(.data.percpu.page_aligned) \
66027 *(.data.percpu) \
66028+ . = ALIGN(PAGE_SIZE); \
66029+ *(.data.percpu.page_aligned) \
66030 *(.data.percpu.shared_aligned) \
66031 VMLINUX_SYMBOL(__per_cpu_end) = .; \
66032 } phdr \
66033- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
66034+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
66035
66036 /**
66037 * PERCPU - define output section for percpu area, simple version
66038diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66039index ebab6a6..351dba1 100644
66040--- a/include/drm/drmP.h
66041+++ b/include/drm/drmP.h
66042@@ -71,6 +71,7 @@
66043 #include <linux/workqueue.h>
66044 #include <linux/poll.h>
66045 #include <asm/pgalloc.h>
66046+#include <asm/local.h>
66047 #include "drm.h"
66048
66049 #include <linux/idr.h>
66050@@ -814,7 +815,7 @@ struct drm_driver {
66051 void (*vgaarb_irq)(struct drm_device *dev, bool state);
66052
66053 /* Driver private ops for this object */
66054- struct vm_operations_struct *gem_vm_ops;
66055+ const struct vm_operations_struct *gem_vm_ops;
66056
66057 int major;
66058 int minor;
66059@@ -917,7 +918,7 @@ struct drm_device {
66060
66061 /** \name Usage Counters */
66062 /*@{ */
66063- int open_count; /**< Outstanding files open */
66064+ local_t open_count; /**< Outstanding files open */
66065 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66066 atomic_t vma_count; /**< Outstanding vma areas open */
66067 int buf_use; /**< Buffers in use -- cannot alloc */
66068@@ -928,7 +929,7 @@ struct drm_device {
66069 /*@{ */
66070 unsigned long counters;
66071 enum drm_stat_type types[15];
66072- atomic_t counts[15];
66073+ atomic_unchecked_t counts[15];
66074 /*@} */
66075
66076 struct list_head filelist;
66077@@ -1016,7 +1017,7 @@ struct drm_device {
66078 struct pci_controller *hose;
66079 #endif
66080 struct drm_sg_mem *sg; /**< Scatter gather memory */
66081- unsigned int num_crtcs; /**< Number of CRTCs on this device */
66082+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
66083 void *dev_private; /**< device private data */
66084 void *mm_private;
66085 struct address_space *dev_mapping;
66086@@ -1042,11 +1043,11 @@ struct drm_device {
66087 spinlock_t object_name_lock;
66088 struct idr object_name_idr;
66089 atomic_t object_count;
66090- atomic_t object_memory;
66091+ atomic_unchecked_t object_memory;
66092 atomic_t pin_count;
66093- atomic_t pin_memory;
66094+ atomic_unchecked_t pin_memory;
66095 atomic_t gtt_count;
66096- atomic_t gtt_memory;
66097+ atomic_unchecked_t gtt_memory;
66098 uint32_t gtt_total;
66099 uint32_t invalidate_domains; /* domains pending invalidation */
66100 uint32_t flush_domains; /* domains pending flush */
66101diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66102index b29e201..3413cc9 100644
66103--- a/include/drm/drm_crtc_helper.h
66104+++ b/include/drm/drm_crtc_helper.h
66105@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
66106
66107 /* reload the current crtc LUT */
66108 void (*load_lut)(struct drm_crtc *crtc);
66109-};
66110+} __no_const;
66111
66112 struct drm_encoder_helper_funcs {
66113 void (*dpms)(struct drm_encoder *encoder, int mode);
66114@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
66115 struct drm_connector *connector);
66116 /* disable encoder when not in use - more explicit than dpms off */
66117 void (*disable)(struct drm_encoder *encoder);
66118-};
66119+} __no_const;
66120
66121 struct drm_connector_helper_funcs {
66122 int (*get_modes)(struct drm_connector *connector);
66123diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66124index b199170..6f9e64c 100644
66125--- a/include/drm/ttm/ttm_memory.h
66126+++ b/include/drm/ttm/ttm_memory.h
66127@@ -47,7 +47,7 @@
66128
66129 struct ttm_mem_shrink {
66130 int (*do_shrink) (struct ttm_mem_shrink *);
66131-};
66132+} __no_const;
66133
66134 /**
66135 * struct ttm_mem_global - Global memory accounting structure.
66136diff --git a/include/linux/a.out.h b/include/linux/a.out.h
66137index e86dfca..40cc55f 100644
66138--- a/include/linux/a.out.h
66139+++ b/include/linux/a.out.h
66140@@ -39,6 +39,14 @@ enum machine_type {
66141 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
66142 };
66143
66144+/* Constants for the N_FLAGS field */
66145+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66146+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
66147+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
66148+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
66149+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66150+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66151+
66152 #if !defined (N_MAGIC)
66153 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
66154 #endif
66155diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66156index 817b237..62c10bc 100644
66157--- a/include/linux/atmdev.h
66158+++ b/include/linux/atmdev.h
66159@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
66160 #endif
66161
66162 struct k_atm_aal_stats {
66163-#define __HANDLE_ITEM(i) atomic_t i
66164+#define __HANDLE_ITEM(i) atomic_unchecked_t i
66165 __AAL_STAT_ITEMS
66166 #undef __HANDLE_ITEM
66167 };
66168diff --git a/include/linux/backlight.h b/include/linux/backlight.h
66169index 0f5f578..8c4f884 100644
66170--- a/include/linux/backlight.h
66171+++ b/include/linux/backlight.h
66172@@ -36,18 +36,18 @@ struct backlight_device;
66173 struct fb_info;
66174
66175 struct backlight_ops {
66176- unsigned int options;
66177+ const unsigned int options;
66178
66179 #define BL_CORE_SUSPENDRESUME (1 << 0)
66180
66181 /* Notify the backlight driver some property has changed */
66182- int (*update_status)(struct backlight_device *);
66183+ int (* const update_status)(struct backlight_device *);
66184 /* Return the current backlight brightness (accounting for power,
66185 fb_blank etc.) */
66186- int (*get_brightness)(struct backlight_device *);
66187+ int (* const get_brightness)(struct backlight_device *);
66188 /* Check if given framebuffer device is the one bound to this backlight;
66189 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
66190- int (*check_fb)(struct fb_info *);
66191+ int (* const check_fb)(struct fb_info *);
66192 };
66193
66194 /* This structure defines all the properties of a backlight */
66195@@ -86,7 +86,7 @@ struct backlight_device {
66196 registered this device has been unloaded, and if class_get_devdata()
66197 points to something in the body of that driver, it is also invalid. */
66198 struct mutex ops_lock;
66199- struct backlight_ops *ops;
66200+ const struct backlight_ops *ops;
66201
66202 /* The framebuffer notifier block */
66203 struct notifier_block fb_notif;
66204@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
66205 }
66206
66207 extern struct backlight_device *backlight_device_register(const char *name,
66208- struct device *dev, void *devdata, struct backlight_ops *ops);
66209+ struct device *dev, void *devdata, const struct backlight_ops *ops);
66210 extern void backlight_device_unregister(struct backlight_device *bd);
66211 extern void backlight_force_update(struct backlight_device *bd,
66212 enum backlight_update_reason reason);
66213diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66214index a3d802e..93a2ef4 100644
66215--- a/include/linux/binfmts.h
66216+++ b/include/linux/binfmts.h
66217@@ -18,7 +18,7 @@ struct pt_regs;
66218 #define BINPRM_BUF_SIZE 128
66219
66220 #ifdef __KERNEL__
66221-#include <linux/list.h>
66222+#include <linux/sched.h>
66223
66224 #define CORENAME_MAX_SIZE 128
66225
66226@@ -58,6 +58,7 @@ struct linux_binprm{
66227 unsigned interp_flags;
66228 unsigned interp_data;
66229 unsigned long loader, exec;
66230+ char tcomm[TASK_COMM_LEN];
66231 };
66232
66233 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
66234@@ -83,6 +84,7 @@ struct linux_binfmt {
66235 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
66236 int (*load_shlib)(struct file *);
66237 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
66238+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66239 unsigned long min_coredump; /* minimal dump size */
66240 int hasvdso;
66241 };
66242diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66243index 5eb6cb0..a2906d2 100644
66244--- a/include/linux/blkdev.h
66245+++ b/include/linux/blkdev.h
66246@@ -1281,7 +1281,7 @@ struct block_device_operations {
66247 int (*revalidate_disk) (struct gendisk *);
66248 int (*getgeo)(struct block_device *, struct hd_geometry *);
66249 struct module *owner;
66250-};
66251+} __do_const;
66252
66253 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66254 unsigned long);
66255diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66256index 3b73b99..629d21b 100644
66257--- a/include/linux/blktrace_api.h
66258+++ b/include/linux/blktrace_api.h
66259@@ -160,7 +160,7 @@ struct blk_trace {
66260 struct dentry *dir;
66261 struct dentry *dropped_file;
66262 struct dentry *msg_file;
66263- atomic_t dropped;
66264+ atomic_unchecked_t dropped;
66265 };
66266
66267 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66268diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
66269index 83195fb..0b0f77d 100644
66270--- a/include/linux/byteorder/little_endian.h
66271+++ b/include/linux/byteorder/little_endian.h
66272@@ -42,51 +42,51 @@
66273
66274 static inline __le64 __cpu_to_le64p(const __u64 *p)
66275 {
66276- return (__force __le64)*p;
66277+ return (__force const __le64)*p;
66278 }
66279 static inline __u64 __le64_to_cpup(const __le64 *p)
66280 {
66281- return (__force __u64)*p;
66282+ return (__force const __u64)*p;
66283 }
66284 static inline __le32 __cpu_to_le32p(const __u32 *p)
66285 {
66286- return (__force __le32)*p;
66287+ return (__force const __le32)*p;
66288 }
66289 static inline __u32 __le32_to_cpup(const __le32 *p)
66290 {
66291- return (__force __u32)*p;
66292+ return (__force const __u32)*p;
66293 }
66294 static inline __le16 __cpu_to_le16p(const __u16 *p)
66295 {
66296- return (__force __le16)*p;
66297+ return (__force const __le16)*p;
66298 }
66299 static inline __u16 __le16_to_cpup(const __le16 *p)
66300 {
66301- return (__force __u16)*p;
66302+ return (__force const __u16)*p;
66303 }
66304 static inline __be64 __cpu_to_be64p(const __u64 *p)
66305 {
66306- return (__force __be64)__swab64p(p);
66307+ return (__force const __be64)__swab64p(p);
66308 }
66309 static inline __u64 __be64_to_cpup(const __be64 *p)
66310 {
66311- return __swab64p((__u64 *)p);
66312+ return __swab64p((const __u64 *)p);
66313 }
66314 static inline __be32 __cpu_to_be32p(const __u32 *p)
66315 {
66316- return (__force __be32)__swab32p(p);
66317+ return (__force const __be32)__swab32p(p);
66318 }
66319 static inline __u32 __be32_to_cpup(const __be32 *p)
66320 {
66321- return __swab32p((__u32 *)p);
66322+ return __swab32p((const __u32 *)p);
66323 }
66324 static inline __be16 __cpu_to_be16p(const __u16 *p)
66325 {
66326- return (__force __be16)__swab16p(p);
66327+ return (__force const __be16)__swab16p(p);
66328 }
66329 static inline __u16 __be16_to_cpup(const __be16 *p)
66330 {
66331- return __swab16p((__u16 *)p);
66332+ return __swab16p((const __u16 *)p);
66333 }
66334 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
66335 #define __le64_to_cpus(x) do { (void)(x); } while (0)
66336diff --git a/include/linux/cache.h b/include/linux/cache.h
66337index 97e2488..e7576b9 100644
66338--- a/include/linux/cache.h
66339+++ b/include/linux/cache.h
66340@@ -16,6 +16,10 @@
66341 #define __read_mostly
66342 #endif
66343
66344+#ifndef __read_only
66345+#define __read_only __read_mostly
66346+#endif
66347+
66348 #ifndef ____cacheline_aligned
66349 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66350 #endif
66351diff --git a/include/linux/capability.h b/include/linux/capability.h
66352index c8f2a5f7..1618a5c 100644
66353--- a/include/linux/capability.h
66354+++ b/include/linux/capability.h
66355@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
66356 (security_real_capable_noaudit((t), (cap)) == 0)
66357
66358 extern int capable(int cap);
66359+int capable_nolog(int cap);
66360
66361 /* audit system wants to get cap info from files as well */
66362 struct dentry;
66363diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66364index 450fa59..86019fb 100644
66365--- a/include/linux/compiler-gcc4.h
66366+++ b/include/linux/compiler-gcc4.h
66367@@ -36,4 +36,16 @@
66368 the kernel context */
66369 #define __cold __attribute__((__cold__))
66370
66371+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66372+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66373+#define __bos0(ptr) __bos((ptr), 0)
66374+#define __bos1(ptr) __bos((ptr), 1)
66375+
66376+#if __GNUC_MINOR__ >= 5
66377+#ifdef CONSTIFY_PLUGIN
66378+#define __no_const __attribute__((no_const))
66379+#define __do_const __attribute__((do_const))
66380+#endif
66381+#endif
66382+
66383 #endif
66384diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66385index 04fb513..fd6477b 100644
66386--- a/include/linux/compiler.h
66387+++ b/include/linux/compiler.h
66388@@ -5,11 +5,14 @@
66389
66390 #ifdef __CHECKER__
66391 # define __user __attribute__((noderef, address_space(1)))
66392+# define __force_user __force __user
66393 # define __kernel /* default address space */
66394+# define __force_kernel __force __kernel
66395 # define __safe __attribute__((safe))
66396 # define __force __attribute__((force))
66397 # define __nocast __attribute__((nocast))
66398 # define __iomem __attribute__((noderef, address_space(2)))
66399+# define __force_iomem __force __iomem
66400 # define __acquires(x) __attribute__((context(x,0,1)))
66401 # define __releases(x) __attribute__((context(x,1,0)))
66402 # define __acquire(x) __context__(x,1)
66403@@ -17,13 +20,34 @@
66404 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66405 extern void __chk_user_ptr(const volatile void __user *);
66406 extern void __chk_io_ptr(const volatile void __iomem *);
66407+#elif defined(CHECKER_PLUGIN)
66408+//# define __user
66409+//# define __force_user
66410+//# define __kernel
66411+//# define __force_kernel
66412+# define __safe
66413+# define __force
66414+# define __nocast
66415+# define __iomem
66416+# define __force_iomem
66417+# define __chk_user_ptr(x) (void)0
66418+# define __chk_io_ptr(x) (void)0
66419+# define __builtin_warning(x, y...) (1)
66420+# define __acquires(x)
66421+# define __releases(x)
66422+# define __acquire(x) (void)0
66423+# define __release(x) (void)0
66424+# define __cond_lock(x,c) (c)
66425 #else
66426 # define __user
66427+# define __force_user
66428 # define __kernel
66429+# define __force_kernel
66430 # define __safe
66431 # define __force
66432 # define __nocast
66433 # define __iomem
66434+# define __force_iomem
66435 # define __chk_user_ptr(x) (void)0
66436 # define __chk_io_ptr(x) (void)0
66437 # define __builtin_warning(x, y...) (1)
66438@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66439 # define __attribute_const__ /* unimplemented */
66440 #endif
66441
66442+#ifndef __no_const
66443+# define __no_const
66444+#endif
66445+
66446+#ifndef __do_const
66447+# define __do_const
66448+#endif
66449+
66450 /*
66451 * Tell gcc if a function is cold. The compiler will assume any path
66452 * directly leading to the call is unlikely.
66453@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66454 #define __cold
66455 #endif
66456
66457+#ifndef __alloc_size
66458+#define __alloc_size(...)
66459+#endif
66460+
66461+#ifndef __bos
66462+#define __bos(ptr, arg)
66463+#endif
66464+
66465+#ifndef __bos0
66466+#define __bos0(ptr)
66467+#endif
66468+
66469+#ifndef __bos1
66470+#define __bos1(ptr)
66471+#endif
66472+
66473 /* Simple shorthand for a section definition */
66474 #ifndef __section
66475 # define __section(S) __attribute__ ((__section__(#S)))
66476@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66477 * use is to mediate communication between process-level code and irq/NMI
66478 * handlers, all running on the same CPU.
66479 */
66480-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66481+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66482+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66483
66484 #endif /* __LINUX_COMPILER_H */
66485diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66486index fd92988..a3164bd 100644
66487--- a/include/linux/crypto.h
66488+++ b/include/linux/crypto.h
66489@@ -394,7 +394,7 @@ struct cipher_tfm {
66490 const u8 *key, unsigned int keylen);
66491 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66492 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66493-};
66494+} __no_const;
66495
66496 struct hash_tfm {
66497 int (*init)(struct hash_desc *desc);
66498@@ -415,13 +415,13 @@ struct compress_tfm {
66499 int (*cot_decompress)(struct crypto_tfm *tfm,
66500 const u8 *src, unsigned int slen,
66501 u8 *dst, unsigned int *dlen);
66502-};
66503+} __no_const;
66504
66505 struct rng_tfm {
66506 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66507 unsigned int dlen);
66508 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66509-};
66510+} __no_const;
66511
66512 #define crt_ablkcipher crt_u.ablkcipher
66513 #define crt_aead crt_u.aead
66514diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66515index 30b93b2..cd7a8db 100644
66516--- a/include/linux/dcache.h
66517+++ b/include/linux/dcache.h
66518@@ -119,6 +119,8 @@ struct dentry {
66519 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66520 };
66521
66522+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66523+
66524 /*
66525 * dentry->d_lock spinlock nesting subclasses:
66526 *
66527diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66528index 3e9bd6a..f4e1aa0 100644
66529--- a/include/linux/decompress/mm.h
66530+++ b/include/linux/decompress/mm.h
66531@@ -78,7 +78,7 @@ static void free(void *where)
66532 * warnings when not needed (indeed large_malloc / large_free are not
66533 * needed by inflate */
66534
66535-#define malloc(a) kmalloc(a, GFP_KERNEL)
66536+#define malloc(a) kmalloc((a), GFP_KERNEL)
66537 #define free(a) kfree(a)
66538
66539 #define large_malloc(a) vmalloc(a)
66540diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66541index 91b7618..92a93d32 100644
66542--- a/include/linux/dma-mapping.h
66543+++ b/include/linux/dma-mapping.h
66544@@ -16,51 +16,51 @@ enum dma_data_direction {
66545 };
66546
66547 struct dma_map_ops {
66548- void* (*alloc_coherent)(struct device *dev, size_t size,
66549+ void* (* const alloc_coherent)(struct device *dev, size_t size,
66550 dma_addr_t *dma_handle, gfp_t gfp);
66551- void (*free_coherent)(struct device *dev, size_t size,
66552+ void (* const free_coherent)(struct device *dev, size_t size,
66553 void *vaddr, dma_addr_t dma_handle);
66554- dma_addr_t (*map_page)(struct device *dev, struct page *page,
66555+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66556 unsigned long offset, size_t size,
66557 enum dma_data_direction dir,
66558 struct dma_attrs *attrs);
66559- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66560+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66561 size_t size, enum dma_data_direction dir,
66562 struct dma_attrs *attrs);
66563- int (*map_sg)(struct device *dev, struct scatterlist *sg,
66564+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66565 int nents, enum dma_data_direction dir,
66566 struct dma_attrs *attrs);
66567- void (*unmap_sg)(struct device *dev,
66568+ void (* const unmap_sg)(struct device *dev,
66569 struct scatterlist *sg, int nents,
66570 enum dma_data_direction dir,
66571 struct dma_attrs *attrs);
66572- void (*sync_single_for_cpu)(struct device *dev,
66573+ void (* const sync_single_for_cpu)(struct device *dev,
66574 dma_addr_t dma_handle, size_t size,
66575 enum dma_data_direction dir);
66576- void (*sync_single_for_device)(struct device *dev,
66577+ void (* const sync_single_for_device)(struct device *dev,
66578 dma_addr_t dma_handle, size_t size,
66579 enum dma_data_direction dir);
66580- void (*sync_single_range_for_cpu)(struct device *dev,
66581+ void (* const sync_single_range_for_cpu)(struct device *dev,
66582 dma_addr_t dma_handle,
66583 unsigned long offset,
66584 size_t size,
66585 enum dma_data_direction dir);
66586- void (*sync_single_range_for_device)(struct device *dev,
66587+ void (* const sync_single_range_for_device)(struct device *dev,
66588 dma_addr_t dma_handle,
66589 unsigned long offset,
66590 size_t size,
66591 enum dma_data_direction dir);
66592- void (*sync_sg_for_cpu)(struct device *dev,
66593+ void (* const sync_sg_for_cpu)(struct device *dev,
66594 struct scatterlist *sg, int nents,
66595 enum dma_data_direction dir);
66596- void (*sync_sg_for_device)(struct device *dev,
66597+ void (* const sync_sg_for_device)(struct device *dev,
66598 struct scatterlist *sg, int nents,
66599 enum dma_data_direction dir);
66600- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66601- int (*dma_supported)(struct device *dev, u64 mask);
66602+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66603+ int (* const dma_supported)(struct device *dev, u64 mask);
66604 int (*set_dma_mask)(struct device *dev, u64 mask);
66605 int is_phys;
66606-};
66607+} __do_const;
66608
66609 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66610
66611diff --git a/include/linux/dst.h b/include/linux/dst.h
66612index e26fed8..b976d9f 100644
66613--- a/include/linux/dst.h
66614+++ b/include/linux/dst.h
66615@@ -380,7 +380,7 @@ struct dst_node
66616 struct thread_pool *pool;
66617
66618 /* Transaction IDs live here */
66619- atomic_long_t gen;
66620+ atomic_long_unchecked_t gen;
66621
66622 /*
66623 * How frequently and how many times transaction
66624diff --git a/include/linux/elf.h b/include/linux/elf.h
66625index 90a4ed0..d652617 100644
66626--- a/include/linux/elf.h
66627+++ b/include/linux/elf.h
66628@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66629 #define PT_GNU_EH_FRAME 0x6474e550
66630
66631 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66632+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66633+
66634+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66635+
66636+/* Constants for the e_flags field */
66637+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66638+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66639+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66640+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66641+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66642+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66643
66644 /* These constants define the different elf file types */
66645 #define ET_NONE 0
66646@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66647 #define DT_DEBUG 21
66648 #define DT_TEXTREL 22
66649 #define DT_JMPREL 23
66650+#define DT_FLAGS 30
66651+ #define DF_TEXTREL 0x00000004
66652 #define DT_ENCODING 32
66653 #define OLD_DT_LOOS 0x60000000
66654 #define DT_LOOS 0x6000000d
66655@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66656 #define PF_W 0x2
66657 #define PF_X 0x1
66658
66659+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66660+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66661+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66662+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66663+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66664+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66665+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66666+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66667+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66668+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66669+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66670+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66671+
66672 typedef struct elf32_phdr{
66673 Elf32_Word p_type;
66674 Elf32_Off p_offset;
66675@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66676 #define EI_OSABI 7
66677 #define EI_PAD 8
66678
66679+#define EI_PAX 14
66680+
66681 #define ELFMAG0 0x7f /* EI_MAG */
66682 #define ELFMAG1 'E'
66683 #define ELFMAG2 'L'
66684@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66685 #define elf_phdr elf32_phdr
66686 #define elf_note elf32_note
66687 #define elf_addr_t Elf32_Off
66688+#define elf_dyn Elf32_Dyn
66689
66690 #else
66691
66692@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66693 #define elf_phdr elf64_phdr
66694 #define elf_note elf64_note
66695 #define elf_addr_t Elf64_Off
66696+#define elf_dyn Elf64_Dyn
66697
66698 #endif
66699
66700diff --git a/include/linux/fs.h b/include/linux/fs.h
66701index 1b9a47a..6fe2934 100644
66702--- a/include/linux/fs.h
66703+++ b/include/linux/fs.h
66704@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66705 unsigned long, unsigned long);
66706
66707 struct address_space_operations {
66708- int (*writepage)(struct page *page, struct writeback_control *wbc);
66709- int (*readpage)(struct file *, struct page *);
66710- void (*sync_page)(struct page *);
66711+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
66712+ int (* const readpage)(struct file *, struct page *);
66713+ void (* const sync_page)(struct page *);
66714
66715 /* Write back some dirty pages from this mapping. */
66716- int (*writepages)(struct address_space *, struct writeback_control *);
66717+ int (* const writepages)(struct address_space *, struct writeback_control *);
66718
66719 /* Set a page dirty. Return true if this dirtied it */
66720- int (*set_page_dirty)(struct page *page);
66721+ int (* const set_page_dirty)(struct page *page);
66722
66723- int (*readpages)(struct file *filp, struct address_space *mapping,
66724+ int (* const readpages)(struct file *filp, struct address_space *mapping,
66725 struct list_head *pages, unsigned nr_pages);
66726
66727- int (*write_begin)(struct file *, struct address_space *mapping,
66728+ int (* const write_begin)(struct file *, struct address_space *mapping,
66729 loff_t pos, unsigned len, unsigned flags,
66730 struct page **pagep, void **fsdata);
66731- int (*write_end)(struct file *, struct address_space *mapping,
66732+ int (* const write_end)(struct file *, struct address_space *mapping,
66733 loff_t pos, unsigned len, unsigned copied,
66734 struct page *page, void *fsdata);
66735
66736 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66737- sector_t (*bmap)(struct address_space *, sector_t);
66738- void (*invalidatepage) (struct page *, unsigned long);
66739- int (*releasepage) (struct page *, gfp_t);
66740- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66741+ sector_t (* const bmap)(struct address_space *, sector_t);
66742+ void (* const invalidatepage) (struct page *, unsigned long);
66743+ int (* const releasepage) (struct page *, gfp_t);
66744+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66745 loff_t offset, unsigned long nr_segs);
66746- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66747+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66748 void **, unsigned long *);
66749 /* migrate the contents of a page to the specified target */
66750- int (*migratepage) (struct address_space *,
66751+ int (* const migratepage) (struct address_space *,
66752 struct page *, struct page *);
66753- int (*launder_page) (struct page *);
66754- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66755+ int (* const launder_page) (struct page *);
66756+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66757 unsigned long);
66758- int (*error_remove_page)(struct address_space *, struct page *);
66759+ int (* const error_remove_page)(struct address_space *, struct page *);
66760 };
66761
66762 /*
66763@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66764 typedef struct files_struct *fl_owner_t;
66765
66766 struct file_lock_operations {
66767- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66768- void (*fl_release_private)(struct file_lock *);
66769+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66770+ void (* const fl_release_private)(struct file_lock *);
66771 };
66772
66773 struct lock_manager_operations {
66774- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66775- void (*fl_notify)(struct file_lock *); /* unblock callback */
66776- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66777- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66778- void (*fl_release_private)(struct file_lock *);
66779- void (*fl_break)(struct file_lock *);
66780- int (*fl_mylease)(struct file_lock *, struct file_lock *);
66781- int (*fl_change)(struct file_lock **, int);
66782+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66783+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
66784+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66785+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66786+ void (* const fl_release_private)(struct file_lock *);
66787+ void (* const fl_break)(struct file_lock *);
66788+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66789+ int (* const fl_change)(struct file_lock **, int);
66790 };
66791
66792 struct lock_manager {
66793@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66794 unsigned int fi_flags; /* Flags as passed from user */
66795 unsigned int fi_extents_mapped; /* Number of mapped extents */
66796 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66797- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66798+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66799 * array */
66800 };
66801 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66802@@ -1512,7 +1512,8 @@ struct file_operations {
66803 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66804 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66805 int (*setlease)(struct file *, long, struct file_lock **);
66806-};
66807+} __do_const;
66808+typedef struct file_operations __no_const file_operations_no_const;
66809
66810 struct inode_operations {
66811 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66812@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66813 unsigned long, loff_t *);
66814
66815 struct super_operations {
66816- struct inode *(*alloc_inode)(struct super_block *sb);
66817- void (*destroy_inode)(struct inode *);
66818+ struct inode *(* const alloc_inode)(struct super_block *sb);
66819+ void (* const destroy_inode)(struct inode *);
66820
66821- void (*dirty_inode) (struct inode *);
66822- int (*write_inode) (struct inode *, int);
66823- void (*drop_inode) (struct inode *);
66824- void (*delete_inode) (struct inode *);
66825- void (*put_super) (struct super_block *);
66826- void (*write_super) (struct super_block *);
66827- int (*sync_fs)(struct super_block *sb, int wait);
66828- int (*freeze_fs) (struct super_block *);
66829- int (*unfreeze_fs) (struct super_block *);
66830- int (*statfs) (struct dentry *, struct kstatfs *);
66831- int (*remount_fs) (struct super_block *, int *, char *);
66832- void (*clear_inode) (struct inode *);
66833- void (*umount_begin) (struct super_block *);
66834+ void (* const dirty_inode) (struct inode *);
66835+ int (* const write_inode) (struct inode *, int);
66836+ void (* const drop_inode) (struct inode *);
66837+ void (* const delete_inode) (struct inode *);
66838+ void (* const put_super) (struct super_block *);
66839+ void (* const write_super) (struct super_block *);
66840+ int (* const sync_fs)(struct super_block *sb, int wait);
66841+ int (* const freeze_fs) (struct super_block *);
66842+ int (* const unfreeze_fs) (struct super_block *);
66843+ int (* const statfs) (struct dentry *, struct kstatfs *);
66844+ int (* const remount_fs) (struct super_block *, int *, char *);
66845+ void (* const clear_inode) (struct inode *);
66846+ void (* const umount_begin) (struct super_block *);
66847
66848- int (*show_options)(struct seq_file *, struct vfsmount *);
66849- int (*show_stats)(struct seq_file *, struct vfsmount *);
66850+ int (* const show_options)(struct seq_file *, struct vfsmount *);
66851+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
66852 #ifdef CONFIG_QUOTA
66853- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
66854- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66855+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
66856+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66857 #endif
66858- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66859+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66860 };
66861
66862 /*
66863diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66864index 78a05bf..2a7d3e1 100644
66865--- a/include/linux/fs_struct.h
66866+++ b/include/linux/fs_struct.h
66867@@ -4,7 +4,7 @@
66868 #include <linux/path.h>
66869
66870 struct fs_struct {
66871- int users;
66872+ atomic_t users;
66873 rwlock_t lock;
66874 int umask;
66875 int in_exec;
66876diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66877index 7be0c6f..2f63a2b 100644
66878--- a/include/linux/fscache-cache.h
66879+++ b/include/linux/fscache-cache.h
66880@@ -116,7 +116,7 @@ struct fscache_operation {
66881 #endif
66882 };
66883
66884-extern atomic_t fscache_op_debug_id;
66885+extern atomic_unchecked_t fscache_op_debug_id;
66886 extern const struct slow_work_ops fscache_op_slow_work_ops;
66887
66888 extern void fscache_enqueue_operation(struct fscache_operation *);
66889@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66890 fscache_operation_release_t release)
66891 {
66892 atomic_set(&op->usage, 1);
66893- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66894+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66895 op->release = release;
66896 INIT_LIST_HEAD(&op->pend_link);
66897 fscache_set_op_state(op, "Init");
66898diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66899index 4d6f47b..00bcedb 100644
66900--- a/include/linux/fsnotify_backend.h
66901+++ b/include/linux/fsnotify_backend.h
66902@@ -86,6 +86,7 @@ struct fsnotify_ops {
66903 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66904 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66905 };
66906+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66907
66908 /*
66909 * A group is a "thing" that wants to receive notification about filesystem
66910diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66911index 4ec5e67..42f1eb9 100644
66912--- a/include/linux/ftrace_event.h
66913+++ b/include/linux/ftrace_event.h
66914@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66915 int filter_type);
66916 extern int trace_define_common_fields(struct ftrace_event_call *call);
66917
66918-#define is_signed_type(type) (((type)(-1)) < 0)
66919+#define is_signed_type(type) (((type)(-1)) < (type)1)
66920
66921 int trace_set_clr_event(const char *system, const char *event, int set);
66922
66923diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66924index 297df45..b6a74ff 100644
66925--- a/include/linux/genhd.h
66926+++ b/include/linux/genhd.h
66927@@ -161,7 +161,7 @@ struct gendisk {
66928
66929 struct timer_rand_state *random;
66930
66931- atomic_t sync_io; /* RAID */
66932+ atomic_unchecked_t sync_io; /* RAID */
66933 struct work_struct async_notify;
66934 #ifdef CONFIG_BLK_DEV_INTEGRITY
66935 struct blk_integrity *integrity;
66936diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66937new file mode 100644
66938index 0000000..af663cf
66939--- /dev/null
66940+++ b/include/linux/gracl.h
66941@@ -0,0 +1,319 @@
66942+#ifndef GR_ACL_H
66943+#define GR_ACL_H
66944+
66945+#include <linux/grdefs.h>
66946+#include <linux/resource.h>
66947+#include <linux/capability.h>
66948+#include <linux/dcache.h>
66949+#include <asm/resource.h>
66950+
66951+/* Major status information */
66952+
66953+#define GR_VERSION "grsecurity 2.9"
66954+#define GRSECURITY_VERSION 0x2900
66955+
66956+enum {
66957+ GR_SHUTDOWN = 0,
66958+ GR_ENABLE = 1,
66959+ GR_SPROLE = 2,
66960+ GR_RELOAD = 3,
66961+ GR_SEGVMOD = 4,
66962+ GR_STATUS = 5,
66963+ GR_UNSPROLE = 6,
66964+ GR_PASSSET = 7,
66965+ GR_SPROLEPAM = 8,
66966+};
66967+
66968+/* Password setup definitions
66969+ * kernel/grhash.c */
66970+enum {
66971+ GR_PW_LEN = 128,
66972+ GR_SALT_LEN = 16,
66973+ GR_SHA_LEN = 32,
66974+};
66975+
66976+enum {
66977+ GR_SPROLE_LEN = 64,
66978+};
66979+
66980+enum {
66981+ GR_NO_GLOB = 0,
66982+ GR_REG_GLOB,
66983+ GR_CREATE_GLOB
66984+};
66985+
66986+#define GR_NLIMITS 32
66987+
66988+/* Begin Data Structures */
66989+
66990+struct sprole_pw {
66991+ unsigned char *rolename;
66992+ unsigned char salt[GR_SALT_LEN];
66993+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
66994+};
66995+
66996+struct name_entry {
66997+ __u32 key;
66998+ ino_t inode;
66999+ dev_t device;
67000+ char *name;
67001+ __u16 len;
67002+ __u8 deleted;
67003+ struct name_entry *prev;
67004+ struct name_entry *next;
67005+};
67006+
67007+struct inodev_entry {
67008+ struct name_entry *nentry;
67009+ struct inodev_entry *prev;
67010+ struct inodev_entry *next;
67011+};
67012+
67013+struct acl_role_db {
67014+ struct acl_role_label **r_hash;
67015+ __u32 r_size;
67016+};
67017+
67018+struct inodev_db {
67019+ struct inodev_entry **i_hash;
67020+ __u32 i_size;
67021+};
67022+
67023+struct name_db {
67024+ struct name_entry **n_hash;
67025+ __u32 n_size;
67026+};
67027+
67028+struct crash_uid {
67029+ uid_t uid;
67030+ unsigned long expires;
67031+};
67032+
67033+struct gr_hash_struct {
67034+ void **table;
67035+ void **nametable;
67036+ void *first;
67037+ __u32 table_size;
67038+ __u32 used_size;
67039+ int type;
67040+};
67041+
67042+/* Userspace Grsecurity ACL data structures */
67043+
67044+struct acl_subject_label {
67045+ char *filename;
67046+ ino_t inode;
67047+ dev_t device;
67048+ __u32 mode;
67049+ kernel_cap_t cap_mask;
67050+ kernel_cap_t cap_lower;
67051+ kernel_cap_t cap_invert_audit;
67052+
67053+ struct rlimit res[GR_NLIMITS];
67054+ __u32 resmask;
67055+
67056+ __u8 user_trans_type;
67057+ __u8 group_trans_type;
67058+ uid_t *user_transitions;
67059+ gid_t *group_transitions;
67060+ __u16 user_trans_num;
67061+ __u16 group_trans_num;
67062+
67063+ __u32 sock_families[2];
67064+ __u32 ip_proto[8];
67065+ __u32 ip_type;
67066+ struct acl_ip_label **ips;
67067+ __u32 ip_num;
67068+ __u32 inaddr_any_override;
67069+
67070+ __u32 crashes;
67071+ unsigned long expires;
67072+
67073+ struct acl_subject_label *parent_subject;
67074+ struct gr_hash_struct *hash;
67075+ struct acl_subject_label *prev;
67076+ struct acl_subject_label *next;
67077+
67078+ struct acl_object_label **obj_hash;
67079+ __u32 obj_hash_size;
67080+ __u16 pax_flags;
67081+};
67082+
67083+struct role_allowed_ip {
67084+ __u32 addr;
67085+ __u32 netmask;
67086+
67087+ struct role_allowed_ip *prev;
67088+ struct role_allowed_ip *next;
67089+};
67090+
67091+struct role_transition {
67092+ char *rolename;
67093+
67094+ struct role_transition *prev;
67095+ struct role_transition *next;
67096+};
67097+
67098+struct acl_role_label {
67099+ char *rolename;
67100+ uid_t uidgid;
67101+ __u16 roletype;
67102+
67103+ __u16 auth_attempts;
67104+ unsigned long expires;
67105+
67106+ struct acl_subject_label *root_label;
67107+ struct gr_hash_struct *hash;
67108+
67109+ struct acl_role_label *prev;
67110+ struct acl_role_label *next;
67111+
67112+ struct role_transition *transitions;
67113+ struct role_allowed_ip *allowed_ips;
67114+ uid_t *domain_children;
67115+ __u16 domain_child_num;
67116+
67117+ mode_t umask;
67118+
67119+ struct acl_subject_label **subj_hash;
67120+ __u32 subj_hash_size;
67121+};
67122+
67123+struct user_acl_role_db {
67124+ struct acl_role_label **r_table;
67125+ __u32 num_pointers; /* Number of allocations to track */
67126+ __u32 num_roles; /* Number of roles */
67127+ __u32 num_domain_children; /* Number of domain children */
67128+ __u32 num_subjects; /* Number of subjects */
67129+ __u32 num_objects; /* Number of objects */
67130+};
67131+
67132+struct acl_object_label {
67133+ char *filename;
67134+ ino_t inode;
67135+ dev_t device;
67136+ __u32 mode;
67137+
67138+ struct acl_subject_label *nested;
67139+ struct acl_object_label *globbed;
67140+
67141+ /* next two structures not used */
67142+
67143+ struct acl_object_label *prev;
67144+ struct acl_object_label *next;
67145+};
67146+
67147+struct acl_ip_label {
67148+ char *iface;
67149+ __u32 addr;
67150+ __u32 netmask;
67151+ __u16 low, high;
67152+ __u8 mode;
67153+ __u32 type;
67154+ __u32 proto[8];
67155+
67156+ /* next two structures not used */
67157+
67158+ struct acl_ip_label *prev;
67159+ struct acl_ip_label *next;
67160+};
67161+
67162+struct gr_arg {
67163+ struct user_acl_role_db role_db;
67164+ unsigned char pw[GR_PW_LEN];
67165+ unsigned char salt[GR_SALT_LEN];
67166+ unsigned char sum[GR_SHA_LEN];
67167+ unsigned char sp_role[GR_SPROLE_LEN];
67168+ struct sprole_pw *sprole_pws;
67169+ dev_t segv_device;
67170+ ino_t segv_inode;
67171+ uid_t segv_uid;
67172+ __u16 num_sprole_pws;
67173+ __u16 mode;
67174+};
67175+
67176+struct gr_arg_wrapper {
67177+ struct gr_arg *arg;
67178+ __u32 version;
67179+ __u32 size;
67180+};
67181+
67182+struct subject_map {
67183+ struct acl_subject_label *user;
67184+ struct acl_subject_label *kernel;
67185+ struct subject_map *prev;
67186+ struct subject_map *next;
67187+};
67188+
67189+struct acl_subj_map_db {
67190+ struct subject_map **s_hash;
67191+ __u32 s_size;
67192+};
67193+
67194+/* End Data Structures Section */
67195+
67196+/* Hash functions generated by empirical testing by Brad Spengler
67197+ Makes good use of the low bits of the inode. Generally 0-1 times
67198+ in loop for successful match. 0-3 for unsuccessful match.
67199+ Shift/add algorithm with modulus of table size and an XOR*/
67200+
67201+static __inline__ unsigned int
67202+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67203+{
67204+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
67205+}
67206+
67207+ static __inline__ unsigned int
67208+shash(const struct acl_subject_label *userp, const unsigned int sz)
67209+{
67210+ return ((const unsigned long)userp % sz);
67211+}
67212+
67213+static __inline__ unsigned int
67214+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67215+{
67216+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67217+}
67218+
67219+static __inline__ unsigned int
67220+nhash(const char *name, const __u16 len, const unsigned int sz)
67221+{
67222+ return full_name_hash((const unsigned char *)name, len) % sz;
67223+}
67224+
67225+#define FOR_EACH_ROLE_START(role) \
67226+ role = role_list; \
67227+ while (role) {
67228+
67229+#define FOR_EACH_ROLE_END(role) \
67230+ role = role->prev; \
67231+ }
67232+
67233+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67234+ subj = NULL; \
67235+ iter = 0; \
67236+ while (iter < role->subj_hash_size) { \
67237+ if (subj == NULL) \
67238+ subj = role->subj_hash[iter]; \
67239+ if (subj == NULL) { \
67240+ iter++; \
67241+ continue; \
67242+ }
67243+
67244+#define FOR_EACH_SUBJECT_END(subj,iter) \
67245+ subj = subj->next; \
67246+ if (subj == NULL) \
67247+ iter++; \
67248+ }
67249+
67250+
67251+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
67252+ subj = role->hash->first; \
67253+ while (subj != NULL) {
67254+
67255+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
67256+ subj = subj->next; \
67257+ }
67258+
67259+#endif
67260+
67261diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67262new file mode 100644
67263index 0000000..323ecf2
67264--- /dev/null
67265+++ b/include/linux/gralloc.h
67266@@ -0,0 +1,9 @@
67267+#ifndef __GRALLOC_H
67268+#define __GRALLOC_H
67269+
67270+void acl_free_all(void);
67271+int acl_alloc_stack_init(unsigned long size);
67272+void *acl_alloc(unsigned long len);
67273+void *acl_alloc_num(unsigned long num, unsigned long len);
67274+
67275+#endif
67276diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67277new file mode 100644
67278index 0000000..70d6cd5
67279--- /dev/null
67280+++ b/include/linux/grdefs.h
67281@@ -0,0 +1,140 @@
67282+#ifndef GRDEFS_H
67283+#define GRDEFS_H
67284+
67285+/* Begin grsecurity status declarations */
67286+
67287+enum {
67288+ GR_READY = 0x01,
67289+ GR_STATUS_INIT = 0x00 // disabled state
67290+};
67291+
67292+/* Begin ACL declarations */
67293+
67294+/* Role flags */
67295+
67296+enum {
67297+ GR_ROLE_USER = 0x0001,
67298+ GR_ROLE_GROUP = 0x0002,
67299+ GR_ROLE_DEFAULT = 0x0004,
67300+ GR_ROLE_SPECIAL = 0x0008,
67301+ GR_ROLE_AUTH = 0x0010,
67302+ GR_ROLE_NOPW = 0x0020,
67303+ GR_ROLE_GOD = 0x0040,
67304+ GR_ROLE_LEARN = 0x0080,
67305+ GR_ROLE_TPE = 0x0100,
67306+ GR_ROLE_DOMAIN = 0x0200,
67307+ GR_ROLE_PAM = 0x0400,
67308+ GR_ROLE_PERSIST = 0x800
67309+};
67310+
67311+/* ACL Subject and Object mode flags */
67312+enum {
67313+ GR_DELETED = 0x80000000
67314+};
67315+
67316+/* ACL Object-only mode flags */
67317+enum {
67318+ GR_READ = 0x00000001,
67319+ GR_APPEND = 0x00000002,
67320+ GR_WRITE = 0x00000004,
67321+ GR_EXEC = 0x00000008,
67322+ GR_FIND = 0x00000010,
67323+ GR_INHERIT = 0x00000020,
67324+ GR_SETID = 0x00000040,
67325+ GR_CREATE = 0x00000080,
67326+ GR_DELETE = 0x00000100,
67327+ GR_LINK = 0x00000200,
67328+ GR_AUDIT_READ = 0x00000400,
67329+ GR_AUDIT_APPEND = 0x00000800,
67330+ GR_AUDIT_WRITE = 0x00001000,
67331+ GR_AUDIT_EXEC = 0x00002000,
67332+ GR_AUDIT_FIND = 0x00004000,
67333+ GR_AUDIT_INHERIT= 0x00008000,
67334+ GR_AUDIT_SETID = 0x00010000,
67335+ GR_AUDIT_CREATE = 0x00020000,
67336+ GR_AUDIT_DELETE = 0x00040000,
67337+ GR_AUDIT_LINK = 0x00080000,
67338+ GR_PTRACERD = 0x00100000,
67339+ GR_NOPTRACE = 0x00200000,
67340+ GR_SUPPRESS = 0x00400000,
67341+ GR_NOLEARN = 0x00800000,
67342+ GR_INIT_TRANSFER= 0x01000000
67343+};
67344+
67345+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67346+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67347+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67348+
67349+/* ACL subject-only mode flags */
67350+enum {
67351+ GR_KILL = 0x00000001,
67352+ GR_VIEW = 0x00000002,
67353+ GR_PROTECTED = 0x00000004,
67354+ GR_LEARN = 0x00000008,
67355+ GR_OVERRIDE = 0x00000010,
67356+ /* just a placeholder, this mode is only used in userspace */
67357+ GR_DUMMY = 0x00000020,
67358+ GR_PROTSHM = 0x00000040,
67359+ GR_KILLPROC = 0x00000080,
67360+ GR_KILLIPPROC = 0x00000100,
67361+ /* just a placeholder, this mode is only used in userspace */
67362+ GR_NOTROJAN = 0x00000200,
67363+ GR_PROTPROCFD = 0x00000400,
67364+ GR_PROCACCT = 0x00000800,
67365+ GR_RELAXPTRACE = 0x00001000,
67366+ GR_NESTED = 0x00002000,
67367+ GR_INHERITLEARN = 0x00004000,
67368+ GR_PROCFIND = 0x00008000,
67369+ GR_POVERRIDE = 0x00010000,
67370+ GR_KERNELAUTH = 0x00020000,
67371+ GR_ATSECURE = 0x00040000,
67372+ GR_SHMEXEC = 0x00080000
67373+};
67374+
67375+enum {
67376+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67377+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67378+ GR_PAX_ENABLE_MPROTECT = 0x0004,
67379+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
67380+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67381+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67382+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67383+ GR_PAX_DISABLE_MPROTECT = 0x0400,
67384+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
67385+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67386+};
67387+
67388+enum {
67389+ GR_ID_USER = 0x01,
67390+ GR_ID_GROUP = 0x02,
67391+};
67392+
67393+enum {
67394+ GR_ID_ALLOW = 0x01,
67395+ GR_ID_DENY = 0x02,
67396+};
67397+
67398+#define GR_CRASH_RES 31
67399+#define GR_UIDTABLE_MAX 500
67400+
67401+/* begin resource learning section */
67402+enum {
67403+ GR_RLIM_CPU_BUMP = 60,
67404+ GR_RLIM_FSIZE_BUMP = 50000,
67405+ GR_RLIM_DATA_BUMP = 10000,
67406+ GR_RLIM_STACK_BUMP = 1000,
67407+ GR_RLIM_CORE_BUMP = 10000,
67408+ GR_RLIM_RSS_BUMP = 500000,
67409+ GR_RLIM_NPROC_BUMP = 1,
67410+ GR_RLIM_NOFILE_BUMP = 5,
67411+ GR_RLIM_MEMLOCK_BUMP = 50000,
67412+ GR_RLIM_AS_BUMP = 500000,
67413+ GR_RLIM_LOCKS_BUMP = 2,
67414+ GR_RLIM_SIGPENDING_BUMP = 5,
67415+ GR_RLIM_MSGQUEUE_BUMP = 10000,
67416+ GR_RLIM_NICE_BUMP = 1,
67417+ GR_RLIM_RTPRIO_BUMP = 1,
67418+ GR_RLIM_RTTIME_BUMP = 1000000
67419+};
67420+
67421+#endif
67422diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67423new file mode 100644
67424index 0000000..3826b91
67425--- /dev/null
67426+++ b/include/linux/grinternal.h
67427@@ -0,0 +1,219 @@
67428+#ifndef __GRINTERNAL_H
67429+#define __GRINTERNAL_H
67430+
67431+#ifdef CONFIG_GRKERNSEC
67432+
67433+#include <linux/fs.h>
67434+#include <linux/mnt_namespace.h>
67435+#include <linux/nsproxy.h>
67436+#include <linux/gracl.h>
67437+#include <linux/grdefs.h>
67438+#include <linux/grmsg.h>
67439+
67440+void gr_add_learn_entry(const char *fmt, ...)
67441+ __attribute__ ((format (printf, 1, 2)));
67442+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67443+ const struct vfsmount *mnt);
67444+__u32 gr_check_create(const struct dentry *new_dentry,
67445+ const struct dentry *parent,
67446+ const struct vfsmount *mnt, const __u32 mode);
67447+int gr_check_protected_task(const struct task_struct *task);
67448+__u32 to_gr_audit(const __u32 reqmode);
67449+int gr_set_acls(const int type);
67450+int gr_apply_subject_to_task(struct task_struct *task);
67451+int gr_acl_is_enabled(void);
67452+char gr_roletype_to_char(void);
67453+
67454+void gr_handle_alertkill(struct task_struct *task);
67455+char *gr_to_filename(const struct dentry *dentry,
67456+ const struct vfsmount *mnt);
67457+char *gr_to_filename1(const struct dentry *dentry,
67458+ const struct vfsmount *mnt);
67459+char *gr_to_filename2(const struct dentry *dentry,
67460+ const struct vfsmount *mnt);
67461+char *gr_to_filename3(const struct dentry *dentry,
67462+ const struct vfsmount *mnt);
67463+
67464+extern int grsec_enable_ptrace_readexec;
67465+extern int grsec_enable_harden_ptrace;
67466+extern int grsec_enable_link;
67467+extern int grsec_enable_fifo;
67468+extern int grsec_enable_shm;
67469+extern int grsec_enable_execlog;
67470+extern int grsec_enable_signal;
67471+extern int grsec_enable_audit_ptrace;
67472+extern int grsec_enable_forkfail;
67473+extern int grsec_enable_time;
67474+extern int grsec_enable_rofs;
67475+extern int grsec_enable_chroot_shmat;
67476+extern int grsec_enable_chroot_mount;
67477+extern int grsec_enable_chroot_double;
67478+extern int grsec_enable_chroot_pivot;
67479+extern int grsec_enable_chroot_chdir;
67480+extern int grsec_enable_chroot_chmod;
67481+extern int grsec_enable_chroot_mknod;
67482+extern int grsec_enable_chroot_fchdir;
67483+extern int grsec_enable_chroot_nice;
67484+extern int grsec_enable_chroot_execlog;
67485+extern int grsec_enable_chroot_caps;
67486+extern int grsec_enable_chroot_sysctl;
67487+extern int grsec_enable_chroot_unix;
67488+extern int grsec_enable_tpe;
67489+extern int grsec_tpe_gid;
67490+extern int grsec_enable_tpe_all;
67491+extern int grsec_enable_tpe_invert;
67492+extern int grsec_enable_socket_all;
67493+extern int grsec_socket_all_gid;
67494+extern int grsec_enable_socket_client;
67495+extern int grsec_socket_client_gid;
67496+extern int grsec_enable_socket_server;
67497+extern int grsec_socket_server_gid;
67498+extern int grsec_audit_gid;
67499+extern int grsec_enable_group;
67500+extern int grsec_enable_audit_textrel;
67501+extern int grsec_enable_log_rwxmaps;
67502+extern int grsec_enable_mount;
67503+extern int grsec_enable_chdir;
67504+extern int grsec_resource_logging;
67505+extern int grsec_enable_blackhole;
67506+extern int grsec_lastack_retries;
67507+extern int grsec_enable_brute;
67508+extern int grsec_lock;
67509+
67510+extern spinlock_t grsec_alert_lock;
67511+extern unsigned long grsec_alert_wtime;
67512+extern unsigned long grsec_alert_fyet;
67513+
67514+extern spinlock_t grsec_audit_lock;
67515+
67516+extern rwlock_t grsec_exec_file_lock;
67517+
67518+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67519+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67520+ (tsk)->exec_file->f_vfsmnt) : "/")
67521+
67522+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67523+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67524+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67525+
67526+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67527+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
67528+ (tsk)->exec_file->f_vfsmnt) : "/")
67529+
67530+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67531+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67532+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67533+
67534+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67535+
67536+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67537+
67538+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67539+ (task)->pid, (cred)->uid, \
67540+ (cred)->euid, (cred)->gid, (cred)->egid, \
67541+ gr_parent_task_fullpath(task), \
67542+ (task)->real_parent->comm, (task)->real_parent->pid, \
67543+ (pcred)->uid, (pcred)->euid, \
67544+ (pcred)->gid, (pcred)->egid
67545+
67546+#define GR_CHROOT_CAPS {{ \
67547+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67548+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67549+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67550+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67551+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67552+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67553+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
67554+
67555+#define security_learn(normal_msg,args...) \
67556+({ \
67557+ read_lock(&grsec_exec_file_lock); \
67558+ gr_add_learn_entry(normal_msg "\n", ## args); \
67559+ read_unlock(&grsec_exec_file_lock); \
67560+})
67561+
67562+enum {
67563+ GR_DO_AUDIT,
67564+ GR_DONT_AUDIT,
67565+ GR_DONT_AUDIT_GOOD
67566+};
67567+
67568+enum {
67569+ GR_TTYSNIFF,
67570+ GR_RBAC,
67571+ GR_RBAC_STR,
67572+ GR_STR_RBAC,
67573+ GR_RBAC_MODE2,
67574+ GR_RBAC_MODE3,
67575+ GR_FILENAME,
67576+ GR_SYSCTL_HIDDEN,
67577+ GR_NOARGS,
67578+ GR_ONE_INT,
67579+ GR_ONE_INT_TWO_STR,
67580+ GR_ONE_STR,
67581+ GR_STR_INT,
67582+ GR_TWO_STR_INT,
67583+ GR_TWO_INT,
67584+ GR_TWO_U64,
67585+ GR_THREE_INT,
67586+ GR_FIVE_INT_TWO_STR,
67587+ GR_TWO_STR,
67588+ GR_THREE_STR,
67589+ GR_FOUR_STR,
67590+ GR_STR_FILENAME,
67591+ GR_FILENAME_STR,
67592+ GR_FILENAME_TWO_INT,
67593+ GR_FILENAME_TWO_INT_STR,
67594+ GR_TEXTREL,
67595+ GR_PTRACE,
67596+ GR_RESOURCE,
67597+ GR_CAP,
67598+ GR_SIG,
67599+ GR_SIG2,
67600+ GR_CRASH1,
67601+ GR_CRASH2,
67602+ GR_PSACCT,
67603+ GR_RWXMAP
67604+};
67605+
67606+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67607+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67608+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67609+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67610+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67611+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67612+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67613+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67614+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67615+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67616+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67617+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67618+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67619+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67620+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67621+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67622+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67623+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67624+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67625+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67626+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67627+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67628+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67629+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67630+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67631+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67632+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67633+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67634+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67635+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67636+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67637+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67638+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67639+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67640+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67641+
67642+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67643+
67644+#endif
67645+
67646+#endif
67647diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67648new file mode 100644
67649index 0000000..f885406
67650--- /dev/null
67651+++ b/include/linux/grmsg.h
67652@@ -0,0 +1,109 @@
67653+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67654+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67655+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67656+#define GR_STOPMOD_MSG "denied modification of module state by "
67657+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67658+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67659+#define GR_IOPERM_MSG "denied use of ioperm() by "
67660+#define GR_IOPL_MSG "denied use of iopl() by "
67661+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67662+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67663+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67664+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67665+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67666+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67667+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67668+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67669+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67670+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67671+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67672+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67673+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67674+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67675+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67676+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67677+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67678+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67679+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67680+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67681+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67682+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67683+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67684+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67685+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67686+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67687+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67688+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67689+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67690+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67691+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67692+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67693+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67694+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67695+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67696+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67697+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67698+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67699+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67700+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67701+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67702+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67703+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67704+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67705+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67706+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67707+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67708+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67709+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67710+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67711+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67712+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67713+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67714+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67715+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67716+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67717+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67718+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67719+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67720+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67721+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67722+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67723+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67724+#define GR_FAILFORK_MSG "failed fork with errno %s by "
67725+#define GR_NICE_CHROOT_MSG "denied priority change by "
67726+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67727+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67728+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67729+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67730+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67731+#define GR_TIME_MSG "time set by "
67732+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67733+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67734+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67735+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67736+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67737+#define GR_BIND_MSG "denied bind() by "
67738+#define GR_CONNECT_MSG "denied connect() by "
67739+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67740+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67741+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67742+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67743+#define GR_CAP_ACL_MSG "use of %s denied for "
67744+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67745+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67746+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67747+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67748+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67749+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67750+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67751+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67752+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67753+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67754+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67755+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67756+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67757+#define GR_VM86_MSG "denied use of vm86 by "
67758+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67759+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67760+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67761+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67762diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67763new file mode 100644
67764index 0000000..c1793ae
67765--- /dev/null
67766+++ b/include/linux/grsecurity.h
67767@@ -0,0 +1,219 @@
67768+#ifndef GR_SECURITY_H
67769+#define GR_SECURITY_H
67770+#include <linux/fs.h>
67771+#include <linux/fs_struct.h>
67772+#include <linux/binfmts.h>
67773+#include <linux/gracl.h>
67774+#include <linux/compat.h>
67775+
67776+/* notify of brain-dead configs */
67777+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67778+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67779+#endif
67780+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67781+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67782+#endif
67783+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67784+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67785+#endif
67786+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67787+#error "CONFIG_PAX enabled, but no PaX options are enabled."
67788+#endif
67789+
67790+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67791+void gr_handle_brute_check(void);
67792+void gr_handle_kernel_exploit(void);
67793+int gr_process_user_ban(void);
67794+
67795+char gr_roletype_to_char(void);
67796+
67797+int gr_acl_enable_at_secure(void);
67798+
67799+int gr_check_user_change(int real, int effective, int fs);
67800+int gr_check_group_change(int real, int effective, int fs);
67801+
67802+void gr_del_task_from_ip_table(struct task_struct *p);
67803+
67804+int gr_pid_is_chrooted(struct task_struct *p);
67805+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67806+int gr_handle_chroot_nice(void);
67807+int gr_handle_chroot_sysctl(const int op);
67808+int gr_handle_chroot_setpriority(struct task_struct *p,
67809+ const int niceval);
67810+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67811+int gr_handle_chroot_chroot(const struct dentry *dentry,
67812+ const struct vfsmount *mnt);
67813+void gr_handle_chroot_chdir(struct path *path);
67814+int gr_handle_chroot_chmod(const struct dentry *dentry,
67815+ const struct vfsmount *mnt, const int mode);
67816+int gr_handle_chroot_mknod(const struct dentry *dentry,
67817+ const struct vfsmount *mnt, const int mode);
67818+int gr_handle_chroot_mount(const struct dentry *dentry,
67819+ const struct vfsmount *mnt,
67820+ const char *dev_name);
67821+int gr_handle_chroot_pivot(void);
67822+int gr_handle_chroot_unix(const pid_t pid);
67823+
67824+int gr_handle_rawio(const struct inode *inode);
67825+
67826+void gr_handle_ioperm(void);
67827+void gr_handle_iopl(void);
67828+
67829+umode_t gr_acl_umask(void);
67830+
67831+int gr_tpe_allow(const struct file *file);
67832+
67833+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67834+void gr_clear_chroot_entries(struct task_struct *task);
67835+
67836+void gr_log_forkfail(const int retval);
67837+void gr_log_timechange(void);
67838+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67839+void gr_log_chdir(const struct dentry *dentry,
67840+ const struct vfsmount *mnt);
67841+void gr_log_chroot_exec(const struct dentry *dentry,
67842+ const struct vfsmount *mnt);
67843+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
67844+#ifdef CONFIG_COMPAT
67845+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
67846+#endif
67847+void gr_log_remount(const char *devname, const int retval);
67848+void gr_log_unmount(const char *devname, const int retval);
67849+void gr_log_mount(const char *from, const char *to, const int retval);
67850+void gr_log_textrel(struct vm_area_struct *vma);
67851+void gr_log_rwxmmap(struct file *file);
67852+void gr_log_rwxmprotect(struct file *file);
67853+
67854+int gr_handle_follow_link(const struct inode *parent,
67855+ const struct inode *inode,
67856+ const struct dentry *dentry,
67857+ const struct vfsmount *mnt);
67858+int gr_handle_fifo(const struct dentry *dentry,
67859+ const struct vfsmount *mnt,
67860+ const struct dentry *dir, const int flag,
67861+ const int acc_mode);
67862+int gr_handle_hardlink(const struct dentry *dentry,
67863+ const struct vfsmount *mnt,
67864+ struct inode *inode,
67865+ const int mode, const char *to);
67866+
67867+int gr_is_capable(const int cap);
67868+int gr_is_capable_nolog(const int cap);
67869+void gr_learn_resource(const struct task_struct *task, const int limit,
67870+ const unsigned long wanted, const int gt);
67871+void gr_copy_label(struct task_struct *tsk);
67872+void gr_handle_crash(struct task_struct *task, const int sig);
67873+int gr_handle_signal(const struct task_struct *p, const int sig);
67874+int gr_check_crash_uid(const uid_t uid);
67875+int gr_check_protected_task(const struct task_struct *task);
67876+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
67877+int gr_acl_handle_mmap(const struct file *file,
67878+ const unsigned long prot);
67879+int gr_acl_handle_mprotect(const struct file *file,
67880+ const unsigned long prot);
67881+int gr_check_hidden_task(const struct task_struct *tsk);
67882+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
67883+ const struct vfsmount *mnt);
67884+__u32 gr_acl_handle_utime(const struct dentry *dentry,
67885+ const struct vfsmount *mnt);
67886+__u32 gr_acl_handle_access(const struct dentry *dentry,
67887+ const struct vfsmount *mnt, const int fmode);
67888+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
67889+ const struct vfsmount *mnt, umode_t *mode);
67890+__u32 gr_acl_handle_chown(const struct dentry *dentry,
67891+ const struct vfsmount *mnt);
67892+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
67893+ const struct vfsmount *mnt);
67894+int gr_handle_ptrace(struct task_struct *task, const long request);
67895+int gr_handle_proc_ptrace(struct task_struct *task);
67896+__u32 gr_acl_handle_execve(const struct dentry *dentry,
67897+ const struct vfsmount *mnt);
67898+int gr_check_crash_exec(const struct file *filp);
67899+int gr_acl_is_enabled(void);
67900+void gr_set_kernel_label(struct task_struct *task);
67901+void gr_set_role_label(struct task_struct *task, const uid_t uid,
67902+ const gid_t gid);
67903+int gr_set_proc_label(const struct dentry *dentry,
67904+ const struct vfsmount *mnt,
67905+ const int unsafe_flags);
67906+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67907+ const struct vfsmount *mnt);
67908+__u32 gr_acl_handle_open(const struct dentry *dentry,
67909+ const struct vfsmount *mnt, int acc_mode);
67910+__u32 gr_acl_handle_creat(const struct dentry *dentry,
67911+ const struct dentry *p_dentry,
67912+ const struct vfsmount *p_mnt,
67913+ int open_flags, int acc_mode, const int imode);
67914+void gr_handle_create(const struct dentry *dentry,
67915+ const struct vfsmount *mnt);
67916+void gr_handle_proc_create(const struct dentry *dentry,
67917+ const struct inode *inode);
67918+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67919+ const struct dentry *parent_dentry,
67920+ const struct vfsmount *parent_mnt,
67921+ const int mode);
67922+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67923+ const struct dentry *parent_dentry,
67924+ const struct vfsmount *parent_mnt);
67925+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67926+ const struct vfsmount *mnt);
67927+void gr_handle_delete(const ino_t ino, const dev_t dev);
67928+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67929+ const struct vfsmount *mnt);
67930+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67931+ const struct dentry *parent_dentry,
67932+ const struct vfsmount *parent_mnt,
67933+ const char *from);
67934+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67935+ const struct dentry *parent_dentry,
67936+ const struct vfsmount *parent_mnt,
67937+ const struct dentry *old_dentry,
67938+ const struct vfsmount *old_mnt, const char *to);
67939+int gr_acl_handle_rename(struct dentry *new_dentry,
67940+ struct dentry *parent_dentry,
67941+ const struct vfsmount *parent_mnt,
67942+ struct dentry *old_dentry,
67943+ struct inode *old_parent_inode,
67944+ struct vfsmount *old_mnt, const char *newname);
67945+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67946+ struct dentry *old_dentry,
67947+ struct dentry *new_dentry,
67948+ struct vfsmount *mnt, const __u8 replace);
67949+__u32 gr_check_link(const struct dentry *new_dentry,
67950+ const struct dentry *parent_dentry,
67951+ const struct vfsmount *parent_mnt,
67952+ const struct dentry *old_dentry,
67953+ const struct vfsmount *old_mnt);
67954+int gr_acl_handle_filldir(const struct file *file, const char *name,
67955+ const unsigned int namelen, const ino_t ino);
67956+
67957+__u32 gr_acl_handle_unix(const struct dentry *dentry,
67958+ const struct vfsmount *mnt);
67959+void gr_acl_handle_exit(void);
67960+void gr_acl_handle_psacct(struct task_struct *task, const long code);
67961+int gr_acl_handle_procpidmem(const struct task_struct *task);
67962+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
67963+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
67964+void gr_audit_ptrace(struct task_struct *task);
67965+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
67966+
67967+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
67968+
67969+#ifdef CONFIG_GRKERNSEC
67970+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
67971+void gr_handle_vm86(void);
67972+void gr_handle_mem_readwrite(u64 from, u64 to);
67973+
67974+void gr_log_badprocpid(const char *entry);
67975+
67976+extern int grsec_enable_dmesg;
67977+extern int grsec_disable_privio;
67978+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67979+extern int grsec_enable_chroot_findtask;
67980+#endif
67981+#ifdef CONFIG_GRKERNSEC_SETXID
67982+extern int grsec_enable_setxid;
67983+#endif
67984+#endif
67985+
67986+#endif
67987diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
67988index 6a87154..a3ce57b 100644
67989--- a/include/linux/hdpu_features.h
67990+++ b/include/linux/hdpu_features.h
67991@@ -3,7 +3,7 @@
67992 struct cpustate_t {
67993 spinlock_t lock;
67994 int excl;
67995- int open_count;
67996+ atomic_t open_count;
67997 unsigned char cached_val;
67998 int inited;
67999 unsigned long *set_addr;
68000diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68001index 211ff44..00ab6d7 100644
68002--- a/include/linux/highmem.h
68003+++ b/include/linux/highmem.h
68004@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
68005 kunmap_atomic(kaddr, KM_USER0);
68006 }
68007
68008+static inline void sanitize_highpage(struct page *page)
68009+{
68010+ void *kaddr;
68011+ unsigned long flags;
68012+
68013+ local_irq_save(flags);
68014+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
68015+ clear_page(kaddr);
68016+ kunmap_atomic(kaddr, KM_CLEARPAGE);
68017+ local_irq_restore(flags);
68018+}
68019+
68020 static inline void zero_user_segments(struct page *page,
68021 unsigned start1, unsigned end1,
68022 unsigned start2, unsigned end2)
68023diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68024index 7b40cda..24eb44e 100644
68025--- a/include/linux/i2c.h
68026+++ b/include/linux/i2c.h
68027@@ -325,6 +325,7 @@ struct i2c_algorithm {
68028 /* To determine what the adapter supports */
68029 u32 (*functionality) (struct i2c_adapter *);
68030 };
68031+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68032
68033 /*
68034 * i2c_adapter is the structure used to identify a physical i2c bus along
68035diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68036index 4c4e57d..f3c5303 100644
68037--- a/include/linux/i2o.h
68038+++ b/include/linux/i2o.h
68039@@ -564,7 +564,7 @@ struct i2o_controller {
68040 struct i2o_device *exec; /* Executive */
68041 #if BITS_PER_LONG == 64
68042 spinlock_t context_list_lock; /* lock for context_list */
68043- atomic_t context_list_counter; /* needed for unique contexts */
68044+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68045 struct list_head context_list; /* list of context id's
68046 and pointers */
68047 #endif
68048diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68049index 21a6f5d..dc42eab 100644
68050--- a/include/linux/init_task.h
68051+++ b/include/linux/init_task.h
68052@@ -83,6 +83,12 @@ extern struct group_info init_groups;
68053 #define INIT_IDS
68054 #endif
68055
68056+#ifdef CONFIG_X86
68057+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68058+#else
68059+#define INIT_TASK_THREAD_INFO
68060+#endif
68061+
68062 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
68063 /*
68064 * Because of the reduced scope of CAP_SETPCAP when filesystem
68065@@ -156,6 +162,7 @@ extern struct cred init_cred;
68066 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
68067 .comm = "swapper", \
68068 .thread = INIT_THREAD, \
68069+ INIT_TASK_THREAD_INFO \
68070 .fs = &init_fs, \
68071 .files = &init_files, \
68072 .signal = &init_signals, \
68073diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
68074index 4f0a72a..a849599 100644
68075--- a/include/linux/intel-iommu.h
68076+++ b/include/linux/intel-iommu.h
68077@@ -296,7 +296,7 @@ struct iommu_flush {
68078 u8 fm, u64 type);
68079 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
68080 unsigned int size_order, u64 type);
68081-};
68082+} __no_const;
68083
68084 enum {
68085 SR_DMAR_FECTL_REG,
68086diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68087index c739150..be577b5 100644
68088--- a/include/linux/interrupt.h
68089+++ b/include/linux/interrupt.h
68090@@ -369,7 +369,7 @@ enum
68091 /* map softirq index to softirq name. update 'softirq_to_name' in
68092 * kernel/softirq.c when adding a new softirq.
68093 */
68094-extern char *softirq_to_name[NR_SOFTIRQS];
68095+extern const char * const softirq_to_name[NR_SOFTIRQS];
68096
68097 /* softirq mask and active fields moved to irq_cpustat_t in
68098 * asm/hardirq.h to get better cache usage. KAO
68099@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68100
68101 struct softirq_action
68102 {
68103- void (*action)(struct softirq_action *);
68104+ void (*action)(void);
68105 };
68106
68107 asmlinkage void do_softirq(void);
68108 asmlinkage void __do_softirq(void);
68109-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68110+extern void open_softirq(int nr, void (*action)(void));
68111 extern void softirq_init(void);
68112 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
68113 extern void raise_softirq_irqoff(unsigned int nr);
68114diff --git a/include/linux/irq.h b/include/linux/irq.h
68115index 9e5f45a..025865b 100644
68116--- a/include/linux/irq.h
68117+++ b/include/linux/irq.h
68118@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
68119 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
68120 bool boot)
68121 {
68122+#ifdef CONFIG_CPUMASK_OFFSTACK
68123 gfp_t gfp = GFP_ATOMIC;
68124
68125 if (boot)
68126 gfp = GFP_NOWAIT;
68127
68128-#ifdef CONFIG_CPUMASK_OFFSTACK
68129 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
68130 return false;
68131
68132diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68133index 7922742..27306a2 100644
68134--- a/include/linux/kallsyms.h
68135+++ b/include/linux/kallsyms.h
68136@@ -15,7 +15,8 @@
68137
68138 struct module;
68139
68140-#ifdef CONFIG_KALLSYMS
68141+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68142+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68143 /* Lookup the address for a symbol. Returns 0 if not found. */
68144 unsigned long kallsyms_lookup_name(const char *name);
68145
68146@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68147 /* Stupid that this does nothing, but I didn't create this mess. */
68148 #define __print_symbol(fmt, addr)
68149 #endif /*CONFIG_KALLSYMS*/
68150+#else /* when included by kallsyms.c, vsnprintf.c, or
68151+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68152+extern void __print_symbol(const char *fmt, unsigned long address);
68153+extern int sprint_symbol(char *buffer, unsigned long address);
68154+const char *kallsyms_lookup(unsigned long addr,
68155+ unsigned long *symbolsize,
68156+ unsigned long *offset,
68157+ char **modname, char *namebuf);
68158+#endif
68159
68160 /* This macro allows us to keep printk typechecking */
68161 static void __check_printsym_format(const char *fmt, ...)
68162diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68163index 6adcc29..13369e8 100644
68164--- a/include/linux/kgdb.h
68165+++ b/include/linux/kgdb.h
68166@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
68167
68168 extern int kgdb_connected;
68169
68170-extern atomic_t kgdb_setting_breakpoint;
68171-extern atomic_t kgdb_cpu_doing_single_step;
68172+extern atomic_unchecked_t kgdb_setting_breakpoint;
68173+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68174
68175 extern struct task_struct *kgdb_usethread;
68176 extern struct task_struct *kgdb_contthread;
68177@@ -235,7 +235,7 @@ struct kgdb_arch {
68178 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
68179 void (*remove_all_hw_break)(void);
68180 void (*correct_hw_break)(void);
68181-};
68182+} __do_const;
68183
68184 /**
68185 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68186@@ -257,14 +257,14 @@ struct kgdb_io {
68187 int (*init) (void);
68188 void (*pre_exception) (void);
68189 void (*post_exception) (void);
68190-};
68191+} __do_const;
68192
68193-extern struct kgdb_arch arch_kgdb_ops;
68194+extern const struct kgdb_arch arch_kgdb_ops;
68195
68196 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
68197
68198-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
68199-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
68200+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
68201+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
68202
68203 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
68204 extern int kgdb_mem2hex(char *mem, char *buf, int count);
68205diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68206index 384ca8b..83dd97d 100644
68207--- a/include/linux/kmod.h
68208+++ b/include/linux/kmod.h
68209@@ -31,6 +31,8 @@
68210 * usually useless though. */
68211 extern int __request_module(bool wait, const char *name, ...) \
68212 __attribute__((format(printf, 2, 3)));
68213+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
68214+ __attribute__((format(printf, 3, 4)));
68215 #define request_module(mod...) __request_module(true, mod)
68216 #define request_module_nowait(mod...) __request_module(false, mod)
68217 #define try_then_request_module(x, mod...) \
68218diff --git a/include/linux/kobject.h b/include/linux/kobject.h
68219index 58ae8e0..3950d3c 100644
68220--- a/include/linux/kobject.h
68221+++ b/include/linux/kobject.h
68222@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
68223
68224 struct kobj_type {
68225 void (*release)(struct kobject *kobj);
68226- struct sysfs_ops *sysfs_ops;
68227+ const struct sysfs_ops *sysfs_ops;
68228 struct attribute **default_attrs;
68229 };
68230
68231@@ -118,9 +118,9 @@ struct kobj_uevent_env {
68232 };
68233
68234 struct kset_uevent_ops {
68235- int (*filter)(struct kset *kset, struct kobject *kobj);
68236- const char *(*name)(struct kset *kset, struct kobject *kobj);
68237- int (*uevent)(struct kset *kset, struct kobject *kobj,
68238+ int (* const filter)(struct kset *kset, struct kobject *kobj);
68239+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
68240+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
68241 struct kobj_uevent_env *env);
68242 };
68243
68244@@ -132,7 +132,7 @@ struct kobj_attribute {
68245 const char *buf, size_t count);
68246 };
68247
68248-extern struct sysfs_ops kobj_sysfs_ops;
68249+extern const struct sysfs_ops kobj_sysfs_ops;
68250
68251 /**
68252 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
68253@@ -155,14 +155,14 @@ struct kset {
68254 struct list_head list;
68255 spinlock_t list_lock;
68256 struct kobject kobj;
68257- struct kset_uevent_ops *uevent_ops;
68258+ const struct kset_uevent_ops *uevent_ops;
68259 };
68260
68261 extern void kset_init(struct kset *kset);
68262 extern int __must_check kset_register(struct kset *kset);
68263 extern void kset_unregister(struct kset *kset);
68264 extern struct kset * __must_check kset_create_and_add(const char *name,
68265- struct kset_uevent_ops *u,
68266+ const struct kset_uevent_ops *u,
68267 struct kobject *parent_kobj);
68268
68269 static inline struct kset *to_kset(struct kobject *kobj)
68270diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68271index c728a50..752d821 100644
68272--- a/include/linux/kvm_host.h
68273+++ b/include/linux/kvm_host.h
68274@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68275 void vcpu_load(struct kvm_vcpu *vcpu);
68276 void vcpu_put(struct kvm_vcpu *vcpu);
68277
68278-int kvm_init(void *opaque, unsigned int vcpu_size,
68279+int kvm_init(const void *opaque, unsigned int vcpu_size,
68280 struct module *module);
68281 void kvm_exit(void);
68282
68283@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68284 struct kvm_guest_debug *dbg);
68285 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68286
68287-int kvm_arch_init(void *opaque);
68288+int kvm_arch_init(const void *opaque);
68289 void kvm_arch_exit(void);
68290
68291 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68292diff --git a/include/linux/libata.h b/include/linux/libata.h
68293index a069916..223edde 100644
68294--- a/include/linux/libata.h
68295+++ b/include/linux/libata.h
68296@@ -525,11 +525,11 @@ struct ata_ioports {
68297
68298 struct ata_host {
68299 spinlock_t lock;
68300- struct device *dev;
68301+ struct device *dev;
68302 void __iomem * const *iomap;
68303 unsigned int n_ports;
68304 void *private_data;
68305- struct ata_port_operations *ops;
68306+ const struct ata_port_operations *ops;
68307 unsigned long flags;
68308 #ifdef CONFIG_ATA_ACPI
68309 acpi_handle acpi_handle;
68310@@ -710,7 +710,7 @@ struct ata_link {
68311
68312 struct ata_port {
68313 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
68314- struct ata_port_operations *ops;
68315+ const struct ata_port_operations *ops;
68316 spinlock_t *lock;
68317 /* Flags owned by the EH context. Only EH should touch these once the
68318 port is active */
68319@@ -884,7 +884,7 @@ struct ata_port_operations {
68320 * fields must be pointers.
68321 */
68322 const struct ata_port_operations *inherits;
68323-};
68324+} __do_const;
68325
68326 struct ata_port_info {
68327 unsigned long flags;
68328@@ -892,7 +892,7 @@ struct ata_port_info {
68329 unsigned long pio_mask;
68330 unsigned long mwdma_mask;
68331 unsigned long udma_mask;
68332- struct ata_port_operations *port_ops;
68333+ const struct ata_port_operations *port_ops;
68334 void *private_data;
68335 };
68336
68337@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
68338 extern const unsigned long sata_deb_timing_hotplug[];
68339 extern const unsigned long sata_deb_timing_long[];
68340
68341-extern struct ata_port_operations ata_dummy_port_ops;
68342+extern const struct ata_port_operations ata_dummy_port_ops;
68343 extern const struct ata_port_info ata_dummy_port_info;
68344
68345 static inline const unsigned long *
68346@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
68347 struct scsi_host_template *sht);
68348 extern void ata_host_detach(struct ata_host *host);
68349 extern void ata_host_init(struct ata_host *, struct device *,
68350- unsigned long, struct ata_port_operations *);
68351+ unsigned long, const struct ata_port_operations *);
68352 extern int ata_scsi_detect(struct scsi_host_template *sht);
68353 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
68354 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
68355diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
68356index fbc48f8..0886e57 100644
68357--- a/include/linux/lockd/bind.h
68358+++ b/include/linux/lockd/bind.h
68359@@ -23,13 +23,13 @@ struct svc_rqst;
68360 * This is the set of functions for lockd->nfsd communication
68361 */
68362 struct nlmsvc_binding {
68363- __be32 (*fopen)(struct svc_rqst *,
68364+ __be32 (* const fopen)(struct svc_rqst *,
68365 struct nfs_fh *,
68366 struct file **);
68367- void (*fclose)(struct file *);
68368+ void (* const fclose)(struct file *);
68369 };
68370
68371-extern struct nlmsvc_binding * nlmsvc_ops;
68372+extern const struct nlmsvc_binding * nlmsvc_ops;
68373
68374 /*
68375 * Similar to nfs_client_initdata, but without the NFS-specific
68376diff --git a/include/linux/mca.h b/include/linux/mca.h
68377index 3797270..7765ede 100644
68378--- a/include/linux/mca.h
68379+++ b/include/linux/mca.h
68380@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
68381 int region);
68382 void * (*mca_transform_memory)(struct mca_device *,
68383 void *memory);
68384-};
68385+} __no_const;
68386
68387 struct mca_bus {
68388 u64 default_dma_mask;
68389diff --git a/include/linux/memory.h b/include/linux/memory.h
68390index 37fa19b..b597c85 100644
68391--- a/include/linux/memory.h
68392+++ b/include/linux/memory.h
68393@@ -108,7 +108,7 @@ struct memory_accessor {
68394 size_t count);
68395 ssize_t (*write)(struct memory_accessor *, const char *buf,
68396 off_t offset, size_t count);
68397-};
68398+} __no_const;
68399
68400 /*
68401 * Kernel text modification mutex, used for code patching. Users of this lock
68402diff --git a/include/linux/mm.h b/include/linux/mm.h
68403index 11e5be6..1ff2423 100644
68404--- a/include/linux/mm.h
68405+++ b/include/linux/mm.h
68406@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
68407
68408 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
68409 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68410+
68411+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68412+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
68413+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
68414+#else
68415 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
68416+#endif
68417+
68418 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
68419 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
68420
68421@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68422 int set_page_dirty_lock(struct page *page);
68423 int clear_page_dirty_for_io(struct page *page);
68424
68425-/* Is the vma a continuation of the stack vma above it? */
68426-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68427-{
68428- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68429-}
68430-
68431 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68432 unsigned long old_addr, struct vm_area_struct *new_vma,
68433 unsigned long new_addr, unsigned long len);
68434@@ -890,6 +891,8 @@ struct shrinker {
68435 extern void register_shrinker(struct shrinker *);
68436 extern void unregister_shrinker(struct shrinker *);
68437
68438+pgprot_t vm_get_page_prot(unsigned long vm_flags);
68439+
68440 int vma_wants_writenotify(struct vm_area_struct *vma);
68441
68442 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68443@@ -1162,6 +1165,7 @@ out:
68444 }
68445
68446 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68447+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68448
68449 extern unsigned long do_brk(unsigned long, unsigned long);
68450
68451@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68452 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68453 struct vm_area_struct **pprev);
68454
68455+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68456+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68457+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68458+
68459 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68460 NULL if none. Assume start_addr < end_addr. */
68461 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68462@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68463 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68464 }
68465
68466-pgprot_t vm_get_page_prot(unsigned long vm_flags);
68467 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68468 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68469 unsigned long pfn, unsigned long size, pgprot_t);
68470@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68471 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68472 extern int sysctl_memory_failure_early_kill;
68473 extern int sysctl_memory_failure_recovery;
68474-extern atomic_long_t mce_bad_pages;
68475+extern atomic_long_unchecked_t mce_bad_pages;
68476+
68477+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68478+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68479+#else
68480+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68481+#endif
68482
68483 #endif /* __KERNEL__ */
68484 #endif /* _LINUX_MM_H */
68485diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68486index 9d12ed5..6d9707a 100644
68487--- a/include/linux/mm_types.h
68488+++ b/include/linux/mm_types.h
68489@@ -186,6 +186,8 @@ struct vm_area_struct {
68490 #ifdef CONFIG_NUMA
68491 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68492 #endif
68493+
68494+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68495 };
68496
68497 struct core_thread {
68498@@ -287,6 +289,24 @@ struct mm_struct {
68499 #ifdef CONFIG_MMU_NOTIFIER
68500 struct mmu_notifier_mm *mmu_notifier_mm;
68501 #endif
68502+
68503+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68504+ unsigned long pax_flags;
68505+#endif
68506+
68507+#ifdef CONFIG_PAX_DLRESOLVE
68508+ unsigned long call_dl_resolve;
68509+#endif
68510+
68511+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68512+ unsigned long call_syscall;
68513+#endif
68514+
68515+#ifdef CONFIG_PAX_ASLR
68516+ unsigned long delta_mmap; /* randomized offset */
68517+ unsigned long delta_stack; /* randomized offset */
68518+#endif
68519+
68520 };
68521
68522 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68523diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68524index 4e02ee2..afb159e 100644
68525--- a/include/linux/mmu_notifier.h
68526+++ b/include/linux/mmu_notifier.h
68527@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68528 */
68529 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68530 ({ \
68531- pte_t __pte; \
68532+ pte_t ___pte; \
68533 struct vm_area_struct *___vma = __vma; \
68534 unsigned long ___address = __address; \
68535- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68536+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68537 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68538- __pte; \
68539+ ___pte; \
68540 })
68541
68542 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68543diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68544index 6c31a2a..4b0e930 100644
68545--- a/include/linux/mmzone.h
68546+++ b/include/linux/mmzone.h
68547@@ -350,7 +350,7 @@ struct zone {
68548 unsigned long flags; /* zone flags, see below */
68549
68550 /* Zone statistics */
68551- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68552+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68553
68554 /*
68555 * prev_priority holds the scanning priority for this zone. It is
68556diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68557index f58e9d8..3503935 100644
68558--- a/include/linux/mod_devicetable.h
68559+++ b/include/linux/mod_devicetable.h
68560@@ -12,7 +12,7 @@
68561 typedef unsigned long kernel_ulong_t;
68562 #endif
68563
68564-#define PCI_ANY_ID (~0)
68565+#define PCI_ANY_ID ((__u16)~0)
68566
68567 struct pci_device_id {
68568 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68569@@ -131,7 +131,7 @@ struct usb_device_id {
68570 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68571 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68572
68573-#define HID_ANY_ID (~0)
68574+#define HID_ANY_ID (~0U)
68575
68576 struct hid_device_id {
68577 __u16 bus;
68578diff --git a/include/linux/module.h b/include/linux/module.h
68579index 482efc8..642032b 100644
68580--- a/include/linux/module.h
68581+++ b/include/linux/module.h
68582@@ -16,6 +16,7 @@
68583 #include <linux/kobject.h>
68584 #include <linux/moduleparam.h>
68585 #include <linux/tracepoint.h>
68586+#include <linux/fs.h>
68587
68588 #include <asm/local.h>
68589 #include <asm/module.h>
68590@@ -287,16 +288,16 @@ struct module
68591 int (*init)(void);
68592
68593 /* If this is non-NULL, vfree after init() returns */
68594- void *module_init;
68595+ void *module_init_rx, *module_init_rw;
68596
68597 /* Here is the actual code + data, vfree'd on unload. */
68598- void *module_core;
68599+ void *module_core_rx, *module_core_rw;
68600
68601 /* Here are the sizes of the init and core sections */
68602- unsigned int init_size, core_size;
68603+ unsigned int init_size_rw, core_size_rw;
68604
68605 /* The size of the executable code in each section. */
68606- unsigned int init_text_size, core_text_size;
68607+ unsigned int init_size_rx, core_size_rx;
68608
68609 /* Arch-specific module values */
68610 struct mod_arch_specific arch;
68611@@ -345,6 +346,10 @@ struct module
68612 #ifdef CONFIG_EVENT_TRACING
68613 struct ftrace_event_call *trace_events;
68614 unsigned int num_trace_events;
68615+ struct file_operations trace_id;
68616+ struct file_operations trace_enable;
68617+ struct file_operations trace_format;
68618+ struct file_operations trace_filter;
68619 #endif
68620 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68621 unsigned long *ftrace_callsites;
68622@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68623 bool is_module_address(unsigned long addr);
68624 bool is_module_text_address(unsigned long addr);
68625
68626+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68627+{
68628+
68629+#ifdef CONFIG_PAX_KERNEXEC
68630+ if (ktla_ktva(addr) >= (unsigned long)start &&
68631+ ktla_ktva(addr) < (unsigned long)start + size)
68632+ return 1;
68633+#endif
68634+
68635+ return ((void *)addr >= start && (void *)addr < start + size);
68636+}
68637+
68638+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68639+{
68640+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68641+}
68642+
68643+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68644+{
68645+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68646+}
68647+
68648+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68649+{
68650+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68651+}
68652+
68653+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68654+{
68655+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68656+}
68657+
68658 static inline int within_module_core(unsigned long addr, struct module *mod)
68659 {
68660- return (unsigned long)mod->module_core <= addr &&
68661- addr < (unsigned long)mod->module_core + mod->core_size;
68662+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68663 }
68664
68665 static inline int within_module_init(unsigned long addr, struct module *mod)
68666 {
68667- return (unsigned long)mod->module_init <= addr &&
68668- addr < (unsigned long)mod->module_init + mod->init_size;
68669+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68670 }
68671
68672 /* Search for module by name: must hold module_mutex. */
68673diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68674index c1f40c2..682ca53 100644
68675--- a/include/linux/moduleloader.h
68676+++ b/include/linux/moduleloader.h
68677@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68678 sections. Returns NULL on failure. */
68679 void *module_alloc(unsigned long size);
68680
68681+#ifdef CONFIG_PAX_KERNEXEC
68682+void *module_alloc_exec(unsigned long size);
68683+#else
68684+#define module_alloc_exec(x) module_alloc(x)
68685+#endif
68686+
68687 /* Free memory returned from module_alloc. */
68688 void module_free(struct module *mod, void *module_region);
68689
68690+#ifdef CONFIG_PAX_KERNEXEC
68691+void module_free_exec(struct module *mod, void *module_region);
68692+#else
68693+#define module_free_exec(x, y) module_free((x), (y))
68694+#endif
68695+
68696 /* Apply the given relocation to the (simplified) ELF. Return -error
68697 or 0. */
68698 int apply_relocate(Elf_Shdr *sechdrs,
68699diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68700index 82a9124..8a5f622 100644
68701--- a/include/linux/moduleparam.h
68702+++ b/include/linux/moduleparam.h
68703@@ -132,7 +132,7 @@ struct kparam_array
68704
68705 /* Actually copy string: maxlen param is usually sizeof(string). */
68706 #define module_param_string(name, string, len, perm) \
68707- static const struct kparam_string __param_string_##name \
68708+ static const struct kparam_string __param_string_##name __used \
68709 = { len, string }; \
68710 __module_param_call(MODULE_PARAM_PREFIX, name, \
68711 param_set_copystring, param_get_string, \
68712@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68713
68714 /* Comma-separated array: *nump is set to number they actually specified. */
68715 #define module_param_array_named(name, array, type, nump, perm) \
68716- static const struct kparam_array __param_arr_##name \
68717+ static const struct kparam_array __param_arr_##name __used \
68718 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68719 sizeof(array[0]), array }; \
68720 __module_param_call(MODULE_PARAM_PREFIX, name, \
68721diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68722index 878cab4..c92cb3e 100644
68723--- a/include/linux/mutex.h
68724+++ b/include/linux/mutex.h
68725@@ -51,7 +51,7 @@ struct mutex {
68726 spinlock_t wait_lock;
68727 struct list_head wait_list;
68728 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68729- struct thread_info *owner;
68730+ struct task_struct *owner;
68731 #endif
68732 #ifdef CONFIG_DEBUG_MUTEXES
68733 const char *name;
68734diff --git a/include/linux/namei.h b/include/linux/namei.h
68735index ec0f607..d19e675 100644
68736--- a/include/linux/namei.h
68737+++ b/include/linux/namei.h
68738@@ -22,7 +22,7 @@ struct nameidata {
68739 unsigned int flags;
68740 int last_type;
68741 unsigned depth;
68742- char *saved_names[MAX_NESTED_LINKS + 1];
68743+ const char *saved_names[MAX_NESTED_LINKS + 1];
68744
68745 /* Intent data */
68746 union {
68747@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68748 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68749 extern void unlock_rename(struct dentry *, struct dentry *);
68750
68751-static inline void nd_set_link(struct nameidata *nd, char *path)
68752+static inline void nd_set_link(struct nameidata *nd, const char *path)
68753 {
68754 nd->saved_names[nd->depth] = path;
68755 }
68756
68757-static inline char *nd_get_link(struct nameidata *nd)
68758+static inline const char *nd_get_link(const struct nameidata *nd)
68759 {
68760 return nd->saved_names[nd->depth];
68761 }
68762diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68763index 9d7e8f7..04428c5 100644
68764--- a/include/linux/netdevice.h
68765+++ b/include/linux/netdevice.h
68766@@ -637,6 +637,7 @@ struct net_device_ops {
68767 u16 xid);
68768 #endif
68769 };
68770+typedef struct net_device_ops __no_const net_device_ops_no_const;
68771
68772 /*
68773 * The DEVICE structure.
68774diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68775new file mode 100644
68776index 0000000..33f4af8
68777--- /dev/null
68778+++ b/include/linux/netfilter/xt_gradm.h
68779@@ -0,0 +1,9 @@
68780+#ifndef _LINUX_NETFILTER_XT_GRADM_H
68781+#define _LINUX_NETFILTER_XT_GRADM_H 1
68782+
68783+struct xt_gradm_mtinfo {
68784+ __u16 flags;
68785+ __u16 invflags;
68786+};
68787+
68788+#endif
68789diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68790index b359c4a..c08b334 100644
68791--- a/include/linux/nodemask.h
68792+++ b/include/linux/nodemask.h
68793@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68794
68795 #define any_online_node(mask) \
68796 ({ \
68797- int node; \
68798- for_each_node_mask(node, (mask)) \
68799- if (node_online(node)) \
68800+ int __node; \
68801+ for_each_node_mask(__node, (mask)) \
68802+ if (node_online(__node)) \
68803 break; \
68804- node; \
68805+ __node; \
68806 })
68807
68808 #define num_online_nodes() num_node_state(N_ONLINE)
68809diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68810index 5171639..7cf4235 100644
68811--- a/include/linux/oprofile.h
68812+++ b/include/linux/oprofile.h
68813@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
68814 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
68815 char const * name, ulong * val);
68816
68817-/** Create a file for read-only access to an atomic_t. */
68818+/** Create a file for read-only access to an atomic_unchecked_t. */
68819 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
68820- char const * name, atomic_t * val);
68821+ char const * name, atomic_unchecked_t * val);
68822
68823 /** create a directory */
68824 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
68825diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
68826index 3c62ed4..8924c7c 100644
68827--- a/include/linux/pagemap.h
68828+++ b/include/linux/pagemap.h
68829@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
68830 if (((unsigned long)uaddr & PAGE_MASK) !=
68831 ((unsigned long)end & PAGE_MASK))
68832 ret = __get_user(c, end);
68833+ (void)c;
68834 }
68835+ (void)c;
68836 return ret;
68837 }
68838
68839diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
68840index 81c9689..a567a55 100644
68841--- a/include/linux/perf_event.h
68842+++ b/include/linux/perf_event.h
68843@@ -476,7 +476,7 @@ struct hw_perf_event {
68844 struct hrtimer hrtimer;
68845 };
68846 };
68847- atomic64_t prev_count;
68848+ atomic64_unchecked_t prev_count;
68849 u64 sample_period;
68850 u64 last_period;
68851 atomic64_t period_left;
68852@@ -557,7 +557,7 @@ struct perf_event {
68853 const struct pmu *pmu;
68854
68855 enum perf_event_active_state state;
68856- atomic64_t count;
68857+ atomic64_unchecked_t count;
68858
68859 /*
68860 * These are the total time in nanoseconds that the event
68861@@ -595,8 +595,8 @@ struct perf_event {
68862 * These accumulate total time (in nanoseconds) that children
68863 * events have been enabled and running, respectively.
68864 */
68865- atomic64_t child_total_time_enabled;
68866- atomic64_t child_total_time_running;
68867+ atomic64_unchecked_t child_total_time_enabled;
68868+ atomic64_unchecked_t child_total_time_running;
68869
68870 /*
68871 * Protect attach/detach and child_list:
68872diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
68873index b43a9e0..b77d869 100644
68874--- a/include/linux/pipe_fs_i.h
68875+++ b/include/linux/pipe_fs_i.h
68876@@ -46,9 +46,9 @@ struct pipe_inode_info {
68877 wait_queue_head_t wait;
68878 unsigned int nrbufs, curbuf;
68879 struct page *tmp_page;
68880- unsigned int readers;
68881- unsigned int writers;
68882- unsigned int waiting_writers;
68883+ atomic_t readers;
68884+ atomic_t writers;
68885+ atomic_t waiting_writers;
68886 unsigned int r_counter;
68887 unsigned int w_counter;
68888 struct fasync_struct *fasync_readers;
68889diff --git a/include/linux/poison.h b/include/linux/poison.h
68890index 34066ff..e95d744 100644
68891--- a/include/linux/poison.h
68892+++ b/include/linux/poison.h
68893@@ -19,8 +19,8 @@
68894 * under normal circumstances, used to verify that nobody uses
68895 * non-initialized list entries.
68896 */
68897-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
68898-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
68899+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
68900+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
68901
68902 /********** include/linux/timer.h **********/
68903 /*
68904diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68905index 4f71bf4..cd2f68e 100644
68906--- a/include/linux/posix-timers.h
68907+++ b/include/linux/posix-timers.h
68908@@ -82,7 +82,8 @@ struct k_clock {
68909 #define TIMER_RETRY 1
68910 void (*timer_get) (struct k_itimer * timr,
68911 struct itimerspec * cur_setting);
68912-};
68913+} __do_const;
68914+typedef struct k_clock __no_const k_clock_no_const;
68915
68916 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
68917
68918diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68919index 72b1a10..13303a9 100644
68920--- a/include/linux/preempt.h
68921+++ b/include/linux/preempt.h
68922@@ -110,7 +110,7 @@ struct preempt_ops {
68923 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68924 void (*sched_out)(struct preempt_notifier *notifier,
68925 struct task_struct *next);
68926-};
68927+} __no_const;
68928
68929 /**
68930 * preempt_notifier - key for installing preemption notifiers
68931diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68932index 379eaed..1bf73e3 100644
68933--- a/include/linux/proc_fs.h
68934+++ b/include/linux/proc_fs.h
68935@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68936 return proc_create_data(name, mode, parent, proc_fops, NULL);
68937 }
68938
68939+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68940+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68941+{
68942+#ifdef CONFIG_GRKERNSEC_PROC_USER
68943+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68944+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68945+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68946+#else
68947+ return proc_create_data(name, mode, parent, proc_fops, NULL);
68948+#endif
68949+}
68950+
68951+
68952 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68953 mode_t mode, struct proc_dir_entry *base,
68954 read_proc_t *read_proc, void * data)
68955@@ -256,7 +269,7 @@ union proc_op {
68956 int (*proc_show)(struct seq_file *m,
68957 struct pid_namespace *ns, struct pid *pid,
68958 struct task_struct *task);
68959-};
68960+} __no_const;
68961
68962 struct ctl_table_header;
68963 struct ctl_table;
68964diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
68965index 7456d7d..6c1cfc9 100644
68966--- a/include/linux/ptrace.h
68967+++ b/include/linux/ptrace.h
68968@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
68969 extern void exit_ptrace(struct task_struct *tracer);
68970 #define PTRACE_MODE_READ 1
68971 #define PTRACE_MODE_ATTACH 2
68972-/* Returns 0 on success, -errno on denial. */
68973-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
68974 /* Returns true on success, false on denial. */
68975 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
68976+/* Returns true on success, false on denial. */
68977+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
68978
68979 static inline int ptrace_reparented(struct task_struct *child)
68980 {
68981diff --git a/include/linux/random.h b/include/linux/random.h
68982index 2948046..3262567 100644
68983--- a/include/linux/random.h
68984+++ b/include/linux/random.h
68985@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
68986 u32 random32(void);
68987 void srandom32(u32 seed);
68988
68989+static inline unsigned long pax_get_random_long(void)
68990+{
68991+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
68992+}
68993+
68994 #endif /* __KERNEL___ */
68995
68996 #endif /* _LINUX_RANDOM_H */
68997diff --git a/include/linux/reboot.h b/include/linux/reboot.h
68998index 988e55f..17cb4ef 100644
68999--- a/include/linux/reboot.h
69000+++ b/include/linux/reboot.h
69001@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69002 * Architecture-specific implementations of sys_reboot commands.
69003 */
69004
69005-extern void machine_restart(char *cmd);
69006-extern void machine_halt(void);
69007-extern void machine_power_off(void);
69008+extern void machine_restart(char *cmd) __noreturn;
69009+extern void machine_halt(void) __noreturn;
69010+extern void machine_power_off(void) __noreturn;
69011
69012 extern void machine_shutdown(void);
69013 struct pt_regs;
69014@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69015 */
69016
69017 extern void kernel_restart_prepare(char *cmd);
69018-extern void kernel_restart(char *cmd);
69019-extern void kernel_halt(void);
69020-extern void kernel_power_off(void);
69021+extern void kernel_restart(char *cmd) __noreturn;
69022+extern void kernel_halt(void) __noreturn;
69023+extern void kernel_power_off(void) __noreturn;
69024
69025 void ctrl_alt_del(void);
69026
69027@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
69028 * Emergency restart, callable from an interrupt handler.
69029 */
69030
69031-extern void emergency_restart(void);
69032+extern void emergency_restart(void) __noreturn;
69033 #include <asm/emergency-restart.h>
69034
69035 #endif
69036diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
69037index dd31e7b..5b03c5c 100644
69038--- a/include/linux/reiserfs_fs.h
69039+++ b/include/linux/reiserfs_fs.h
69040@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
69041 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
69042
69043 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
69044-#define get_generation(s) atomic_read (&fs_generation(s))
69045+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
69046 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
69047 #define __fs_changed(gen,s) (gen != get_generation (s))
69048 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
69049@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
69050 */
69051
69052 struct item_operations {
69053- int (*bytes_number) (struct item_head * ih, int block_size);
69054- void (*decrement_key) (struct cpu_key *);
69055- int (*is_left_mergeable) (struct reiserfs_key * ih,
69056+ int (* const bytes_number) (struct item_head * ih, int block_size);
69057+ void (* const decrement_key) (struct cpu_key *);
69058+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
69059 unsigned long bsize);
69060- void (*print_item) (struct item_head *, char *item);
69061- void (*check_item) (struct item_head *, char *item);
69062+ void (* const print_item) (struct item_head *, char *item);
69063+ void (* const check_item) (struct item_head *, char *item);
69064
69065- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69066+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69067 int is_affected, int insert_size);
69068- int (*check_left) (struct virtual_item * vi, int free,
69069+ int (* const check_left) (struct virtual_item * vi, int free,
69070 int start_skip, int end_skip);
69071- int (*check_right) (struct virtual_item * vi, int free);
69072- int (*part_size) (struct virtual_item * vi, int from, int to);
69073- int (*unit_num) (struct virtual_item * vi);
69074- void (*print_vi) (struct virtual_item * vi);
69075+ int (* const check_right) (struct virtual_item * vi, int free);
69076+ int (* const part_size) (struct virtual_item * vi, int from, int to);
69077+ int (* const unit_num) (struct virtual_item * vi);
69078+ void (* const print_vi) (struct virtual_item * vi);
69079 };
69080
69081-extern struct item_operations *item_ops[TYPE_ANY + 1];
69082+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
69083
69084 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
69085 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
69086diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
69087index dab68bb..0688727 100644
69088--- a/include/linux/reiserfs_fs_sb.h
69089+++ b/include/linux/reiserfs_fs_sb.h
69090@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
69091 /* Comment? -Hans */
69092 wait_queue_head_t s_wait;
69093 /* To be obsoleted soon by per buffer seals.. -Hans */
69094- atomic_t s_generation_counter; // increased by one every time the
69095+ atomic_unchecked_t s_generation_counter; // increased by one every time the
69096 // tree gets re-balanced
69097 unsigned long s_properties; /* File system properties. Currently holds
69098 on-disk FS format */
69099diff --git a/include/linux/relay.h b/include/linux/relay.h
69100index 14a86bc..17d0700 100644
69101--- a/include/linux/relay.h
69102+++ b/include/linux/relay.h
69103@@ -159,7 +159,7 @@ struct rchan_callbacks
69104 * The callback should return 0 if successful, negative if not.
69105 */
69106 int (*remove_buf_file)(struct dentry *dentry);
69107-};
69108+} __no_const;
69109
69110 /*
69111 * CONFIG_RELAY kernel API, kernel/relay.c
69112diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
69113index 3392c59..a746428 100644
69114--- a/include/linux/rfkill.h
69115+++ b/include/linux/rfkill.h
69116@@ -144,6 +144,7 @@ struct rfkill_ops {
69117 void (*query)(struct rfkill *rfkill, void *data);
69118 int (*set_block)(void *data, bool blocked);
69119 };
69120+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
69121
69122 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
69123 /**
69124diff --git a/include/linux/sched.h b/include/linux/sched.h
69125index 71849bf..2ef383dc3 100644
69126--- a/include/linux/sched.h
69127+++ b/include/linux/sched.h
69128@@ -101,6 +101,7 @@ struct bio;
69129 struct fs_struct;
69130 struct bts_context;
69131 struct perf_event_context;
69132+struct linux_binprm;
69133
69134 /*
69135 * List of flags we want to share for kernel threads,
69136@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
69137 extern signed long schedule_timeout_uninterruptible(signed long timeout);
69138 asmlinkage void __schedule(void);
69139 asmlinkage void schedule(void);
69140-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
69141+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
69142
69143 struct nsproxy;
69144 struct user_namespace;
69145@@ -371,9 +372,12 @@ struct user_namespace;
69146 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69147
69148 extern int sysctl_max_map_count;
69149+extern unsigned long sysctl_heap_stack_gap;
69150
69151 #include <linux/aio.h>
69152
69153+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
69154+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
69155 extern unsigned long
69156 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69157 unsigned long, unsigned long);
69158@@ -666,6 +670,16 @@ struct signal_struct {
69159 struct tty_audit_buf *tty_audit_buf;
69160 #endif
69161
69162+#ifdef CONFIG_GRKERNSEC
69163+ u32 curr_ip;
69164+ u32 saved_ip;
69165+ u32 gr_saddr;
69166+ u32 gr_daddr;
69167+ u16 gr_sport;
69168+ u16 gr_dport;
69169+ u8 used_accept:1;
69170+#endif
69171+
69172 int oom_adj; /* OOM kill score adjustment (bit shift) */
69173 };
69174
69175@@ -723,6 +737,11 @@ struct user_struct {
69176 struct key *session_keyring; /* UID's default session keyring */
69177 #endif
69178
69179+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69180+ unsigned int banned;
69181+ unsigned long ban_expires;
69182+#endif
69183+
69184 /* Hash table maintenance information */
69185 struct hlist_node uidhash_node;
69186 uid_t uid;
69187@@ -1328,8 +1347,8 @@ struct task_struct {
69188 struct list_head thread_group;
69189
69190 struct completion *vfork_done; /* for vfork() */
69191- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
69192- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69193+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
69194+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69195
69196 cputime_t utime, stime, utimescaled, stimescaled;
69197 cputime_t gtime;
69198@@ -1343,16 +1362,6 @@ struct task_struct {
69199 struct task_cputime cputime_expires;
69200 struct list_head cpu_timers[3];
69201
69202-/* process credentials */
69203- const struct cred *real_cred; /* objective and real subjective task
69204- * credentials (COW) */
69205- const struct cred *cred; /* effective (overridable) subjective task
69206- * credentials (COW) */
69207- struct mutex cred_guard_mutex; /* guard against foreign influences on
69208- * credential calculations
69209- * (notably. ptrace) */
69210- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69211-
69212 char comm[TASK_COMM_LEN]; /* executable name excluding path
69213 - access with [gs]et_task_comm (which lock
69214 it with task_lock())
69215@@ -1369,6 +1378,10 @@ struct task_struct {
69216 #endif
69217 /* CPU-specific state of this task */
69218 struct thread_struct thread;
69219+/* thread_info moved to task_struct */
69220+#ifdef CONFIG_X86
69221+ struct thread_info tinfo;
69222+#endif
69223 /* filesystem information */
69224 struct fs_struct *fs;
69225 /* open file information */
69226@@ -1436,6 +1449,15 @@ struct task_struct {
69227 int hardirq_context;
69228 int softirq_context;
69229 #endif
69230+
69231+/* process credentials */
69232+ const struct cred *real_cred; /* objective and real subjective task
69233+ * credentials (COW) */
69234+ struct mutex cred_guard_mutex; /* guard against foreign influences on
69235+ * credential calculations
69236+ * (notably. ptrace) */
69237+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69238+
69239 #ifdef CONFIG_LOCKDEP
69240 # define MAX_LOCK_DEPTH 48UL
69241 u64 curr_chain_key;
69242@@ -1456,6 +1478,9 @@ struct task_struct {
69243
69244 struct backing_dev_info *backing_dev_info;
69245
69246+ const struct cred *cred; /* effective (overridable) subjective task
69247+ * credentials (COW) */
69248+
69249 struct io_context *io_context;
69250
69251 unsigned long ptrace_message;
69252@@ -1519,6 +1544,27 @@ struct task_struct {
69253 unsigned long default_timer_slack_ns;
69254
69255 struct list_head *scm_work_list;
69256+
69257+#ifdef CONFIG_GRKERNSEC
69258+ /* grsecurity */
69259+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69260+ u64 exec_id;
69261+#endif
69262+#ifdef CONFIG_GRKERNSEC_SETXID
69263+ const struct cred *delayed_cred;
69264+#endif
69265+ struct dentry *gr_chroot_dentry;
69266+ struct acl_subject_label *acl;
69267+ struct acl_role_label *role;
69268+ struct file *exec_file;
69269+ u16 acl_role_id;
69270+ /* is this the task that authenticated to the special role */
69271+ u8 acl_sp_role;
69272+ u8 is_writable;
69273+ u8 brute;
69274+ u8 gr_is_chrooted;
69275+#endif
69276+
69277 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
69278 /* Index of current stored adress in ret_stack */
69279 int curr_ret_stack;
69280@@ -1542,6 +1588,57 @@ struct task_struct {
69281 #endif /* CONFIG_TRACING */
69282 };
69283
69284+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69285+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69286+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69287+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69288+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69289+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69290+
69291+#ifdef CONFIG_PAX_SOFTMODE
69292+extern int pax_softmode;
69293+#endif
69294+
69295+extern int pax_check_flags(unsigned long *);
69296+
69297+/* if tsk != current then task_lock must be held on it */
69298+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69299+static inline unsigned long pax_get_flags(struct task_struct *tsk)
69300+{
69301+ if (likely(tsk->mm))
69302+ return tsk->mm->pax_flags;
69303+ else
69304+ return 0UL;
69305+}
69306+
69307+/* if tsk != current then task_lock must be held on it */
69308+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69309+{
69310+ if (likely(tsk->mm)) {
69311+ tsk->mm->pax_flags = flags;
69312+ return 0;
69313+ }
69314+ return -EINVAL;
69315+}
69316+#endif
69317+
69318+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69319+extern void pax_set_initial_flags(struct linux_binprm *bprm);
69320+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69321+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69322+#endif
69323+
69324+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69325+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69326+extern void pax_report_refcount_overflow(struct pt_regs *regs);
69327+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
69328+
69329+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69330+extern void pax_track_stack(void);
69331+#else
69332+static inline void pax_track_stack(void) {}
69333+#endif
69334+
69335 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69336 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
69337
69338@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
69339 #define PF_DUMPCORE 0x00000200 /* dumped core */
69340 #define PF_SIGNALED 0x00000400 /* killed by a signal */
69341 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
69342-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
69343+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
69344 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
69345 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
69346 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
69347@@ -1978,7 +2075,9 @@ void yield(void);
69348 extern struct exec_domain default_exec_domain;
69349
69350 union thread_union {
69351+#ifndef CONFIG_X86
69352 struct thread_info thread_info;
69353+#endif
69354 unsigned long stack[THREAD_SIZE/sizeof(long)];
69355 };
69356
69357@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
69358 */
69359
69360 extern struct task_struct *find_task_by_vpid(pid_t nr);
69361+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69362 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69363 struct pid_namespace *ns);
69364
69365@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69366 extern void exit_itimers(struct signal_struct *);
69367 extern void flush_itimer_signals(void);
69368
69369-extern NORET_TYPE void do_group_exit(int);
69370+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
69371
69372 extern void daemonize(const char *, ...);
69373 extern int allow_signal(int);
69374@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69375
69376 #endif
69377
69378-static inline int object_is_on_stack(void *obj)
69379+static inline int object_starts_on_stack(void *obj)
69380 {
69381- void *stack = task_stack_page(current);
69382+ const void *stack = task_stack_page(current);
69383
69384 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69385 }
69386
69387+#ifdef CONFIG_PAX_USERCOPY
69388+extern int object_is_on_stack(const void *obj, unsigned long len);
69389+#endif
69390+
69391 extern void thread_info_cache_init(void);
69392
69393 #ifdef CONFIG_DEBUG_STACK_USAGE
69394diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
69395index 1ee2c05..81b7ec4 100644
69396--- a/include/linux/screen_info.h
69397+++ b/include/linux/screen_info.h
69398@@ -42,7 +42,8 @@ struct screen_info {
69399 __u16 pages; /* 0x32 */
69400 __u16 vesa_attributes; /* 0x34 */
69401 __u32 capabilities; /* 0x36 */
69402- __u8 _reserved[6]; /* 0x3a */
69403+ __u16 vesapm_size; /* 0x3a */
69404+ __u8 _reserved[4]; /* 0x3c */
69405 } __attribute__((packed));
69406
69407 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
69408diff --git a/include/linux/security.h b/include/linux/security.h
69409index d40d23f..d739b08 100644
69410--- a/include/linux/security.h
69411+++ b/include/linux/security.h
69412@@ -34,6 +34,7 @@
69413 #include <linux/key.h>
69414 #include <linux/xfrm.h>
69415 #include <linux/gfp.h>
69416+#include <linux/grsecurity.h>
69417 #include <net/flow.h>
69418
69419 /* Maximum number of letters for an LSM name string */
69420@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
69421 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69422 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69423 extern int cap_task_setnice(struct task_struct *p, int nice);
69424-extern int cap_syslog(int type);
69425+extern int cap_syslog(int type, bool from_file);
69426 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69427
69428 struct msghdr;
69429@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69430 * logging to the console.
69431 * See the syslog(2) manual page for an explanation of the @type values.
69432 * @type contains the type of action.
69433+ * @from_file indicates the context of action (if it came from /proc).
69434 * Return 0 if permission is granted.
69435 * @settime:
69436 * Check permission to change the system time.
69437@@ -1445,7 +1447,7 @@ struct security_operations {
69438 int (*sysctl) (struct ctl_table *table, int op);
69439 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69440 int (*quota_on) (struct dentry *dentry);
69441- int (*syslog) (int type);
69442+ int (*syslog) (int type, bool from_file);
69443 int (*settime) (struct timespec *ts, struct timezone *tz);
69444 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69445
69446@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69447 int security_sysctl(struct ctl_table *table, int op);
69448 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69449 int security_quota_on(struct dentry *dentry);
69450-int security_syslog(int type);
69451+int security_syslog(int type, bool from_file);
69452 int security_settime(struct timespec *ts, struct timezone *tz);
69453 int security_vm_enough_memory(long pages);
69454 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69455@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69456 return 0;
69457 }
69458
69459-static inline int security_syslog(int type)
69460+static inline int security_syslog(int type, bool from_file)
69461 {
69462- return cap_syslog(type);
69463+ return cap_syslog(type, from_file);
69464 }
69465
69466 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69467diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69468index 8366d8f..cc5f9d6 100644
69469--- a/include/linux/seq_file.h
69470+++ b/include/linux/seq_file.h
69471@@ -23,6 +23,9 @@ struct seq_file {
69472 u64 version;
69473 struct mutex lock;
69474 const struct seq_operations *op;
69475+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69476+ u64 exec_id;
69477+#endif
69478 void *private;
69479 };
69480
69481@@ -32,6 +35,7 @@ struct seq_operations {
69482 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69483 int (*show) (struct seq_file *m, void *v);
69484 };
69485+typedef struct seq_operations __no_const seq_operations_no_const;
69486
69487 #define SEQ_SKIP 1
69488
69489diff --git a/include/linux/shm.h b/include/linux/shm.h
69490index eca6235..c7417ed 100644
69491--- a/include/linux/shm.h
69492+++ b/include/linux/shm.h
69493@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69494 pid_t shm_cprid;
69495 pid_t shm_lprid;
69496 struct user_struct *mlock_user;
69497+#ifdef CONFIG_GRKERNSEC
69498+ time_t shm_createtime;
69499+ pid_t shm_lapid;
69500+#endif
69501 };
69502
69503 /* shm_mode upper byte flags */
69504diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69505index bcdd660..6e12e11 100644
69506--- a/include/linux/skbuff.h
69507+++ b/include/linux/skbuff.h
69508@@ -14,6 +14,7 @@
69509 #ifndef _LINUX_SKBUFF_H
69510 #define _LINUX_SKBUFF_H
69511
69512+#include <linux/const.h>
69513 #include <linux/kernel.h>
69514 #include <linux/kmemcheck.h>
69515 #include <linux/compiler.h>
69516@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69517 */
69518 static inline int skb_queue_empty(const struct sk_buff_head *list)
69519 {
69520- return list->next == (struct sk_buff *)list;
69521+ return list->next == (const struct sk_buff *)list;
69522 }
69523
69524 /**
69525@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69526 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69527 const struct sk_buff *skb)
69528 {
69529- return (skb->next == (struct sk_buff *) list);
69530+ return (skb->next == (const struct sk_buff *) list);
69531 }
69532
69533 /**
69534@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69535 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69536 const struct sk_buff *skb)
69537 {
69538- return (skb->prev == (struct sk_buff *) list);
69539+ return (skb->prev == (const struct sk_buff *) list);
69540 }
69541
69542 /**
69543@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69544 * headroom, you should not reduce this.
69545 */
69546 #ifndef NET_SKB_PAD
69547-#define NET_SKB_PAD 32
69548+#define NET_SKB_PAD (_AC(32,UL))
69549 #endif
69550
69551 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69552diff --git a/include/linux/slab.h b/include/linux/slab.h
69553index 2da8372..a3be824 100644
69554--- a/include/linux/slab.h
69555+++ b/include/linux/slab.h
69556@@ -11,12 +11,20 @@
69557
69558 #include <linux/gfp.h>
69559 #include <linux/types.h>
69560+#include <linux/err.h>
69561
69562 /*
69563 * Flags to pass to kmem_cache_create().
69564 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69565 */
69566 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69567+
69568+#ifdef CONFIG_PAX_USERCOPY
69569+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69570+#else
69571+#define SLAB_USERCOPY 0x00000000UL
69572+#endif
69573+
69574 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69575 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69576 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69577@@ -82,10 +90,13 @@
69578 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69579 * Both make kfree a no-op.
69580 */
69581-#define ZERO_SIZE_PTR ((void *)16)
69582+#define ZERO_SIZE_PTR \
69583+({ \
69584+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69585+ (void *)(-MAX_ERRNO-1L); \
69586+})
69587
69588-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69589- (unsigned long)ZERO_SIZE_PTR)
69590+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69591
69592 /*
69593 * struct kmem_cache related prototypes
69594@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69595 void kfree(const void *);
69596 void kzfree(const void *);
69597 size_t ksize(const void *);
69598+void check_object_size(const void *ptr, unsigned long n, bool to);
69599
69600 /*
69601 * Allocator specific definitions. These are mainly used to establish optimized
69602@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69603
69604 void __init kmem_cache_init_late(void);
69605
69606+#define kmalloc(x, y) \
69607+({ \
69608+ void *___retval; \
69609+ intoverflow_t ___x = (intoverflow_t)x; \
69610+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69611+ ___retval = NULL; \
69612+ else \
69613+ ___retval = kmalloc((size_t)___x, (y)); \
69614+ ___retval; \
69615+})
69616+
69617+#define kmalloc_node(x, y, z) \
69618+({ \
69619+ void *___retval; \
69620+ intoverflow_t ___x = (intoverflow_t)x; \
69621+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69622+ ___retval = NULL; \
69623+ else \
69624+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
69625+ ___retval; \
69626+})
69627+
69628+#define kzalloc(x, y) \
69629+({ \
69630+ void *___retval; \
69631+ intoverflow_t ___x = (intoverflow_t)x; \
69632+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69633+ ___retval = NULL; \
69634+ else \
69635+ ___retval = kzalloc((size_t)___x, (y)); \
69636+ ___retval; \
69637+})
69638+
69639 #endif /* _LINUX_SLAB_H */
69640diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69641index 850d057..d9dfe3c 100644
69642--- a/include/linux/slab_def.h
69643+++ b/include/linux/slab_def.h
69644@@ -69,10 +69,10 @@ struct kmem_cache {
69645 unsigned long node_allocs;
69646 unsigned long node_frees;
69647 unsigned long node_overflow;
69648- atomic_t allochit;
69649- atomic_t allocmiss;
69650- atomic_t freehit;
69651- atomic_t freemiss;
69652+ atomic_unchecked_t allochit;
69653+ atomic_unchecked_t allocmiss;
69654+ atomic_unchecked_t freehit;
69655+ atomic_unchecked_t freemiss;
69656
69657 /*
69658 * If debugging is enabled, then the allocator can add additional
69659diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69660index 5ad70a6..57f9f65 100644
69661--- a/include/linux/slub_def.h
69662+++ b/include/linux/slub_def.h
69663@@ -86,7 +86,7 @@ struct kmem_cache {
69664 struct kmem_cache_order_objects max;
69665 struct kmem_cache_order_objects min;
69666 gfp_t allocflags; /* gfp flags to use on each alloc */
69667- int refcount; /* Refcount for slab cache destroy */
69668+ atomic_t refcount; /* Refcount for slab cache destroy */
69669 void (*ctor)(void *);
69670 int inuse; /* Offset to metadata */
69671 int align; /* Alignment */
69672@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69673 #endif
69674
69675 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69676-void *__kmalloc(size_t size, gfp_t flags);
69677+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69678
69679 #ifdef CONFIG_KMEMTRACE
69680 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69681diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69682index 67ad11f..0bbd8af 100644
69683--- a/include/linux/sonet.h
69684+++ b/include/linux/sonet.h
69685@@ -61,7 +61,7 @@ struct sonet_stats {
69686 #include <asm/atomic.h>
69687
69688 struct k_sonet_stats {
69689-#define __HANDLE_ITEM(i) atomic_t i
69690+#define __HANDLE_ITEM(i) atomic_unchecked_t i
69691 __SONET_ITEMS
69692 #undef __HANDLE_ITEM
69693 };
69694diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69695index 6f52b4d..5500323 100644
69696--- a/include/linux/sunrpc/cache.h
69697+++ b/include/linux/sunrpc/cache.h
69698@@ -125,7 +125,7 @@ struct cache_detail {
69699 */
69700 struct cache_req {
69701 struct cache_deferred_req *(*defer)(struct cache_req *req);
69702-};
69703+} __no_const;
69704 /* this must be embedded in a deferred_request that is being
69705 * delayed awaiting cache-fill
69706 */
69707diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69708index 8ed9642..101ceab 100644
69709--- a/include/linux/sunrpc/clnt.h
69710+++ b/include/linux/sunrpc/clnt.h
69711@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69712 {
69713 switch (sap->sa_family) {
69714 case AF_INET:
69715- return ntohs(((struct sockaddr_in *)sap)->sin_port);
69716+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69717 case AF_INET6:
69718- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69719+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69720 }
69721 return 0;
69722 }
69723@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69724 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69725 const struct sockaddr *src)
69726 {
69727- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69728+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69729 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69730
69731 dsin->sin_family = ssin->sin_family;
69732@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69733 if (sa->sa_family != AF_INET6)
69734 return 0;
69735
69736- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69737+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69738 }
69739
69740 #endif /* __KERNEL__ */
69741diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69742index c14fe86..393245e 100644
69743--- a/include/linux/sunrpc/svc_rdma.h
69744+++ b/include/linux/sunrpc/svc_rdma.h
69745@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69746 extern unsigned int svcrdma_max_requests;
69747 extern unsigned int svcrdma_max_req_size;
69748
69749-extern atomic_t rdma_stat_recv;
69750-extern atomic_t rdma_stat_read;
69751-extern atomic_t rdma_stat_write;
69752-extern atomic_t rdma_stat_sq_starve;
69753-extern atomic_t rdma_stat_rq_starve;
69754-extern atomic_t rdma_stat_rq_poll;
69755-extern atomic_t rdma_stat_rq_prod;
69756-extern atomic_t rdma_stat_sq_poll;
69757-extern atomic_t rdma_stat_sq_prod;
69758+extern atomic_unchecked_t rdma_stat_recv;
69759+extern atomic_unchecked_t rdma_stat_read;
69760+extern atomic_unchecked_t rdma_stat_write;
69761+extern atomic_unchecked_t rdma_stat_sq_starve;
69762+extern atomic_unchecked_t rdma_stat_rq_starve;
69763+extern atomic_unchecked_t rdma_stat_rq_poll;
69764+extern atomic_unchecked_t rdma_stat_rq_prod;
69765+extern atomic_unchecked_t rdma_stat_sq_poll;
69766+extern atomic_unchecked_t rdma_stat_sq_prod;
69767
69768 #define RPCRDMA_VERSION 1
69769
69770diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69771index 5e781d8..1e62818 100644
69772--- a/include/linux/suspend.h
69773+++ b/include/linux/suspend.h
69774@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69775 * which require special recovery actions in that situation.
69776 */
69777 struct platform_suspend_ops {
69778- int (*valid)(suspend_state_t state);
69779- int (*begin)(suspend_state_t state);
69780- int (*prepare)(void);
69781- int (*prepare_late)(void);
69782- int (*enter)(suspend_state_t state);
69783- void (*wake)(void);
69784- void (*finish)(void);
69785- void (*end)(void);
69786- void (*recover)(void);
69787+ int (* const valid)(suspend_state_t state);
69788+ int (* const begin)(suspend_state_t state);
69789+ int (* const prepare)(void);
69790+ int (* const prepare_late)(void);
69791+ int (* const enter)(suspend_state_t state);
69792+ void (* const wake)(void);
69793+ void (* const finish)(void);
69794+ void (* const end)(void);
69795+ void (* const recover)(void);
69796 };
69797
69798 #ifdef CONFIG_SUSPEND
69799@@ -120,7 +120,7 @@ struct platform_suspend_ops {
69800 * suspend_set_ops - set platform dependent suspend operations
69801 * @ops: The new suspend operations to set.
69802 */
69803-extern void suspend_set_ops(struct platform_suspend_ops *ops);
69804+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
69805 extern int suspend_valid_only_mem(suspend_state_t state);
69806
69807 /**
69808@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
69809 #else /* !CONFIG_SUSPEND */
69810 #define suspend_valid_only_mem NULL
69811
69812-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
69813+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
69814 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
69815 #endif /* !CONFIG_SUSPEND */
69816
69817@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
69818 * platforms which require special recovery actions in that situation.
69819 */
69820 struct platform_hibernation_ops {
69821- int (*begin)(void);
69822- void (*end)(void);
69823- int (*pre_snapshot)(void);
69824- void (*finish)(void);
69825- int (*prepare)(void);
69826- int (*enter)(void);
69827- void (*leave)(void);
69828- int (*pre_restore)(void);
69829- void (*restore_cleanup)(void);
69830- void (*recover)(void);
69831+ int (* const begin)(void);
69832+ void (* const end)(void);
69833+ int (* const pre_snapshot)(void);
69834+ void (* const finish)(void);
69835+ int (* const prepare)(void);
69836+ int (* const enter)(void);
69837+ void (* const leave)(void);
69838+ int (* const pre_restore)(void);
69839+ void (* const restore_cleanup)(void);
69840+ void (* const recover)(void);
69841 };
69842
69843 #ifdef CONFIG_HIBERNATION
69844@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
69845 extern void swsusp_unset_page_free(struct page *);
69846 extern unsigned long get_safe_page(gfp_t gfp_mask);
69847
69848-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
69849+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
69850 extern int hibernate(void);
69851 extern bool system_entering_hibernation(void);
69852 #else /* CONFIG_HIBERNATION */
69853@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
69854 static inline void swsusp_set_page_free(struct page *p) {}
69855 static inline void swsusp_unset_page_free(struct page *p) {}
69856
69857-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
69858+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
69859 static inline int hibernate(void) { return -ENOSYS; }
69860 static inline bool system_entering_hibernation(void) { return false; }
69861 #endif /* CONFIG_HIBERNATION */
69862diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
69863index 0eb6942..a805cb6 100644
69864--- a/include/linux/sysctl.h
69865+++ b/include/linux/sysctl.h
69866@@ -164,7 +164,11 @@ enum
69867 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
69868 };
69869
69870-
69871+#ifdef CONFIG_PAX_SOFTMODE
69872+enum {
69873+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
69874+};
69875+#endif
69876
69877 /* CTL_VM names: */
69878 enum
69879@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
69880
69881 extern int proc_dostring(struct ctl_table *, int,
69882 void __user *, size_t *, loff_t *);
69883+extern int proc_dostring_modpriv(struct ctl_table *, int,
69884+ void __user *, size_t *, loff_t *);
69885 extern int proc_dointvec(struct ctl_table *, int,
69886 void __user *, size_t *, loff_t *);
69887 extern int proc_dointvec_minmax(struct ctl_table *, int,
69888@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
69889
69890 extern ctl_handler sysctl_data;
69891 extern ctl_handler sysctl_string;
69892+extern ctl_handler sysctl_string_modpriv;
69893 extern ctl_handler sysctl_intvec;
69894 extern ctl_handler sysctl_jiffies;
69895 extern ctl_handler sysctl_ms_jiffies;
69896diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
69897index 9d68fed..71f02cc 100644
69898--- a/include/linux/sysfs.h
69899+++ b/include/linux/sysfs.h
69900@@ -75,8 +75,8 @@ struct bin_attribute {
69901 };
69902
69903 struct sysfs_ops {
69904- ssize_t (*show)(struct kobject *, struct attribute *,char *);
69905- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
69906+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
69907+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
69908 };
69909
69910 struct sysfs_dirent;
69911diff --git a/include/linux/syslog.h b/include/linux/syslog.h
69912new file mode 100644
69913index 0000000..3891139
69914--- /dev/null
69915+++ b/include/linux/syslog.h
69916@@ -0,0 +1,52 @@
69917+/* Syslog internals
69918+ *
69919+ * Copyright 2010 Canonical, Ltd.
69920+ * Author: Kees Cook <kees.cook@canonical.com>
69921+ *
69922+ * This program is free software; you can redistribute it and/or modify
69923+ * it under the terms of the GNU General Public License as published by
69924+ * the Free Software Foundation; either version 2, or (at your option)
69925+ * any later version.
69926+ *
69927+ * This program is distributed in the hope that it will be useful,
69928+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
69929+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69930+ * GNU General Public License for more details.
69931+ *
69932+ * You should have received a copy of the GNU General Public License
69933+ * along with this program; see the file COPYING. If not, write to
69934+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69935+ */
69936+
69937+#ifndef _LINUX_SYSLOG_H
69938+#define _LINUX_SYSLOG_H
69939+
69940+/* Close the log. Currently a NOP. */
69941+#define SYSLOG_ACTION_CLOSE 0
69942+/* Open the log. Currently a NOP. */
69943+#define SYSLOG_ACTION_OPEN 1
69944+/* Read from the log. */
69945+#define SYSLOG_ACTION_READ 2
69946+/* Read all messages remaining in the ring buffer. */
69947+#define SYSLOG_ACTION_READ_ALL 3
69948+/* Read and clear all messages remaining in the ring buffer */
69949+#define SYSLOG_ACTION_READ_CLEAR 4
69950+/* Clear ring buffer. */
69951+#define SYSLOG_ACTION_CLEAR 5
69952+/* Disable printk's to console */
69953+#define SYSLOG_ACTION_CONSOLE_OFF 6
69954+/* Enable printk's to console */
69955+#define SYSLOG_ACTION_CONSOLE_ON 7
69956+/* Set level of messages printed to console */
69957+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
69958+/* Return number of unread characters in the log buffer */
69959+#define SYSLOG_ACTION_SIZE_UNREAD 9
69960+/* Return size of the log buffer */
69961+#define SYSLOG_ACTION_SIZE_BUFFER 10
69962+
69963+#define SYSLOG_FROM_CALL 0
69964+#define SYSLOG_FROM_FILE 1
69965+
69966+int do_syslog(int type, char __user *buf, int count, bool from_file);
69967+
69968+#endif /* _LINUX_SYSLOG_H */
69969diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
69970index a8cc4e1..98d3b85 100644
69971--- a/include/linux/thread_info.h
69972+++ b/include/linux/thread_info.h
69973@@ -23,7 +23,7 @@ struct restart_block {
69974 };
69975 /* For futex_wait and futex_wait_requeue_pi */
69976 struct {
69977- u32 *uaddr;
69978+ u32 __user *uaddr;
69979 u32 val;
69980 u32 flags;
69981 u32 bitset;
69982diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
69983index 1eb44a9..f582df3 100644
69984--- a/include/linux/tracehook.h
69985+++ b/include/linux/tracehook.h
69986@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
69987 /*
69988 * ptrace report for syscall entry and exit looks identical.
69989 */
69990-static inline void ptrace_report_syscall(struct pt_regs *regs)
69991+static inline int ptrace_report_syscall(struct pt_regs *regs)
69992 {
69993 int ptrace = task_ptrace(current);
69994
69995 if (!(ptrace & PT_PTRACED))
69996- return;
69997+ return 0;
69998
69999 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
70000
70001@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70002 send_sig(current->exit_code, current, 1);
70003 current->exit_code = 0;
70004 }
70005+
70006+ return fatal_signal_pending(current);
70007 }
70008
70009 /**
70010@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70011 static inline __must_check int tracehook_report_syscall_entry(
70012 struct pt_regs *regs)
70013 {
70014- ptrace_report_syscall(regs);
70015- return 0;
70016+ return ptrace_report_syscall(regs);
70017 }
70018
70019 /**
70020diff --git a/include/linux/tty.h b/include/linux/tty.h
70021index e9c57e9..ee6d489 100644
70022--- a/include/linux/tty.h
70023+++ b/include/linux/tty.h
70024@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
70025 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
70026 extern void tty_ldisc_enable(struct tty_struct *tty);
70027
70028-
70029 /* n_tty.c */
70030 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
70031
70032diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70033index 0c4ee9b..9f7c426 100644
70034--- a/include/linux/tty_ldisc.h
70035+++ b/include/linux/tty_ldisc.h
70036@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
70037
70038 struct module *owner;
70039
70040- int refcount;
70041+ atomic_t refcount;
70042 };
70043
70044 struct tty_ldisc {
70045diff --git a/include/linux/types.h b/include/linux/types.h
70046index c42724f..d190eee 100644
70047--- a/include/linux/types.h
70048+++ b/include/linux/types.h
70049@@ -191,10 +191,26 @@ typedef struct {
70050 volatile int counter;
70051 } atomic_t;
70052
70053+#ifdef CONFIG_PAX_REFCOUNT
70054+typedef struct {
70055+ volatile int counter;
70056+} atomic_unchecked_t;
70057+#else
70058+typedef atomic_t atomic_unchecked_t;
70059+#endif
70060+
70061 #ifdef CONFIG_64BIT
70062 typedef struct {
70063 volatile long counter;
70064 } atomic64_t;
70065+
70066+#ifdef CONFIG_PAX_REFCOUNT
70067+typedef struct {
70068+ volatile long counter;
70069+} atomic64_unchecked_t;
70070+#else
70071+typedef atomic64_t atomic64_unchecked_t;
70072+#endif
70073 #endif
70074
70075 struct ustat {
70076diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70077index 6b58367..53a3e8e 100644
70078--- a/include/linux/uaccess.h
70079+++ b/include/linux/uaccess.h
70080@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70081 long ret; \
70082 mm_segment_t old_fs = get_fs(); \
70083 \
70084- set_fs(KERNEL_DS); \
70085 pagefault_disable(); \
70086- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70087- pagefault_enable(); \
70088+ set_fs(KERNEL_DS); \
70089+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70090 set_fs(old_fs); \
70091+ pagefault_enable(); \
70092 ret; \
70093 })
70094
70095@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70096 * Safely read from address @src to the buffer at @dst. If a kernel fault
70097 * happens, handle that and return -EFAULT.
70098 */
70099-extern long probe_kernel_read(void *dst, void *src, size_t size);
70100+extern long probe_kernel_read(void *dst, const void *src, size_t size);
70101
70102 /*
70103 * probe_kernel_write(): safely attempt to write to a location
70104@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
70105 * Safely write to address @dst from the buffer at @src. If a kernel fault
70106 * happens, handle that and return -EFAULT.
70107 */
70108-extern long probe_kernel_write(void *dst, void *src, size_t size);
70109+extern long probe_kernel_write(void *dst, const void *src, size_t size);
70110
70111 #endif /* __LINUX_UACCESS_H__ */
70112diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70113index 99c1b4d..bb94261 100644
70114--- a/include/linux/unaligned/access_ok.h
70115+++ b/include/linux/unaligned/access_ok.h
70116@@ -6,32 +6,32 @@
70117
70118 static inline u16 get_unaligned_le16(const void *p)
70119 {
70120- return le16_to_cpup((__le16 *)p);
70121+ return le16_to_cpup((const __le16 *)p);
70122 }
70123
70124 static inline u32 get_unaligned_le32(const void *p)
70125 {
70126- return le32_to_cpup((__le32 *)p);
70127+ return le32_to_cpup((const __le32 *)p);
70128 }
70129
70130 static inline u64 get_unaligned_le64(const void *p)
70131 {
70132- return le64_to_cpup((__le64 *)p);
70133+ return le64_to_cpup((const __le64 *)p);
70134 }
70135
70136 static inline u16 get_unaligned_be16(const void *p)
70137 {
70138- return be16_to_cpup((__be16 *)p);
70139+ return be16_to_cpup((const __be16 *)p);
70140 }
70141
70142 static inline u32 get_unaligned_be32(const void *p)
70143 {
70144- return be32_to_cpup((__be32 *)p);
70145+ return be32_to_cpup((const __be32 *)p);
70146 }
70147
70148 static inline u64 get_unaligned_be64(const void *p)
70149 {
70150- return be64_to_cpup((__be64 *)p);
70151+ return be64_to_cpup((const __be64 *)p);
70152 }
70153
70154 static inline void put_unaligned_le16(u16 val, void *p)
70155diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70156index 79b9837..b5a56f9 100644
70157--- a/include/linux/vermagic.h
70158+++ b/include/linux/vermagic.h
70159@@ -26,9 +26,35 @@
70160 #define MODULE_ARCH_VERMAGIC ""
70161 #endif
70162
70163+#ifdef CONFIG_PAX_REFCOUNT
70164+#define MODULE_PAX_REFCOUNT "REFCOUNT "
70165+#else
70166+#define MODULE_PAX_REFCOUNT ""
70167+#endif
70168+
70169+#ifdef CONSTIFY_PLUGIN
70170+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
70171+#else
70172+#define MODULE_CONSTIFY_PLUGIN ""
70173+#endif
70174+
70175+#ifdef STACKLEAK_PLUGIN
70176+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
70177+#else
70178+#define MODULE_STACKLEAK_PLUGIN ""
70179+#endif
70180+
70181+#ifdef CONFIG_GRKERNSEC
70182+#define MODULE_GRSEC "GRSEC "
70183+#else
70184+#define MODULE_GRSEC ""
70185+#endif
70186+
70187 #define VERMAGIC_STRING \
70188 UTS_RELEASE " " \
70189 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
70190 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
70191- MODULE_ARCH_VERMAGIC
70192+ MODULE_ARCH_VERMAGIC \
70193+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
70194+ MODULE_GRSEC
70195
70196diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
70197index 819a634..462ac12 100644
70198--- a/include/linux/vmalloc.h
70199+++ b/include/linux/vmalloc.h
70200@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
70201 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
70202 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
70203 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
70204+
70205+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70206+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
70207+#endif
70208+
70209 /* bits [20..32] reserved for arch specific ioremap internals */
70210
70211 /*
70212@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
70213
70214 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
70215
70216+#define vmalloc(x) \
70217+({ \
70218+ void *___retval; \
70219+ intoverflow_t ___x = (intoverflow_t)x; \
70220+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
70221+ ___retval = NULL; \
70222+ else \
70223+ ___retval = vmalloc((unsigned long)___x); \
70224+ ___retval; \
70225+})
70226+
70227+#define __vmalloc(x, y, z) \
70228+({ \
70229+ void *___retval; \
70230+ intoverflow_t ___x = (intoverflow_t)x; \
70231+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
70232+ ___retval = NULL; \
70233+ else \
70234+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
70235+ ___retval; \
70236+})
70237+
70238+#define vmalloc_user(x) \
70239+({ \
70240+ void *___retval; \
70241+ intoverflow_t ___x = (intoverflow_t)x; \
70242+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
70243+ ___retval = NULL; \
70244+ else \
70245+ ___retval = vmalloc_user((unsigned long)___x); \
70246+ ___retval; \
70247+})
70248+
70249+#define vmalloc_exec(x) \
70250+({ \
70251+ void *___retval; \
70252+ intoverflow_t ___x = (intoverflow_t)x; \
70253+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
70254+ ___retval = NULL; \
70255+ else \
70256+ ___retval = vmalloc_exec((unsigned long)___x); \
70257+ ___retval; \
70258+})
70259+
70260+#define vmalloc_node(x, y) \
70261+({ \
70262+ void *___retval; \
70263+ intoverflow_t ___x = (intoverflow_t)x; \
70264+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
70265+ ___retval = NULL; \
70266+ else \
70267+ ___retval = vmalloc_node((unsigned long)___x, (y));\
70268+ ___retval; \
70269+})
70270+
70271+#define vmalloc_32(x) \
70272+({ \
70273+ void *___retval; \
70274+ intoverflow_t ___x = (intoverflow_t)x; \
70275+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
70276+ ___retval = NULL; \
70277+ else \
70278+ ___retval = vmalloc_32((unsigned long)___x); \
70279+ ___retval; \
70280+})
70281+
70282+#define vmalloc_32_user(x) \
70283+({ \
70284+ void *___retval; \
70285+ intoverflow_t ___x = (intoverflow_t)x; \
70286+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
70287+ ___retval = NULL; \
70288+ else \
70289+ ___retval = vmalloc_32_user((unsigned long)___x);\
70290+ ___retval; \
70291+})
70292+
70293 #endif /* _LINUX_VMALLOC_H */
70294diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
70295index 13070d6..aa4159a 100644
70296--- a/include/linux/vmstat.h
70297+++ b/include/linux/vmstat.h
70298@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
70299 /*
70300 * Zone based page accounting with per cpu differentials.
70301 */
70302-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70303+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70304
70305 static inline void zone_page_state_add(long x, struct zone *zone,
70306 enum zone_stat_item item)
70307 {
70308- atomic_long_add(x, &zone->vm_stat[item]);
70309- atomic_long_add(x, &vm_stat[item]);
70310+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70311+ atomic_long_add_unchecked(x, &vm_stat[item]);
70312 }
70313
70314 static inline unsigned long global_page_state(enum zone_stat_item item)
70315 {
70316- long x = atomic_long_read(&vm_stat[item]);
70317+ long x = atomic_long_read_unchecked(&vm_stat[item]);
70318 #ifdef CONFIG_SMP
70319 if (x < 0)
70320 x = 0;
70321@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70322 static inline unsigned long zone_page_state(struct zone *zone,
70323 enum zone_stat_item item)
70324 {
70325- long x = atomic_long_read(&zone->vm_stat[item]);
70326+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70327 #ifdef CONFIG_SMP
70328 if (x < 0)
70329 x = 0;
70330@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70331 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70332 enum zone_stat_item item)
70333 {
70334- long x = atomic_long_read(&zone->vm_stat[item]);
70335+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70336
70337 #ifdef CONFIG_SMP
70338 int cpu;
70339@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70340
70341 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70342 {
70343- atomic_long_inc(&zone->vm_stat[item]);
70344- atomic_long_inc(&vm_stat[item]);
70345+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
70346+ atomic_long_inc_unchecked(&vm_stat[item]);
70347 }
70348
70349 static inline void __inc_zone_page_state(struct page *page,
70350@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
70351
70352 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70353 {
70354- atomic_long_dec(&zone->vm_stat[item]);
70355- atomic_long_dec(&vm_stat[item]);
70356+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
70357+ atomic_long_dec_unchecked(&vm_stat[item]);
70358 }
70359
70360 static inline void __dec_zone_page_state(struct page *page,
70361diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70362index 5c84af8..1a3b6e2 100644
70363--- a/include/linux/xattr.h
70364+++ b/include/linux/xattr.h
70365@@ -33,6 +33,11 @@
70366 #define XATTR_USER_PREFIX "user."
70367 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
70368
70369+/* User namespace */
70370+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
70371+#define XATTR_PAX_FLAGS_SUFFIX "flags"
70372+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
70373+
70374 struct inode;
70375 struct dentry;
70376
70377diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
70378index eed5fcc..5080d24 100644
70379--- a/include/media/saa7146_vv.h
70380+++ b/include/media/saa7146_vv.h
70381@@ -167,7 +167,7 @@ struct saa7146_ext_vv
70382 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
70383
70384 /* the extension can override this */
70385- struct v4l2_ioctl_ops ops;
70386+ v4l2_ioctl_ops_no_const ops;
70387 /* pointer to the saa7146 core ops */
70388 const struct v4l2_ioctl_ops *core_ops;
70389
70390diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70391index 73c9867..2da8837 100644
70392--- a/include/media/v4l2-dev.h
70393+++ b/include/media/v4l2-dev.h
70394@@ -34,7 +34,7 @@ struct v4l2_device;
70395 #define V4L2_FL_UNREGISTERED (0)
70396
70397 struct v4l2_file_operations {
70398- struct module *owner;
70399+ struct module * const owner;
70400 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
70401 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
70402 unsigned int (*poll) (struct file *, struct poll_table_struct *);
70403@@ -46,6 +46,7 @@ struct v4l2_file_operations {
70404 int (*open) (struct file *);
70405 int (*release) (struct file *);
70406 };
70407+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
70408
70409 /*
70410 * Newer version of video_device, handled by videodev2.c
70411diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
70412index 5d5d550..f559ef1 100644
70413--- a/include/media/v4l2-device.h
70414+++ b/include/media/v4l2-device.h
70415@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
70416 this function returns 0. If the name ends with a digit (e.g. cx18),
70417 then the name will be set to cx18-0 since cx180 looks really odd. */
70418 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
70419- atomic_t *instance);
70420+ atomic_unchecked_t *instance);
70421
70422 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70423 Since the parent disappears this ensures that v4l2_dev doesn't have an
70424diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70425index 7a4529d..7244290 100644
70426--- a/include/media/v4l2-ioctl.h
70427+++ b/include/media/v4l2-ioctl.h
70428@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70429 long (*vidioc_default) (struct file *file, void *fh,
70430 int cmd, void *arg);
70431 };
70432+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70433
70434
70435 /* v4l debugging and diagnostics */
70436diff --git a/include/net/flow.h b/include/net/flow.h
70437index 809970b..c3df4f3 100644
70438--- a/include/net/flow.h
70439+++ b/include/net/flow.h
70440@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70441 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70442 u8 dir, flow_resolve_t resolver);
70443 extern void flow_cache_flush(void);
70444-extern atomic_t flow_cache_genid;
70445+extern atomic_unchecked_t flow_cache_genid;
70446
70447 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70448 {
70449diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70450index 15e1f8fe..668837c 100644
70451--- a/include/net/inetpeer.h
70452+++ b/include/net/inetpeer.h
70453@@ -24,7 +24,7 @@ struct inet_peer
70454 __u32 dtime; /* the time of last use of not
70455 * referenced entries */
70456 atomic_t refcnt;
70457- atomic_t rid; /* Frag reception counter */
70458+ atomic_unchecked_t rid; /* Frag reception counter */
70459 __u32 tcp_ts;
70460 unsigned long tcp_ts_stamp;
70461 };
70462diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70463index 98978e7..2243a3d 100644
70464--- a/include/net/ip_vs.h
70465+++ b/include/net/ip_vs.h
70466@@ -365,7 +365,7 @@ struct ip_vs_conn {
70467 struct ip_vs_conn *control; /* Master control connection */
70468 atomic_t n_control; /* Number of controlled ones */
70469 struct ip_vs_dest *dest; /* real server */
70470- atomic_t in_pkts; /* incoming packet counter */
70471+ atomic_unchecked_t in_pkts; /* incoming packet counter */
70472
70473 /* packet transmitter for different forwarding methods. If it
70474 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70475@@ -466,7 +466,7 @@ struct ip_vs_dest {
70476 union nf_inet_addr addr; /* IP address of the server */
70477 __be16 port; /* port number of the server */
70478 volatile unsigned flags; /* dest status flags */
70479- atomic_t conn_flags; /* flags to copy to conn */
70480+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
70481 atomic_t weight; /* server weight */
70482
70483 atomic_t refcnt; /* reference counter */
70484diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70485index 69b610a..fe3962c 100644
70486--- a/include/net/irda/ircomm_core.h
70487+++ b/include/net/irda/ircomm_core.h
70488@@ -51,7 +51,7 @@ typedef struct {
70489 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70490 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70491 struct ircomm_info *);
70492-} call_t;
70493+} __no_const call_t;
70494
70495 struct ircomm_cb {
70496 irda_queue_t queue;
70497diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70498index eea2e61..08c692d 100644
70499--- a/include/net/irda/ircomm_tty.h
70500+++ b/include/net/irda/ircomm_tty.h
70501@@ -35,6 +35,7 @@
70502 #include <linux/termios.h>
70503 #include <linux/timer.h>
70504 #include <linux/tty.h> /* struct tty_struct */
70505+#include <asm/local.h>
70506
70507 #include <net/irda/irias_object.h>
70508 #include <net/irda/ircomm_core.h>
70509@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70510 unsigned short close_delay;
70511 unsigned short closing_wait; /* time to wait before closing */
70512
70513- int open_count;
70514- int blocked_open; /* # of blocked opens */
70515+ local_t open_count;
70516+ local_t blocked_open; /* # of blocked opens */
70517
70518 /* Protect concurent access to :
70519 * o self->open_count
70520diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70521index f82a1e8..82d81e8 100644
70522--- a/include/net/iucv/af_iucv.h
70523+++ b/include/net/iucv/af_iucv.h
70524@@ -87,7 +87,7 @@ struct iucv_sock {
70525 struct iucv_sock_list {
70526 struct hlist_head head;
70527 rwlock_t lock;
70528- atomic_t autobind_name;
70529+ atomic_unchecked_t autobind_name;
70530 };
70531
70532 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70533diff --git a/include/net/lapb.h b/include/net/lapb.h
70534index 96cb5dd..25e8d4f 100644
70535--- a/include/net/lapb.h
70536+++ b/include/net/lapb.h
70537@@ -95,7 +95,7 @@ struct lapb_cb {
70538 struct sk_buff_head write_queue;
70539 struct sk_buff_head ack_queue;
70540 unsigned char window;
70541- struct lapb_register_struct callbacks;
70542+ struct lapb_register_struct *callbacks;
70543
70544 /* FRMR control information */
70545 struct lapb_frame frmr_data;
70546diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70547index 3817fda..cdb2343 100644
70548--- a/include/net/neighbour.h
70549+++ b/include/net/neighbour.h
70550@@ -131,7 +131,7 @@ struct neigh_ops
70551 int (*connected_output)(struct sk_buff*);
70552 int (*hh_output)(struct sk_buff*);
70553 int (*queue_xmit)(struct sk_buff*);
70554-};
70555+} __do_const;
70556
70557 struct pneigh_entry
70558 {
70559diff --git a/include/net/netlink.h b/include/net/netlink.h
70560index c344646..4778c71 100644
70561--- a/include/net/netlink.h
70562+++ b/include/net/netlink.h
70563@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70564 {
70565 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70566 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70567- nlh->nlmsg_len <= remaining);
70568+ nlh->nlmsg_len <= (unsigned int)remaining);
70569 }
70570
70571 /**
70572@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70573 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70574 {
70575 if (mark)
70576- skb_trim(skb, (unsigned char *) mark - skb->data);
70577+ skb_trim(skb, (const unsigned char *) mark - skb->data);
70578 }
70579
70580 /**
70581diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70582index 9a4b8b7..e49e077 100644
70583--- a/include/net/netns/ipv4.h
70584+++ b/include/net/netns/ipv4.h
70585@@ -54,7 +54,7 @@ struct netns_ipv4 {
70586 int current_rt_cache_rebuild_count;
70587
70588 struct timer_list rt_secret_timer;
70589- atomic_t rt_genid;
70590+ atomic_unchecked_t rt_genid;
70591
70592 #ifdef CONFIG_IP_MROUTE
70593 struct sock *mroute_sk;
70594diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70595index 8a6d529..171f401 100644
70596--- a/include/net/sctp/sctp.h
70597+++ b/include/net/sctp/sctp.h
70598@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70599
70600 #else /* SCTP_DEBUG */
70601
70602-#define SCTP_DEBUG_PRINTK(whatever...)
70603-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70604+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70605+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70606 #define SCTP_ENABLE_DEBUG
70607 #define SCTP_DISABLE_DEBUG
70608 #define SCTP_ASSERT(expr, str, func)
70609diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70610index d97f689..f3b90ab 100644
70611--- a/include/net/secure_seq.h
70612+++ b/include/net/secure_seq.h
70613@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70614 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70615 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70616 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70617- __be16 dport);
70618+ __be16 dport);
70619 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70620 __be16 sport, __be16 dport);
70621 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70622- __be16 sport, __be16 dport);
70623+ __be16 sport, __be16 dport);
70624 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70625- __be16 sport, __be16 dport);
70626+ __be16 sport, __be16 dport);
70627 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70628- __be16 sport, __be16 dport);
70629+ __be16 sport, __be16 dport);
70630
70631 #endif /* _NET_SECURE_SEQ */
70632diff --git a/include/net/sock.h b/include/net/sock.h
70633index 78adf52..99afd29 100644
70634--- a/include/net/sock.h
70635+++ b/include/net/sock.h
70636@@ -272,7 +272,7 @@ struct sock {
70637 rwlock_t sk_callback_lock;
70638 int sk_err,
70639 sk_err_soft;
70640- atomic_t sk_drops;
70641+ atomic_unchecked_t sk_drops;
70642 unsigned short sk_ack_backlog;
70643 unsigned short sk_max_ack_backlog;
70644 __u32 sk_priority;
70645@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70646 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70647 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70648 #else
70649-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70650+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70651 int inc)
70652 {
70653 }
70654diff --git a/include/net/tcp.h b/include/net/tcp.h
70655index 6cfe18b..dd21acb 100644
70656--- a/include/net/tcp.h
70657+++ b/include/net/tcp.h
70658@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70659 struct tcp_seq_afinfo {
70660 char *name;
70661 sa_family_t family;
70662- struct file_operations seq_fops;
70663- struct seq_operations seq_ops;
70664+ file_operations_no_const seq_fops;
70665+ seq_operations_no_const seq_ops;
70666 };
70667
70668 struct tcp_iter_state {
70669diff --git a/include/net/udp.h b/include/net/udp.h
70670index f98abd2..b4b042f 100644
70671--- a/include/net/udp.h
70672+++ b/include/net/udp.h
70673@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70674 char *name;
70675 sa_family_t family;
70676 struct udp_table *udp_table;
70677- struct file_operations seq_fops;
70678- struct seq_operations seq_ops;
70679+ file_operations_no_const seq_fops;
70680+ seq_operations_no_const seq_ops;
70681 };
70682
70683 struct udp_iter_state {
70684diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70685index cbb822e..e9c1cbe 100644
70686--- a/include/rdma/iw_cm.h
70687+++ b/include/rdma/iw_cm.h
70688@@ -129,7 +129,7 @@ struct iw_cm_verbs {
70689 int backlog);
70690
70691 int (*destroy_listen)(struct iw_cm_id *cm_id);
70692-};
70693+} __no_const;
70694
70695 /**
70696 * iw_create_cm_id - Create an IW CM identifier.
70697diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70698index 09a124b..caa8ca8 100644
70699--- a/include/scsi/libfc.h
70700+++ b/include/scsi/libfc.h
70701@@ -675,6 +675,7 @@ struct libfc_function_template {
70702 */
70703 void (*disc_stop_final) (struct fc_lport *);
70704 };
70705+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70706
70707 /* information used by the discovery layer */
70708 struct fc_disc {
70709@@ -707,7 +708,7 @@ struct fc_lport {
70710 struct fc_disc disc;
70711
70712 /* Operational Information */
70713- struct libfc_function_template tt;
70714+ libfc_function_template_no_const tt;
70715 u8 link_up;
70716 u8 qfull;
70717 enum fc_lport_state state;
70718diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70719index de8e180..f15e0d7 100644
70720--- a/include/scsi/scsi_device.h
70721+++ b/include/scsi/scsi_device.h
70722@@ -156,9 +156,9 @@ struct scsi_device {
70723 unsigned int max_device_blocked; /* what device_blocked counts down from */
70724 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70725
70726- atomic_t iorequest_cnt;
70727- atomic_t iodone_cnt;
70728- atomic_t ioerr_cnt;
70729+ atomic_unchecked_t iorequest_cnt;
70730+ atomic_unchecked_t iodone_cnt;
70731+ atomic_unchecked_t ioerr_cnt;
70732
70733 struct device sdev_gendev,
70734 sdev_dev;
70735diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70736index fc50bd6..81ba9cb 100644
70737--- a/include/scsi/scsi_transport_fc.h
70738+++ b/include/scsi/scsi_transport_fc.h
70739@@ -708,7 +708,7 @@ struct fc_function_template {
70740 unsigned long show_host_system_hostname:1;
70741
70742 unsigned long disable_target_scan:1;
70743-};
70744+} __do_const;
70745
70746
70747 /**
70748diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70749index 3dae3f7..8440d6f 100644
70750--- a/include/sound/ac97_codec.h
70751+++ b/include/sound/ac97_codec.h
70752@@ -419,15 +419,15 @@
70753 struct snd_ac97;
70754
70755 struct snd_ac97_build_ops {
70756- int (*build_3d) (struct snd_ac97 *ac97);
70757- int (*build_specific) (struct snd_ac97 *ac97);
70758- int (*build_spdif) (struct snd_ac97 *ac97);
70759- int (*build_post_spdif) (struct snd_ac97 *ac97);
70760+ int (* const build_3d) (struct snd_ac97 *ac97);
70761+ int (* const build_specific) (struct snd_ac97 *ac97);
70762+ int (* const build_spdif) (struct snd_ac97 *ac97);
70763+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
70764 #ifdef CONFIG_PM
70765- void (*suspend) (struct snd_ac97 *ac97);
70766- void (*resume) (struct snd_ac97 *ac97);
70767+ void (* const suspend) (struct snd_ac97 *ac97);
70768+ void (* const resume) (struct snd_ac97 *ac97);
70769 #endif
70770- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70771+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70772 };
70773
70774 struct snd_ac97_bus_ops {
70775@@ -477,7 +477,7 @@ struct snd_ac97_template {
70776
70777 struct snd_ac97 {
70778 /* -- lowlevel (hardware) driver specific -- */
70779- struct snd_ac97_build_ops * build_ops;
70780+ const struct snd_ac97_build_ops * build_ops;
70781 void *private_data;
70782 void (*private_free) (struct snd_ac97 *ac97);
70783 /* --- */
70784diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70785index 891cf1a..a94ba2b 100644
70786--- a/include/sound/ak4xxx-adda.h
70787+++ b/include/sound/ak4xxx-adda.h
70788@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70789 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
70790 unsigned char val);
70791 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
70792-};
70793+} __no_const;
70794
70795 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
70796
70797diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
70798index 8c05e47..2b5df97 100644
70799--- a/include/sound/hwdep.h
70800+++ b/include/sound/hwdep.h
70801@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
70802 struct snd_hwdep_dsp_status *status);
70803 int (*dsp_load)(struct snd_hwdep *hw,
70804 struct snd_hwdep_dsp_image *image);
70805-};
70806+} __no_const;
70807
70808 struct snd_hwdep {
70809 struct snd_card *card;
70810diff --git a/include/sound/info.h b/include/sound/info.h
70811index 112e894..6fda5b5 100644
70812--- a/include/sound/info.h
70813+++ b/include/sound/info.h
70814@@ -44,7 +44,7 @@ struct snd_info_entry_text {
70815 struct snd_info_buffer *buffer);
70816 void (*write)(struct snd_info_entry *entry,
70817 struct snd_info_buffer *buffer);
70818-};
70819+} __no_const;
70820
70821 struct snd_info_entry_ops {
70822 int (*open)(struct snd_info_entry *entry,
70823diff --git a/include/sound/pcm.h b/include/sound/pcm.h
70824index de6d981..590a550 100644
70825--- a/include/sound/pcm.h
70826+++ b/include/sound/pcm.h
70827@@ -80,6 +80,7 @@ struct snd_pcm_ops {
70828 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
70829 int (*ack)(struct snd_pcm_substream *substream);
70830 };
70831+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
70832
70833 /*
70834 *
70835diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
70836index 736eac7..fe8a80f 100644
70837--- a/include/sound/sb16_csp.h
70838+++ b/include/sound/sb16_csp.h
70839@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
70840 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
70841 int (*csp_stop) (struct snd_sb_csp * p);
70842 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
70843-};
70844+} __no_const;
70845
70846 /*
70847 * CSP private data
70848diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
70849index 444cd6b..3327cc5 100644
70850--- a/include/sound/ymfpci.h
70851+++ b/include/sound/ymfpci.h
70852@@ -358,7 +358,7 @@ struct snd_ymfpci {
70853 spinlock_t reg_lock;
70854 spinlock_t voice_lock;
70855 wait_queue_head_t interrupt_sleep;
70856- atomic_t interrupt_sleep_count;
70857+ atomic_unchecked_t interrupt_sleep_count;
70858 struct snd_info_entry *proc_entry;
70859 const struct firmware *dsp_microcode;
70860 const struct firmware *controller_microcode;
70861diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
70862index b89f9db..f097b38 100644
70863--- a/include/trace/events/irq.h
70864+++ b/include/trace/events/irq.h
70865@@ -34,7 +34,7 @@
70866 */
70867 TRACE_EVENT(irq_handler_entry,
70868
70869- TP_PROTO(int irq, struct irqaction *action),
70870+ TP_PROTO(int irq, const struct irqaction *action),
70871
70872 TP_ARGS(irq, action),
70873
70874@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
70875 */
70876 TRACE_EVENT(irq_handler_exit,
70877
70878- TP_PROTO(int irq, struct irqaction *action, int ret),
70879+ TP_PROTO(int irq, const struct irqaction *action, int ret),
70880
70881 TP_ARGS(irq, action, ret),
70882
70883@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
70884 */
70885 TRACE_EVENT(softirq_entry,
70886
70887- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70888+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70889
70890 TP_ARGS(h, vec),
70891
70892@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
70893 */
70894 TRACE_EVENT(softirq_exit,
70895
70896- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70897+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70898
70899 TP_ARGS(h, vec),
70900
70901diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
70902index 0993a22..32ba2fe 100644
70903--- a/include/video/uvesafb.h
70904+++ b/include/video/uvesafb.h
70905@@ -177,6 +177,7 @@ struct uvesafb_par {
70906 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
70907 u8 pmi_setpal; /* PMI for palette changes */
70908 u16 *pmi_base; /* protected mode interface location */
70909+ u8 *pmi_code; /* protected mode code location */
70910 void *pmi_start;
70911 void *pmi_pal;
70912 u8 *vbe_state_orig; /*
70913diff --git a/init/Kconfig b/init/Kconfig
70914index d72691b..3996e54 100644
70915--- a/init/Kconfig
70916+++ b/init/Kconfig
70917@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
70918
70919 config COMPAT_BRK
70920 bool "Disable heap randomization"
70921- default y
70922+ default n
70923 help
70924 Randomizing heap placement makes heap exploits harder, but it
70925 also breaks ancient binaries (including anything libc5 based).
70926diff --git a/init/do_mounts.c b/init/do_mounts.c
70927index bb008d0..4fa3933 100644
70928--- a/init/do_mounts.c
70929+++ b/init/do_mounts.c
70930@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
70931
70932 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
70933 {
70934- int err = sys_mount(name, "/root", fs, flags, data);
70935+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
70936 if (err)
70937 return err;
70938
70939- sys_chdir("/root");
70940+ sys_chdir((__force const char __user *)"/root");
70941 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
70942 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
70943 current->fs->pwd.mnt->mnt_sb->s_type->name,
70944@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
70945 va_start(args, fmt);
70946 vsprintf(buf, fmt, args);
70947 va_end(args);
70948- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
70949+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
70950 if (fd >= 0) {
70951 sys_ioctl(fd, FDEJECT, 0);
70952 sys_close(fd);
70953 }
70954 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
70955- fd = sys_open("/dev/console", O_RDWR, 0);
70956+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
70957 if (fd >= 0) {
70958 sys_ioctl(fd, TCGETS, (long)&termios);
70959 termios.c_lflag &= ~ICANON;
70960 sys_ioctl(fd, TCSETSF, (long)&termios);
70961- sys_read(fd, &c, 1);
70962+ sys_read(fd, (char __user *)&c, 1);
70963 termios.c_lflag |= ICANON;
70964 sys_ioctl(fd, TCSETSF, (long)&termios);
70965 sys_close(fd);
70966@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
70967 mount_root();
70968 out:
70969 devtmpfs_mount("dev");
70970- sys_mount(".", "/", NULL, MS_MOVE, NULL);
70971- sys_chroot(".");
70972+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
70973+ sys_chroot((__force char __user *)".");
70974 }
70975diff --git a/init/do_mounts.h b/init/do_mounts.h
70976index f5b978a..69dbfe8 100644
70977--- a/init/do_mounts.h
70978+++ b/init/do_mounts.h
70979@@ -15,15 +15,15 @@ extern int root_mountflags;
70980
70981 static inline int create_dev(char *name, dev_t dev)
70982 {
70983- sys_unlink(name);
70984- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
70985+ sys_unlink((char __force_user *)name);
70986+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
70987 }
70988
70989 #if BITS_PER_LONG == 32
70990 static inline u32 bstat(char *name)
70991 {
70992 struct stat64 stat;
70993- if (sys_stat64(name, &stat) != 0)
70994+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
70995 return 0;
70996 if (!S_ISBLK(stat.st_mode))
70997 return 0;
70998@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
70999 static inline u32 bstat(char *name)
71000 {
71001 struct stat stat;
71002- if (sys_newstat(name, &stat) != 0)
71003+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
71004 return 0;
71005 if (!S_ISBLK(stat.st_mode))
71006 return 0;
71007diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
71008index 614241b..4da046b 100644
71009--- a/init/do_mounts_initrd.c
71010+++ b/init/do_mounts_initrd.c
71011@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
71012 sys_close(old_fd);sys_close(root_fd);
71013 sys_close(0);sys_close(1);sys_close(2);
71014 sys_setsid();
71015- (void) sys_open("/dev/console",O_RDWR,0);
71016+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
71017 (void) sys_dup(0);
71018 (void) sys_dup(0);
71019 return kernel_execve(shell, argv, envp_init);
71020@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
71021 create_dev("/dev/root.old", Root_RAM0);
71022 /* mount initrd on rootfs' /root */
71023 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
71024- sys_mkdir("/old", 0700);
71025- root_fd = sys_open("/", 0, 0);
71026- old_fd = sys_open("/old", 0, 0);
71027+ sys_mkdir((const char __force_user *)"/old", 0700);
71028+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
71029+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
71030 /* move initrd over / and chdir/chroot in initrd root */
71031- sys_chdir("/root");
71032- sys_mount(".", "/", NULL, MS_MOVE, NULL);
71033- sys_chroot(".");
71034+ sys_chdir((const char __force_user *)"/root");
71035+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
71036+ sys_chroot((const char __force_user *)".");
71037
71038 /*
71039 * In case that a resume from disk is carried out by linuxrc or one of
71040@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
71041
71042 /* move initrd to rootfs' /old */
71043 sys_fchdir(old_fd);
71044- sys_mount("/", ".", NULL, MS_MOVE, NULL);
71045+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
71046 /* switch root and cwd back to / of rootfs */
71047 sys_fchdir(root_fd);
71048- sys_chroot(".");
71049+ sys_chroot((const char __force_user *)".");
71050 sys_close(old_fd);
71051 sys_close(root_fd);
71052
71053 if (new_decode_dev(real_root_dev) == Root_RAM0) {
71054- sys_chdir("/old");
71055+ sys_chdir((const char __force_user *)"/old");
71056 return;
71057 }
71058
71059@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
71060 mount_root();
71061
71062 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
71063- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
71064+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
71065 if (!error)
71066 printk("okay\n");
71067 else {
71068- int fd = sys_open("/dev/root.old", O_RDWR, 0);
71069+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
71070 if (error == -ENOENT)
71071 printk("/initrd does not exist. Ignored.\n");
71072 else
71073 printk("failed\n");
71074 printk(KERN_NOTICE "Unmounting old root\n");
71075- sys_umount("/old", MNT_DETACH);
71076+ sys_umount((char __force_user *)"/old", MNT_DETACH);
71077 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
71078 if (fd < 0) {
71079 error = fd;
71080@@ -119,11 +119,11 @@ int __init initrd_load(void)
71081 * mounted in the normal path.
71082 */
71083 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
71084- sys_unlink("/initrd.image");
71085+ sys_unlink((const char __force_user *)"/initrd.image");
71086 handle_initrd();
71087 return 1;
71088 }
71089 }
71090- sys_unlink("/initrd.image");
71091+ sys_unlink((const char __force_user *)"/initrd.image");
71092 return 0;
71093 }
71094diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
71095index 69aebbf..c0bf6a7 100644
71096--- a/init/do_mounts_md.c
71097+++ b/init/do_mounts_md.c
71098@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
71099 partitioned ? "_d" : "", minor,
71100 md_setup_args[ent].device_names);
71101
71102- fd = sys_open(name, 0, 0);
71103+ fd = sys_open((char __force_user *)name, 0, 0);
71104 if (fd < 0) {
71105 printk(KERN_ERR "md: open failed - cannot start "
71106 "array %s\n", name);
71107@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
71108 * array without it
71109 */
71110 sys_close(fd);
71111- fd = sys_open(name, 0, 0);
71112+ fd = sys_open((char __force_user *)name, 0, 0);
71113 sys_ioctl(fd, BLKRRPART, 0);
71114 }
71115 sys_close(fd);
71116@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
71117
71118 wait_for_device_probe();
71119
71120- fd = sys_open("/dev/md0", 0, 0);
71121+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
71122 if (fd >= 0) {
71123 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
71124 sys_close(fd);
71125diff --git a/init/initramfs.c b/init/initramfs.c
71126index 1fd59b8..a01b079 100644
71127--- a/init/initramfs.c
71128+++ b/init/initramfs.c
71129@@ -74,7 +74,7 @@ static void __init free_hash(void)
71130 }
71131 }
71132
71133-static long __init do_utime(char __user *filename, time_t mtime)
71134+static long __init do_utime(__force char __user *filename, time_t mtime)
71135 {
71136 struct timespec t[2];
71137
71138@@ -109,7 +109,7 @@ static void __init dir_utime(void)
71139 struct dir_entry *de, *tmp;
71140 list_for_each_entry_safe(de, tmp, &dir_list, list) {
71141 list_del(&de->list);
71142- do_utime(de->name, de->mtime);
71143+ do_utime((char __force_user *)de->name, de->mtime);
71144 kfree(de->name);
71145 kfree(de);
71146 }
71147@@ -271,7 +271,7 @@ static int __init maybe_link(void)
71148 if (nlink >= 2) {
71149 char *old = find_link(major, minor, ino, mode, collected);
71150 if (old)
71151- return (sys_link(old, collected) < 0) ? -1 : 1;
71152+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
71153 }
71154 return 0;
71155 }
71156@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
71157 {
71158 struct stat st;
71159
71160- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
71161+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
71162 if (S_ISDIR(st.st_mode))
71163- sys_rmdir(path);
71164+ sys_rmdir((char __force_user *)path);
71165 else
71166- sys_unlink(path);
71167+ sys_unlink((char __force_user *)path);
71168 }
71169 }
71170
71171@@ -305,7 +305,7 @@ static int __init do_name(void)
71172 int openflags = O_WRONLY|O_CREAT;
71173 if (ml != 1)
71174 openflags |= O_TRUNC;
71175- wfd = sys_open(collected, openflags, mode);
71176+ wfd = sys_open((char __force_user *)collected, openflags, mode);
71177
71178 if (wfd >= 0) {
71179 sys_fchown(wfd, uid, gid);
71180@@ -317,17 +317,17 @@ static int __init do_name(void)
71181 }
71182 }
71183 } else if (S_ISDIR(mode)) {
71184- sys_mkdir(collected, mode);
71185- sys_chown(collected, uid, gid);
71186- sys_chmod(collected, mode);
71187+ sys_mkdir((char __force_user *)collected, mode);
71188+ sys_chown((char __force_user *)collected, uid, gid);
71189+ sys_chmod((char __force_user *)collected, mode);
71190 dir_add(collected, mtime);
71191 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
71192 S_ISFIFO(mode) || S_ISSOCK(mode)) {
71193 if (maybe_link() == 0) {
71194- sys_mknod(collected, mode, rdev);
71195- sys_chown(collected, uid, gid);
71196- sys_chmod(collected, mode);
71197- do_utime(collected, mtime);
71198+ sys_mknod((char __force_user *)collected, mode, rdev);
71199+ sys_chown((char __force_user *)collected, uid, gid);
71200+ sys_chmod((char __force_user *)collected, mode);
71201+ do_utime((char __force_user *)collected, mtime);
71202 }
71203 }
71204 return 0;
71205@@ -336,15 +336,15 @@ static int __init do_name(void)
71206 static int __init do_copy(void)
71207 {
71208 if (count >= body_len) {
71209- sys_write(wfd, victim, body_len);
71210+ sys_write(wfd, (char __force_user *)victim, body_len);
71211 sys_close(wfd);
71212- do_utime(vcollected, mtime);
71213+ do_utime((char __force_user *)vcollected, mtime);
71214 kfree(vcollected);
71215 eat(body_len);
71216 state = SkipIt;
71217 return 0;
71218 } else {
71219- sys_write(wfd, victim, count);
71220+ sys_write(wfd, (char __force_user *)victim, count);
71221 body_len -= count;
71222 eat(count);
71223 return 1;
71224@@ -355,9 +355,9 @@ static int __init do_symlink(void)
71225 {
71226 collected[N_ALIGN(name_len) + body_len] = '\0';
71227 clean_path(collected, 0);
71228- sys_symlink(collected + N_ALIGN(name_len), collected);
71229- sys_lchown(collected, uid, gid);
71230- do_utime(collected, mtime);
71231+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
71232+ sys_lchown((char __force_user *)collected, uid, gid);
71233+ do_utime((char __force_user *)collected, mtime);
71234 state = SkipIt;
71235 next_state = Reset;
71236 return 0;
71237diff --git a/init/main.c b/init/main.c
71238index 1eb4bd5..fea5bbe 100644
71239--- a/init/main.c
71240+++ b/init/main.c
71241@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
71242 #ifdef CONFIG_TC
71243 extern void tc_init(void);
71244 #endif
71245+extern void grsecurity_init(void);
71246
71247 enum system_states system_state __read_mostly;
71248 EXPORT_SYMBOL(system_state);
71249@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
71250
71251 __setup("reset_devices", set_reset_devices);
71252
71253+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
71254+extern char pax_enter_kernel_user[];
71255+extern char pax_exit_kernel_user[];
71256+extern pgdval_t clone_pgd_mask;
71257+#endif
71258+
71259+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
71260+static int __init setup_pax_nouderef(char *str)
71261+{
71262+#ifdef CONFIG_X86_32
71263+ unsigned int cpu;
71264+ struct desc_struct *gdt;
71265+
71266+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
71267+ gdt = get_cpu_gdt_table(cpu);
71268+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
71269+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
71270+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
71271+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
71272+ }
71273+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
71274+#else
71275+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
71276+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
71277+ clone_pgd_mask = ~(pgdval_t)0UL;
71278+#endif
71279+
71280+ return 0;
71281+}
71282+early_param("pax_nouderef", setup_pax_nouderef);
71283+#endif
71284+
71285+#ifdef CONFIG_PAX_SOFTMODE
71286+int pax_softmode;
71287+
71288+static int __init setup_pax_softmode(char *str)
71289+{
71290+ get_option(&str, &pax_softmode);
71291+ return 1;
71292+}
71293+__setup("pax_softmode=", setup_pax_softmode);
71294+#endif
71295+
71296 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
71297 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
71298 static const char *panic_later, *panic_param;
71299@@ -705,52 +749,53 @@ int initcall_debug;
71300 core_param(initcall_debug, initcall_debug, bool, 0644);
71301
71302 static char msgbuf[64];
71303-static struct boot_trace_call call;
71304-static struct boot_trace_ret ret;
71305+static struct boot_trace_call trace_call;
71306+static struct boot_trace_ret trace_ret;
71307
71308 int do_one_initcall(initcall_t fn)
71309 {
71310 int count = preempt_count();
71311 ktime_t calltime, delta, rettime;
71312+ const char *msg1 = "", *msg2 = "";
71313
71314 if (initcall_debug) {
71315- call.caller = task_pid_nr(current);
71316- printk("calling %pF @ %i\n", fn, call.caller);
71317+ trace_call.caller = task_pid_nr(current);
71318+ printk("calling %pF @ %i\n", fn, trace_call.caller);
71319 calltime = ktime_get();
71320- trace_boot_call(&call, fn);
71321+ trace_boot_call(&trace_call, fn);
71322 enable_boot_trace();
71323 }
71324
71325- ret.result = fn();
71326+ trace_ret.result = fn();
71327
71328 if (initcall_debug) {
71329 disable_boot_trace();
71330 rettime = ktime_get();
71331 delta = ktime_sub(rettime, calltime);
71332- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71333- trace_boot_ret(&ret, fn);
71334+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71335+ trace_boot_ret(&trace_ret, fn);
71336 printk("initcall %pF returned %d after %Ld usecs\n", fn,
71337- ret.result, ret.duration);
71338+ trace_ret.result, trace_ret.duration);
71339 }
71340
71341 msgbuf[0] = 0;
71342
71343- if (ret.result && ret.result != -ENODEV && initcall_debug)
71344- sprintf(msgbuf, "error code %d ", ret.result);
71345+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
71346+ sprintf(msgbuf, "error code %d ", trace_ret.result);
71347
71348 if (preempt_count() != count) {
71349- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
71350+ msg1 = " preemption imbalance";
71351 preempt_count() = count;
71352 }
71353 if (irqs_disabled()) {
71354- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
71355+ msg2 = " disabled interrupts";
71356 local_irq_enable();
71357 }
71358- if (msgbuf[0]) {
71359- printk("initcall %pF returned with %s\n", fn, msgbuf);
71360+ if (msgbuf[0] || *msg1 || *msg2) {
71361+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
71362 }
71363
71364- return ret.result;
71365+ return trace_ret.result;
71366 }
71367
71368
71369@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
71370 if (!ramdisk_execute_command)
71371 ramdisk_execute_command = "/init";
71372
71373- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
71374+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
71375 ramdisk_execute_command = NULL;
71376 prepare_namespace();
71377 }
71378
71379+ grsecurity_init();
71380+
71381 /*
71382 * Ok, we have completed the initial bootup, and
71383 * we're essentially up and running. Get rid of the
71384diff --git a/init/noinitramfs.c b/init/noinitramfs.c
71385index f4c1a3a..96c19bd 100644
71386--- a/init/noinitramfs.c
71387+++ b/init/noinitramfs.c
71388@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
71389 {
71390 int err;
71391
71392- err = sys_mkdir("/dev", 0755);
71393+ err = sys_mkdir((const char __user *)"/dev", 0755);
71394 if (err < 0)
71395 goto out;
71396
71397@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
71398 if (err < 0)
71399 goto out;
71400
71401- err = sys_mkdir("/root", 0700);
71402+ err = sys_mkdir((const char __user *)"/root", 0700);
71403 if (err < 0)
71404 goto out;
71405
71406diff --git a/ipc/mqueue.c b/ipc/mqueue.c
71407index d01bc14..8df81db 100644
71408--- a/ipc/mqueue.c
71409+++ b/ipc/mqueue.c
71410@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
71411 mq_bytes = (mq_msg_tblsz +
71412 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
71413
71414+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
71415 spin_lock(&mq_lock);
71416 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
71417 u->mq_bytes + mq_bytes >
71418diff --git a/ipc/msg.c b/ipc/msg.c
71419index 779f762..4af9e36 100644
71420--- a/ipc/msg.c
71421+++ b/ipc/msg.c
71422@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71423 return security_msg_queue_associate(msq, msgflg);
71424 }
71425
71426+static struct ipc_ops msg_ops = {
71427+ .getnew = newque,
71428+ .associate = msg_security,
71429+ .more_checks = NULL
71430+};
71431+
71432 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71433 {
71434 struct ipc_namespace *ns;
71435- struct ipc_ops msg_ops;
71436 struct ipc_params msg_params;
71437
71438 ns = current->nsproxy->ipc_ns;
71439
71440- msg_ops.getnew = newque;
71441- msg_ops.associate = msg_security;
71442- msg_ops.more_checks = NULL;
71443-
71444 msg_params.key = key;
71445 msg_params.flg = msgflg;
71446
71447diff --git a/ipc/sem.c b/ipc/sem.c
71448index b781007..f738b04 100644
71449--- a/ipc/sem.c
71450+++ b/ipc/sem.c
71451@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71452 return 0;
71453 }
71454
71455+static struct ipc_ops sem_ops = {
71456+ .getnew = newary,
71457+ .associate = sem_security,
71458+ .more_checks = sem_more_checks
71459+};
71460+
71461 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71462 {
71463 struct ipc_namespace *ns;
71464- struct ipc_ops sem_ops;
71465 struct ipc_params sem_params;
71466
71467 ns = current->nsproxy->ipc_ns;
71468@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71469 if (nsems < 0 || nsems > ns->sc_semmsl)
71470 return -EINVAL;
71471
71472- sem_ops.getnew = newary;
71473- sem_ops.associate = sem_security;
71474- sem_ops.more_checks = sem_more_checks;
71475-
71476 sem_params.key = key;
71477 sem_params.flg = semflg;
71478 sem_params.u.nsems = nsems;
71479@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71480 ushort* sem_io = fast_sem_io;
71481 int nsems;
71482
71483+ pax_track_stack();
71484+
71485 sma = sem_lock_check(ns, semid);
71486 if (IS_ERR(sma))
71487 return PTR_ERR(sma);
71488@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71489 unsigned long jiffies_left = 0;
71490 struct ipc_namespace *ns;
71491
71492+ pax_track_stack();
71493+
71494 ns = current->nsproxy->ipc_ns;
71495
71496 if (nsops < 1 || semid < 0)
71497diff --git a/ipc/shm.c b/ipc/shm.c
71498index d30732c..e4992cd 100644
71499--- a/ipc/shm.c
71500+++ b/ipc/shm.c
71501@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71502 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71503 #endif
71504
71505+#ifdef CONFIG_GRKERNSEC
71506+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71507+ const time_t shm_createtime, const uid_t cuid,
71508+ const int shmid);
71509+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71510+ const time_t shm_createtime);
71511+#endif
71512+
71513 void shm_init_ns(struct ipc_namespace *ns)
71514 {
71515 ns->shm_ctlmax = SHMMAX;
71516@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71517 shp->shm_lprid = 0;
71518 shp->shm_atim = shp->shm_dtim = 0;
71519 shp->shm_ctim = get_seconds();
71520+#ifdef CONFIG_GRKERNSEC
71521+ {
71522+ struct timespec timeval;
71523+ do_posix_clock_monotonic_gettime(&timeval);
71524+
71525+ shp->shm_createtime = timeval.tv_sec;
71526+ }
71527+#endif
71528 shp->shm_segsz = size;
71529 shp->shm_nattch = 0;
71530 shp->shm_file = file;
71531@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71532 return 0;
71533 }
71534
71535+static struct ipc_ops shm_ops = {
71536+ .getnew = newseg,
71537+ .associate = shm_security,
71538+ .more_checks = shm_more_checks
71539+};
71540+
71541 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71542 {
71543 struct ipc_namespace *ns;
71544- struct ipc_ops shm_ops;
71545 struct ipc_params shm_params;
71546
71547 ns = current->nsproxy->ipc_ns;
71548
71549- shm_ops.getnew = newseg;
71550- shm_ops.associate = shm_security;
71551- shm_ops.more_checks = shm_more_checks;
71552-
71553 shm_params.key = key;
71554 shm_params.flg = shmflg;
71555 shm_params.u.size = size;
71556@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71557 f_mode = FMODE_READ | FMODE_WRITE;
71558 }
71559 if (shmflg & SHM_EXEC) {
71560+
71561+#ifdef CONFIG_PAX_MPROTECT
71562+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
71563+ goto out;
71564+#endif
71565+
71566 prot |= PROT_EXEC;
71567 acc_mode |= S_IXUGO;
71568 }
71569@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71570 if (err)
71571 goto out_unlock;
71572
71573+#ifdef CONFIG_GRKERNSEC
71574+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71575+ shp->shm_perm.cuid, shmid) ||
71576+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71577+ err = -EACCES;
71578+ goto out_unlock;
71579+ }
71580+#endif
71581+
71582 path.dentry = dget(shp->shm_file->f_path.dentry);
71583 path.mnt = shp->shm_file->f_path.mnt;
71584 shp->shm_nattch++;
71585+#ifdef CONFIG_GRKERNSEC
71586+ shp->shm_lapid = current->pid;
71587+#endif
71588 size = i_size_read(path.dentry->d_inode);
71589 shm_unlock(shp);
71590
71591diff --git a/kernel/acct.c b/kernel/acct.c
71592index a6605ca..ca91111 100644
71593--- a/kernel/acct.c
71594+++ b/kernel/acct.c
71595@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71596 */
71597 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71598 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71599- file->f_op->write(file, (char *)&ac,
71600+ file->f_op->write(file, (char __force_user *)&ac,
71601 sizeof(acct_t), &file->f_pos);
71602 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71603 set_fs(fs);
71604diff --git a/kernel/audit.c b/kernel/audit.c
71605index 5feed23..48415fd 100644
71606--- a/kernel/audit.c
71607+++ b/kernel/audit.c
71608@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71609 3) suppressed due to audit_rate_limit
71610 4) suppressed due to audit_backlog_limit
71611 */
71612-static atomic_t audit_lost = ATOMIC_INIT(0);
71613+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71614
71615 /* The netlink socket. */
71616 static struct sock *audit_sock;
71617@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71618 unsigned long now;
71619 int print;
71620
71621- atomic_inc(&audit_lost);
71622+ atomic_inc_unchecked(&audit_lost);
71623
71624 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71625
71626@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71627 printk(KERN_WARNING
71628 "audit: audit_lost=%d audit_rate_limit=%d "
71629 "audit_backlog_limit=%d\n",
71630- atomic_read(&audit_lost),
71631+ atomic_read_unchecked(&audit_lost),
71632 audit_rate_limit,
71633 audit_backlog_limit);
71634 audit_panic(message);
71635@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71636 status_set.pid = audit_pid;
71637 status_set.rate_limit = audit_rate_limit;
71638 status_set.backlog_limit = audit_backlog_limit;
71639- status_set.lost = atomic_read(&audit_lost);
71640+ status_set.lost = atomic_read_unchecked(&audit_lost);
71641 status_set.backlog = skb_queue_len(&audit_skb_queue);
71642 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71643 &status_set, sizeof(status_set));
71644@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71645 spin_unlock_irq(&tsk->sighand->siglock);
71646 }
71647 read_unlock(&tasklist_lock);
71648- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71649- &s, sizeof(s));
71650+
71651+ if (!err)
71652+ audit_send_reply(NETLINK_CB(skb).pid, seq,
71653+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71654 break;
71655 }
71656 case AUDIT_TTY_SET: {
71657@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71658 avail = audit_expand(ab,
71659 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71660 if (!avail)
71661- goto out;
71662+ goto out_va_end;
71663 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71664 }
71665- va_end(args2);
71666 if (len > 0)
71667 skb_put(skb, len);
71668+out_va_end:
71669+ va_end(args2);
71670 out:
71671 return;
71672 }
71673diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71674index 267e484..ac41bc3 100644
71675--- a/kernel/auditsc.c
71676+++ b/kernel/auditsc.c
71677@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71678 struct audit_buffer **ab,
71679 struct audit_aux_data_execve *axi)
71680 {
71681- int i;
71682- size_t len, len_sent = 0;
71683+ int i, len;
71684+ size_t len_sent = 0;
71685 const char __user *p;
71686 char *buf;
71687
71688@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71689 }
71690
71691 /* global counter which is incremented every time something logs in */
71692-static atomic_t session_id = ATOMIC_INIT(0);
71693+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71694
71695 /**
71696 * audit_set_loginuid - set a task's audit_context loginuid
71697@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71698 */
71699 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71700 {
71701- unsigned int sessionid = atomic_inc_return(&session_id);
71702+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71703 struct audit_context *context = task->audit_context;
71704
71705 if (context && context->in_syscall) {
71706diff --git a/kernel/capability.c b/kernel/capability.c
71707index 8a944f5..db5001e 100644
71708--- a/kernel/capability.c
71709+++ b/kernel/capability.c
71710@@ -305,10 +305,26 @@ int capable(int cap)
71711 BUG();
71712 }
71713
71714- if (security_capable(cap) == 0) {
71715+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71716 current->flags |= PF_SUPERPRIV;
71717 return 1;
71718 }
71719 return 0;
71720 }
71721+
71722+int capable_nolog(int cap)
71723+{
71724+ if (unlikely(!cap_valid(cap))) {
71725+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71726+ BUG();
71727+ }
71728+
71729+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71730+ current->flags |= PF_SUPERPRIV;
71731+ return 1;
71732+ }
71733+ return 0;
71734+}
71735+
71736 EXPORT_SYMBOL(capable);
71737+EXPORT_SYMBOL(capable_nolog);
71738diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71739index 1fbcc74..7000012 100644
71740--- a/kernel/cgroup.c
71741+++ b/kernel/cgroup.c
71742@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71743 struct hlist_head *hhead;
71744 struct cg_cgroup_link *link;
71745
71746+ pax_track_stack();
71747+
71748 /* First see if we already have a cgroup group that matches
71749 * the desired set */
71750 read_lock(&css_set_lock);
71751diff --git a/kernel/compat.c b/kernel/compat.c
71752index 8bc5578..186e44a 100644
71753--- a/kernel/compat.c
71754+++ b/kernel/compat.c
71755@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71756 mm_segment_t oldfs;
71757 long ret;
71758
71759- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71760+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71761 oldfs = get_fs();
71762 set_fs(KERNEL_DS);
71763 ret = hrtimer_nanosleep_restart(restart);
71764@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71765 oldfs = get_fs();
71766 set_fs(KERNEL_DS);
71767 ret = hrtimer_nanosleep(&tu,
71768- rmtp ? (struct timespec __user *)&rmt : NULL,
71769+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
71770 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71771 set_fs(oldfs);
71772
71773@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71774 mm_segment_t old_fs = get_fs();
71775
71776 set_fs(KERNEL_DS);
71777- ret = sys_sigpending((old_sigset_t __user *) &s);
71778+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
71779 set_fs(old_fs);
71780 if (ret == 0)
71781 ret = put_user(s, set);
71782@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71783 old_fs = get_fs();
71784 set_fs(KERNEL_DS);
71785 ret = sys_sigprocmask(how,
71786- set ? (old_sigset_t __user *) &s : NULL,
71787- oset ? (old_sigset_t __user *) &s : NULL);
71788+ set ? (old_sigset_t __force_user *) &s : NULL,
71789+ oset ? (old_sigset_t __force_user *) &s : NULL);
71790 set_fs(old_fs);
71791 if (ret == 0)
71792 if (oset)
71793@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
71794 mm_segment_t old_fs = get_fs();
71795
71796 set_fs(KERNEL_DS);
71797- ret = sys_old_getrlimit(resource, &r);
71798+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
71799 set_fs(old_fs);
71800
71801 if (!ret) {
71802@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
71803 mm_segment_t old_fs = get_fs();
71804
71805 set_fs(KERNEL_DS);
71806- ret = sys_getrusage(who, (struct rusage __user *) &r);
71807+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
71808 set_fs(old_fs);
71809
71810 if (ret)
71811@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
71812 set_fs (KERNEL_DS);
71813 ret = sys_wait4(pid,
71814 (stat_addr ?
71815- (unsigned int __user *) &status : NULL),
71816- options, (struct rusage __user *) &r);
71817+ (unsigned int __force_user *) &status : NULL),
71818+ options, (struct rusage __force_user *) &r);
71819 set_fs (old_fs);
71820
71821 if (ret > 0) {
71822@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
71823 memset(&info, 0, sizeof(info));
71824
71825 set_fs(KERNEL_DS);
71826- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
71827- uru ? (struct rusage __user *)&ru : NULL);
71828+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
71829+ uru ? (struct rusage __force_user *)&ru : NULL);
71830 set_fs(old_fs);
71831
71832 if ((ret < 0) || (info.si_signo == 0))
71833@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
71834 oldfs = get_fs();
71835 set_fs(KERNEL_DS);
71836 err = sys_timer_settime(timer_id, flags,
71837- (struct itimerspec __user *) &newts,
71838- (struct itimerspec __user *) &oldts);
71839+ (struct itimerspec __force_user *) &newts,
71840+ (struct itimerspec __force_user *) &oldts);
71841 set_fs(oldfs);
71842 if (!err && old && put_compat_itimerspec(old, &oldts))
71843 return -EFAULT;
71844@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
71845 oldfs = get_fs();
71846 set_fs(KERNEL_DS);
71847 err = sys_timer_gettime(timer_id,
71848- (struct itimerspec __user *) &ts);
71849+ (struct itimerspec __force_user *) &ts);
71850 set_fs(oldfs);
71851 if (!err && put_compat_itimerspec(setting, &ts))
71852 return -EFAULT;
71853@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
71854 oldfs = get_fs();
71855 set_fs(KERNEL_DS);
71856 err = sys_clock_settime(which_clock,
71857- (struct timespec __user *) &ts);
71858+ (struct timespec __force_user *) &ts);
71859 set_fs(oldfs);
71860 return err;
71861 }
71862@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
71863 oldfs = get_fs();
71864 set_fs(KERNEL_DS);
71865 err = sys_clock_gettime(which_clock,
71866- (struct timespec __user *) &ts);
71867+ (struct timespec __force_user *) &ts);
71868 set_fs(oldfs);
71869 if (!err && put_compat_timespec(&ts, tp))
71870 return -EFAULT;
71871@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
71872 oldfs = get_fs();
71873 set_fs(KERNEL_DS);
71874 err = sys_clock_getres(which_clock,
71875- (struct timespec __user *) &ts);
71876+ (struct timespec __force_user *) &ts);
71877 set_fs(oldfs);
71878 if (!err && tp && put_compat_timespec(&ts, tp))
71879 return -EFAULT;
71880@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
71881 long err;
71882 mm_segment_t oldfs;
71883 struct timespec tu;
71884- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
71885+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
71886
71887- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
71888+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
71889 oldfs = get_fs();
71890 set_fs(KERNEL_DS);
71891 err = clock_nanosleep_restart(restart);
71892@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
71893 oldfs = get_fs();
71894 set_fs(KERNEL_DS);
71895 err = sys_clock_nanosleep(which_clock, flags,
71896- (struct timespec __user *) &in,
71897- (struct timespec __user *) &out);
71898+ (struct timespec __force_user *) &in,
71899+ (struct timespec __force_user *) &out);
71900 set_fs(oldfs);
71901
71902 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
71903diff --git a/kernel/configs.c b/kernel/configs.c
71904index abaee68..047facd 100644
71905--- a/kernel/configs.c
71906+++ b/kernel/configs.c
71907@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
71908 struct proc_dir_entry *entry;
71909
71910 /* create the current config file */
71911+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71912+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
71913+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
71914+ &ikconfig_file_ops);
71915+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71916+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
71917+ &ikconfig_file_ops);
71918+#endif
71919+#else
71920 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
71921 &ikconfig_file_ops);
71922+#endif
71923+
71924 if (!entry)
71925 return -ENOMEM;
71926
71927diff --git a/kernel/cpu.c b/kernel/cpu.c
71928index 3f2f04f..4e53ded 100644
71929--- a/kernel/cpu.c
71930+++ b/kernel/cpu.c
71931@@ -20,7 +20,7 @@
71932 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
71933 static DEFINE_MUTEX(cpu_add_remove_lock);
71934
71935-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
71936+static RAW_NOTIFIER_HEAD(cpu_chain);
71937
71938 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
71939 * Should always be manipulated under cpu_add_remove_lock
71940diff --git a/kernel/cred.c b/kernel/cred.c
71941index 0b5b5fc..f7fe51a 100644
71942--- a/kernel/cred.c
71943+++ b/kernel/cred.c
71944@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
71945 */
71946 void __put_cred(struct cred *cred)
71947 {
71948+ pax_track_stack();
71949+
71950 kdebug("__put_cred(%p{%d,%d})", cred,
71951 atomic_read(&cred->usage),
71952 read_cred_subscribers(cred));
71953@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
71954 {
71955 struct cred *cred;
71956
71957+ pax_track_stack();
71958+
71959 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
71960 atomic_read(&tsk->cred->usage),
71961 read_cred_subscribers(tsk->cred));
71962@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
71963 validate_creds(cred);
71964 put_cred(cred);
71965 }
71966+
71967+#ifdef CONFIG_GRKERNSEC_SETXID
71968+ cred = (struct cred *) tsk->delayed_cred;
71969+ if (cred) {
71970+ tsk->delayed_cred = NULL;
71971+ validate_creds(cred);
71972+ put_cred(cred);
71973+ }
71974+#endif
71975 }
71976
71977 /**
71978@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
71979 {
71980 const struct cred *cred;
71981
71982+ pax_track_stack();
71983+
71984 rcu_read_lock();
71985
71986 do {
71987@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
71988 {
71989 struct cred *new;
71990
71991+ pax_track_stack();
71992+
71993 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
71994 if (!new)
71995 return NULL;
71996@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
71997 const struct cred *old;
71998 struct cred *new;
71999
72000+ pax_track_stack();
72001+
72002 validate_process_creds();
72003
72004 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72005@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
72006 struct thread_group_cred *tgcred = NULL;
72007 struct cred *new;
72008
72009+ pax_track_stack();
72010+
72011 #ifdef CONFIG_KEYS
72012 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
72013 if (!tgcred)
72014@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
72015 struct cred *new;
72016 int ret;
72017
72018+ pax_track_stack();
72019+
72020 mutex_init(&p->cred_guard_mutex);
72021
72022 if (
72023@@ -523,11 +546,13 @@ error_put:
72024 * Always returns 0 thus allowing this function to be tail-called at the end
72025 * of, say, sys_setgid().
72026 */
72027-int commit_creds(struct cred *new)
72028+static int __commit_creds(struct cred *new)
72029 {
72030 struct task_struct *task = current;
72031 const struct cred *old = task->real_cred;
72032
72033+ pax_track_stack();
72034+
72035 kdebug("commit_creds(%p{%d,%d})", new,
72036 atomic_read(&new->usage),
72037 read_cred_subscribers(new));
72038@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
72039
72040 get_cred(new); /* we will require a ref for the subj creds too */
72041
72042+ gr_set_role_label(task, new->uid, new->gid);
72043+
72044 /* dumpability changes */
72045 if (old->euid != new->euid ||
72046 old->egid != new->egid ||
72047@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
72048 key_fsgid_changed(task);
72049
72050 /* do it
72051- * - What if a process setreuid()'s and this brings the
72052- * new uid over his NPROC rlimit? We can check this now
72053- * cheaply with the new uid cache, so if it matters
72054- * we should be checking for it. -DaveM
72055+ * RLIMIT_NPROC limits on user->processes have already been checked
72056+ * in set_user().
72057 */
72058 alter_cred_subscribers(new, 2);
72059 if (new->user != old->user)
72060@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
72061 put_cred(old);
72062 return 0;
72063 }
72064+
72065+#ifdef CONFIG_GRKERNSEC_SETXID
72066+extern int set_user(struct cred *new);
72067+
72068+void gr_delayed_cred_worker(void)
72069+{
72070+ const struct cred *new = current->delayed_cred;
72071+ struct cred *ncred;
72072+
72073+ current->delayed_cred = NULL;
72074+
72075+ if (current_uid() && new != NULL) {
72076+ // from doing get_cred on it when queueing this
72077+ put_cred(new);
72078+ return;
72079+ } else if (new == NULL)
72080+ return;
72081+
72082+ ncred = prepare_creds();
72083+ if (!ncred)
72084+ goto die;
72085+ // uids
72086+ ncred->uid = new->uid;
72087+ ncred->euid = new->euid;
72088+ ncred->suid = new->suid;
72089+ ncred->fsuid = new->fsuid;
72090+ // gids
72091+ ncred->gid = new->gid;
72092+ ncred->egid = new->egid;
72093+ ncred->sgid = new->sgid;
72094+ ncred->fsgid = new->fsgid;
72095+ // groups
72096+ if (set_groups(ncred, new->group_info) < 0) {
72097+ abort_creds(ncred);
72098+ goto die;
72099+ }
72100+ // caps
72101+ ncred->securebits = new->securebits;
72102+ ncred->cap_inheritable = new->cap_inheritable;
72103+ ncred->cap_permitted = new->cap_permitted;
72104+ ncred->cap_effective = new->cap_effective;
72105+ ncred->cap_bset = new->cap_bset;
72106+
72107+ if (set_user(ncred)) {
72108+ abort_creds(ncred);
72109+ goto die;
72110+ }
72111+
72112+ // from doing get_cred on it when queueing this
72113+ put_cred(new);
72114+
72115+ __commit_creds(ncred);
72116+ return;
72117+die:
72118+ // from doing get_cred on it when queueing this
72119+ put_cred(new);
72120+ do_group_exit(SIGKILL);
72121+}
72122+#endif
72123+
72124+int commit_creds(struct cred *new)
72125+{
72126+#ifdef CONFIG_GRKERNSEC_SETXID
72127+ struct task_struct *t;
72128+
72129+ /* we won't get called with tasklist_lock held for writing
72130+ and interrupts disabled as the cred struct in that case is
72131+ init_cred
72132+ */
72133+ if (grsec_enable_setxid && !current_is_single_threaded() &&
72134+ !current_uid() && new->uid) {
72135+ rcu_read_lock();
72136+ read_lock(&tasklist_lock);
72137+ for (t = next_thread(current); t != current;
72138+ t = next_thread(t)) {
72139+ if (t->delayed_cred == NULL) {
72140+ t->delayed_cred = get_cred(new);
72141+ set_tsk_need_resched(t);
72142+ }
72143+ }
72144+ read_unlock(&tasklist_lock);
72145+ rcu_read_unlock();
72146+ }
72147+#endif
72148+ return __commit_creds(new);
72149+}
72150+
72151 EXPORT_SYMBOL(commit_creds);
72152
72153+
72154 /**
72155 * abort_creds - Discard a set of credentials and unlock the current task
72156 * @new: The credentials that were going to be applied
72157@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
72158 */
72159 void abort_creds(struct cred *new)
72160 {
72161+ pax_track_stack();
72162+
72163 kdebug("abort_creds(%p{%d,%d})", new,
72164 atomic_read(&new->usage),
72165 read_cred_subscribers(new));
72166@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
72167 {
72168 const struct cred *old = current->cred;
72169
72170+ pax_track_stack();
72171+
72172 kdebug("override_creds(%p{%d,%d})", new,
72173 atomic_read(&new->usage),
72174 read_cred_subscribers(new));
72175@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
72176 {
72177 const struct cred *override = current->cred;
72178
72179+ pax_track_stack();
72180+
72181 kdebug("revert_creds(%p{%d,%d})", old,
72182 atomic_read(&old->usage),
72183 read_cred_subscribers(old));
72184@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
72185 const struct cred *old;
72186 struct cred *new;
72187
72188+ pax_track_stack();
72189+
72190 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72191 if (!new)
72192 return NULL;
72193@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
72194 */
72195 int set_security_override(struct cred *new, u32 secid)
72196 {
72197+ pax_track_stack();
72198+
72199 return security_kernel_act_as(new, secid);
72200 }
72201 EXPORT_SYMBOL(set_security_override);
72202@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
72203 u32 secid;
72204 int ret;
72205
72206+ pax_track_stack();
72207+
72208 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
72209 if (ret < 0)
72210 return ret;
72211diff --git a/kernel/exit.c b/kernel/exit.c
72212index 0f8fae3..9344a56 100644
72213--- a/kernel/exit.c
72214+++ b/kernel/exit.c
72215@@ -55,6 +55,10 @@
72216 #include <asm/pgtable.h>
72217 #include <asm/mmu_context.h>
72218
72219+#ifdef CONFIG_GRKERNSEC
72220+extern rwlock_t grsec_exec_file_lock;
72221+#endif
72222+
72223 static void exit_mm(struct task_struct * tsk);
72224
72225 static void __unhash_process(struct task_struct *p)
72226@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
72227 struct task_struct *leader;
72228 int zap_leader;
72229 repeat:
72230+#ifdef CONFIG_NET
72231+ gr_del_task_from_ip_table(p);
72232+#endif
72233+
72234 tracehook_prepare_release_task(p);
72235 /* don't need to get the RCU readlock here - the process is dead and
72236 * can't be modifying its own credentials */
72237@@ -397,7 +405,7 @@ int allow_signal(int sig)
72238 * know it'll be handled, so that they don't get converted to
72239 * SIGKILL or just silently dropped.
72240 */
72241- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
72242+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
72243 recalc_sigpending();
72244 spin_unlock_irq(&current->sighand->siglock);
72245 return 0;
72246@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
72247 vsnprintf(current->comm, sizeof(current->comm), name, args);
72248 va_end(args);
72249
72250+#ifdef CONFIG_GRKERNSEC
72251+ write_lock(&grsec_exec_file_lock);
72252+ if (current->exec_file) {
72253+ fput(current->exec_file);
72254+ current->exec_file = NULL;
72255+ }
72256+ write_unlock(&grsec_exec_file_lock);
72257+#endif
72258+
72259+ gr_set_kernel_label(current);
72260+
72261 /*
72262 * If we were started as result of loading a module, close all of the
72263 * user space pages. We don't need them, and if we didn't close them
72264@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
72265 struct task_struct *tsk = current;
72266 int group_dead;
72267
72268- profile_task_exit(tsk);
72269-
72270- WARN_ON(atomic_read(&tsk->fs_excl));
72271-
72272+ /*
72273+ * Check this first since set_fs() below depends on
72274+ * current_thread_info(), which we better not access when we're in
72275+ * interrupt context. Other than that, we want to do the set_fs()
72276+ * as early as possible.
72277+ */
72278 if (unlikely(in_interrupt()))
72279 panic("Aiee, killing interrupt handler!");
72280- if (unlikely(!tsk->pid))
72281- panic("Attempted to kill the idle task!");
72282
72283 /*
72284- * If do_exit is called because this processes oopsed, it's possible
72285+ * If do_exit is called because this processes Oops'ed, it's possible
72286 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
72287 * continuing. Amongst other possible reasons, this is to prevent
72288 * mm_release()->clear_child_tid() from writing to a user-controlled
72289@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
72290 */
72291 set_fs(USER_DS);
72292
72293+ profile_task_exit(tsk);
72294+
72295+ WARN_ON(atomic_read(&tsk->fs_excl));
72296+
72297+ if (unlikely(!tsk->pid))
72298+ panic("Attempted to kill the idle task!");
72299+
72300 tracehook_report_exit(&code);
72301
72302 validate_creds_for_do_exit(tsk);
72303@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
72304 tsk->exit_code = code;
72305 taskstats_exit(tsk, group_dead);
72306
72307+ gr_acl_handle_psacct(tsk, code);
72308+ gr_acl_handle_exit();
72309+
72310 exit_mm(tsk);
72311
72312 if (group_dead)
72313@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
72314
72315 if (unlikely(wo->wo_flags & WNOWAIT)) {
72316 int exit_code = p->exit_code;
72317- int why, status;
72318+ int why;
72319
72320 get_task_struct(p);
72321 read_unlock(&tasklist_lock);
72322diff --git a/kernel/fork.c b/kernel/fork.c
72323index 4bde56f..29a9bab 100644
72324--- a/kernel/fork.c
72325+++ b/kernel/fork.c
72326@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
72327 *stackend = STACK_END_MAGIC; /* for overflow detection */
72328
72329 #ifdef CONFIG_CC_STACKPROTECTOR
72330- tsk->stack_canary = get_random_int();
72331+ tsk->stack_canary = pax_get_random_long();
72332 #endif
72333
72334 /* One for us, one for whoever does the "release_task()" (usually parent) */
72335@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72336 mm->locked_vm = 0;
72337 mm->mmap = NULL;
72338 mm->mmap_cache = NULL;
72339- mm->free_area_cache = oldmm->mmap_base;
72340- mm->cached_hole_size = ~0UL;
72341+ mm->free_area_cache = oldmm->free_area_cache;
72342+ mm->cached_hole_size = oldmm->cached_hole_size;
72343 mm->map_count = 0;
72344 cpumask_clear(mm_cpumask(mm));
72345 mm->mm_rb = RB_ROOT;
72346@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72347 tmp->vm_flags &= ~VM_LOCKED;
72348 tmp->vm_mm = mm;
72349 tmp->vm_next = tmp->vm_prev = NULL;
72350+ tmp->vm_mirror = NULL;
72351 anon_vma_link(tmp);
72352 file = tmp->vm_file;
72353 if (file) {
72354@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72355 if (retval)
72356 goto out;
72357 }
72358+
72359+#ifdef CONFIG_PAX_SEGMEXEC
72360+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
72361+ struct vm_area_struct *mpnt_m;
72362+
72363+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
72364+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
72365+
72366+ if (!mpnt->vm_mirror)
72367+ continue;
72368+
72369+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
72370+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
72371+ mpnt->vm_mirror = mpnt_m;
72372+ } else {
72373+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
72374+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
72375+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
72376+ mpnt->vm_mirror->vm_mirror = mpnt;
72377+ }
72378+ }
72379+ BUG_ON(mpnt_m);
72380+ }
72381+#endif
72382+
72383 /* a new mm has just been created */
72384 arch_dup_mmap(oldmm, mm);
72385 retval = 0;
72386@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
72387 write_unlock(&fs->lock);
72388 return -EAGAIN;
72389 }
72390- fs->users++;
72391+ atomic_inc(&fs->users);
72392 write_unlock(&fs->lock);
72393 return 0;
72394 }
72395 tsk->fs = copy_fs_struct(fs);
72396 if (!tsk->fs)
72397 return -ENOMEM;
72398+ gr_set_chroot_entries(tsk, &tsk->fs->root);
72399 return 0;
72400 }
72401
72402@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72403 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
72404 #endif
72405 retval = -EAGAIN;
72406+
72407+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
72408+
72409 if (atomic_read(&p->real_cred->user->processes) >=
72410 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
72411- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
72412- p->real_cred->user != INIT_USER)
72413+ if (p->real_cred->user != INIT_USER &&
72414+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
72415 goto bad_fork_free;
72416 }
72417+ current->flags &= ~PF_NPROC_EXCEEDED;
72418
72419 retval = copy_creds(p, clone_flags);
72420 if (retval < 0)
72421@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72422 goto bad_fork_free_pid;
72423 }
72424
72425+ gr_copy_label(p);
72426+
72427 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72428 /*
72429 * Clear TID on mm_release()?
72430@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
72431 bad_fork_free:
72432 free_task(p);
72433 fork_out:
72434+ gr_log_forkfail(retval);
72435+
72436 return ERR_PTR(retval);
72437 }
72438
72439@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
72440 if (clone_flags & CLONE_PARENT_SETTID)
72441 put_user(nr, parent_tidptr);
72442
72443+ gr_handle_brute_check();
72444+
72445 if (clone_flags & CLONE_VFORK) {
72446 p->vfork_done = &vfork;
72447 init_completion(&vfork);
72448@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72449 return 0;
72450
72451 /* don't need lock here; in the worst case we'll do useless copy */
72452- if (fs->users == 1)
72453+ if (atomic_read(&fs->users) == 1)
72454 return 0;
72455
72456 *new_fsp = copy_fs_struct(fs);
72457@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72458 fs = current->fs;
72459 write_lock(&fs->lock);
72460 current->fs = new_fs;
72461- if (--fs->users)
72462+ gr_set_chroot_entries(current, &current->fs->root);
72463+ if (atomic_dec_return(&fs->users))
72464 new_fs = NULL;
72465 else
72466 new_fs = fs;
72467diff --git a/kernel/futex.c b/kernel/futex.c
72468index fb98c9f..333faec 100644
72469--- a/kernel/futex.c
72470+++ b/kernel/futex.c
72471@@ -54,6 +54,7 @@
72472 #include <linux/mount.h>
72473 #include <linux/pagemap.h>
72474 #include <linux/syscalls.h>
72475+#include <linux/ptrace.h>
72476 #include <linux/signal.h>
72477 #include <linux/module.h>
72478 #include <linux/magic.h>
72479@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72480 struct page *page;
72481 int err, ro = 0;
72482
72483+#ifdef CONFIG_PAX_SEGMEXEC
72484+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72485+ return -EFAULT;
72486+#endif
72487+
72488 /*
72489 * The futex address must be "naturally" aligned.
72490 */
72491@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72492 struct futex_q q;
72493 int ret;
72494
72495+ pax_track_stack();
72496+
72497 if (!bitset)
72498 return -EINVAL;
72499
72500@@ -1871,7 +1879,7 @@ retry:
72501
72502 restart = &current_thread_info()->restart_block;
72503 restart->fn = futex_wait_restart;
72504- restart->futex.uaddr = (u32 *)uaddr;
72505+ restart->futex.uaddr = uaddr;
72506 restart->futex.val = val;
72507 restart->futex.time = abs_time->tv64;
72508 restart->futex.bitset = bitset;
72509@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72510 struct futex_q q;
72511 int res, ret;
72512
72513+ pax_track_stack();
72514+
72515 if (!bitset)
72516 return -EINVAL;
72517
72518@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72519 if (!p)
72520 goto err_unlock;
72521 ret = -EPERM;
72522+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72523+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72524+ goto err_unlock;
72525+#endif
72526 pcred = __task_cred(p);
72527 if (cred->euid != pcred->euid &&
72528 cred->euid != pcred->uid &&
72529@@ -2489,7 +2503,7 @@ retry:
72530 */
72531 static inline int fetch_robust_entry(struct robust_list __user **entry,
72532 struct robust_list __user * __user *head,
72533- int *pi)
72534+ unsigned int *pi)
72535 {
72536 unsigned long uentry;
72537
72538@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72539 {
72540 u32 curval;
72541 int i;
72542+ mm_segment_t oldfs;
72543
72544 /*
72545 * This will fail and we want it. Some arch implementations do
72546@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72547 * implementation, the non functional ones will return
72548 * -ENOSYS.
72549 */
72550+ oldfs = get_fs();
72551+ set_fs(USER_DS);
72552 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72553+ set_fs(oldfs);
72554 if (curval == -EFAULT)
72555 futex_cmpxchg_enabled = 1;
72556
72557diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72558index 2357165..eb25501 100644
72559--- a/kernel/futex_compat.c
72560+++ b/kernel/futex_compat.c
72561@@ -10,6 +10,7 @@
72562 #include <linux/compat.h>
72563 #include <linux/nsproxy.h>
72564 #include <linux/futex.h>
72565+#include <linux/ptrace.h>
72566
72567 #include <asm/uaccess.h>
72568
72569@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72570 {
72571 struct compat_robust_list_head __user *head;
72572 unsigned long ret;
72573- const struct cred *cred = current_cred(), *pcred;
72574+ const struct cred *cred = current_cred();
72575+ const struct cred *pcred;
72576
72577 if (!futex_cmpxchg_enabled)
72578 return -ENOSYS;
72579@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72580 if (!p)
72581 goto err_unlock;
72582 ret = -EPERM;
72583+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72584+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72585+ goto err_unlock;
72586+#endif
72587 pcred = __task_cred(p);
72588 if (cred->euid != pcred->euid &&
72589 cred->euid != pcred->uid &&
72590diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72591index 9b22d03..6295b62 100644
72592--- a/kernel/gcov/base.c
72593+++ b/kernel/gcov/base.c
72594@@ -102,11 +102,6 @@ void gcov_enable_events(void)
72595 }
72596
72597 #ifdef CONFIG_MODULES
72598-static inline int within(void *addr, void *start, unsigned long size)
72599-{
72600- return ((addr >= start) && (addr < start + size));
72601-}
72602-
72603 /* Update list and generate events when modules are unloaded. */
72604 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72605 void *data)
72606@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72607 prev = NULL;
72608 /* Remove entries located in module from linked list. */
72609 for (info = gcov_info_head; info; info = info->next) {
72610- if (within(info, mod->module_core, mod->core_size)) {
72611+ if (within_module_core_rw((unsigned long)info, mod)) {
72612 if (prev)
72613 prev->next = info->next;
72614 else
72615diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72616index a6e9d00..a0da4f9 100644
72617--- a/kernel/hrtimer.c
72618+++ b/kernel/hrtimer.c
72619@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72620 local_irq_restore(flags);
72621 }
72622
72623-static void run_hrtimer_softirq(struct softirq_action *h)
72624+static void run_hrtimer_softirq(void)
72625 {
72626 hrtimer_peek_ahead_timers();
72627 }
72628diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72629index 8b6b8b6..6bc87df 100644
72630--- a/kernel/kallsyms.c
72631+++ b/kernel/kallsyms.c
72632@@ -11,6 +11,9 @@
72633 * Changed the compression method from stem compression to "table lookup"
72634 * compression (see scripts/kallsyms.c for a more complete description)
72635 */
72636+#ifdef CONFIG_GRKERNSEC_HIDESYM
72637+#define __INCLUDED_BY_HIDESYM 1
72638+#endif
72639 #include <linux/kallsyms.h>
72640 #include <linux/module.h>
72641 #include <linux/init.h>
72642@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72643
72644 static inline int is_kernel_inittext(unsigned long addr)
72645 {
72646+ if (system_state != SYSTEM_BOOTING)
72647+ return 0;
72648+
72649 if (addr >= (unsigned long)_sinittext
72650 && addr <= (unsigned long)_einittext)
72651 return 1;
72652 return 0;
72653 }
72654
72655+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72656+#ifdef CONFIG_MODULES
72657+static inline int is_module_text(unsigned long addr)
72658+{
72659+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72660+ return 1;
72661+
72662+ addr = ktla_ktva(addr);
72663+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72664+}
72665+#else
72666+static inline int is_module_text(unsigned long addr)
72667+{
72668+ return 0;
72669+}
72670+#endif
72671+#endif
72672+
72673 static inline int is_kernel_text(unsigned long addr)
72674 {
72675 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72676@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72677
72678 static inline int is_kernel(unsigned long addr)
72679 {
72680+
72681+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72682+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
72683+ return 1;
72684+
72685+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72686+#else
72687 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72688+#endif
72689+
72690 return 1;
72691 return in_gate_area_no_task(addr);
72692 }
72693
72694 static int is_ksym_addr(unsigned long addr)
72695 {
72696+
72697+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72698+ if (is_module_text(addr))
72699+ return 0;
72700+#endif
72701+
72702 if (all_var)
72703 return is_kernel(addr);
72704
72705@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72706
72707 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72708 {
72709- iter->name[0] = '\0';
72710 iter->nameoff = get_symbol_offset(new_pos);
72711 iter->pos = new_pos;
72712 }
72713@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
72714 {
72715 struct kallsym_iter *iter = m->private;
72716
72717+#ifdef CONFIG_GRKERNSEC_HIDESYM
72718+ if (current_uid())
72719+ return 0;
72720+#endif
72721+
72722 /* Some debugging symbols have no name. Ignore them. */
72723 if (!iter->name[0])
72724 return 0;
72725@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
72726 struct kallsym_iter *iter;
72727 int ret;
72728
72729- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
72730+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
72731 if (!iter)
72732 return -ENOMEM;
72733 reset_iter(iter, 0);
72734diff --git a/kernel/kexec.c b/kernel/kexec.c
72735index f336e21..9c1c20b 100644
72736--- a/kernel/kexec.c
72737+++ b/kernel/kexec.c
72738@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
72739 unsigned long flags)
72740 {
72741 struct compat_kexec_segment in;
72742- struct kexec_segment out, __user *ksegments;
72743+ struct kexec_segment out;
72744+ struct kexec_segment __user *ksegments;
72745 unsigned long i, result;
72746
72747 /* Don't allow clients that don't understand the native
72748diff --git a/kernel/kgdb.c b/kernel/kgdb.c
72749index 53dae4b..9ba3743 100644
72750--- a/kernel/kgdb.c
72751+++ b/kernel/kgdb.c
72752@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
72753 /* Guard for recursive entry */
72754 static int exception_level;
72755
72756-static struct kgdb_io *kgdb_io_ops;
72757+static const struct kgdb_io *kgdb_io_ops;
72758 static DEFINE_SPINLOCK(kgdb_registration_lock);
72759
72760 /* kgdb console driver is loaded */
72761@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
72762 */
72763 static atomic_t passive_cpu_wait[NR_CPUS];
72764 static atomic_t cpu_in_kgdb[NR_CPUS];
72765-atomic_t kgdb_setting_breakpoint;
72766+atomic_unchecked_t kgdb_setting_breakpoint;
72767
72768 struct task_struct *kgdb_usethread;
72769 struct task_struct *kgdb_contthread;
72770@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
72771 sizeof(unsigned long)];
72772
72773 /* to keep track of the CPU which is doing the single stepping*/
72774-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72775+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72776
72777 /*
72778 * If you are debugging a problem where roundup (the collection of
72779@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
72780 return 0;
72781 if (kgdb_connected)
72782 return 1;
72783- if (atomic_read(&kgdb_setting_breakpoint))
72784+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
72785 return 1;
72786 if (print_wait)
72787 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
72788@@ -1426,8 +1426,8 @@ acquirelock:
72789 * instance of the exception handler wanted to come into the
72790 * debugger on a different CPU via a single step
72791 */
72792- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
72793- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
72794+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
72795+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
72796
72797 atomic_set(&kgdb_active, -1);
72798 touch_softlockup_watchdog();
72799@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
72800 *
72801 * Register it with the KGDB core.
72802 */
72803-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
72804+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
72805 {
72806 int err;
72807
72808@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
72809 *
72810 * Unregister it with the KGDB core.
72811 */
72812-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
72813+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
72814 {
72815 BUG_ON(kgdb_connected);
72816
72817@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
72818 */
72819 void kgdb_breakpoint(void)
72820 {
72821- atomic_set(&kgdb_setting_breakpoint, 1);
72822+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
72823 wmb(); /* Sync point before breakpoint */
72824 arch_kgdb_breakpoint();
72825 wmb(); /* Sync point after breakpoint */
72826- atomic_set(&kgdb_setting_breakpoint, 0);
72827+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
72828 }
72829 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
72830
72831diff --git a/kernel/kmod.c b/kernel/kmod.c
72832index d206078..e27ba6a 100644
72833--- a/kernel/kmod.c
72834+++ b/kernel/kmod.c
72835@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
72836 * If module auto-loading support is disabled then this function
72837 * becomes a no-operation.
72838 */
72839-int __request_module(bool wait, const char *fmt, ...)
72840+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
72841 {
72842- va_list args;
72843 char module_name[MODULE_NAME_LEN];
72844 unsigned int max_modprobes;
72845 int ret;
72846- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
72847+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
72848 static char *envp[] = { "HOME=/",
72849 "TERM=linux",
72850 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
72851@@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
72852 if (ret)
72853 return ret;
72854
72855- va_start(args, fmt);
72856- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
72857- va_end(args);
72858+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
72859 if (ret >= MODULE_NAME_LEN)
72860 return -ENAMETOOLONG;
72861
72862+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72863+ if (!current_uid()) {
72864+ /* hack to workaround consolekit/udisks stupidity */
72865+ read_lock(&tasklist_lock);
72866+ if (!strcmp(current->comm, "mount") &&
72867+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
72868+ read_unlock(&tasklist_lock);
72869+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
72870+ return -EPERM;
72871+ }
72872+ read_unlock(&tasklist_lock);
72873+ }
72874+#endif
72875+
72876 /* If modprobe needs a service that is in a module, we get a recursive
72877 * loop. Limit the number of running kmod threads to max_threads/2 or
72878 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
72879@@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
72880 atomic_dec(&kmod_concurrent);
72881 return ret;
72882 }
72883+
72884+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
72885+{
72886+ va_list args;
72887+ int ret;
72888+
72889+ va_start(args, fmt);
72890+ ret = ____request_module(wait, module_param, fmt, args);
72891+ va_end(args);
72892+
72893+ return ret;
72894+}
72895+
72896+int __request_module(bool wait, const char *fmt, ...)
72897+{
72898+ va_list args;
72899+ int ret;
72900+
72901+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72902+ if (current_uid()) {
72903+ char module_param[MODULE_NAME_LEN];
72904+
72905+ memset(module_param, 0, sizeof(module_param));
72906+
72907+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
72908+
72909+ va_start(args, fmt);
72910+ ret = ____request_module(wait, module_param, fmt, args);
72911+ va_end(args);
72912+
72913+ return ret;
72914+ }
72915+#endif
72916+
72917+ va_start(args, fmt);
72918+ ret = ____request_module(wait, NULL, fmt, args);
72919+ va_end(args);
72920+
72921+ return ret;
72922+}
72923+
72924+
72925 EXPORT_SYMBOL(__request_module);
72926 #endif /* CONFIG_MODULES */
72927
72928@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
72929 *
72930 * Thus the __user pointer cast is valid here.
72931 */
72932- sys_wait4(pid, (int __user *)&ret, 0, NULL);
72933+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
72934
72935 /*
72936 * If ret is 0, either ____call_usermodehelper failed and the
72937diff --git a/kernel/kprobes.c b/kernel/kprobes.c
72938index 176d825..77fa8ea 100644
72939--- a/kernel/kprobes.c
72940+++ b/kernel/kprobes.c
72941@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
72942 * kernel image and loaded module images reside. This is required
72943 * so x86_64 can correctly handle the %rip-relative fixups.
72944 */
72945- kip->insns = module_alloc(PAGE_SIZE);
72946+ kip->insns = module_alloc_exec(PAGE_SIZE);
72947 if (!kip->insns) {
72948 kfree(kip);
72949 return NULL;
72950@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
72951 */
72952 if (!list_is_singular(&kprobe_insn_pages)) {
72953 list_del(&kip->list);
72954- module_free(NULL, kip->insns);
72955+ module_free_exec(NULL, kip->insns);
72956 kfree(kip);
72957 }
72958 return 1;
72959@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
72960 {
72961 int i, err = 0;
72962 unsigned long offset = 0, size = 0;
72963- char *modname, namebuf[128];
72964+ char *modname, namebuf[KSYM_NAME_LEN];
72965 const char *symbol_name;
72966 void *addr;
72967 struct kprobe_blackpoint *kb;
72968@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
72969 const char *sym = NULL;
72970 unsigned int i = *(loff_t *) v;
72971 unsigned long offset = 0;
72972- char *modname, namebuf[128];
72973+ char *modname, namebuf[KSYM_NAME_LEN];
72974
72975 head = &kprobe_table[i];
72976 preempt_disable();
72977diff --git a/kernel/lockdep.c b/kernel/lockdep.c
72978index d86fe89..d12fc66 100644
72979--- a/kernel/lockdep.c
72980+++ b/kernel/lockdep.c
72981@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
72982 /*
72983 * Various lockdep statistics:
72984 */
72985-atomic_t chain_lookup_hits;
72986-atomic_t chain_lookup_misses;
72987-atomic_t hardirqs_on_events;
72988-atomic_t hardirqs_off_events;
72989-atomic_t redundant_hardirqs_on;
72990-atomic_t redundant_hardirqs_off;
72991-atomic_t softirqs_on_events;
72992-atomic_t softirqs_off_events;
72993-atomic_t redundant_softirqs_on;
72994-atomic_t redundant_softirqs_off;
72995-atomic_t nr_unused_locks;
72996-atomic_t nr_cyclic_checks;
72997-atomic_t nr_find_usage_forwards_checks;
72998-atomic_t nr_find_usage_backwards_checks;
72999+atomic_unchecked_t chain_lookup_hits;
73000+atomic_unchecked_t chain_lookup_misses;
73001+atomic_unchecked_t hardirqs_on_events;
73002+atomic_unchecked_t hardirqs_off_events;
73003+atomic_unchecked_t redundant_hardirqs_on;
73004+atomic_unchecked_t redundant_hardirqs_off;
73005+atomic_unchecked_t softirqs_on_events;
73006+atomic_unchecked_t softirqs_off_events;
73007+atomic_unchecked_t redundant_softirqs_on;
73008+atomic_unchecked_t redundant_softirqs_off;
73009+atomic_unchecked_t nr_unused_locks;
73010+atomic_unchecked_t nr_cyclic_checks;
73011+atomic_unchecked_t nr_find_usage_forwards_checks;
73012+atomic_unchecked_t nr_find_usage_backwards_checks;
73013 #endif
73014
73015 /*
73016@@ -577,6 +577,10 @@ static int static_obj(void *obj)
73017 int i;
73018 #endif
73019
73020+#ifdef CONFIG_PAX_KERNEXEC
73021+ start = ktla_ktva(start);
73022+#endif
73023+
73024 /*
73025 * static variable?
73026 */
73027@@ -592,8 +596,7 @@ static int static_obj(void *obj)
73028 */
73029 for_each_possible_cpu(i) {
73030 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
73031- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
73032- + per_cpu_offset(i);
73033+ end = start + PERCPU_ENOUGH_ROOM;
73034
73035 if ((addr >= start) && (addr < end))
73036 return 1;
73037@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
73038 if (!static_obj(lock->key)) {
73039 debug_locks_off();
73040 printk("INFO: trying to register non-static key.\n");
73041+ printk("lock:%pS key:%pS.\n", lock, lock->key);
73042 printk("the code is fine but needs lockdep annotation.\n");
73043 printk("turning off the locking correctness validator.\n");
73044 dump_stack();
73045@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
73046 if (!class)
73047 return 0;
73048 }
73049- debug_atomic_inc((atomic_t *)&class->ops);
73050+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
73051 if (very_verbose(class)) {
73052 printk("\nacquire class [%p] %s", class->key, class->name);
73053 if (class->name_version > 1)
73054diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
73055index a2ee95a..092f0f2 100644
73056--- a/kernel/lockdep_internals.h
73057+++ b/kernel/lockdep_internals.h
73058@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
73059 /*
73060 * Various lockdep statistics:
73061 */
73062-extern atomic_t chain_lookup_hits;
73063-extern atomic_t chain_lookup_misses;
73064-extern atomic_t hardirqs_on_events;
73065-extern atomic_t hardirqs_off_events;
73066-extern atomic_t redundant_hardirqs_on;
73067-extern atomic_t redundant_hardirqs_off;
73068-extern atomic_t softirqs_on_events;
73069-extern atomic_t softirqs_off_events;
73070-extern atomic_t redundant_softirqs_on;
73071-extern atomic_t redundant_softirqs_off;
73072-extern atomic_t nr_unused_locks;
73073-extern atomic_t nr_cyclic_checks;
73074-extern atomic_t nr_cyclic_check_recursions;
73075-extern atomic_t nr_find_usage_forwards_checks;
73076-extern atomic_t nr_find_usage_forwards_recursions;
73077-extern atomic_t nr_find_usage_backwards_checks;
73078-extern atomic_t nr_find_usage_backwards_recursions;
73079-# define debug_atomic_inc(ptr) atomic_inc(ptr)
73080-# define debug_atomic_dec(ptr) atomic_dec(ptr)
73081-# define debug_atomic_read(ptr) atomic_read(ptr)
73082+extern atomic_unchecked_t chain_lookup_hits;
73083+extern atomic_unchecked_t chain_lookup_misses;
73084+extern atomic_unchecked_t hardirqs_on_events;
73085+extern atomic_unchecked_t hardirqs_off_events;
73086+extern atomic_unchecked_t redundant_hardirqs_on;
73087+extern atomic_unchecked_t redundant_hardirqs_off;
73088+extern atomic_unchecked_t softirqs_on_events;
73089+extern atomic_unchecked_t softirqs_off_events;
73090+extern atomic_unchecked_t redundant_softirqs_on;
73091+extern atomic_unchecked_t redundant_softirqs_off;
73092+extern atomic_unchecked_t nr_unused_locks;
73093+extern atomic_unchecked_t nr_cyclic_checks;
73094+extern atomic_unchecked_t nr_cyclic_check_recursions;
73095+extern atomic_unchecked_t nr_find_usage_forwards_checks;
73096+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
73097+extern atomic_unchecked_t nr_find_usage_backwards_checks;
73098+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
73099+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
73100+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
73101+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
73102 #else
73103 # define debug_atomic_inc(ptr) do { } while (0)
73104 # define debug_atomic_dec(ptr) do { } while (0)
73105diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
73106index d4aba4f..02a353f 100644
73107--- a/kernel/lockdep_proc.c
73108+++ b/kernel/lockdep_proc.c
73109@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
73110
73111 static void print_name(struct seq_file *m, struct lock_class *class)
73112 {
73113- char str[128];
73114+ char str[KSYM_NAME_LEN];
73115 const char *name = class->name;
73116
73117 if (!name) {
73118diff --git a/kernel/module.c b/kernel/module.c
73119index 4b270e6..2226274 100644
73120--- a/kernel/module.c
73121+++ b/kernel/module.c
73122@@ -55,6 +55,7 @@
73123 #include <linux/async.h>
73124 #include <linux/percpu.h>
73125 #include <linux/kmemleak.h>
73126+#include <linux/grsecurity.h>
73127
73128 #define CREATE_TRACE_POINTS
73129 #include <trace/events/module.h>
73130@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
73131 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
73132
73133 /* Bounds of module allocation, for speeding __module_address */
73134-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
73135+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
73136+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
73137
73138 int register_module_notifier(struct notifier_block * nb)
73139 {
73140@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73141 return true;
73142
73143 list_for_each_entry_rcu(mod, &modules, list) {
73144- struct symsearch arr[] = {
73145+ struct symsearch modarr[] = {
73146 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
73147 NOT_GPL_ONLY, false },
73148 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
73149@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73150 #endif
73151 };
73152
73153- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
73154+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
73155 return true;
73156 }
73157 return false;
73158@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
73159 void *ptr;
73160 int cpu;
73161
73162- if (align > PAGE_SIZE) {
73163+ if (align-1 >= PAGE_SIZE) {
73164 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
73165 name, align, PAGE_SIZE);
73166 align = PAGE_SIZE;
73167@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
73168 * /sys/module/foo/sections stuff
73169 * J. Corbet <corbet@lwn.net>
73170 */
73171-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
73172+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73173
73174 static inline bool sect_empty(const Elf_Shdr *sect)
73175 {
73176@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
73177 destroy_params(mod->kp, mod->num_kp);
73178
73179 /* This may be NULL, but that's OK */
73180- module_free(mod, mod->module_init);
73181+ module_free(mod, mod->module_init_rw);
73182+ module_free_exec(mod, mod->module_init_rx);
73183 kfree(mod->args);
73184 if (mod->percpu)
73185 percpu_modfree(mod->percpu);
73186@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
73187 percpu_modfree(mod->refptr);
73188 #endif
73189 /* Free lock-classes: */
73190- lockdep_free_key_range(mod->module_core, mod->core_size);
73191+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
73192+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
73193
73194 /* Finally, free the core (containing the module structure) */
73195- module_free(mod, mod->module_core);
73196+ module_free_exec(mod, mod->module_core_rx);
73197+ module_free(mod, mod->module_core_rw);
73198
73199 #ifdef CONFIG_MPU
73200 update_protections(current->mm);
73201@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73202 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73203 int ret = 0;
73204 const struct kernel_symbol *ksym;
73205+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73206+ int is_fs_load = 0;
73207+ int register_filesystem_found = 0;
73208+ char *p;
73209+
73210+ p = strstr(mod->args, "grsec_modharden_fs");
73211+
73212+ if (p) {
73213+ char *endptr = p + strlen("grsec_modharden_fs");
73214+ /* copy \0 as well */
73215+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
73216+ is_fs_load = 1;
73217+ }
73218+#endif
73219+
73220
73221 for (i = 1; i < n; i++) {
73222+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73223+ const char *name = strtab + sym[i].st_name;
73224+
73225+ /* it's a real shame this will never get ripped and copied
73226+ upstream! ;(
73227+ */
73228+ if (is_fs_load && !strcmp(name, "register_filesystem"))
73229+ register_filesystem_found = 1;
73230+#endif
73231 switch (sym[i].st_shndx) {
73232 case SHN_COMMON:
73233 /* We compiled with -fno-common. These are not
73234@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73235 strtab + sym[i].st_name, mod);
73236 /* Ok if resolved. */
73237 if (ksym) {
73238+ pax_open_kernel();
73239 sym[i].st_value = ksym->value;
73240+ pax_close_kernel();
73241 break;
73242 }
73243
73244@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73245 secbase = (unsigned long)mod->percpu;
73246 else
73247 secbase = sechdrs[sym[i].st_shndx].sh_addr;
73248+ pax_open_kernel();
73249 sym[i].st_value += secbase;
73250+ pax_close_kernel();
73251 break;
73252 }
73253 }
73254
73255+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73256+ if (is_fs_load && !register_filesystem_found) {
73257+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
73258+ ret = -EPERM;
73259+ }
73260+#endif
73261+
73262 return ret;
73263 }
73264
73265@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
73266 || s->sh_entsize != ~0UL
73267 || strstarts(secstrings + s->sh_name, ".init"))
73268 continue;
73269- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
73270+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73271+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
73272+ else
73273+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
73274 DEBUGP("\t%s\n", secstrings + s->sh_name);
73275 }
73276- if (m == 0)
73277- mod->core_text_size = mod->core_size;
73278 }
73279
73280 DEBUGP("Init section allocation order:\n");
73281@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
73282 || s->sh_entsize != ~0UL
73283 || !strstarts(secstrings + s->sh_name, ".init"))
73284 continue;
73285- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
73286- | INIT_OFFSET_MASK);
73287+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73288+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
73289+ else
73290+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
73291+ s->sh_entsize |= INIT_OFFSET_MASK;
73292 DEBUGP("\t%s\n", secstrings + s->sh_name);
73293 }
73294- if (m == 0)
73295- mod->init_text_size = mod->init_size;
73296 }
73297 }
73298
73299@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
73300
73301 /* As per nm */
73302 static char elf_type(const Elf_Sym *sym,
73303- Elf_Shdr *sechdrs,
73304- const char *secstrings,
73305- struct module *mod)
73306+ const Elf_Shdr *sechdrs,
73307+ const char *secstrings)
73308 {
73309 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
73310 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
73311@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
73312
73313 /* Put symbol section at end of init part of module. */
73314 symsect->sh_flags |= SHF_ALLOC;
73315- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
73316+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
73317 symindex) | INIT_OFFSET_MASK;
73318 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
73319
73320@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
73321 }
73322
73323 /* Append room for core symbols at end of core part. */
73324- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
73325- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
73326+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
73327+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
73328
73329 /* Put string table section at end of init part of module. */
73330 strsect->sh_flags |= SHF_ALLOC;
73331- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
73332+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
73333 strindex) | INIT_OFFSET_MASK;
73334 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
73335
73336 /* Append room for core symbols' strings at end of core part. */
73337- *pstroffs = mod->core_size;
73338+ *pstroffs = mod->core_size_rx;
73339 __set_bit(0, strmap);
73340- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
73341+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
73342
73343 return symoffs;
73344 }
73345@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
73346 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73347 mod->strtab = (void *)sechdrs[strindex].sh_addr;
73348
73349+ pax_open_kernel();
73350+
73351 /* Set types up while we still have access to sections. */
73352 for (i = 0; i < mod->num_symtab; i++)
73353 mod->symtab[i].st_info
73354- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
73355+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
73356
73357- mod->core_symtab = dst = mod->module_core + symoffs;
73358+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
73359 src = mod->symtab;
73360 *dst = *src;
73361 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
73362@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
73363 }
73364 mod->core_num_syms = ndst;
73365
73366- mod->core_strtab = s = mod->module_core + stroffs;
73367+ mod->core_strtab = s = mod->module_core_rx + stroffs;
73368 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
73369 if (test_bit(i, strmap))
73370 *++s = mod->strtab[i];
73371+
73372+ pax_close_kernel();
73373 }
73374 #else
73375 static inline unsigned long layout_symtab(struct module *mod,
73376@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
73377 #endif
73378 }
73379
73380-static void *module_alloc_update_bounds(unsigned long size)
73381+static void *module_alloc_update_bounds_rw(unsigned long size)
73382 {
73383 void *ret = module_alloc(size);
73384
73385 if (ret) {
73386 /* Update module bounds. */
73387- if ((unsigned long)ret < module_addr_min)
73388- module_addr_min = (unsigned long)ret;
73389- if ((unsigned long)ret + size > module_addr_max)
73390- module_addr_max = (unsigned long)ret + size;
73391+ if ((unsigned long)ret < module_addr_min_rw)
73392+ module_addr_min_rw = (unsigned long)ret;
73393+ if ((unsigned long)ret + size > module_addr_max_rw)
73394+ module_addr_max_rw = (unsigned long)ret + size;
73395+ }
73396+ return ret;
73397+}
73398+
73399+static void *module_alloc_update_bounds_rx(unsigned long size)
73400+{
73401+ void *ret = module_alloc_exec(size);
73402+
73403+ if (ret) {
73404+ /* Update module bounds. */
73405+ if ((unsigned long)ret < module_addr_min_rx)
73406+ module_addr_min_rx = (unsigned long)ret;
73407+ if ((unsigned long)ret + size > module_addr_max_rx)
73408+ module_addr_max_rx = (unsigned long)ret + size;
73409 }
73410 return ret;
73411 }
73412@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73413 unsigned int i;
73414
73415 /* only scan the sections containing data */
73416- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
73417- (unsigned long)mod->module_core,
73418+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
73419+ (unsigned long)mod->module_core_rw,
73420 sizeof(struct module), GFP_KERNEL);
73421
73422 for (i = 1; i < hdr->e_shnum; i++) {
73423@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73424 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73425 continue;
73426
73427- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73428- (unsigned long)mod->module_core,
73429+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73430+ (unsigned long)mod->module_core_rw,
73431 sechdrs[i].sh_size, GFP_KERNEL);
73432 }
73433 }
73434@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73435 Elf_Ehdr *hdr;
73436 Elf_Shdr *sechdrs;
73437 char *secstrings, *args, *modmagic, *strtab = NULL;
73438- char *staging;
73439+ char *staging, *license;
73440 unsigned int i;
73441 unsigned int symindex = 0;
73442 unsigned int strindex = 0;
73443@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73444 goto free_hdr;
73445 }
73446
73447+ license = get_modinfo(sechdrs, infoindex, "license");
73448+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73449+ if (!license || !license_is_gpl_compatible(license)) {
73450+ err -ENOEXEC;
73451+ goto free_hdr;
73452+ }
73453+#endif
73454+
73455 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73456 /* This is allowed: modprobe --force will invalidate it. */
73457 if (!modmagic) {
73458@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73459 secstrings, &stroffs, strmap);
73460
73461 /* Do the allocs. */
73462- ptr = module_alloc_update_bounds(mod->core_size);
73463+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73464 /*
73465 * The pointer to this block is stored in the module structure
73466 * which is inside the block. Just mark it as not being a
73467@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73468 err = -ENOMEM;
73469 goto free_percpu;
73470 }
73471- memset(ptr, 0, mod->core_size);
73472- mod->module_core = ptr;
73473+ memset(ptr, 0, mod->core_size_rw);
73474+ mod->module_core_rw = ptr;
73475
73476- ptr = module_alloc_update_bounds(mod->init_size);
73477+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73478 /*
73479 * The pointer to this block is stored in the module structure
73480 * which is inside the block. This block doesn't need to be
73481 * scanned as it contains data and code that will be freed
73482 * after the module is initialized.
73483 */
73484- kmemleak_ignore(ptr);
73485- if (!ptr && mod->init_size) {
73486+ kmemleak_not_leak(ptr);
73487+ if (!ptr && mod->init_size_rw) {
73488 err = -ENOMEM;
73489- goto free_core;
73490+ goto free_core_rw;
73491 }
73492- memset(ptr, 0, mod->init_size);
73493- mod->module_init = ptr;
73494+ memset(ptr, 0, mod->init_size_rw);
73495+ mod->module_init_rw = ptr;
73496+
73497+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73498+ kmemleak_not_leak(ptr);
73499+ if (!ptr) {
73500+ err = -ENOMEM;
73501+ goto free_init_rw;
73502+ }
73503+
73504+ pax_open_kernel();
73505+ memset(ptr, 0, mod->core_size_rx);
73506+ pax_close_kernel();
73507+ mod->module_core_rx = ptr;
73508+
73509+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73510+ kmemleak_not_leak(ptr);
73511+ if (!ptr && mod->init_size_rx) {
73512+ err = -ENOMEM;
73513+ goto free_core_rx;
73514+ }
73515+
73516+ pax_open_kernel();
73517+ memset(ptr, 0, mod->init_size_rx);
73518+ pax_close_kernel();
73519+ mod->module_init_rx = ptr;
73520
73521 /* Transfer each section which specifies SHF_ALLOC */
73522 DEBUGP("final section addresses:\n");
73523@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73524 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73525 continue;
73526
73527- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73528- dest = mod->module_init
73529- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73530- else
73531- dest = mod->module_core + sechdrs[i].sh_entsize;
73532+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73533+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73534+ dest = mod->module_init_rw
73535+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73536+ else
73537+ dest = mod->module_init_rx
73538+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73539+ } else {
73540+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73541+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73542+ else
73543+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73544+ }
73545
73546- if (sechdrs[i].sh_type != SHT_NOBITS)
73547- memcpy(dest, (void *)sechdrs[i].sh_addr,
73548- sechdrs[i].sh_size);
73549+ if (sechdrs[i].sh_type != SHT_NOBITS) {
73550+
73551+#ifdef CONFIG_PAX_KERNEXEC
73552+#ifdef CONFIG_X86_64
73553+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73554+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73555+#endif
73556+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73557+ pax_open_kernel();
73558+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73559+ pax_close_kernel();
73560+ } else
73561+#endif
73562+
73563+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73564+ }
73565 /* Update sh_addr to point to copy in image. */
73566- sechdrs[i].sh_addr = (unsigned long)dest;
73567+
73568+#ifdef CONFIG_PAX_KERNEXEC
73569+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73570+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73571+ else
73572+#endif
73573+
73574+ sechdrs[i].sh_addr = (unsigned long)dest;
73575 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73576 }
73577 /* Module has been moved. */
73578@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73579 mod->name);
73580 if (!mod->refptr) {
73581 err = -ENOMEM;
73582- goto free_init;
73583+ goto free_init_rx;
73584 }
73585 #endif
73586 /* Now we've moved module, initialize linked lists, etc. */
73587@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73588 goto free_unload;
73589
73590 /* Set up license info based on the info section */
73591- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73592+ set_license(mod, license);
73593
73594 /*
73595 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73596@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73597 /* Set up MODINFO_ATTR fields */
73598 setup_modinfo(mod, sechdrs, infoindex);
73599
73600+ mod->args = args;
73601+
73602+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73603+ {
73604+ char *p, *p2;
73605+
73606+ if (strstr(mod->args, "grsec_modharden_netdev")) {
73607+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73608+ err = -EPERM;
73609+ goto cleanup;
73610+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73611+ p += strlen("grsec_modharden_normal");
73612+ p2 = strstr(p, "_");
73613+ if (p2) {
73614+ *p2 = '\0';
73615+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73616+ *p2 = '_';
73617+ }
73618+ err = -EPERM;
73619+ goto cleanup;
73620+ }
73621+ }
73622+#endif
73623+
73624+
73625 /* Fix up syms, so that st_value is a pointer to location. */
73626 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73627 mod);
73628@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73629
73630 /* Now do relocations. */
73631 for (i = 1; i < hdr->e_shnum; i++) {
73632- const char *strtab = (char *)sechdrs[strindex].sh_addr;
73633 unsigned int info = sechdrs[i].sh_info;
73634+ strtab = (char *)sechdrs[strindex].sh_addr;
73635
73636 /* Not a valid relocation section? */
73637 if (info >= hdr->e_shnum)
73638@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73639 * Do it before processing of module parameters, so the module
73640 * can provide parameter accessor functions of its own.
73641 */
73642- if (mod->module_init)
73643- flush_icache_range((unsigned long)mod->module_init,
73644- (unsigned long)mod->module_init
73645- + mod->init_size);
73646- flush_icache_range((unsigned long)mod->module_core,
73647- (unsigned long)mod->module_core + mod->core_size);
73648+ if (mod->module_init_rx)
73649+ flush_icache_range((unsigned long)mod->module_init_rx,
73650+ (unsigned long)mod->module_init_rx
73651+ + mod->init_size_rx);
73652+ flush_icache_range((unsigned long)mod->module_core_rx,
73653+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
73654
73655 set_fs(old_fs);
73656
73657- mod->args = args;
73658 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73659 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73660 mod->name);
73661@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73662 free_unload:
73663 module_unload_free(mod);
73664 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73665+ free_init_rx:
73666 percpu_modfree(mod->refptr);
73667- free_init:
73668 #endif
73669- module_free(mod, mod->module_init);
73670- free_core:
73671- module_free(mod, mod->module_core);
73672+ module_free_exec(mod, mod->module_init_rx);
73673+ free_core_rx:
73674+ module_free_exec(mod, mod->module_core_rx);
73675+ free_init_rw:
73676+ module_free(mod, mod->module_init_rw);
73677+ free_core_rw:
73678+ module_free(mod, mod->module_core_rw);
73679 /* mod will be freed with core. Don't access it beyond this line! */
73680 free_percpu:
73681 if (percpu)
73682@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73683 mod->symtab = mod->core_symtab;
73684 mod->strtab = mod->core_strtab;
73685 #endif
73686- module_free(mod, mod->module_init);
73687- mod->module_init = NULL;
73688- mod->init_size = 0;
73689- mod->init_text_size = 0;
73690+ module_free(mod, mod->module_init_rw);
73691+ module_free_exec(mod, mod->module_init_rx);
73692+ mod->module_init_rw = NULL;
73693+ mod->module_init_rx = NULL;
73694+ mod->init_size_rw = 0;
73695+ mod->init_size_rx = 0;
73696 mutex_unlock(&module_mutex);
73697
73698 return 0;
73699@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73700 unsigned long nextval;
73701
73702 /* At worse, next value is at end of module */
73703- if (within_module_init(addr, mod))
73704- nextval = (unsigned long)mod->module_init+mod->init_text_size;
73705+ if (within_module_init_rx(addr, mod))
73706+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73707+ else if (within_module_init_rw(addr, mod))
73708+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73709+ else if (within_module_core_rx(addr, mod))
73710+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
73711+ else if (within_module_core_rw(addr, mod))
73712+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
73713 else
73714- nextval = (unsigned long)mod->module_core+mod->core_text_size;
73715+ return NULL;
73716
73717 /* Scan for closest preceeding symbol, and next symbol. (ELF
73718 starts real symbols at 1). */
73719@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
73720 char buf[8];
73721
73722 seq_printf(m, "%s %u",
73723- mod->name, mod->init_size + mod->core_size);
73724+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
73725 print_unload_info(m, mod);
73726
73727 /* Informative for users. */
73728@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
73729 mod->state == MODULE_STATE_COMING ? "Loading":
73730 "Live");
73731 /* Used by oprofile and other similar tools. */
73732- seq_printf(m, " 0x%p", mod->module_core);
73733+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73734
73735 /* Taints info */
73736 if (mod->taints)
73737@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
73738
73739 static int __init proc_modules_init(void)
73740 {
73741+#ifndef CONFIG_GRKERNSEC_HIDESYM
73742+#ifdef CONFIG_GRKERNSEC_PROC_USER
73743+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73744+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73745+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
73746+#else
73747 proc_create("modules", 0, NULL, &proc_modules_operations);
73748+#endif
73749+#else
73750+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73751+#endif
73752 return 0;
73753 }
73754 module_init(proc_modules_init);
73755@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
73756 {
73757 struct module *mod;
73758
73759- if (addr < module_addr_min || addr > module_addr_max)
73760+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
73761+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
73762 return NULL;
73763
73764 list_for_each_entry_rcu(mod, &modules, list)
73765- if (within_module_core(addr, mod)
73766- || within_module_init(addr, mod))
73767+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
73768 return mod;
73769 return NULL;
73770 }
73771@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
73772 */
73773 struct module *__module_text_address(unsigned long addr)
73774 {
73775- struct module *mod = __module_address(addr);
73776+ struct module *mod;
73777+
73778+#ifdef CONFIG_X86_32
73779+ addr = ktla_ktva(addr);
73780+#endif
73781+
73782+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
73783+ return NULL;
73784+
73785+ mod = __module_address(addr);
73786+
73787 if (mod) {
73788 /* Make sure it's within the text section. */
73789- if (!within(addr, mod->module_init, mod->init_text_size)
73790- && !within(addr, mod->module_core, mod->core_text_size))
73791+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
73792 mod = NULL;
73793 }
73794 return mod;
73795diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
73796index ec815a9..fe46e99 100644
73797--- a/kernel/mutex-debug.c
73798+++ b/kernel/mutex-debug.c
73799@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
73800 }
73801
73802 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73803- struct thread_info *ti)
73804+ struct task_struct *task)
73805 {
73806 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
73807
73808 /* Mark the current thread as blocked on the lock: */
73809- ti->task->blocked_on = waiter;
73810+ task->blocked_on = waiter;
73811 }
73812
73813 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73814- struct thread_info *ti)
73815+ struct task_struct *task)
73816 {
73817 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
73818- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
73819- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
73820- ti->task->blocked_on = NULL;
73821+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
73822+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
73823+ task->blocked_on = NULL;
73824
73825 list_del_init(&waiter->list);
73826 waiter->task = NULL;
73827@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
73828 return;
73829
73830 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
73831- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
73832+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
73833 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
73834 mutex_clear_owner(lock);
73835 }
73836diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
73837index 6b2d735..372d3c4 100644
73838--- a/kernel/mutex-debug.h
73839+++ b/kernel/mutex-debug.h
73840@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
73841 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
73842 extern void debug_mutex_add_waiter(struct mutex *lock,
73843 struct mutex_waiter *waiter,
73844- struct thread_info *ti);
73845+ struct task_struct *task);
73846 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73847- struct thread_info *ti);
73848+ struct task_struct *task);
73849 extern void debug_mutex_unlock(struct mutex *lock);
73850 extern void debug_mutex_init(struct mutex *lock, const char *name,
73851 struct lock_class_key *key);
73852
73853 static inline void mutex_set_owner(struct mutex *lock)
73854 {
73855- lock->owner = current_thread_info();
73856+ lock->owner = current;
73857 }
73858
73859 static inline void mutex_clear_owner(struct mutex *lock)
73860diff --git a/kernel/mutex.c b/kernel/mutex.c
73861index f85644c..5ee9f77 100644
73862--- a/kernel/mutex.c
73863+++ b/kernel/mutex.c
73864@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73865 */
73866
73867 for (;;) {
73868- struct thread_info *owner;
73869+ struct task_struct *owner;
73870
73871 /*
73872 * If we own the BKL, then don't spin. The owner of
73873@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73874 spin_lock_mutex(&lock->wait_lock, flags);
73875
73876 debug_mutex_lock_common(lock, &waiter);
73877- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
73878+ debug_mutex_add_waiter(lock, &waiter, task);
73879
73880 /* add waiting tasks to the end of the waitqueue (FIFO): */
73881 list_add_tail(&waiter.list, &lock->wait_list);
73882@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73883 * TASK_UNINTERRUPTIBLE case.)
73884 */
73885 if (unlikely(signal_pending_state(state, task))) {
73886- mutex_remove_waiter(lock, &waiter,
73887- task_thread_info(task));
73888+ mutex_remove_waiter(lock, &waiter, task);
73889 mutex_release(&lock->dep_map, 1, ip);
73890 spin_unlock_mutex(&lock->wait_lock, flags);
73891
73892@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73893 done:
73894 lock_acquired(&lock->dep_map, ip);
73895 /* got the lock - rejoice! */
73896- mutex_remove_waiter(lock, &waiter, current_thread_info());
73897+ mutex_remove_waiter(lock, &waiter, task);
73898 mutex_set_owner(lock);
73899
73900 /* set it to 0 if there are no waiters left: */
73901diff --git a/kernel/mutex.h b/kernel/mutex.h
73902index 67578ca..4115fbf 100644
73903--- a/kernel/mutex.h
73904+++ b/kernel/mutex.h
73905@@ -19,7 +19,7 @@
73906 #ifdef CONFIG_SMP
73907 static inline void mutex_set_owner(struct mutex *lock)
73908 {
73909- lock->owner = current_thread_info();
73910+ lock->owner = current;
73911 }
73912
73913 static inline void mutex_clear_owner(struct mutex *lock)
73914diff --git a/kernel/panic.c b/kernel/panic.c
73915index 96b45d0..ff70a46 100644
73916--- a/kernel/panic.c
73917+++ b/kernel/panic.c
73918@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
73919 va_end(args);
73920 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
73921 #ifdef CONFIG_DEBUG_BUGVERBOSE
73922- dump_stack();
73923+ /*
73924+ * Avoid nested stack-dumping if a panic occurs during oops processing
73925+ */
73926+ if (!oops_in_progress)
73927+ dump_stack();
73928 #endif
73929
73930 /*
73931@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
73932 const char *board;
73933
73934 printk(KERN_WARNING "------------[ cut here ]------------\n");
73935- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
73936+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
73937 board = dmi_get_system_info(DMI_PRODUCT_NAME);
73938 if (board)
73939 printk(KERN_WARNING "Hardware name: %s\n", board);
73940@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
73941 */
73942 void __stack_chk_fail(void)
73943 {
73944- panic("stack-protector: Kernel stack is corrupted in: %p\n",
73945+ dump_stack();
73946+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
73947 __builtin_return_address(0));
73948 }
73949 EXPORT_SYMBOL(__stack_chk_fail);
73950diff --git a/kernel/params.c b/kernel/params.c
73951index d656c27..21e452c 100644
73952--- a/kernel/params.c
73953+++ b/kernel/params.c
73954@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
73955 return ret;
73956 }
73957
73958-static struct sysfs_ops module_sysfs_ops = {
73959+static const struct sysfs_ops module_sysfs_ops = {
73960 .show = module_attr_show,
73961 .store = module_attr_store,
73962 };
73963@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
73964 return 0;
73965 }
73966
73967-static struct kset_uevent_ops module_uevent_ops = {
73968+static const struct kset_uevent_ops module_uevent_ops = {
73969 .filter = uevent_filter,
73970 };
73971
73972diff --git a/kernel/perf_event.c b/kernel/perf_event.c
73973index 37ebc14..9c121d9 100644
73974--- a/kernel/perf_event.c
73975+++ b/kernel/perf_event.c
73976@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
73977 */
73978 int sysctl_perf_event_sample_rate __read_mostly = 100000;
73979
73980-static atomic64_t perf_event_id;
73981+static atomic64_unchecked_t perf_event_id;
73982
73983 /*
73984 * Lock for (sysadmin-configurable) event reservations:
73985@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
73986 * In order to keep per-task stats reliable we need to flip the event
73987 * values when we flip the contexts.
73988 */
73989- value = atomic64_read(&next_event->count);
73990- value = atomic64_xchg(&event->count, value);
73991- atomic64_set(&next_event->count, value);
73992+ value = atomic64_read_unchecked(&next_event->count);
73993+ value = atomic64_xchg_unchecked(&event->count, value);
73994+ atomic64_set_unchecked(&next_event->count, value);
73995
73996 swap(event->total_time_enabled, next_event->total_time_enabled);
73997 swap(event->total_time_running, next_event->total_time_running);
73998@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
73999 update_event_times(event);
74000 }
74001
74002- return atomic64_read(&event->count);
74003+ return atomic64_read_unchecked(&event->count);
74004 }
74005
74006 /*
74007@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
74008 values[n++] = 1 + leader->nr_siblings;
74009 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74010 values[n++] = leader->total_time_enabled +
74011- atomic64_read(&leader->child_total_time_enabled);
74012+ atomic64_read_unchecked(&leader->child_total_time_enabled);
74013 }
74014 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74015 values[n++] = leader->total_time_running +
74016- atomic64_read(&leader->child_total_time_running);
74017+ atomic64_read_unchecked(&leader->child_total_time_running);
74018 }
74019
74020 size = n * sizeof(u64);
74021@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
74022 values[n++] = perf_event_read_value(event);
74023 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74024 values[n++] = event->total_time_enabled +
74025- atomic64_read(&event->child_total_time_enabled);
74026+ atomic64_read_unchecked(&event->child_total_time_enabled);
74027 }
74028 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74029 values[n++] = event->total_time_running +
74030- atomic64_read(&event->child_total_time_running);
74031+ atomic64_read_unchecked(&event->child_total_time_running);
74032 }
74033 if (read_format & PERF_FORMAT_ID)
74034 values[n++] = primary_event_id(event);
74035@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
74036 static void perf_event_reset(struct perf_event *event)
74037 {
74038 (void)perf_event_read(event);
74039- atomic64_set(&event->count, 0);
74040+ atomic64_set_unchecked(&event->count, 0);
74041 perf_event_update_userpage(event);
74042 }
74043
74044@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
74045 ++userpg->lock;
74046 barrier();
74047 userpg->index = perf_event_index(event);
74048- userpg->offset = atomic64_read(&event->count);
74049+ userpg->offset = atomic64_read_unchecked(&event->count);
74050 if (event->state == PERF_EVENT_STATE_ACTIVE)
74051- userpg->offset -= atomic64_read(&event->hw.prev_count);
74052+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
74053
74054 userpg->time_enabled = event->total_time_enabled +
74055- atomic64_read(&event->child_total_time_enabled);
74056+ atomic64_read_unchecked(&event->child_total_time_enabled);
74057
74058 userpg->time_running = event->total_time_running +
74059- atomic64_read(&event->child_total_time_running);
74060+ atomic64_read_unchecked(&event->child_total_time_running);
74061
74062 barrier();
74063 ++userpg->lock;
74064@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74065 u64 values[4];
74066 int n = 0;
74067
74068- values[n++] = atomic64_read(&event->count);
74069+ values[n++] = atomic64_read_unchecked(&event->count);
74070 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74071 values[n++] = event->total_time_enabled +
74072- atomic64_read(&event->child_total_time_enabled);
74073+ atomic64_read_unchecked(&event->child_total_time_enabled);
74074 }
74075 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74076 values[n++] = event->total_time_running +
74077- atomic64_read(&event->child_total_time_running);
74078+ atomic64_read_unchecked(&event->child_total_time_running);
74079 }
74080 if (read_format & PERF_FORMAT_ID)
74081 values[n++] = primary_event_id(event);
74082@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74083 if (leader != event)
74084 leader->pmu->read(leader);
74085
74086- values[n++] = atomic64_read(&leader->count);
74087+ values[n++] = atomic64_read_unchecked(&leader->count);
74088 if (read_format & PERF_FORMAT_ID)
74089 values[n++] = primary_event_id(leader);
74090
74091@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74092 if (sub != event)
74093 sub->pmu->read(sub);
74094
74095- values[n++] = atomic64_read(&sub->count);
74096+ values[n++] = atomic64_read_unchecked(&sub->count);
74097 if (read_format & PERF_FORMAT_ID)
74098 values[n++] = primary_event_id(sub);
74099
74100@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74101 * need to add enough zero bytes after the string to handle
74102 * the 64bit alignment we do later.
74103 */
74104- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74105+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
74106 if (!buf) {
74107 name = strncpy(tmp, "//enomem", sizeof(tmp));
74108 goto got_name;
74109 }
74110- name = d_path(&file->f_path, buf, PATH_MAX);
74111+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74112 if (IS_ERR(name)) {
74113 name = strncpy(tmp, "//toolong", sizeof(tmp));
74114 goto got_name;
74115@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
74116 {
74117 struct hw_perf_event *hwc = &event->hw;
74118
74119- atomic64_add(nr, &event->count);
74120+ atomic64_add_unchecked(nr, &event->count);
74121
74122 if (!hwc->sample_period)
74123 return;
74124@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
74125 u64 now;
74126
74127 now = cpu_clock(cpu);
74128- prev = atomic64_read(&event->hw.prev_count);
74129- atomic64_set(&event->hw.prev_count, now);
74130- atomic64_add(now - prev, &event->count);
74131+ prev = atomic64_read_unchecked(&event->hw.prev_count);
74132+ atomic64_set_unchecked(&event->hw.prev_count, now);
74133+ atomic64_add_unchecked(now - prev, &event->count);
74134 }
74135
74136 static int cpu_clock_perf_event_enable(struct perf_event *event)
74137@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
74138 struct hw_perf_event *hwc = &event->hw;
74139 int cpu = raw_smp_processor_id();
74140
74141- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
74142+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
74143 perf_swevent_start_hrtimer(event);
74144
74145 return 0;
74146@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
74147 u64 prev;
74148 s64 delta;
74149
74150- prev = atomic64_xchg(&event->hw.prev_count, now);
74151+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
74152 delta = now - prev;
74153- atomic64_add(delta, &event->count);
74154+ atomic64_add_unchecked(delta, &event->count);
74155 }
74156
74157 static int task_clock_perf_event_enable(struct perf_event *event)
74158@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
74159
74160 now = event->ctx->time;
74161
74162- atomic64_set(&hwc->prev_count, now);
74163+ atomic64_set_unchecked(&hwc->prev_count, now);
74164
74165 perf_swevent_start_hrtimer(event);
74166
74167@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
74168 event->parent = parent_event;
74169
74170 event->ns = get_pid_ns(current->nsproxy->pid_ns);
74171- event->id = atomic64_inc_return(&perf_event_id);
74172+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
74173
74174 event->state = PERF_EVENT_STATE_INACTIVE;
74175
74176@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
74177 if (child_event->attr.inherit_stat)
74178 perf_event_read_event(child_event, child);
74179
74180- child_val = atomic64_read(&child_event->count);
74181+ child_val = atomic64_read_unchecked(&child_event->count);
74182
74183 /*
74184 * Add back the child's count to the parent's count:
74185 */
74186- atomic64_add(child_val, &parent_event->count);
74187- atomic64_add(child_event->total_time_enabled,
74188+ atomic64_add_unchecked(child_val, &parent_event->count);
74189+ atomic64_add_unchecked(child_event->total_time_enabled,
74190 &parent_event->child_total_time_enabled);
74191- atomic64_add(child_event->total_time_running,
74192+ atomic64_add_unchecked(child_event->total_time_running,
74193 &parent_event->child_total_time_running);
74194
74195 /*
74196diff --git a/kernel/pid.c b/kernel/pid.c
74197index fce7198..4f23a7e 100644
74198--- a/kernel/pid.c
74199+++ b/kernel/pid.c
74200@@ -33,6 +33,7 @@
74201 #include <linux/rculist.h>
74202 #include <linux/bootmem.h>
74203 #include <linux/hash.h>
74204+#include <linux/security.h>
74205 #include <linux/pid_namespace.h>
74206 #include <linux/init_task.h>
74207 #include <linux/syscalls.h>
74208@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
74209
74210 int pid_max = PID_MAX_DEFAULT;
74211
74212-#define RESERVED_PIDS 300
74213+#define RESERVED_PIDS 500
74214
74215 int pid_max_min = RESERVED_PIDS + 1;
74216 int pid_max_max = PID_MAX_LIMIT;
74217@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
74218 */
74219 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
74220 {
74221- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74222+ struct task_struct *task;
74223+
74224+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74225+
74226+ if (gr_pid_is_chrooted(task))
74227+ return NULL;
74228+
74229+ return task;
74230 }
74231
74232 struct task_struct *find_task_by_vpid(pid_t vnr)
74233@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
74234 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
74235 }
74236
74237+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
74238+{
74239+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
74240+}
74241+
74242 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
74243 {
74244 struct pid *pid;
74245diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
74246index 5c9dc22..d271117 100644
74247--- a/kernel/posix-cpu-timers.c
74248+++ b/kernel/posix-cpu-timers.c
74249@@ -6,6 +6,7 @@
74250 #include <linux/posix-timers.h>
74251 #include <linux/errno.h>
74252 #include <linux/math64.h>
74253+#include <linux/security.h>
74254 #include <asm/uaccess.h>
74255 #include <linux/kernel_stat.h>
74256 #include <trace/events/timer.h>
74257@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
74258
74259 static __init int init_posix_cpu_timers(void)
74260 {
74261- struct k_clock process = {
74262+ static struct k_clock process = {
74263 .clock_getres = process_cpu_clock_getres,
74264 .clock_get = process_cpu_clock_get,
74265 .clock_set = do_posix_clock_nosettime,
74266@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
74267 .nsleep = process_cpu_nsleep,
74268 .nsleep_restart = process_cpu_nsleep_restart,
74269 };
74270- struct k_clock thread = {
74271+ static struct k_clock thread = {
74272 .clock_getres = thread_cpu_clock_getres,
74273 .clock_get = thread_cpu_clock_get,
74274 .clock_set = do_posix_clock_nosettime,
74275diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
74276index 5e76d22..cf1baeb 100644
74277--- a/kernel/posix-timers.c
74278+++ b/kernel/posix-timers.c
74279@@ -42,6 +42,7 @@
74280 #include <linux/compiler.h>
74281 #include <linux/idr.h>
74282 #include <linux/posix-timers.h>
74283+#include <linux/grsecurity.h>
74284 #include <linux/syscalls.h>
74285 #include <linux/wait.h>
74286 #include <linux/workqueue.h>
74287@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
74288 * which we beg off on and pass to do_sys_settimeofday().
74289 */
74290
74291-static struct k_clock posix_clocks[MAX_CLOCKS];
74292+static struct k_clock *posix_clocks[MAX_CLOCKS];
74293
74294 /*
74295 * These ones are defined below.
74296@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
74297 */
74298 #define CLOCK_DISPATCH(clock, call, arglist) \
74299 ((clock) < 0 ? posix_cpu_##call arglist : \
74300- (posix_clocks[clock].call != NULL \
74301- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
74302+ (posix_clocks[clock]->call != NULL \
74303+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
74304
74305 /*
74306 * Default clock hook functions when the struct k_clock passed
74307@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
74308 struct timespec *tp)
74309 {
74310 tp->tv_sec = 0;
74311- tp->tv_nsec = posix_clocks[which_clock].res;
74312+ tp->tv_nsec = posix_clocks[which_clock]->res;
74313 return 0;
74314 }
74315
74316@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
74317 return 0;
74318 if ((unsigned) which_clock >= MAX_CLOCKS)
74319 return 1;
74320- if (posix_clocks[which_clock].clock_getres != NULL)
74321+ if (posix_clocks[which_clock] == NULL)
74322 return 0;
74323- if (posix_clocks[which_clock].res != 0)
74324+ if (posix_clocks[which_clock]->clock_getres != NULL)
74325+ return 0;
74326+ if (posix_clocks[which_clock]->res != 0)
74327 return 0;
74328 return 1;
74329 }
74330@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
74331 */
74332 static __init int init_posix_timers(void)
74333 {
74334- struct k_clock clock_realtime = {
74335+ static struct k_clock clock_realtime = {
74336 .clock_getres = hrtimer_get_res,
74337 };
74338- struct k_clock clock_monotonic = {
74339+ static struct k_clock clock_monotonic = {
74340 .clock_getres = hrtimer_get_res,
74341 .clock_get = posix_ktime_get_ts,
74342 .clock_set = do_posix_clock_nosettime,
74343 };
74344- struct k_clock clock_monotonic_raw = {
74345+ static struct k_clock clock_monotonic_raw = {
74346 .clock_getres = hrtimer_get_res,
74347 .clock_get = posix_get_monotonic_raw,
74348 .clock_set = do_posix_clock_nosettime,
74349 .timer_create = no_timer_create,
74350 .nsleep = no_nsleep,
74351 };
74352- struct k_clock clock_realtime_coarse = {
74353+ static struct k_clock clock_realtime_coarse = {
74354 .clock_getres = posix_get_coarse_res,
74355 .clock_get = posix_get_realtime_coarse,
74356 .clock_set = do_posix_clock_nosettime,
74357 .timer_create = no_timer_create,
74358 .nsleep = no_nsleep,
74359 };
74360- struct k_clock clock_monotonic_coarse = {
74361+ static struct k_clock clock_monotonic_coarse = {
74362 .clock_getres = posix_get_coarse_res,
74363 .clock_get = posix_get_monotonic_coarse,
74364 .clock_set = do_posix_clock_nosettime,
74365@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
74366 .nsleep = no_nsleep,
74367 };
74368
74369+ pax_track_stack();
74370+
74371 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
74372 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
74373 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
74374@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
74375 return;
74376 }
74377
74378- posix_clocks[clock_id] = *new_clock;
74379+ posix_clocks[clock_id] = new_clock;
74380 }
74381 EXPORT_SYMBOL_GPL(register_posix_clock);
74382
74383@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
74384 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
74385 return -EFAULT;
74386
74387+ /* only the CLOCK_REALTIME clock can be set, all other clocks
74388+ have their clock_set fptr set to a nosettime dummy function
74389+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
74390+ call common_clock_set, which calls do_sys_settimeofday, which
74391+ we hook
74392+ */
74393+
74394 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
74395 }
74396
74397diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
74398index 04a9e90..bc355aa 100644
74399--- a/kernel/power/hibernate.c
74400+++ b/kernel/power/hibernate.c
74401@@ -48,14 +48,14 @@ enum {
74402
74403 static int hibernation_mode = HIBERNATION_SHUTDOWN;
74404
74405-static struct platform_hibernation_ops *hibernation_ops;
74406+static const struct platform_hibernation_ops *hibernation_ops;
74407
74408 /**
74409 * hibernation_set_ops - set the global hibernate operations
74410 * @ops: the hibernation operations to use in subsequent hibernation transitions
74411 */
74412
74413-void hibernation_set_ops(struct platform_hibernation_ops *ops)
74414+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
74415 {
74416 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
74417 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
74418diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
74419index e8b3370..484c2e4 100644
74420--- a/kernel/power/poweroff.c
74421+++ b/kernel/power/poweroff.c
74422@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74423 .enable_mask = SYSRQ_ENABLE_BOOT,
74424 };
74425
74426-static int pm_sysrq_init(void)
74427+static int __init pm_sysrq_init(void)
74428 {
74429 register_sysrq_key('o', &sysrq_poweroff_op);
74430 return 0;
74431diff --git a/kernel/power/process.c b/kernel/power/process.c
74432index e7cd671..56d5f459 100644
74433--- a/kernel/power/process.c
74434+++ b/kernel/power/process.c
74435@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74436 struct timeval start, end;
74437 u64 elapsed_csecs64;
74438 unsigned int elapsed_csecs;
74439+ bool timedout = false;
74440
74441 do_gettimeofday(&start);
74442
74443 end_time = jiffies + TIMEOUT;
74444 do {
74445 todo = 0;
74446+ if (time_after(jiffies, end_time))
74447+ timedout = true;
74448 read_lock(&tasklist_lock);
74449 do_each_thread(g, p) {
74450 if (frozen(p) || !freezeable(p))
74451@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74452 * It is "frozen enough". If the task does wake
74453 * up, it will immediately call try_to_freeze.
74454 */
74455- if (!task_is_stopped_or_traced(p) &&
74456- !freezer_should_skip(p))
74457+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74458 todo++;
74459+ if (timedout) {
74460+ printk(KERN_ERR "Task refusing to freeze:\n");
74461+ sched_show_task(p);
74462+ }
74463+ }
74464 } while_each_thread(g, p);
74465 read_unlock(&tasklist_lock);
74466 yield(); /* Yield is okay here */
74467- if (time_after(jiffies, end_time))
74468- break;
74469- } while (todo);
74470+ } while (todo && !timedout);
74471
74472 do_gettimeofday(&end);
74473 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74474diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74475index 40dd021..fb30ceb 100644
74476--- a/kernel/power/suspend.c
74477+++ b/kernel/power/suspend.c
74478@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74479 [PM_SUSPEND_MEM] = "mem",
74480 };
74481
74482-static struct platform_suspend_ops *suspend_ops;
74483+static const struct platform_suspend_ops *suspend_ops;
74484
74485 /**
74486 * suspend_set_ops - Set the global suspend method table.
74487 * @ops: Pointer to ops structure.
74488 */
74489-void suspend_set_ops(struct platform_suspend_ops *ops)
74490+void suspend_set_ops(const struct platform_suspend_ops *ops)
74491 {
74492 mutex_lock(&pm_mutex);
74493 suspend_ops = ops;
74494diff --git a/kernel/printk.c b/kernel/printk.c
74495index 4cade47..4d17900 100644
74496--- a/kernel/printk.c
74497+++ b/kernel/printk.c
74498@@ -33,6 +33,7 @@
74499 #include <linux/bootmem.h>
74500 #include <linux/syscalls.h>
74501 #include <linux/kexec.h>
74502+#include <linux/syslog.h>
74503
74504 #include <asm/uaccess.h>
74505
74506@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74507 }
74508 #endif
74509
74510-/*
74511- * Commands to do_syslog:
74512- *
74513- * 0 -- Close the log. Currently a NOP.
74514- * 1 -- Open the log. Currently a NOP.
74515- * 2 -- Read from the log.
74516- * 3 -- Read all messages remaining in the ring buffer.
74517- * 4 -- Read and clear all messages remaining in the ring buffer
74518- * 5 -- Clear ring buffer.
74519- * 6 -- Disable printk's to console
74520- * 7 -- Enable printk's to console
74521- * 8 -- Set level of messages printed to console
74522- * 9 -- Return number of unread characters in the log buffer
74523- * 10 -- Return size of the log buffer
74524- */
74525-int do_syslog(int type, char __user *buf, int len)
74526+int do_syslog(int type, char __user *buf, int len, bool from_file)
74527 {
74528 unsigned i, j, limit, count;
74529 int do_clear = 0;
74530 char c;
74531 int error = 0;
74532
74533- error = security_syslog(type);
74534+#ifdef CONFIG_GRKERNSEC_DMESG
74535+ if (grsec_enable_dmesg &&
74536+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74537+ !capable(CAP_SYS_ADMIN))
74538+ return -EPERM;
74539+#endif
74540+
74541+ error = security_syslog(type, from_file);
74542 if (error)
74543 return error;
74544
74545 switch (type) {
74546- case 0: /* Close log */
74547+ case SYSLOG_ACTION_CLOSE: /* Close log */
74548 break;
74549- case 1: /* Open log */
74550+ case SYSLOG_ACTION_OPEN: /* Open log */
74551 break;
74552- case 2: /* Read from log */
74553+ case SYSLOG_ACTION_READ: /* Read from log */
74554 error = -EINVAL;
74555 if (!buf || len < 0)
74556 goto out;
74557@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74558 if (!error)
74559 error = i;
74560 break;
74561- case 4: /* Read/clear last kernel messages */
74562+ /* Read/clear last kernel messages */
74563+ case SYSLOG_ACTION_READ_CLEAR:
74564 do_clear = 1;
74565 /* FALL THRU */
74566- case 3: /* Read last kernel messages */
74567+ /* Read last kernel messages */
74568+ case SYSLOG_ACTION_READ_ALL:
74569 error = -EINVAL;
74570 if (!buf || len < 0)
74571 goto out;
74572@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74573 }
74574 }
74575 break;
74576- case 5: /* Clear ring buffer */
74577+ /* Clear ring buffer */
74578+ case SYSLOG_ACTION_CLEAR:
74579 logged_chars = 0;
74580 break;
74581- case 6: /* Disable logging to console */
74582+ /* Disable logging to console */
74583+ case SYSLOG_ACTION_CONSOLE_OFF:
74584 if (saved_console_loglevel == -1)
74585 saved_console_loglevel = console_loglevel;
74586 console_loglevel = minimum_console_loglevel;
74587 break;
74588- case 7: /* Enable logging to console */
74589+ /* Enable logging to console */
74590+ case SYSLOG_ACTION_CONSOLE_ON:
74591 if (saved_console_loglevel != -1) {
74592 console_loglevel = saved_console_loglevel;
74593 saved_console_loglevel = -1;
74594 }
74595 break;
74596- case 8: /* Set level of messages printed to console */
74597+ /* Set level of messages printed to console */
74598+ case SYSLOG_ACTION_CONSOLE_LEVEL:
74599 error = -EINVAL;
74600 if (len < 1 || len > 8)
74601 goto out;
74602@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74603 saved_console_loglevel = -1;
74604 error = 0;
74605 break;
74606- case 9: /* Number of chars in the log buffer */
74607+ /* Number of chars in the log buffer */
74608+ case SYSLOG_ACTION_SIZE_UNREAD:
74609 error = log_end - log_start;
74610 break;
74611- case 10: /* Size of the log buffer */
74612+ /* Size of the log buffer */
74613+ case SYSLOG_ACTION_SIZE_BUFFER:
74614 error = log_buf_len;
74615 break;
74616 default:
74617@@ -415,7 +416,7 @@ out:
74618
74619 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74620 {
74621- return do_syslog(type, buf, len);
74622+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74623 }
74624
74625 /*
74626diff --git a/kernel/profile.c b/kernel/profile.c
74627index dfadc5b..7f59404 100644
74628--- a/kernel/profile.c
74629+++ b/kernel/profile.c
74630@@ -39,7 +39,7 @@ struct profile_hit {
74631 /* Oprofile timer tick hook */
74632 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74633
74634-static atomic_t *prof_buffer;
74635+static atomic_unchecked_t *prof_buffer;
74636 static unsigned long prof_len, prof_shift;
74637
74638 int prof_on __read_mostly;
74639@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74640 hits[i].pc = 0;
74641 continue;
74642 }
74643- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74644+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74645 hits[i].hits = hits[i].pc = 0;
74646 }
74647 }
74648@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74649 * Add the current hit(s) and flush the write-queue out
74650 * to the global buffer:
74651 */
74652- atomic_add(nr_hits, &prof_buffer[pc]);
74653+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74654 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74655- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74656+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74657 hits[i].pc = hits[i].hits = 0;
74658 }
74659 out:
74660@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74661 if (prof_on != type || !prof_buffer)
74662 return;
74663 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74664- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74665+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74666 }
74667 #endif /* !CONFIG_SMP */
74668 EXPORT_SYMBOL_GPL(profile_hits);
74669@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74670 return -EFAULT;
74671 buf++; p++; count--; read++;
74672 }
74673- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74674+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74675 if (copy_to_user(buf, (void *)pnt, count))
74676 return -EFAULT;
74677 read += count;
74678@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74679 }
74680 #endif
74681 profile_discard_flip_buffers();
74682- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74683+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74684 return count;
74685 }
74686
74687diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74688index 05625f6..733bf70 100644
74689--- a/kernel/ptrace.c
74690+++ b/kernel/ptrace.c
74691@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74692 return ret;
74693 }
74694
74695-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74696+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74697+ unsigned int log)
74698 {
74699 const struct cred *cred = current_cred(), *tcred;
74700
74701@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74702 cred->gid != tcred->egid ||
74703 cred->gid != tcred->sgid ||
74704 cred->gid != tcred->gid) &&
74705- !capable(CAP_SYS_PTRACE)) {
74706+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74707+ (log && !capable(CAP_SYS_PTRACE)))
74708+ ) {
74709 rcu_read_unlock();
74710 return -EPERM;
74711 }
74712@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74713 smp_rmb();
74714 if (task->mm)
74715 dumpable = get_dumpable(task->mm);
74716- if (!dumpable && !capable(CAP_SYS_PTRACE))
74717+ if (!dumpable &&
74718+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74719+ (log && !capable(CAP_SYS_PTRACE))))
74720 return -EPERM;
74721
74722 return security_ptrace_access_check(task, mode);
74723@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
74724 {
74725 int err;
74726 task_lock(task);
74727- err = __ptrace_may_access(task, mode);
74728+ err = __ptrace_may_access(task, mode, 0);
74729+ task_unlock(task);
74730+ return !err;
74731+}
74732+
74733+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
74734+{
74735+ int err;
74736+ task_lock(task);
74737+ err = __ptrace_may_access(task, mode, 1);
74738 task_unlock(task);
74739 return !err;
74740 }
74741@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
74742 goto out;
74743
74744 task_lock(task);
74745- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
74746+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
74747 task_unlock(task);
74748 if (retval)
74749 goto unlock_creds;
74750@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
74751 goto unlock_tasklist;
74752
74753 task->ptrace = PT_PTRACED;
74754- if (capable(CAP_SYS_PTRACE))
74755+ if (capable_nolog(CAP_SYS_PTRACE))
74756 task->ptrace |= PT_PTRACE_CAP;
74757
74758 __ptrace_link(task, current);
74759@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
74760 {
74761 int copied = 0;
74762
74763+ pax_track_stack();
74764+
74765 while (len > 0) {
74766 char buf[128];
74767 int this_len, retval;
74768@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
74769 {
74770 int copied = 0;
74771
74772+ pax_track_stack();
74773+
74774 while (len > 0) {
74775 char buf[128];
74776 int this_len, retval;
74777@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
74778 int ret = -EIO;
74779 siginfo_t siginfo;
74780
74781+ pax_track_stack();
74782+
74783 switch (request) {
74784 case PTRACE_PEEKTEXT:
74785 case PTRACE_PEEKDATA:
74786@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
74787 ret = ptrace_setoptions(child, data);
74788 break;
74789 case PTRACE_GETEVENTMSG:
74790- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
74791+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
74792 break;
74793
74794 case PTRACE_GETSIGINFO:
74795 ret = ptrace_getsiginfo(child, &siginfo);
74796 if (!ret)
74797- ret = copy_siginfo_to_user((siginfo_t __user *) data,
74798+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
74799 &siginfo);
74800 break;
74801
74802 case PTRACE_SETSIGINFO:
74803- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
74804+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
74805 sizeof siginfo))
74806 ret = -EFAULT;
74807 else
74808@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
74809 goto out;
74810 }
74811
74812+ if (gr_handle_ptrace(child, request)) {
74813+ ret = -EPERM;
74814+ goto out_put_task_struct;
74815+ }
74816+
74817 if (request == PTRACE_ATTACH) {
74818 ret = ptrace_attach(child);
74819 /*
74820 * Some architectures need to do book-keeping after
74821 * a ptrace attach.
74822 */
74823- if (!ret)
74824+ if (!ret) {
74825 arch_ptrace_attach(child);
74826+ gr_audit_ptrace(child);
74827+ }
74828 goto out_put_task_struct;
74829 }
74830
74831@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
74832 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
74833 if (copied != sizeof(tmp))
74834 return -EIO;
74835- return put_user(tmp, (unsigned long __user *)data);
74836+ return put_user(tmp, (__force unsigned long __user *)data);
74837 }
74838
74839 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
74840@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
74841 siginfo_t siginfo;
74842 int ret;
74843
74844+ pax_track_stack();
74845+
74846 switch (request) {
74847 case PTRACE_PEEKTEXT:
74848 case PTRACE_PEEKDATA:
74849@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74850 goto out;
74851 }
74852
74853+ if (gr_handle_ptrace(child, request)) {
74854+ ret = -EPERM;
74855+ goto out_put_task_struct;
74856+ }
74857+
74858 if (request == PTRACE_ATTACH) {
74859 ret = ptrace_attach(child);
74860 /*
74861 * Some architectures need to do book-keeping after
74862 * a ptrace attach.
74863 */
74864- if (!ret)
74865+ if (!ret) {
74866 arch_ptrace_attach(child);
74867+ gr_audit_ptrace(child);
74868+ }
74869 goto out_put_task_struct;
74870 }
74871
74872diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
74873index 697c0a0..2402696 100644
74874--- a/kernel/rcutorture.c
74875+++ b/kernel/rcutorture.c
74876@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
74877 { 0 };
74878 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
74879 { 0 };
74880-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74881-static atomic_t n_rcu_torture_alloc;
74882-static atomic_t n_rcu_torture_alloc_fail;
74883-static atomic_t n_rcu_torture_free;
74884-static atomic_t n_rcu_torture_mberror;
74885-static atomic_t n_rcu_torture_error;
74886+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74887+static atomic_unchecked_t n_rcu_torture_alloc;
74888+static atomic_unchecked_t n_rcu_torture_alloc_fail;
74889+static atomic_unchecked_t n_rcu_torture_free;
74890+static atomic_unchecked_t n_rcu_torture_mberror;
74891+static atomic_unchecked_t n_rcu_torture_error;
74892 static long n_rcu_torture_timers;
74893 static struct list_head rcu_torture_removed;
74894 static cpumask_var_t shuffle_tmp_mask;
74895@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
74896
74897 spin_lock_bh(&rcu_torture_lock);
74898 if (list_empty(&rcu_torture_freelist)) {
74899- atomic_inc(&n_rcu_torture_alloc_fail);
74900+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
74901 spin_unlock_bh(&rcu_torture_lock);
74902 return NULL;
74903 }
74904- atomic_inc(&n_rcu_torture_alloc);
74905+ atomic_inc_unchecked(&n_rcu_torture_alloc);
74906 p = rcu_torture_freelist.next;
74907 list_del_init(p);
74908 spin_unlock_bh(&rcu_torture_lock);
74909@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
74910 static void
74911 rcu_torture_free(struct rcu_torture *p)
74912 {
74913- atomic_inc(&n_rcu_torture_free);
74914+ atomic_inc_unchecked(&n_rcu_torture_free);
74915 spin_lock_bh(&rcu_torture_lock);
74916 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
74917 spin_unlock_bh(&rcu_torture_lock);
74918@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
74919 i = rp->rtort_pipe_count;
74920 if (i > RCU_TORTURE_PIPE_LEN)
74921 i = RCU_TORTURE_PIPE_LEN;
74922- atomic_inc(&rcu_torture_wcount[i]);
74923+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74924 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74925 rp->rtort_mbtest = 0;
74926 rcu_torture_free(rp);
74927@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
74928 i = rp->rtort_pipe_count;
74929 if (i > RCU_TORTURE_PIPE_LEN)
74930 i = RCU_TORTURE_PIPE_LEN;
74931- atomic_inc(&rcu_torture_wcount[i]);
74932+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74933 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74934 rp->rtort_mbtest = 0;
74935 list_del(&rp->rtort_free);
74936@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
74937 i = old_rp->rtort_pipe_count;
74938 if (i > RCU_TORTURE_PIPE_LEN)
74939 i = RCU_TORTURE_PIPE_LEN;
74940- atomic_inc(&rcu_torture_wcount[i]);
74941+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74942 old_rp->rtort_pipe_count++;
74943 cur_ops->deferred_free(old_rp);
74944 }
74945@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
74946 return;
74947 }
74948 if (p->rtort_mbtest == 0)
74949- atomic_inc(&n_rcu_torture_mberror);
74950+ atomic_inc_unchecked(&n_rcu_torture_mberror);
74951 spin_lock(&rand_lock);
74952 cur_ops->read_delay(&rand);
74953 n_rcu_torture_timers++;
74954@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
74955 continue;
74956 }
74957 if (p->rtort_mbtest == 0)
74958- atomic_inc(&n_rcu_torture_mberror);
74959+ atomic_inc_unchecked(&n_rcu_torture_mberror);
74960 cur_ops->read_delay(&rand);
74961 preempt_disable();
74962 pipe_count = p->rtort_pipe_count;
74963@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
74964 rcu_torture_current,
74965 rcu_torture_current_version,
74966 list_empty(&rcu_torture_freelist),
74967- atomic_read(&n_rcu_torture_alloc),
74968- atomic_read(&n_rcu_torture_alloc_fail),
74969- atomic_read(&n_rcu_torture_free),
74970- atomic_read(&n_rcu_torture_mberror),
74971+ atomic_read_unchecked(&n_rcu_torture_alloc),
74972+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
74973+ atomic_read_unchecked(&n_rcu_torture_free),
74974+ atomic_read_unchecked(&n_rcu_torture_mberror),
74975 n_rcu_torture_timers);
74976- if (atomic_read(&n_rcu_torture_mberror) != 0)
74977+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
74978 cnt += sprintf(&page[cnt], " !!!");
74979 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
74980 if (i > 1) {
74981 cnt += sprintf(&page[cnt], "!!! ");
74982- atomic_inc(&n_rcu_torture_error);
74983+ atomic_inc_unchecked(&n_rcu_torture_error);
74984 WARN_ON_ONCE(1);
74985 }
74986 cnt += sprintf(&page[cnt], "Reader Pipe: ");
74987@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
74988 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
74989 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74990 cnt += sprintf(&page[cnt], " %d",
74991- atomic_read(&rcu_torture_wcount[i]));
74992+ atomic_read_unchecked(&rcu_torture_wcount[i]));
74993 }
74994 cnt += sprintf(&page[cnt], "\n");
74995 if (cur_ops->stats)
74996@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
74997
74998 if (cur_ops->cleanup)
74999 cur_ops->cleanup();
75000- if (atomic_read(&n_rcu_torture_error))
75001+ if (atomic_read_unchecked(&n_rcu_torture_error))
75002 rcu_torture_print_module_parms("End of test: FAILURE");
75003 else
75004 rcu_torture_print_module_parms("End of test: SUCCESS");
75005@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
75006
75007 rcu_torture_current = NULL;
75008 rcu_torture_current_version = 0;
75009- atomic_set(&n_rcu_torture_alloc, 0);
75010- atomic_set(&n_rcu_torture_alloc_fail, 0);
75011- atomic_set(&n_rcu_torture_free, 0);
75012- atomic_set(&n_rcu_torture_mberror, 0);
75013- atomic_set(&n_rcu_torture_error, 0);
75014+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75015+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75016+ atomic_set_unchecked(&n_rcu_torture_free, 0);
75017+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75018+ atomic_set_unchecked(&n_rcu_torture_error, 0);
75019 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75020- atomic_set(&rcu_torture_wcount[i], 0);
75021+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75022 for_each_possible_cpu(cpu) {
75023 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75024 per_cpu(rcu_torture_count, cpu)[i] = 0;
75025diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75026index 683c4f3..97f54c6 100644
75027--- a/kernel/rcutree.c
75028+++ b/kernel/rcutree.c
75029@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
75030 /*
75031 * Do softirq processing for the current CPU.
75032 */
75033-static void rcu_process_callbacks(struct softirq_action *unused)
75034+static void rcu_process_callbacks(void)
75035 {
75036 /*
75037 * Memory references from any prior RCU read-side critical sections
75038diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
75039index c03edf7..ac1b341 100644
75040--- a/kernel/rcutree_plugin.h
75041+++ b/kernel/rcutree_plugin.h
75042@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
75043 */
75044 void __rcu_read_lock(void)
75045 {
75046- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
75047+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
75048 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
75049 }
75050 EXPORT_SYMBOL_GPL(__rcu_read_lock);
75051@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
75052 struct task_struct *t = current;
75053
75054 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
75055- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
75056+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
75057 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
75058 rcu_read_unlock_special(t);
75059 }
75060diff --git a/kernel/relay.c b/kernel/relay.c
75061index 760c262..908e9ee 100644
75062--- a/kernel/relay.c
75063+++ b/kernel/relay.c
75064@@ -171,10 +171,14 @@ depopulate:
75065 */
75066 static struct rchan_buf *relay_create_buf(struct rchan *chan)
75067 {
75068- struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
75069+ struct rchan_buf *buf;
75070+
75071+ if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
75072+ return NULL;
75073+
75074+ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
75075 if (!buf)
75076 return NULL;
75077-
75078 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
75079 if (!buf->padding)
75080 goto free_buf;
75081@@ -581,6 +585,8 @@ struct rchan *relay_open(const char *base_filename,
75082
75083 if (!(subbuf_size && n_subbufs))
75084 return NULL;
75085+ if (subbuf_size > UINT_MAX / n_subbufs)
75086+ return NULL;
75087
75088 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
75089 if (!chan)
75090@@ -1222,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
75091 unsigned int flags,
75092 int *nonpad_ret)
75093 {
75094- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
75095+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
75096 struct rchan_buf *rbuf = in->private_data;
75097 unsigned int subbuf_size = rbuf->chan->subbuf_size;
75098 uint64_t pos = (uint64_t) *ppos;
75099@@ -1241,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
75100 .ops = &relay_pipe_buf_ops,
75101 .spd_release = relay_page_release,
75102 };
75103+ ssize_t ret;
75104+
75105+ pax_track_stack();
75106
75107 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
75108 return 0;
75109diff --git a/kernel/resource.c b/kernel/resource.c
75110index fb11a58..4e61ae1 100644
75111--- a/kernel/resource.c
75112+++ b/kernel/resource.c
75113@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
75114
75115 static int __init ioresources_init(void)
75116 {
75117+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75118+#ifdef CONFIG_GRKERNSEC_PROC_USER
75119+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
75120+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
75121+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75122+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
75123+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
75124+#endif
75125+#else
75126 proc_create("ioports", 0, NULL, &proc_ioports_operations);
75127 proc_create("iomem", 0, NULL, &proc_iomem_operations);
75128+#endif
75129 return 0;
75130 }
75131 __initcall(ioresources_init);
75132diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
75133index a56f629..1fc4989 100644
75134--- a/kernel/rtmutex-tester.c
75135+++ b/kernel/rtmutex-tester.c
75136@@ -21,7 +21,7 @@
75137 #define MAX_RT_TEST_MUTEXES 8
75138
75139 static spinlock_t rttest_lock;
75140-static atomic_t rttest_event;
75141+static atomic_unchecked_t rttest_event;
75142
75143 struct test_thread_data {
75144 int opcode;
75145@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75146
75147 case RTTEST_LOCKCONT:
75148 td->mutexes[td->opdata] = 1;
75149- td->event = atomic_add_return(1, &rttest_event);
75150+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75151 return 0;
75152
75153 case RTTEST_RESET:
75154@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75155 return 0;
75156
75157 case RTTEST_RESETEVENT:
75158- atomic_set(&rttest_event, 0);
75159+ atomic_set_unchecked(&rttest_event, 0);
75160 return 0;
75161
75162 default:
75163@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75164 return ret;
75165
75166 td->mutexes[id] = 1;
75167- td->event = atomic_add_return(1, &rttest_event);
75168+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75169 rt_mutex_lock(&mutexes[id]);
75170- td->event = atomic_add_return(1, &rttest_event);
75171+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75172 td->mutexes[id] = 4;
75173 return 0;
75174
75175@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75176 return ret;
75177
75178 td->mutexes[id] = 1;
75179- td->event = atomic_add_return(1, &rttest_event);
75180+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75181 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
75182- td->event = atomic_add_return(1, &rttest_event);
75183+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75184 td->mutexes[id] = ret ? 0 : 4;
75185 return ret ? -EINTR : 0;
75186
75187@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75188 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
75189 return ret;
75190
75191- td->event = atomic_add_return(1, &rttest_event);
75192+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75193 rt_mutex_unlock(&mutexes[id]);
75194- td->event = atomic_add_return(1, &rttest_event);
75195+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75196 td->mutexes[id] = 0;
75197 return 0;
75198
75199@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75200 break;
75201
75202 td->mutexes[dat] = 2;
75203- td->event = atomic_add_return(1, &rttest_event);
75204+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75205 break;
75206
75207 case RTTEST_LOCKBKL:
75208@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75209 return;
75210
75211 td->mutexes[dat] = 3;
75212- td->event = atomic_add_return(1, &rttest_event);
75213+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75214 break;
75215
75216 case RTTEST_LOCKNOWAIT:
75217@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75218 return;
75219
75220 td->mutexes[dat] = 1;
75221- td->event = atomic_add_return(1, &rttest_event);
75222+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75223 return;
75224
75225 case RTTEST_LOCKBKL:
75226diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
75227index 29bd4ba..8c5de90 100644
75228--- a/kernel/rtmutex.c
75229+++ b/kernel/rtmutex.c
75230@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
75231 */
75232 spin_lock_irqsave(&pendowner->pi_lock, flags);
75233
75234- WARN_ON(!pendowner->pi_blocked_on);
75235+ BUG_ON(!pendowner->pi_blocked_on);
75236 WARN_ON(pendowner->pi_blocked_on != waiter);
75237 WARN_ON(pendowner->pi_blocked_on->lock != lock);
75238
75239diff --git a/kernel/sched.c b/kernel/sched.c
75240index 0591df8..e3af3a4 100644
75241--- a/kernel/sched.c
75242+++ b/kernel/sched.c
75243@@ -5043,7 +5043,7 @@ out:
75244 * In CONFIG_NO_HZ case, the idle load balance owner will do the
75245 * rebalancing for all the cpus for whom scheduler ticks are stopped.
75246 */
75247-static void run_rebalance_domains(struct softirq_action *h)
75248+static void run_rebalance_domains(void)
75249 {
75250 int this_cpu = smp_processor_id();
75251 struct rq *this_rq = cpu_rq(this_cpu);
75252@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
75253 }
75254 }
75255
75256+#ifdef CONFIG_GRKERNSEC_SETXID
75257+extern void gr_delayed_cred_worker(void);
75258+static inline void gr_cred_schedule(void)
75259+{
75260+ if (unlikely(current->delayed_cred))
75261+ gr_delayed_cred_worker();
75262+}
75263+#else
75264+static inline void gr_cred_schedule(void)
75265+{
75266+}
75267+#endif
75268+
75269 /*
75270 * schedule() is the main scheduler function.
75271 */
75272@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
75273 struct rq *rq;
75274 int cpu;
75275
75276+ pax_track_stack();
75277+
75278 need_resched:
75279 preempt_disable();
75280 cpu = smp_processor_id();
75281@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
75282
75283 schedule_debug(prev);
75284
75285+ gr_cred_schedule();
75286+
75287 if (sched_feat(HRTICK))
75288 hrtick_clear(rq);
75289
75290@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
75291 * Look out! "owner" is an entirely speculative pointer
75292 * access and not reliable.
75293 */
75294-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75295+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
75296 {
75297 unsigned int cpu;
75298 struct rq *rq;
75299@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75300 * DEBUG_PAGEALLOC could have unmapped it if
75301 * the mutex owner just released it and exited.
75302 */
75303- if (probe_kernel_address(&owner->cpu, cpu))
75304+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
75305 return 0;
75306 #else
75307- cpu = owner->cpu;
75308+ cpu = task_thread_info(owner)->cpu;
75309 #endif
75310
75311 /*
75312@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75313 /*
75314 * Is that owner really running on that cpu?
75315 */
75316- if (task_thread_info(rq->curr) != owner || need_resched())
75317+ if (rq->curr != owner || need_resched())
75318 return 0;
75319
75320 cpu_relax();
75321@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
75322 /* convert nice value [19,-20] to rlimit style value [1,40] */
75323 int nice_rlim = 20 - nice;
75324
75325+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
75326+
75327 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
75328 capable(CAP_SYS_NICE));
75329 }
75330@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
75331 if (nice > 19)
75332 nice = 19;
75333
75334- if (increment < 0 && !can_nice(current, nice))
75335+ if (increment < 0 && (!can_nice(current, nice) ||
75336+ gr_handle_chroot_nice()))
75337 return -EPERM;
75338
75339 retval = security_task_setnice(current, nice);
75340@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
75341 long power;
75342 int weight;
75343
75344- WARN_ON(!sd || !sd->groups);
75345+ BUG_ON(!sd || !sd->groups);
75346
75347 if (cpu != group_first_cpu(sd->groups))
75348 return;
75349diff --git a/kernel/signal.c b/kernel/signal.c
75350index 2494827..cda80a0 100644
75351--- a/kernel/signal.c
75352+++ b/kernel/signal.c
75353@@ -41,12 +41,12 @@
75354
75355 static struct kmem_cache *sigqueue_cachep;
75356
75357-static void __user *sig_handler(struct task_struct *t, int sig)
75358+static __sighandler_t sig_handler(struct task_struct *t, int sig)
75359 {
75360 return t->sighand->action[sig - 1].sa.sa_handler;
75361 }
75362
75363-static int sig_handler_ignored(void __user *handler, int sig)
75364+static int sig_handler_ignored(__sighandler_t handler, int sig)
75365 {
75366 /* Is it explicitly or implicitly ignored? */
75367 return handler == SIG_IGN ||
75368@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
75369 static int sig_task_ignored(struct task_struct *t, int sig,
75370 int from_ancestor_ns)
75371 {
75372- void __user *handler;
75373+ __sighandler_t handler;
75374
75375 handler = sig_handler(t, sig);
75376
75377@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
75378 */
75379 user = get_uid(__task_cred(t)->user);
75380 atomic_inc(&user->sigpending);
75381+
75382+ if (!override_rlimit)
75383+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
75384 if (override_rlimit ||
75385 atomic_read(&user->sigpending) <=
75386 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
75387@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
75388
75389 int unhandled_signal(struct task_struct *tsk, int sig)
75390 {
75391- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
75392+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
75393 if (is_global_init(tsk))
75394 return 1;
75395 if (handler != SIG_IGN && handler != SIG_DFL)
75396@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
75397 }
75398 }
75399
75400+ /* allow glibc communication via tgkill to other threads in our
75401+ thread group */
75402+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
75403+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
75404+ && gr_handle_signal(t, sig))
75405+ return -EPERM;
75406+
75407 return security_task_kill(t, info, sig, 0);
75408 }
75409
75410@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75411 return send_signal(sig, info, p, 1);
75412 }
75413
75414-static int
75415+int
75416 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75417 {
75418 return send_signal(sig, info, t, 0);
75419@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75420 unsigned long int flags;
75421 int ret, blocked, ignored;
75422 struct k_sigaction *action;
75423+ int is_unhandled = 0;
75424
75425 spin_lock_irqsave(&t->sighand->siglock, flags);
75426 action = &t->sighand->action[sig-1];
75427@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75428 }
75429 if (action->sa.sa_handler == SIG_DFL)
75430 t->signal->flags &= ~SIGNAL_UNKILLABLE;
75431+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
75432+ is_unhandled = 1;
75433 ret = specific_send_sig_info(sig, info, t);
75434 spin_unlock_irqrestore(&t->sighand->siglock, flags);
75435
75436+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
75437+ normal operation */
75438+ if (is_unhandled) {
75439+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
75440+ gr_handle_crash(t, sig);
75441+ }
75442+
75443 return ret;
75444 }
75445
75446@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75447 {
75448 int ret = check_kill_permission(sig, info, p);
75449
75450- if (!ret && sig)
75451+ if (!ret && sig) {
75452 ret = do_send_sig_info(sig, info, p, true);
75453+ if (!ret)
75454+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75455+ }
75456
75457 return ret;
75458 }
75459@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75460 {
75461 siginfo_t info;
75462
75463+ pax_track_stack();
75464+
75465 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75466
75467 memset(&info, 0, sizeof info);
75468@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75469 int error = -ESRCH;
75470
75471 rcu_read_lock();
75472- p = find_task_by_vpid(pid);
75473+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75474+ /* allow glibc communication via tgkill to other threads in our
75475+ thread group */
75476+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75477+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
75478+ p = find_task_by_vpid_unrestricted(pid);
75479+ else
75480+#endif
75481+ p = find_task_by_vpid(pid);
75482 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75483 error = check_kill_permission(sig, info, p);
75484 /*
75485diff --git a/kernel/smp.c b/kernel/smp.c
75486index aa9cff3..631a0de 100644
75487--- a/kernel/smp.c
75488+++ b/kernel/smp.c
75489@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75490 }
75491 EXPORT_SYMBOL(smp_call_function);
75492
75493-void ipi_call_lock(void)
75494+void ipi_call_lock(void) __acquires(call_function.lock)
75495 {
75496 spin_lock(&call_function.lock);
75497 }
75498
75499-void ipi_call_unlock(void)
75500+void ipi_call_unlock(void) __releases(call_function.lock)
75501 {
75502 spin_unlock(&call_function.lock);
75503 }
75504
75505-void ipi_call_lock_irq(void)
75506+void ipi_call_lock_irq(void) __acquires(call_function.lock)
75507 {
75508 spin_lock_irq(&call_function.lock);
75509 }
75510
75511-void ipi_call_unlock_irq(void)
75512+void ipi_call_unlock_irq(void) __releases(call_function.lock)
75513 {
75514 spin_unlock_irq(&call_function.lock);
75515 }
75516diff --git a/kernel/softirq.c b/kernel/softirq.c
75517index 04a0252..580c512 100644
75518--- a/kernel/softirq.c
75519+++ b/kernel/softirq.c
75520@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75521
75522 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75523
75524-char *softirq_to_name[NR_SOFTIRQS] = {
75525+const char * const softirq_to_name[NR_SOFTIRQS] = {
75526 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75527 "TASKLET", "SCHED", "HRTIMER", "RCU"
75528 };
75529@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75530
75531 asmlinkage void __do_softirq(void)
75532 {
75533- struct softirq_action *h;
75534+ const struct softirq_action *h;
75535 __u32 pending;
75536 int max_restart = MAX_SOFTIRQ_RESTART;
75537 int cpu;
75538@@ -233,7 +233,7 @@ restart:
75539 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75540
75541 trace_softirq_entry(h, softirq_vec);
75542- h->action(h);
75543+ h->action();
75544 trace_softirq_exit(h, softirq_vec);
75545 if (unlikely(prev_count != preempt_count())) {
75546 printk(KERN_ERR "huh, entered softirq %td %s %p"
75547@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75548 local_irq_restore(flags);
75549 }
75550
75551-void open_softirq(int nr, void (*action)(struct softirq_action *))
75552+void open_softirq(int nr, void (*action)(void))
75553 {
75554- softirq_vec[nr].action = action;
75555+ pax_open_kernel();
75556+ *(void **)&softirq_vec[nr].action = action;
75557+ pax_close_kernel();
75558 }
75559
75560 /*
75561@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75562
75563 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75564
75565-static void tasklet_action(struct softirq_action *a)
75566+static void tasklet_action(void)
75567 {
75568 struct tasklet_struct *list;
75569
75570@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75571 }
75572 }
75573
75574-static void tasklet_hi_action(struct softirq_action *a)
75575+static void tasklet_hi_action(void)
75576 {
75577 struct tasklet_struct *list;
75578
75579diff --git a/kernel/sys.c b/kernel/sys.c
75580index e9512b1..f07185f 100644
75581--- a/kernel/sys.c
75582+++ b/kernel/sys.c
75583@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75584 error = -EACCES;
75585 goto out;
75586 }
75587+
75588+ if (gr_handle_chroot_setpriority(p, niceval)) {
75589+ error = -EACCES;
75590+ goto out;
75591+ }
75592+
75593 no_nice = security_task_setnice(p, niceval);
75594 if (no_nice) {
75595 error = no_nice;
75596@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75597 !(user = find_user(who)))
75598 goto out_unlock; /* No processes for this user */
75599
75600- do_each_thread(g, p)
75601+ do_each_thread(g, p) {
75602 if (__task_cred(p)->uid == who)
75603 error = set_one_prio(p, niceval, error);
75604- while_each_thread(g, p);
75605+ } while_each_thread(g, p);
75606 if (who != cred->uid)
75607 free_uid(user); /* For find_user() */
75608 break;
75609@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75610 !(user = find_user(who)))
75611 goto out_unlock; /* No processes for this user */
75612
75613- do_each_thread(g, p)
75614+ do_each_thread(g, p) {
75615 if (__task_cred(p)->uid == who) {
75616 niceval = 20 - task_nice(p);
75617 if (niceval > retval)
75618 retval = niceval;
75619 }
75620- while_each_thread(g, p);
75621+ } while_each_thread(g, p);
75622 if (who != cred->uid)
75623 free_uid(user); /* for find_user() */
75624 break;
75625@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75626 goto error;
75627 }
75628
75629+ if (gr_check_group_change(new->gid, new->egid, -1))
75630+ goto error;
75631+
75632 if (rgid != (gid_t) -1 ||
75633 (egid != (gid_t) -1 && egid != old->gid))
75634 new->sgid = new->egid;
75635@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75636 goto error;
75637
75638 retval = -EPERM;
75639+
75640+ if (gr_check_group_change(gid, gid, gid))
75641+ goto error;
75642+
75643 if (capable(CAP_SETGID))
75644 new->gid = new->egid = new->sgid = new->fsgid = gid;
75645 else if (gid == old->gid || gid == old->sgid)
75646@@ -559,7 +572,7 @@ error:
75647 /*
75648 * change the user struct in a credentials set to match the new UID
75649 */
75650-static int set_user(struct cred *new)
75651+int set_user(struct cred *new)
75652 {
75653 struct user_struct *new_user;
75654
75655@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75656 if (!new_user)
75657 return -EAGAIN;
75658
75659+ /*
75660+ * We don't fail in case of NPROC limit excess here because too many
75661+ * poorly written programs don't check set*uid() return code, assuming
75662+ * it never fails if called by root. We may still enforce NPROC limit
75663+ * for programs doing set*uid()+execve() by harmlessly deferring the
75664+ * failure to the execve() stage.
75665+ */
75666 if (atomic_read(&new_user->processes) >=
75667 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75668- new_user != INIT_USER) {
75669- free_uid(new_user);
75670- return -EAGAIN;
75671- }
75672+ new_user != INIT_USER)
75673+ current->flags |= PF_NPROC_EXCEEDED;
75674+ else
75675+ current->flags &= ~PF_NPROC_EXCEEDED;
75676
75677 free_uid(new->user);
75678 new->user = new_user;
75679@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75680 goto error;
75681 }
75682
75683+ if (gr_check_user_change(new->uid, new->euid, -1))
75684+ goto error;
75685+
75686 if (new->uid != old->uid) {
75687 retval = set_user(new);
75688 if (retval < 0)
75689@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75690 goto error;
75691
75692 retval = -EPERM;
75693+
75694+ if (gr_check_crash_uid(uid))
75695+ goto error;
75696+ if (gr_check_user_change(uid, uid, uid))
75697+ goto error;
75698+
75699 if (capable(CAP_SETUID)) {
75700 new->suid = new->uid = uid;
75701 if (uid != old->uid) {
75702@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75703 goto error;
75704 }
75705
75706+ if (gr_check_user_change(ruid, euid, -1))
75707+ goto error;
75708+
75709 if (ruid != (uid_t) -1) {
75710 new->uid = ruid;
75711 if (ruid != old->uid) {
75712@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75713 goto error;
75714 }
75715
75716+ if (gr_check_group_change(rgid, egid, -1))
75717+ goto error;
75718+
75719 if (rgid != (gid_t) -1)
75720 new->gid = rgid;
75721 if (egid != (gid_t) -1)
75722@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75723 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75724 goto error;
75725
75726+ if (gr_check_user_change(-1, -1, uid))
75727+ goto error;
75728+
75729 if (uid == old->uid || uid == old->euid ||
75730 uid == old->suid || uid == old->fsuid ||
75731 capable(CAP_SETUID)) {
75732@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75733 if (gid == old->gid || gid == old->egid ||
75734 gid == old->sgid || gid == old->fsgid ||
75735 capable(CAP_SETGID)) {
75736+ if (gr_check_group_change(-1, -1, gid))
75737+ goto error;
75738+
75739 if (gid != old_fsgid) {
75740 new->fsgid = gid;
75741 goto change_okay;
75742@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
75743 error = get_dumpable(me->mm);
75744 break;
75745 case PR_SET_DUMPABLE:
75746- if (arg2 < 0 || arg2 > 1) {
75747+ if (arg2 > 1) {
75748 error = -EINVAL;
75749 break;
75750 }
75751diff --git a/kernel/sysctl.c b/kernel/sysctl.c
75752index b8bd058..ab6a76be 100644
75753--- a/kernel/sysctl.c
75754+++ b/kernel/sysctl.c
75755@@ -63,6 +63,13 @@
75756 static int deprecated_sysctl_warning(struct __sysctl_args *args);
75757
75758 #if defined(CONFIG_SYSCTL)
75759+#include <linux/grsecurity.h>
75760+#include <linux/grinternal.h>
75761+
75762+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
75763+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
75764+ const int op);
75765+extern int gr_handle_chroot_sysctl(const int op);
75766
75767 /* External variables not in a header file. */
75768 extern int C_A_D;
75769@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
75770 static int proc_taint(struct ctl_table *table, int write,
75771 void __user *buffer, size_t *lenp, loff_t *ppos);
75772 #endif
75773+extern ctl_table grsecurity_table[];
75774
75775 static struct ctl_table root_table[];
75776 static struct ctl_table_root sysctl_table_root;
75777@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
75778 int sysctl_legacy_va_layout;
75779 #endif
75780
75781+#ifdef CONFIG_PAX_SOFTMODE
75782+static ctl_table pax_table[] = {
75783+ {
75784+ .ctl_name = CTL_UNNUMBERED,
75785+ .procname = "softmode",
75786+ .data = &pax_softmode,
75787+ .maxlen = sizeof(unsigned int),
75788+ .mode = 0600,
75789+ .proc_handler = &proc_dointvec,
75790+ },
75791+
75792+ { .ctl_name = 0 }
75793+};
75794+#endif
75795+
75796 extern int prove_locking;
75797 extern int lock_stat;
75798
75799@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
75800 #endif
75801
75802 static struct ctl_table kern_table[] = {
75803+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
75804+ {
75805+ .ctl_name = CTL_UNNUMBERED,
75806+ .procname = "grsecurity",
75807+ .mode = 0500,
75808+ .child = grsecurity_table,
75809+ },
75810+#endif
75811+
75812+#ifdef CONFIG_PAX_SOFTMODE
75813+ {
75814+ .ctl_name = CTL_UNNUMBERED,
75815+ .procname = "pax",
75816+ .mode = 0500,
75817+ .child = pax_table,
75818+ },
75819+#endif
75820+
75821 {
75822 .ctl_name = CTL_UNNUMBERED,
75823 .procname = "sched_child_runs_first",
75824@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
75825 .data = &modprobe_path,
75826 .maxlen = KMOD_PATH_LEN,
75827 .mode = 0644,
75828- .proc_handler = &proc_dostring,
75829- .strategy = &sysctl_string,
75830+ .proc_handler = &proc_dostring_modpriv,
75831+ .strategy = &sysctl_string_modpriv,
75832 },
75833 {
75834 .ctl_name = CTL_UNNUMBERED,
75835@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
75836 .mode = 0644,
75837 .proc_handler = &proc_dointvec
75838 },
75839+ {
75840+ .procname = "heap_stack_gap",
75841+ .data = &sysctl_heap_stack_gap,
75842+ .maxlen = sizeof(sysctl_heap_stack_gap),
75843+ .mode = 0644,
75844+ .proc_handler = proc_doulongvec_minmax,
75845+ },
75846 #else
75847 {
75848 .ctl_name = CTL_UNNUMBERED,
75849@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
75850 return 0;
75851 }
75852
75853+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
75854+
75855 static int parse_table(int __user *name, int nlen,
75856 void __user *oldval, size_t __user *oldlenp,
75857 void __user *newval, size_t newlen,
75858@@ -1821,7 +1871,7 @@ repeat:
75859 if (n == table->ctl_name) {
75860 int error;
75861 if (table->child) {
75862- if (sysctl_perm(root, table, MAY_EXEC))
75863+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
75864 return -EPERM;
75865 name++;
75866 nlen--;
75867@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
75868 int error;
75869 int mode;
75870
75871+ if (table->parent != NULL && table->parent->procname != NULL &&
75872+ table->procname != NULL &&
75873+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
75874+ return -EACCES;
75875+ if (gr_handle_chroot_sysctl(op))
75876+ return -EACCES;
75877+ error = gr_handle_sysctl(table, op);
75878+ if (error)
75879+ return error;
75880+
75881+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75882+ if (error)
75883+ return error;
75884+
75885+ if (root->permissions)
75886+ mode = root->permissions(root, current->nsproxy, table);
75887+ else
75888+ mode = table->mode;
75889+
75890+ return test_perm(mode, op);
75891+}
75892+
75893+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
75894+{
75895+ int error;
75896+ int mode;
75897+
75898 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75899 if (error)
75900 return error;
75901@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
75902 buffer, lenp, ppos);
75903 }
75904
75905+int proc_dostring_modpriv(struct ctl_table *table, int write,
75906+ void __user *buffer, size_t *lenp, loff_t *ppos)
75907+{
75908+ if (write && !capable(CAP_SYS_MODULE))
75909+ return -EPERM;
75910+
75911+ return _proc_do_string(table->data, table->maxlen, write,
75912+ buffer, lenp, ppos);
75913+}
75914+
75915
75916 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
75917 int *valp,
75918@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
75919 vleft = table->maxlen / sizeof(unsigned long);
75920 left = *lenp;
75921
75922- for (; left && vleft--; i++, min++, max++, first=0) {
75923+ for (; left && vleft--; i++, first=0) {
75924 if (write) {
75925 while (left) {
75926 char c;
75927@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
75928 return -ENOSYS;
75929 }
75930
75931+int proc_dostring_modpriv(struct ctl_table *table, int write,
75932+ void __user *buffer, size_t *lenp, loff_t *ppos)
75933+{
75934+ return -ENOSYS;
75935+}
75936+
75937 int proc_dointvec(struct ctl_table *table, int write,
75938 void __user *buffer, size_t *lenp, loff_t *ppos)
75939 {
75940@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
75941 return 1;
75942 }
75943
75944+int sysctl_string_modpriv(struct ctl_table *table,
75945+ void __user *oldval, size_t __user *oldlenp,
75946+ void __user *newval, size_t newlen)
75947+{
75948+ if (newval && newlen && !capable(CAP_SYS_MODULE))
75949+ return -EPERM;
75950+
75951+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
75952+}
75953+
75954 /*
75955 * This function makes sure that all of the integers in the vector
75956 * are between the minimum and maximum values given in the arrays
75957@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
75958 return -ENOSYS;
75959 }
75960
75961+int sysctl_string_modpriv(struct ctl_table *table,
75962+ void __user *oldval, size_t __user *oldlenp,
75963+ void __user *newval, size_t newlen)
75964+{
75965+ return -ENOSYS;
75966+}
75967+
75968 int sysctl_intvec(struct ctl_table *table,
75969 void __user *oldval, size_t __user *oldlenp,
75970 void __user *newval, size_t newlen)
75971@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
75972 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
75973 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
75974 EXPORT_SYMBOL(proc_dostring);
75975+EXPORT_SYMBOL(proc_dostring_modpriv);
75976 EXPORT_SYMBOL(proc_doulongvec_minmax);
75977 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
75978 EXPORT_SYMBOL(register_sysctl_table);
75979@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
75980 EXPORT_SYMBOL(sysctl_jiffies);
75981 EXPORT_SYMBOL(sysctl_ms_jiffies);
75982 EXPORT_SYMBOL(sysctl_string);
75983+EXPORT_SYMBOL(sysctl_string_modpriv);
75984 EXPORT_SYMBOL(sysctl_data);
75985 EXPORT_SYMBOL(unregister_sysctl_table);
75986diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
75987index 469193c..ea3ecb2 100644
75988--- a/kernel/sysctl_check.c
75989+++ b/kernel/sysctl_check.c
75990@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
75991 } else {
75992 if ((table->strategy == sysctl_data) ||
75993 (table->strategy == sysctl_string) ||
75994+ (table->strategy == sysctl_string_modpriv) ||
75995 (table->strategy == sysctl_intvec) ||
75996 (table->strategy == sysctl_jiffies) ||
75997 (table->strategy == sysctl_ms_jiffies) ||
75998 (table->proc_handler == proc_dostring) ||
75999+ (table->proc_handler == proc_dostring_modpriv) ||
76000 (table->proc_handler == proc_dointvec) ||
76001 (table->proc_handler == proc_dointvec_minmax) ||
76002 (table->proc_handler == proc_dointvec_jiffies) ||
76003diff --git a/kernel/taskstats.c b/kernel/taskstats.c
76004index a4ef542..798bcd7 100644
76005--- a/kernel/taskstats.c
76006+++ b/kernel/taskstats.c
76007@@ -26,9 +26,12 @@
76008 #include <linux/cgroup.h>
76009 #include <linux/fs.h>
76010 #include <linux/file.h>
76011+#include <linux/grsecurity.h>
76012 #include <net/genetlink.h>
76013 #include <asm/atomic.h>
76014
76015+extern int gr_is_taskstats_denied(int pid);
76016+
76017 /*
76018 * Maximum length of a cpumask that can be specified in
76019 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
76020@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
76021 size_t size;
76022 cpumask_var_t mask;
76023
76024+ if (gr_is_taskstats_denied(current->pid))
76025+ return -EACCES;
76026+
76027 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
76028 return -ENOMEM;
76029
76030diff --git a/kernel/time.c b/kernel/time.c
76031index 33df60e..ca768bd 100644
76032--- a/kernel/time.c
76033+++ b/kernel/time.c
76034@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
76035 return error;
76036
76037 if (tz) {
76038+ /* we log in do_settimeofday called below, so don't log twice
76039+ */
76040+ if (!tv)
76041+ gr_log_timechange();
76042+
76043 /* SMP safe, global irq locking makes it work. */
76044 sys_tz = *tz;
76045 update_vsyscall_tz();
76046@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
76047 * Avoid unnecessary multiplications/divisions in the
76048 * two most common HZ cases:
76049 */
76050-unsigned int inline jiffies_to_msecs(const unsigned long j)
76051+inline unsigned int jiffies_to_msecs(const unsigned long j)
76052 {
76053 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
76054 return (MSEC_PER_SEC / HZ) * j;
76055@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
76056 }
76057 EXPORT_SYMBOL(jiffies_to_msecs);
76058
76059-unsigned int inline jiffies_to_usecs(const unsigned long j)
76060+inline unsigned int jiffies_to_usecs(const unsigned long j)
76061 {
76062 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
76063 return (USEC_PER_SEC / HZ) * j;
76064diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
76065index 57b953f..06f149f 100644
76066--- a/kernel/time/tick-broadcast.c
76067+++ b/kernel/time/tick-broadcast.c
76068@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
76069 * then clear the broadcast bit.
76070 */
76071 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
76072- int cpu = smp_processor_id();
76073+ cpu = smp_processor_id();
76074
76075 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
76076 tick_broadcast_clear_oneshot(cpu);
76077diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
76078index 4a71cff..ffb5548 100644
76079--- a/kernel/time/timekeeping.c
76080+++ b/kernel/time/timekeeping.c
76081@@ -14,6 +14,7 @@
76082 #include <linux/init.h>
76083 #include <linux/mm.h>
76084 #include <linux/sched.h>
76085+#include <linux/grsecurity.h>
76086 #include <linux/sysdev.h>
76087 #include <linux/clocksource.h>
76088 #include <linux/jiffies.h>
76089@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
76090 */
76091 struct timespec ts = xtime;
76092 timespec_add_ns(&ts, nsec);
76093- ACCESS_ONCE(xtime_cache) = ts;
76094+ ACCESS_ONCE_RW(xtime_cache) = ts;
76095 }
76096
76097 /* must hold xtime_lock */
76098@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
76099 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
76100 return -EINVAL;
76101
76102+ gr_log_timechange();
76103+
76104 write_seqlock_irqsave(&xtime_lock, flags);
76105
76106 timekeeping_forward_now();
76107diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
76108index 54c0dda..e9095d9 100644
76109--- a/kernel/time/timer_list.c
76110+++ b/kernel/time/timer_list.c
76111@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
76112
76113 static void print_name_offset(struct seq_file *m, void *sym)
76114 {
76115+#ifdef CONFIG_GRKERNSEC_HIDESYM
76116+ SEQ_printf(m, "<%p>", NULL);
76117+#else
76118 char symname[KSYM_NAME_LEN];
76119
76120 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
76121 SEQ_printf(m, "<%p>", sym);
76122 else
76123 SEQ_printf(m, "%s", symname);
76124+#endif
76125 }
76126
76127 static void
76128@@ -112,7 +116,11 @@ next_one:
76129 static void
76130 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
76131 {
76132+#ifdef CONFIG_GRKERNSEC_HIDESYM
76133+ SEQ_printf(m, " .base: %p\n", NULL);
76134+#else
76135 SEQ_printf(m, " .base: %p\n", base);
76136+#endif
76137 SEQ_printf(m, " .index: %d\n",
76138 base->index);
76139 SEQ_printf(m, " .resolution: %Lu nsecs\n",
76140@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
76141 {
76142 struct proc_dir_entry *pe;
76143
76144+#ifdef CONFIG_GRKERNSEC_PROC_ADD
76145+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
76146+#else
76147 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
76148+#endif
76149 if (!pe)
76150 return -ENOMEM;
76151 return 0;
76152diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
76153index ee5681f..634089b 100644
76154--- a/kernel/time/timer_stats.c
76155+++ b/kernel/time/timer_stats.c
76156@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
76157 static unsigned long nr_entries;
76158 static struct entry entries[MAX_ENTRIES];
76159
76160-static atomic_t overflow_count;
76161+static atomic_unchecked_t overflow_count;
76162
76163 /*
76164 * The entries are in a hash-table, for fast lookup:
76165@@ -140,7 +140,7 @@ static void reset_entries(void)
76166 nr_entries = 0;
76167 memset(entries, 0, sizeof(entries));
76168 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
76169- atomic_set(&overflow_count, 0);
76170+ atomic_set_unchecked(&overflow_count, 0);
76171 }
76172
76173 static struct entry *alloc_entry(void)
76174@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76175 if (likely(entry))
76176 entry->count++;
76177 else
76178- atomic_inc(&overflow_count);
76179+ atomic_inc_unchecked(&overflow_count);
76180
76181 out_unlock:
76182 spin_unlock_irqrestore(lock, flags);
76183@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76184
76185 static void print_name_offset(struct seq_file *m, unsigned long addr)
76186 {
76187+#ifdef CONFIG_GRKERNSEC_HIDESYM
76188+ seq_printf(m, "<%p>", NULL);
76189+#else
76190 char symname[KSYM_NAME_LEN];
76191
76192 if (lookup_symbol_name(addr, symname) < 0)
76193 seq_printf(m, "<%p>", (void *)addr);
76194 else
76195 seq_printf(m, "%s", symname);
76196+#endif
76197 }
76198
76199 static int tstats_show(struct seq_file *m, void *v)
76200@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
76201
76202 seq_puts(m, "Timer Stats Version: v0.2\n");
76203 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
76204- if (atomic_read(&overflow_count))
76205+ if (atomic_read_unchecked(&overflow_count))
76206 seq_printf(m, "Overflow: %d entries\n",
76207- atomic_read(&overflow_count));
76208+ atomic_read_unchecked(&overflow_count));
76209
76210 for (i = 0; i < nr_entries; i++) {
76211 entry = entries + i;
76212@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
76213 {
76214 struct proc_dir_entry *pe;
76215
76216+#ifdef CONFIG_GRKERNSEC_PROC_ADD
76217+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
76218+#else
76219 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
76220+#endif
76221 if (!pe)
76222 return -ENOMEM;
76223 return 0;
76224diff --git a/kernel/timer.c b/kernel/timer.c
76225index cb3c1f1..8bf5526 100644
76226--- a/kernel/timer.c
76227+++ b/kernel/timer.c
76228@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
76229 /*
76230 * This function runs timers and the timer-tq in bottom half context.
76231 */
76232-static void run_timer_softirq(struct softirq_action *h)
76233+static void run_timer_softirq(void)
76234 {
76235 struct tvec_base *base = __get_cpu_var(tvec_bases);
76236
76237diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
76238index d9d6206..f19467e 100644
76239--- a/kernel/trace/blktrace.c
76240+++ b/kernel/trace/blktrace.c
76241@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
76242 struct blk_trace *bt = filp->private_data;
76243 char buf[16];
76244
76245- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
76246+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
76247
76248 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
76249 }
76250@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
76251 return 1;
76252
76253 bt = buf->chan->private_data;
76254- atomic_inc(&bt->dropped);
76255+ atomic_inc_unchecked(&bt->dropped);
76256 return 0;
76257 }
76258
76259@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
76260
76261 bt->dir = dir;
76262 bt->dev = dev;
76263- atomic_set(&bt->dropped, 0);
76264+ atomic_set_unchecked(&bt->dropped, 0);
76265
76266 ret = -EIO;
76267 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
76268diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
76269index 4872937..c794d40 100644
76270--- a/kernel/trace/ftrace.c
76271+++ b/kernel/trace/ftrace.c
76272@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
76273
76274 ip = rec->ip;
76275
76276+ ret = ftrace_arch_code_modify_prepare();
76277+ FTRACE_WARN_ON(ret);
76278+ if (ret)
76279+ return 0;
76280+
76281 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
76282+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
76283 if (ret) {
76284 ftrace_bug(ret, ip);
76285 rec->flags |= FTRACE_FL_FAILED;
76286- return 0;
76287 }
76288- return 1;
76289+ return ret ? 0 : 1;
76290 }
76291
76292 /*
76293diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
76294index e749a05..19c6e94 100644
76295--- a/kernel/trace/ring_buffer.c
76296+++ b/kernel/trace/ring_buffer.c
76297@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
76298 * the reader page). But if the next page is a header page,
76299 * its flags will be non zero.
76300 */
76301-static int inline
76302+static inline int
76303 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
76304 struct buffer_page *page, struct list_head *list)
76305 {
76306diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
76307index a2a2d1f..7f32b09 100644
76308--- a/kernel/trace/trace.c
76309+++ b/kernel/trace/trace.c
76310@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
76311 size_t rem;
76312 unsigned int i;
76313
76314+ pax_track_stack();
76315+
76316 /* copy the tracer to avoid using a global lock all around */
76317 mutex_lock(&trace_types_lock);
76318 if (unlikely(old_tracer != current_trace && current_trace)) {
76319@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
76320 int entries, size, i;
76321 size_t ret;
76322
76323+ pax_track_stack();
76324+
76325 if (*ppos & (PAGE_SIZE - 1)) {
76326 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
76327 return -EINVAL;
76328@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
76329 };
76330 #endif
76331
76332-static struct dentry *d_tracer;
76333-
76334 struct dentry *tracing_init_dentry(void)
76335 {
76336+ static struct dentry *d_tracer;
76337 static int once;
76338
76339 if (d_tracer)
76340@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
76341 return d_tracer;
76342 }
76343
76344-static struct dentry *d_percpu;
76345-
76346 struct dentry *tracing_dentry_percpu(void)
76347 {
76348+ static struct dentry *d_percpu;
76349 static int once;
76350 struct dentry *d_tracer;
76351
76352diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
76353index d128f65..f37b4af 100644
76354--- a/kernel/trace/trace_events.c
76355+++ b/kernel/trace/trace_events.c
76356@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
76357 * Modules must own their file_operations to keep up with
76358 * reference counting.
76359 */
76360+
76361 struct ftrace_module_file_ops {
76362 struct list_head list;
76363 struct module *mod;
76364- struct file_operations id;
76365- struct file_operations enable;
76366- struct file_operations format;
76367- struct file_operations filter;
76368 };
76369
76370 static void remove_subsystem_dir(const char *name)
76371@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
76372
76373 file_ops->mod = mod;
76374
76375- file_ops->id = ftrace_event_id_fops;
76376- file_ops->id.owner = mod;
76377-
76378- file_ops->enable = ftrace_enable_fops;
76379- file_ops->enable.owner = mod;
76380-
76381- file_ops->filter = ftrace_event_filter_fops;
76382- file_ops->filter.owner = mod;
76383-
76384- file_ops->format = ftrace_event_format_fops;
76385- file_ops->format.owner = mod;
76386+ pax_open_kernel();
76387+ *(void **)&mod->trace_id.owner = mod;
76388+ *(void **)&mod->trace_enable.owner = mod;
76389+ *(void **)&mod->trace_filter.owner = mod;
76390+ *(void **)&mod->trace_format.owner = mod;
76391+ pax_close_kernel();
76392
76393 list_add(&file_ops->list, &ftrace_module_file_list);
76394
76395@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
76396 call->mod = mod;
76397 list_add(&call->list, &ftrace_events);
76398 event_create_dir(call, d_events,
76399- &file_ops->id, &file_ops->enable,
76400- &file_ops->filter, &file_ops->format);
76401+ &mod->trace_id, &mod->trace_enable,
76402+ &mod->trace_filter, &mod->trace_format);
76403 }
76404 }
76405
76406diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76407index 0acd834..b800b56 100644
76408--- a/kernel/trace/trace_mmiotrace.c
76409+++ b/kernel/trace/trace_mmiotrace.c
76410@@ -23,7 +23,7 @@ struct header_iter {
76411 static struct trace_array *mmio_trace_array;
76412 static bool overrun_detected;
76413 static unsigned long prev_overruns;
76414-static atomic_t dropped_count;
76415+static atomic_unchecked_t dropped_count;
76416
76417 static void mmio_reset_data(struct trace_array *tr)
76418 {
76419@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76420
76421 static unsigned long count_overruns(struct trace_iterator *iter)
76422 {
76423- unsigned long cnt = atomic_xchg(&dropped_count, 0);
76424+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76425 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76426
76427 if (over > prev_overruns)
76428@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76429 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76430 sizeof(*entry), 0, pc);
76431 if (!event) {
76432- atomic_inc(&dropped_count);
76433+ atomic_inc_unchecked(&dropped_count);
76434 return;
76435 }
76436 entry = ring_buffer_event_data(event);
76437@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76438 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76439 sizeof(*entry), 0, pc);
76440 if (!event) {
76441- atomic_inc(&dropped_count);
76442+ atomic_inc_unchecked(&dropped_count);
76443 return;
76444 }
76445 entry = ring_buffer_event_data(event);
76446diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76447index b6c12c6..41fdc53 100644
76448--- a/kernel/trace/trace_output.c
76449+++ b/kernel/trace/trace_output.c
76450@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76451 return 0;
76452 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76453 if (!IS_ERR(p)) {
76454- p = mangle_path(s->buffer + s->len, p, "\n");
76455+ p = mangle_path(s->buffer + s->len, p, "\n\\");
76456 if (p) {
76457 s->len = p - s->buffer;
76458 return 1;
76459diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76460index 8504ac7..ecf0adb 100644
76461--- a/kernel/trace/trace_stack.c
76462+++ b/kernel/trace/trace_stack.c
76463@@ -50,7 +50,7 @@ static inline void check_stack(void)
76464 return;
76465
76466 /* we do not handle interrupt stacks yet */
76467- if (!object_is_on_stack(&this_size))
76468+ if (!object_starts_on_stack(&this_size))
76469 return;
76470
76471 local_irq_save(flags);
76472diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76473index 40cafb0..d5ead43 100644
76474--- a/kernel/trace/trace_workqueue.c
76475+++ b/kernel/trace/trace_workqueue.c
76476@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76477 int cpu;
76478 pid_t pid;
76479 /* Can be inserted from interrupt or user context, need to be atomic */
76480- atomic_t inserted;
76481+ atomic_unchecked_t inserted;
76482 /*
76483 * Don't need to be atomic, works are serialized in a single workqueue thread
76484 * on a single CPU.
76485@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76486 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76487 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76488 if (node->pid == wq_thread->pid) {
76489- atomic_inc(&node->inserted);
76490+ atomic_inc_unchecked(&node->inserted);
76491 goto found;
76492 }
76493 }
76494@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76495 tsk = get_pid_task(pid, PIDTYPE_PID);
76496 if (tsk) {
76497 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76498- atomic_read(&cws->inserted), cws->executed,
76499+ atomic_read_unchecked(&cws->inserted), cws->executed,
76500 tsk->comm);
76501 put_task_struct(tsk);
76502 }
76503diff --git a/kernel/user.c b/kernel/user.c
76504index 1b91701..8795237 100644
76505--- a/kernel/user.c
76506+++ b/kernel/user.c
76507@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76508 spin_lock_irq(&uidhash_lock);
76509 up = uid_hash_find(uid, hashent);
76510 if (up) {
76511+ put_user_ns(ns);
76512 key_put(new->uid_keyring);
76513 key_put(new->session_keyring);
76514 kmem_cache_free(uid_cachep, new);
76515diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76516index 234ceb1..ad74049 100644
76517--- a/lib/Kconfig.debug
76518+++ b/lib/Kconfig.debug
76519@@ -905,7 +905,7 @@ config LATENCYTOP
76520 select STACKTRACE
76521 select SCHEDSTATS
76522 select SCHED_DEBUG
76523- depends on HAVE_LATENCYTOP_SUPPORT
76524+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76525 help
76526 Enable this option if you want to use the LatencyTOP tool
76527 to find out which userspace is blocking on what kernel operations.
76528diff --git a/lib/bitmap.c b/lib/bitmap.c
76529index 7025658..8d14cab 100644
76530--- a/lib/bitmap.c
76531+++ b/lib/bitmap.c
76532@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76533 {
76534 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76535 u32 chunk;
76536- const char __user *ubuf = buf;
76537+ const char __user *ubuf = (const char __force_user *)buf;
76538
76539 bitmap_zero(maskp, nmaskbits);
76540
76541@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76542 {
76543 if (!access_ok(VERIFY_READ, ubuf, ulen))
76544 return -EFAULT;
76545- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76546+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76547 }
76548 EXPORT_SYMBOL(bitmap_parse_user);
76549
76550diff --git a/lib/bug.c b/lib/bug.c
76551index 300e41a..2779eb0 100644
76552--- a/lib/bug.c
76553+++ b/lib/bug.c
76554@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76555 return BUG_TRAP_TYPE_NONE;
76556
76557 bug = find_bug(bugaddr);
76558+ if (!bug)
76559+ return BUG_TRAP_TYPE_NONE;
76560
76561 printk(KERN_EMERG "------------[ cut here ]------------\n");
76562
76563diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76564index 2b413db..e21d207 100644
76565--- a/lib/debugobjects.c
76566+++ b/lib/debugobjects.c
76567@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76568 if (limit > 4)
76569 return;
76570
76571- is_on_stack = object_is_on_stack(addr);
76572+ is_on_stack = object_starts_on_stack(addr);
76573 if (is_on_stack == onstack)
76574 return;
76575
76576diff --git a/lib/devres.c b/lib/devres.c
76577index 72c8909..7543868 100644
76578--- a/lib/devres.c
76579+++ b/lib/devres.c
76580@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76581 {
76582 iounmap(addr);
76583 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76584- (void *)addr));
76585+ (void __force *)addr));
76586 }
76587 EXPORT_SYMBOL(devm_iounmap);
76588
76589@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76590 {
76591 ioport_unmap(addr);
76592 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76593- devm_ioport_map_match, (void *)addr));
76594+ devm_ioport_map_match, (void __force *)addr));
76595 }
76596 EXPORT_SYMBOL(devm_ioport_unmap);
76597
76598diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76599index 084e879..0674448 100644
76600--- a/lib/dma-debug.c
76601+++ b/lib/dma-debug.c
76602@@ -861,7 +861,7 @@ out:
76603
76604 static void check_for_stack(struct device *dev, void *addr)
76605 {
76606- if (object_is_on_stack(addr))
76607+ if (object_starts_on_stack(addr))
76608 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76609 "stack [addr=%p]\n", addr);
76610 }
76611diff --git a/lib/idr.c b/lib/idr.c
76612index eda7ba3..915dfae 100644
76613--- a/lib/idr.c
76614+++ b/lib/idr.c
76615@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76616 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76617
76618 /* if already at the top layer, we need to grow */
76619- if (id >= 1 << (idp->layers * IDR_BITS)) {
76620+ if (id >= (1 << (idp->layers * IDR_BITS))) {
76621 *starting_id = id;
76622 return IDR_NEED_TO_GROW;
76623 }
76624diff --git a/lib/inflate.c b/lib/inflate.c
76625index d102559..4215f31 100644
76626--- a/lib/inflate.c
76627+++ b/lib/inflate.c
76628@@ -266,7 +266,7 @@ static void free(void *where)
76629 malloc_ptr = free_mem_ptr;
76630 }
76631 #else
76632-#define malloc(a) kmalloc(a, GFP_KERNEL)
76633+#define malloc(a) kmalloc((a), GFP_KERNEL)
76634 #define free(a) kfree(a)
76635 #endif
76636
76637diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
76638index bd2bea9..6b3c95e 100644
76639--- a/lib/is_single_threaded.c
76640+++ b/lib/is_single_threaded.c
76641@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
76642 struct task_struct *p, *t;
76643 bool ret;
76644
76645+ if (!mm)
76646+ return true;
76647+
76648 if (atomic_read(&task->signal->live) != 1)
76649 return false;
76650
76651diff --git a/lib/kobject.c b/lib/kobject.c
76652index b512b74..8115eb1 100644
76653--- a/lib/kobject.c
76654+++ b/lib/kobject.c
76655@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76656 return ret;
76657 }
76658
76659-struct sysfs_ops kobj_sysfs_ops = {
76660+const struct sysfs_ops kobj_sysfs_ops = {
76661 .show = kobj_attr_show,
76662 .store = kobj_attr_store,
76663 };
76664@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76665 * If the kset was not able to be created, NULL will be returned.
76666 */
76667 static struct kset *kset_create(const char *name,
76668- struct kset_uevent_ops *uevent_ops,
76669+ const struct kset_uevent_ops *uevent_ops,
76670 struct kobject *parent_kobj)
76671 {
76672 struct kset *kset;
76673@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76674 * If the kset was not able to be created, NULL will be returned.
76675 */
76676 struct kset *kset_create_and_add(const char *name,
76677- struct kset_uevent_ops *uevent_ops,
76678+ const struct kset_uevent_ops *uevent_ops,
76679 struct kobject *parent_kobj)
76680 {
76681 struct kset *kset;
76682diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76683index 507b821..0bf8ed0 100644
76684--- a/lib/kobject_uevent.c
76685+++ b/lib/kobject_uevent.c
76686@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76687 const char *subsystem;
76688 struct kobject *top_kobj;
76689 struct kset *kset;
76690- struct kset_uevent_ops *uevent_ops;
76691+ const struct kset_uevent_ops *uevent_ops;
76692 u64 seq;
76693 int i = 0;
76694 int retval = 0;
76695diff --git a/lib/kref.c b/lib/kref.c
76696index 9ecd6e8..12c94c1 100644
76697--- a/lib/kref.c
76698+++ b/lib/kref.c
76699@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76700 */
76701 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76702 {
76703- WARN_ON(release == NULL);
76704+ BUG_ON(release == NULL);
76705 WARN_ON(release == (void (*)(struct kref *))kfree);
76706
76707 if (atomic_dec_and_test(&kref->refcount)) {
76708diff --git a/lib/parser.c b/lib/parser.c
76709index b00d020..1b34325 100644
76710--- a/lib/parser.c
76711+++ b/lib/parser.c
76712@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76713 char *buf;
76714 int ret;
76715
76716- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76717+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76718 if (!buf)
76719 return -ENOMEM;
76720 memcpy(buf, s->from, s->to - s->from);
76721diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76722index 92cdd99..a8149d7 100644
76723--- a/lib/radix-tree.c
76724+++ b/lib/radix-tree.c
76725@@ -81,7 +81,7 @@ struct radix_tree_preload {
76726 int nr;
76727 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76728 };
76729-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76730+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76731
76732 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76733 {
76734diff --git a/lib/random32.c b/lib/random32.c
76735index 217d5c4..45aba8a 100644
76736--- a/lib/random32.c
76737+++ b/lib/random32.c
76738@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
76739 */
76740 static inline u32 __seed(u32 x, u32 m)
76741 {
76742- return (x < m) ? x + m : x;
76743+ return (x <= m) ? x + m + 1 : x;
76744 }
76745
76746 /**
76747diff --git a/lib/vsprintf.c b/lib/vsprintf.c
76748index 33bed5e..1477e46 100644
76749--- a/lib/vsprintf.c
76750+++ b/lib/vsprintf.c
76751@@ -16,6 +16,9 @@
76752 * - scnprintf and vscnprintf
76753 */
76754
76755+#ifdef CONFIG_GRKERNSEC_HIDESYM
76756+#define __INCLUDED_BY_HIDESYM 1
76757+#endif
76758 #include <stdarg.h>
76759 #include <linux/module.h>
76760 #include <linux/types.h>
76761@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
76762 return buf;
76763 }
76764
76765-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
76766+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
76767 {
76768 int len, i;
76769
76770 if ((unsigned long)s < PAGE_SIZE)
76771- s = "<NULL>";
76772+ s = "(null)";
76773
76774 len = strnlen(s, spec.precision);
76775
76776@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
76777 unsigned long value = (unsigned long) ptr;
76778 #ifdef CONFIG_KALLSYMS
76779 char sym[KSYM_SYMBOL_LEN];
76780- if (ext != 'f' && ext != 's')
76781+ if (ext != 'f' && ext != 's' && ext != 'a')
76782 sprint_symbol(sym, value);
76783 else
76784 kallsyms_lookup(value, NULL, NULL, NULL, sym);
76785@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
76786 * - 'f' For simple symbolic function names without offset
76787 * - 'S' For symbolic direct pointers with offset
76788 * - 's' For symbolic direct pointers without offset
76789+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
76790+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
76791 * - 'R' For a struct resource pointer, it prints the range of
76792 * addresses (not the name nor the flags)
76793 * - 'M' For a 6-byte MAC address, it prints the address in the
76794@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76795 struct printf_spec spec)
76796 {
76797 if (!ptr)
76798- return string(buf, end, "(null)", spec);
76799+ return string(buf, end, "(nil)", spec);
76800
76801 switch (*fmt) {
76802 case 'F':
76803@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76804 case 's':
76805 /* Fallthrough */
76806 case 'S':
76807+#ifdef CONFIG_GRKERNSEC_HIDESYM
76808+ break;
76809+#else
76810+ return symbol_string(buf, end, ptr, spec, *fmt);
76811+#endif
76812+ case 'a':
76813+ /* Fallthrough */
76814+ case 'A':
76815 return symbol_string(buf, end, ptr, spec, *fmt);
76816 case 'R':
76817 return resource_string(buf, end, ptr, spec);
76818@@ -1445,7 +1458,7 @@ do { \
76819 size_t len;
76820 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
76821 || (unsigned long)save_str < PAGE_SIZE)
76822- save_str = "<NULL>";
76823+ save_str = "(null)";
76824 len = strlen(save_str);
76825 if (str + len + 1 < end)
76826 memcpy(str, save_str, len + 1);
76827@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76828 typeof(type) value; \
76829 if (sizeof(type) == 8) { \
76830 args = PTR_ALIGN(args, sizeof(u32)); \
76831- *(u32 *)&value = *(u32 *)args; \
76832- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
76833+ *(u32 *)&value = *(const u32 *)args; \
76834+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
76835 } else { \
76836 args = PTR_ALIGN(args, sizeof(type)); \
76837- value = *(typeof(type) *)args; \
76838+ value = *(const typeof(type) *)args; \
76839 } \
76840 args += sizeof(type); \
76841 value; \
76842@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76843 const char *str_arg = args;
76844 size_t len = strlen(str_arg);
76845 args += len + 1;
76846- str = string(str, end, (char *)str_arg, spec);
76847+ str = string(str, end, str_arg, spec);
76848 break;
76849 }
76850
76851diff --git a/localversion-grsec b/localversion-grsec
76852new file mode 100644
76853index 0000000..7cd6065
76854--- /dev/null
76855+++ b/localversion-grsec
76856@@ -0,0 +1 @@
76857+-grsec
76858diff --git a/mm/Kconfig b/mm/Kconfig
76859index 2c19c0b..f3c3f83 100644
76860--- a/mm/Kconfig
76861+++ b/mm/Kconfig
76862@@ -228,7 +228,7 @@ config KSM
76863 config DEFAULT_MMAP_MIN_ADDR
76864 int "Low address space to protect from user allocation"
76865 depends on MMU
76866- default 4096
76867+ default 65536
76868 help
76869 This is the portion of low virtual memory which should be protected
76870 from userspace allocation. Keeping a user from writing to low pages
76871diff --git a/mm/backing-dev.c b/mm/backing-dev.c
76872index 67a33a5..094dcf1 100644
76873--- a/mm/backing-dev.c
76874+++ b/mm/backing-dev.c
76875@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
76876 list_add_tail_rcu(&wb->list, &bdi->wb_list);
76877 spin_unlock(&bdi->wb_lock);
76878
76879- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
76880+ tsk->flags |= PF_SWAPWRITE;
76881 set_freezable();
76882
76883 /*
76884@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
76885 * Add the default flusher task that gets created for any bdi
76886 * that has dirty data pending writeout
76887 */
76888-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76889+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76890 {
76891 if (!bdi_cap_writeback_dirty(bdi))
76892 return;
76893diff --git a/mm/filemap.c b/mm/filemap.c
76894index a1fe378..e26702f 100644
76895--- a/mm/filemap.c
76896+++ b/mm/filemap.c
76897@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
76898 struct address_space *mapping = file->f_mapping;
76899
76900 if (!mapping->a_ops->readpage)
76901- return -ENOEXEC;
76902+ return -ENODEV;
76903 file_accessed(file);
76904 vma->vm_ops = &generic_file_vm_ops;
76905 vma->vm_flags |= VM_CAN_NONLINEAR;
76906@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
76907 *pos = i_size_read(inode);
76908
76909 if (limit != RLIM_INFINITY) {
76910+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
76911 if (*pos >= limit) {
76912 send_sig(SIGXFSZ, current, 0);
76913 return -EFBIG;
76914diff --git a/mm/fremap.c b/mm/fremap.c
76915index b6ec85a..a24ac22 100644
76916--- a/mm/fremap.c
76917+++ b/mm/fremap.c
76918@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76919 retry:
76920 vma = find_vma(mm, start);
76921
76922+#ifdef CONFIG_PAX_SEGMEXEC
76923+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
76924+ goto out;
76925+#endif
76926+
76927 /*
76928 * Make sure the vma is shared, that it supports prefaulting,
76929 * and that the remapped range is valid and fully within
76930@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76931 /*
76932 * drop PG_Mlocked flag for over-mapped range
76933 */
76934- unsigned int saved_flags = vma->vm_flags;
76935+ unsigned long saved_flags = vma->vm_flags;
76936 munlock_vma_pages_range(vma, start, start + size);
76937 vma->vm_flags = saved_flags;
76938 }
76939diff --git a/mm/highmem.c b/mm/highmem.c
76940index 9c1e627..5ca9447 100644
76941--- a/mm/highmem.c
76942+++ b/mm/highmem.c
76943@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
76944 * So no dangers, even with speculative execution.
76945 */
76946 page = pte_page(pkmap_page_table[i]);
76947+ pax_open_kernel();
76948 pte_clear(&init_mm, (unsigned long)page_address(page),
76949 &pkmap_page_table[i]);
76950-
76951+ pax_close_kernel();
76952 set_page_address(page, NULL);
76953 need_flush = 1;
76954 }
76955@@ -177,9 +178,11 @@ start:
76956 }
76957 }
76958 vaddr = PKMAP_ADDR(last_pkmap_nr);
76959+
76960+ pax_open_kernel();
76961 set_pte_at(&init_mm, vaddr,
76962 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
76963-
76964+ pax_close_kernel();
76965 pkmap_count[last_pkmap_nr] = 1;
76966 set_page_address(page, (void *)vaddr);
76967
76968diff --git a/mm/hugetlb.c b/mm/hugetlb.c
76969index 5e1e508..ac70275 100644
76970--- a/mm/hugetlb.c
76971+++ b/mm/hugetlb.c
76972@@ -869,6 +869,7 @@ free:
76973 list_del(&page->lru);
76974 enqueue_huge_page(h, page);
76975 }
76976+ spin_unlock(&hugetlb_lock);
76977
76978 /* Free unnecessary surplus pages to the buddy allocator */
76979 if (!list_empty(&surplus_list)) {
76980@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
76981 return 1;
76982 }
76983
76984+#ifdef CONFIG_PAX_SEGMEXEC
76985+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
76986+{
76987+ struct mm_struct *mm = vma->vm_mm;
76988+ struct vm_area_struct *vma_m;
76989+ unsigned long address_m;
76990+ pte_t *ptep_m;
76991+
76992+ vma_m = pax_find_mirror_vma(vma);
76993+ if (!vma_m)
76994+ return;
76995+
76996+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76997+ address_m = address + SEGMEXEC_TASK_SIZE;
76998+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
76999+ get_page(page_m);
77000+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
77001+}
77002+#endif
77003+
77004 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
77005 unsigned long address, pte_t *ptep, pte_t pte,
77006 struct page *pagecache_page)
77007@@ -2004,6 +2025,11 @@ retry_avoidcopy:
77008 huge_ptep_clear_flush(vma, address, ptep);
77009 set_huge_pte_at(mm, address, ptep,
77010 make_huge_pte(vma, new_page, 1));
77011+
77012+#ifdef CONFIG_PAX_SEGMEXEC
77013+ pax_mirror_huge_pte(vma, address, new_page);
77014+#endif
77015+
77016 /* Make the old page be freed below */
77017 new_page = old_page;
77018 }
77019@@ -2135,6 +2161,10 @@ retry:
77020 && (vma->vm_flags & VM_SHARED)));
77021 set_huge_pte_at(mm, address, ptep, new_pte);
77022
77023+#ifdef CONFIG_PAX_SEGMEXEC
77024+ pax_mirror_huge_pte(vma, address, page);
77025+#endif
77026+
77027 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
77028 /* Optimization, do the COW without a second fault */
77029 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
77030@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77031 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
77032 struct hstate *h = hstate_vma(vma);
77033
77034+#ifdef CONFIG_PAX_SEGMEXEC
77035+ struct vm_area_struct *vma_m;
77036+
77037+ vma_m = pax_find_mirror_vma(vma);
77038+ if (vma_m) {
77039+ unsigned long address_m;
77040+
77041+ if (vma->vm_start > vma_m->vm_start) {
77042+ address_m = address;
77043+ address -= SEGMEXEC_TASK_SIZE;
77044+ vma = vma_m;
77045+ h = hstate_vma(vma);
77046+ } else
77047+ address_m = address + SEGMEXEC_TASK_SIZE;
77048+
77049+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
77050+ return VM_FAULT_OOM;
77051+ address_m &= HPAGE_MASK;
77052+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
77053+ }
77054+#endif
77055+
77056 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
77057 if (!ptep)
77058 return VM_FAULT_OOM;
77059diff --git a/mm/internal.h b/mm/internal.h
77060index f03e8e2..7354343 100644
77061--- a/mm/internal.h
77062+++ b/mm/internal.h
77063@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
77064 * in mm/page_alloc.c
77065 */
77066 extern void __free_pages_bootmem(struct page *page, unsigned int order);
77067+extern void free_compound_page(struct page *page);
77068 extern void prep_compound_page(struct page *page, unsigned long order);
77069
77070
77071diff --git a/mm/kmemleak.c b/mm/kmemleak.c
77072index c346660..b47382f 100644
77073--- a/mm/kmemleak.c
77074+++ b/mm/kmemleak.c
77075@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
77076
77077 for (i = 0; i < object->trace_len; i++) {
77078 void *ptr = (void *)object->trace[i];
77079- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
77080+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
77081 }
77082 }
77083
77084diff --git a/mm/maccess.c b/mm/maccess.c
77085index 9073695..1127f348 100644
77086--- a/mm/maccess.c
77087+++ b/mm/maccess.c
77088@@ -14,7 +14,7 @@
77089 * Safely read from address @src to the buffer at @dst. If a kernel fault
77090 * happens, handle that and return -EFAULT.
77091 */
77092-long probe_kernel_read(void *dst, void *src, size_t size)
77093+long probe_kernel_read(void *dst, const void *src, size_t size)
77094 {
77095 long ret;
77096 mm_segment_t old_fs = get_fs();
77097@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
77098 set_fs(KERNEL_DS);
77099 pagefault_disable();
77100 ret = __copy_from_user_inatomic(dst,
77101- (__force const void __user *)src, size);
77102+ (const void __force_user *)src, size);
77103 pagefault_enable();
77104 set_fs(old_fs);
77105
77106@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
77107 * Safely write to address @dst from the buffer at @src. If a kernel fault
77108 * happens, handle that and return -EFAULT.
77109 */
77110-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
77111+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
77112 {
77113 long ret;
77114 mm_segment_t old_fs = get_fs();
77115
77116 set_fs(KERNEL_DS);
77117 pagefault_disable();
77118- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
77119+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
77120 pagefault_enable();
77121 set_fs(old_fs);
77122
77123diff --git a/mm/madvise.c b/mm/madvise.c
77124index 35b1479..499f7d4 100644
77125--- a/mm/madvise.c
77126+++ b/mm/madvise.c
77127@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
77128 pgoff_t pgoff;
77129 unsigned long new_flags = vma->vm_flags;
77130
77131+#ifdef CONFIG_PAX_SEGMEXEC
77132+ struct vm_area_struct *vma_m;
77133+#endif
77134+
77135 switch (behavior) {
77136 case MADV_NORMAL:
77137 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
77138@@ -103,6 +107,13 @@ success:
77139 /*
77140 * vm_flags is protected by the mmap_sem held in write mode.
77141 */
77142+
77143+#ifdef CONFIG_PAX_SEGMEXEC
77144+ vma_m = pax_find_mirror_vma(vma);
77145+ if (vma_m)
77146+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
77147+#endif
77148+
77149 vma->vm_flags = new_flags;
77150
77151 out:
77152@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77153 struct vm_area_struct ** prev,
77154 unsigned long start, unsigned long end)
77155 {
77156+
77157+#ifdef CONFIG_PAX_SEGMEXEC
77158+ struct vm_area_struct *vma_m;
77159+#endif
77160+
77161 *prev = vma;
77162 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
77163 return -EINVAL;
77164@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77165 zap_page_range(vma, start, end - start, &details);
77166 } else
77167 zap_page_range(vma, start, end - start, NULL);
77168+
77169+#ifdef CONFIG_PAX_SEGMEXEC
77170+ vma_m = pax_find_mirror_vma(vma);
77171+ if (vma_m) {
77172+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
77173+ struct zap_details details = {
77174+ .nonlinear_vma = vma_m,
77175+ .last_index = ULONG_MAX,
77176+ };
77177+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
77178+ } else
77179+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
77180+ }
77181+#endif
77182+
77183 return 0;
77184 }
77185
77186@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
77187 if (end < start)
77188 goto out;
77189
77190+#ifdef CONFIG_PAX_SEGMEXEC
77191+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77192+ if (end > SEGMEXEC_TASK_SIZE)
77193+ goto out;
77194+ } else
77195+#endif
77196+
77197+ if (end > TASK_SIZE)
77198+ goto out;
77199+
77200 error = 0;
77201 if (end == start)
77202 goto out;
77203diff --git a/mm/memory-failure.c b/mm/memory-failure.c
77204index 8aeba53..b4a4198 100644
77205--- a/mm/memory-failure.c
77206+++ b/mm/memory-failure.c
77207@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
77208
77209 int sysctl_memory_failure_recovery __read_mostly = 1;
77210
77211-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77212+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77213
77214 /*
77215 * Send all the processes who have the page mapped an ``action optional''
77216@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
77217 si.si_signo = SIGBUS;
77218 si.si_errno = 0;
77219 si.si_code = BUS_MCEERR_AO;
77220- si.si_addr = (void *)addr;
77221+ si.si_addr = (void __user *)addr;
77222 #ifdef __ARCH_SI_TRAPNO
77223 si.si_trapno = trapno;
77224 #endif
77225@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
77226 return 0;
77227 }
77228
77229- atomic_long_add(1, &mce_bad_pages);
77230+ atomic_long_add_unchecked(1, &mce_bad_pages);
77231
77232 /*
77233 * We need/can do nothing about count=0 pages.
77234diff --git a/mm/memory.c b/mm/memory.c
77235index 6c836d3..48f3264 100644
77236--- a/mm/memory.c
77237+++ b/mm/memory.c
77238@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
77239 return;
77240
77241 pmd = pmd_offset(pud, start);
77242+
77243+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
77244 pud_clear(pud);
77245 pmd_free_tlb(tlb, pmd, start);
77246+#endif
77247+
77248 }
77249
77250 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77251@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77252 if (end - 1 > ceiling - 1)
77253 return;
77254
77255+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
77256 pud = pud_offset(pgd, start);
77257 pgd_clear(pgd);
77258 pud_free_tlb(tlb, pud, start);
77259+#endif
77260+
77261 }
77262
77263 /*
77264@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77265 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
77266 i = 0;
77267
77268- do {
77269+ while (nr_pages) {
77270 struct vm_area_struct *vma;
77271
77272- vma = find_extend_vma(mm, start);
77273+ vma = find_vma(mm, start);
77274 if (!vma && in_gate_area(tsk, start)) {
77275 unsigned long pg = start & PAGE_MASK;
77276 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
77277@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77278 continue;
77279 }
77280
77281- if (!vma ||
77282+ if (!vma || start < vma->vm_start ||
77283 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
77284 !(vm_flags & vma->vm_flags))
77285 return i ? : -EFAULT;
77286@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77287 start += PAGE_SIZE;
77288 nr_pages--;
77289 } while (nr_pages && start < vma->vm_end);
77290- } while (nr_pages);
77291+ }
77292 return i;
77293 }
77294
77295@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
77296 page_add_file_rmap(page);
77297 set_pte_at(mm, addr, pte, mk_pte(page, prot));
77298
77299+#ifdef CONFIG_PAX_SEGMEXEC
77300+ pax_mirror_file_pte(vma, addr, page, ptl);
77301+#endif
77302+
77303 retval = 0;
77304 pte_unmap_unlock(pte, ptl);
77305 return retval;
77306@@ -1560,10 +1571,22 @@ out:
77307 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
77308 struct page *page)
77309 {
77310+
77311+#ifdef CONFIG_PAX_SEGMEXEC
77312+ struct vm_area_struct *vma_m;
77313+#endif
77314+
77315 if (addr < vma->vm_start || addr >= vma->vm_end)
77316 return -EFAULT;
77317 if (!page_count(page))
77318 return -EINVAL;
77319+
77320+#ifdef CONFIG_PAX_SEGMEXEC
77321+ vma_m = pax_find_mirror_vma(vma);
77322+ if (vma_m)
77323+ vma_m->vm_flags |= VM_INSERTPAGE;
77324+#endif
77325+
77326 vma->vm_flags |= VM_INSERTPAGE;
77327 return insert_page(vma, addr, page, vma->vm_page_prot);
77328 }
77329@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
77330 unsigned long pfn)
77331 {
77332 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
77333+ BUG_ON(vma->vm_mirror);
77334
77335 if (addr < vma->vm_start || addr >= vma->vm_end)
77336 return -EFAULT;
77337@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
77338 copy_user_highpage(dst, src, va, vma);
77339 }
77340
77341+#ifdef CONFIG_PAX_SEGMEXEC
77342+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
77343+{
77344+ struct mm_struct *mm = vma->vm_mm;
77345+ spinlock_t *ptl;
77346+ pte_t *pte, entry;
77347+
77348+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
77349+ entry = *pte;
77350+ if (!pte_present(entry)) {
77351+ if (!pte_none(entry)) {
77352+ BUG_ON(pte_file(entry));
77353+ free_swap_and_cache(pte_to_swp_entry(entry));
77354+ pte_clear_not_present_full(mm, address, pte, 0);
77355+ }
77356+ } else {
77357+ struct page *page;
77358+
77359+ flush_cache_page(vma, address, pte_pfn(entry));
77360+ entry = ptep_clear_flush(vma, address, pte);
77361+ BUG_ON(pte_dirty(entry));
77362+ page = vm_normal_page(vma, address, entry);
77363+ if (page) {
77364+ update_hiwater_rss(mm);
77365+ if (PageAnon(page))
77366+ dec_mm_counter(mm, anon_rss);
77367+ else
77368+ dec_mm_counter(mm, file_rss);
77369+ page_remove_rmap(page);
77370+ page_cache_release(page);
77371+ }
77372+ }
77373+ pte_unmap_unlock(pte, ptl);
77374+}
77375+
77376+/* PaX: if vma is mirrored, synchronize the mirror's PTE
77377+ *
77378+ * the ptl of the lower mapped page is held on entry and is not released on exit
77379+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
77380+ */
77381+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77382+{
77383+ struct mm_struct *mm = vma->vm_mm;
77384+ unsigned long address_m;
77385+ spinlock_t *ptl_m;
77386+ struct vm_area_struct *vma_m;
77387+ pmd_t *pmd_m;
77388+ pte_t *pte_m, entry_m;
77389+
77390+ BUG_ON(!page_m || !PageAnon(page_m));
77391+
77392+ vma_m = pax_find_mirror_vma(vma);
77393+ if (!vma_m)
77394+ return;
77395+
77396+ BUG_ON(!PageLocked(page_m));
77397+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77398+ address_m = address + SEGMEXEC_TASK_SIZE;
77399+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77400+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77401+ ptl_m = pte_lockptr(mm, pmd_m);
77402+ if (ptl != ptl_m) {
77403+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77404+ if (!pte_none(*pte_m))
77405+ goto out;
77406+ }
77407+
77408+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77409+ page_cache_get(page_m);
77410+ page_add_anon_rmap(page_m, vma_m, address_m);
77411+ inc_mm_counter(mm, anon_rss);
77412+ set_pte_at(mm, address_m, pte_m, entry_m);
77413+ update_mmu_cache(vma_m, address_m, entry_m);
77414+out:
77415+ if (ptl != ptl_m)
77416+ spin_unlock(ptl_m);
77417+ pte_unmap_nested(pte_m);
77418+ unlock_page(page_m);
77419+}
77420+
77421+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77422+{
77423+ struct mm_struct *mm = vma->vm_mm;
77424+ unsigned long address_m;
77425+ spinlock_t *ptl_m;
77426+ struct vm_area_struct *vma_m;
77427+ pmd_t *pmd_m;
77428+ pte_t *pte_m, entry_m;
77429+
77430+ BUG_ON(!page_m || PageAnon(page_m));
77431+
77432+ vma_m = pax_find_mirror_vma(vma);
77433+ if (!vma_m)
77434+ return;
77435+
77436+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77437+ address_m = address + SEGMEXEC_TASK_SIZE;
77438+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77439+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77440+ ptl_m = pte_lockptr(mm, pmd_m);
77441+ if (ptl != ptl_m) {
77442+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77443+ if (!pte_none(*pte_m))
77444+ goto out;
77445+ }
77446+
77447+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77448+ page_cache_get(page_m);
77449+ page_add_file_rmap(page_m);
77450+ inc_mm_counter(mm, file_rss);
77451+ set_pte_at(mm, address_m, pte_m, entry_m);
77452+ update_mmu_cache(vma_m, address_m, entry_m);
77453+out:
77454+ if (ptl != ptl_m)
77455+ spin_unlock(ptl_m);
77456+ pte_unmap_nested(pte_m);
77457+}
77458+
77459+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77460+{
77461+ struct mm_struct *mm = vma->vm_mm;
77462+ unsigned long address_m;
77463+ spinlock_t *ptl_m;
77464+ struct vm_area_struct *vma_m;
77465+ pmd_t *pmd_m;
77466+ pte_t *pte_m, entry_m;
77467+
77468+ vma_m = pax_find_mirror_vma(vma);
77469+ if (!vma_m)
77470+ return;
77471+
77472+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77473+ address_m = address + SEGMEXEC_TASK_SIZE;
77474+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77475+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77476+ ptl_m = pte_lockptr(mm, pmd_m);
77477+ if (ptl != ptl_m) {
77478+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77479+ if (!pte_none(*pte_m))
77480+ goto out;
77481+ }
77482+
77483+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77484+ set_pte_at(mm, address_m, pte_m, entry_m);
77485+out:
77486+ if (ptl != ptl_m)
77487+ spin_unlock(ptl_m);
77488+ pte_unmap_nested(pte_m);
77489+}
77490+
77491+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77492+{
77493+ struct page *page_m;
77494+ pte_t entry;
77495+
77496+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77497+ goto out;
77498+
77499+ entry = *pte;
77500+ page_m = vm_normal_page(vma, address, entry);
77501+ if (!page_m)
77502+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77503+ else if (PageAnon(page_m)) {
77504+ if (pax_find_mirror_vma(vma)) {
77505+ pte_unmap_unlock(pte, ptl);
77506+ lock_page(page_m);
77507+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77508+ if (pte_same(entry, *pte))
77509+ pax_mirror_anon_pte(vma, address, page_m, ptl);
77510+ else
77511+ unlock_page(page_m);
77512+ }
77513+ } else
77514+ pax_mirror_file_pte(vma, address, page_m, ptl);
77515+
77516+out:
77517+ pte_unmap_unlock(pte, ptl);
77518+}
77519+#endif
77520+
77521 /*
77522 * This routine handles present pages, when users try to write
77523 * to a shared page. It is done by copying the page to a new address
77524@@ -2156,6 +2360,12 @@ gotten:
77525 */
77526 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77527 if (likely(pte_same(*page_table, orig_pte))) {
77528+
77529+#ifdef CONFIG_PAX_SEGMEXEC
77530+ if (pax_find_mirror_vma(vma))
77531+ BUG_ON(!trylock_page(new_page));
77532+#endif
77533+
77534 if (old_page) {
77535 if (!PageAnon(old_page)) {
77536 dec_mm_counter(mm, file_rss);
77537@@ -2207,6 +2417,10 @@ gotten:
77538 page_remove_rmap(old_page);
77539 }
77540
77541+#ifdef CONFIG_PAX_SEGMEXEC
77542+ pax_mirror_anon_pte(vma, address, new_page, ptl);
77543+#endif
77544+
77545 /* Free the old page.. */
77546 new_page = old_page;
77547 ret |= VM_FAULT_WRITE;
77548@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77549 swap_free(entry);
77550 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77551 try_to_free_swap(page);
77552+
77553+#ifdef CONFIG_PAX_SEGMEXEC
77554+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77555+#endif
77556+
77557 unlock_page(page);
77558
77559 if (flags & FAULT_FLAG_WRITE) {
77560@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77561
77562 /* No need to invalidate - it was non-present before */
77563 update_mmu_cache(vma, address, pte);
77564+
77565+#ifdef CONFIG_PAX_SEGMEXEC
77566+ pax_mirror_anon_pte(vma, address, page, ptl);
77567+#endif
77568+
77569 unlock:
77570 pte_unmap_unlock(page_table, ptl);
77571 out:
77572@@ -2632,40 +2856,6 @@ out_release:
77573 }
77574
77575 /*
77576- * This is like a special single-page "expand_{down|up}wards()",
77577- * except we must first make sure that 'address{-|+}PAGE_SIZE'
77578- * doesn't hit another vma.
77579- */
77580-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77581-{
77582- address &= PAGE_MASK;
77583- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77584- struct vm_area_struct *prev = vma->vm_prev;
77585-
77586- /*
77587- * Is there a mapping abutting this one below?
77588- *
77589- * That's only ok if it's the same stack mapping
77590- * that has gotten split..
77591- */
77592- if (prev && prev->vm_end == address)
77593- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77594-
77595- expand_stack(vma, address - PAGE_SIZE);
77596- }
77597- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77598- struct vm_area_struct *next = vma->vm_next;
77599-
77600- /* As VM_GROWSDOWN but s/below/above/ */
77601- if (next && next->vm_start == address + PAGE_SIZE)
77602- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77603-
77604- expand_upwards(vma, address + PAGE_SIZE);
77605- }
77606- return 0;
77607-}
77608-
77609-/*
77610 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77611 * but allow concurrent faults), and pte mapped but not yet locked.
77612 * We return with mmap_sem still held, but pte unmapped and unlocked.
77613@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77614 unsigned long address, pte_t *page_table, pmd_t *pmd,
77615 unsigned int flags)
77616 {
77617- struct page *page;
77618+ struct page *page = NULL;
77619 spinlock_t *ptl;
77620 pte_t entry;
77621
77622- pte_unmap(page_table);
77623-
77624- /* Check if we need to add a guard page to the stack */
77625- if (check_stack_guard_page(vma, address) < 0)
77626- return VM_FAULT_SIGBUS;
77627-
77628- /* Use the zero-page for reads */
77629 if (!(flags & FAULT_FLAG_WRITE)) {
77630 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77631 vma->vm_page_prot));
77632- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77633+ ptl = pte_lockptr(mm, pmd);
77634+ spin_lock(ptl);
77635 if (!pte_none(*page_table))
77636 goto unlock;
77637 goto setpte;
77638 }
77639
77640 /* Allocate our own private page. */
77641+ pte_unmap(page_table);
77642+
77643 if (unlikely(anon_vma_prepare(vma)))
77644 goto oom;
77645 page = alloc_zeroed_user_highpage_movable(vma, address);
77646@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77647 if (!pte_none(*page_table))
77648 goto release;
77649
77650+#ifdef CONFIG_PAX_SEGMEXEC
77651+ if (pax_find_mirror_vma(vma))
77652+ BUG_ON(!trylock_page(page));
77653+#endif
77654+
77655 inc_mm_counter(mm, anon_rss);
77656 page_add_new_anon_rmap(page, vma, address);
77657 setpte:
77658@@ -2720,6 +2911,12 @@ setpte:
77659
77660 /* No need to invalidate - it was non-present before */
77661 update_mmu_cache(vma, address, entry);
77662+
77663+#ifdef CONFIG_PAX_SEGMEXEC
77664+ if (page)
77665+ pax_mirror_anon_pte(vma, address, page, ptl);
77666+#endif
77667+
77668 unlock:
77669 pte_unmap_unlock(page_table, ptl);
77670 return 0;
77671@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77672 */
77673 /* Only go through if we didn't race with anybody else... */
77674 if (likely(pte_same(*page_table, orig_pte))) {
77675+
77676+#ifdef CONFIG_PAX_SEGMEXEC
77677+ if (anon && pax_find_mirror_vma(vma))
77678+ BUG_ON(!trylock_page(page));
77679+#endif
77680+
77681 flush_icache_page(vma, page);
77682 entry = mk_pte(page, vma->vm_page_prot);
77683 if (flags & FAULT_FLAG_WRITE)
77684@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77685
77686 /* no need to invalidate: a not-present page won't be cached */
77687 update_mmu_cache(vma, address, entry);
77688+
77689+#ifdef CONFIG_PAX_SEGMEXEC
77690+ if (anon)
77691+ pax_mirror_anon_pte(vma, address, page, ptl);
77692+ else
77693+ pax_mirror_file_pte(vma, address, page, ptl);
77694+#endif
77695+
77696 } else {
77697 if (charged)
77698 mem_cgroup_uncharge_page(page);
77699@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77700 if (flags & FAULT_FLAG_WRITE)
77701 flush_tlb_page(vma, address);
77702 }
77703+
77704+#ifdef CONFIG_PAX_SEGMEXEC
77705+ pax_mirror_pte(vma, address, pte, pmd, ptl);
77706+ return 0;
77707+#endif
77708+
77709 unlock:
77710 pte_unmap_unlock(pte, ptl);
77711 return 0;
77712@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77713 pmd_t *pmd;
77714 pte_t *pte;
77715
77716+#ifdef CONFIG_PAX_SEGMEXEC
77717+ struct vm_area_struct *vma_m;
77718+#endif
77719+
77720 __set_current_state(TASK_RUNNING);
77721
77722 count_vm_event(PGFAULT);
77723@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77724 if (unlikely(is_vm_hugetlb_page(vma)))
77725 return hugetlb_fault(mm, vma, address, flags);
77726
77727+#ifdef CONFIG_PAX_SEGMEXEC
77728+ vma_m = pax_find_mirror_vma(vma);
77729+ if (vma_m) {
77730+ unsigned long address_m;
77731+ pgd_t *pgd_m;
77732+ pud_t *pud_m;
77733+ pmd_t *pmd_m;
77734+
77735+ if (vma->vm_start > vma_m->vm_start) {
77736+ address_m = address;
77737+ address -= SEGMEXEC_TASK_SIZE;
77738+ vma = vma_m;
77739+ } else
77740+ address_m = address + SEGMEXEC_TASK_SIZE;
77741+
77742+ pgd_m = pgd_offset(mm, address_m);
77743+ pud_m = pud_alloc(mm, pgd_m, address_m);
77744+ if (!pud_m)
77745+ return VM_FAULT_OOM;
77746+ pmd_m = pmd_alloc(mm, pud_m, address_m);
77747+ if (!pmd_m)
77748+ return VM_FAULT_OOM;
77749+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
77750+ return VM_FAULT_OOM;
77751+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
77752+ }
77753+#endif
77754+
77755 pgd = pgd_offset(mm, address);
77756 pud = pud_alloc(mm, pgd, address);
77757 if (!pud)
77758@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
77759 gate_vma.vm_start = FIXADDR_USER_START;
77760 gate_vma.vm_end = FIXADDR_USER_END;
77761 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
77762- gate_vma.vm_page_prot = __P101;
77763+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
77764 /*
77765 * Make sure the vDSO gets into every core dump.
77766 * Dumping its contents makes post-mortem fully interpretable later
77767diff --git a/mm/mempolicy.c b/mm/mempolicy.c
77768index 3c6e3e2..b1ddbb8 100644
77769--- a/mm/mempolicy.c
77770+++ b/mm/mempolicy.c
77771@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77772 struct vm_area_struct *next;
77773 int err;
77774
77775+#ifdef CONFIG_PAX_SEGMEXEC
77776+ struct vm_area_struct *vma_m;
77777+#endif
77778+
77779 err = 0;
77780 for (; vma && vma->vm_start < end; vma = next) {
77781 next = vma->vm_next;
77782@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77783 err = policy_vma(vma, new);
77784 if (err)
77785 break;
77786+
77787+#ifdef CONFIG_PAX_SEGMEXEC
77788+ vma_m = pax_find_mirror_vma(vma);
77789+ if (vma_m) {
77790+ err = policy_vma(vma_m, new);
77791+ if (err)
77792+ break;
77793+ }
77794+#endif
77795+
77796 }
77797 return err;
77798 }
77799@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
77800
77801 if (end < start)
77802 return -EINVAL;
77803+
77804+#ifdef CONFIG_PAX_SEGMEXEC
77805+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77806+ if (end > SEGMEXEC_TASK_SIZE)
77807+ return -EINVAL;
77808+ } else
77809+#endif
77810+
77811+ if (end > TASK_SIZE)
77812+ return -EINVAL;
77813+
77814 if (end == start)
77815 return 0;
77816
77817@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77818 if (!mm)
77819 return -EINVAL;
77820
77821+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77822+ if (mm != current->mm &&
77823+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77824+ err = -EPERM;
77825+ goto out;
77826+ }
77827+#endif
77828+
77829 /*
77830 * Check if this process has the right to modify the specified
77831 * process. The right exists if the process has administrative
77832@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77833 rcu_read_lock();
77834 tcred = __task_cred(task);
77835 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77836- cred->uid != tcred->suid && cred->uid != tcred->uid &&
77837- !capable(CAP_SYS_NICE)) {
77838+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77839 rcu_read_unlock();
77840 err = -EPERM;
77841 goto out;
77842@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
77843 }
77844 #endif
77845
77846+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77847+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
77848+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
77849+ _mm->pax_flags & MF_PAX_SEGMEXEC))
77850+#endif
77851+
77852 /*
77853 * Display pages allocated per node and memory policy via /proc.
77854 */
77855@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
77856 int n;
77857 char buffer[50];
77858
77859+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77860+ if (current->exec_id != m->exec_id) {
77861+ gr_log_badprocpid("numa_maps");
77862+ return 0;
77863+ }
77864+#endif
77865+
77866 if (!mm)
77867 return 0;
77868
77869@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
77870 mpol_to_str(buffer, sizeof(buffer), pol, 0);
77871 mpol_cond_put(pol);
77872
77873+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77874+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
77875+#else
77876 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
77877+#endif
77878
77879 if (file) {
77880 seq_printf(m, " file=");
77881- seq_path(m, &file->f_path, "\n\t= ");
77882+ seq_path(m, &file->f_path, "\n\t\\= ");
77883 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
77884 seq_printf(m, " heap");
77885 } else if (vma->vm_start <= mm->start_stack &&
77886diff --git a/mm/migrate.c b/mm/migrate.c
77887index aaca868..2ebecdc 100644
77888--- a/mm/migrate.c
77889+++ b/mm/migrate.c
77890@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
77891 unsigned long chunk_start;
77892 int err;
77893
77894+ pax_track_stack();
77895+
77896 task_nodes = cpuset_mems_allowed(task);
77897
77898 err = -ENOMEM;
77899@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77900 if (!mm)
77901 return -EINVAL;
77902
77903+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77904+ if (mm != current->mm &&
77905+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77906+ err = -EPERM;
77907+ goto out;
77908+ }
77909+#endif
77910+
77911 /*
77912 * Check if this process has the right to modify the specified
77913 * process. The right exists if the process has administrative
77914@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77915 rcu_read_lock();
77916 tcred = __task_cred(task);
77917 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77918- cred->uid != tcred->suid && cred->uid != tcred->uid &&
77919- !capable(CAP_SYS_NICE)) {
77920+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77921 rcu_read_unlock();
77922 err = -EPERM;
77923 goto out;
77924diff --git a/mm/mlock.c b/mm/mlock.c
77925index 2d846cf..98134d2 100644
77926--- a/mm/mlock.c
77927+++ b/mm/mlock.c
77928@@ -13,6 +13,7 @@
77929 #include <linux/pagemap.h>
77930 #include <linux/mempolicy.h>
77931 #include <linux/syscalls.h>
77932+#include <linux/security.h>
77933 #include <linux/sched.h>
77934 #include <linux/module.h>
77935 #include <linux/rmap.h>
77936@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
77937 }
77938 }
77939
77940-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
77941-{
77942- return (vma->vm_flags & VM_GROWSDOWN) &&
77943- (vma->vm_start == addr) &&
77944- !vma_stack_continue(vma->vm_prev, addr);
77945-}
77946-
77947 /**
77948 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
77949 * @vma: target vma
77950@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
77951 if (vma->vm_flags & VM_WRITE)
77952 gup_flags |= FOLL_WRITE;
77953
77954- /* We don't try to access the guard page of a stack vma */
77955- if (stack_guard_page(vma, start)) {
77956- addr += PAGE_SIZE;
77957- nr_pages--;
77958- }
77959-
77960 while (nr_pages > 0) {
77961 int i;
77962
77963@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
77964 {
77965 unsigned long nstart, end, tmp;
77966 struct vm_area_struct * vma, * prev;
77967- int error;
77968+ int error = -EINVAL;
77969
77970 len = PAGE_ALIGN(len);
77971 end = start + len;
77972@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
77973 return -EINVAL;
77974 if (end == start)
77975 return 0;
77976+ if (end > TASK_SIZE)
77977+ return -EINVAL;
77978+
77979 vma = find_vma_prev(current->mm, start, &prev);
77980 if (!vma || vma->vm_start > start)
77981 return -ENOMEM;
77982@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
77983 for (nstart = start ; ; ) {
77984 unsigned int newflags;
77985
77986+#ifdef CONFIG_PAX_SEGMEXEC
77987+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77988+ break;
77989+#endif
77990+
77991 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
77992
77993 newflags = vma->vm_flags | VM_LOCKED;
77994@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
77995 lock_limit >>= PAGE_SHIFT;
77996
77997 /* check against resource limits */
77998+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
77999 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
78000 error = do_mlock(start, len, 1);
78001 up_write(&current->mm->mmap_sem);
78002@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
78003 static int do_mlockall(int flags)
78004 {
78005 struct vm_area_struct * vma, * prev = NULL;
78006- unsigned int def_flags = 0;
78007
78008 if (flags & MCL_FUTURE)
78009- def_flags = VM_LOCKED;
78010- current->mm->def_flags = def_flags;
78011+ current->mm->def_flags |= VM_LOCKED;
78012+ else
78013+ current->mm->def_flags &= ~VM_LOCKED;
78014 if (flags == MCL_FUTURE)
78015 goto out;
78016
78017 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
78018- unsigned int newflags;
78019+ unsigned long newflags;
78020
78021+#ifdef CONFIG_PAX_SEGMEXEC
78022+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78023+ break;
78024+#endif
78025+
78026+ BUG_ON(vma->vm_end > TASK_SIZE);
78027 newflags = vma->vm_flags | VM_LOCKED;
78028 if (!(flags & MCL_CURRENT))
78029 newflags &= ~VM_LOCKED;
78030@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
78031 lock_limit >>= PAGE_SHIFT;
78032
78033 ret = -ENOMEM;
78034+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
78035 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
78036 capable(CAP_IPC_LOCK))
78037 ret = do_mlockall(flags);
78038diff --git a/mm/mmap.c b/mm/mmap.c
78039index 4b80cbf..cd3731c 100644
78040--- a/mm/mmap.c
78041+++ b/mm/mmap.c
78042@@ -45,6 +45,16 @@
78043 #define arch_rebalance_pgtables(addr, len) (addr)
78044 #endif
78045
78046+static inline void verify_mm_writelocked(struct mm_struct *mm)
78047+{
78048+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
78049+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78050+ up_read(&mm->mmap_sem);
78051+ BUG();
78052+ }
78053+#endif
78054+}
78055+
78056 static void unmap_region(struct mm_struct *mm,
78057 struct vm_area_struct *vma, struct vm_area_struct *prev,
78058 unsigned long start, unsigned long end);
78059@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
78060 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
78061 *
78062 */
78063-pgprot_t protection_map[16] = {
78064+pgprot_t protection_map[16] __read_only = {
78065 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
78066 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
78067 };
78068
78069 pgprot_t vm_get_page_prot(unsigned long vm_flags)
78070 {
78071- return __pgprot(pgprot_val(protection_map[vm_flags &
78072+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
78073 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
78074 pgprot_val(arch_vm_get_page_prot(vm_flags)));
78075+
78076+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78077+ if (!nx_enabled &&
78078+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
78079+ (vm_flags & (VM_READ | VM_WRITE)))
78080+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
78081+#endif
78082+
78083+ return prot;
78084 }
78085 EXPORT_SYMBOL(vm_get_page_prot);
78086
78087 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78088 int sysctl_overcommit_ratio = 50; /* default is 50% */
78089 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
78090+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
78091 struct percpu_counter vm_committed_as;
78092
78093 /*
78094@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
78095 struct vm_area_struct *next = vma->vm_next;
78096
78097 might_sleep();
78098+ BUG_ON(vma->vm_mirror);
78099 if (vma->vm_ops && vma->vm_ops->close)
78100 vma->vm_ops->close(vma);
78101 if (vma->vm_file) {
78102@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
78103 * not page aligned -Ram Gupta
78104 */
78105 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
78106+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
78107 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
78108 (mm->end_data - mm->start_data) > rlim)
78109 goto out;
78110@@ -704,6 +726,12 @@ static int
78111 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
78112 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78113 {
78114+
78115+#ifdef CONFIG_PAX_SEGMEXEC
78116+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
78117+ return 0;
78118+#endif
78119+
78120 if (is_mergeable_vma(vma, file, vm_flags) &&
78121 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78122 if (vma->vm_pgoff == vm_pgoff)
78123@@ -723,6 +751,12 @@ static int
78124 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78125 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78126 {
78127+
78128+#ifdef CONFIG_PAX_SEGMEXEC
78129+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
78130+ return 0;
78131+#endif
78132+
78133 if (is_mergeable_vma(vma, file, vm_flags) &&
78134 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78135 pgoff_t vm_pglen;
78136@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78137 struct vm_area_struct *vma_merge(struct mm_struct *mm,
78138 struct vm_area_struct *prev, unsigned long addr,
78139 unsigned long end, unsigned long vm_flags,
78140- struct anon_vma *anon_vma, struct file *file,
78141+ struct anon_vma *anon_vma, struct file *file,
78142 pgoff_t pgoff, struct mempolicy *policy)
78143 {
78144 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
78145 struct vm_area_struct *area, *next;
78146
78147+#ifdef CONFIG_PAX_SEGMEXEC
78148+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
78149+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
78150+
78151+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
78152+#endif
78153+
78154 /*
78155 * We later require that vma->vm_flags == vm_flags,
78156 * so this tests vma->vm_flags & VM_SPECIAL, too.
78157@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78158 if (next && next->vm_end == end) /* cases 6, 7, 8 */
78159 next = next->vm_next;
78160
78161+#ifdef CONFIG_PAX_SEGMEXEC
78162+ if (prev)
78163+ prev_m = pax_find_mirror_vma(prev);
78164+ if (area)
78165+ area_m = pax_find_mirror_vma(area);
78166+ if (next)
78167+ next_m = pax_find_mirror_vma(next);
78168+#endif
78169+
78170 /*
78171 * Can it merge with the predecessor?
78172 */
78173@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78174 /* cases 1, 6 */
78175 vma_adjust(prev, prev->vm_start,
78176 next->vm_end, prev->vm_pgoff, NULL);
78177- } else /* cases 2, 5, 7 */
78178+
78179+#ifdef CONFIG_PAX_SEGMEXEC
78180+ if (prev_m)
78181+ vma_adjust(prev_m, prev_m->vm_start,
78182+ next_m->vm_end, prev_m->vm_pgoff, NULL);
78183+#endif
78184+
78185+ } else { /* cases 2, 5, 7 */
78186 vma_adjust(prev, prev->vm_start,
78187 end, prev->vm_pgoff, NULL);
78188+
78189+#ifdef CONFIG_PAX_SEGMEXEC
78190+ if (prev_m)
78191+ vma_adjust(prev_m, prev_m->vm_start,
78192+ end_m, prev_m->vm_pgoff, NULL);
78193+#endif
78194+
78195+ }
78196 return prev;
78197 }
78198
78199@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78200 mpol_equal(policy, vma_policy(next)) &&
78201 can_vma_merge_before(next, vm_flags,
78202 anon_vma, file, pgoff+pglen)) {
78203- if (prev && addr < prev->vm_end) /* case 4 */
78204+ if (prev && addr < prev->vm_end) { /* case 4 */
78205 vma_adjust(prev, prev->vm_start,
78206 addr, prev->vm_pgoff, NULL);
78207- else /* cases 3, 8 */
78208+
78209+#ifdef CONFIG_PAX_SEGMEXEC
78210+ if (prev_m)
78211+ vma_adjust(prev_m, prev_m->vm_start,
78212+ addr_m, prev_m->vm_pgoff, NULL);
78213+#endif
78214+
78215+ } else { /* cases 3, 8 */
78216 vma_adjust(area, addr, next->vm_end,
78217 next->vm_pgoff - pglen, NULL);
78218+
78219+#ifdef CONFIG_PAX_SEGMEXEC
78220+ if (area_m)
78221+ vma_adjust(area_m, addr_m, next_m->vm_end,
78222+ next_m->vm_pgoff - pglen, NULL);
78223+#endif
78224+
78225+ }
78226 return area;
78227 }
78228
78229@@ -898,14 +978,11 @@ none:
78230 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
78231 struct file *file, long pages)
78232 {
78233- const unsigned long stack_flags
78234- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
78235-
78236 if (file) {
78237 mm->shared_vm += pages;
78238 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
78239 mm->exec_vm += pages;
78240- } else if (flags & stack_flags)
78241+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
78242 mm->stack_vm += pages;
78243 if (flags & (VM_RESERVED|VM_IO))
78244 mm->reserved_vm += pages;
78245@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78246 * (the exception is when the underlying filesystem is noexec
78247 * mounted, in which case we dont add PROT_EXEC.)
78248 */
78249- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78250+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78251 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
78252 prot |= PROT_EXEC;
78253
78254@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78255 /* Obtain the address to map to. we verify (or select) it and ensure
78256 * that it represents a valid section of the address space.
78257 */
78258- addr = get_unmapped_area(file, addr, len, pgoff, flags);
78259+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
78260 if (addr & ~PAGE_MASK)
78261 return addr;
78262
78263@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78264 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
78265 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
78266
78267+#ifdef CONFIG_PAX_MPROTECT
78268+ if (mm->pax_flags & MF_PAX_MPROTECT) {
78269+#ifndef CONFIG_PAX_MPROTECT_COMPAT
78270+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
78271+ gr_log_rwxmmap(file);
78272+
78273+#ifdef CONFIG_PAX_EMUPLT
78274+ vm_flags &= ~VM_EXEC;
78275+#else
78276+ return -EPERM;
78277+#endif
78278+
78279+ }
78280+
78281+ if (!(vm_flags & VM_EXEC))
78282+ vm_flags &= ~VM_MAYEXEC;
78283+#else
78284+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78285+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78286+#endif
78287+ else
78288+ vm_flags &= ~VM_MAYWRITE;
78289+ }
78290+#endif
78291+
78292+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78293+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
78294+ vm_flags &= ~VM_PAGEEXEC;
78295+#endif
78296+
78297 if (flags & MAP_LOCKED)
78298 if (!can_do_mlock())
78299 return -EPERM;
78300@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78301 locked += mm->locked_vm;
78302 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78303 lock_limit >>= PAGE_SHIFT;
78304+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78305 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
78306 return -EAGAIN;
78307 }
78308@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78309 if (error)
78310 return error;
78311
78312+ if (!gr_acl_handle_mmap(file, prot))
78313+ return -EACCES;
78314+
78315 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
78316 }
78317 EXPORT_SYMBOL(do_mmap_pgoff);
78318@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
78319 */
78320 int vma_wants_writenotify(struct vm_area_struct *vma)
78321 {
78322- unsigned int vm_flags = vma->vm_flags;
78323+ unsigned long vm_flags = vma->vm_flags;
78324
78325 /* If it was private or non-writable, the write bit is already clear */
78326- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
78327+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
78328 return 0;
78329
78330 /* The backer wishes to know when pages are first written to? */
78331@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
78332 unsigned long charged = 0;
78333 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
78334
78335+#ifdef CONFIG_PAX_SEGMEXEC
78336+ struct vm_area_struct *vma_m = NULL;
78337+#endif
78338+
78339+ /*
78340+ * mm->mmap_sem is required to protect against another thread
78341+ * changing the mappings in case we sleep.
78342+ */
78343+ verify_mm_writelocked(mm);
78344+
78345 /* Clear old maps */
78346 error = -ENOMEM;
78347-munmap_back:
78348 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78349 if (vma && vma->vm_start < addr + len) {
78350 if (do_munmap(mm, addr, len))
78351 return -ENOMEM;
78352- goto munmap_back;
78353+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78354+ BUG_ON(vma && vma->vm_start < addr + len);
78355 }
78356
78357 /* Check against address space limit. */
78358@@ -1173,6 +1294,16 @@ munmap_back:
78359 goto unacct_error;
78360 }
78361
78362+#ifdef CONFIG_PAX_SEGMEXEC
78363+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
78364+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78365+ if (!vma_m) {
78366+ error = -ENOMEM;
78367+ goto free_vma;
78368+ }
78369+ }
78370+#endif
78371+
78372 vma->vm_mm = mm;
78373 vma->vm_start = addr;
78374 vma->vm_end = addr + len;
78375@@ -1195,6 +1326,19 @@ munmap_back:
78376 error = file->f_op->mmap(file, vma);
78377 if (error)
78378 goto unmap_and_free_vma;
78379+
78380+#ifdef CONFIG_PAX_SEGMEXEC
78381+ if (vma_m && (vm_flags & VM_EXECUTABLE))
78382+ added_exe_file_vma(mm);
78383+#endif
78384+
78385+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78386+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
78387+ vma->vm_flags |= VM_PAGEEXEC;
78388+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78389+ }
78390+#endif
78391+
78392 if (vm_flags & VM_EXECUTABLE)
78393 added_exe_file_vma(mm);
78394
78395@@ -1218,6 +1362,11 @@ munmap_back:
78396 vma_link(mm, vma, prev, rb_link, rb_parent);
78397 file = vma->vm_file;
78398
78399+#ifdef CONFIG_PAX_SEGMEXEC
78400+ if (vma_m)
78401+ pax_mirror_vma(vma_m, vma);
78402+#endif
78403+
78404 /* Once vma denies write, undo our temporary denial count */
78405 if (correct_wcount)
78406 atomic_inc(&inode->i_writecount);
78407@@ -1226,6 +1375,7 @@ out:
78408
78409 mm->total_vm += len >> PAGE_SHIFT;
78410 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78411+ track_exec_limit(mm, addr, addr + len, vm_flags);
78412 if (vm_flags & VM_LOCKED) {
78413 /*
78414 * makes pages present; downgrades, drops, reacquires mmap_sem
78415@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
78416 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78417 charged = 0;
78418 free_vma:
78419+
78420+#ifdef CONFIG_PAX_SEGMEXEC
78421+ if (vma_m)
78422+ kmem_cache_free(vm_area_cachep, vma_m);
78423+#endif
78424+
78425 kmem_cache_free(vm_area_cachep, vma);
78426 unacct_error:
78427 if (charged)
78428@@ -1255,6 +1411,44 @@ unacct_error:
78429 return error;
78430 }
78431
78432+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78433+{
78434+ if (!vma) {
78435+#ifdef CONFIG_STACK_GROWSUP
78436+ if (addr > sysctl_heap_stack_gap)
78437+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78438+ else
78439+ vma = find_vma(current->mm, 0);
78440+ if (vma && (vma->vm_flags & VM_GROWSUP))
78441+ return false;
78442+#endif
78443+ return true;
78444+ }
78445+
78446+ if (addr + len > vma->vm_start)
78447+ return false;
78448+
78449+ if (vma->vm_flags & VM_GROWSDOWN)
78450+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78451+#ifdef CONFIG_STACK_GROWSUP
78452+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78453+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78454+#endif
78455+
78456+ return true;
78457+}
78458+
78459+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78460+{
78461+ if (vma->vm_start < len)
78462+ return -ENOMEM;
78463+ if (!(vma->vm_flags & VM_GROWSDOWN))
78464+ return vma->vm_start - len;
78465+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
78466+ return vma->vm_start - len - sysctl_heap_stack_gap;
78467+ return -ENOMEM;
78468+}
78469+
78470 /* Get an address range which is currently unmapped.
78471 * For shmat() with addr=0.
78472 *
78473@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78474 if (flags & MAP_FIXED)
78475 return addr;
78476
78477+#ifdef CONFIG_PAX_RANDMMAP
78478+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78479+#endif
78480+
78481 if (addr) {
78482 addr = PAGE_ALIGN(addr);
78483- vma = find_vma(mm, addr);
78484- if (TASK_SIZE - len >= addr &&
78485- (!vma || addr + len <= vma->vm_start))
78486- return addr;
78487+ if (TASK_SIZE - len >= addr) {
78488+ vma = find_vma(mm, addr);
78489+ if (check_heap_stack_gap(vma, addr, len))
78490+ return addr;
78491+ }
78492 }
78493 if (len > mm->cached_hole_size) {
78494- start_addr = addr = mm->free_area_cache;
78495+ start_addr = addr = mm->free_area_cache;
78496 } else {
78497- start_addr = addr = TASK_UNMAPPED_BASE;
78498- mm->cached_hole_size = 0;
78499+ start_addr = addr = mm->mmap_base;
78500+ mm->cached_hole_size = 0;
78501 }
78502
78503 full_search:
78504@@ -1303,34 +1502,40 @@ full_search:
78505 * Start a new search - just in case we missed
78506 * some holes.
78507 */
78508- if (start_addr != TASK_UNMAPPED_BASE) {
78509- addr = TASK_UNMAPPED_BASE;
78510- start_addr = addr;
78511+ if (start_addr != mm->mmap_base) {
78512+ start_addr = addr = mm->mmap_base;
78513 mm->cached_hole_size = 0;
78514 goto full_search;
78515 }
78516 return -ENOMEM;
78517 }
78518- if (!vma || addr + len <= vma->vm_start) {
78519- /*
78520- * Remember the place where we stopped the search:
78521- */
78522- mm->free_area_cache = addr + len;
78523- return addr;
78524- }
78525+ if (check_heap_stack_gap(vma, addr, len))
78526+ break;
78527 if (addr + mm->cached_hole_size < vma->vm_start)
78528 mm->cached_hole_size = vma->vm_start - addr;
78529 addr = vma->vm_end;
78530 }
78531+
78532+ /*
78533+ * Remember the place where we stopped the search:
78534+ */
78535+ mm->free_area_cache = addr + len;
78536+ return addr;
78537 }
78538 #endif
78539
78540 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78541 {
78542+
78543+#ifdef CONFIG_PAX_SEGMEXEC
78544+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78545+ return;
78546+#endif
78547+
78548 /*
78549 * Is this a new hole at the lowest possible address?
78550 */
78551- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78552+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78553 mm->free_area_cache = addr;
78554 mm->cached_hole_size = ~0UL;
78555 }
78556@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78557 {
78558 struct vm_area_struct *vma;
78559 struct mm_struct *mm = current->mm;
78560- unsigned long addr = addr0;
78561+ unsigned long base = mm->mmap_base, addr = addr0;
78562
78563 /* requested length too big for entire address space */
78564 if (len > TASK_SIZE)
78565@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78566 if (flags & MAP_FIXED)
78567 return addr;
78568
78569+#ifdef CONFIG_PAX_RANDMMAP
78570+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78571+#endif
78572+
78573 /* requesting a specific address */
78574 if (addr) {
78575 addr = PAGE_ALIGN(addr);
78576- vma = find_vma(mm, addr);
78577- if (TASK_SIZE - len >= addr &&
78578- (!vma || addr + len <= vma->vm_start))
78579- return addr;
78580+ if (TASK_SIZE - len >= addr) {
78581+ vma = find_vma(mm, addr);
78582+ if (check_heap_stack_gap(vma, addr, len))
78583+ return addr;
78584+ }
78585 }
78586
78587 /* check if free_area_cache is useful for us */
78588@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78589 /* make sure it can fit in the remaining address space */
78590 if (addr > len) {
78591 vma = find_vma(mm, addr-len);
78592- if (!vma || addr <= vma->vm_start)
78593+ if (check_heap_stack_gap(vma, addr - len, len))
78594 /* remember the address as a hint for next time */
78595 return (mm->free_area_cache = addr-len);
78596 }
78597@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78598 * return with success:
78599 */
78600 vma = find_vma(mm, addr);
78601- if (!vma || addr+len <= vma->vm_start)
78602+ if (check_heap_stack_gap(vma, addr, len))
78603 /* remember the address as a hint for next time */
78604 return (mm->free_area_cache = addr);
78605
78606@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78607 mm->cached_hole_size = vma->vm_start - addr;
78608
78609 /* try just below the current vma->vm_start */
78610- addr = vma->vm_start-len;
78611- } while (len < vma->vm_start);
78612+ addr = skip_heap_stack_gap(vma, len);
78613+ } while (!IS_ERR_VALUE(addr));
78614
78615 bottomup:
78616 /*
78617@@ -1414,13 +1624,21 @@ bottomup:
78618 * can happen with large stack limits and large mmap()
78619 * allocations.
78620 */
78621+ mm->mmap_base = TASK_UNMAPPED_BASE;
78622+
78623+#ifdef CONFIG_PAX_RANDMMAP
78624+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78625+ mm->mmap_base += mm->delta_mmap;
78626+#endif
78627+
78628+ mm->free_area_cache = mm->mmap_base;
78629 mm->cached_hole_size = ~0UL;
78630- mm->free_area_cache = TASK_UNMAPPED_BASE;
78631 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78632 /*
78633 * Restore the topdown base:
78634 */
78635- mm->free_area_cache = mm->mmap_base;
78636+ mm->mmap_base = base;
78637+ mm->free_area_cache = base;
78638 mm->cached_hole_size = ~0UL;
78639
78640 return addr;
78641@@ -1429,6 +1647,12 @@ bottomup:
78642
78643 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78644 {
78645+
78646+#ifdef CONFIG_PAX_SEGMEXEC
78647+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78648+ return;
78649+#endif
78650+
78651 /*
78652 * Is this a new hole at the highest possible address?
78653 */
78654@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78655 mm->free_area_cache = addr;
78656
78657 /* dont allow allocations above current base */
78658- if (mm->free_area_cache > mm->mmap_base)
78659+ if (mm->free_area_cache > mm->mmap_base) {
78660 mm->free_area_cache = mm->mmap_base;
78661+ mm->cached_hole_size = ~0UL;
78662+ }
78663 }
78664
78665 unsigned long
78666@@ -1510,40 +1736,41 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78667
78668 EXPORT_SYMBOL(find_vma);
78669
78670-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
78671+/*
78672+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
78673+ * Note: pprev is set to NULL when return value is NULL.
78674+ */
78675 struct vm_area_struct *
78676 find_vma_prev(struct mm_struct *mm, unsigned long addr,
78677 struct vm_area_struct **pprev)
78678 {
78679- struct vm_area_struct *vma = NULL, *prev = NULL;
78680- struct rb_node *rb_node;
78681- if (!mm)
78682- goto out;
78683+ struct vm_area_struct *vma;
78684
78685- /* Guard against addr being lower than the first VMA */
78686- vma = mm->mmap;
78687+ vma = find_vma(mm, addr);
78688+ *pprev = vma ? vma->vm_prev : NULL;
78689+ return vma;
78690+}
78691
78692- /* Go through the RB tree quickly. */
78693- rb_node = mm->mm_rb.rb_node;
78694+#ifdef CONFIG_PAX_SEGMEXEC
78695+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78696+{
78697+ struct vm_area_struct *vma_m;
78698
78699- while (rb_node) {
78700- struct vm_area_struct *vma_tmp;
78701- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
78702-
78703- if (addr < vma_tmp->vm_end) {
78704- rb_node = rb_node->rb_left;
78705- } else {
78706- prev = vma_tmp;
78707- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
78708- break;
78709- rb_node = rb_node->rb_right;
78710- }
78711+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
78712+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
78713+ BUG_ON(vma->vm_mirror);
78714+ return NULL;
78715 }
78716-
78717-out:
78718- *pprev = prev;
78719- return prev ? prev->vm_next : vma;
78720+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
78721+ vma_m = vma->vm_mirror;
78722+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
78723+ BUG_ON(vma->vm_file != vma_m->vm_file);
78724+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
78725+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
78726+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
78727+ return vma_m;
78728 }
78729+#endif
78730
78731 /*
78732 * Verify that the stack growth is acceptable and
78733@@ -1561,6 +1788,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78734 return -ENOMEM;
78735
78736 /* Stack limit test */
78737+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
78738 if (size > rlim[RLIMIT_STACK].rlim_cur)
78739 return -ENOMEM;
78740
78741@@ -1570,6 +1798,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78742 unsigned long limit;
78743 locked = mm->locked_vm + grow;
78744 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
78745+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78746 if (locked > limit && !capable(CAP_IPC_LOCK))
78747 return -ENOMEM;
78748 }
78749@@ -1600,37 +1829,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78750 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
78751 * vma is the last one with address > vma->vm_end. Have to extend vma.
78752 */
78753+#ifndef CONFIG_IA64
78754+static
78755+#endif
78756 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78757 {
78758 int error;
78759+ bool locknext;
78760
78761 if (!(vma->vm_flags & VM_GROWSUP))
78762 return -EFAULT;
78763
78764+ /* Also guard against wrapping around to address 0. */
78765+ if (address < PAGE_ALIGN(address+1))
78766+ address = PAGE_ALIGN(address+1);
78767+ else
78768+ return -ENOMEM;
78769+
78770 /*
78771 * We must make sure the anon_vma is allocated
78772 * so that the anon_vma locking is not a noop.
78773 */
78774 if (unlikely(anon_vma_prepare(vma)))
78775 return -ENOMEM;
78776+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
78777+ if (locknext && anon_vma_prepare(vma->vm_next))
78778+ return -ENOMEM;
78779 anon_vma_lock(vma);
78780+ if (locknext)
78781+ anon_vma_lock(vma->vm_next);
78782
78783 /*
78784 * vma->vm_start/vm_end cannot change under us because the caller
78785 * is required to hold the mmap_sem in read mode. We need the
78786- * anon_vma lock to serialize against concurrent expand_stacks.
78787- * Also guard against wrapping around to address 0.
78788+ * anon_vma locks to serialize against concurrent expand_stacks
78789+ * and expand_upwards.
78790 */
78791- if (address < PAGE_ALIGN(address+4))
78792- address = PAGE_ALIGN(address+4);
78793- else {
78794- anon_vma_unlock(vma);
78795- return -ENOMEM;
78796- }
78797 error = 0;
78798
78799 /* Somebody else might have raced and expanded it already */
78800- if (address > vma->vm_end) {
78801+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
78802+ error = -ENOMEM;
78803+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
78804 unsigned long size, grow;
78805
78806 size = address - vma->vm_start;
78807@@ -1643,6 +1883,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78808 vma->vm_end = address;
78809 }
78810 }
78811+ if (locknext)
78812+ anon_vma_unlock(vma->vm_next);
78813 anon_vma_unlock(vma);
78814 return error;
78815 }
78816@@ -1655,6 +1897,8 @@ static int expand_downwards(struct vm_area_struct *vma,
78817 unsigned long address)
78818 {
78819 int error;
78820+ bool lockprev = false;
78821+ struct vm_area_struct *prev;
78822
78823 /*
78824 * We must make sure the anon_vma is allocated
78825@@ -1668,6 +1912,15 @@ static int expand_downwards(struct vm_area_struct *vma,
78826 if (error)
78827 return error;
78828
78829+ prev = vma->vm_prev;
78830+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
78831+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
78832+#endif
78833+ if (lockprev && anon_vma_prepare(prev))
78834+ return -ENOMEM;
78835+ if (lockprev)
78836+ anon_vma_lock(prev);
78837+
78838 anon_vma_lock(vma);
78839
78840 /*
78841@@ -1677,9 +1930,17 @@ static int expand_downwards(struct vm_area_struct *vma,
78842 */
78843
78844 /* Somebody else might have raced and expanded it already */
78845- if (address < vma->vm_start) {
78846+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
78847+ error = -ENOMEM;
78848+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
78849 unsigned long size, grow;
78850
78851+#ifdef CONFIG_PAX_SEGMEXEC
78852+ struct vm_area_struct *vma_m;
78853+
78854+ vma_m = pax_find_mirror_vma(vma);
78855+#endif
78856+
78857 size = vma->vm_end - address;
78858 grow = (vma->vm_start - address) >> PAGE_SHIFT;
78859
78860@@ -1689,10 +1950,22 @@ static int expand_downwards(struct vm_area_struct *vma,
78861 if (!error) {
78862 vma->vm_start = address;
78863 vma->vm_pgoff -= grow;
78864+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
78865+
78866+#ifdef CONFIG_PAX_SEGMEXEC
78867+ if (vma_m) {
78868+ vma_m->vm_start -= grow << PAGE_SHIFT;
78869+ vma_m->vm_pgoff -= grow;
78870+ }
78871+#endif
78872+
78873+
78874 }
78875 }
78876 }
78877 anon_vma_unlock(vma);
78878+ if (lockprev)
78879+ anon_vma_unlock(prev);
78880 return error;
78881 }
78882
78883@@ -1768,6 +2041,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
78884 do {
78885 long nrpages = vma_pages(vma);
78886
78887+#ifdef CONFIG_PAX_SEGMEXEC
78888+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
78889+ vma = remove_vma(vma);
78890+ continue;
78891+ }
78892+#endif
78893+
78894 mm->total_vm -= nrpages;
78895 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
78896 vma = remove_vma(vma);
78897@@ -1813,6 +2093,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
78898 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
78899 vma->vm_prev = NULL;
78900 do {
78901+
78902+#ifdef CONFIG_PAX_SEGMEXEC
78903+ if (vma->vm_mirror) {
78904+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
78905+ vma->vm_mirror->vm_mirror = NULL;
78906+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
78907+ vma->vm_mirror = NULL;
78908+ }
78909+#endif
78910+
78911 rb_erase(&vma->vm_rb, &mm->mm_rb);
78912 mm->map_count--;
78913 tail_vma = vma;
78914@@ -1840,10 +2130,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78915 struct mempolicy *pol;
78916 struct vm_area_struct *new;
78917
78918+#ifdef CONFIG_PAX_SEGMEXEC
78919+ struct vm_area_struct *vma_m, *new_m = NULL;
78920+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
78921+#endif
78922+
78923 if (is_vm_hugetlb_page(vma) && (addr &
78924 ~(huge_page_mask(hstate_vma(vma)))))
78925 return -EINVAL;
78926
78927+#ifdef CONFIG_PAX_SEGMEXEC
78928+ vma_m = pax_find_mirror_vma(vma);
78929+
78930+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78931+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
78932+ if (mm->map_count >= sysctl_max_map_count-1)
78933+ return -ENOMEM;
78934+ } else
78935+#endif
78936+
78937 if (mm->map_count >= sysctl_max_map_count)
78938 return -ENOMEM;
78939
78940@@ -1851,6 +2156,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78941 if (!new)
78942 return -ENOMEM;
78943
78944+#ifdef CONFIG_PAX_SEGMEXEC
78945+ if (vma_m) {
78946+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78947+ if (!new_m) {
78948+ kmem_cache_free(vm_area_cachep, new);
78949+ return -ENOMEM;
78950+ }
78951+ }
78952+#endif
78953+
78954 /* most fields are the same, copy all, and then fixup */
78955 *new = *vma;
78956
78957@@ -1861,8 +2176,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78958 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
78959 }
78960
78961+#ifdef CONFIG_PAX_SEGMEXEC
78962+ if (vma_m) {
78963+ *new_m = *vma_m;
78964+ new_m->vm_mirror = new;
78965+ new->vm_mirror = new_m;
78966+
78967+ if (new_below)
78968+ new_m->vm_end = addr_m;
78969+ else {
78970+ new_m->vm_start = addr_m;
78971+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
78972+ }
78973+ }
78974+#endif
78975+
78976 pol = mpol_dup(vma_policy(vma));
78977 if (IS_ERR(pol)) {
78978+
78979+#ifdef CONFIG_PAX_SEGMEXEC
78980+ if (new_m)
78981+ kmem_cache_free(vm_area_cachep, new_m);
78982+#endif
78983+
78984 kmem_cache_free(vm_area_cachep, new);
78985 return PTR_ERR(pol);
78986 }
78987@@ -1883,6 +2219,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78988 else
78989 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
78990
78991+#ifdef CONFIG_PAX_SEGMEXEC
78992+ if (vma_m) {
78993+ mpol_get(pol);
78994+ vma_set_policy(new_m, pol);
78995+
78996+ if (new_m->vm_file) {
78997+ get_file(new_m->vm_file);
78998+ if (vma_m->vm_flags & VM_EXECUTABLE)
78999+ added_exe_file_vma(mm);
79000+ }
79001+
79002+ if (new_m->vm_ops && new_m->vm_ops->open)
79003+ new_m->vm_ops->open(new_m);
79004+
79005+ if (new_below)
79006+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
79007+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
79008+ else
79009+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
79010+ }
79011+#endif
79012+
79013 return 0;
79014 }
79015
79016@@ -1891,11 +2249,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79017 * work. This now handles partial unmappings.
79018 * Jeremy Fitzhardinge <jeremy@goop.org>
79019 */
79020+#ifdef CONFIG_PAX_SEGMEXEC
79021 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79022 {
79023+ int ret = __do_munmap(mm, start, len);
79024+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
79025+ return ret;
79026+
79027+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
79028+}
79029+
79030+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79031+#else
79032+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79033+#endif
79034+{
79035 unsigned long end;
79036 struct vm_area_struct *vma, *prev, *last;
79037
79038+ /*
79039+ * mm->mmap_sem is required to protect against another thread
79040+ * changing the mappings in case we sleep.
79041+ */
79042+ verify_mm_writelocked(mm);
79043+
79044 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
79045 return -EINVAL;
79046
79047@@ -1959,6 +2336,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79048 /* Fix up all other VM information */
79049 remove_vma_list(mm, vma);
79050
79051+ track_exec_limit(mm, start, end, 0UL);
79052+
79053 return 0;
79054 }
79055
79056@@ -1971,22 +2350,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
79057
79058 profile_munmap(addr);
79059
79060+#ifdef CONFIG_PAX_SEGMEXEC
79061+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
79062+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
79063+ return -EINVAL;
79064+#endif
79065+
79066 down_write(&mm->mmap_sem);
79067 ret = do_munmap(mm, addr, len);
79068 up_write(&mm->mmap_sem);
79069 return ret;
79070 }
79071
79072-static inline void verify_mm_writelocked(struct mm_struct *mm)
79073-{
79074-#ifdef CONFIG_DEBUG_VM
79075- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
79076- WARN_ON(1);
79077- up_read(&mm->mmap_sem);
79078- }
79079-#endif
79080-}
79081-
79082 /*
79083 * this is really a simplified "do_mmap". it only handles
79084 * anonymous maps. eventually we may be able to do some
79085@@ -2000,6 +2375,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79086 struct rb_node ** rb_link, * rb_parent;
79087 pgoff_t pgoff = addr >> PAGE_SHIFT;
79088 int error;
79089+ unsigned long charged;
79090
79091 len = PAGE_ALIGN(len);
79092 if (!len)
79093@@ -2011,16 +2387,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79094
79095 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
79096
79097+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
79098+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
79099+ flags &= ~VM_EXEC;
79100+
79101+#ifdef CONFIG_PAX_MPROTECT
79102+ if (mm->pax_flags & MF_PAX_MPROTECT)
79103+ flags &= ~VM_MAYEXEC;
79104+#endif
79105+
79106+ }
79107+#endif
79108+
79109 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
79110 if (error & ~PAGE_MASK)
79111 return error;
79112
79113+ charged = len >> PAGE_SHIFT;
79114+
79115 /*
79116 * mlock MCL_FUTURE?
79117 */
79118 if (mm->def_flags & VM_LOCKED) {
79119 unsigned long locked, lock_limit;
79120- locked = len >> PAGE_SHIFT;
79121+ locked = charged;
79122 locked += mm->locked_vm;
79123 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
79124 lock_limit >>= PAGE_SHIFT;
79125@@ -2037,22 +2427,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79126 /*
79127 * Clear old maps. this also does some error checking for us
79128 */
79129- munmap_back:
79130 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79131 if (vma && vma->vm_start < addr + len) {
79132 if (do_munmap(mm, addr, len))
79133 return -ENOMEM;
79134- goto munmap_back;
79135+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79136+ BUG_ON(vma && vma->vm_start < addr + len);
79137 }
79138
79139 /* Check against address space limits *after* clearing old maps... */
79140- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
79141+ if (!may_expand_vm(mm, charged))
79142 return -ENOMEM;
79143
79144 if (mm->map_count > sysctl_max_map_count)
79145 return -ENOMEM;
79146
79147- if (security_vm_enough_memory(len >> PAGE_SHIFT))
79148+ if (security_vm_enough_memory(charged))
79149 return -ENOMEM;
79150
79151 /* Can we just expand an old private anonymous mapping? */
79152@@ -2066,7 +2456,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79153 */
79154 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79155 if (!vma) {
79156- vm_unacct_memory(len >> PAGE_SHIFT);
79157+ vm_unacct_memory(charged);
79158 return -ENOMEM;
79159 }
79160
79161@@ -2078,11 +2468,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79162 vma->vm_page_prot = vm_get_page_prot(flags);
79163 vma_link(mm, vma, prev, rb_link, rb_parent);
79164 out:
79165- mm->total_vm += len >> PAGE_SHIFT;
79166+ mm->total_vm += charged;
79167 if (flags & VM_LOCKED) {
79168 if (!mlock_vma_pages_range(vma, addr, addr + len))
79169- mm->locked_vm += (len >> PAGE_SHIFT);
79170+ mm->locked_vm += charged;
79171 }
79172+ track_exec_limit(mm, addr, addr + len, flags);
79173 return addr;
79174 }
79175
79176@@ -2129,8 +2520,10 @@ void exit_mmap(struct mm_struct *mm)
79177 * Walk the list again, actually closing and freeing it,
79178 * with preemption enabled, without holding any MM locks.
79179 */
79180- while (vma)
79181+ while (vma) {
79182+ vma->vm_mirror = NULL;
79183 vma = remove_vma(vma);
79184+ }
79185
79186 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
79187 }
79188@@ -2144,6 +2537,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79189 struct vm_area_struct * __vma, * prev;
79190 struct rb_node ** rb_link, * rb_parent;
79191
79192+#ifdef CONFIG_PAX_SEGMEXEC
79193+ struct vm_area_struct *vma_m = NULL;
79194+#endif
79195+
79196 /*
79197 * The vm_pgoff of a purely anonymous vma should be irrelevant
79198 * until its first write fault, when page's anon_vma and index
79199@@ -2166,7 +2563,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79200 if ((vma->vm_flags & VM_ACCOUNT) &&
79201 security_vm_enough_memory_mm(mm, vma_pages(vma)))
79202 return -ENOMEM;
79203+
79204+#ifdef CONFIG_PAX_SEGMEXEC
79205+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
79206+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79207+ if (!vma_m)
79208+ return -ENOMEM;
79209+ }
79210+#endif
79211+
79212 vma_link(mm, vma, prev, rb_link, rb_parent);
79213+
79214+#ifdef CONFIG_PAX_SEGMEXEC
79215+ if (vma_m)
79216+ pax_mirror_vma(vma_m, vma);
79217+#endif
79218+
79219 return 0;
79220 }
79221
79222@@ -2184,6 +2596,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79223 struct rb_node **rb_link, *rb_parent;
79224 struct mempolicy *pol;
79225
79226+ BUG_ON(vma->vm_mirror);
79227+
79228 /*
79229 * If anonymous vma has not yet been faulted, update new pgoff
79230 * to match new location, to increase its chance of merging.
79231@@ -2227,6 +2641,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79232 return new_vma;
79233 }
79234
79235+#ifdef CONFIG_PAX_SEGMEXEC
79236+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
79237+{
79238+ struct vm_area_struct *prev_m;
79239+ struct rb_node **rb_link_m, *rb_parent_m;
79240+ struct mempolicy *pol_m;
79241+
79242+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
79243+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
79244+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
79245+ *vma_m = *vma;
79246+ pol_m = vma_policy(vma_m);
79247+ mpol_get(pol_m);
79248+ vma_set_policy(vma_m, pol_m);
79249+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
79250+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
79251+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
79252+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
79253+ if (vma_m->vm_file)
79254+ get_file(vma_m->vm_file);
79255+ if (vma_m->vm_ops && vma_m->vm_ops->open)
79256+ vma_m->vm_ops->open(vma_m);
79257+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
79258+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
79259+ vma_m->vm_mirror = vma;
79260+ vma->vm_mirror = vma_m;
79261+}
79262+#endif
79263+
79264 /*
79265 * Return true if the calling process may expand its vm space by the passed
79266 * number of pages
79267@@ -2237,7 +2680,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
79268 unsigned long lim;
79269
79270 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
79271-
79272+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
79273 if (cur + npages > lim)
79274 return 0;
79275 return 1;
79276@@ -2307,6 +2750,22 @@ int install_special_mapping(struct mm_struct *mm,
79277 vma->vm_start = addr;
79278 vma->vm_end = addr + len;
79279
79280+#ifdef CONFIG_PAX_MPROTECT
79281+ if (mm->pax_flags & MF_PAX_MPROTECT) {
79282+#ifndef CONFIG_PAX_MPROTECT_COMPAT
79283+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
79284+ return -EPERM;
79285+ if (!(vm_flags & VM_EXEC))
79286+ vm_flags &= ~VM_MAYEXEC;
79287+#else
79288+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
79289+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
79290+#endif
79291+ else
79292+ vm_flags &= ~VM_MAYWRITE;
79293+ }
79294+#endif
79295+
79296 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
79297 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
79298
79299diff --git a/mm/mprotect.c b/mm/mprotect.c
79300index 1737c7e..c7faeb4 100644
79301--- a/mm/mprotect.c
79302+++ b/mm/mprotect.c
79303@@ -24,10 +24,16 @@
79304 #include <linux/mmu_notifier.h>
79305 #include <linux/migrate.h>
79306 #include <linux/perf_event.h>
79307+
79308+#ifdef CONFIG_PAX_MPROTECT
79309+#include <linux/elf.h>
79310+#endif
79311+
79312 #include <asm/uaccess.h>
79313 #include <asm/pgtable.h>
79314 #include <asm/cacheflush.h>
79315 #include <asm/tlbflush.h>
79316+#include <asm/mmu_context.h>
79317
79318 #ifndef pgprot_modify
79319 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
79320@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
79321 flush_tlb_range(vma, start, end);
79322 }
79323
79324+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79325+/* called while holding the mmap semaphor for writing except stack expansion */
79326+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
79327+{
79328+ unsigned long oldlimit, newlimit = 0UL;
79329+
79330+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
79331+ return;
79332+
79333+ spin_lock(&mm->page_table_lock);
79334+ oldlimit = mm->context.user_cs_limit;
79335+ if ((prot & VM_EXEC) && oldlimit < end)
79336+ /* USER_CS limit moved up */
79337+ newlimit = end;
79338+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
79339+ /* USER_CS limit moved down */
79340+ newlimit = start;
79341+
79342+ if (newlimit) {
79343+ mm->context.user_cs_limit = newlimit;
79344+
79345+#ifdef CONFIG_SMP
79346+ wmb();
79347+ cpus_clear(mm->context.cpu_user_cs_mask);
79348+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
79349+#endif
79350+
79351+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
79352+ }
79353+ spin_unlock(&mm->page_table_lock);
79354+ if (newlimit == end) {
79355+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
79356+
79357+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
79358+ if (is_vm_hugetlb_page(vma))
79359+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
79360+ else
79361+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
79362+ }
79363+}
79364+#endif
79365+
79366 int
79367 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79368 unsigned long start, unsigned long end, unsigned long newflags)
79369@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79370 int error;
79371 int dirty_accountable = 0;
79372
79373+#ifdef CONFIG_PAX_SEGMEXEC
79374+ struct vm_area_struct *vma_m = NULL;
79375+ unsigned long start_m, end_m;
79376+
79377+ start_m = start + SEGMEXEC_TASK_SIZE;
79378+ end_m = end + SEGMEXEC_TASK_SIZE;
79379+#endif
79380+
79381 if (newflags == oldflags) {
79382 *pprev = vma;
79383 return 0;
79384 }
79385
79386+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
79387+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
79388+
79389+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
79390+ return -ENOMEM;
79391+
79392+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
79393+ return -ENOMEM;
79394+ }
79395+
79396 /*
79397 * If we make a private mapping writable we increase our commit;
79398 * but (without finer accounting) cannot reduce our commit if we
79399@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79400 }
79401 }
79402
79403+#ifdef CONFIG_PAX_SEGMEXEC
79404+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
79405+ if (start != vma->vm_start) {
79406+ error = split_vma(mm, vma, start, 1);
79407+ if (error)
79408+ goto fail;
79409+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
79410+ *pprev = (*pprev)->vm_next;
79411+ }
79412+
79413+ if (end != vma->vm_end) {
79414+ error = split_vma(mm, vma, end, 0);
79415+ if (error)
79416+ goto fail;
79417+ }
79418+
79419+ if (pax_find_mirror_vma(vma)) {
79420+ error = __do_munmap(mm, start_m, end_m - start_m);
79421+ if (error)
79422+ goto fail;
79423+ } else {
79424+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79425+ if (!vma_m) {
79426+ error = -ENOMEM;
79427+ goto fail;
79428+ }
79429+ vma->vm_flags = newflags;
79430+ pax_mirror_vma(vma_m, vma);
79431+ }
79432+ }
79433+#endif
79434+
79435 /*
79436 * First try to merge with previous and/or next vma.
79437 */
79438@@ -195,9 +293,21 @@ success:
79439 * vm_flags and vm_page_prot are protected by the mmap_sem
79440 * held in write mode.
79441 */
79442+
79443+#ifdef CONFIG_PAX_SEGMEXEC
79444+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79445+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79446+#endif
79447+
79448 vma->vm_flags = newflags;
79449+
79450+#ifdef CONFIG_PAX_MPROTECT
79451+ if (mm->binfmt && mm->binfmt->handle_mprotect)
79452+ mm->binfmt->handle_mprotect(vma, newflags);
79453+#endif
79454+
79455 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79456- vm_get_page_prot(newflags));
79457+ vm_get_page_prot(vma->vm_flags));
79458
79459 if (vma_wants_writenotify(vma)) {
79460 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79461@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79462 end = start + len;
79463 if (end <= start)
79464 return -ENOMEM;
79465+
79466+#ifdef CONFIG_PAX_SEGMEXEC
79467+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79468+ if (end > SEGMEXEC_TASK_SIZE)
79469+ return -EINVAL;
79470+ } else
79471+#endif
79472+
79473+ if (end > TASK_SIZE)
79474+ return -EINVAL;
79475+
79476 if (!arch_validate_prot(prot))
79477 return -EINVAL;
79478
79479@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79480 /*
79481 * Does the application expect PROT_READ to imply PROT_EXEC:
79482 */
79483- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79484+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79485 prot |= PROT_EXEC;
79486
79487 vm_flags = calc_vm_prot_bits(prot);
79488@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79489 if (start > vma->vm_start)
79490 prev = vma;
79491
79492+#ifdef CONFIG_PAX_MPROTECT
79493+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79494+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
79495+#endif
79496+
79497 for (nstart = start ; ; ) {
79498 unsigned long newflags;
79499
79500@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79501
79502 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79503 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79504+ if (prot & (PROT_WRITE | PROT_EXEC))
79505+ gr_log_rwxmprotect(vma->vm_file);
79506+
79507+ error = -EACCES;
79508+ goto out;
79509+ }
79510+
79511+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79512 error = -EACCES;
79513 goto out;
79514 }
79515@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79516 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79517 if (error)
79518 goto out;
79519+
79520+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
79521+
79522 nstart = tmp;
79523
79524 if (nstart < prev->vm_end)
79525diff --git a/mm/mremap.c b/mm/mremap.c
79526index 3e98d79..1706cec 100644
79527--- a/mm/mremap.c
79528+++ b/mm/mremap.c
79529@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79530 continue;
79531 pte = ptep_clear_flush(vma, old_addr, old_pte);
79532 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79533+
79534+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79535+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79536+ pte = pte_exprotect(pte);
79537+#endif
79538+
79539 set_pte_at(mm, new_addr, new_pte, pte);
79540 }
79541
79542@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79543 if (is_vm_hugetlb_page(vma))
79544 goto Einval;
79545
79546+#ifdef CONFIG_PAX_SEGMEXEC
79547+ if (pax_find_mirror_vma(vma))
79548+ goto Einval;
79549+#endif
79550+
79551 /* We can't remap across vm area boundaries */
79552 if (old_len > vma->vm_end - addr)
79553 goto Efault;
79554@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79555 unsigned long ret = -EINVAL;
79556 unsigned long charged = 0;
79557 unsigned long map_flags;
79558+ unsigned long pax_task_size = TASK_SIZE;
79559
79560 if (new_addr & ~PAGE_MASK)
79561 goto out;
79562
79563- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79564+#ifdef CONFIG_PAX_SEGMEXEC
79565+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79566+ pax_task_size = SEGMEXEC_TASK_SIZE;
79567+#endif
79568+
79569+ pax_task_size -= PAGE_SIZE;
79570+
79571+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79572 goto out;
79573
79574 /* Check if the location we're moving into overlaps the
79575 * old location at all, and fail if it does.
79576 */
79577- if ((new_addr <= addr) && (new_addr+new_len) > addr)
79578- goto out;
79579-
79580- if ((addr <= new_addr) && (addr+old_len) > new_addr)
79581+ if (addr + old_len > new_addr && new_addr + new_len > addr)
79582 goto out;
79583
79584 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79585@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79586 struct vm_area_struct *vma;
79587 unsigned long ret = -EINVAL;
79588 unsigned long charged = 0;
79589+ unsigned long pax_task_size = TASK_SIZE;
79590
79591 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79592 goto out;
79593@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79594 if (!new_len)
79595 goto out;
79596
79597+#ifdef CONFIG_PAX_SEGMEXEC
79598+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79599+ pax_task_size = SEGMEXEC_TASK_SIZE;
79600+#endif
79601+
79602+ pax_task_size -= PAGE_SIZE;
79603+
79604+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79605+ old_len > pax_task_size || addr > pax_task_size-old_len)
79606+ goto out;
79607+
79608 if (flags & MREMAP_FIXED) {
79609 if (flags & MREMAP_MAYMOVE)
79610 ret = mremap_to(addr, old_len, new_addr, new_len);
79611@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79612 addr + new_len);
79613 }
79614 ret = addr;
79615+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79616 goto out;
79617 }
79618 }
79619@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79620 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79621 if (ret)
79622 goto out;
79623+
79624+ map_flags = vma->vm_flags;
79625 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79626+ if (!(ret & ~PAGE_MASK)) {
79627+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79628+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79629+ }
79630 }
79631 out:
79632 if (ret & ~PAGE_MASK)
79633diff --git a/mm/nommu.c b/mm/nommu.c
79634index 406e8d4..53970d3 100644
79635--- a/mm/nommu.c
79636+++ b/mm/nommu.c
79637@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79638 int sysctl_overcommit_ratio = 50; /* default is 50% */
79639 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79640 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79641-int heap_stack_gap = 0;
79642
79643 atomic_long_t mmap_pages_allocated;
79644
79645@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79646 EXPORT_SYMBOL(find_vma);
79647
79648 /*
79649- * find a VMA
79650- * - we don't extend stack VMAs under NOMMU conditions
79651- */
79652-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79653-{
79654- return find_vma(mm, addr);
79655-}
79656-
79657-/*
79658 * expand a stack to a given address
79659 * - not supported under NOMMU conditions
79660 */
79661diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79662index 3ecab7e..594a471 100644
79663--- a/mm/page_alloc.c
79664+++ b/mm/page_alloc.c
79665@@ -289,7 +289,7 @@ out:
79666 * This usage means that zero-order pages may not be compound.
79667 */
79668
79669-static void free_compound_page(struct page *page)
79670+void free_compound_page(struct page *page)
79671 {
79672 __free_pages_ok(page, compound_order(page));
79673 }
79674@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79675 int bad = 0;
79676 int wasMlocked = __TestClearPageMlocked(page);
79677
79678+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79679+ unsigned long index = 1UL << order;
79680+#endif
79681+
79682 kmemcheck_free_shadow(page, order);
79683
79684 for (i = 0 ; i < (1 << order) ; ++i)
79685@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79686 debug_check_no_obj_freed(page_address(page),
79687 PAGE_SIZE << order);
79688 }
79689+
79690+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79691+ for (; index; --index)
79692+ sanitize_highpage(page + index - 1);
79693+#endif
79694+
79695 arch_free_page(page, order);
79696 kernel_map_pages(page, 1 << order, 0);
79697
79698@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79699 arch_alloc_page(page, order);
79700 kernel_map_pages(page, 1 << order, 1);
79701
79702+#ifndef CONFIG_PAX_MEMORY_SANITIZE
79703 if (gfp_flags & __GFP_ZERO)
79704 prep_zero_page(page, order, gfp_flags);
79705+#endif
79706
79707 if (order && (gfp_flags & __GFP_COMP))
79708 prep_compound_page(page, order);
79709@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
79710 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
79711 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
79712 }
79713+
79714+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79715+ sanitize_highpage(page);
79716+#endif
79717+
79718 arch_free_page(page, 0);
79719 kernel_map_pages(page, 1, 0);
79720
79721@@ -2179,6 +2196,8 @@ void show_free_areas(void)
79722 int cpu;
79723 struct zone *zone;
79724
79725+ pax_track_stack();
79726+
79727 for_each_populated_zone(zone) {
79728 show_node(zone);
79729 printk("%s per-cpu:\n", zone->name);
79730@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
79731 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
79732 }
79733 #else
79734-static void inline setup_usemap(struct pglist_data *pgdat,
79735+static inline void setup_usemap(struct pglist_data *pgdat,
79736 struct zone *zone, unsigned long zonesize) {}
79737 #endif /* CONFIG_SPARSEMEM */
79738
79739diff --git a/mm/percpu.c b/mm/percpu.c
79740index c90614a..5f7b7b8 100644
79741--- a/mm/percpu.c
79742+++ b/mm/percpu.c
79743@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
79744 static unsigned int pcpu_high_unit_cpu __read_mostly;
79745
79746 /* the address of the first chunk which starts with the kernel static area */
79747-void *pcpu_base_addr __read_mostly;
79748+void *pcpu_base_addr __read_only;
79749 EXPORT_SYMBOL_GPL(pcpu_base_addr);
79750
79751 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
79752diff --git a/mm/rmap.c b/mm/rmap.c
79753index dd43373..d848cd7 100644
79754--- a/mm/rmap.c
79755+++ b/mm/rmap.c
79756@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
79757 /* page_table_lock to protect against threads */
79758 spin_lock(&mm->page_table_lock);
79759 if (likely(!vma->anon_vma)) {
79760+
79761+#ifdef CONFIG_PAX_SEGMEXEC
79762+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
79763+
79764+ if (vma_m) {
79765+ BUG_ON(vma_m->anon_vma);
79766+ vma_m->anon_vma = anon_vma;
79767+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
79768+ }
79769+#endif
79770+
79771 vma->anon_vma = anon_vma;
79772 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
79773 allocated = NULL;
79774diff --git a/mm/shmem.c b/mm/shmem.c
79775index 3e0005b..1d659a8 100644
79776--- a/mm/shmem.c
79777+++ b/mm/shmem.c
79778@@ -31,7 +31,7 @@
79779 #include <linux/swap.h>
79780 #include <linux/ima.h>
79781
79782-static struct vfsmount *shm_mnt;
79783+struct vfsmount *shm_mnt;
79784
79785 #ifdef CONFIG_SHMEM
79786 /*
79787@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
79788 goto unlock;
79789 }
79790 entry = shmem_swp_entry(info, index, NULL);
79791+ if (!entry)
79792+ goto unlock;
79793 if (entry->val) {
79794 /*
79795 * The more uptodate page coming down from a stacked
79796@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
79797 struct vm_area_struct pvma;
79798 struct page *page;
79799
79800+ pax_track_stack();
79801+
79802 spol = mpol_cond_copy(&mpol,
79803 mpol_shared_policy_lookup(&info->policy, idx));
79804
79805@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
79806
79807 info = SHMEM_I(inode);
79808 inode->i_size = len-1;
79809- if (len <= (char *)inode - (char *)info) {
79810+ if (len <= (char *)inode - (char *)info && len <= 64) {
79811 /* do it inline */
79812 memcpy(info, symname, len);
79813 inode->i_op = &shmem_symlink_inline_operations;
79814@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
79815 int err = -ENOMEM;
79816
79817 /* Round up to L1_CACHE_BYTES to resist false sharing */
79818- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
79819- L1_CACHE_BYTES), GFP_KERNEL);
79820+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
79821 if (!sbinfo)
79822 return -ENOMEM;
79823
79824diff --git a/mm/slab.c b/mm/slab.c
79825index c8d466a..909e01e 100644
79826--- a/mm/slab.c
79827+++ b/mm/slab.c
79828@@ -174,7 +174,7 @@
79829
79830 /* Legal flag mask for kmem_cache_create(). */
79831 #if DEBUG
79832-# define CREATE_MASK (SLAB_RED_ZONE | \
79833+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
79834 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
79835 SLAB_CACHE_DMA | \
79836 SLAB_STORE_USER | \
79837@@ -182,7 +182,7 @@
79838 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79839 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
79840 #else
79841-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
79842+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
79843 SLAB_CACHE_DMA | \
79844 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
79845 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79846@@ -308,7 +308,7 @@ struct kmem_list3 {
79847 * Need this for bootstrapping a per node allocator.
79848 */
79849 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
79850-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
79851+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
79852 #define CACHE_CACHE 0
79853 #define SIZE_AC MAX_NUMNODES
79854 #define SIZE_L3 (2 * MAX_NUMNODES)
79855@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
79856 if ((x)->max_freeable < i) \
79857 (x)->max_freeable = i; \
79858 } while (0)
79859-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
79860-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
79861-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
79862-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
79863+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
79864+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
79865+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
79866+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
79867 #else
79868 #define STATS_INC_ACTIVE(x) do { } while (0)
79869 #define STATS_DEC_ACTIVE(x) do { } while (0)
79870@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
79871 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
79872 */
79873 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
79874- const struct slab *slab, void *obj)
79875+ const struct slab *slab, const void *obj)
79876 {
79877 u32 offset = (obj - slab->s_mem);
79878 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
79879@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
79880 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
79881 sizes[INDEX_AC].cs_size,
79882 ARCH_KMALLOC_MINALIGN,
79883- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79884+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79885 NULL);
79886
79887 if (INDEX_AC != INDEX_L3) {
79888@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
79889 kmem_cache_create(names[INDEX_L3].name,
79890 sizes[INDEX_L3].cs_size,
79891 ARCH_KMALLOC_MINALIGN,
79892- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79893+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79894 NULL);
79895 }
79896
79897@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
79898 sizes->cs_cachep = kmem_cache_create(names->name,
79899 sizes->cs_size,
79900 ARCH_KMALLOC_MINALIGN,
79901- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79902+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79903 NULL);
79904 }
79905 #ifdef CONFIG_ZONE_DMA
79906@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
79907 }
79908 /* cpu stats */
79909 {
79910- unsigned long allochit = atomic_read(&cachep->allochit);
79911- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
79912- unsigned long freehit = atomic_read(&cachep->freehit);
79913- unsigned long freemiss = atomic_read(&cachep->freemiss);
79914+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
79915+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
79916+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
79917+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
79918
79919 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
79920 allochit, allocmiss, freehit, freemiss);
79921@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
79922
79923 static int __init slab_proc_init(void)
79924 {
79925- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
79926+ mode_t gr_mode = S_IRUGO;
79927+
79928+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79929+ gr_mode = S_IRUSR;
79930+#endif
79931+
79932+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
79933 #ifdef CONFIG_DEBUG_SLAB_LEAK
79934- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
79935+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
79936 #endif
79937 return 0;
79938 }
79939 module_init(slab_proc_init);
79940 #endif
79941
79942+void check_object_size(const void *ptr, unsigned long n, bool to)
79943+{
79944+
79945+#ifdef CONFIG_PAX_USERCOPY
79946+ struct page *page;
79947+ struct kmem_cache *cachep = NULL;
79948+ struct slab *slabp;
79949+ unsigned int objnr;
79950+ unsigned long offset;
79951+ const char *type;
79952+
79953+ if (!n)
79954+ return;
79955+
79956+ type = "<null>";
79957+ if (ZERO_OR_NULL_PTR(ptr))
79958+ goto report;
79959+
79960+ if (!virt_addr_valid(ptr))
79961+ return;
79962+
79963+ page = virt_to_head_page(ptr);
79964+
79965+ type = "<process stack>";
79966+ if (!PageSlab(page)) {
79967+ if (object_is_on_stack(ptr, n) == -1)
79968+ goto report;
79969+ return;
79970+ }
79971+
79972+ cachep = page_get_cache(page);
79973+ type = cachep->name;
79974+ if (!(cachep->flags & SLAB_USERCOPY))
79975+ goto report;
79976+
79977+ slabp = page_get_slab(page);
79978+ objnr = obj_to_index(cachep, slabp, ptr);
79979+ BUG_ON(objnr >= cachep->num);
79980+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
79981+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
79982+ return;
79983+
79984+report:
79985+ pax_report_usercopy(ptr, n, to, type);
79986+#endif
79987+
79988+}
79989+EXPORT_SYMBOL(check_object_size);
79990+
79991 /**
79992 * ksize - get the actual amount of memory allocated for a given object
79993 * @objp: Pointer to the object
79994diff --git a/mm/slob.c b/mm/slob.c
79995index 837ebd6..0bd23bc 100644
79996--- a/mm/slob.c
79997+++ b/mm/slob.c
79998@@ -29,7 +29,7 @@
79999 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
80000 * alloc_pages() directly, allocating compound pages so the page order
80001 * does not have to be separately tracked, and also stores the exact
80002- * allocation size in page->private so that it can be used to accurately
80003+ * allocation size in slob_page->size so that it can be used to accurately
80004 * provide ksize(). These objects are detected in kfree() because slob_page()
80005 * is false for them.
80006 *
80007@@ -58,6 +58,7 @@
80008 */
80009
80010 #include <linux/kernel.h>
80011+#include <linux/sched.h>
80012 #include <linux/slab.h>
80013 #include <linux/mm.h>
80014 #include <linux/swap.h> /* struct reclaim_state */
80015@@ -100,7 +101,8 @@ struct slob_page {
80016 unsigned long flags; /* mandatory */
80017 atomic_t _count; /* mandatory */
80018 slobidx_t units; /* free units left in page */
80019- unsigned long pad[2];
80020+ unsigned long pad[1];
80021+ unsigned long size; /* size when >=PAGE_SIZE */
80022 slob_t *free; /* first free slob_t in page */
80023 struct list_head list; /* linked list of free pages */
80024 };
80025@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
80026 */
80027 static inline int is_slob_page(struct slob_page *sp)
80028 {
80029- return PageSlab((struct page *)sp);
80030+ return PageSlab((struct page *)sp) && !sp->size;
80031 }
80032
80033 static inline void set_slob_page(struct slob_page *sp)
80034@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
80035
80036 static inline struct slob_page *slob_page(const void *addr)
80037 {
80038- return (struct slob_page *)virt_to_page(addr);
80039+ return (struct slob_page *)virt_to_head_page(addr);
80040 }
80041
80042 /*
80043@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
80044 /*
80045 * Return the size of a slob block.
80046 */
80047-static slobidx_t slob_units(slob_t *s)
80048+static slobidx_t slob_units(const slob_t *s)
80049 {
80050 if (s->units > 0)
80051 return s->units;
80052@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
80053 /*
80054 * Return the next free slob block pointer after this one.
80055 */
80056-static slob_t *slob_next(slob_t *s)
80057+static slob_t *slob_next(const slob_t *s)
80058 {
80059 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
80060 slobidx_t next;
80061@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
80062 /*
80063 * Returns true if s is the last free block in its page.
80064 */
80065-static int slob_last(slob_t *s)
80066+static int slob_last(const slob_t *s)
80067 {
80068 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
80069 }
80070@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
80071 if (!page)
80072 return NULL;
80073
80074+ set_slob_page(page);
80075 return page_address(page);
80076 }
80077
80078@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
80079 if (!b)
80080 return NULL;
80081 sp = slob_page(b);
80082- set_slob_page(sp);
80083
80084 spin_lock_irqsave(&slob_lock, flags);
80085 sp->units = SLOB_UNITS(PAGE_SIZE);
80086 sp->free = b;
80087+ sp->size = 0;
80088 INIT_LIST_HEAD(&sp->list);
80089 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
80090 set_slob_page_free(sp, slob_list);
80091@@ -475,10 +478,9 @@ out:
80092 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
80093 #endif
80094
80095-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80096+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
80097 {
80098- unsigned int *m;
80099- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80100+ slob_t *m;
80101 void *ret;
80102
80103 lockdep_trace_alloc(gfp);
80104@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80105
80106 if (!m)
80107 return NULL;
80108- *m = size;
80109+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
80110+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
80111+ m[0].units = size;
80112+ m[1].units = align;
80113 ret = (void *)m + align;
80114
80115 trace_kmalloc_node(_RET_IP_, ret,
80116@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80117
80118 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
80119 if (ret) {
80120- struct page *page;
80121- page = virt_to_page(ret);
80122- page->private = size;
80123+ struct slob_page *sp;
80124+ sp = slob_page(ret);
80125+ sp->size = size;
80126 }
80127
80128 trace_kmalloc_node(_RET_IP_, ret,
80129 size, PAGE_SIZE << order, gfp, node);
80130 }
80131
80132- kmemleak_alloc(ret, size, 1, gfp);
80133+ return ret;
80134+}
80135+
80136+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80137+{
80138+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80139+ void *ret = __kmalloc_node_align(size, gfp, node, align);
80140+
80141+ if (!ZERO_OR_NULL_PTR(ret))
80142+ kmemleak_alloc(ret, size, 1, gfp);
80143 return ret;
80144 }
80145 EXPORT_SYMBOL(__kmalloc_node);
80146@@ -528,13 +542,92 @@ void kfree(const void *block)
80147 sp = slob_page(block);
80148 if (is_slob_page(sp)) {
80149 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80150- unsigned int *m = (unsigned int *)(block - align);
80151- slob_free(m, *m + align);
80152- } else
80153+ slob_t *m = (slob_t *)(block - align);
80154+ slob_free(m, m[0].units + align);
80155+ } else {
80156+ clear_slob_page(sp);
80157+ free_slob_page(sp);
80158+ sp->size = 0;
80159 put_page(&sp->page);
80160+ }
80161 }
80162 EXPORT_SYMBOL(kfree);
80163
80164+void check_object_size(const void *ptr, unsigned long n, bool to)
80165+{
80166+
80167+#ifdef CONFIG_PAX_USERCOPY
80168+ struct slob_page *sp;
80169+ const slob_t *free;
80170+ const void *base;
80171+ unsigned long flags;
80172+ const char *type;
80173+
80174+ if (!n)
80175+ return;
80176+
80177+ type = "<null>";
80178+ if (ZERO_OR_NULL_PTR(ptr))
80179+ goto report;
80180+
80181+ if (!virt_addr_valid(ptr))
80182+ return;
80183+
80184+ type = "<process stack>";
80185+ sp = slob_page(ptr);
80186+ if (!PageSlab((struct page *)sp)) {
80187+ if (object_is_on_stack(ptr, n) == -1)
80188+ goto report;
80189+ return;
80190+ }
80191+
80192+ type = "<slob>";
80193+ if (sp->size) {
80194+ base = page_address(&sp->page);
80195+ if (base <= ptr && n <= sp->size - (ptr - base))
80196+ return;
80197+ goto report;
80198+ }
80199+
80200+ /* some tricky double walking to find the chunk */
80201+ spin_lock_irqsave(&slob_lock, flags);
80202+ base = (void *)((unsigned long)ptr & PAGE_MASK);
80203+ free = sp->free;
80204+
80205+ while (!slob_last(free) && (void *)free <= ptr) {
80206+ base = free + slob_units(free);
80207+ free = slob_next(free);
80208+ }
80209+
80210+ while (base < (void *)free) {
80211+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
80212+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
80213+ int offset;
80214+
80215+ if (ptr < base + align)
80216+ break;
80217+
80218+ offset = ptr - base - align;
80219+ if (offset >= m) {
80220+ base += size;
80221+ continue;
80222+ }
80223+
80224+ if (n > m - offset)
80225+ break;
80226+
80227+ spin_unlock_irqrestore(&slob_lock, flags);
80228+ return;
80229+ }
80230+
80231+ spin_unlock_irqrestore(&slob_lock, flags);
80232+report:
80233+ pax_report_usercopy(ptr, n, to, type);
80234+#endif
80235+
80236+}
80237+EXPORT_SYMBOL(check_object_size);
80238+
80239 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
80240 size_t ksize(const void *block)
80241 {
80242@@ -547,10 +640,10 @@ size_t ksize(const void *block)
80243 sp = slob_page(block);
80244 if (is_slob_page(sp)) {
80245 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80246- unsigned int *m = (unsigned int *)(block - align);
80247- return SLOB_UNITS(*m) * SLOB_UNIT;
80248+ slob_t *m = (slob_t *)(block - align);
80249+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
80250 } else
80251- return sp->page.private;
80252+ return sp->size;
80253 }
80254 EXPORT_SYMBOL(ksize);
80255
80256@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80257 {
80258 struct kmem_cache *c;
80259
80260+#ifdef CONFIG_PAX_USERCOPY
80261+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
80262+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
80263+#else
80264 c = slob_alloc(sizeof(struct kmem_cache),
80265 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
80266+#endif
80267
80268 if (c) {
80269 c->name = name;
80270@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
80271 {
80272 void *b;
80273
80274+#ifdef CONFIG_PAX_USERCOPY
80275+ b = __kmalloc_node_align(c->size, flags, node, c->align);
80276+#else
80277 if (c->size < PAGE_SIZE) {
80278 b = slob_alloc(c->size, flags, c->align, node);
80279 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80280 SLOB_UNITS(c->size) * SLOB_UNIT,
80281 flags, node);
80282 } else {
80283+ struct slob_page *sp;
80284+
80285 b = slob_new_pages(flags, get_order(c->size), node);
80286+ sp = slob_page(b);
80287+ sp->size = c->size;
80288 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80289 PAGE_SIZE << get_order(c->size),
80290 flags, node);
80291 }
80292+#endif
80293
80294 if (c->ctor)
80295 c->ctor(b);
80296@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
80297
80298 static void __kmem_cache_free(void *b, int size)
80299 {
80300- if (size < PAGE_SIZE)
80301+ struct slob_page *sp = slob_page(b);
80302+
80303+ if (is_slob_page(sp))
80304 slob_free(b, size);
80305- else
80306+ else {
80307+ clear_slob_page(sp);
80308+ free_slob_page(sp);
80309+ sp->size = 0;
80310 slob_free_pages(b, get_order(size));
80311+ }
80312 }
80313
80314 static void kmem_rcu_free(struct rcu_head *head)
80315@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
80316
80317 void kmem_cache_free(struct kmem_cache *c, void *b)
80318 {
80319+ int size = c->size;
80320+
80321+#ifdef CONFIG_PAX_USERCOPY
80322+ if (size + c->align < PAGE_SIZE) {
80323+ size += c->align;
80324+ b -= c->align;
80325+ }
80326+#endif
80327+
80328 kmemleak_free_recursive(b, c->flags);
80329 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
80330 struct slob_rcu *slob_rcu;
80331- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
80332+ slob_rcu = b + (size - sizeof(struct slob_rcu));
80333 INIT_RCU_HEAD(&slob_rcu->head);
80334- slob_rcu->size = c->size;
80335+ slob_rcu->size = size;
80336 call_rcu(&slob_rcu->head, kmem_rcu_free);
80337 } else {
80338- __kmem_cache_free(b, c->size);
80339+ __kmem_cache_free(b, size);
80340 }
80341
80342+#ifdef CONFIG_PAX_USERCOPY
80343+ trace_kfree(_RET_IP_, b);
80344+#else
80345 trace_kmem_cache_free(_RET_IP_, b);
80346+#endif
80347+
80348 }
80349 EXPORT_SYMBOL(kmem_cache_free);
80350
80351diff --git a/mm/slub.c b/mm/slub.c
80352index 4996fc7..87e01d0 100644
80353--- a/mm/slub.c
80354+++ b/mm/slub.c
80355@@ -201,7 +201,7 @@ struct track {
80356
80357 enum track_item { TRACK_ALLOC, TRACK_FREE };
80358
80359-#ifdef CONFIG_SLUB_DEBUG
80360+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80361 static int sysfs_slab_add(struct kmem_cache *);
80362 static int sysfs_slab_alias(struct kmem_cache *, const char *);
80363 static void sysfs_slab_remove(struct kmem_cache *);
80364@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
80365 if (!t->addr)
80366 return;
80367
80368- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
80369+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
80370 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
80371 }
80372
80373@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
80374
80375 page = virt_to_head_page(x);
80376
80377+ BUG_ON(!PageSlab(page));
80378+
80379 slab_free(s, page, x, _RET_IP_);
80380
80381 trace_kmem_cache_free(_RET_IP_, x);
80382@@ -1937,7 +1939,7 @@ static int slub_min_objects;
80383 * Merge control. If this is set then no merging of slab caches will occur.
80384 * (Could be removed. This was introduced to pacify the merge skeptics.)
80385 */
80386-static int slub_nomerge;
80387+static int slub_nomerge = 1;
80388
80389 /*
80390 * Calculate the order of allocation given an slab object size.
80391@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
80392 * list to avoid pounding the page allocator excessively.
80393 */
80394 set_min_partial(s, ilog2(s->size));
80395- s->refcount = 1;
80396+ atomic_set(&s->refcount, 1);
80397 #ifdef CONFIG_NUMA
80398 s->remote_node_defrag_ratio = 1000;
80399 #endif
80400@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
80401 void kmem_cache_destroy(struct kmem_cache *s)
80402 {
80403 down_write(&slub_lock);
80404- s->refcount--;
80405- if (!s->refcount) {
80406+ if (atomic_dec_and_test(&s->refcount)) {
80407 list_del(&s->list);
80408 up_write(&slub_lock);
80409 if (kmem_cache_close(s)) {
80410@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
80411 __setup("slub_nomerge", setup_slub_nomerge);
80412
80413 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
80414- const char *name, int size, gfp_t gfp_flags)
80415+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
80416 {
80417- unsigned int flags = 0;
80418-
80419 if (gfp_flags & SLUB_DMA)
80420- flags = SLAB_CACHE_DMA;
80421+ flags |= SLAB_CACHE_DMA;
80422
80423 /*
80424 * This function is called with IRQs disabled during early-boot on
80425@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
80426 EXPORT_SYMBOL(__kmalloc_node);
80427 #endif
80428
80429+void check_object_size(const void *ptr, unsigned long n, bool to)
80430+{
80431+
80432+#ifdef CONFIG_PAX_USERCOPY
80433+ struct page *page;
80434+ struct kmem_cache *s = NULL;
80435+ unsigned long offset;
80436+ const char *type;
80437+
80438+ if (!n)
80439+ return;
80440+
80441+ type = "<null>";
80442+ if (ZERO_OR_NULL_PTR(ptr))
80443+ goto report;
80444+
80445+ if (!virt_addr_valid(ptr))
80446+ return;
80447+
80448+ page = get_object_page(ptr);
80449+
80450+ type = "<process stack>";
80451+ if (!page) {
80452+ if (object_is_on_stack(ptr, n) == -1)
80453+ goto report;
80454+ return;
80455+ }
80456+
80457+ s = page->slab;
80458+ type = s->name;
80459+ if (!(s->flags & SLAB_USERCOPY))
80460+ goto report;
80461+
80462+ offset = (ptr - page_address(page)) % s->size;
80463+ if (offset <= s->objsize && n <= s->objsize - offset)
80464+ return;
80465+
80466+report:
80467+ pax_report_usercopy(ptr, n, to, type);
80468+#endif
80469+
80470+}
80471+EXPORT_SYMBOL(check_object_size);
80472+
80473 size_t ksize(const void *object)
80474 {
80475 struct page *page;
80476@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80477 * kmem_cache_open for slab_state == DOWN.
80478 */
80479 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80480- sizeof(struct kmem_cache_node), GFP_NOWAIT);
80481- kmalloc_caches[0].refcount = -1;
80482+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80483+ atomic_set(&kmalloc_caches[0].refcount, -1);
80484 caches++;
80485
80486 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80487@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80488 /* Caches that are not of the two-to-the-power-of size */
80489 if (KMALLOC_MIN_SIZE <= 32) {
80490 create_kmalloc_cache(&kmalloc_caches[1],
80491- "kmalloc-96", 96, GFP_NOWAIT);
80492+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80493 caches++;
80494 }
80495 if (KMALLOC_MIN_SIZE <= 64) {
80496 create_kmalloc_cache(&kmalloc_caches[2],
80497- "kmalloc-192", 192, GFP_NOWAIT);
80498+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80499 caches++;
80500 }
80501
80502 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80503 create_kmalloc_cache(&kmalloc_caches[i],
80504- "kmalloc", 1 << i, GFP_NOWAIT);
80505+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80506 caches++;
80507 }
80508
80509@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80510 /*
80511 * We may have set a slab to be unmergeable during bootstrap.
80512 */
80513- if (s->refcount < 0)
80514+ if (atomic_read(&s->refcount) < 0)
80515 return 1;
80516
80517 return 0;
80518@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80519 if (s) {
80520 int cpu;
80521
80522- s->refcount++;
80523+ atomic_inc(&s->refcount);
80524 /*
80525 * Adjust the object sizes so that we clear
80526 * the complete object on kzalloc.
80527@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80528
80529 if (sysfs_slab_alias(s, name)) {
80530 down_write(&slub_lock);
80531- s->refcount--;
80532+ atomic_dec(&s->refcount);
80533 up_write(&slub_lock);
80534 goto err;
80535 }
80536@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80537
80538 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80539 {
80540- return sprintf(buf, "%d\n", s->refcount - 1);
80541+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80542 }
80543 SLAB_ATTR_RO(aliases);
80544
80545@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80546 kfree(s);
80547 }
80548
80549-static struct sysfs_ops slab_sysfs_ops = {
80550+static const struct sysfs_ops slab_sysfs_ops = {
80551 .show = slab_attr_show,
80552 .store = slab_attr_store,
80553 };
80554@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80555 return 0;
80556 }
80557
80558-static struct kset_uevent_ops slab_uevent_ops = {
80559+static const struct kset_uevent_ops slab_uevent_ops = {
80560 .filter = uevent_filter,
80561 };
80562
80563@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80564 return name;
80565 }
80566
80567+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80568 static int sysfs_slab_add(struct kmem_cache *s)
80569 {
80570 int err;
80571@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80572 kobject_del(&s->kobj);
80573 kobject_put(&s->kobj);
80574 }
80575+#endif
80576
80577 /*
80578 * Need to buffer aliases during bootup until sysfs becomes
80579@@ -4632,6 +4677,7 @@ struct saved_alias {
80580
80581 static struct saved_alias *alias_list;
80582
80583+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80584 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80585 {
80586 struct saved_alias *al;
80587@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80588 alias_list = al;
80589 return 0;
80590 }
80591+#endif
80592
80593 static int __init slab_sysfs_init(void)
80594 {
80595@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80596
80597 static int __init slab_proc_init(void)
80598 {
80599- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80600+ mode_t gr_mode = S_IRUGO;
80601+
80602+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80603+ gr_mode = S_IRUSR;
80604+#endif
80605+
80606+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80607 return 0;
80608 }
80609 module_init(slab_proc_init);
80610diff --git a/mm/swap.c b/mm/swap.c
80611index 308e57d..5de19c0 100644
80612--- a/mm/swap.c
80613+++ b/mm/swap.c
80614@@ -30,6 +30,7 @@
80615 #include <linux/notifier.h>
80616 #include <linux/backing-dev.h>
80617 #include <linux/memcontrol.h>
80618+#include <linux/hugetlb.h>
80619
80620 #include "internal.h"
80621
80622@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80623 compound_page_dtor *dtor;
80624
80625 dtor = get_compound_page_dtor(page);
80626+ if (!PageHuge(page))
80627+ BUG_ON(dtor != free_compound_page);
80628 (*dtor)(page);
80629 }
80630 }
80631diff --git a/mm/util.c b/mm/util.c
80632index e48b493..24a601d 100644
80633--- a/mm/util.c
80634+++ b/mm/util.c
80635@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80636 void arch_pick_mmap_layout(struct mm_struct *mm)
80637 {
80638 mm->mmap_base = TASK_UNMAPPED_BASE;
80639+
80640+#ifdef CONFIG_PAX_RANDMMAP
80641+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80642+ mm->mmap_base += mm->delta_mmap;
80643+#endif
80644+
80645 mm->get_unmapped_area = arch_get_unmapped_area;
80646 mm->unmap_area = arch_unmap_area;
80647 }
80648diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80649index f34ffd0..e60c44f 100644
80650--- a/mm/vmalloc.c
80651+++ b/mm/vmalloc.c
80652@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80653
80654 pte = pte_offset_kernel(pmd, addr);
80655 do {
80656- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80657- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80658+
80659+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80660+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80661+ BUG_ON(!pte_exec(*pte));
80662+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80663+ continue;
80664+ }
80665+#endif
80666+
80667+ {
80668+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80669+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80670+ }
80671 } while (pte++, addr += PAGE_SIZE, addr != end);
80672 }
80673
80674@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80675 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80676 {
80677 pte_t *pte;
80678+ int ret = -ENOMEM;
80679
80680 /*
80681 * nr is a running index into the array which helps higher level
80682@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80683 pte = pte_alloc_kernel(pmd, addr);
80684 if (!pte)
80685 return -ENOMEM;
80686+
80687+ pax_open_kernel();
80688 do {
80689 struct page *page = pages[*nr];
80690
80691- if (WARN_ON(!pte_none(*pte)))
80692- return -EBUSY;
80693- if (WARN_ON(!page))
80694- return -ENOMEM;
80695+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80696+ if (!(pgprot_val(prot) & _PAGE_NX))
80697+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80698+ else
80699+#endif
80700+
80701+ if (WARN_ON(!pte_none(*pte))) {
80702+ ret = -EBUSY;
80703+ goto out;
80704+ }
80705+ if (WARN_ON(!page)) {
80706+ ret = -ENOMEM;
80707+ goto out;
80708+ }
80709 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
80710 (*nr)++;
80711 } while (pte++, addr += PAGE_SIZE, addr != end);
80712- return 0;
80713+ ret = 0;
80714+out:
80715+ pax_close_kernel();
80716+ return ret;
80717 }
80718
80719 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
80720@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
80721 * and fall back on vmalloc() if that fails. Others
80722 * just put it in the vmalloc space.
80723 */
80724-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
80725+#ifdef CONFIG_MODULES
80726+#ifdef MODULES_VADDR
80727 unsigned long addr = (unsigned long)x;
80728 if (addr >= MODULES_VADDR && addr < MODULES_END)
80729 return 1;
80730 #endif
80731+
80732+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80733+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
80734+ return 1;
80735+#endif
80736+
80737+#endif
80738+
80739 return is_vmalloc_addr(x);
80740 }
80741
80742@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
80743
80744 if (!pgd_none(*pgd)) {
80745 pud_t *pud = pud_offset(pgd, addr);
80746+#ifdef CONFIG_X86
80747+ if (!pud_large(*pud))
80748+#endif
80749 if (!pud_none(*pud)) {
80750 pmd_t *pmd = pmd_offset(pud, addr);
80751+#ifdef CONFIG_X86
80752+ if (!pmd_large(*pmd))
80753+#endif
80754 if (!pmd_none(*pmd)) {
80755 pte_t *ptep, pte;
80756
80757@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
80758 struct rb_node *tmp;
80759
80760 while (*p) {
80761- struct vmap_area *tmp;
80762+ struct vmap_area *varea;
80763
80764 parent = *p;
80765- tmp = rb_entry(parent, struct vmap_area, rb_node);
80766- if (va->va_start < tmp->va_end)
80767+ varea = rb_entry(parent, struct vmap_area, rb_node);
80768+ if (va->va_start < varea->va_end)
80769 p = &(*p)->rb_left;
80770- else if (va->va_end > tmp->va_start)
80771+ else if (va->va_end > varea->va_start)
80772 p = &(*p)->rb_right;
80773 else
80774 BUG();
80775@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
80776 struct vm_struct *area;
80777
80778 BUG_ON(in_interrupt());
80779+
80780+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80781+ if (flags & VM_KERNEXEC) {
80782+ if (start != VMALLOC_START || end != VMALLOC_END)
80783+ return NULL;
80784+ start = (unsigned long)MODULES_EXEC_VADDR;
80785+ end = (unsigned long)MODULES_EXEC_END;
80786+ }
80787+#endif
80788+
80789 if (flags & VM_IOREMAP) {
80790 int bit = fls(size);
80791
80792@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
80793 if (count > totalram_pages)
80794 return NULL;
80795
80796+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80797+ if (!(pgprot_val(prot) & _PAGE_NX))
80798+ flags |= VM_KERNEXEC;
80799+#endif
80800+
80801 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
80802 __builtin_return_address(0));
80803 if (!area)
80804@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80805 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
80806 return NULL;
80807
80808+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80809+ if (!(pgprot_val(prot) & _PAGE_NX))
80810+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
80811+ VMALLOC_START, VMALLOC_END, node,
80812+ gfp_mask, caller);
80813+ else
80814+#endif
80815+
80816 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
80817 VMALLOC_START, VMALLOC_END, node,
80818 gfp_mask, caller);
80819@@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80820 return addr;
80821 }
80822
80823+#undef __vmalloc
80824 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
80825 {
80826 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
80827@@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
80828 * For tight control over page level allocator and protection flags
80829 * use __vmalloc() instead.
80830 */
80831+#undef vmalloc
80832 void *vmalloc(unsigned long size)
80833 {
80834 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80835@@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
80836 * The resulting memory area is zeroed so it can be mapped to userspace
80837 * without leaking data.
80838 */
80839+#undef vmalloc_user
80840 void *vmalloc_user(unsigned long size)
80841 {
80842 struct vm_struct *area;
80843@@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
80844 * For tight control over page level allocator and protection flags
80845 * use __vmalloc() instead.
80846 */
80847+#undef vmalloc_node
80848 void *vmalloc_node(unsigned long size, int node)
80849 {
80850 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80851@@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
80852 * For tight control over page level allocator and protection flags
80853 * use __vmalloc() instead.
80854 */
80855-
80856+#undef vmalloc_exec
80857 void *vmalloc_exec(unsigned long size)
80858 {
80859- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
80860+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
80861 -1, __builtin_return_address(0));
80862 }
80863
80864@@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
80865 * Allocate enough 32bit PA addressable pages to cover @size from the
80866 * page level allocator and map them into contiguous kernel virtual space.
80867 */
80868+#undef vmalloc_32
80869 void *vmalloc_32(unsigned long size)
80870 {
80871 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
80872@@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
80873 * The resulting memory area is 32bit addressable and zeroed so it can be
80874 * mapped to userspace without leaking data.
80875 */
80876+#undef vmalloc_32_user
80877 void *vmalloc_32_user(unsigned long size)
80878 {
80879 struct vm_struct *area;
80880@@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
80881 unsigned long uaddr = vma->vm_start;
80882 unsigned long usize = vma->vm_end - vma->vm_start;
80883
80884+ BUG_ON(vma->vm_mirror);
80885+
80886 if ((PAGE_SIZE-1) & (unsigned long)addr)
80887 return -EINVAL;
80888
80889diff --git a/mm/vmstat.c b/mm/vmstat.c
80890index 42d76c6..5643dc4 100644
80891--- a/mm/vmstat.c
80892+++ b/mm/vmstat.c
80893@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
80894 *
80895 * vm_stat contains the global counters
80896 */
80897-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80898+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80899 EXPORT_SYMBOL(vm_stat);
80900
80901 #ifdef CONFIG_SMP
80902@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
80903 v = p->vm_stat_diff[i];
80904 p->vm_stat_diff[i] = 0;
80905 local_irq_restore(flags);
80906- atomic_long_add(v, &zone->vm_stat[i]);
80907+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
80908 global_diff[i] += v;
80909 #ifdef CONFIG_NUMA
80910 /* 3 seconds idle till flush */
80911@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
80912
80913 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
80914 if (global_diff[i])
80915- atomic_long_add(global_diff[i], &vm_stat[i]);
80916+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
80917 }
80918
80919 #endif
80920@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
80921 start_cpu_timer(cpu);
80922 #endif
80923 #ifdef CONFIG_PROC_FS
80924- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
80925- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
80926- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
80927- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
80928+ {
80929+ mode_t gr_mode = S_IRUGO;
80930+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80931+ gr_mode = S_IRUSR;
80932+#endif
80933+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
80934+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
80935+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80936+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
80937+#else
80938+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
80939+#endif
80940+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
80941+ }
80942 #endif
80943 return 0;
80944 }
80945diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
80946index a29c5ab..6143f20 100644
80947--- a/net/8021q/vlan.c
80948+++ b/net/8021q/vlan.c
80949@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
80950 err = -EPERM;
80951 if (!capable(CAP_NET_ADMIN))
80952 break;
80953- if ((args.u.name_type >= 0) &&
80954- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
80955+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
80956 struct vlan_net *vn;
80957
80958 vn = net_generic(net, vlan_net_id);
80959diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
80960index a2d2984..f9eb711 100644
80961--- a/net/9p/trans_fd.c
80962+++ b/net/9p/trans_fd.c
80963@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
80964 oldfs = get_fs();
80965 set_fs(get_ds());
80966 /* The cast to a user pointer is valid due to the set_fs() */
80967- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
80968+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
80969 set_fs(oldfs);
80970
80971 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
80972diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
80973index 02cc7e7..4514f1b 100644
80974--- a/net/atm/atm_misc.c
80975+++ b/net/atm/atm_misc.c
80976@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
80977 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
80978 return 1;
80979 atm_return(vcc,truesize);
80980- atomic_inc(&vcc->stats->rx_drop);
80981+ atomic_inc_unchecked(&vcc->stats->rx_drop);
80982 return 0;
80983 }
80984
80985@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
80986 }
80987 }
80988 atm_return(vcc,guess);
80989- atomic_inc(&vcc->stats->rx_drop);
80990+ atomic_inc_unchecked(&vcc->stats->rx_drop);
80991 return NULL;
80992 }
80993
80994@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
80995
80996 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80997 {
80998-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80999+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81000 __SONET_ITEMS
81001 #undef __HANDLE_ITEM
81002 }
81003@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81004
81005 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81006 {
81007-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
81008+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
81009 __SONET_ITEMS
81010 #undef __HANDLE_ITEM
81011 }
81012diff --git a/net/atm/lec.h b/net/atm/lec.h
81013index 9d14d19..5c145f3 100644
81014--- a/net/atm/lec.h
81015+++ b/net/atm/lec.h
81016@@ -48,7 +48,7 @@ struct lane2_ops {
81017 const u8 *tlvs, u32 sizeoftlvs);
81018 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
81019 const u8 *tlvs, u32 sizeoftlvs);
81020-};
81021+} __no_const;
81022
81023 /*
81024 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
81025diff --git a/net/atm/mpc.h b/net/atm/mpc.h
81026index 0919a88..a23d54e 100644
81027--- a/net/atm/mpc.h
81028+++ b/net/atm/mpc.h
81029@@ -33,7 +33,7 @@ struct mpoa_client {
81030 struct mpc_parameters parameters; /* parameters for this client */
81031
81032 const struct net_device_ops *old_ops;
81033- struct net_device_ops new_ops;
81034+ net_device_ops_no_const new_ops;
81035 };
81036
81037
81038diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
81039index 4504a4b..1733f1e 100644
81040--- a/net/atm/mpoa_caches.c
81041+++ b/net/atm/mpoa_caches.c
81042@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
81043 struct timeval now;
81044 struct k_message msg;
81045
81046+ pax_track_stack();
81047+
81048 do_gettimeofday(&now);
81049
81050 write_lock_irq(&client->egress_lock);
81051diff --git a/net/atm/proc.c b/net/atm/proc.c
81052index ab8419a..aa91497 100644
81053--- a/net/atm/proc.c
81054+++ b/net/atm/proc.c
81055@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
81056 const struct k_atm_aal_stats *stats)
81057 {
81058 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
81059- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
81060- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
81061- atomic_read(&stats->rx_drop));
81062+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
81063+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
81064+ atomic_read_unchecked(&stats->rx_drop));
81065 }
81066
81067 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
81068@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
81069 {
81070 struct sock *sk = sk_atm(vcc);
81071
81072+#ifdef CONFIG_GRKERNSEC_HIDESYM
81073+ seq_printf(seq, "%p ", NULL);
81074+#else
81075 seq_printf(seq, "%p ", vcc);
81076+#endif
81077+
81078 if (!vcc->dev)
81079 seq_printf(seq, "Unassigned ");
81080 else
81081@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
81082 {
81083 if (!vcc->dev)
81084 seq_printf(seq, sizeof(void *) == 4 ?
81085+#ifdef CONFIG_GRKERNSEC_HIDESYM
81086+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
81087+#else
81088 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
81089+#endif
81090 else
81091 seq_printf(seq, "%3d %3d %5d ",
81092 vcc->dev->number, vcc->vpi, vcc->vci);
81093diff --git a/net/atm/resources.c b/net/atm/resources.c
81094index 56b7322..c48b84e 100644
81095--- a/net/atm/resources.c
81096+++ b/net/atm/resources.c
81097@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
81098 static void copy_aal_stats(struct k_atm_aal_stats *from,
81099 struct atm_aal_stats *to)
81100 {
81101-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81102+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81103 __AAL_STAT_ITEMS
81104 #undef __HANDLE_ITEM
81105 }
81106@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
81107 static void subtract_aal_stats(struct k_atm_aal_stats *from,
81108 struct atm_aal_stats *to)
81109 {
81110-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
81111+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
81112 __AAL_STAT_ITEMS
81113 #undef __HANDLE_ITEM
81114 }
81115diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
81116index 8567d47..bba2292 100644
81117--- a/net/bridge/br_private.h
81118+++ b/net/bridge/br_private.h
81119@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
81120
81121 #ifdef CONFIG_SYSFS
81122 /* br_sysfs_if.c */
81123-extern struct sysfs_ops brport_sysfs_ops;
81124+extern const struct sysfs_ops brport_sysfs_ops;
81125 extern int br_sysfs_addif(struct net_bridge_port *p);
81126
81127 /* br_sysfs_br.c */
81128diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
81129index 9a52ac5..c97538e 100644
81130--- a/net/bridge/br_stp_if.c
81131+++ b/net/bridge/br_stp_if.c
81132@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
81133 char *envp[] = { NULL };
81134
81135 if (br->stp_enabled == BR_USER_STP) {
81136- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
81137+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
81138 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
81139 br->dev->name, r);
81140
81141diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
81142index 820643a..ce77fb3 100644
81143--- a/net/bridge/br_sysfs_if.c
81144+++ b/net/bridge/br_sysfs_if.c
81145@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
81146 return ret;
81147 }
81148
81149-struct sysfs_ops brport_sysfs_ops = {
81150+const struct sysfs_ops brport_sysfs_ops = {
81151 .show = brport_show,
81152 .store = brport_store,
81153 };
81154diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
81155index d73d47f..72df42a 100644
81156--- a/net/bridge/netfilter/ebtables.c
81157+++ b/net/bridge/netfilter/ebtables.c
81158@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
81159 unsigned int entries_size, nentries;
81160 char *entries;
81161
81162+ pax_track_stack();
81163+
81164 if (cmd == EBT_SO_GET_ENTRIES) {
81165 entries_size = t->private->entries_size;
81166 nentries = t->private->nentries;
81167diff --git a/net/can/bcm.c b/net/can/bcm.c
81168index 2ffd2e0..72a7486 100644
81169--- a/net/can/bcm.c
81170+++ b/net/can/bcm.c
81171@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
81172 struct bcm_sock *bo = bcm_sk(sk);
81173 struct bcm_op *op;
81174
81175+#ifdef CONFIG_GRKERNSEC_HIDESYM
81176+ seq_printf(m, ">>> socket %p", NULL);
81177+ seq_printf(m, " / sk %p", NULL);
81178+ seq_printf(m, " / bo %p", NULL);
81179+#else
81180 seq_printf(m, ">>> socket %p", sk->sk_socket);
81181 seq_printf(m, " / sk %p", sk);
81182 seq_printf(m, " / bo %p", bo);
81183+#endif
81184 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
81185 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
81186 seq_printf(m, " <<<\n");
81187diff --git a/net/compat.c b/net/compat.c
81188index 9559afc..ccd74e1 100644
81189--- a/net/compat.c
81190+++ b/net/compat.c
81191@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
81192 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
81193 __get_user(kmsg->msg_flags, &umsg->msg_flags))
81194 return -EFAULT;
81195- kmsg->msg_name = compat_ptr(tmp1);
81196- kmsg->msg_iov = compat_ptr(tmp2);
81197- kmsg->msg_control = compat_ptr(tmp3);
81198+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
81199+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
81200+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
81201 return 0;
81202 }
81203
81204@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81205 kern_msg->msg_name = NULL;
81206
81207 tot_len = iov_from_user_compat_to_kern(kern_iov,
81208- (struct compat_iovec __user *)kern_msg->msg_iov,
81209+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
81210 kern_msg->msg_iovlen);
81211 if (tot_len >= 0)
81212 kern_msg->msg_iov = kern_iov;
81213@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81214
81215 #define CMSG_COMPAT_FIRSTHDR(msg) \
81216 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
81217- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
81218+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
81219 (struct compat_cmsghdr __user *)NULL)
81220
81221 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
81222 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
81223 (ucmlen) <= (unsigned long) \
81224 ((mhdr)->msg_controllen - \
81225- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
81226+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
81227
81228 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
81229 struct compat_cmsghdr __user *cmsg, int cmsg_len)
81230 {
81231 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
81232- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
81233+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
81234 msg->msg_controllen)
81235 return NULL;
81236 return (struct compat_cmsghdr __user *)ptr;
81237@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81238 {
81239 struct compat_timeval ctv;
81240 struct compat_timespec cts[3];
81241- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81242+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81243 struct compat_cmsghdr cmhdr;
81244 int cmlen;
81245
81246@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81247
81248 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
81249 {
81250- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81251+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81252 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
81253 int fdnum = scm->fp->count;
81254 struct file **fp = scm->fp->fp;
81255@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
81256 len = sizeof(ktime);
81257 old_fs = get_fs();
81258 set_fs(KERNEL_DS);
81259- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
81260+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
81261 set_fs(old_fs);
81262
81263 if (!err) {
81264@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81265 case MCAST_JOIN_GROUP:
81266 case MCAST_LEAVE_GROUP:
81267 {
81268- struct compat_group_req __user *gr32 = (void *)optval;
81269+ struct compat_group_req __user *gr32 = (void __user *)optval;
81270 struct group_req __user *kgr =
81271 compat_alloc_user_space(sizeof(struct group_req));
81272 u32 interface;
81273@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81274 case MCAST_BLOCK_SOURCE:
81275 case MCAST_UNBLOCK_SOURCE:
81276 {
81277- struct compat_group_source_req __user *gsr32 = (void *)optval;
81278+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
81279 struct group_source_req __user *kgsr = compat_alloc_user_space(
81280 sizeof(struct group_source_req));
81281 u32 interface;
81282@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81283 }
81284 case MCAST_MSFILTER:
81285 {
81286- struct compat_group_filter __user *gf32 = (void *)optval;
81287+ struct compat_group_filter __user *gf32 = (void __user *)optval;
81288 struct group_filter __user *kgf;
81289 u32 interface, fmode, numsrc;
81290
81291diff --git a/net/core/dev.c b/net/core/dev.c
81292index 84a0705..575db4c 100644
81293--- a/net/core/dev.c
81294+++ b/net/core/dev.c
81295@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
81296 if (no_module && capable(CAP_NET_ADMIN))
81297 no_module = request_module("netdev-%s", name);
81298 if (no_module && capable(CAP_SYS_MODULE)) {
81299+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81300+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
81301+#else
81302 if (!request_module("%s", name))
81303 pr_err("Loading kernel module for a network device "
81304 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
81305 "instead\n", name);
81306+#endif
81307 }
81308 }
81309 EXPORT_SYMBOL(dev_load);
81310@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
81311
81312 struct dev_gso_cb {
81313 void (*destructor)(struct sk_buff *skb);
81314-};
81315+} __no_const;
81316
81317 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
81318
81319@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
81320 }
81321 EXPORT_SYMBOL(netif_rx_ni);
81322
81323-static void net_tx_action(struct softirq_action *h)
81324+static void net_tx_action(void)
81325 {
81326 struct softnet_data *sd = &__get_cpu_var(softnet_data);
81327
81328@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
81329 EXPORT_SYMBOL(netif_napi_del);
81330
81331
81332-static void net_rx_action(struct softirq_action *h)
81333+static void net_rx_action(void)
81334 {
81335 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
81336 unsigned long time_limit = jiffies + 2;
81337diff --git a/net/core/flow.c b/net/core/flow.c
81338index 9601587..8c4824e 100644
81339--- a/net/core/flow.c
81340+++ b/net/core/flow.c
81341@@ -35,11 +35,11 @@ struct flow_cache_entry {
81342 atomic_t *object_ref;
81343 };
81344
81345-atomic_t flow_cache_genid = ATOMIC_INIT(0);
81346+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
81347
81348 static u32 flow_hash_shift;
81349 #define flow_hash_size (1 << flow_hash_shift)
81350-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
81351+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
81352
81353 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
81354
81355@@ -52,7 +52,7 @@ struct flow_percpu_info {
81356 u32 hash_rnd;
81357 int count;
81358 };
81359-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
81360+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
81361
81362 #define flow_hash_rnd_recalc(cpu) \
81363 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
81364@@ -69,7 +69,7 @@ struct flow_flush_info {
81365 atomic_t cpuleft;
81366 struct completion completion;
81367 };
81368-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
81369+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
81370
81371 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
81372
81373@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
81374 if (fle->family == family &&
81375 fle->dir == dir &&
81376 flow_key_compare(key, &fle->key) == 0) {
81377- if (fle->genid == atomic_read(&flow_cache_genid)) {
81378+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
81379 void *ret = fle->object;
81380
81381 if (ret)
81382@@ -228,7 +228,7 @@ nocache:
81383 err = resolver(net, key, family, dir, &obj, &obj_ref);
81384
81385 if (fle && !err) {
81386- fle->genid = atomic_read(&flow_cache_genid);
81387+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
81388
81389 if (fle->object)
81390 atomic_dec(fle->object_ref);
81391@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
81392
81393 fle = flow_table(cpu)[i];
81394 for (; fle; fle = fle->next) {
81395- unsigned genid = atomic_read(&flow_cache_genid);
81396+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
81397
81398 if (!fle->object || fle->genid == genid)
81399 continue;
81400diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
81401index d4fd895..ac9b1e6 100644
81402--- a/net/core/rtnetlink.c
81403+++ b/net/core/rtnetlink.c
81404@@ -57,7 +57,7 @@ struct rtnl_link
81405 {
81406 rtnl_doit_func doit;
81407 rtnl_dumpit_func dumpit;
81408-};
81409+} __no_const;
81410
81411 static DEFINE_MUTEX(rtnl_mutex);
81412
81413diff --git a/net/core/scm.c b/net/core/scm.c
81414index d98eafc..1a190a9 100644
81415--- a/net/core/scm.c
81416+++ b/net/core/scm.c
81417@@ -191,7 +191,7 @@ error:
81418 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81419 {
81420 struct cmsghdr __user *cm
81421- = (__force struct cmsghdr __user *)msg->msg_control;
81422+ = (struct cmsghdr __force_user *)msg->msg_control;
81423 struct cmsghdr cmhdr;
81424 int cmlen = CMSG_LEN(len);
81425 int err;
81426@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81427 err = -EFAULT;
81428 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
81429 goto out;
81430- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
81431+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
81432 goto out;
81433 cmlen = CMSG_SPACE(len);
81434 if (msg->msg_controllen < cmlen)
81435@@ -229,7 +229,7 @@ out:
81436 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81437 {
81438 struct cmsghdr __user *cm
81439- = (__force struct cmsghdr __user*)msg->msg_control;
81440+ = (struct cmsghdr __force_user *)msg->msg_control;
81441
81442 int fdmax = 0;
81443 int fdnum = scm->fp->count;
81444@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81445 if (fdnum < fdmax)
81446 fdmax = fdnum;
81447
81448- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81449+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81450 i++, cmfptr++)
81451 {
81452 int new_fd;
81453diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81454index 45329d7..626aaa6 100644
81455--- a/net/core/secure_seq.c
81456+++ b/net/core/secure_seq.c
81457@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81458 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81459
81460 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81461- __be16 dport)
81462+ __be16 dport)
81463 {
81464 u32 secret[MD5_MESSAGE_BYTES / 4];
81465 u32 hash[MD5_DIGEST_WORDS];
81466@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81467 secret[i] = net_secret[i];
81468
81469 md5_transform(hash, secret);
81470-
81471 return hash[0];
81472 }
81473 #endif
81474diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81475index 025f924..70a71c4 100644
81476--- a/net/core/skbuff.c
81477+++ b/net/core/skbuff.c
81478@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81479 struct sk_buff *frag_iter;
81480 struct sock *sk = skb->sk;
81481
81482+ pax_track_stack();
81483+
81484 /*
81485 * __skb_splice_bits() only fails if the output has no room left,
81486 * so no point in going over the frag_list for the error case.
81487diff --git a/net/core/sock.c b/net/core/sock.c
81488index 6605e75..3acebda 100644
81489--- a/net/core/sock.c
81490+++ b/net/core/sock.c
81491@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81492 break;
81493
81494 case SO_PEERCRED:
81495+ {
81496+ struct ucred peercred;
81497 if (len > sizeof(sk->sk_peercred))
81498 len = sizeof(sk->sk_peercred);
81499- if (copy_to_user(optval, &sk->sk_peercred, len))
81500+ peercred = sk->sk_peercred;
81501+ if (copy_to_user(optval, &peercred, len))
81502 return -EFAULT;
81503 goto lenout;
81504+ }
81505
81506 case SO_PEERNAME:
81507 {
81508@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81509 */
81510 smp_wmb();
81511 atomic_set(&sk->sk_refcnt, 1);
81512- atomic_set(&sk->sk_drops, 0);
81513+ atomic_set_unchecked(&sk->sk_drops, 0);
81514 }
81515 EXPORT_SYMBOL(sock_init_data);
81516
81517diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81518index 2036568..c55883d 100644
81519--- a/net/decnet/sysctl_net_decnet.c
81520+++ b/net/decnet/sysctl_net_decnet.c
81521@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81522
81523 if (len > *lenp) len = *lenp;
81524
81525- if (copy_to_user(buffer, addr, len))
81526+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
81527 return -EFAULT;
81528
81529 *lenp = len;
81530@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81531
81532 if (len > *lenp) len = *lenp;
81533
81534- if (copy_to_user(buffer, devname, len))
81535+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
81536 return -EFAULT;
81537
81538 *lenp = len;
81539diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81540index 39a2d29..f39c0fe 100644
81541--- a/net/econet/Kconfig
81542+++ b/net/econet/Kconfig
81543@@ -4,7 +4,7 @@
81544
81545 config ECONET
81546 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81547- depends on EXPERIMENTAL && INET
81548+ depends on EXPERIMENTAL && INET && BROKEN
81549 ---help---
81550 Econet is a fairly old and slow networking protocol mainly used by
81551 Acorn computers to access file and print servers. It uses native
81552diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81553index a413b1b..380849c 100644
81554--- a/net/ieee802154/dgram.c
81555+++ b/net/ieee802154/dgram.c
81556@@ -318,7 +318,7 @@ out:
81557 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81558 {
81559 if (sock_queue_rcv_skb(sk, skb) < 0) {
81560- atomic_inc(&sk->sk_drops);
81561+ atomic_inc_unchecked(&sk->sk_drops);
81562 kfree_skb(skb);
81563 return NET_RX_DROP;
81564 }
81565diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81566index 30e74ee..bfc6ee0 100644
81567--- a/net/ieee802154/raw.c
81568+++ b/net/ieee802154/raw.c
81569@@ -206,7 +206,7 @@ out:
81570 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81571 {
81572 if (sock_queue_rcv_skb(sk, skb) < 0) {
81573- atomic_inc(&sk->sk_drops);
81574+ atomic_inc_unchecked(&sk->sk_drops);
81575 kfree_skb(skb);
81576 return NET_RX_DROP;
81577 }
81578diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81579index dba56d2..acee5d6 100644
81580--- a/net/ipv4/inet_diag.c
81581+++ b/net/ipv4/inet_diag.c
81582@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81583 r->idiag_retrans = 0;
81584
81585 r->id.idiag_if = sk->sk_bound_dev_if;
81586+#ifdef CONFIG_GRKERNSEC_HIDESYM
81587+ r->id.idiag_cookie[0] = 0;
81588+ r->id.idiag_cookie[1] = 0;
81589+#else
81590 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81591 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81592+#endif
81593
81594 r->id.idiag_sport = inet->sport;
81595 r->id.idiag_dport = inet->dport;
81596@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81597 r->idiag_family = tw->tw_family;
81598 r->idiag_retrans = 0;
81599 r->id.idiag_if = tw->tw_bound_dev_if;
81600+
81601+#ifdef CONFIG_GRKERNSEC_HIDESYM
81602+ r->id.idiag_cookie[0] = 0;
81603+ r->id.idiag_cookie[1] = 0;
81604+#else
81605 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81606 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81607+#endif
81608+
81609 r->id.idiag_sport = tw->tw_sport;
81610 r->id.idiag_dport = tw->tw_dport;
81611 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81612@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81613 if (sk == NULL)
81614 goto unlock;
81615
81616+#ifndef CONFIG_GRKERNSEC_HIDESYM
81617 err = -ESTALE;
81618 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81619 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81620 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81621 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81622 goto out;
81623+#endif
81624
81625 err = -ENOMEM;
81626 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81627@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81628 r->idiag_retrans = req->retrans;
81629
81630 r->id.idiag_if = sk->sk_bound_dev_if;
81631+
81632+#ifdef CONFIG_GRKERNSEC_HIDESYM
81633+ r->id.idiag_cookie[0] = 0;
81634+ r->id.idiag_cookie[1] = 0;
81635+#else
81636 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81637 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81638+#endif
81639
81640 tmo = req->expires - jiffies;
81641 if (tmo < 0)
81642diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81643index d717267..56de7e7 100644
81644--- a/net/ipv4/inet_hashtables.c
81645+++ b/net/ipv4/inet_hashtables.c
81646@@ -18,12 +18,15 @@
81647 #include <linux/sched.h>
81648 #include <linux/slab.h>
81649 #include <linux/wait.h>
81650+#include <linux/security.h>
81651
81652 #include <net/inet_connection_sock.h>
81653 #include <net/inet_hashtables.h>
81654 #include <net/secure_seq.h>
81655 #include <net/ip.h>
81656
81657+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81658+
81659 /*
81660 * Allocate and initialize a new local port bind bucket.
81661 * The bindhash mutex for snum's hash chain must be held here.
81662@@ -491,6 +494,8 @@ ok:
81663 }
81664 spin_unlock(&head->lock);
81665
81666+ gr_update_task_in_ip_table(current, inet_sk(sk));
81667+
81668 if (tw) {
81669 inet_twsk_deschedule(tw, death_row);
81670 inet_twsk_put(tw);
81671diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81672index 13b229f..6956484 100644
81673--- a/net/ipv4/inetpeer.c
81674+++ b/net/ipv4/inetpeer.c
81675@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81676 struct inet_peer *p, *n;
81677 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81678
81679+ pax_track_stack();
81680+
81681 /* Look up for the address quickly. */
81682 read_lock_bh(&peer_pool_lock);
81683 p = lookup(daddr, NULL);
81684@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81685 return NULL;
81686 n->v4daddr = daddr;
81687 atomic_set(&n->refcnt, 1);
81688- atomic_set(&n->rid, 0);
81689+ atomic_set_unchecked(&n->rid, 0);
81690 n->ip_id_count = secure_ip_id(daddr);
81691 n->tcp_ts_stamp = 0;
81692
81693diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81694index d3fe10b..feeafc9 100644
81695--- a/net/ipv4/ip_fragment.c
81696+++ b/net/ipv4/ip_fragment.c
81697@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81698 return 0;
81699
81700 start = qp->rid;
81701- end = atomic_inc_return(&peer->rid);
81702+ end = atomic_inc_return_unchecked(&peer->rid);
81703 qp->rid = end;
81704
81705 rc = qp->q.fragments && (end - start) > max;
81706diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81707index e982b5c..f079d75 100644
81708--- a/net/ipv4/ip_sockglue.c
81709+++ b/net/ipv4/ip_sockglue.c
81710@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81711 int val;
81712 int len;
81713
81714+ pax_track_stack();
81715+
81716 if (level != SOL_IP)
81717 return -EOPNOTSUPP;
81718
81719@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81720 if (sk->sk_type != SOCK_STREAM)
81721 return -ENOPROTOOPT;
81722
81723- msg.msg_control = optval;
81724+ msg.msg_control = (void __force_kernel *)optval;
81725 msg.msg_controllen = len;
81726 msg.msg_flags = 0;
81727
81728diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
81729index f8d04c2..c1188f2 100644
81730--- a/net/ipv4/ipconfig.c
81731+++ b/net/ipv4/ipconfig.c
81732@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
81733
81734 mm_segment_t oldfs = get_fs();
81735 set_fs(get_ds());
81736- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81737+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81738 set_fs(oldfs);
81739 return res;
81740 }
81741@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
81742
81743 mm_segment_t oldfs = get_fs();
81744 set_fs(get_ds());
81745- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81746+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81747 set_fs(oldfs);
81748 return res;
81749 }
81750@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
81751
81752 mm_segment_t oldfs = get_fs();
81753 set_fs(get_ds());
81754- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
81755+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
81756 set_fs(oldfs);
81757 return res;
81758 }
81759diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
81760index c8b0cc3..4da5ae2 100644
81761--- a/net/ipv4/netfilter/arp_tables.c
81762+++ b/net/ipv4/netfilter/arp_tables.c
81763@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81764 private = &tmp;
81765 }
81766 #endif
81767+ memset(&info, 0, sizeof(info));
81768 info.valid_hooks = t->valid_hooks;
81769 memcpy(info.hook_entry, private->hook_entry,
81770 sizeof(info.hook_entry));
81771diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
81772index c156db2..e772975 100644
81773--- a/net/ipv4/netfilter/ip_queue.c
81774+++ b/net/ipv4/netfilter/ip_queue.c
81775@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81776
81777 if (v->data_len < sizeof(*user_iph))
81778 return 0;
81779+ if (v->data_len > 65535)
81780+ return -EMSGSIZE;
81781+
81782 diff = v->data_len - e->skb->len;
81783 if (diff < 0) {
81784 if (pskb_trim(e->skb, v->data_len))
81785@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
81786 static inline void
81787 __ipq_rcv_skb(struct sk_buff *skb)
81788 {
81789- int status, type, pid, flags, nlmsglen, skblen;
81790+ int status, type, pid, flags;
81791+ unsigned int nlmsglen, skblen;
81792 struct nlmsghdr *nlh;
81793
81794 skblen = skb->len;
81795diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
81796index 0606db1..02e7e4c 100644
81797--- a/net/ipv4/netfilter/ip_tables.c
81798+++ b/net/ipv4/netfilter/ip_tables.c
81799@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81800 private = &tmp;
81801 }
81802 #endif
81803+ memset(&info, 0, sizeof(info));
81804 info.valid_hooks = t->valid_hooks;
81805 memcpy(info.hook_entry, private->hook_entry,
81806 sizeof(info.hook_entry));
81807diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81808index d9521f6..3c3eb25 100644
81809--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
81810+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81811@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
81812
81813 *len = 0;
81814
81815- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
81816+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
81817 if (*octets == NULL) {
81818 if (net_ratelimit())
81819 printk("OOM in bsalg (%d)\n", __LINE__);
81820diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
81821index ab996f9..3da5f96 100644
81822--- a/net/ipv4/raw.c
81823+++ b/net/ipv4/raw.c
81824@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81825 /* Charge it to the socket. */
81826
81827 if (sock_queue_rcv_skb(sk, skb) < 0) {
81828- atomic_inc(&sk->sk_drops);
81829+ atomic_inc_unchecked(&sk->sk_drops);
81830 kfree_skb(skb);
81831 return NET_RX_DROP;
81832 }
81833@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81834 int raw_rcv(struct sock *sk, struct sk_buff *skb)
81835 {
81836 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
81837- atomic_inc(&sk->sk_drops);
81838+ atomic_inc_unchecked(&sk->sk_drops);
81839 kfree_skb(skb);
81840 return NET_RX_DROP;
81841 }
81842@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
81843
81844 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
81845 {
81846+ struct icmp_filter filter;
81847+
81848+ if (optlen < 0)
81849+ return -EINVAL;
81850 if (optlen > sizeof(struct icmp_filter))
81851 optlen = sizeof(struct icmp_filter);
81852- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
81853+ if (copy_from_user(&filter, optval, optlen))
81854 return -EFAULT;
81855+ raw_sk(sk)->filter = filter;
81856+
81857 return 0;
81858 }
81859
81860 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
81861 {
81862 int len, ret = -EFAULT;
81863+ struct icmp_filter filter;
81864
81865 if (get_user(len, optlen))
81866 goto out;
81867@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
81868 if (len > sizeof(struct icmp_filter))
81869 len = sizeof(struct icmp_filter);
81870 ret = -EFAULT;
81871- if (put_user(len, optlen) ||
81872- copy_to_user(optval, &raw_sk(sk)->filter, len))
81873+ filter = raw_sk(sk)->filter;
81874+ if (put_user(len, optlen) || len > sizeof filter ||
81875+ copy_to_user(optval, &filter, len))
81876 goto out;
81877 ret = 0;
81878 out: return ret;
81879@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81880 sk_wmem_alloc_get(sp),
81881 sk_rmem_alloc_get(sp),
81882 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81883- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81884+ atomic_read(&sp->sk_refcnt),
81885+#ifdef CONFIG_GRKERNSEC_HIDESYM
81886+ NULL,
81887+#else
81888+ sp,
81889+#endif
81890+ atomic_read_unchecked(&sp->sk_drops));
81891 }
81892
81893 static int raw_seq_show(struct seq_file *seq, void *v)
81894diff --git a/net/ipv4/route.c b/net/ipv4/route.c
81895index 58f141b..b759702 100644
81896--- a/net/ipv4/route.c
81897+++ b/net/ipv4/route.c
81898@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
81899
81900 static inline int rt_genid(struct net *net)
81901 {
81902- return atomic_read(&net->ipv4.rt_genid);
81903+ return atomic_read_unchecked(&net->ipv4.rt_genid);
81904 }
81905
81906 #ifdef CONFIG_PROC_FS
81907@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
81908 unsigned char shuffle;
81909
81910 get_random_bytes(&shuffle, sizeof(shuffle));
81911- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
81912+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
81913 }
81914
81915 /*
81916@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
81917
81918 static __net_init int rt_secret_timer_init(struct net *net)
81919 {
81920- atomic_set(&net->ipv4.rt_genid,
81921+ atomic_set_unchecked(&net->ipv4.rt_genid,
81922 (int) ((num_physpages ^ (num_physpages>>8)) ^
81923 (jiffies ^ (jiffies >> 7))));
81924
81925diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
81926index f095659..adc892a 100644
81927--- a/net/ipv4/tcp.c
81928+++ b/net/ipv4/tcp.c
81929@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
81930 int val;
81931 int err = 0;
81932
81933+ pax_track_stack();
81934+
81935 /* This is a string value all the others are int's */
81936 if (optname == TCP_CONGESTION) {
81937 char name[TCP_CA_NAME_MAX];
81938@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
81939 struct tcp_sock *tp = tcp_sk(sk);
81940 int val, len;
81941
81942+ pax_track_stack();
81943+
81944 if (get_user(len, optlen))
81945 return -EFAULT;
81946
81947diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81948index 6fc7961..33bad4a 100644
81949--- a/net/ipv4/tcp_ipv4.c
81950+++ b/net/ipv4/tcp_ipv4.c
81951@@ -85,6 +85,9 @@
81952 int sysctl_tcp_tw_reuse __read_mostly;
81953 int sysctl_tcp_low_latency __read_mostly;
81954
81955+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81956+extern int grsec_enable_blackhole;
81957+#endif
81958
81959 #ifdef CONFIG_TCP_MD5SIG
81960 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
81961@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
81962 return 0;
81963
81964 reset:
81965+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81966+ if (!grsec_enable_blackhole)
81967+#endif
81968 tcp_v4_send_reset(rsk, skb);
81969 discard:
81970 kfree_skb(skb);
81971@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
81972 TCP_SKB_CB(skb)->sacked = 0;
81973
81974 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81975- if (!sk)
81976+ if (!sk) {
81977+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81978+ ret = 1;
81979+#endif
81980 goto no_tcp_socket;
81981+ }
81982
81983 process:
81984- if (sk->sk_state == TCP_TIME_WAIT)
81985+ if (sk->sk_state == TCP_TIME_WAIT) {
81986+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81987+ ret = 2;
81988+#endif
81989 goto do_time_wait;
81990+ }
81991
81992 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
81993 goto discard_and_relse;
81994@@ -1651,6 +1665,10 @@ no_tcp_socket:
81995 bad_packet:
81996 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81997 } else {
81998+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81999+ if (!grsec_enable_blackhole || (ret == 1 &&
82000+ (skb->dev->flags & IFF_LOOPBACK)))
82001+#endif
82002 tcp_v4_send_reset(NULL, skb);
82003 }
82004
82005@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
82006 0, /* non standard timer */
82007 0, /* open_requests have no inode */
82008 atomic_read(&sk->sk_refcnt),
82009+#ifdef CONFIG_GRKERNSEC_HIDESYM
82010+ NULL,
82011+#else
82012 req,
82013+#endif
82014 len);
82015 }
82016
82017@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
82018 sock_i_uid(sk),
82019 icsk->icsk_probes_out,
82020 sock_i_ino(sk),
82021- atomic_read(&sk->sk_refcnt), sk,
82022+ atomic_read(&sk->sk_refcnt),
82023+#ifdef CONFIG_GRKERNSEC_HIDESYM
82024+ NULL,
82025+#else
82026+ sk,
82027+#endif
82028 jiffies_to_clock_t(icsk->icsk_rto),
82029 jiffies_to_clock_t(icsk->icsk_ack.ato),
82030 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
82031@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
82032 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
82033 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
82034 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82035- atomic_read(&tw->tw_refcnt), tw, len);
82036+ atomic_read(&tw->tw_refcnt),
82037+#ifdef CONFIG_GRKERNSEC_HIDESYM
82038+ NULL,
82039+#else
82040+ tw,
82041+#endif
82042+ len);
82043 }
82044
82045 #define TMPSZ 150
82046diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
82047index 4c03598..e09a8e8 100644
82048--- a/net/ipv4/tcp_minisocks.c
82049+++ b/net/ipv4/tcp_minisocks.c
82050@@ -26,6 +26,10 @@
82051 #include <net/inet_common.h>
82052 #include <net/xfrm.h>
82053
82054+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82055+extern int grsec_enable_blackhole;
82056+#endif
82057+
82058 #ifdef CONFIG_SYSCTL
82059 #define SYNC_INIT 0 /* let the user enable it */
82060 #else
82061@@ -672,6 +676,10 @@ listen_overflow:
82062
82063 embryonic_reset:
82064 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
82065+
82066+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82067+ if (!grsec_enable_blackhole)
82068+#endif
82069 if (!(flg & TCP_FLAG_RST))
82070 req->rsk_ops->send_reset(sk, skb);
82071
82072diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
82073index af83bdf..ec91cb2 100644
82074--- a/net/ipv4/tcp_output.c
82075+++ b/net/ipv4/tcp_output.c
82076@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
82077 __u8 *md5_hash_location;
82078 int mss;
82079
82080+ pax_track_stack();
82081+
82082 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
82083 if (skb == NULL)
82084 return NULL;
82085diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
82086index 59f5b5e..193860f 100644
82087--- a/net/ipv4/tcp_probe.c
82088+++ b/net/ipv4/tcp_probe.c
82089@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
82090 if (cnt + width >= len)
82091 break;
82092
82093- if (copy_to_user(buf + cnt, tbuf, width))
82094+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
82095 return -EFAULT;
82096 cnt += width;
82097 }
82098diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
82099index 57d5501..a9ed13a 100644
82100--- a/net/ipv4/tcp_timer.c
82101+++ b/net/ipv4/tcp_timer.c
82102@@ -21,6 +21,10 @@
82103 #include <linux/module.h>
82104 #include <net/tcp.h>
82105
82106+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82107+extern int grsec_lastack_retries;
82108+#endif
82109+
82110 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
82111 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
82112 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
82113@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
82114 }
82115 }
82116
82117+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82118+ if ((sk->sk_state == TCP_LAST_ACK) &&
82119+ (grsec_lastack_retries > 0) &&
82120+ (grsec_lastack_retries < retry_until))
82121+ retry_until = grsec_lastack_retries;
82122+#endif
82123+
82124 if (retransmits_timed_out(sk, retry_until)) {
82125 /* Has it gone just too far? */
82126 tcp_write_err(sk);
82127diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
82128index 8e28770..72105c8 100644
82129--- a/net/ipv4/udp.c
82130+++ b/net/ipv4/udp.c
82131@@ -86,6 +86,7 @@
82132 #include <linux/types.h>
82133 #include <linux/fcntl.h>
82134 #include <linux/module.h>
82135+#include <linux/security.h>
82136 #include <linux/socket.h>
82137 #include <linux/sockios.h>
82138 #include <linux/igmp.h>
82139@@ -106,6 +107,10 @@
82140 #include <net/xfrm.h>
82141 #include "udp_impl.h"
82142
82143+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82144+extern int grsec_enable_blackhole;
82145+#endif
82146+
82147 struct udp_table udp_table;
82148 EXPORT_SYMBOL(udp_table);
82149
82150@@ -371,6 +376,9 @@ found:
82151 return s;
82152 }
82153
82154+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
82155+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
82156+
82157 /*
82158 * This routine is called by the ICMP module when it gets some
82159 * sort of error condition. If err < 0 then the socket should
82160@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
82161 dport = usin->sin_port;
82162 if (dport == 0)
82163 return -EINVAL;
82164+
82165+ err = gr_search_udp_sendmsg(sk, usin);
82166+ if (err)
82167+ return err;
82168 } else {
82169 if (sk->sk_state != TCP_ESTABLISHED)
82170 return -EDESTADDRREQ;
82171+
82172+ err = gr_search_udp_sendmsg(sk, NULL);
82173+ if (err)
82174+ return err;
82175+
82176 daddr = inet->daddr;
82177 dport = inet->dport;
82178 /* Open fast path for connected socket.
82179@@ -945,6 +962,10 @@ try_again:
82180 if (!skb)
82181 goto out;
82182
82183+ err = gr_search_udp_recvmsg(sk, skb);
82184+ if (err)
82185+ goto out_free;
82186+
82187 ulen = skb->len - sizeof(struct udphdr);
82188 copied = len;
82189 if (copied > ulen)
82190@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
82191 if (rc == -ENOMEM) {
82192 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
82193 is_udplite);
82194- atomic_inc(&sk->sk_drops);
82195+ atomic_inc_unchecked(&sk->sk_drops);
82196 }
82197 goto drop;
82198 }
82199@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82200 goto csum_error;
82201
82202 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
82203+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82204+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82205+#endif
82206 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
82207
82208 /*
82209@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
82210 sk_wmem_alloc_get(sp),
82211 sk_rmem_alloc_get(sp),
82212 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82213- atomic_read(&sp->sk_refcnt), sp,
82214- atomic_read(&sp->sk_drops), len);
82215+ atomic_read(&sp->sk_refcnt),
82216+#ifdef CONFIG_GRKERNSEC_HIDESYM
82217+ NULL,
82218+#else
82219+ sp,
82220+#endif
82221+ atomic_read_unchecked(&sp->sk_drops), len);
82222 }
82223
82224 int udp4_seq_show(struct seq_file *seq, void *v)
82225diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
82226index 8ac3d09..fc58c5f 100644
82227--- a/net/ipv6/addrconf.c
82228+++ b/net/ipv6/addrconf.c
82229@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
82230 p.iph.ihl = 5;
82231 p.iph.protocol = IPPROTO_IPV6;
82232 p.iph.ttl = 64;
82233- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
82234+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
82235
82236 if (ops->ndo_do_ioctl) {
82237 mm_segment_t oldfs = get_fs();
82238diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
82239index cc4797d..7cfdfcc 100644
82240--- a/net/ipv6/inet6_connection_sock.c
82241+++ b/net/ipv6/inet6_connection_sock.c
82242@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
82243 #ifdef CONFIG_XFRM
82244 {
82245 struct rt6_info *rt = (struct rt6_info *)dst;
82246- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
82247+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
82248 }
82249 #endif
82250 }
82251@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
82252 #ifdef CONFIG_XFRM
82253 if (dst) {
82254 struct rt6_info *rt = (struct rt6_info *)dst;
82255- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
82256+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
82257 sk->sk_dst_cache = NULL;
82258 dst_release(dst);
82259 dst = NULL;
82260diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
82261index 093e9b2..f72cddb 100644
82262--- a/net/ipv6/inet6_hashtables.c
82263+++ b/net/ipv6/inet6_hashtables.c
82264@@ -119,7 +119,7 @@ out:
82265 }
82266 EXPORT_SYMBOL(__inet6_lookup_established);
82267
82268-static int inline compute_score(struct sock *sk, struct net *net,
82269+static inline int compute_score(struct sock *sk, struct net *net,
82270 const unsigned short hnum,
82271 const struct in6_addr *daddr,
82272 const int dif)
82273diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
82274index 4f7aaf6..f7acf45 100644
82275--- a/net/ipv6/ipv6_sockglue.c
82276+++ b/net/ipv6/ipv6_sockglue.c
82277@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
82278 int val, valbool;
82279 int retv = -ENOPROTOOPT;
82280
82281+ pax_track_stack();
82282+
82283 if (optval == NULL)
82284 val=0;
82285 else {
82286@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82287 int len;
82288 int val;
82289
82290+ pax_track_stack();
82291+
82292 if (ip6_mroute_opt(optname))
82293 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
82294
82295@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82296 if (sk->sk_type != SOCK_STREAM)
82297 return -ENOPROTOOPT;
82298
82299- msg.msg_control = optval;
82300+ msg.msg_control = (void __force_kernel *)optval;
82301 msg.msg_controllen = len;
82302 msg.msg_flags = 0;
82303
82304diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
82305index 1cf3f0c..1d4376f 100644
82306--- a/net/ipv6/netfilter/ip6_queue.c
82307+++ b/net/ipv6/netfilter/ip6_queue.c
82308@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82309
82310 if (v->data_len < sizeof(*user_iph))
82311 return 0;
82312+ if (v->data_len > 65535)
82313+ return -EMSGSIZE;
82314+
82315 diff = v->data_len - e->skb->len;
82316 if (diff < 0) {
82317 if (pskb_trim(e->skb, v->data_len))
82318@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
82319 static inline void
82320 __ipq_rcv_skb(struct sk_buff *skb)
82321 {
82322- int status, type, pid, flags, nlmsglen, skblen;
82323+ int status, type, pid, flags;
82324+ unsigned int nlmsglen, skblen;
82325 struct nlmsghdr *nlh;
82326
82327 skblen = skb->len;
82328diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
82329index 78b5a36..7f37433 100644
82330--- a/net/ipv6/netfilter/ip6_tables.c
82331+++ b/net/ipv6/netfilter/ip6_tables.c
82332@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82333 private = &tmp;
82334 }
82335 #endif
82336+ memset(&info, 0, sizeof(info));
82337 info.valid_hooks = t->valid_hooks;
82338 memcpy(info.hook_entry, private->hook_entry,
82339 sizeof(info.hook_entry));
82340diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
82341index 4f24570..b813b34 100644
82342--- a/net/ipv6/raw.c
82343+++ b/net/ipv6/raw.c
82344@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
82345 {
82346 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
82347 skb_checksum_complete(skb)) {
82348- atomic_inc(&sk->sk_drops);
82349+ atomic_inc_unchecked(&sk->sk_drops);
82350 kfree_skb(skb);
82351 return NET_RX_DROP;
82352 }
82353
82354 /* Charge it to the socket. */
82355 if (sock_queue_rcv_skb(sk,skb)<0) {
82356- atomic_inc(&sk->sk_drops);
82357+ atomic_inc_unchecked(&sk->sk_drops);
82358 kfree_skb(skb);
82359 return NET_RX_DROP;
82360 }
82361@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82362 struct raw6_sock *rp = raw6_sk(sk);
82363
82364 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
82365- atomic_inc(&sk->sk_drops);
82366+ atomic_inc_unchecked(&sk->sk_drops);
82367 kfree_skb(skb);
82368 return NET_RX_DROP;
82369 }
82370@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82371
82372 if (inet->hdrincl) {
82373 if (skb_checksum_complete(skb)) {
82374- atomic_inc(&sk->sk_drops);
82375+ atomic_inc_unchecked(&sk->sk_drops);
82376 kfree_skb(skb);
82377 return NET_RX_DROP;
82378 }
82379@@ -518,7 +518,7 @@ csum_copy_err:
82380 as some normal condition.
82381 */
82382 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
82383- atomic_inc(&sk->sk_drops);
82384+ atomic_inc_unchecked(&sk->sk_drops);
82385 goto out;
82386 }
82387
82388@@ -600,7 +600,7 @@ out:
82389 return err;
82390 }
82391
82392-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
82393+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
82394 struct flowi *fl, struct rt6_info *rt,
82395 unsigned int flags)
82396 {
82397@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
82398 u16 proto;
82399 int err;
82400
82401+ pax_track_stack();
82402+
82403 /* Rough check on arithmetic overflow,
82404 better check is made in ip6_append_data().
82405 */
82406@@ -916,12 +918,17 @@ do_confirm:
82407 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
82408 char __user *optval, int optlen)
82409 {
82410+ struct icmp6_filter filter;
82411+
82412 switch (optname) {
82413 case ICMPV6_FILTER:
82414+ if (optlen < 0)
82415+ return -EINVAL;
82416 if (optlen > sizeof(struct icmp6_filter))
82417 optlen = sizeof(struct icmp6_filter);
82418- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
82419+ if (copy_from_user(&filter, optval, optlen))
82420 return -EFAULT;
82421+ raw6_sk(sk)->filter = filter;
82422 return 0;
82423 default:
82424 return -ENOPROTOOPT;
82425@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82426 char __user *optval, int __user *optlen)
82427 {
82428 int len;
82429+ struct icmp6_filter filter;
82430
82431 switch (optname) {
82432 case ICMPV6_FILTER:
82433@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82434 len = sizeof(struct icmp6_filter);
82435 if (put_user(len, optlen))
82436 return -EFAULT;
82437- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
82438+ filter = raw6_sk(sk)->filter;
82439+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
82440 return -EFAULT;
82441 return 0;
82442 default:
82443@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82444 0, 0L, 0,
82445 sock_i_uid(sp), 0,
82446 sock_i_ino(sp),
82447- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82448+ atomic_read(&sp->sk_refcnt),
82449+#ifdef CONFIG_GRKERNSEC_HIDESYM
82450+ NULL,
82451+#else
82452+ sp,
82453+#endif
82454+ atomic_read_unchecked(&sp->sk_drops));
82455 }
82456
82457 static int raw6_seq_show(struct seq_file *seq, void *v)
82458diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82459index faae6df..d4430c1 100644
82460--- a/net/ipv6/tcp_ipv6.c
82461+++ b/net/ipv6/tcp_ipv6.c
82462@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82463 }
82464 #endif
82465
82466+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82467+extern int grsec_enable_blackhole;
82468+#endif
82469+
82470 static void tcp_v6_hash(struct sock *sk)
82471 {
82472 if (sk->sk_state != TCP_CLOSE) {
82473@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82474 return 0;
82475
82476 reset:
82477+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82478+ if (!grsec_enable_blackhole)
82479+#endif
82480 tcp_v6_send_reset(sk, skb);
82481 discard:
82482 if (opt_skb)
82483@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82484 TCP_SKB_CB(skb)->sacked = 0;
82485
82486 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82487- if (!sk)
82488+ if (!sk) {
82489+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82490+ ret = 1;
82491+#endif
82492 goto no_tcp_socket;
82493+ }
82494
82495 process:
82496- if (sk->sk_state == TCP_TIME_WAIT)
82497+ if (sk->sk_state == TCP_TIME_WAIT) {
82498+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82499+ ret = 2;
82500+#endif
82501 goto do_time_wait;
82502+ }
82503
82504 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82505 goto discard_and_relse;
82506@@ -1701,6 +1716,10 @@ no_tcp_socket:
82507 bad_packet:
82508 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82509 } else {
82510+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82511+ if (!grsec_enable_blackhole || (ret == 1 &&
82512+ (skb->dev->flags & IFF_LOOPBACK)))
82513+#endif
82514 tcp_v6_send_reset(NULL, skb);
82515 }
82516
82517@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82518 uid,
82519 0, /* non standard timer */
82520 0, /* open_requests have no inode */
82521- 0, req);
82522+ 0,
82523+#ifdef CONFIG_GRKERNSEC_HIDESYM
82524+ NULL
82525+#else
82526+ req
82527+#endif
82528+ );
82529 }
82530
82531 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82532@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82533 sock_i_uid(sp),
82534 icsk->icsk_probes_out,
82535 sock_i_ino(sp),
82536- atomic_read(&sp->sk_refcnt), sp,
82537+ atomic_read(&sp->sk_refcnt),
82538+#ifdef CONFIG_GRKERNSEC_HIDESYM
82539+ NULL,
82540+#else
82541+ sp,
82542+#endif
82543 jiffies_to_clock_t(icsk->icsk_rto),
82544 jiffies_to_clock_t(icsk->icsk_ack.ato),
82545 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82546@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82547 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82548 tw->tw_substate, 0, 0,
82549 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82550- atomic_read(&tw->tw_refcnt), tw);
82551+ atomic_read(&tw->tw_refcnt),
82552+#ifdef CONFIG_GRKERNSEC_HIDESYM
82553+ NULL
82554+#else
82555+ tw
82556+#endif
82557+ );
82558 }
82559
82560 static int tcp6_seq_show(struct seq_file *seq, void *v)
82561diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82562index 9cc6289..052c521 100644
82563--- a/net/ipv6/udp.c
82564+++ b/net/ipv6/udp.c
82565@@ -49,6 +49,10 @@
82566 #include <linux/seq_file.h>
82567 #include "udp_impl.h"
82568
82569+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82570+extern int grsec_enable_blackhole;
82571+#endif
82572+
82573 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82574 {
82575 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82576@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82577 if (rc == -ENOMEM) {
82578 UDP6_INC_STATS_BH(sock_net(sk),
82579 UDP_MIB_RCVBUFERRORS, is_udplite);
82580- atomic_inc(&sk->sk_drops);
82581+ atomic_inc_unchecked(&sk->sk_drops);
82582 }
82583 goto drop;
82584 }
82585@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82586 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82587 proto == IPPROTO_UDPLITE);
82588
82589+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82590+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82591+#endif
82592 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82593
82594 kfree_skb(skb);
82595@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82596 0, 0L, 0,
82597 sock_i_uid(sp), 0,
82598 sock_i_ino(sp),
82599- atomic_read(&sp->sk_refcnt), sp,
82600- atomic_read(&sp->sk_drops));
82601+ atomic_read(&sp->sk_refcnt),
82602+#ifdef CONFIG_GRKERNSEC_HIDESYM
82603+ NULL,
82604+#else
82605+ sp,
82606+#endif
82607+ atomic_read_unchecked(&sp->sk_drops));
82608 }
82609
82610 int udp6_seq_show(struct seq_file *seq, void *v)
82611diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82612index 811984d..11f59b7 100644
82613--- a/net/irda/ircomm/ircomm_tty.c
82614+++ b/net/irda/ircomm/ircomm_tty.c
82615@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82616 add_wait_queue(&self->open_wait, &wait);
82617
82618 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82619- __FILE__,__LINE__, tty->driver->name, self->open_count );
82620+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82621
82622 /* As far as I can see, we protect open_count - Jean II */
82623 spin_lock_irqsave(&self->spinlock, flags);
82624 if (!tty_hung_up_p(filp)) {
82625 extra_count = 1;
82626- self->open_count--;
82627+ local_dec(&self->open_count);
82628 }
82629 spin_unlock_irqrestore(&self->spinlock, flags);
82630- self->blocked_open++;
82631+ local_inc(&self->blocked_open);
82632
82633 while (1) {
82634 if (tty->termios->c_cflag & CBAUD) {
82635@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82636 }
82637
82638 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82639- __FILE__,__LINE__, tty->driver->name, self->open_count );
82640+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82641
82642 schedule();
82643 }
82644@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82645 if (extra_count) {
82646 /* ++ is not atomic, so this should be protected - Jean II */
82647 spin_lock_irqsave(&self->spinlock, flags);
82648- self->open_count++;
82649+ local_inc(&self->open_count);
82650 spin_unlock_irqrestore(&self->spinlock, flags);
82651 }
82652- self->blocked_open--;
82653+ local_dec(&self->blocked_open);
82654
82655 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82656- __FILE__,__LINE__, tty->driver->name, self->open_count);
82657+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82658
82659 if (!retval)
82660 self->flags |= ASYNC_NORMAL_ACTIVE;
82661@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82662 }
82663 /* ++ is not atomic, so this should be protected - Jean II */
82664 spin_lock_irqsave(&self->spinlock, flags);
82665- self->open_count++;
82666+ local_inc(&self->open_count);
82667
82668 tty->driver_data = self;
82669 self->tty = tty;
82670 spin_unlock_irqrestore(&self->spinlock, flags);
82671
82672 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82673- self->line, self->open_count);
82674+ self->line, local_read(&self->open_count));
82675
82676 /* Not really used by us, but lets do it anyway */
82677 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82678@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82679 return;
82680 }
82681
82682- if ((tty->count == 1) && (self->open_count != 1)) {
82683+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82684 /*
82685 * Uh, oh. tty->count is 1, which means that the tty
82686 * structure will be freed. state->count should always
82687@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82688 */
82689 IRDA_DEBUG(0, "%s(), bad serial port count; "
82690 "tty->count is 1, state->count is %d\n", __func__ ,
82691- self->open_count);
82692- self->open_count = 1;
82693+ local_read(&self->open_count));
82694+ local_set(&self->open_count, 1);
82695 }
82696
82697- if (--self->open_count < 0) {
82698+ if (local_dec_return(&self->open_count) < 0) {
82699 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82700- __func__, self->line, self->open_count);
82701- self->open_count = 0;
82702+ __func__, self->line, local_read(&self->open_count));
82703+ local_set(&self->open_count, 0);
82704 }
82705- if (self->open_count) {
82706+ if (local_read(&self->open_count)) {
82707 spin_unlock_irqrestore(&self->spinlock, flags);
82708
82709 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
82710@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82711 tty->closing = 0;
82712 self->tty = NULL;
82713
82714- if (self->blocked_open) {
82715+ if (local_read(&self->blocked_open)) {
82716 if (self->close_delay)
82717 schedule_timeout_interruptible(self->close_delay);
82718 wake_up_interruptible(&self->open_wait);
82719@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
82720 spin_lock_irqsave(&self->spinlock, flags);
82721 self->flags &= ~ASYNC_NORMAL_ACTIVE;
82722 self->tty = NULL;
82723- self->open_count = 0;
82724+ local_set(&self->open_count, 0);
82725 spin_unlock_irqrestore(&self->spinlock, flags);
82726
82727 wake_up_interruptible(&self->open_wait);
82728@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
82729 seq_putc(m, '\n');
82730
82731 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
82732- seq_printf(m, "Open count: %d\n", self->open_count);
82733+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
82734 seq_printf(m, "Max data size: %d\n", self->max_data_size);
82735 seq_printf(m, "Max header size: %d\n", self->max_header_size);
82736
82737diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
82738index bada1b9..f325943 100644
82739--- a/net/iucv/af_iucv.c
82740+++ b/net/iucv/af_iucv.c
82741@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
82742
82743 write_lock_bh(&iucv_sk_list.lock);
82744
82745- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
82746+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82747 while (__iucv_get_sock_by_name(name)) {
82748 sprintf(name, "%08x",
82749- atomic_inc_return(&iucv_sk_list.autobind_name));
82750+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82751 }
82752
82753 write_unlock_bh(&iucv_sk_list.lock);
82754diff --git a/net/key/af_key.c b/net/key/af_key.c
82755index 4e98193..439b449 100644
82756--- a/net/key/af_key.c
82757+++ b/net/key/af_key.c
82758@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
82759 struct xfrm_migrate m[XFRM_MAX_DEPTH];
82760 struct xfrm_kmaddress k;
82761
82762+ pax_track_stack();
82763+
82764 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
82765 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
82766 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
82767@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
82768 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
82769 else
82770 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
82771+#ifdef CONFIG_GRKERNSEC_HIDESYM
82772+ NULL,
82773+#else
82774 s,
82775+#endif
82776 atomic_read(&s->sk_refcnt),
82777 sk_rmem_alloc_get(s),
82778 sk_wmem_alloc_get(s),
82779diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
82780index bda96d1..c038b72 100644
82781--- a/net/lapb/lapb_iface.c
82782+++ b/net/lapb/lapb_iface.c
82783@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
82784 goto out;
82785
82786 lapb->dev = dev;
82787- lapb->callbacks = *callbacks;
82788+ lapb->callbacks = callbacks;
82789
82790 __lapb_insert_cb(lapb);
82791
82792@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
82793
82794 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
82795 {
82796- if (lapb->callbacks.connect_confirmation)
82797- lapb->callbacks.connect_confirmation(lapb->dev, reason);
82798+ if (lapb->callbacks->connect_confirmation)
82799+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
82800 }
82801
82802 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
82803 {
82804- if (lapb->callbacks.connect_indication)
82805- lapb->callbacks.connect_indication(lapb->dev, reason);
82806+ if (lapb->callbacks->connect_indication)
82807+ lapb->callbacks->connect_indication(lapb->dev, reason);
82808 }
82809
82810 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
82811 {
82812- if (lapb->callbacks.disconnect_confirmation)
82813- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
82814+ if (lapb->callbacks->disconnect_confirmation)
82815+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
82816 }
82817
82818 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
82819 {
82820- if (lapb->callbacks.disconnect_indication)
82821- lapb->callbacks.disconnect_indication(lapb->dev, reason);
82822+ if (lapb->callbacks->disconnect_indication)
82823+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
82824 }
82825
82826 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
82827 {
82828- if (lapb->callbacks.data_indication)
82829- return lapb->callbacks.data_indication(lapb->dev, skb);
82830+ if (lapb->callbacks->data_indication)
82831+ return lapb->callbacks->data_indication(lapb->dev, skb);
82832
82833 kfree_skb(skb);
82834 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
82835@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
82836 {
82837 int used = 0;
82838
82839- if (lapb->callbacks.data_transmit) {
82840- lapb->callbacks.data_transmit(lapb->dev, skb);
82841+ if (lapb->callbacks->data_transmit) {
82842+ lapb->callbacks->data_transmit(lapb->dev, skb);
82843 used = 1;
82844 }
82845
82846diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
82847index fe2d3f8..e57f683 100644
82848--- a/net/mac80211/cfg.c
82849+++ b/net/mac80211/cfg.c
82850@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
82851 return err;
82852 }
82853
82854-struct cfg80211_ops mac80211_config_ops = {
82855+const struct cfg80211_ops mac80211_config_ops = {
82856 .add_virtual_intf = ieee80211_add_iface,
82857 .del_virtual_intf = ieee80211_del_iface,
82858 .change_virtual_intf = ieee80211_change_iface,
82859diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
82860index 7d7879f..2d51f62 100644
82861--- a/net/mac80211/cfg.h
82862+++ b/net/mac80211/cfg.h
82863@@ -4,6 +4,6 @@
82864 #ifndef __CFG_H
82865 #define __CFG_H
82866
82867-extern struct cfg80211_ops mac80211_config_ops;
82868+extern const struct cfg80211_ops mac80211_config_ops;
82869
82870 #endif /* __CFG_H */
82871diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
82872index 99c7525..9cb4937 100644
82873--- a/net/mac80211/debugfs_key.c
82874+++ b/net/mac80211/debugfs_key.c
82875@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
82876 size_t count, loff_t *ppos)
82877 {
82878 struct ieee80211_key *key = file->private_data;
82879- int i, res, bufsize = 2 * key->conf.keylen + 2;
82880+ int i, bufsize = 2 * key->conf.keylen + 2;
82881 char *buf = kmalloc(bufsize, GFP_KERNEL);
82882 char *p = buf;
82883+ ssize_t res;
82884+
82885+ if (buf == NULL)
82886+ return -ENOMEM;
82887
82888 for (i = 0; i < key->conf.keylen; i++)
82889 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
82890diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
82891index 33a2e89..08650c8 100644
82892--- a/net/mac80211/debugfs_sta.c
82893+++ b/net/mac80211/debugfs_sta.c
82894@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
82895 int i;
82896 struct sta_info *sta = file->private_data;
82897
82898+ pax_track_stack();
82899+
82900 spin_lock_bh(&sta->lock);
82901 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
82902 sta->ampdu_mlme.dialog_token_allocator + 1);
82903diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
82904index ca62bfe..6657a03 100644
82905--- a/net/mac80211/ieee80211_i.h
82906+++ b/net/mac80211/ieee80211_i.h
82907@@ -25,6 +25,7 @@
82908 #include <linux/etherdevice.h>
82909 #include <net/cfg80211.h>
82910 #include <net/mac80211.h>
82911+#include <asm/local.h>
82912 #include "key.h"
82913 #include "sta_info.h"
82914
82915@@ -635,7 +636,7 @@ struct ieee80211_local {
82916 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
82917 spinlock_t queue_stop_reason_lock;
82918
82919- int open_count;
82920+ local_t open_count;
82921 int monitors, cooked_mntrs;
82922 /* number of interfaces with corresponding FIF_ flags */
82923 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
82924diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
82925index 079c500..eb3c6d4 100644
82926--- a/net/mac80211/iface.c
82927+++ b/net/mac80211/iface.c
82928@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
82929 break;
82930 }
82931
82932- if (local->open_count == 0) {
82933+ if (local_read(&local->open_count) == 0) {
82934 res = drv_start(local);
82935 if (res)
82936 goto err_del_bss;
82937@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
82938 * Validate the MAC address for this device.
82939 */
82940 if (!is_valid_ether_addr(dev->dev_addr)) {
82941- if (!local->open_count)
82942+ if (!local_read(&local->open_count))
82943 drv_stop(local);
82944 return -EADDRNOTAVAIL;
82945 }
82946@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
82947
82948 hw_reconf_flags |= __ieee80211_recalc_idle(local);
82949
82950- local->open_count++;
82951+ local_inc(&local->open_count);
82952 if (hw_reconf_flags) {
82953 ieee80211_hw_config(local, hw_reconf_flags);
82954 /*
82955@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
82956 err_del_interface:
82957 drv_remove_interface(local, &conf);
82958 err_stop:
82959- if (!local->open_count)
82960+ if (!local_read(&local->open_count))
82961 drv_stop(local);
82962 err_del_bss:
82963 sdata->bss = NULL;
82964@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
82965 WARN_ON(!list_empty(&sdata->u.ap.vlans));
82966 }
82967
82968- local->open_count--;
82969+ local_dec(&local->open_count);
82970
82971 switch (sdata->vif.type) {
82972 case NL80211_IFTYPE_AP_VLAN:
82973@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
82974
82975 ieee80211_recalc_ps(local, -1);
82976
82977- if (local->open_count == 0) {
82978+ if (local_read(&local->open_count) == 0) {
82979 ieee80211_clear_tx_pending(local);
82980 ieee80211_stop_device(local);
82981
82982diff --git a/net/mac80211/main.c b/net/mac80211/main.c
82983index 2dfe176..74e4388 100644
82984--- a/net/mac80211/main.c
82985+++ b/net/mac80211/main.c
82986@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
82987 local->hw.conf.power_level = power;
82988 }
82989
82990- if (changed && local->open_count) {
82991+ if (changed && local_read(&local->open_count)) {
82992 ret = drv_config(local, changed);
82993 /*
82994 * Goal:
82995diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
82996index e67eea7..fcc227e 100644
82997--- a/net/mac80211/mlme.c
82998+++ b/net/mac80211/mlme.c
82999@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
83000 bool have_higher_than_11mbit = false, newsta = false;
83001 u16 ap_ht_cap_flags;
83002
83003+ pax_track_stack();
83004+
83005 /*
83006 * AssocResp and ReassocResp have identical structure, so process both
83007 * of them in this function.
83008diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
83009index e535f1c..4d733d1 100644
83010--- a/net/mac80211/pm.c
83011+++ b/net/mac80211/pm.c
83012@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
83013 }
83014
83015 /* stop hardware - this must stop RX */
83016- if (local->open_count)
83017+ if (local_read(&local->open_count))
83018 ieee80211_stop_device(local);
83019
83020 local->suspended = true;
83021diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
83022index b33efc4..0a2efb6 100644
83023--- a/net/mac80211/rate.c
83024+++ b/net/mac80211/rate.c
83025@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
83026 struct rate_control_ref *ref, *old;
83027
83028 ASSERT_RTNL();
83029- if (local->open_count)
83030+ if (local_read(&local->open_count))
83031 return -EBUSY;
83032
83033 ref = rate_control_alloc(name, local);
83034diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
83035index b1d7904..57e4da7 100644
83036--- a/net/mac80211/tx.c
83037+++ b/net/mac80211/tx.c
83038@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
83039 return cpu_to_le16(dur);
83040 }
83041
83042-static int inline is_ieee80211_device(struct ieee80211_local *local,
83043+static inline int is_ieee80211_device(struct ieee80211_local *local,
83044 struct net_device *dev)
83045 {
83046 return local == wdev_priv(dev->ieee80211_ptr);
83047diff --git a/net/mac80211/util.c b/net/mac80211/util.c
83048index 31b1085..48fb26d 100644
83049--- a/net/mac80211/util.c
83050+++ b/net/mac80211/util.c
83051@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
83052 local->resuming = true;
83053
83054 /* restart hardware */
83055- if (local->open_count) {
83056+ if (local_read(&local->open_count)) {
83057 /*
83058 * Upon resume hardware can sometimes be goofy due to
83059 * various platform / driver / bus issues, so restarting
83060diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
83061index 634d14a..b35a608 100644
83062--- a/net/netfilter/Kconfig
83063+++ b/net/netfilter/Kconfig
83064@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
83065
83066 To compile it as a module, choose M here. If unsure, say N.
83067
83068+config NETFILTER_XT_MATCH_GRADM
83069+ tristate '"gradm" match support'
83070+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
83071+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
83072+ ---help---
83073+ The gradm match allows to match on grsecurity RBAC being enabled.
83074+ It is useful when iptables rules are applied early on bootup to
83075+ prevent connections to the machine (except from a trusted host)
83076+ while the RBAC system is disabled.
83077+
83078 config NETFILTER_XT_MATCH_HASHLIMIT
83079 tristate '"hashlimit" match support'
83080 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
83081diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
83082index 49f62ee..a17b2c6 100644
83083--- a/net/netfilter/Makefile
83084+++ b/net/netfilter/Makefile
83085@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
83086 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
83087 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
83088 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
83089+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
83090 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
83091 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
83092 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
83093diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
83094index 3c7e427..724043c 100644
83095--- a/net/netfilter/ipvs/ip_vs_app.c
83096+++ b/net/netfilter/ipvs/ip_vs_app.c
83097@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
83098 .open = ip_vs_app_open,
83099 .read = seq_read,
83100 .llseek = seq_lseek,
83101- .release = seq_release,
83102+ .release = seq_release_net,
83103 };
83104 #endif
83105
83106diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
83107index 95682e5..457dbac 100644
83108--- a/net/netfilter/ipvs/ip_vs_conn.c
83109+++ b/net/netfilter/ipvs/ip_vs_conn.c
83110@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
83111 /* if the connection is not template and is created
83112 * by sync, preserve the activity flag.
83113 */
83114- cp->flags |= atomic_read(&dest->conn_flags) &
83115+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
83116 (~IP_VS_CONN_F_INACTIVE);
83117 else
83118- cp->flags |= atomic_read(&dest->conn_flags);
83119+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
83120 cp->dest = dest;
83121
83122 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
83123@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
83124 atomic_set(&cp->refcnt, 1);
83125
83126 atomic_set(&cp->n_control, 0);
83127- atomic_set(&cp->in_pkts, 0);
83128+ atomic_set_unchecked(&cp->in_pkts, 0);
83129
83130 atomic_inc(&ip_vs_conn_count);
83131 if (flags & IP_VS_CONN_F_NO_CPORT)
83132@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
83133 .open = ip_vs_conn_open,
83134 .read = seq_read,
83135 .llseek = seq_lseek,
83136- .release = seq_release,
83137+ .release = seq_release_net,
83138 };
83139
83140 static const char *ip_vs_origin_name(unsigned flags)
83141@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
83142 .open = ip_vs_conn_sync_open,
83143 .read = seq_read,
83144 .llseek = seq_lseek,
83145- .release = seq_release,
83146+ .release = seq_release_net,
83147 };
83148
83149 #endif
83150@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
83151
83152 /* Don't drop the entry if its number of incoming packets is not
83153 located in [0, 8] */
83154- i = atomic_read(&cp->in_pkts);
83155+ i = atomic_read_unchecked(&cp->in_pkts);
83156 if (i > 8 || i < 0) return 0;
83157
83158 if (!todrop_rate[i]) return 0;
83159diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
83160index b95699f..5fee919 100644
83161--- a/net/netfilter/ipvs/ip_vs_core.c
83162+++ b/net/netfilter/ipvs/ip_vs_core.c
83163@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
83164 ret = cp->packet_xmit(skb, cp, pp);
83165 /* do not touch skb anymore */
83166
83167- atomic_inc(&cp->in_pkts);
83168+ atomic_inc_unchecked(&cp->in_pkts);
83169 ip_vs_conn_put(cp);
83170 return ret;
83171 }
83172@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
83173 * Sync connection if it is about to close to
83174 * encorage the standby servers to update the connections timeout
83175 */
83176- pkts = atomic_add_return(1, &cp->in_pkts);
83177+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
83178 if (af == AF_INET &&
83179 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
83180 (((cp->protocol != IPPROTO_TCP ||
83181diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
83182index 02b2610..2d89424 100644
83183--- a/net/netfilter/ipvs/ip_vs_ctl.c
83184+++ b/net/netfilter/ipvs/ip_vs_ctl.c
83185@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
83186 ip_vs_rs_hash(dest);
83187 write_unlock_bh(&__ip_vs_rs_lock);
83188 }
83189- atomic_set(&dest->conn_flags, conn_flags);
83190+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
83191
83192 /* bind the service */
83193 if (!dest->svc) {
83194@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83195 " %-7s %-6d %-10d %-10d\n",
83196 &dest->addr.in6,
83197 ntohs(dest->port),
83198- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83199+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83200 atomic_read(&dest->weight),
83201 atomic_read(&dest->activeconns),
83202 atomic_read(&dest->inactconns));
83203@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83204 "%-7s %-6d %-10d %-10d\n",
83205 ntohl(dest->addr.ip),
83206 ntohs(dest->port),
83207- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83208+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83209 atomic_read(&dest->weight),
83210 atomic_read(&dest->activeconns),
83211 atomic_read(&dest->inactconns));
83212@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
83213 .open = ip_vs_info_open,
83214 .read = seq_read,
83215 .llseek = seq_lseek,
83216- .release = seq_release_private,
83217+ .release = seq_release_net,
83218 };
83219
83220 #endif
83221@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
83222 .open = ip_vs_stats_seq_open,
83223 .read = seq_read,
83224 .llseek = seq_lseek,
83225- .release = single_release,
83226+ .release = single_release_net,
83227 };
83228
83229 #endif
83230@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
83231
83232 entry.addr = dest->addr.ip;
83233 entry.port = dest->port;
83234- entry.conn_flags = atomic_read(&dest->conn_flags);
83235+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
83236 entry.weight = atomic_read(&dest->weight);
83237 entry.u_threshold = dest->u_threshold;
83238 entry.l_threshold = dest->l_threshold;
83239@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
83240 unsigned char arg[128];
83241 int ret = 0;
83242
83243+ pax_track_stack();
83244+
83245 if (!capable(CAP_NET_ADMIN))
83246 return -EPERM;
83247
83248@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
83249 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
83250
83251 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
83252- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83253+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83254 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
83255 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
83256 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
83257diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
83258index e177f0d..55e8581 100644
83259--- a/net/netfilter/ipvs/ip_vs_sync.c
83260+++ b/net/netfilter/ipvs/ip_vs_sync.c
83261@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
83262
83263 if (opt)
83264 memcpy(&cp->in_seq, opt, sizeof(*opt));
83265- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83266+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83267 cp->state = state;
83268 cp->old_state = cp->state;
83269 /*
83270diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
83271index 30b3189..e2e4b55 100644
83272--- a/net/netfilter/ipvs/ip_vs_xmit.c
83273+++ b/net/netfilter/ipvs/ip_vs_xmit.c
83274@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
83275 else
83276 rc = NF_ACCEPT;
83277 /* do not touch skb anymore */
83278- atomic_inc(&cp->in_pkts);
83279+ atomic_inc_unchecked(&cp->in_pkts);
83280 goto out;
83281 }
83282
83283@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
83284 else
83285 rc = NF_ACCEPT;
83286 /* do not touch skb anymore */
83287- atomic_inc(&cp->in_pkts);
83288+ atomic_inc_unchecked(&cp->in_pkts);
83289 goto out;
83290 }
83291
83292diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
83293index d521718..d0fd7a1 100644
83294--- a/net/netfilter/nf_conntrack_netlink.c
83295+++ b/net/netfilter/nf_conntrack_netlink.c
83296@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
83297 static int
83298 ctnetlink_parse_tuple(const struct nlattr * const cda[],
83299 struct nf_conntrack_tuple *tuple,
83300- enum ctattr_tuple type, u_int8_t l3num)
83301+ enum ctattr_type type, u_int8_t l3num)
83302 {
83303 struct nlattr *tb[CTA_TUPLE_MAX+1];
83304 int err;
83305diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
83306index f900dc3..5e45346 100644
83307--- a/net/netfilter/nfnetlink_log.c
83308+++ b/net/netfilter/nfnetlink_log.c
83309@@ -68,7 +68,7 @@ struct nfulnl_instance {
83310 };
83311
83312 static DEFINE_RWLOCK(instances_lock);
83313-static atomic_t global_seq;
83314+static atomic_unchecked_t global_seq;
83315
83316 #define INSTANCE_BUCKETS 16
83317 static struct hlist_head instance_table[INSTANCE_BUCKETS];
83318@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
83319 /* global sequence number */
83320 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
83321 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
83322- htonl(atomic_inc_return(&global_seq)));
83323+ htonl(atomic_inc_return_unchecked(&global_seq)));
83324
83325 if (data_len) {
83326 struct nlattr *nla;
83327diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
83328new file mode 100644
83329index 0000000..b1bac76
83330--- /dev/null
83331+++ b/net/netfilter/xt_gradm.c
83332@@ -0,0 +1,51 @@
83333+/*
83334+ * gradm match for netfilter
83335