]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-2.6.32.57-201203022148.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.57-201203022148.patch
CommitLineData
f7b53b3e
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..47f0daf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103+mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110+mkpiggy
111 mkprep
112+mkregtable
113 mktables
114 mktree
115 modpost
116@@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120+piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124@@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128+regdb.c
129 relocs
130+rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152+vmlinux.bin.all
153+vmlinux.bin.bz2
154 vmlinux.lds
155+vmlinux.relocs
156+voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zoffset.h
169diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170index c840e7d..f4c451c 100644
171--- a/Documentation/kernel-parameters.txt
172+++ b/Documentation/kernel-parameters.txt
173@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178+ virtualization environments that don't cope well with the
179+ expand down segment used by UDEREF on X86-32 or the frequent
180+ page table updates on X86-64.
181+
182+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183+
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187diff --git a/Makefile b/Makefile
188index 3377650..095e46d 100644
189--- a/Makefile
190+++ b/Makefile
191@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196-HOSTCXXFLAGS = -O2
197+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207-PHONY += scripts_basic
208-scripts_basic:
209+PHONY += scripts_basic gcc-plugins
210+scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214@@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218- cscope TAGS tags help %docs check% \
219+ cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223@@ -526,6 +527,48 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227+ifndef DISABLE_PAX_PLUGINS
228+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231+endif
232+ifdef CONFIG_PAX_MEMORY_STACKLEAK
233+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235+endif
236+ifdef CONFIG_KALLOCSTAT_PLUGIN
237+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238+endif
239+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
242+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
243+endif
244+ifdef CONFIG_CHECKER_PLUGIN
245+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
246+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
247+endif
248+endif
249+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS)
250+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
251+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
252+ifeq ($(KBUILD_EXTMOD),)
253+gcc-plugins:
254+ $(Q)$(MAKE) $(build)=tools/gcc
255+else
256+gcc-plugins: ;
257+endif
258+else
259+gcc-plugins:
260+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
261+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
262+else
263+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
264+endif
265+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
266+endif
267+endif
268+
269 include $(srctree)/arch/$(SRCARCH)/Makefile
270
271 ifneq ($(CONFIG_FRAME_WARN),0)
272@@ -647,7 +690,7 @@ export mod_strip_cmd
273
274
275 ifeq ($(KBUILD_EXTMOD),)
276-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
277+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
278
279 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
280 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
281@@ -868,6 +911,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
282
283 # The actual objects are generated when descending,
284 # make sure no implicit rule kicks in
285+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
286+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
287 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288
289 # Handle descending into subdirectories listed in $(vmlinux-dirs)
290@@ -877,7 +922,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
291 # Error messages still appears in the original language
292
293 PHONY += $(vmlinux-dirs)
294-$(vmlinux-dirs): prepare scripts
295+$(vmlinux-dirs): gcc-plugins prepare scripts
296 $(Q)$(MAKE) $(build)=$@
297
298 # Build the kernel release string
299@@ -986,6 +1031,7 @@ prepare0: archprepare FORCE
300 $(Q)$(MAKE) $(build)=. missing-syscalls
301
302 # All the preparing..
303+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
304 prepare: prepare0
305
306 # The asm symlink changes when $(ARCH) changes.
307@@ -1127,6 +1173,8 @@ all: modules
308 # using awk while concatenating to the final file.
309
310 PHONY += modules
311+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
312+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
313 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
314 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
315 @$(kecho) ' Building modules, stage 2.';
316@@ -1136,7 +1184,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
317
318 # Target to prepare building external modules
319 PHONY += modules_prepare
320-modules_prepare: prepare scripts
321+modules_prepare: gcc-plugins prepare scripts
322
323 # Target to install modules
324 PHONY += modules_install
325@@ -1201,7 +1249,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
326 include/linux/autoconf.h include/linux/version.h \
327 include/linux/utsrelease.h \
328 include/linux/bounds.h include/asm*/asm-offsets.h \
329- Module.symvers Module.markers tags TAGS cscope*
330+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
331
332 # clean - Delete most, but leave enough to build external modules
333 #
334@@ -1245,7 +1293,7 @@ distclean: mrproper
335 @find $(srctree) $(RCS_FIND_IGNORE) \
336 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
337 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
338- -o -name '.*.rej' -o -size 0 \
339+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
340 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
341 -type f -print | xargs rm -f
342
343@@ -1292,6 +1340,7 @@ help:
344 @echo ' modules_prepare - Set up for building external modules'
345 @echo ' tags/TAGS - Generate tags file for editors'
346 @echo ' cscope - Generate cscope index'
347+ @echo ' gtags - Generate GNU GLOBAL index'
348 @echo ' kernelrelease - Output the release version string'
349 @echo ' kernelversion - Output the version stored in Makefile'
350 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
351@@ -1393,6 +1442,8 @@ PHONY += $(module-dirs) modules
352 $(module-dirs): crmodverdir $(objtree)/Module.symvers
353 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
354
355+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357 modules: $(module-dirs)
358 @$(kecho) ' Building modules, stage 2.';
359 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
360@@ -1448,7 +1499,7 @@ endif # KBUILD_EXTMOD
361 quiet_cmd_tags = GEN $@
362 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
363
364-tags TAGS cscope: FORCE
365+tags TAGS cscope gtags: FORCE
366 $(call cmd,tags)
367
368 # Scripts to check various things for consistency
369@@ -1513,17 +1564,21 @@ else
370 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
371 endif
372
373-%.s: %.c prepare scripts FORCE
374+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
375+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
376+%.s: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.i: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380-%.o: %.c prepare scripts FORCE
381+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383+%.o: %.c gcc-plugins prepare scripts FORCE
384 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
385 %.lst: %.c prepare scripts FORCE
386 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
387-%.s: %.S prepare scripts FORCE
388+%.s: %.S gcc-plugins prepare scripts FORCE
389 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
390-%.o: %.S prepare scripts FORCE
391+%.o: %.S gcc-plugins prepare scripts FORCE
392 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
393 %.symtypes: %.c prepare scripts FORCE
394 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
395@@ -1533,11 +1588,15 @@ endif
396 $(cmd_crmodverdir)
397 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
398 $(build)=$(build-dir)
399-%/: prepare scripts FORCE
400+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402+%/: gcc-plugins prepare scripts FORCE
403 $(cmd_crmodverdir)
404 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
405 $(build)=$(build-dir)
406-%.ko: prepare scripts FORCE
407+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
408+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
409+%.ko: gcc-plugins prepare scripts FORCE
410 $(cmd_crmodverdir)
411 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
412 $(build)=$(build-dir) $(@:.ko=.o)
413diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
414index 610dff4..f396854 100644
415--- a/arch/alpha/include/asm/atomic.h
416+++ b/arch/alpha/include/asm/atomic.h
417@@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
418 #define atomic_dec(v) atomic_sub(1,(v))
419 #define atomic64_dec(v) atomic64_sub(1,(v))
420
421+#define atomic64_read_unchecked(v) atomic64_read(v)
422+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
423+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
424+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
425+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
426+#define atomic64_inc_unchecked(v) atomic64_inc(v)
427+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
428+#define atomic64_dec_unchecked(v) atomic64_dec(v)
429+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
430+
431 #define smp_mb__before_atomic_dec() smp_mb()
432 #define smp_mb__after_atomic_dec() smp_mb()
433 #define smp_mb__before_atomic_inc() smp_mb()
434diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
435index 5c75c1b..c82f878 100644
436--- a/arch/alpha/include/asm/elf.h
437+++ b/arch/alpha/include/asm/elf.h
438@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
439
440 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
441
442+#ifdef CONFIG_PAX_ASLR
443+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
444+
445+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
446+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
447+#endif
448+
449 /* $0 is set by ld.so to a pointer to a function which might be
450 registered using atexit. This provides a mean for the dynamic
451 linker to call DT_FINI functions for shared libraries that have
452diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
453index 3f0c59f..cf1e100 100644
454--- a/arch/alpha/include/asm/pgtable.h
455+++ b/arch/alpha/include/asm/pgtable.h
456@@ -101,6 +101,17 @@ struct vm_area_struct;
457 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
458 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
459 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
460+
461+#ifdef CONFIG_PAX_PAGEEXEC
462+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
463+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
464+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
465+#else
466+# define PAGE_SHARED_NOEXEC PAGE_SHARED
467+# define PAGE_COPY_NOEXEC PAGE_COPY
468+# define PAGE_READONLY_NOEXEC PAGE_READONLY
469+#endif
470+
471 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
472
473 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
474diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
475index ebc3c89..20cfa63 100644
476--- a/arch/alpha/kernel/module.c
477+++ b/arch/alpha/kernel/module.c
478@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
479
480 /* The small sections were sorted to the end of the segment.
481 The following should definitely cover them. */
482- gp = (u64)me->module_core + me->core_size - 0x8000;
483+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
484 got = sechdrs[me->arch.gotsecindex].sh_addr;
485
486 for (i = 0; i < n; i++) {
487diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
488index a94e49c..d71dd44 100644
489--- a/arch/alpha/kernel/osf_sys.c
490+++ b/arch/alpha/kernel/osf_sys.c
491@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
492 /* At this point: (!vma || addr < vma->vm_end). */
493 if (limit - len < addr)
494 return -ENOMEM;
495- if (!vma || addr + len <= vma->vm_start)
496+ if (check_heap_stack_gap(vma, addr, len))
497 return addr;
498 addr = vma->vm_end;
499 vma = vma->vm_next;
500@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
501 merely specific addresses, but regions of memory -- perhaps
502 this feature should be incorporated into all ports? */
503
504+#ifdef CONFIG_PAX_RANDMMAP
505+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
506+#endif
507+
508 if (addr) {
509 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
510 if (addr != (unsigned long) -ENOMEM)
511@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
512 }
513
514 /* Next, try allocating at TASK_UNMAPPED_BASE. */
515- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
516- len, limit);
517+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
518+
519 if (addr != (unsigned long) -ENOMEM)
520 return addr;
521
522diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
523index 00a31de..2ded0f2 100644
524--- a/arch/alpha/mm/fault.c
525+++ b/arch/alpha/mm/fault.c
526@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
527 __reload_thread(pcb);
528 }
529
530+#ifdef CONFIG_PAX_PAGEEXEC
531+/*
532+ * PaX: decide what to do with offenders (regs->pc = fault address)
533+ *
534+ * returns 1 when task should be killed
535+ * 2 when patched PLT trampoline was detected
536+ * 3 when unpatched PLT trampoline was detected
537+ */
538+static int pax_handle_fetch_fault(struct pt_regs *regs)
539+{
540+
541+#ifdef CONFIG_PAX_EMUPLT
542+ int err;
543+
544+ do { /* PaX: patched PLT emulation #1 */
545+ unsigned int ldah, ldq, jmp;
546+
547+ err = get_user(ldah, (unsigned int *)regs->pc);
548+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
549+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
550+
551+ if (err)
552+ break;
553+
554+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
555+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
556+ jmp == 0x6BFB0000U)
557+ {
558+ unsigned long r27, addr;
559+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
560+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
561+
562+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
563+ err = get_user(r27, (unsigned long *)addr);
564+ if (err)
565+ break;
566+
567+ regs->r27 = r27;
568+ regs->pc = r27;
569+ return 2;
570+ }
571+ } while (0);
572+
573+ do { /* PaX: patched PLT emulation #2 */
574+ unsigned int ldah, lda, br;
575+
576+ err = get_user(ldah, (unsigned int *)regs->pc);
577+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
578+ err |= get_user(br, (unsigned int *)(regs->pc+8));
579+
580+ if (err)
581+ break;
582+
583+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
584+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
585+ (br & 0xFFE00000U) == 0xC3E00000U)
586+ {
587+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
588+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
589+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
590+
591+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
592+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
593+ return 2;
594+ }
595+ } while (0);
596+
597+ do { /* PaX: unpatched PLT emulation */
598+ unsigned int br;
599+
600+ err = get_user(br, (unsigned int *)regs->pc);
601+
602+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
603+ unsigned int br2, ldq, nop, jmp;
604+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
605+
606+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
607+ err = get_user(br2, (unsigned int *)addr);
608+ err |= get_user(ldq, (unsigned int *)(addr+4));
609+ err |= get_user(nop, (unsigned int *)(addr+8));
610+ err |= get_user(jmp, (unsigned int *)(addr+12));
611+ err |= get_user(resolver, (unsigned long *)(addr+16));
612+
613+ if (err)
614+ break;
615+
616+ if (br2 == 0xC3600000U &&
617+ ldq == 0xA77B000CU &&
618+ nop == 0x47FF041FU &&
619+ jmp == 0x6B7B0000U)
620+ {
621+ regs->r28 = regs->pc+4;
622+ regs->r27 = addr+16;
623+ regs->pc = resolver;
624+ return 3;
625+ }
626+ }
627+ } while (0);
628+#endif
629+
630+ return 1;
631+}
632+
633+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
634+{
635+ unsigned long i;
636+
637+ printk(KERN_ERR "PAX: bytes at PC: ");
638+ for (i = 0; i < 5; i++) {
639+ unsigned int c;
640+ if (get_user(c, (unsigned int *)pc+i))
641+ printk(KERN_CONT "???????? ");
642+ else
643+ printk(KERN_CONT "%08x ", c);
644+ }
645+ printk("\n");
646+}
647+#endif
648
649 /*
650 * This routine handles page faults. It determines the address,
651@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
652 good_area:
653 si_code = SEGV_ACCERR;
654 if (cause < 0) {
655- if (!(vma->vm_flags & VM_EXEC))
656+ if (!(vma->vm_flags & VM_EXEC)) {
657+
658+#ifdef CONFIG_PAX_PAGEEXEC
659+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
660+ goto bad_area;
661+
662+ up_read(&mm->mmap_sem);
663+ switch (pax_handle_fetch_fault(regs)) {
664+
665+#ifdef CONFIG_PAX_EMUPLT
666+ case 2:
667+ case 3:
668+ return;
669+#endif
670+
671+ }
672+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
673+ do_group_exit(SIGKILL);
674+#else
675 goto bad_area;
676+#endif
677+
678+ }
679 } else if (!cause) {
680 /* Allow reads even for write-only mappings */
681 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
682diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
683index b68faef..6dd1496 100644
684--- a/arch/arm/Kconfig
685+++ b/arch/arm/Kconfig
686@@ -14,6 +14,7 @@ config ARM
687 select SYS_SUPPORTS_APM_EMULATION
688 select HAVE_OPROFILE
689 select HAVE_ARCH_KGDB
690+ select GENERIC_ATOMIC64
691 select HAVE_KPROBES if (!XIP_KERNEL)
692 select HAVE_KRETPROBES if (HAVE_KPROBES)
693 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
694diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
695index d0daeab..ff286a8 100644
696--- a/arch/arm/include/asm/atomic.h
697+++ b/arch/arm/include/asm/atomic.h
698@@ -15,6 +15,10 @@
699 #include <linux/types.h>
700 #include <asm/system.h>
701
702+#ifdef CONFIG_GENERIC_ATOMIC64
703+#include <asm-generic/atomic64.h>
704+#endif
705+
706 #define ATOMIC_INIT(i) { (i) }
707
708 #ifdef __KERNEL__
709diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
710index 6aac3f5..265536b 100644
711--- a/arch/arm/include/asm/elf.h
712+++ b/arch/arm/include/asm/elf.h
713@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
714 the loader. We need to make sure that it is out of the way of the program
715 that it will "exec", and that there is sufficient room for the brk. */
716
717-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
718+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
719+
720+#ifdef CONFIG_PAX_ASLR
721+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
722+
723+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
724+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
725+#endif
726
727 /* When the program starts, a1 contains a pointer to a function to be
728 registered with atexit, as per the SVR4 ABI. A value of 0 means we
729diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
730index c019949..388fdd1 100644
731--- a/arch/arm/include/asm/kmap_types.h
732+++ b/arch/arm/include/asm/kmap_types.h
733@@ -19,6 +19,7 @@ enum km_type {
734 KM_SOFTIRQ0,
735 KM_SOFTIRQ1,
736 KM_L2_CACHE,
737+ KM_CLEARPAGE,
738 KM_TYPE_NR
739 };
740
741diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
742index 1d6bd40..fba0cb9 100644
743--- a/arch/arm/include/asm/uaccess.h
744+++ b/arch/arm/include/asm/uaccess.h
745@@ -22,6 +22,8 @@
746 #define VERIFY_READ 0
747 #define VERIFY_WRITE 1
748
749+extern void check_object_size(const void *ptr, unsigned long n, bool to);
750+
751 /*
752 * The exception table consists of pairs of addresses: the first is the
753 * address of an instruction that is allowed to fault, and the second is
754@@ -387,8 +389,23 @@ do { \
755
756
757 #ifdef CONFIG_MMU
758-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
759-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
760+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
761+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
762+
763+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
764+{
765+ if (!__builtin_constant_p(n))
766+ check_object_size(to, n, false);
767+ return ___copy_from_user(to, from, n);
768+}
769+
770+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
771+{
772+ if (!__builtin_constant_p(n))
773+ check_object_size(from, n, true);
774+ return ___copy_to_user(to, from, n);
775+}
776+
777 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
778 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
779 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
780@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
781
782 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
783 {
784+ if ((long)n < 0)
785+ return n;
786+
787 if (access_ok(VERIFY_READ, from, n))
788 n = __copy_from_user(to, from, n);
789 else /* security hole - plug it */
790@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
791
792 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
793 {
794+ if ((long)n < 0)
795+ return n;
796+
797 if (access_ok(VERIFY_WRITE, to, n))
798 n = __copy_to_user(to, from, n);
799 return n;
800diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
801index 0e62770..e2c2cd6 100644
802--- a/arch/arm/kernel/armksyms.c
803+++ b/arch/arm/kernel/armksyms.c
804@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
805 #ifdef CONFIG_MMU
806 EXPORT_SYMBOL(copy_page);
807
808-EXPORT_SYMBOL(__copy_from_user);
809-EXPORT_SYMBOL(__copy_to_user);
810+EXPORT_SYMBOL(___copy_from_user);
811+EXPORT_SYMBOL(___copy_to_user);
812 EXPORT_SYMBOL(__clear_user);
813
814 EXPORT_SYMBOL(__get_user_1);
815diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
816index ba8ccfe..2dc34dc 100644
817--- a/arch/arm/kernel/kgdb.c
818+++ b/arch/arm/kernel/kgdb.c
819@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
820 * and we handle the normal undef case within the do_undefinstr
821 * handler.
822 */
823-struct kgdb_arch arch_kgdb_ops = {
824+const struct kgdb_arch arch_kgdb_ops = {
825 #ifndef __ARMEB__
826 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
827 #else /* ! __ARMEB__ */
828diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
829index 3f361a7..6e806e1 100644
830--- a/arch/arm/kernel/traps.c
831+++ b/arch/arm/kernel/traps.c
832@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
833
834 DEFINE_SPINLOCK(die_lock);
835
836+extern void gr_handle_kernel_exploit(void);
837+
838 /*
839 * This function is protected against re-entrancy.
840 */
841@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
842 if (panic_on_oops)
843 panic("Fatal exception");
844
845+ gr_handle_kernel_exploit();
846+
847 do_exit(SIGSEGV);
848 }
849
850diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
851index e4fe124..0fc246b 100644
852--- a/arch/arm/lib/copy_from_user.S
853+++ b/arch/arm/lib/copy_from_user.S
854@@ -16,7 +16,7 @@
855 /*
856 * Prototype:
857 *
858- * size_t __copy_from_user(void *to, const void *from, size_t n)
859+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
860 *
861 * Purpose:
862 *
863@@ -84,11 +84,11 @@
864
865 .text
866
867-ENTRY(__copy_from_user)
868+ENTRY(___copy_from_user)
869
870 #include "copy_template.S"
871
872-ENDPROC(__copy_from_user)
873+ENDPROC(___copy_from_user)
874
875 .section .fixup,"ax"
876 .align 0
877diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
878index 1a71e15..ac7b258 100644
879--- a/arch/arm/lib/copy_to_user.S
880+++ b/arch/arm/lib/copy_to_user.S
881@@ -16,7 +16,7 @@
882 /*
883 * Prototype:
884 *
885- * size_t __copy_to_user(void *to, const void *from, size_t n)
886+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
887 *
888 * Purpose:
889 *
890@@ -88,11 +88,11 @@
891 .text
892
893 ENTRY(__copy_to_user_std)
894-WEAK(__copy_to_user)
895+WEAK(___copy_to_user)
896
897 #include "copy_template.S"
898
899-ENDPROC(__copy_to_user)
900+ENDPROC(___copy_to_user)
901
902 .section .fixup,"ax"
903 .align 0
904diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
905index ffdd274..91017b6 100644
906--- a/arch/arm/lib/uaccess.S
907+++ b/arch/arm/lib/uaccess.S
908@@ -19,7 +19,7 @@
909
910 #define PAGE_SHIFT 12
911
912-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
913+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
914 * Purpose : copy a block to user memory from kernel memory
915 * Params : to - user memory
916 * : from - kernel memory
917@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
918 sub r2, r2, ip
919 b .Lc2u_dest_aligned
920
921-ENTRY(__copy_to_user)
922+ENTRY(___copy_to_user)
923 stmfd sp!, {r2, r4 - r7, lr}
924 cmp r2, #4
925 blt .Lc2u_not_enough
926@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
927 ldrgtb r3, [r1], #0
928 USER( strgtbt r3, [r0], #1) @ May fault
929 b .Lc2u_finished
930-ENDPROC(__copy_to_user)
931+ENDPROC(___copy_to_user)
932
933 .section .fixup,"ax"
934 .align 0
935 9001: ldmfd sp!, {r0, r4 - r7, pc}
936 .previous
937
938-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
939+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
940 * Purpose : copy a block from user memory to kernel memory
941 * Params : to - kernel memory
942 * : from - user memory
943@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
944 sub r2, r2, ip
945 b .Lcfu_dest_aligned
946
947-ENTRY(__copy_from_user)
948+ENTRY(___copy_from_user)
949 stmfd sp!, {r0, r2, r4 - r7, lr}
950 cmp r2, #4
951 blt .Lcfu_not_enough
952@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
953 USER( ldrgtbt r3, [r1], #1) @ May fault
954 strgtb r3, [r0], #1
955 b .Lcfu_finished
956-ENDPROC(__copy_from_user)
957+ENDPROC(___copy_from_user)
958
959 .section .fixup,"ax"
960 .align 0
961diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
962index 6b967ff..67d5b2b 100644
963--- a/arch/arm/lib/uaccess_with_memcpy.c
964+++ b/arch/arm/lib/uaccess_with_memcpy.c
965@@ -97,7 +97,7 @@ out:
966 }
967
968 unsigned long
969-__copy_to_user(void __user *to, const void *from, unsigned long n)
970+___copy_to_user(void __user *to, const void *from, unsigned long n)
971 {
972 /*
973 * This test is stubbed out of the main function above to keep
974diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
975index 4028724..beec230 100644
976--- a/arch/arm/mach-at91/pm.c
977+++ b/arch/arm/mach-at91/pm.c
978@@ -348,7 +348,7 @@ static void at91_pm_end(void)
979 }
980
981
982-static struct platform_suspend_ops at91_pm_ops ={
983+static const struct platform_suspend_ops at91_pm_ops ={
984 .valid = at91_pm_valid_state,
985 .begin = at91_pm_begin,
986 .enter = at91_pm_enter,
987diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
988index 5218943..0a34552 100644
989--- a/arch/arm/mach-omap1/pm.c
990+++ b/arch/arm/mach-omap1/pm.c
991@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
992
993
994
995-static struct platform_suspend_ops omap_pm_ops ={
996+static const struct platform_suspend_ops omap_pm_ops ={
997 .prepare = omap_pm_prepare,
998 .enter = omap_pm_enter,
999 .finish = omap_pm_finish,
1000diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1001index bff5c4e..d4c649b 100644
1002--- a/arch/arm/mach-omap2/pm24xx.c
1003+++ b/arch/arm/mach-omap2/pm24xx.c
1004@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1005 enable_hlt();
1006 }
1007
1008-static struct platform_suspend_ops omap_pm_ops = {
1009+static const struct platform_suspend_ops omap_pm_ops = {
1010 .prepare = omap2_pm_prepare,
1011 .enter = omap2_pm_enter,
1012 .finish = omap2_pm_finish,
1013diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1014index 8946319..7d3e661 100644
1015--- a/arch/arm/mach-omap2/pm34xx.c
1016+++ b/arch/arm/mach-omap2/pm34xx.c
1017@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1018 return;
1019 }
1020
1021-static struct platform_suspend_ops omap_pm_ops = {
1022+static const struct platform_suspend_ops omap_pm_ops = {
1023 .begin = omap3_pm_begin,
1024 .end = omap3_pm_end,
1025 .prepare = omap3_pm_prepare,
1026diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1027index b3d8d53..6e68ebc 100644
1028--- a/arch/arm/mach-pnx4008/pm.c
1029+++ b/arch/arm/mach-pnx4008/pm.c
1030@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1031 (state == PM_SUSPEND_MEM);
1032 }
1033
1034-static struct platform_suspend_ops pnx4008_pm_ops = {
1035+static const struct platform_suspend_ops pnx4008_pm_ops = {
1036 .enter = pnx4008_pm_enter,
1037 .valid = pnx4008_pm_valid,
1038 };
1039diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1040index 7693355..9beb00a 100644
1041--- a/arch/arm/mach-pxa/pm.c
1042+++ b/arch/arm/mach-pxa/pm.c
1043@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1044 pxa_cpu_pm_fns->finish();
1045 }
1046
1047-static struct platform_suspend_ops pxa_pm_ops = {
1048+static const struct platform_suspend_ops pxa_pm_ops = {
1049 .valid = pxa_pm_valid,
1050 .enter = pxa_pm_enter,
1051 .prepare = pxa_pm_prepare,
1052diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1053index 629e05d..06be589 100644
1054--- a/arch/arm/mach-pxa/sharpsl_pm.c
1055+++ b/arch/arm/mach-pxa/sharpsl_pm.c
1056@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1057 }
1058
1059 #ifdef CONFIG_PM
1060-static struct platform_suspend_ops sharpsl_pm_ops = {
1061+static const struct platform_suspend_ops sharpsl_pm_ops = {
1062 .prepare = pxa_pm_prepare,
1063 .finish = pxa_pm_finish,
1064 .enter = corgi_pxa_pm_enter,
1065diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1066index c83fdc8..ab9fc44 100644
1067--- a/arch/arm/mach-sa1100/pm.c
1068+++ b/arch/arm/mach-sa1100/pm.c
1069@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1070 return virt_to_phys(sp);
1071 }
1072
1073-static struct platform_suspend_ops sa11x0_pm_ops = {
1074+static const struct platform_suspend_ops sa11x0_pm_ops = {
1075 .enter = sa11x0_pm_enter,
1076 .valid = suspend_valid_only_mem,
1077 };
1078diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1079index 3191cd6..c0739db 100644
1080--- a/arch/arm/mm/fault.c
1081+++ b/arch/arm/mm/fault.c
1082@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1083 }
1084 #endif
1085
1086+#ifdef CONFIG_PAX_PAGEEXEC
1087+ if (fsr & FSR_LNX_PF) {
1088+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1089+ do_group_exit(SIGKILL);
1090+ }
1091+#endif
1092+
1093 tsk->thread.address = addr;
1094 tsk->thread.error_code = fsr;
1095 tsk->thread.trap_no = 14;
1096@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1097 }
1098 #endif /* CONFIG_MMU */
1099
1100+#ifdef CONFIG_PAX_PAGEEXEC
1101+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1102+{
1103+ long i;
1104+
1105+ printk(KERN_ERR "PAX: bytes at PC: ");
1106+ for (i = 0; i < 20; i++) {
1107+ unsigned char c;
1108+ if (get_user(c, (__force unsigned char __user *)pc+i))
1109+ printk(KERN_CONT "?? ");
1110+ else
1111+ printk(KERN_CONT "%02x ", c);
1112+ }
1113+ printk("\n");
1114+
1115+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1116+ for (i = -1; i < 20; i++) {
1117+ unsigned long c;
1118+ if (get_user(c, (__force unsigned long __user *)sp+i))
1119+ printk(KERN_CONT "???????? ");
1120+ else
1121+ printk(KERN_CONT "%08lx ", c);
1122+ }
1123+ printk("\n");
1124+}
1125+#endif
1126+
1127 /*
1128 * First Level Translation Fault Handler
1129 *
1130diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1131index f5abc51..7ec524c 100644
1132--- a/arch/arm/mm/mmap.c
1133+++ b/arch/arm/mm/mmap.c
1134@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1135 if (len > TASK_SIZE)
1136 return -ENOMEM;
1137
1138+#ifdef CONFIG_PAX_RANDMMAP
1139+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1140+#endif
1141+
1142 if (addr) {
1143 if (do_align)
1144 addr = COLOUR_ALIGN(addr, pgoff);
1145@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1146 addr = PAGE_ALIGN(addr);
1147
1148 vma = find_vma(mm, addr);
1149- if (TASK_SIZE - len >= addr &&
1150- (!vma || addr + len <= vma->vm_start))
1151+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1152 return addr;
1153 }
1154 if (len > mm->cached_hole_size) {
1155- start_addr = addr = mm->free_area_cache;
1156+ start_addr = addr = mm->free_area_cache;
1157 } else {
1158- start_addr = addr = TASK_UNMAPPED_BASE;
1159- mm->cached_hole_size = 0;
1160+ start_addr = addr = mm->mmap_base;
1161+ mm->cached_hole_size = 0;
1162 }
1163
1164 full_search:
1165@@ -94,14 +97,14 @@ full_search:
1166 * Start a new search - just in case we missed
1167 * some holes.
1168 */
1169- if (start_addr != TASK_UNMAPPED_BASE) {
1170- start_addr = addr = TASK_UNMAPPED_BASE;
1171+ if (start_addr != mm->mmap_base) {
1172+ start_addr = addr = mm->mmap_base;
1173 mm->cached_hole_size = 0;
1174 goto full_search;
1175 }
1176 return -ENOMEM;
1177 }
1178- if (!vma || addr + len <= vma->vm_start) {
1179+ if (check_heap_stack_gap(vma, addr, len)) {
1180 /*
1181 * Remember the place where we stopped the search:
1182 */
1183diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1184index 8d97db2..b66cfa5 100644
1185--- a/arch/arm/plat-s3c/pm.c
1186+++ b/arch/arm/plat-s3c/pm.c
1187@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1188 s3c_pm_check_cleanup();
1189 }
1190
1191-static struct platform_suspend_ops s3c_pm_ops = {
1192+static const struct platform_suspend_ops s3c_pm_ops = {
1193 .enter = s3c_pm_enter,
1194 .prepare = s3c_pm_prepare,
1195 .finish = s3c_pm_finish,
1196diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1197index d5d1d41..856e2ed 100644
1198--- a/arch/avr32/include/asm/elf.h
1199+++ b/arch/avr32/include/asm/elf.h
1200@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1201 the loader. We need to make sure that it is out of the way of the program
1202 that it will "exec", and that there is sufficient room for the brk. */
1203
1204-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1205+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1206
1207+#ifdef CONFIG_PAX_ASLR
1208+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1209+
1210+#define PAX_DELTA_MMAP_LEN 15
1211+#define PAX_DELTA_STACK_LEN 15
1212+#endif
1213
1214 /* This yields a mask that user programs can use to figure out what
1215 instruction set this CPU supports. This could be done in user space,
1216diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1217index b7f5c68..556135c 100644
1218--- a/arch/avr32/include/asm/kmap_types.h
1219+++ b/arch/avr32/include/asm/kmap_types.h
1220@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1221 D(11) KM_IRQ1,
1222 D(12) KM_SOFTIRQ0,
1223 D(13) KM_SOFTIRQ1,
1224-D(14) KM_TYPE_NR
1225+D(14) KM_CLEARPAGE,
1226+D(15) KM_TYPE_NR
1227 };
1228
1229 #undef D
1230diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1231index f021edf..32d680e 100644
1232--- a/arch/avr32/mach-at32ap/pm.c
1233+++ b/arch/avr32/mach-at32ap/pm.c
1234@@ -176,7 +176,7 @@ out:
1235 return 0;
1236 }
1237
1238-static struct platform_suspend_ops avr32_pm_ops = {
1239+static const struct platform_suspend_ops avr32_pm_ops = {
1240 .valid = avr32_pm_valid_state,
1241 .enter = avr32_pm_enter,
1242 };
1243diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1244index b61d86d..e292c7f 100644
1245--- a/arch/avr32/mm/fault.c
1246+++ b/arch/avr32/mm/fault.c
1247@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1248
1249 int exception_trace = 1;
1250
1251+#ifdef CONFIG_PAX_PAGEEXEC
1252+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1253+{
1254+ unsigned long i;
1255+
1256+ printk(KERN_ERR "PAX: bytes at PC: ");
1257+ for (i = 0; i < 20; i++) {
1258+ unsigned char c;
1259+ if (get_user(c, (unsigned char *)pc+i))
1260+ printk(KERN_CONT "???????? ");
1261+ else
1262+ printk(KERN_CONT "%02x ", c);
1263+ }
1264+ printk("\n");
1265+}
1266+#endif
1267+
1268 /*
1269 * This routine handles page faults. It determines the address and the
1270 * problem, and then passes it off to one of the appropriate routines.
1271@@ -157,6 +174,16 @@ bad_area:
1272 up_read(&mm->mmap_sem);
1273
1274 if (user_mode(regs)) {
1275+
1276+#ifdef CONFIG_PAX_PAGEEXEC
1277+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1278+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1279+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1280+ do_group_exit(SIGKILL);
1281+ }
1282+ }
1283+#endif
1284+
1285 if (exception_trace && printk_ratelimit())
1286 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1287 "sp %08lx ecr %lu\n",
1288diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1289index cce79d0..c406c85 100644
1290--- a/arch/blackfin/kernel/kgdb.c
1291+++ b/arch/blackfin/kernel/kgdb.c
1292@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1293 return -1; /* this means that we do not want to exit from the handler */
1294 }
1295
1296-struct kgdb_arch arch_kgdb_ops = {
1297+const struct kgdb_arch arch_kgdb_ops = {
1298 .gdb_bpt_instr = {0xa1},
1299 #ifdef CONFIG_SMP
1300 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1301diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1302index 8837be4..b2fb413 100644
1303--- a/arch/blackfin/mach-common/pm.c
1304+++ b/arch/blackfin/mach-common/pm.c
1305@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1306 return 0;
1307 }
1308
1309-struct platform_suspend_ops bfin_pm_ops = {
1310+const struct platform_suspend_ops bfin_pm_ops = {
1311 .enter = bfin_pm_enter,
1312 .valid = bfin_pm_valid,
1313 };
1314diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1315index 00a57af..c3ef0cd 100644
1316--- a/arch/frv/include/asm/atomic.h
1317+++ b/arch/frv/include/asm/atomic.h
1318@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1319 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1320 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1321
1322+#define atomic64_read_unchecked(v) atomic64_read(v)
1323+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1324+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1325+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1326+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1327+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1328+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1329+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1330+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1331+
1332 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1333 {
1334 int c, old;
1335diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1336index f8e16b2..c73ff79 100644
1337--- a/arch/frv/include/asm/kmap_types.h
1338+++ b/arch/frv/include/asm/kmap_types.h
1339@@ -23,6 +23,7 @@ enum km_type {
1340 KM_IRQ1,
1341 KM_SOFTIRQ0,
1342 KM_SOFTIRQ1,
1343+ KM_CLEARPAGE,
1344 KM_TYPE_NR
1345 };
1346
1347diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1348index 385fd30..6c3d97e 100644
1349--- a/arch/frv/mm/elf-fdpic.c
1350+++ b/arch/frv/mm/elf-fdpic.c
1351@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1352 if (addr) {
1353 addr = PAGE_ALIGN(addr);
1354 vma = find_vma(current->mm, addr);
1355- if (TASK_SIZE - len >= addr &&
1356- (!vma || addr + len <= vma->vm_start))
1357+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1358 goto success;
1359 }
1360
1361@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1362 for (; vma; vma = vma->vm_next) {
1363 if (addr > limit)
1364 break;
1365- if (addr + len <= vma->vm_start)
1366+ if (check_heap_stack_gap(vma, addr, len))
1367 goto success;
1368 addr = vma->vm_end;
1369 }
1370@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1371 for (; vma; vma = vma->vm_next) {
1372 if (addr > limit)
1373 break;
1374- if (addr + len <= vma->vm_start)
1375+ if (check_heap_stack_gap(vma, addr, len))
1376 goto success;
1377 addr = vma->vm_end;
1378 }
1379diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1380index e4a80d8..11a7ea1 100644
1381--- a/arch/ia64/hp/common/hwsw_iommu.c
1382+++ b/arch/ia64/hp/common/hwsw_iommu.c
1383@@ -17,7 +17,7 @@
1384 #include <linux/swiotlb.h>
1385 #include <asm/machvec.h>
1386
1387-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1388+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1389
1390 /* swiotlb declarations & definitions: */
1391 extern int swiotlb_late_init_with_default_size (size_t size);
1392@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1393 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1394 }
1395
1396-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1397+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1398 {
1399 if (use_swiotlb(dev))
1400 return &swiotlb_dma_ops;
1401diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1402index 01ae69b..35752fd 100644
1403--- a/arch/ia64/hp/common/sba_iommu.c
1404+++ b/arch/ia64/hp/common/sba_iommu.c
1405@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1406 },
1407 };
1408
1409-extern struct dma_map_ops swiotlb_dma_ops;
1410+extern const struct dma_map_ops swiotlb_dma_ops;
1411
1412 static int __init
1413 sba_init(void)
1414@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1415
1416 __setup("sbapagesize=",sba_page_override);
1417
1418-struct dma_map_ops sba_dma_ops = {
1419+const struct dma_map_ops sba_dma_ops = {
1420 .alloc_coherent = sba_alloc_coherent,
1421 .free_coherent = sba_free_coherent,
1422 .map_page = sba_map_page,
1423diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1424index c69552b..c7122f4 100644
1425--- a/arch/ia64/ia32/binfmt_elf32.c
1426+++ b/arch/ia64/ia32/binfmt_elf32.c
1427@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1428
1429 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1430
1431+#ifdef CONFIG_PAX_ASLR
1432+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1433+
1434+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1435+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1436+#endif
1437+
1438 /* Ugly but avoids duplication */
1439 #include "../../../fs/binfmt_elf.c"
1440
1441diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1442index 0f15349..26b3429 100644
1443--- a/arch/ia64/ia32/ia32priv.h
1444+++ b/arch/ia64/ia32/ia32priv.h
1445@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1446 #define ELF_DATA ELFDATA2LSB
1447 #define ELF_ARCH EM_386
1448
1449-#define IA32_STACK_TOP IA32_PAGE_OFFSET
1450+#ifdef CONFIG_PAX_RANDUSTACK
1451+#define __IA32_DELTA_STACK (current->mm->delta_stack)
1452+#else
1453+#define __IA32_DELTA_STACK 0UL
1454+#endif
1455+
1456+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1457+
1458 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1459 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1460
1461diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
1462index 88405cb..de5ca5d 100644
1463--- a/arch/ia64/include/asm/atomic.h
1464+++ b/arch/ia64/include/asm/atomic.h
1465@@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
1466 #define atomic64_inc(v) atomic64_add(1, (v))
1467 #define atomic64_dec(v) atomic64_sub(1, (v))
1468
1469+#define atomic64_read_unchecked(v) atomic64_read(v)
1470+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1471+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1472+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1473+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1474+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1475+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1476+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1477+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1478+
1479 /* Atomic operations are already serializing */
1480 #define smp_mb__before_atomic_dec() barrier()
1481 #define smp_mb__after_atomic_dec() barrier()
1482diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1483index 8d3c79c..71b3af6 100644
1484--- a/arch/ia64/include/asm/dma-mapping.h
1485+++ b/arch/ia64/include/asm/dma-mapping.h
1486@@ -12,7 +12,7 @@
1487
1488 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1489
1490-extern struct dma_map_ops *dma_ops;
1491+extern const struct dma_map_ops *dma_ops;
1492 extern struct ia64_machine_vector ia64_mv;
1493 extern void set_iommu_machvec(void);
1494
1495@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1496 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1497 dma_addr_t *daddr, gfp_t gfp)
1498 {
1499- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1500+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1501 void *caddr;
1502
1503 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1504@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1505 static inline void dma_free_coherent(struct device *dev, size_t size,
1506 void *caddr, dma_addr_t daddr)
1507 {
1508- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1509+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1510 debug_dma_free_coherent(dev, size, caddr, daddr);
1511 ops->free_coherent(dev, size, caddr, daddr);
1512 }
1513@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1514
1515 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1516 {
1517- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1518+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1519 return ops->mapping_error(dev, daddr);
1520 }
1521
1522 static inline int dma_supported(struct device *dev, u64 mask)
1523 {
1524- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1525+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1526 return ops->dma_supported(dev, mask);
1527 }
1528
1529diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1530index 86eddee..b116bb4 100644
1531--- a/arch/ia64/include/asm/elf.h
1532+++ b/arch/ia64/include/asm/elf.h
1533@@ -43,6 +43,13 @@
1534 */
1535 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1536
1537+#ifdef CONFIG_PAX_ASLR
1538+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1539+
1540+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1541+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1542+#endif
1543+
1544 #define PT_IA_64_UNWIND 0x70000001
1545
1546 /* IA-64 relocations: */
1547diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1548index 367d299..9ad4279 100644
1549--- a/arch/ia64/include/asm/machvec.h
1550+++ b/arch/ia64/include/asm/machvec.h
1551@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1552 /* DMA-mapping interface: */
1553 typedef void ia64_mv_dma_init (void);
1554 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1555-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1556+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1557
1558 /*
1559 * WARNING: The legacy I/O space is _architected_. Platforms are
1560@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1561 # endif /* CONFIG_IA64_GENERIC */
1562
1563 extern void swiotlb_dma_init(void);
1564-extern struct dma_map_ops *dma_get_ops(struct device *);
1565+extern const struct dma_map_ops *dma_get_ops(struct device *);
1566
1567 /*
1568 * Define default versions so we can extend machvec for new platforms without having
1569diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1570index 8840a69..cdb63d9 100644
1571--- a/arch/ia64/include/asm/pgtable.h
1572+++ b/arch/ia64/include/asm/pgtable.h
1573@@ -12,7 +12,7 @@
1574 * David Mosberger-Tang <davidm@hpl.hp.com>
1575 */
1576
1577-
1578+#include <linux/const.h>
1579 #include <asm/mman.h>
1580 #include <asm/page.h>
1581 #include <asm/processor.h>
1582@@ -143,6 +143,17 @@
1583 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1584 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1585 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1586+
1587+#ifdef CONFIG_PAX_PAGEEXEC
1588+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1589+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1590+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1591+#else
1592+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1593+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1594+# define PAGE_COPY_NOEXEC PAGE_COPY
1595+#endif
1596+
1597 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1598 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1599 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1600diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1601index 239ecdc..f94170e 100644
1602--- a/arch/ia64/include/asm/spinlock.h
1603+++ b/arch/ia64/include/asm/spinlock.h
1604@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1605 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1606
1607 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1608- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1609+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1610 }
1611
1612 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1613diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1614index 449c8c0..432a3d2 100644
1615--- a/arch/ia64/include/asm/uaccess.h
1616+++ b/arch/ia64/include/asm/uaccess.h
1617@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1618 const void *__cu_from = (from); \
1619 long __cu_len = (n); \
1620 \
1621- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1622+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1623 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1624 __cu_len; \
1625 })
1626@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1627 long __cu_len = (n); \
1628 \
1629 __chk_user_ptr(__cu_from); \
1630- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1631+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1632 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1633 __cu_len; \
1634 })
1635diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1636index f2c1600..969398a 100644
1637--- a/arch/ia64/kernel/dma-mapping.c
1638+++ b/arch/ia64/kernel/dma-mapping.c
1639@@ -3,7 +3,7 @@
1640 /* Set this to 1 if there is a HW IOMMU in the system */
1641 int iommu_detected __read_mostly;
1642
1643-struct dma_map_ops *dma_ops;
1644+const struct dma_map_ops *dma_ops;
1645 EXPORT_SYMBOL(dma_ops);
1646
1647 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1648@@ -16,7 +16,7 @@ static int __init dma_init(void)
1649 }
1650 fs_initcall(dma_init);
1651
1652-struct dma_map_ops *dma_get_ops(struct device *dev)
1653+const struct dma_map_ops *dma_get_ops(struct device *dev)
1654 {
1655 return dma_ops;
1656 }
1657diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1658index 1481b0a..e7d38ff 100644
1659--- a/arch/ia64/kernel/module.c
1660+++ b/arch/ia64/kernel/module.c
1661@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1662 void
1663 module_free (struct module *mod, void *module_region)
1664 {
1665- if (mod && mod->arch.init_unw_table &&
1666- module_region == mod->module_init) {
1667+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1668 unw_remove_unwind_table(mod->arch.init_unw_table);
1669 mod->arch.init_unw_table = NULL;
1670 }
1671@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1672 }
1673
1674 static inline int
1675+in_init_rx (const struct module *mod, uint64_t addr)
1676+{
1677+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1678+}
1679+
1680+static inline int
1681+in_init_rw (const struct module *mod, uint64_t addr)
1682+{
1683+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1684+}
1685+
1686+static inline int
1687 in_init (const struct module *mod, uint64_t addr)
1688 {
1689- return addr - (uint64_t) mod->module_init < mod->init_size;
1690+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1691+}
1692+
1693+static inline int
1694+in_core_rx (const struct module *mod, uint64_t addr)
1695+{
1696+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1697+}
1698+
1699+static inline int
1700+in_core_rw (const struct module *mod, uint64_t addr)
1701+{
1702+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1703 }
1704
1705 static inline int
1706 in_core (const struct module *mod, uint64_t addr)
1707 {
1708- return addr - (uint64_t) mod->module_core < mod->core_size;
1709+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1710 }
1711
1712 static inline int
1713@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1714 break;
1715
1716 case RV_BDREL:
1717- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1718+ if (in_init_rx(mod, val))
1719+ val -= (uint64_t) mod->module_init_rx;
1720+ else if (in_init_rw(mod, val))
1721+ val -= (uint64_t) mod->module_init_rw;
1722+ else if (in_core_rx(mod, val))
1723+ val -= (uint64_t) mod->module_core_rx;
1724+ else if (in_core_rw(mod, val))
1725+ val -= (uint64_t) mod->module_core_rw;
1726 break;
1727
1728 case RV_LTV:
1729@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1730 * addresses have been selected...
1731 */
1732 uint64_t gp;
1733- if (mod->core_size > MAX_LTOFF)
1734+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1735 /*
1736 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1737 * at the end of the module.
1738 */
1739- gp = mod->core_size - MAX_LTOFF / 2;
1740+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1741 else
1742- gp = mod->core_size / 2;
1743- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1744+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1745+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1746 mod->arch.gp = gp;
1747 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1748 }
1749diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1750index f6b1ff0..de773fb 100644
1751--- a/arch/ia64/kernel/pci-dma.c
1752+++ b/arch/ia64/kernel/pci-dma.c
1753@@ -43,7 +43,7 @@ struct device fallback_dev = {
1754 .dma_mask = &fallback_dev.coherent_dma_mask,
1755 };
1756
1757-extern struct dma_map_ops intel_dma_ops;
1758+extern const struct dma_map_ops intel_dma_ops;
1759
1760 static int __init pci_iommu_init(void)
1761 {
1762@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1763 }
1764 EXPORT_SYMBOL(iommu_dma_supported);
1765
1766+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1767+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1768+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1769+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1770+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1771+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1772+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1773+
1774+static const struct dma_map_ops intel_iommu_dma_ops = {
1775+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1776+ .alloc_coherent = intel_alloc_coherent,
1777+ .free_coherent = intel_free_coherent,
1778+ .map_sg = intel_map_sg,
1779+ .unmap_sg = intel_unmap_sg,
1780+ .map_page = intel_map_page,
1781+ .unmap_page = intel_unmap_page,
1782+ .mapping_error = intel_mapping_error,
1783+
1784+ .sync_single_for_cpu = machvec_dma_sync_single,
1785+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1786+ .sync_single_for_device = machvec_dma_sync_single,
1787+ .sync_sg_for_device = machvec_dma_sync_sg,
1788+ .dma_supported = iommu_dma_supported,
1789+};
1790+
1791 void __init pci_iommu_alloc(void)
1792 {
1793- dma_ops = &intel_dma_ops;
1794-
1795- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1796- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1797- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1798- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1799- dma_ops->dma_supported = iommu_dma_supported;
1800+ dma_ops = &intel_iommu_dma_ops;
1801
1802 /*
1803 * The order of these functions is important for
1804diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1805index 285aae8..61dbab6 100644
1806--- a/arch/ia64/kernel/pci-swiotlb.c
1807+++ b/arch/ia64/kernel/pci-swiotlb.c
1808@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1809 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1810 }
1811
1812-struct dma_map_ops swiotlb_dma_ops = {
1813+const struct dma_map_ops swiotlb_dma_ops = {
1814 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1815 .free_coherent = swiotlb_free_coherent,
1816 .map_page = swiotlb_map_page,
1817diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1818index 609d500..7dde2a8 100644
1819--- a/arch/ia64/kernel/sys_ia64.c
1820+++ b/arch/ia64/kernel/sys_ia64.c
1821@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1822 if (REGION_NUMBER(addr) == RGN_HPAGE)
1823 addr = 0;
1824 #endif
1825+
1826+#ifdef CONFIG_PAX_RANDMMAP
1827+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1828+ addr = mm->free_area_cache;
1829+ else
1830+#endif
1831+
1832 if (!addr)
1833 addr = mm->free_area_cache;
1834
1835@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1836 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1837 /* At this point: (!vma || addr < vma->vm_end). */
1838 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1839- if (start_addr != TASK_UNMAPPED_BASE) {
1840+ if (start_addr != mm->mmap_base) {
1841 /* Start a new search --- just in case we missed some holes. */
1842- addr = TASK_UNMAPPED_BASE;
1843+ addr = mm->mmap_base;
1844 goto full_search;
1845 }
1846 return -ENOMEM;
1847 }
1848- if (!vma || addr + len <= vma->vm_start) {
1849+ if (check_heap_stack_gap(vma, addr, len)) {
1850 /* Remember the address where we stopped this search: */
1851 mm->free_area_cache = addr + len;
1852 return addr;
1853diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1854index 8f06035..b3a5818 100644
1855--- a/arch/ia64/kernel/topology.c
1856+++ b/arch/ia64/kernel/topology.c
1857@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1858 return ret;
1859 }
1860
1861-static struct sysfs_ops cache_sysfs_ops = {
1862+static const struct sysfs_ops cache_sysfs_ops = {
1863 .show = cache_show
1864 };
1865
1866diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1867index 0a0c77b..8e55a81 100644
1868--- a/arch/ia64/kernel/vmlinux.lds.S
1869+++ b/arch/ia64/kernel/vmlinux.lds.S
1870@@ -190,7 +190,7 @@ SECTIONS
1871 /* Per-cpu data: */
1872 . = ALIGN(PERCPU_PAGE_SIZE);
1873 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1874- __phys_per_cpu_start = __per_cpu_load;
1875+ __phys_per_cpu_start = per_cpu_load;
1876 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1877 * into percpu page size
1878 */
1879diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1880index 19261a9..1611b7a 100644
1881--- a/arch/ia64/mm/fault.c
1882+++ b/arch/ia64/mm/fault.c
1883@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1884 return pte_present(pte);
1885 }
1886
1887+#ifdef CONFIG_PAX_PAGEEXEC
1888+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1889+{
1890+ unsigned long i;
1891+
1892+ printk(KERN_ERR "PAX: bytes at PC: ");
1893+ for (i = 0; i < 8; i++) {
1894+ unsigned int c;
1895+ if (get_user(c, (unsigned int *)pc+i))
1896+ printk(KERN_CONT "???????? ");
1897+ else
1898+ printk(KERN_CONT "%08x ", c);
1899+ }
1900+ printk("\n");
1901+}
1902+#endif
1903+
1904 void __kprobes
1905 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1906 {
1907@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1908 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1909 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1910
1911- if ((vma->vm_flags & mask) != mask)
1912+ if ((vma->vm_flags & mask) != mask) {
1913+
1914+#ifdef CONFIG_PAX_PAGEEXEC
1915+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1916+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1917+ goto bad_area;
1918+
1919+ up_read(&mm->mmap_sem);
1920+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1921+ do_group_exit(SIGKILL);
1922+ }
1923+#endif
1924+
1925 goto bad_area;
1926
1927+ }
1928+
1929 survive:
1930 /*
1931 * If for any reason at all we couldn't handle the fault, make
1932diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1933index b0f6157..a082bbc 100644
1934--- a/arch/ia64/mm/hugetlbpage.c
1935+++ b/arch/ia64/mm/hugetlbpage.c
1936@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1937 /* At this point: (!vmm || addr < vmm->vm_end). */
1938 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1939 return -ENOMEM;
1940- if (!vmm || (addr + len) <= vmm->vm_start)
1941+ if (check_heap_stack_gap(vmm, addr, len))
1942 return addr;
1943 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1944 }
1945diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1946index 1857766..05cc6a3 100644
1947--- a/arch/ia64/mm/init.c
1948+++ b/arch/ia64/mm/init.c
1949@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1950 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1951 vma->vm_end = vma->vm_start + PAGE_SIZE;
1952 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1953+
1954+#ifdef CONFIG_PAX_PAGEEXEC
1955+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1956+ vma->vm_flags &= ~VM_EXEC;
1957+
1958+#ifdef CONFIG_PAX_MPROTECT
1959+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1960+ vma->vm_flags &= ~VM_MAYEXEC;
1961+#endif
1962+
1963+ }
1964+#endif
1965+
1966 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1967 down_write(&current->mm->mmap_sem);
1968 if (insert_vm_struct(current->mm, vma)) {
1969diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1970index 98b6849..8046766 100644
1971--- a/arch/ia64/sn/pci/pci_dma.c
1972+++ b/arch/ia64/sn/pci/pci_dma.c
1973@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1974 return ret;
1975 }
1976
1977-static struct dma_map_ops sn_dma_ops = {
1978+static const struct dma_map_ops sn_dma_ops = {
1979 .alloc_coherent = sn_dma_alloc_coherent,
1980 .free_coherent = sn_dma_free_coherent,
1981 .map_page = sn_dma_map_page,
1982diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1983index 82abd15..d95ae5d 100644
1984--- a/arch/m32r/lib/usercopy.c
1985+++ b/arch/m32r/lib/usercopy.c
1986@@ -14,6 +14,9 @@
1987 unsigned long
1988 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1989 {
1990+ if ((long)n < 0)
1991+ return n;
1992+
1993 prefetch(from);
1994 if (access_ok(VERIFY_WRITE, to, n))
1995 __copy_user(to,from,n);
1996@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1997 unsigned long
1998 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1999 {
2000+ if ((long)n < 0)
2001+ return n;
2002+
2003 prefetchw(to);
2004 if (access_ok(VERIFY_READ, from, n))
2005 __copy_user_zeroing(to,from,n);
2006diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2007index fd7620f..63d73a6 100644
2008--- a/arch/mips/Kconfig
2009+++ b/arch/mips/Kconfig
2010@@ -5,6 +5,7 @@ config MIPS
2011 select HAVE_IDE
2012 select HAVE_OPROFILE
2013 select HAVE_ARCH_KGDB
2014+ select GENERIC_ATOMIC64 if !64BIT
2015 # Horrible source of confusion. Die, die, die ...
2016 select EMBEDDED
2017 select RTC_LIB if !LEMOTE_FULOONG2E
2018diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2019index 77f5021..2b1db8a 100644
2020--- a/arch/mips/Makefile
2021+++ b/arch/mips/Makefile
2022@@ -51,6 +51,8 @@ endif
2023 cflags-y := -ffunction-sections
2024 cflags-y += $(call cc-option, -mno-check-zero-division)
2025
2026+cflags-y += -Wno-sign-compare -Wno-extra
2027+
2028 ifdef CONFIG_32BIT
2029 ld-emul = $(32bit-emul)
2030 vmlinux-32 = vmlinux
2031diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2032index 632f986..fd0378d 100644
2033--- a/arch/mips/alchemy/devboards/pm.c
2034+++ b/arch/mips/alchemy/devboards/pm.c
2035@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2036
2037 }
2038
2039-static struct platform_suspend_ops db1x_pm_ops = {
2040+static const struct platform_suspend_ops db1x_pm_ops = {
2041 .valid = suspend_valid_only_mem,
2042 .begin = db1x_pm_begin,
2043 .enter = db1x_pm_enter,
2044diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2045index 09e7128..111035b 100644
2046--- a/arch/mips/include/asm/atomic.h
2047+++ b/arch/mips/include/asm/atomic.h
2048@@ -21,6 +21,10 @@
2049 #include <asm/war.h>
2050 #include <asm/system.h>
2051
2052+#ifdef CONFIG_GENERIC_ATOMIC64
2053+#include <asm-generic/atomic64.h>
2054+#endif
2055+
2056 #define ATOMIC_INIT(i) { (i) }
2057
2058 /*
2059@@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2060 */
2061 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2062
2063+#define atomic64_read_unchecked(v) atomic64_read(v)
2064+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2065+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2066+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2067+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2068+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2069+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2070+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2071+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2072+
2073 #endif /* CONFIG_64BIT */
2074
2075 /*
2076diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2077index 7990694..4e93acf 100644
2078--- a/arch/mips/include/asm/elf.h
2079+++ b/arch/mips/include/asm/elf.h
2080@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2081 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2082 #endif
2083
2084+#ifdef CONFIG_PAX_ASLR
2085+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2086+
2087+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2088+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2089+#endif
2090+
2091 #endif /* _ASM_ELF_H */
2092diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2093index f266295..627cfff 100644
2094--- a/arch/mips/include/asm/page.h
2095+++ b/arch/mips/include/asm/page.h
2096@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2097 #ifdef CONFIG_CPU_MIPS32
2098 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2099 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2100- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2101+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2102 #else
2103 typedef struct { unsigned long long pte; } pte_t;
2104 #define pte_val(x) ((x).pte)
2105diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2106index e48c0bf..f3acf65 100644
2107--- a/arch/mips/include/asm/reboot.h
2108+++ b/arch/mips/include/asm/reboot.h
2109@@ -9,7 +9,7 @@
2110 #ifndef _ASM_REBOOT_H
2111 #define _ASM_REBOOT_H
2112
2113-extern void (*_machine_restart)(char *command);
2114-extern void (*_machine_halt)(void);
2115+extern void (*__noreturn _machine_restart)(char *command);
2116+extern void (*__noreturn _machine_halt)(void);
2117
2118 #endif /* _ASM_REBOOT_H */
2119diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2120index 83b5509..9fa24a23 100644
2121--- a/arch/mips/include/asm/system.h
2122+++ b/arch/mips/include/asm/system.h
2123@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2124 */
2125 #define __ARCH_WANT_UNLOCKED_CTXSW
2126
2127-extern unsigned long arch_align_stack(unsigned long sp);
2128+#define arch_align_stack(x) ((x) & ~0xfUL)
2129
2130 #endif /* _ASM_SYSTEM_H */
2131diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2132index 9fdd8bc..fcf9d68 100644
2133--- a/arch/mips/kernel/binfmt_elfn32.c
2134+++ b/arch/mips/kernel/binfmt_elfn32.c
2135@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2136 #undef ELF_ET_DYN_BASE
2137 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2138
2139+#ifdef CONFIG_PAX_ASLR
2140+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2141+
2142+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2143+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2144+#endif
2145+
2146 #include <asm/processor.h>
2147 #include <linux/module.h>
2148 #include <linux/elfcore.h>
2149diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2150index ff44823..cf0b48a 100644
2151--- a/arch/mips/kernel/binfmt_elfo32.c
2152+++ b/arch/mips/kernel/binfmt_elfo32.c
2153@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2154 #undef ELF_ET_DYN_BASE
2155 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2156
2157+#ifdef CONFIG_PAX_ASLR
2158+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2159+
2160+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2161+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2162+#endif
2163+
2164 #include <asm/processor.h>
2165
2166 /*
2167diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2168index 50c9bb8..efdd5f8 100644
2169--- a/arch/mips/kernel/kgdb.c
2170+++ b/arch/mips/kernel/kgdb.c
2171@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2172 return -1;
2173 }
2174
2175+/* cannot be const */
2176 struct kgdb_arch arch_kgdb_ops;
2177
2178 /*
2179diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2180index f3d73e1..bb3f57a 100644
2181--- a/arch/mips/kernel/process.c
2182+++ b/arch/mips/kernel/process.c
2183@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2184 out:
2185 return pc;
2186 }
2187-
2188-/*
2189- * Don't forget that the stack pointer must be aligned on a 8 bytes
2190- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2191- */
2192-unsigned long arch_align_stack(unsigned long sp)
2193-{
2194- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2195- sp -= get_random_int() & ~PAGE_MASK;
2196-
2197- return sp & ALMASK;
2198-}
2199diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2200index 060563a..7fbf310 100644
2201--- a/arch/mips/kernel/reset.c
2202+++ b/arch/mips/kernel/reset.c
2203@@ -19,8 +19,8 @@
2204 * So handle all using function pointers to machine specific
2205 * functions.
2206 */
2207-void (*_machine_restart)(char *command);
2208-void (*_machine_halt)(void);
2209+void (*__noreturn _machine_restart)(char *command);
2210+void (*__noreturn _machine_halt)(void);
2211 void (*pm_power_off)(void);
2212
2213 EXPORT_SYMBOL(pm_power_off);
2214@@ -29,16 +29,19 @@ void machine_restart(char *command)
2215 {
2216 if (_machine_restart)
2217 _machine_restart(command);
2218+ BUG();
2219 }
2220
2221 void machine_halt(void)
2222 {
2223 if (_machine_halt)
2224 _machine_halt();
2225+ BUG();
2226 }
2227
2228 void machine_power_off(void)
2229 {
2230 if (pm_power_off)
2231 pm_power_off();
2232+ BUG();
2233 }
2234diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2235index 3f7f466..3abe0b5 100644
2236--- a/arch/mips/kernel/syscall.c
2237+++ b/arch/mips/kernel/syscall.c
2238@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2239 do_color_align = 0;
2240 if (filp || (flags & MAP_SHARED))
2241 do_color_align = 1;
2242+
2243+#ifdef CONFIG_PAX_RANDMMAP
2244+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2245+#endif
2246+
2247 if (addr) {
2248 if (do_color_align)
2249 addr = COLOUR_ALIGN(addr, pgoff);
2250 else
2251 addr = PAGE_ALIGN(addr);
2252 vmm = find_vma(current->mm, addr);
2253- if (task_size - len >= addr &&
2254- (!vmm || addr + len <= vmm->vm_start))
2255+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2256 return addr;
2257 }
2258- addr = TASK_UNMAPPED_BASE;
2259+ addr = current->mm->mmap_base;
2260 if (do_color_align)
2261 addr = COLOUR_ALIGN(addr, pgoff);
2262 else
2263@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2264 /* At this point: (!vmm || addr < vmm->vm_end). */
2265 if (task_size - len < addr)
2266 return -ENOMEM;
2267- if (!vmm || addr + len <= vmm->vm_start)
2268+ if (check_heap_stack_gap(vmm, addr, len))
2269 return addr;
2270 addr = vmm->vm_end;
2271 if (do_color_align)
2272diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2273index e97a7a2..f18f5b0 100644
2274--- a/arch/mips/mm/fault.c
2275+++ b/arch/mips/mm/fault.c
2276@@ -26,6 +26,23 @@
2277 #include <asm/ptrace.h>
2278 #include <asm/highmem.h> /* For VMALLOC_END */
2279
2280+#ifdef CONFIG_PAX_PAGEEXEC
2281+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2282+{
2283+ unsigned long i;
2284+
2285+ printk(KERN_ERR "PAX: bytes at PC: ");
2286+ for (i = 0; i < 5; i++) {
2287+ unsigned int c;
2288+ if (get_user(c, (unsigned int *)pc+i))
2289+ printk(KERN_CONT "???????? ");
2290+ else
2291+ printk(KERN_CONT "%08x ", c);
2292+ }
2293+ printk("\n");
2294+}
2295+#endif
2296+
2297 /*
2298 * This routine handles page faults. It determines the address,
2299 * and the problem, and then passes it off to one of the appropriate
2300diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
2301index 8bc9e96..26554f8 100644
2302--- a/arch/parisc/include/asm/atomic.h
2303+++ b/arch/parisc/include/asm/atomic.h
2304@@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2305
2306 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
2307
2308+#define atomic64_read_unchecked(v) atomic64_read(v)
2309+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2310+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2311+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2312+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2313+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2314+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2315+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2316+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2317+
2318 #else /* CONFIG_64BIT */
2319
2320 #include <asm-generic/atomic64.h>
2321diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2322index 9c802eb..0592e41 100644
2323--- a/arch/parisc/include/asm/elf.h
2324+++ b/arch/parisc/include/asm/elf.h
2325@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2326
2327 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2328
2329+#ifdef CONFIG_PAX_ASLR
2330+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2331+
2332+#define PAX_DELTA_MMAP_LEN 16
2333+#define PAX_DELTA_STACK_LEN 16
2334+#endif
2335+
2336 /* This yields a mask that user programs can use to figure out what
2337 instruction set this CPU supports. This could be done in user space,
2338 but it's not easy, and we've already done it here. */
2339diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2340index a27d2e2..18fd845 100644
2341--- a/arch/parisc/include/asm/pgtable.h
2342+++ b/arch/parisc/include/asm/pgtable.h
2343@@ -207,6 +207,17 @@
2344 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2345 #define PAGE_COPY PAGE_EXECREAD
2346 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2347+
2348+#ifdef CONFIG_PAX_PAGEEXEC
2349+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2350+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2351+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2352+#else
2353+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2354+# define PAGE_COPY_NOEXEC PAGE_COPY
2355+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2356+#endif
2357+
2358 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2359 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2360 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2361diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2362index 2120746..8d70a5e 100644
2363--- a/arch/parisc/kernel/module.c
2364+++ b/arch/parisc/kernel/module.c
2365@@ -95,16 +95,38 @@
2366
2367 /* three functions to determine where in the module core
2368 * or init pieces the location is */
2369+static inline int in_init_rx(struct module *me, void *loc)
2370+{
2371+ return (loc >= me->module_init_rx &&
2372+ loc < (me->module_init_rx + me->init_size_rx));
2373+}
2374+
2375+static inline int in_init_rw(struct module *me, void *loc)
2376+{
2377+ return (loc >= me->module_init_rw &&
2378+ loc < (me->module_init_rw + me->init_size_rw));
2379+}
2380+
2381 static inline int in_init(struct module *me, void *loc)
2382 {
2383- return (loc >= me->module_init &&
2384- loc <= (me->module_init + me->init_size));
2385+ return in_init_rx(me, loc) || in_init_rw(me, loc);
2386+}
2387+
2388+static inline int in_core_rx(struct module *me, void *loc)
2389+{
2390+ return (loc >= me->module_core_rx &&
2391+ loc < (me->module_core_rx + me->core_size_rx));
2392+}
2393+
2394+static inline int in_core_rw(struct module *me, void *loc)
2395+{
2396+ return (loc >= me->module_core_rw &&
2397+ loc < (me->module_core_rw + me->core_size_rw));
2398 }
2399
2400 static inline int in_core(struct module *me, void *loc)
2401 {
2402- return (loc >= me->module_core &&
2403- loc <= (me->module_core + me->core_size));
2404+ return in_core_rx(me, loc) || in_core_rw(me, loc);
2405 }
2406
2407 static inline int in_local(struct module *me, void *loc)
2408@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2409 }
2410
2411 /* align things a bit */
2412- me->core_size = ALIGN(me->core_size, 16);
2413- me->arch.got_offset = me->core_size;
2414- me->core_size += gots * sizeof(struct got_entry);
2415+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2416+ me->arch.got_offset = me->core_size_rw;
2417+ me->core_size_rw += gots * sizeof(struct got_entry);
2418
2419- me->core_size = ALIGN(me->core_size, 16);
2420- me->arch.fdesc_offset = me->core_size;
2421- me->core_size += fdescs * sizeof(Elf_Fdesc);
2422+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2423+ me->arch.fdesc_offset = me->core_size_rw;
2424+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2425
2426 me->arch.got_max = gots;
2427 me->arch.fdesc_max = fdescs;
2428@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2429
2430 BUG_ON(value == 0);
2431
2432- got = me->module_core + me->arch.got_offset;
2433+ got = me->module_core_rw + me->arch.got_offset;
2434 for (i = 0; got[i].addr; i++)
2435 if (got[i].addr == value)
2436 goto out;
2437@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2438 #ifdef CONFIG_64BIT
2439 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2440 {
2441- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2442+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2443
2444 if (!value) {
2445 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2446@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2447
2448 /* Create new one */
2449 fdesc->addr = value;
2450- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2451+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2452 return (Elf_Addr)fdesc;
2453 }
2454 #endif /* CONFIG_64BIT */
2455@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2456
2457 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2458 end = table + sechdrs[me->arch.unwind_section].sh_size;
2459- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2460+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2461
2462 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2463 me->arch.unwind_section, table, end, gp);
2464diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2465index 9147391..f3d949a 100644
2466--- a/arch/parisc/kernel/sys_parisc.c
2467+++ b/arch/parisc/kernel/sys_parisc.c
2468@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2469 /* At this point: (!vma || addr < vma->vm_end). */
2470 if (TASK_SIZE - len < addr)
2471 return -ENOMEM;
2472- if (!vma || addr + len <= vma->vm_start)
2473+ if (check_heap_stack_gap(vma, addr, len))
2474 return addr;
2475 addr = vma->vm_end;
2476 }
2477@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2478 /* At this point: (!vma || addr < vma->vm_end). */
2479 if (TASK_SIZE - len < addr)
2480 return -ENOMEM;
2481- if (!vma || addr + len <= vma->vm_start)
2482+ if (check_heap_stack_gap(vma, addr, len))
2483 return addr;
2484 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2485 if (addr < vma->vm_end) /* handle wraparound */
2486@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2487 if (flags & MAP_FIXED)
2488 return addr;
2489 if (!addr)
2490- addr = TASK_UNMAPPED_BASE;
2491+ addr = current->mm->mmap_base;
2492
2493 if (filp) {
2494 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2495diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2496index 8b58bf0..7afff03 100644
2497--- a/arch/parisc/kernel/traps.c
2498+++ b/arch/parisc/kernel/traps.c
2499@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2500
2501 down_read(&current->mm->mmap_sem);
2502 vma = find_vma(current->mm,regs->iaoq[0]);
2503- if (vma && (regs->iaoq[0] >= vma->vm_start)
2504- && (vma->vm_flags & VM_EXEC)) {
2505-
2506+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2507 fault_address = regs->iaoq[0];
2508 fault_space = regs->iasq[0];
2509
2510diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2511index c6afbfc..c5839f6 100644
2512--- a/arch/parisc/mm/fault.c
2513+++ b/arch/parisc/mm/fault.c
2514@@ -15,6 +15,7 @@
2515 #include <linux/sched.h>
2516 #include <linux/interrupt.h>
2517 #include <linux/module.h>
2518+#include <linux/unistd.h>
2519
2520 #include <asm/uaccess.h>
2521 #include <asm/traps.h>
2522@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2523 static unsigned long
2524 parisc_acctyp(unsigned long code, unsigned int inst)
2525 {
2526- if (code == 6 || code == 16)
2527+ if (code == 6 || code == 7 || code == 16)
2528 return VM_EXEC;
2529
2530 switch (inst & 0xf0000000) {
2531@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2532 }
2533 #endif
2534
2535+#ifdef CONFIG_PAX_PAGEEXEC
2536+/*
2537+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2538+ *
2539+ * returns 1 when task should be killed
2540+ * 2 when rt_sigreturn trampoline was detected
2541+ * 3 when unpatched PLT trampoline was detected
2542+ */
2543+static int pax_handle_fetch_fault(struct pt_regs *regs)
2544+{
2545+
2546+#ifdef CONFIG_PAX_EMUPLT
2547+ int err;
2548+
2549+ do { /* PaX: unpatched PLT emulation */
2550+ unsigned int bl, depwi;
2551+
2552+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2553+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2554+
2555+ if (err)
2556+ break;
2557+
2558+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2559+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2560+
2561+ err = get_user(ldw, (unsigned int *)addr);
2562+ err |= get_user(bv, (unsigned int *)(addr+4));
2563+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2564+
2565+ if (err)
2566+ break;
2567+
2568+ if (ldw == 0x0E801096U &&
2569+ bv == 0xEAC0C000U &&
2570+ ldw2 == 0x0E881095U)
2571+ {
2572+ unsigned int resolver, map;
2573+
2574+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2575+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2576+ if (err)
2577+ break;
2578+
2579+ regs->gr[20] = instruction_pointer(regs)+8;
2580+ regs->gr[21] = map;
2581+ regs->gr[22] = resolver;
2582+ regs->iaoq[0] = resolver | 3UL;
2583+ regs->iaoq[1] = regs->iaoq[0] + 4;
2584+ return 3;
2585+ }
2586+ }
2587+ } while (0);
2588+#endif
2589+
2590+#ifdef CONFIG_PAX_EMUTRAMP
2591+
2592+#ifndef CONFIG_PAX_EMUSIGRT
2593+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2594+ return 1;
2595+#endif
2596+
2597+ do { /* PaX: rt_sigreturn emulation */
2598+ unsigned int ldi1, ldi2, bel, nop;
2599+
2600+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2601+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2602+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2603+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2604+
2605+ if (err)
2606+ break;
2607+
2608+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2609+ ldi2 == 0x3414015AU &&
2610+ bel == 0xE4008200U &&
2611+ nop == 0x08000240U)
2612+ {
2613+ regs->gr[25] = (ldi1 & 2) >> 1;
2614+ regs->gr[20] = __NR_rt_sigreturn;
2615+ regs->gr[31] = regs->iaoq[1] + 16;
2616+ regs->sr[0] = regs->iasq[1];
2617+ regs->iaoq[0] = 0x100UL;
2618+ regs->iaoq[1] = regs->iaoq[0] + 4;
2619+ regs->iasq[0] = regs->sr[2];
2620+ regs->iasq[1] = regs->sr[2];
2621+ return 2;
2622+ }
2623+ } while (0);
2624+#endif
2625+
2626+ return 1;
2627+}
2628+
2629+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2630+{
2631+ unsigned long i;
2632+
2633+ printk(KERN_ERR "PAX: bytes at PC: ");
2634+ for (i = 0; i < 5; i++) {
2635+ unsigned int c;
2636+ if (get_user(c, (unsigned int *)pc+i))
2637+ printk(KERN_CONT "???????? ");
2638+ else
2639+ printk(KERN_CONT "%08x ", c);
2640+ }
2641+ printk("\n");
2642+}
2643+#endif
2644+
2645 int fixup_exception(struct pt_regs *regs)
2646 {
2647 const struct exception_table_entry *fix;
2648@@ -192,8 +303,33 @@ good_area:
2649
2650 acc_type = parisc_acctyp(code,regs->iir);
2651
2652- if ((vma->vm_flags & acc_type) != acc_type)
2653+ if ((vma->vm_flags & acc_type) != acc_type) {
2654+
2655+#ifdef CONFIG_PAX_PAGEEXEC
2656+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2657+ (address & ~3UL) == instruction_pointer(regs))
2658+ {
2659+ up_read(&mm->mmap_sem);
2660+ switch (pax_handle_fetch_fault(regs)) {
2661+
2662+#ifdef CONFIG_PAX_EMUPLT
2663+ case 3:
2664+ return;
2665+#endif
2666+
2667+#ifdef CONFIG_PAX_EMUTRAMP
2668+ case 2:
2669+ return;
2670+#endif
2671+
2672+ }
2673+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2674+ do_group_exit(SIGKILL);
2675+ }
2676+#endif
2677+
2678 goto bad_area;
2679+ }
2680
2681 /*
2682 * If for any reason at all we couldn't handle the fault, make
2683diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2684index c107b74..409dc0f 100644
2685--- a/arch/powerpc/Makefile
2686+++ b/arch/powerpc/Makefile
2687@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2688 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2689 CPP = $(CC) -E $(KBUILD_CFLAGS)
2690
2691+cflags-y += -Wno-sign-compare -Wno-extra
2692+
2693 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2694
2695 ifeq ($(CONFIG_PPC64),y)
2696diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2697index 6d94d27..50d4cad 100644
2698--- a/arch/powerpc/include/asm/device.h
2699+++ b/arch/powerpc/include/asm/device.h
2700@@ -14,7 +14,7 @@ struct dev_archdata {
2701 struct device_node *of_node;
2702
2703 /* DMA operations on that device */
2704- struct dma_map_ops *dma_ops;
2705+ const struct dma_map_ops *dma_ops;
2706
2707 /*
2708 * When an iommu is in use, dma_data is used as a ptr to the base of the
2709diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2710index e281dae..2b8a784 100644
2711--- a/arch/powerpc/include/asm/dma-mapping.h
2712+++ b/arch/powerpc/include/asm/dma-mapping.h
2713@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2714 #ifdef CONFIG_PPC64
2715 extern struct dma_map_ops dma_iommu_ops;
2716 #endif
2717-extern struct dma_map_ops dma_direct_ops;
2718+extern const struct dma_map_ops dma_direct_ops;
2719
2720-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2721+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2722 {
2723 /* We don't handle the NULL dev case for ISA for now. We could
2724 * do it via an out of line call but it is not needed for now. The
2725@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2726 return dev->archdata.dma_ops;
2727 }
2728
2729-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2730+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2731 {
2732 dev->archdata.dma_ops = ops;
2733 }
2734@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2735
2736 static inline int dma_supported(struct device *dev, u64 mask)
2737 {
2738- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2739+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2740
2741 if (unlikely(dma_ops == NULL))
2742 return 0;
2743@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2744
2745 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2746 {
2747- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2748+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2749
2750 if (unlikely(dma_ops == NULL))
2751 return -EIO;
2752@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2753 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2754 dma_addr_t *dma_handle, gfp_t flag)
2755 {
2756- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2757+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2758 void *cpu_addr;
2759
2760 BUG_ON(!dma_ops);
2761@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2762 static inline void dma_free_coherent(struct device *dev, size_t size,
2763 void *cpu_addr, dma_addr_t dma_handle)
2764 {
2765- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2766+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2767
2768 BUG_ON(!dma_ops);
2769
2770@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2771
2772 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2773 {
2774- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2775+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2776
2777 if (dma_ops->mapping_error)
2778 return dma_ops->mapping_error(dev, dma_addr);
2779diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2780index 5698502..5db093c 100644
2781--- a/arch/powerpc/include/asm/elf.h
2782+++ b/arch/powerpc/include/asm/elf.h
2783@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2784 the loader. We need to make sure that it is out of the way of the program
2785 that it will "exec", and that there is sufficient room for the brk. */
2786
2787-extern unsigned long randomize_et_dyn(unsigned long base);
2788-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2789+#define ELF_ET_DYN_BASE (0x20000000)
2790+
2791+#ifdef CONFIG_PAX_ASLR
2792+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2793+
2794+#ifdef __powerpc64__
2795+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2796+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2797+#else
2798+#define PAX_DELTA_MMAP_LEN 15
2799+#define PAX_DELTA_STACK_LEN 15
2800+#endif
2801+#endif
2802
2803 /*
2804 * Our registers are always unsigned longs, whether we're a 32 bit
2805@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2806 (0x7ff >> (PAGE_SHIFT - 12)) : \
2807 (0x3ffff >> (PAGE_SHIFT - 12)))
2808
2809-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2810-#define arch_randomize_brk arch_randomize_brk
2811-
2812 #endif /* __KERNEL__ */
2813
2814 /*
2815diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2816index edfc980..1766f59 100644
2817--- a/arch/powerpc/include/asm/iommu.h
2818+++ b/arch/powerpc/include/asm/iommu.h
2819@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2820 extern void iommu_init_early_dart(void);
2821 extern void iommu_init_early_pasemi(void);
2822
2823+/* dma-iommu.c */
2824+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2825+
2826 #ifdef CONFIG_PCI
2827 extern void pci_iommu_init(void);
2828 extern void pci_direct_iommu_init(void);
2829diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2830index 9163695..5a00112 100644
2831--- a/arch/powerpc/include/asm/kmap_types.h
2832+++ b/arch/powerpc/include/asm/kmap_types.h
2833@@ -26,6 +26,7 @@ enum km_type {
2834 KM_SOFTIRQ1,
2835 KM_PPC_SYNC_PAGE,
2836 KM_PPC_SYNC_ICACHE,
2837+ KM_CLEARPAGE,
2838 KM_TYPE_NR
2839 };
2840
2841diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2842index ff24254..fe45b21 100644
2843--- a/arch/powerpc/include/asm/page.h
2844+++ b/arch/powerpc/include/asm/page.h
2845@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2846 * and needs to be executable. This means the whole heap ends
2847 * up being executable.
2848 */
2849-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2850- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2851+#define VM_DATA_DEFAULT_FLAGS32 \
2852+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2853+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2854
2855 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2856 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2857@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2858 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2859 #endif
2860
2861+#define ktla_ktva(addr) (addr)
2862+#define ktva_ktla(addr) (addr)
2863+
2864 #ifndef __ASSEMBLY__
2865
2866 #undef STRICT_MM_TYPECHECKS
2867diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2868index 3f17b83..1f9e766 100644
2869--- a/arch/powerpc/include/asm/page_64.h
2870+++ b/arch/powerpc/include/asm/page_64.h
2871@@ -180,15 +180,18 @@ do { \
2872 * stack by default, so in the absense of a PT_GNU_STACK program header
2873 * we turn execute permission off.
2874 */
2875-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2876- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2877+#define VM_STACK_DEFAULT_FLAGS32 \
2878+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2879+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2880
2881 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2882 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2883
2884+#ifndef CONFIG_PAX_PAGEEXEC
2885 #define VM_STACK_DEFAULT_FLAGS \
2886 (test_thread_flag(TIF_32BIT) ? \
2887 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2888+#endif
2889
2890 #include <asm-generic/getorder.h>
2891
2892diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2893index b5ea626..40308222 100644
2894--- a/arch/powerpc/include/asm/pci.h
2895+++ b/arch/powerpc/include/asm/pci.h
2896@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2897 }
2898
2899 #ifdef CONFIG_PCI
2900-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2901-extern struct dma_map_ops *get_pci_dma_ops(void);
2902+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2903+extern const struct dma_map_ops *get_pci_dma_ops(void);
2904 #else /* CONFIG_PCI */
2905 #define set_pci_dma_ops(d)
2906 #define get_pci_dma_ops() NULL
2907diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2908index 2a5da06..d65bea2 100644
2909--- a/arch/powerpc/include/asm/pgtable.h
2910+++ b/arch/powerpc/include/asm/pgtable.h
2911@@ -2,6 +2,7 @@
2912 #define _ASM_POWERPC_PGTABLE_H
2913 #ifdef __KERNEL__
2914
2915+#include <linux/const.h>
2916 #ifndef __ASSEMBLY__
2917 #include <asm/processor.h> /* For TASK_SIZE */
2918 #include <asm/mmu.h>
2919diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2920index 4aad413..85d86bf 100644
2921--- a/arch/powerpc/include/asm/pte-hash32.h
2922+++ b/arch/powerpc/include/asm/pte-hash32.h
2923@@ -21,6 +21,7 @@
2924 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2925 #define _PAGE_USER 0x004 /* usermode access allowed */
2926 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2927+#define _PAGE_EXEC _PAGE_GUARDED
2928 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2929 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2930 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2931diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2932index 8c34149..78f425a 100644
2933--- a/arch/powerpc/include/asm/ptrace.h
2934+++ b/arch/powerpc/include/asm/ptrace.h
2935@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2936 } while(0)
2937
2938 struct task_struct;
2939-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2940+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2941 extern int ptrace_put_reg(struct task_struct *task, int regno,
2942 unsigned long data);
2943
2944diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2945index 32a7c30..be3a8bb 100644
2946--- a/arch/powerpc/include/asm/reg.h
2947+++ b/arch/powerpc/include/asm/reg.h
2948@@ -191,6 +191,7 @@
2949 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2950 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2951 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2952+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2953 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2954 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2955 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2956diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2957index 8979d4c..d2fd0d3 100644
2958--- a/arch/powerpc/include/asm/swiotlb.h
2959+++ b/arch/powerpc/include/asm/swiotlb.h
2960@@ -13,7 +13,7 @@
2961
2962 #include <linux/swiotlb.h>
2963
2964-extern struct dma_map_ops swiotlb_dma_ops;
2965+extern const struct dma_map_ops swiotlb_dma_ops;
2966
2967 static inline void dma_mark_clean(void *addr, size_t size) {}
2968
2969diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2970index 094a12a..877a60a 100644
2971--- a/arch/powerpc/include/asm/system.h
2972+++ b/arch/powerpc/include/asm/system.h
2973@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2974 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2975 #endif
2976
2977-extern unsigned long arch_align_stack(unsigned long sp);
2978+#define arch_align_stack(x) ((x) & ~0xfUL)
2979
2980 /* Used in very early kernel initialization. */
2981 extern unsigned long reloc_offset(void);
2982diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2983index bd0fb84..a42a14b 100644
2984--- a/arch/powerpc/include/asm/uaccess.h
2985+++ b/arch/powerpc/include/asm/uaccess.h
2986@@ -13,6 +13,8 @@
2987 #define VERIFY_READ 0
2988 #define VERIFY_WRITE 1
2989
2990+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2991+
2992 /*
2993 * The fs value determines whether argument validity checking should be
2994 * performed or not. If get_fs() == USER_DS, checking is performed, with
2995@@ -327,52 +329,6 @@ do { \
2996 extern unsigned long __copy_tofrom_user(void __user *to,
2997 const void __user *from, unsigned long size);
2998
2999-#ifndef __powerpc64__
3000-
3001-static inline unsigned long copy_from_user(void *to,
3002- const void __user *from, unsigned long n)
3003-{
3004- unsigned long over;
3005-
3006- if (access_ok(VERIFY_READ, from, n))
3007- return __copy_tofrom_user((__force void __user *)to, from, n);
3008- if ((unsigned long)from < TASK_SIZE) {
3009- over = (unsigned long)from + n - TASK_SIZE;
3010- return __copy_tofrom_user((__force void __user *)to, from,
3011- n - over) + over;
3012- }
3013- return n;
3014-}
3015-
3016-static inline unsigned long copy_to_user(void __user *to,
3017- const void *from, unsigned long n)
3018-{
3019- unsigned long over;
3020-
3021- if (access_ok(VERIFY_WRITE, to, n))
3022- return __copy_tofrom_user(to, (__force void __user *)from, n);
3023- if ((unsigned long)to < TASK_SIZE) {
3024- over = (unsigned long)to + n - TASK_SIZE;
3025- return __copy_tofrom_user(to, (__force void __user *)from,
3026- n - over) + over;
3027- }
3028- return n;
3029-}
3030-
3031-#else /* __powerpc64__ */
3032-
3033-#define __copy_in_user(to, from, size) \
3034- __copy_tofrom_user((to), (from), (size))
3035-
3036-extern unsigned long copy_from_user(void *to, const void __user *from,
3037- unsigned long n);
3038-extern unsigned long copy_to_user(void __user *to, const void *from,
3039- unsigned long n);
3040-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3041- unsigned long n);
3042-
3043-#endif /* __powerpc64__ */
3044-
3045 static inline unsigned long __copy_from_user_inatomic(void *to,
3046 const void __user *from, unsigned long n)
3047 {
3048@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3049 if (ret == 0)
3050 return 0;
3051 }
3052+
3053+ if (!__builtin_constant_p(n))
3054+ check_object_size(to, n, false);
3055+
3056 return __copy_tofrom_user((__force void __user *)to, from, n);
3057 }
3058
3059@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3060 if (ret == 0)
3061 return 0;
3062 }
3063+
3064+ if (!__builtin_constant_p(n))
3065+ check_object_size(from, n, true);
3066+
3067 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3068 }
3069
3070@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3071 return __copy_to_user_inatomic(to, from, size);
3072 }
3073
3074+#ifndef __powerpc64__
3075+
3076+static inline unsigned long __must_check copy_from_user(void *to,
3077+ const void __user *from, unsigned long n)
3078+{
3079+ unsigned long over;
3080+
3081+ if ((long)n < 0)
3082+ return n;
3083+
3084+ if (access_ok(VERIFY_READ, from, n)) {
3085+ if (!__builtin_constant_p(n))
3086+ check_object_size(to, n, false);
3087+ return __copy_tofrom_user((__force void __user *)to, from, n);
3088+ }
3089+ if ((unsigned long)from < TASK_SIZE) {
3090+ over = (unsigned long)from + n - TASK_SIZE;
3091+ if (!__builtin_constant_p(n - over))
3092+ check_object_size(to, n - over, false);
3093+ return __copy_tofrom_user((__force void __user *)to, from,
3094+ n - over) + over;
3095+ }
3096+ return n;
3097+}
3098+
3099+static inline unsigned long __must_check copy_to_user(void __user *to,
3100+ const void *from, unsigned long n)
3101+{
3102+ unsigned long over;
3103+
3104+ if ((long)n < 0)
3105+ return n;
3106+
3107+ if (access_ok(VERIFY_WRITE, to, n)) {
3108+ if (!__builtin_constant_p(n))
3109+ check_object_size(from, n, true);
3110+ return __copy_tofrom_user(to, (__force void __user *)from, n);
3111+ }
3112+ if ((unsigned long)to < TASK_SIZE) {
3113+ over = (unsigned long)to + n - TASK_SIZE;
3114+ if (!__builtin_constant_p(n))
3115+ check_object_size(from, n - over, true);
3116+ return __copy_tofrom_user(to, (__force void __user *)from,
3117+ n - over) + over;
3118+ }
3119+ return n;
3120+}
3121+
3122+#else /* __powerpc64__ */
3123+
3124+#define __copy_in_user(to, from, size) \
3125+ __copy_tofrom_user((to), (from), (size))
3126+
3127+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3128+{
3129+ if ((long)n < 0 || n > INT_MAX)
3130+ return n;
3131+
3132+ if (!__builtin_constant_p(n))
3133+ check_object_size(to, n, false);
3134+
3135+ if (likely(access_ok(VERIFY_READ, from, n)))
3136+ n = __copy_from_user(to, from, n);
3137+ else
3138+ memset(to, 0, n);
3139+ return n;
3140+}
3141+
3142+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3143+{
3144+ if ((long)n < 0 || n > INT_MAX)
3145+ return n;
3146+
3147+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
3148+ if (!__builtin_constant_p(n))
3149+ check_object_size(from, n, true);
3150+ n = __copy_to_user(to, from, n);
3151+ }
3152+ return n;
3153+}
3154+
3155+extern unsigned long copy_in_user(void __user *to, const void __user *from,
3156+ unsigned long n);
3157+
3158+#endif /* __powerpc64__ */
3159+
3160 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3161
3162 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3163diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3164index bb37b1d..01fe9ce 100644
3165--- a/arch/powerpc/kernel/cacheinfo.c
3166+++ b/arch/powerpc/kernel/cacheinfo.c
3167@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3168 &cache_assoc_attr,
3169 };
3170
3171-static struct sysfs_ops cache_index_ops = {
3172+static const struct sysfs_ops cache_index_ops = {
3173 .show = cache_index_show,
3174 };
3175
3176diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3177index 37771a5..648530c 100644
3178--- a/arch/powerpc/kernel/dma-iommu.c
3179+++ b/arch/powerpc/kernel/dma-iommu.c
3180@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3181 }
3182
3183 /* We support DMA to/from any memory page via the iommu */
3184-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3185+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3186 {
3187 struct iommu_table *tbl = get_iommu_table_base(dev);
3188
3189diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3190index e96cbbd..bdd6d41 100644
3191--- a/arch/powerpc/kernel/dma-swiotlb.c
3192+++ b/arch/powerpc/kernel/dma-swiotlb.c
3193@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3194 * map_page, and unmap_page on highmem, use normal dma_ops
3195 * for everything else.
3196 */
3197-struct dma_map_ops swiotlb_dma_ops = {
3198+const struct dma_map_ops swiotlb_dma_ops = {
3199 .alloc_coherent = dma_direct_alloc_coherent,
3200 .free_coherent = dma_direct_free_coherent,
3201 .map_sg = swiotlb_map_sg_attrs,
3202diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3203index 6215062..ebea59c 100644
3204--- a/arch/powerpc/kernel/dma.c
3205+++ b/arch/powerpc/kernel/dma.c
3206@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3207 }
3208 #endif
3209
3210-struct dma_map_ops dma_direct_ops = {
3211+const struct dma_map_ops dma_direct_ops = {
3212 .alloc_coherent = dma_direct_alloc_coherent,
3213 .free_coherent = dma_direct_free_coherent,
3214 .map_sg = dma_direct_map_sg,
3215diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3216index 24dcc0e..a300455 100644
3217--- a/arch/powerpc/kernel/exceptions-64e.S
3218+++ b/arch/powerpc/kernel/exceptions-64e.S
3219@@ -455,6 +455,7 @@ storage_fault_common:
3220 std r14,_DAR(r1)
3221 std r15,_DSISR(r1)
3222 addi r3,r1,STACK_FRAME_OVERHEAD
3223+ bl .save_nvgprs
3224 mr r4,r14
3225 mr r5,r15
3226 ld r14,PACA_EXGEN+EX_R14(r13)
3227@@ -464,8 +465,7 @@ storage_fault_common:
3228 cmpdi r3,0
3229 bne- 1f
3230 b .ret_from_except_lite
3231-1: bl .save_nvgprs
3232- mr r5,r3
3233+1: mr r5,r3
3234 addi r3,r1,STACK_FRAME_OVERHEAD
3235 ld r4,_DAR(r1)
3236 bl .bad_page_fault
3237diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3238index 1808876..9fd206a 100644
3239--- a/arch/powerpc/kernel/exceptions-64s.S
3240+++ b/arch/powerpc/kernel/exceptions-64s.S
3241@@ -818,10 +818,10 @@ handle_page_fault:
3242 11: ld r4,_DAR(r1)
3243 ld r5,_DSISR(r1)
3244 addi r3,r1,STACK_FRAME_OVERHEAD
3245+ bl .save_nvgprs
3246 bl .do_page_fault
3247 cmpdi r3,0
3248 beq+ 13f
3249- bl .save_nvgprs
3250 mr r5,r3
3251 addi r3,r1,STACK_FRAME_OVERHEAD
3252 lwz r4,_DAR(r1)
3253diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3254index a4c8b38..1b09ad9 100644
3255--- a/arch/powerpc/kernel/ibmebus.c
3256+++ b/arch/powerpc/kernel/ibmebus.c
3257@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3258 return 1;
3259 }
3260
3261-static struct dma_map_ops ibmebus_dma_ops = {
3262+static const struct dma_map_ops ibmebus_dma_ops = {
3263 .alloc_coherent = ibmebus_alloc_coherent,
3264 .free_coherent = ibmebus_free_coherent,
3265 .map_sg = ibmebus_map_sg,
3266diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3267index 641c74b..8339ad7 100644
3268--- a/arch/powerpc/kernel/kgdb.c
3269+++ b/arch/powerpc/kernel/kgdb.c
3270@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3271 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3272 return 0;
3273
3274- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3275+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3276 regs->nip += 4;
3277
3278 return 1;
3279@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3280 /*
3281 * Global data
3282 */
3283-struct kgdb_arch arch_kgdb_ops = {
3284+const struct kgdb_arch arch_kgdb_ops = {
3285 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3286 };
3287
3288diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3289index 477c663..4f50234 100644
3290--- a/arch/powerpc/kernel/module.c
3291+++ b/arch/powerpc/kernel/module.c
3292@@ -31,11 +31,24 @@
3293
3294 LIST_HEAD(module_bug_list);
3295
3296+#ifdef CONFIG_PAX_KERNEXEC
3297 void *module_alloc(unsigned long size)
3298 {
3299 if (size == 0)
3300 return NULL;
3301
3302+ return vmalloc(size);
3303+}
3304+
3305+void *module_alloc_exec(unsigned long size)
3306+#else
3307+void *module_alloc(unsigned long size)
3308+#endif
3309+
3310+{
3311+ if (size == 0)
3312+ return NULL;
3313+
3314 return vmalloc_exec(size);
3315 }
3316
3317@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3318 vfree(module_region);
3319 }
3320
3321+#ifdef CONFIG_PAX_KERNEXEC
3322+void module_free_exec(struct module *mod, void *module_region)
3323+{
3324+ module_free(mod, module_region);
3325+}
3326+#endif
3327+
3328 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3329 const Elf_Shdr *sechdrs,
3330 const char *name)
3331diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3332index f832773..0507238 100644
3333--- a/arch/powerpc/kernel/module_32.c
3334+++ b/arch/powerpc/kernel/module_32.c
3335@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3336 me->arch.core_plt_section = i;
3337 }
3338 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3339- printk("Module doesn't contain .plt or .init.plt sections.\n");
3340+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3341 return -ENOEXEC;
3342 }
3343
3344@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3345
3346 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3347 /* Init, or core PLT? */
3348- if (location >= mod->module_core
3349- && location < mod->module_core + mod->core_size)
3350+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3351+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3352 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3353- else
3354+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3355+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3356 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3357+ else {
3358+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3359+ return ~0UL;
3360+ }
3361
3362 /* Find this entry, or if that fails, the next avail. entry */
3363 while (entry->jump[0]) {
3364diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3365index cadbed6..b9bbb00 100644
3366--- a/arch/powerpc/kernel/pci-common.c
3367+++ b/arch/powerpc/kernel/pci-common.c
3368@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3369 unsigned int ppc_pci_flags = 0;
3370
3371
3372-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3373+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3374
3375-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3376+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3377 {
3378 pci_dma_ops = dma_ops;
3379 }
3380
3381-struct dma_map_ops *get_pci_dma_ops(void)
3382+const struct dma_map_ops *get_pci_dma_ops(void)
3383 {
3384 return pci_dma_ops;
3385 }
3386diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3387index 7b816da..8d5c277 100644
3388--- a/arch/powerpc/kernel/process.c
3389+++ b/arch/powerpc/kernel/process.c
3390@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3391 * Lookup NIP late so we have the best change of getting the
3392 * above info out without failing
3393 */
3394- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3395- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3396+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3397+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3398 #endif
3399 show_stack(current, (unsigned long *) regs->gpr[1]);
3400 if (!user_mode(regs))
3401@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3402 newsp = stack[0];
3403 ip = stack[STACK_FRAME_LR_SAVE];
3404 if (!firstframe || ip != lr) {
3405- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3406+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3407 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3408 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3409- printk(" (%pS)",
3410+ printk(" (%pA)",
3411 (void *)current->ret_stack[curr_frame].ret);
3412 curr_frame--;
3413 }
3414@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3415 struct pt_regs *regs = (struct pt_regs *)
3416 (sp + STACK_FRAME_OVERHEAD);
3417 lr = regs->link;
3418- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3419+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3420 regs->trap, (void *)regs->nip, (void *)lr);
3421 firstframe = 1;
3422 }
3423@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3424 }
3425
3426 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3427-
3428-unsigned long arch_align_stack(unsigned long sp)
3429-{
3430- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3431- sp -= get_random_int() & ~PAGE_MASK;
3432- return sp & ~0xf;
3433-}
3434-
3435-static inline unsigned long brk_rnd(void)
3436-{
3437- unsigned long rnd = 0;
3438-
3439- /* 8MB for 32bit, 1GB for 64bit */
3440- if (is_32bit_task())
3441- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3442- else
3443- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3444-
3445- return rnd << PAGE_SHIFT;
3446-}
3447-
3448-unsigned long arch_randomize_brk(struct mm_struct *mm)
3449-{
3450- unsigned long base = mm->brk;
3451- unsigned long ret;
3452-
3453-#ifdef CONFIG_PPC_STD_MMU_64
3454- /*
3455- * If we are using 1TB segments and we are allowed to randomise
3456- * the heap, we can put it above 1TB so it is backed by a 1TB
3457- * segment. Otherwise the heap will be in the bottom 1TB
3458- * which always uses 256MB segments and this may result in a
3459- * performance penalty.
3460- */
3461- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3462- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3463-#endif
3464-
3465- ret = PAGE_ALIGN(base + brk_rnd());
3466-
3467- if (ret < mm->brk)
3468- return mm->brk;
3469-
3470- return ret;
3471-}
3472-
3473-unsigned long randomize_et_dyn(unsigned long base)
3474-{
3475- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3476-
3477- if (ret < base)
3478- return base;
3479-
3480- return ret;
3481-}
3482diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3483index ef14988..856c4bc 100644
3484--- a/arch/powerpc/kernel/ptrace.c
3485+++ b/arch/powerpc/kernel/ptrace.c
3486@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3487 /*
3488 * Get contents of register REGNO in task TASK.
3489 */
3490-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3491+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3492 {
3493 if (task->thread.regs == NULL)
3494 return -EIO;
3495@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3496
3497 CHECK_FULL_REGS(child->thread.regs);
3498 if (index < PT_FPR0) {
3499- tmp = ptrace_get_reg(child, (int) index);
3500+ tmp = ptrace_get_reg(child, index);
3501 } else {
3502 flush_fp_to_thread(child);
3503 tmp = ((unsigned long *)child->thread.fpr)
3504diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3505index d670429..2bc59b2 100644
3506--- a/arch/powerpc/kernel/signal_32.c
3507+++ b/arch/powerpc/kernel/signal_32.c
3508@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3509 /* Save user registers on the stack */
3510 frame = &rt_sf->uc.uc_mcontext;
3511 addr = frame;
3512- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3513+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3514 if (save_user_regs(regs, frame, 0, 1))
3515 goto badframe;
3516 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3517diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3518index 2fe6fc6..ada0d96 100644
3519--- a/arch/powerpc/kernel/signal_64.c
3520+++ b/arch/powerpc/kernel/signal_64.c
3521@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3522 current->thread.fpscr.val = 0;
3523
3524 /* Set up to return from userspace. */
3525- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3526+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3527 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3528 } else {
3529 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3530diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3531index b97c2d6..dd01a6a 100644
3532--- a/arch/powerpc/kernel/sys_ppc32.c
3533+++ b/arch/powerpc/kernel/sys_ppc32.c
3534@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3535 if (oldlenp) {
3536 if (!error) {
3537 if (get_user(oldlen, oldlenp) ||
3538- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3539+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3540+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3541 error = -EFAULT;
3542 }
3543- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3544 }
3545 return error;
3546 }
3547diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3548index 6f0ae1a..e4b6a56 100644
3549--- a/arch/powerpc/kernel/traps.c
3550+++ b/arch/powerpc/kernel/traps.c
3551@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3552 static inline void pmac_backlight_unblank(void) { }
3553 #endif
3554
3555+extern void gr_handle_kernel_exploit(void);
3556+
3557 int die(const char *str, struct pt_regs *regs, long err)
3558 {
3559 static struct {
3560@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3561 if (panic_on_oops)
3562 panic("Fatal exception");
3563
3564+ gr_handle_kernel_exploit();
3565+
3566 oops_exit();
3567 do_exit(err);
3568
3569diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3570index 137dc22..fe57a79 100644
3571--- a/arch/powerpc/kernel/vdso.c
3572+++ b/arch/powerpc/kernel/vdso.c
3573@@ -36,6 +36,7 @@
3574 #include <asm/firmware.h>
3575 #include <asm/vdso.h>
3576 #include <asm/vdso_datapage.h>
3577+#include <asm/mman.h>
3578
3579 #include "setup.h"
3580
3581@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3582 vdso_base = VDSO32_MBASE;
3583 #endif
3584
3585- current->mm->context.vdso_base = 0;
3586+ current->mm->context.vdso_base = ~0UL;
3587
3588 /* vDSO has a problem and was disabled, just don't "enable" it for the
3589 * process
3590@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3591 vdso_base = get_unmapped_area(NULL, vdso_base,
3592 (vdso_pages << PAGE_SHIFT) +
3593 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3594- 0, 0);
3595+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3596 if (IS_ERR_VALUE(vdso_base)) {
3597 rc = vdso_base;
3598 goto fail_mmapsem;
3599diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3600index 77f6421..829564a 100644
3601--- a/arch/powerpc/kernel/vio.c
3602+++ b/arch/powerpc/kernel/vio.c
3603@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3604 vio_cmo_dealloc(viodev, alloc_size);
3605 }
3606
3607-struct dma_map_ops vio_dma_mapping_ops = {
3608+static const struct dma_map_ops vio_dma_mapping_ops = {
3609 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3610 .free_coherent = vio_dma_iommu_free_coherent,
3611 .map_sg = vio_dma_iommu_map_sg,
3612 .unmap_sg = vio_dma_iommu_unmap_sg,
3613+ .dma_supported = dma_iommu_dma_supported,
3614 .map_page = vio_dma_iommu_map_page,
3615 .unmap_page = vio_dma_iommu_unmap_page,
3616
3617@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3618
3619 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3620 {
3621- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3622 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3623 }
3624
3625diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3626index 5eea6f3..5d10396 100644
3627--- a/arch/powerpc/lib/usercopy_64.c
3628+++ b/arch/powerpc/lib/usercopy_64.c
3629@@ -9,22 +9,6 @@
3630 #include <linux/module.h>
3631 #include <asm/uaccess.h>
3632
3633-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3634-{
3635- if (likely(access_ok(VERIFY_READ, from, n)))
3636- n = __copy_from_user(to, from, n);
3637- else
3638- memset(to, 0, n);
3639- return n;
3640-}
3641-
3642-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3643-{
3644- if (likely(access_ok(VERIFY_WRITE, to, n)))
3645- n = __copy_to_user(to, from, n);
3646- return n;
3647-}
3648-
3649 unsigned long copy_in_user(void __user *to, const void __user *from,
3650 unsigned long n)
3651 {
3652@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3653 return n;
3654 }
3655
3656-EXPORT_SYMBOL(copy_from_user);
3657-EXPORT_SYMBOL(copy_to_user);
3658 EXPORT_SYMBOL(copy_in_user);
3659
3660diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3661index e7dae82..877ce0d 100644
3662--- a/arch/powerpc/mm/fault.c
3663+++ b/arch/powerpc/mm/fault.c
3664@@ -30,6 +30,10 @@
3665 #include <linux/kprobes.h>
3666 #include <linux/kdebug.h>
3667 #include <linux/perf_event.h>
3668+#include <linux/slab.h>
3669+#include <linux/pagemap.h>
3670+#include <linux/compiler.h>
3671+#include <linux/unistd.h>
3672
3673 #include <asm/firmware.h>
3674 #include <asm/page.h>
3675@@ -40,6 +44,7 @@
3676 #include <asm/uaccess.h>
3677 #include <asm/tlbflush.h>
3678 #include <asm/siginfo.h>
3679+#include <asm/ptrace.h>
3680
3681
3682 #ifdef CONFIG_KPROBES
3683@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3684 }
3685 #endif
3686
3687+#ifdef CONFIG_PAX_PAGEEXEC
3688+/*
3689+ * PaX: decide what to do with offenders (regs->nip = fault address)
3690+ *
3691+ * returns 1 when task should be killed
3692+ */
3693+static int pax_handle_fetch_fault(struct pt_regs *regs)
3694+{
3695+ return 1;
3696+}
3697+
3698+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3699+{
3700+ unsigned long i;
3701+
3702+ printk(KERN_ERR "PAX: bytes at PC: ");
3703+ for (i = 0; i < 5; i++) {
3704+ unsigned int c;
3705+ if (get_user(c, (unsigned int __user *)pc+i))
3706+ printk(KERN_CONT "???????? ");
3707+ else
3708+ printk(KERN_CONT "%08x ", c);
3709+ }
3710+ printk("\n");
3711+}
3712+#endif
3713+
3714 /*
3715 * Check whether the instruction at regs->nip is a store using
3716 * an update addressing form which will update r1.
3717@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3718 * indicate errors in DSISR but can validly be set in SRR1.
3719 */
3720 if (trap == 0x400)
3721- error_code &= 0x48200000;
3722+ error_code &= 0x58200000;
3723 else
3724 is_write = error_code & DSISR_ISSTORE;
3725 #else
3726@@ -250,7 +282,7 @@ good_area:
3727 * "undefined". Of those that can be set, this is the only
3728 * one which seems bad.
3729 */
3730- if (error_code & 0x10000000)
3731+ if (error_code & DSISR_GUARDED)
3732 /* Guarded storage error. */
3733 goto bad_area;
3734 #endif /* CONFIG_8xx */
3735@@ -265,7 +297,7 @@ good_area:
3736 * processors use the same I/D cache coherency mechanism
3737 * as embedded.
3738 */
3739- if (error_code & DSISR_PROTFAULT)
3740+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3741 goto bad_area;
3742 #endif /* CONFIG_PPC_STD_MMU */
3743
3744@@ -335,6 +367,23 @@ bad_area:
3745 bad_area_nosemaphore:
3746 /* User mode accesses cause a SIGSEGV */
3747 if (user_mode(regs)) {
3748+
3749+#ifdef CONFIG_PAX_PAGEEXEC
3750+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3751+#ifdef CONFIG_PPC_STD_MMU
3752+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3753+#else
3754+ if (is_exec && regs->nip == address) {
3755+#endif
3756+ switch (pax_handle_fetch_fault(regs)) {
3757+ }
3758+
3759+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3760+ do_group_exit(SIGKILL);
3761+ }
3762+ }
3763+#endif
3764+
3765 _exception(SIGSEGV, regs, code, address);
3766 return 0;
3767 }
3768diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3769index 5973631..ad617af 100644
3770--- a/arch/powerpc/mm/mem.c
3771+++ b/arch/powerpc/mm/mem.c
3772@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3773 {
3774 unsigned long lmb_next_region_start_pfn,
3775 lmb_region_max_pfn;
3776- int i;
3777+ unsigned int i;
3778
3779 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3780 lmb_region_max_pfn =
3781diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3782index 0d957a4..26d968f 100644
3783--- a/arch/powerpc/mm/mmap_64.c
3784+++ b/arch/powerpc/mm/mmap_64.c
3785@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3786 */
3787 if (mmap_is_legacy()) {
3788 mm->mmap_base = TASK_UNMAPPED_BASE;
3789+
3790+#ifdef CONFIG_PAX_RANDMMAP
3791+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3792+ mm->mmap_base += mm->delta_mmap;
3793+#endif
3794+
3795 mm->get_unmapped_area = arch_get_unmapped_area;
3796 mm->unmap_area = arch_unmap_area;
3797 } else {
3798 mm->mmap_base = mmap_base();
3799+
3800+#ifdef CONFIG_PAX_RANDMMAP
3801+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3802+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3803+#endif
3804+
3805 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3806 mm->unmap_area = arch_unmap_area_topdown;
3807 }
3808diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3809index ba51948..23009d9 100644
3810--- a/arch/powerpc/mm/slice.c
3811+++ b/arch/powerpc/mm/slice.c
3812@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3813 if ((mm->task_size - len) < addr)
3814 return 0;
3815 vma = find_vma(mm, addr);
3816- return (!vma || (addr + len) <= vma->vm_start);
3817+ return check_heap_stack_gap(vma, addr, len);
3818 }
3819
3820 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3821@@ -256,7 +256,7 @@ full_search:
3822 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3823 continue;
3824 }
3825- if (!vma || addr + len <= vma->vm_start) {
3826+ if (check_heap_stack_gap(vma, addr, len)) {
3827 /*
3828 * Remember the place where we stopped the search:
3829 */
3830@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3831 }
3832 }
3833
3834- addr = mm->mmap_base;
3835- while (addr > len) {
3836+ if (mm->mmap_base < len)
3837+ addr = -ENOMEM;
3838+ else
3839+ addr = mm->mmap_base - len;
3840+
3841+ while (!IS_ERR_VALUE(addr)) {
3842 /* Go down by chunk size */
3843- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3844+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3845
3846 /* Check for hit with different page size */
3847 mask = slice_range_to_mask(addr, len);
3848@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3849 * return with success:
3850 */
3851 vma = find_vma(mm, addr);
3852- if (!vma || (addr + len) <= vma->vm_start) {
3853+ if (check_heap_stack_gap(vma, addr, len)) {
3854 /* remember the address as a hint for next time */
3855 if (use_cache)
3856 mm->free_area_cache = addr;
3857@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3858 mm->cached_hole_size = vma->vm_start - addr;
3859
3860 /* try just below the current vma->vm_start */
3861- addr = vma->vm_start;
3862+ addr = skip_heap_stack_gap(vma, len);
3863 }
3864
3865 /*
3866@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3867 if (fixed && addr > (mm->task_size - len))
3868 return -EINVAL;
3869
3870+#ifdef CONFIG_PAX_RANDMMAP
3871+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3872+ addr = 0;
3873+#endif
3874+
3875 /* If hint, make sure it matches our alignment restrictions */
3876 if (!fixed && addr) {
3877 addr = _ALIGN_UP(addr, 1ul << pshift);
3878diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3879index b5c753d..8f01abe 100644
3880--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3881+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3882@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3883 lite5200_pm_target_state = PM_SUSPEND_ON;
3884 }
3885
3886-static struct platform_suspend_ops lite5200_pm_ops = {
3887+static const struct platform_suspend_ops lite5200_pm_ops = {
3888 .valid = lite5200_pm_valid,
3889 .begin = lite5200_pm_begin,
3890 .prepare = lite5200_pm_prepare,
3891diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3892index a55b0b6..478c18e 100644
3893--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3894+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3895@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3896 iounmap(mbar);
3897 }
3898
3899-static struct platform_suspend_ops mpc52xx_pm_ops = {
3900+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3901 .valid = mpc52xx_pm_valid,
3902 .prepare = mpc52xx_pm_prepare,
3903 .enter = mpc52xx_pm_enter,
3904diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3905index 08e65fc..643d3ac 100644
3906--- a/arch/powerpc/platforms/83xx/suspend.c
3907+++ b/arch/powerpc/platforms/83xx/suspend.c
3908@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3909 return ret;
3910 }
3911
3912-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3913+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3914 .valid = mpc83xx_suspend_valid,
3915 .begin = mpc83xx_suspend_begin,
3916 .enter = mpc83xx_suspend_enter,
3917diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3918index ca5bfdf..1602e09 100644
3919--- a/arch/powerpc/platforms/cell/iommu.c
3920+++ b/arch/powerpc/platforms/cell/iommu.c
3921@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3922
3923 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3924
3925-struct dma_map_ops dma_iommu_fixed_ops = {
3926+const struct dma_map_ops dma_iommu_fixed_ops = {
3927 .alloc_coherent = dma_fixed_alloc_coherent,
3928 .free_coherent = dma_fixed_free_coherent,
3929 .map_sg = dma_fixed_map_sg,
3930diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3931index e34b305..20e48ec 100644
3932--- a/arch/powerpc/platforms/ps3/system-bus.c
3933+++ b/arch/powerpc/platforms/ps3/system-bus.c
3934@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3935 return mask >= DMA_BIT_MASK(32);
3936 }
3937
3938-static struct dma_map_ops ps3_sb_dma_ops = {
3939+static const struct dma_map_ops ps3_sb_dma_ops = {
3940 .alloc_coherent = ps3_alloc_coherent,
3941 .free_coherent = ps3_free_coherent,
3942 .map_sg = ps3_sb_map_sg,
3943@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3944 .unmap_page = ps3_unmap_page,
3945 };
3946
3947-static struct dma_map_ops ps3_ioc0_dma_ops = {
3948+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3949 .alloc_coherent = ps3_alloc_coherent,
3950 .free_coherent = ps3_free_coherent,
3951 .map_sg = ps3_ioc0_map_sg,
3952diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3953index f0e6f28..60d53ed 100644
3954--- a/arch/powerpc/platforms/pseries/Kconfig
3955+++ b/arch/powerpc/platforms/pseries/Kconfig
3956@@ -2,6 +2,8 @@ config PPC_PSERIES
3957 depends on PPC64 && PPC_BOOK3S
3958 bool "IBM pSeries & new (POWER5-based) iSeries"
3959 select MPIC
3960+ select PCI_MSI
3961+ select XICS
3962 select PPC_I8259
3963 select PPC_RTAS
3964 select RTAS_ERROR_LOGGING
3965diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3966index 43c0aca..42c045b 100644
3967--- a/arch/s390/Kconfig
3968+++ b/arch/s390/Kconfig
3969@@ -194,28 +194,26 @@ config AUDIT_ARCH
3970
3971 config S390_SWITCH_AMODE
3972 bool "Switch kernel/user addressing modes"
3973+ default y
3974 help
3975 This option allows to switch the addressing modes of kernel and user
3976- space. The kernel parameter switch_amode=on will enable this feature,
3977- default is disabled. Enabling this (via kernel parameter) on machines
3978- earlier than IBM System z9-109 EC/BC will reduce system performance.
3979+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3980+ will reduce system performance.
3981
3982 Note that this option will also be selected by selecting the execute
3983- protection option below. Enabling the execute protection via the
3984- noexec kernel parameter will also switch the addressing modes,
3985- independent of the switch_amode kernel parameter.
3986+ protection option below. Enabling the execute protection will also
3987+ switch the addressing modes, independent of this option.
3988
3989
3990 config S390_EXEC_PROTECT
3991 bool "Data execute protection"
3992+ default y
3993 select S390_SWITCH_AMODE
3994 help
3995 This option allows to enable a buffer overflow protection for user
3996 space programs and it also selects the addressing mode option above.
3997- The kernel parameter noexec=on will enable this feature and also
3998- switch the addressing modes, default is disabled. Enabling this (via
3999- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4000- will reduce system performance.
4001+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
4002+ reduce system performance.
4003
4004 comment "Code generation options"
4005
4006diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4007index ae7c8f9..3f01a0c 100644
4008--- a/arch/s390/include/asm/atomic.h
4009+++ b/arch/s390/include/asm/atomic.h
4010@@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4011 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4012 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4013
4014+#define atomic64_read_unchecked(v) atomic64_read(v)
4015+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4016+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4017+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4018+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4019+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4020+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4021+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4022+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4023+
4024 #define smp_mb__before_atomic_dec() smp_mb()
4025 #define smp_mb__after_atomic_dec() smp_mb()
4026 #define smp_mb__before_atomic_inc() smp_mb()
4027diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4028index e885442..e3a2817 100644
4029--- a/arch/s390/include/asm/elf.h
4030+++ b/arch/s390/include/asm/elf.h
4031@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4032 that it will "exec", and that there is sufficient room for the brk. */
4033 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4034
4035+#ifdef CONFIG_PAX_ASLR
4036+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4037+
4038+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4039+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4040+#endif
4041+
4042 /* This yields a mask that user programs can use to figure out what
4043 instruction set this CPU supports. */
4044
4045diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4046index e37478e..9ce0e9f 100644
4047--- a/arch/s390/include/asm/setup.h
4048+++ b/arch/s390/include/asm/setup.h
4049@@ -50,13 +50,13 @@ extern unsigned long memory_end;
4050 void detect_memory_layout(struct mem_chunk chunk[]);
4051
4052 #ifdef CONFIG_S390_SWITCH_AMODE
4053-extern unsigned int switch_amode;
4054+#define switch_amode (1)
4055 #else
4056 #define switch_amode (0)
4057 #endif
4058
4059 #ifdef CONFIG_S390_EXEC_PROTECT
4060-extern unsigned int s390_noexec;
4061+#define s390_noexec (1)
4062 #else
4063 #define s390_noexec (0)
4064 #endif
4065diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4066index 8377e91..e28e6f1 100644
4067--- a/arch/s390/include/asm/uaccess.h
4068+++ b/arch/s390/include/asm/uaccess.h
4069@@ -232,6 +232,10 @@ static inline unsigned long __must_check
4070 copy_to_user(void __user *to, const void *from, unsigned long n)
4071 {
4072 might_fault();
4073+
4074+ if ((long)n < 0)
4075+ return n;
4076+
4077 if (access_ok(VERIFY_WRITE, to, n))
4078 n = __copy_to_user(to, from, n);
4079 return n;
4080@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4081 static inline unsigned long __must_check
4082 __copy_from_user(void *to, const void __user *from, unsigned long n)
4083 {
4084+ if ((long)n < 0)
4085+ return n;
4086+
4087 if (__builtin_constant_p(n) && (n <= 256))
4088 return uaccess.copy_from_user_small(n, from, to);
4089 else
4090@@ -283,6 +290,10 @@ static inline unsigned long __must_check
4091 copy_from_user(void *to, const void __user *from, unsigned long n)
4092 {
4093 might_fault();
4094+
4095+ if ((long)n < 0)
4096+ return n;
4097+
4098 if (access_ok(VERIFY_READ, from, n))
4099 n = __copy_from_user(to, from, n);
4100 else
4101diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4102index 639380a..72e3c02 100644
4103--- a/arch/s390/kernel/module.c
4104+++ b/arch/s390/kernel/module.c
4105@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4106
4107 /* Increase core size by size of got & plt and set start
4108 offsets for got and plt. */
4109- me->core_size = ALIGN(me->core_size, 4);
4110- me->arch.got_offset = me->core_size;
4111- me->core_size += me->arch.got_size;
4112- me->arch.plt_offset = me->core_size;
4113- me->core_size += me->arch.plt_size;
4114+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4115+ me->arch.got_offset = me->core_size_rw;
4116+ me->core_size_rw += me->arch.got_size;
4117+ me->arch.plt_offset = me->core_size_rx;
4118+ me->core_size_rx += me->arch.plt_size;
4119 return 0;
4120 }
4121
4122@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4123 if (info->got_initialized == 0) {
4124 Elf_Addr *gotent;
4125
4126- gotent = me->module_core + me->arch.got_offset +
4127+ gotent = me->module_core_rw + me->arch.got_offset +
4128 info->got_offset;
4129 *gotent = val;
4130 info->got_initialized = 1;
4131@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4132 else if (r_type == R_390_GOTENT ||
4133 r_type == R_390_GOTPLTENT)
4134 *(unsigned int *) loc =
4135- (val + (Elf_Addr) me->module_core - loc) >> 1;
4136+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4137 else if (r_type == R_390_GOT64 ||
4138 r_type == R_390_GOTPLT64)
4139 *(unsigned long *) loc = val;
4140@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4141 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4142 if (info->plt_initialized == 0) {
4143 unsigned int *ip;
4144- ip = me->module_core + me->arch.plt_offset +
4145+ ip = me->module_core_rx + me->arch.plt_offset +
4146 info->plt_offset;
4147 #ifndef CONFIG_64BIT
4148 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4149@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4150 val - loc + 0xffffUL < 0x1ffffeUL) ||
4151 (r_type == R_390_PLT32DBL &&
4152 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4153- val = (Elf_Addr) me->module_core +
4154+ val = (Elf_Addr) me->module_core_rx +
4155 me->arch.plt_offset +
4156 info->plt_offset;
4157 val += rela->r_addend - loc;
4158@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4159 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4160 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4161 val = val + rela->r_addend -
4162- ((Elf_Addr) me->module_core + me->arch.got_offset);
4163+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4164 if (r_type == R_390_GOTOFF16)
4165 *(unsigned short *) loc = val;
4166 else if (r_type == R_390_GOTOFF32)
4167@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4168 break;
4169 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4170 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4171- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4172+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4173 rela->r_addend - loc;
4174 if (r_type == R_390_GOTPC)
4175 *(unsigned int *) loc = val;
4176diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4177index 061479f..dbfb08c 100644
4178--- a/arch/s390/kernel/setup.c
4179+++ b/arch/s390/kernel/setup.c
4180@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4181 early_param("mem", early_parse_mem);
4182
4183 #ifdef CONFIG_S390_SWITCH_AMODE
4184-unsigned int switch_amode = 0;
4185-EXPORT_SYMBOL_GPL(switch_amode);
4186-
4187 static int set_amode_and_uaccess(unsigned long user_amode,
4188 unsigned long user32_amode)
4189 {
4190@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4191 return 0;
4192 }
4193 }
4194-
4195-/*
4196- * Switch kernel/user addressing modes?
4197- */
4198-static int __init early_parse_switch_amode(char *p)
4199-{
4200- switch_amode = 1;
4201- return 0;
4202-}
4203-early_param("switch_amode", early_parse_switch_amode);
4204-
4205 #else /* CONFIG_S390_SWITCH_AMODE */
4206 static inline int set_amode_and_uaccess(unsigned long user_amode,
4207 unsigned long user32_amode)
4208@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4209 }
4210 #endif /* CONFIG_S390_SWITCH_AMODE */
4211
4212-#ifdef CONFIG_S390_EXEC_PROTECT
4213-unsigned int s390_noexec = 0;
4214-EXPORT_SYMBOL_GPL(s390_noexec);
4215-
4216-/*
4217- * Enable execute protection?
4218- */
4219-static int __init early_parse_noexec(char *p)
4220-{
4221- if (!strncmp(p, "off", 3))
4222- return 0;
4223- switch_amode = 1;
4224- s390_noexec = 1;
4225- return 0;
4226-}
4227-early_param("noexec", early_parse_noexec);
4228-#endif /* CONFIG_S390_EXEC_PROTECT */
4229-
4230 static void setup_addressing_mode(void)
4231 {
4232 if (s390_noexec) {
4233diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4234index f4558cc..e461f37 100644
4235--- a/arch/s390/mm/mmap.c
4236+++ b/arch/s390/mm/mmap.c
4237@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4238 */
4239 if (mmap_is_legacy()) {
4240 mm->mmap_base = TASK_UNMAPPED_BASE;
4241+
4242+#ifdef CONFIG_PAX_RANDMMAP
4243+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4244+ mm->mmap_base += mm->delta_mmap;
4245+#endif
4246+
4247 mm->get_unmapped_area = arch_get_unmapped_area;
4248 mm->unmap_area = arch_unmap_area;
4249 } else {
4250 mm->mmap_base = mmap_base();
4251+
4252+#ifdef CONFIG_PAX_RANDMMAP
4253+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4254+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4255+#endif
4256+
4257 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4258 mm->unmap_area = arch_unmap_area_topdown;
4259 }
4260@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4261 */
4262 if (mmap_is_legacy()) {
4263 mm->mmap_base = TASK_UNMAPPED_BASE;
4264+
4265+#ifdef CONFIG_PAX_RANDMMAP
4266+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4267+ mm->mmap_base += mm->delta_mmap;
4268+#endif
4269+
4270 mm->get_unmapped_area = s390_get_unmapped_area;
4271 mm->unmap_area = arch_unmap_area;
4272 } else {
4273 mm->mmap_base = mmap_base();
4274+
4275+#ifdef CONFIG_PAX_RANDMMAP
4276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4277+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4278+#endif
4279+
4280 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4281 mm->unmap_area = arch_unmap_area_topdown;
4282 }
4283diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4284index 589d5c7..669e274 100644
4285--- a/arch/score/include/asm/system.h
4286+++ b/arch/score/include/asm/system.h
4287@@ -17,7 +17,7 @@ do { \
4288 #define finish_arch_switch(prev) do {} while (0)
4289
4290 typedef void (*vi_handler_t)(void);
4291-extern unsigned long arch_align_stack(unsigned long sp);
4292+#define arch_align_stack(x) (x)
4293
4294 #define mb() barrier()
4295 #define rmb() barrier()
4296diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4297index 25d0803..d6c8e36 100644
4298--- a/arch/score/kernel/process.c
4299+++ b/arch/score/kernel/process.c
4300@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4301
4302 return task_pt_regs(task)->cp0_epc;
4303 }
4304-
4305-unsigned long arch_align_stack(unsigned long sp)
4306-{
4307- return sp;
4308-}
4309diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4310index d936c1a..304a252 100644
4311--- a/arch/sh/boards/mach-hp6xx/pm.c
4312+++ b/arch/sh/boards/mach-hp6xx/pm.c
4313@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4314 return 0;
4315 }
4316
4317-static struct platform_suspend_ops hp6x0_pm_ops = {
4318+static const struct platform_suspend_ops hp6x0_pm_ops = {
4319 .enter = hp6x0_pm_enter,
4320 .valid = suspend_valid_only_mem,
4321 };
4322diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4323index 8a8a993..7b3079b 100644
4324--- a/arch/sh/kernel/cpu/sh4/sq.c
4325+++ b/arch/sh/kernel/cpu/sh4/sq.c
4326@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4327 NULL,
4328 };
4329
4330-static struct sysfs_ops sq_sysfs_ops = {
4331+static const struct sysfs_ops sq_sysfs_ops = {
4332 .show = sq_sysfs_show,
4333 .store = sq_sysfs_store,
4334 };
4335diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4336index ee3c2aa..c49cee6 100644
4337--- a/arch/sh/kernel/cpu/shmobile/pm.c
4338+++ b/arch/sh/kernel/cpu/shmobile/pm.c
4339@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4340 return 0;
4341 }
4342
4343-static struct platform_suspend_ops sh_pm_ops = {
4344+static const struct platform_suspend_ops sh_pm_ops = {
4345 .enter = sh_pm_enter,
4346 .valid = suspend_valid_only_mem,
4347 };
4348diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4349index 3e532d0..9faa306 100644
4350--- a/arch/sh/kernel/kgdb.c
4351+++ b/arch/sh/kernel/kgdb.c
4352@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4353 {
4354 }
4355
4356-struct kgdb_arch arch_kgdb_ops = {
4357+const struct kgdb_arch arch_kgdb_ops = {
4358 /* Breakpoint instruction: trapa #0x3c */
4359 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4360 .gdb_bpt_instr = { 0x3c, 0xc3 },
4361diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4362index afeb710..d1d1289 100644
4363--- a/arch/sh/mm/mmap.c
4364+++ b/arch/sh/mm/mmap.c
4365@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4366 addr = PAGE_ALIGN(addr);
4367
4368 vma = find_vma(mm, addr);
4369- if (TASK_SIZE - len >= addr &&
4370- (!vma || addr + len <= vma->vm_start))
4371+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4372 return addr;
4373 }
4374
4375@@ -106,7 +105,7 @@ full_search:
4376 }
4377 return -ENOMEM;
4378 }
4379- if (likely(!vma || addr + len <= vma->vm_start)) {
4380+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4381 /*
4382 * Remember the place where we stopped the search:
4383 */
4384@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4385 addr = PAGE_ALIGN(addr);
4386
4387 vma = find_vma(mm, addr);
4388- if (TASK_SIZE - len >= addr &&
4389- (!vma || addr + len <= vma->vm_start))
4390+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4391 return addr;
4392 }
4393
4394@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4395 /* make sure it can fit in the remaining address space */
4396 if (likely(addr > len)) {
4397 vma = find_vma(mm, addr-len);
4398- if (!vma || addr <= vma->vm_start) {
4399+ if (check_heap_stack_gap(vma, addr - len, len)) {
4400 /* remember the address as a hint for next time */
4401 return (mm->free_area_cache = addr-len);
4402 }
4403@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4404 if (unlikely(mm->mmap_base < len))
4405 goto bottomup;
4406
4407- addr = mm->mmap_base-len;
4408- if (do_colour_align)
4409- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4410+ addr = mm->mmap_base - len;
4411
4412 do {
4413+ if (do_colour_align)
4414+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4415 /*
4416 * Lookup failure means no vma is above this address,
4417 * else if new region fits below vma->vm_start,
4418 * return with success:
4419 */
4420 vma = find_vma(mm, addr);
4421- if (likely(!vma || addr+len <= vma->vm_start)) {
4422+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4423 /* remember the address as a hint for next time */
4424 return (mm->free_area_cache = addr);
4425 }
4426@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4427 mm->cached_hole_size = vma->vm_start - addr;
4428
4429 /* try just below the current vma->vm_start */
4430- addr = vma->vm_start-len;
4431- if (do_colour_align)
4432- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4433- } while (likely(len < vma->vm_start));
4434+ addr = skip_heap_stack_gap(vma, len);
4435+ } while (!IS_ERR_VALUE(addr));
4436
4437 bottomup:
4438 /*
4439diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
4440index 05ef538..dc9c857 100644
4441--- a/arch/sparc/Kconfig
4442+++ b/arch/sparc/Kconfig
4443@@ -32,6 +32,7 @@ config SPARC
4444
4445 config SPARC32
4446 def_bool !64BIT
4447+ select GENERIC_ATOMIC64
4448
4449 config SPARC64
4450 def_bool 64BIT
4451diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4452index 113225b..7fd04e7 100644
4453--- a/arch/sparc/Makefile
4454+++ b/arch/sparc/Makefile
4455@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4456 # Export what is needed by arch/sparc/boot/Makefile
4457 export VMLINUX_INIT VMLINUX_MAIN
4458 VMLINUX_INIT := $(head-y) $(init-y)
4459-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4460+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4461 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4462 VMLINUX_MAIN += $(drivers-y) $(net-y)
4463
4464diff --git a/arch/sparc/include/asm/atomic.h b/arch/sparc/include/asm/atomic.h
4465index 8ff83d8..4a459c2 100644
4466--- a/arch/sparc/include/asm/atomic.h
4467+++ b/arch/sparc/include/asm/atomic.h
4468@@ -4,5 +4,6 @@
4469 #include <asm/atomic_64.h>
4470 #else
4471 #include <asm/atomic_32.h>
4472+#include <asm-generic/atomic64.h>
4473 #endif
4474 #endif
4475diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4476index f5cc06f..f858d47 100644
4477--- a/arch/sparc/include/asm/atomic_64.h
4478+++ b/arch/sparc/include/asm/atomic_64.h
4479@@ -14,18 +14,40 @@
4480 #define ATOMIC64_INIT(i) { (i) }
4481
4482 #define atomic_read(v) ((v)->counter)
4483+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4484+{
4485+ return v->counter;
4486+}
4487 #define atomic64_read(v) ((v)->counter)
4488+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4489+{
4490+ return v->counter;
4491+}
4492
4493 #define atomic_set(v, i) (((v)->counter) = i)
4494+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4495+{
4496+ v->counter = i;
4497+}
4498 #define atomic64_set(v, i) (((v)->counter) = i)
4499+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4500+{
4501+ v->counter = i;
4502+}
4503
4504 extern void atomic_add(int, atomic_t *);
4505+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4506 extern void atomic64_add(long, atomic64_t *);
4507+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4508 extern void atomic_sub(int, atomic_t *);
4509+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4510 extern void atomic64_sub(long, atomic64_t *);
4511+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4512
4513 extern int atomic_add_ret(int, atomic_t *);
4514+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4515 extern long atomic64_add_ret(long, atomic64_t *);
4516+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4517 extern int atomic_sub_ret(int, atomic_t *);
4518 extern long atomic64_sub_ret(long, atomic64_t *);
4519
4520@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4521 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4522
4523 #define atomic_inc_return(v) atomic_add_ret(1, v)
4524+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4525+{
4526+ return atomic_add_ret_unchecked(1, v);
4527+}
4528 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4529+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4530+{
4531+ return atomic64_add_ret_unchecked(1, v);
4532+}
4533
4534 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4535 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4536
4537 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4538+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4539+{
4540+ return atomic_add_ret_unchecked(i, v);
4541+}
4542 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4543+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4544+{
4545+ return atomic64_add_ret_unchecked(i, v);
4546+}
4547
4548 /*
4549 * atomic_inc_and_test - increment and test
4550@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4551 * other cases.
4552 */
4553 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4554+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4555+{
4556+ return atomic_inc_return_unchecked(v) == 0;
4557+}
4558 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4559
4560 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4561@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4562 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4563
4564 #define atomic_inc(v) atomic_add(1, v)
4565+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4566+{
4567+ atomic_add_unchecked(1, v);
4568+}
4569 #define atomic64_inc(v) atomic64_add(1, v)
4570+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4571+{
4572+ atomic64_add_unchecked(1, v);
4573+}
4574
4575 #define atomic_dec(v) atomic_sub(1, v)
4576+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4577+{
4578+ atomic_sub_unchecked(1, v);
4579+}
4580 #define atomic64_dec(v) atomic64_sub(1, v)
4581+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4582+{
4583+ atomic64_sub_unchecked(1, v);
4584+}
4585
4586 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4587 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4588
4589 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4590+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4591+{
4592+ return cmpxchg(&v->counter, old, new);
4593+}
4594 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4595+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4596+{
4597+ return xchg(&v->counter, new);
4598+}
4599
4600 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4601 {
4602- int c, old;
4603+ int c, old, new;
4604 c = atomic_read(v);
4605 for (;;) {
4606- if (unlikely(c == (u)))
4607+ if (unlikely(c == u))
4608 break;
4609- old = atomic_cmpxchg((v), c, c + (a));
4610+
4611+ asm volatile("addcc %2, %0, %0\n"
4612+
4613+#ifdef CONFIG_PAX_REFCOUNT
4614+ "tvs %%icc, 6\n"
4615+#endif
4616+
4617+ : "=r" (new)
4618+ : "0" (c), "ir" (a)
4619+ : "cc");
4620+
4621+ old = atomic_cmpxchg(v, c, new);
4622 if (likely(old == c))
4623 break;
4624 c = old;
4625 }
4626- return c != (u);
4627+ return c != u;
4628 }
4629
4630 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4631@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4632 #define atomic64_cmpxchg(v, o, n) \
4633 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4634 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4635+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4636+{
4637+ return xchg(&v->counter, new);
4638+}
4639
4640 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4641 {
4642- long c, old;
4643+ long c, old, new;
4644 c = atomic64_read(v);
4645 for (;;) {
4646- if (unlikely(c == (u)))
4647+ if (unlikely(c == u))
4648 break;
4649- old = atomic64_cmpxchg((v), c, c + (a));
4650+
4651+ asm volatile("addcc %2, %0, %0\n"
4652+
4653+#ifdef CONFIG_PAX_REFCOUNT
4654+ "tvs %%xcc, 6\n"
4655+#endif
4656+
4657+ : "=r" (new)
4658+ : "0" (c), "ir" (a)
4659+ : "cc");
4660+
4661+ old = atomic64_cmpxchg(v, c, new);
4662 if (likely(old == c))
4663 break;
4664 c = old;
4665 }
4666- return c != (u);
4667+ return c != u;
4668 }
4669
4670 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4671diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4672index 41f85ae..fb54d5e 100644
4673--- a/arch/sparc/include/asm/cache.h
4674+++ b/arch/sparc/include/asm/cache.h
4675@@ -8,7 +8,7 @@
4676 #define _SPARC_CACHE_H
4677
4678 #define L1_CACHE_SHIFT 5
4679-#define L1_CACHE_BYTES 32
4680+#define L1_CACHE_BYTES 32UL
4681 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4682
4683 #ifdef CONFIG_SPARC32
4684diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4685index 5a8c308..38def92 100644
4686--- a/arch/sparc/include/asm/dma-mapping.h
4687+++ b/arch/sparc/include/asm/dma-mapping.h
4688@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4689 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4690 #define dma_is_consistent(d, h) (1)
4691
4692-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4693+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4694 extern struct bus_type pci_bus_type;
4695
4696-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4697+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4698 {
4699 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4700 if (dev->bus == &pci_bus_type)
4701@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4702 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4703 dma_addr_t *dma_handle, gfp_t flag)
4704 {
4705- struct dma_map_ops *ops = get_dma_ops(dev);
4706+ const struct dma_map_ops *ops = get_dma_ops(dev);
4707 void *cpu_addr;
4708
4709 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4710@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4711 static inline void dma_free_coherent(struct device *dev, size_t size,
4712 void *cpu_addr, dma_addr_t dma_handle)
4713 {
4714- struct dma_map_ops *ops = get_dma_ops(dev);
4715+ const struct dma_map_ops *ops = get_dma_ops(dev);
4716
4717 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4718 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4719diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4720index 381a1b5..b97e3ff 100644
4721--- a/arch/sparc/include/asm/elf_32.h
4722+++ b/arch/sparc/include/asm/elf_32.h
4723@@ -116,6 +116,13 @@ typedef struct {
4724
4725 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4726
4727+#ifdef CONFIG_PAX_ASLR
4728+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4729+
4730+#define PAX_DELTA_MMAP_LEN 16
4731+#define PAX_DELTA_STACK_LEN 16
4732+#endif
4733+
4734 /* This yields a mask that user programs can use to figure out what
4735 instruction set this cpu supports. This can NOT be done in userspace
4736 on Sparc. */
4737diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4738index 9968085..c2106ef 100644
4739--- a/arch/sparc/include/asm/elf_64.h
4740+++ b/arch/sparc/include/asm/elf_64.h
4741@@ -163,6 +163,12 @@ typedef struct {
4742 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4743 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4744
4745+#ifdef CONFIG_PAX_ASLR
4746+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4747+
4748+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4749+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4750+#endif
4751
4752 /* This yields a mask that user programs can use to figure out what
4753 instruction set this cpu supports. */
4754diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
4755index 156707b..aefa786 100644
4756--- a/arch/sparc/include/asm/page_32.h
4757+++ b/arch/sparc/include/asm/page_32.h
4758@@ -8,6 +8,8 @@
4759 #ifndef _SPARC_PAGE_H
4760 #define _SPARC_PAGE_H
4761
4762+#include <linux/const.h>
4763+
4764 #define PAGE_SHIFT 12
4765
4766 #ifndef __ASSEMBLY__
4767diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4768index e0cabe7..efd60f1 100644
4769--- a/arch/sparc/include/asm/pgtable_32.h
4770+++ b/arch/sparc/include/asm/pgtable_32.h
4771@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4772 BTFIXUPDEF_INT(page_none)
4773 BTFIXUPDEF_INT(page_copy)
4774 BTFIXUPDEF_INT(page_readonly)
4775+
4776+#ifdef CONFIG_PAX_PAGEEXEC
4777+BTFIXUPDEF_INT(page_shared_noexec)
4778+BTFIXUPDEF_INT(page_copy_noexec)
4779+BTFIXUPDEF_INT(page_readonly_noexec)
4780+#endif
4781+
4782 BTFIXUPDEF_INT(page_kernel)
4783
4784 #define PMD_SHIFT SUN4C_PMD_SHIFT
4785@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4786 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4787 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4788
4789+#ifdef CONFIG_PAX_PAGEEXEC
4790+extern pgprot_t PAGE_SHARED_NOEXEC;
4791+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4792+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4793+#else
4794+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4795+# define PAGE_COPY_NOEXEC PAGE_COPY
4796+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4797+#endif
4798+
4799 extern unsigned long page_kernel;
4800
4801 #ifdef MODULE
4802diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4803index 1407c07..7e10231 100644
4804--- a/arch/sparc/include/asm/pgtsrmmu.h
4805+++ b/arch/sparc/include/asm/pgtsrmmu.h
4806@@ -115,6 +115,13 @@
4807 SRMMU_EXEC | SRMMU_REF)
4808 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4809 SRMMU_EXEC | SRMMU_REF)
4810+
4811+#ifdef CONFIG_PAX_PAGEEXEC
4812+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4813+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4814+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4815+#endif
4816+
4817 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4818 SRMMU_DIRTY | SRMMU_REF)
4819
4820diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4821index 43e5147..47622a1 100644
4822--- a/arch/sparc/include/asm/spinlock_64.h
4823+++ b/arch/sparc/include/asm/spinlock_64.h
4824@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4825
4826 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4827
4828-static void inline arch_read_lock(raw_rwlock_t *lock)
4829+static inline void arch_read_lock(raw_rwlock_t *lock)
4830 {
4831 unsigned long tmp1, tmp2;
4832
4833 __asm__ __volatile__ (
4834 "1: ldsw [%2], %0\n"
4835 " brlz,pn %0, 2f\n"
4836-"4: add %0, 1, %1\n"
4837+"4: addcc %0, 1, %1\n"
4838+
4839+#ifdef CONFIG_PAX_REFCOUNT
4840+" tvs %%icc, 6\n"
4841+#endif
4842+
4843 " cas [%2], %0, %1\n"
4844 " cmp %0, %1\n"
4845 " bne,pn %%icc, 1b\n"
4846@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4847 " .previous"
4848 : "=&r" (tmp1), "=&r" (tmp2)
4849 : "r" (lock)
4850- : "memory");
4851+ : "memory", "cc");
4852 }
4853
4854-static int inline arch_read_trylock(raw_rwlock_t *lock)
4855+static inline int arch_read_trylock(raw_rwlock_t *lock)
4856 {
4857 int tmp1, tmp2;
4858
4859@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4860 "1: ldsw [%2], %0\n"
4861 " brlz,a,pn %0, 2f\n"
4862 " mov 0, %0\n"
4863-" add %0, 1, %1\n"
4864+" addcc %0, 1, %1\n"
4865+
4866+#ifdef CONFIG_PAX_REFCOUNT
4867+" tvs %%icc, 6\n"
4868+#endif
4869+
4870 " cas [%2], %0, %1\n"
4871 " cmp %0, %1\n"
4872 " bne,pn %%icc, 1b\n"
4873@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4874 return tmp1;
4875 }
4876
4877-static void inline arch_read_unlock(raw_rwlock_t *lock)
4878+static inline void arch_read_unlock(raw_rwlock_t *lock)
4879 {
4880 unsigned long tmp1, tmp2;
4881
4882 __asm__ __volatile__(
4883 "1: lduw [%2], %0\n"
4884-" sub %0, 1, %1\n"
4885+" subcc %0, 1, %1\n"
4886+
4887+#ifdef CONFIG_PAX_REFCOUNT
4888+" tvs %%icc, 6\n"
4889+#endif
4890+
4891 " cas [%2], %0, %1\n"
4892 " cmp %0, %1\n"
4893 " bne,pn %%xcc, 1b\n"
4894@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4895 : "memory");
4896 }
4897
4898-static void inline arch_write_lock(raw_rwlock_t *lock)
4899+static inline void arch_write_lock(raw_rwlock_t *lock)
4900 {
4901 unsigned long mask, tmp1, tmp2;
4902
4903@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4904 : "memory");
4905 }
4906
4907-static void inline arch_write_unlock(raw_rwlock_t *lock)
4908+static inline void arch_write_unlock(raw_rwlock_t *lock)
4909 {
4910 __asm__ __volatile__(
4911 " stw %%g0, [%0]"
4912@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4913 : "memory");
4914 }
4915
4916-static int inline arch_write_trylock(raw_rwlock_t *lock)
4917+static inline int arch_write_trylock(raw_rwlock_t *lock)
4918 {
4919 unsigned long mask, tmp1, tmp2, result;
4920
4921diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4922index 844d73a..f787fb9 100644
4923--- a/arch/sparc/include/asm/thread_info_32.h
4924+++ b/arch/sparc/include/asm/thread_info_32.h
4925@@ -50,6 +50,8 @@ struct thread_info {
4926 unsigned long w_saved;
4927
4928 struct restart_block restart_block;
4929+
4930+ unsigned long lowest_stack;
4931 };
4932
4933 /*
4934diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4935index f78ad9a..9f55fc7 100644
4936--- a/arch/sparc/include/asm/thread_info_64.h
4937+++ b/arch/sparc/include/asm/thread_info_64.h
4938@@ -68,6 +68,8 @@ struct thread_info {
4939 struct pt_regs *kern_una_regs;
4940 unsigned int kern_una_insn;
4941
4942+ unsigned long lowest_stack;
4943+
4944 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4945 };
4946
4947diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4948index e88fbe5..96b0ce5 100644
4949--- a/arch/sparc/include/asm/uaccess.h
4950+++ b/arch/sparc/include/asm/uaccess.h
4951@@ -1,5 +1,13 @@
4952 #ifndef ___ASM_SPARC_UACCESS_H
4953 #define ___ASM_SPARC_UACCESS_H
4954+
4955+#ifdef __KERNEL__
4956+#ifndef __ASSEMBLY__
4957+#include <linux/types.h>
4958+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4959+#endif
4960+#endif
4961+
4962 #if defined(__sparc__) && defined(__arch64__)
4963 #include <asm/uaccess_64.h>
4964 #else
4965diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4966index 8303ac4..07f333d 100644
4967--- a/arch/sparc/include/asm/uaccess_32.h
4968+++ b/arch/sparc/include/asm/uaccess_32.h
4969@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4970
4971 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4972 {
4973- if (n && __access_ok((unsigned long) to, n))
4974+ if ((long)n < 0)
4975+ return n;
4976+
4977+ if (n && __access_ok((unsigned long) to, n)) {
4978+ if (!__builtin_constant_p(n))
4979+ check_object_size(from, n, true);
4980 return __copy_user(to, (__force void __user *) from, n);
4981- else
4982+ } else
4983 return n;
4984 }
4985
4986 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4987 {
4988+ if ((long)n < 0)
4989+ return n;
4990+
4991+ if (!__builtin_constant_p(n))
4992+ check_object_size(from, n, true);
4993+
4994 return __copy_user(to, (__force void __user *) from, n);
4995 }
4996
4997 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4998 {
4999- if (n && __access_ok((unsigned long) from, n))
5000+ if ((long)n < 0)
5001+ return n;
5002+
5003+ if (n && __access_ok((unsigned long) from, n)) {
5004+ if (!__builtin_constant_p(n))
5005+ check_object_size(to, n, false);
5006 return __copy_user((__force void __user *) to, from, n);
5007- else
5008+ } else
5009 return n;
5010 }
5011
5012 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5013 {
5014+ if ((long)n < 0)
5015+ return n;
5016+
5017 return __copy_user((__force void __user *) to, from, n);
5018 }
5019
5020diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5021index 9ea271e..7b8a271 100644
5022--- a/arch/sparc/include/asm/uaccess_64.h
5023+++ b/arch/sparc/include/asm/uaccess_64.h
5024@@ -9,6 +9,7 @@
5025 #include <linux/compiler.h>
5026 #include <linux/string.h>
5027 #include <linux/thread_info.h>
5028+#include <linux/kernel.h>
5029 #include <asm/asi.h>
5030 #include <asm/system.h>
5031 #include <asm/spitfire.h>
5032@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5033 static inline unsigned long __must_check
5034 copy_from_user(void *to, const void __user *from, unsigned long size)
5035 {
5036- unsigned long ret = ___copy_from_user(to, from, size);
5037+ unsigned long ret;
5038
5039+ if ((long)size < 0 || size > INT_MAX)
5040+ return size;
5041+
5042+ if (!__builtin_constant_p(size))
5043+ check_object_size(to, size, false);
5044+
5045+ ret = ___copy_from_user(to, from, size);
5046 if (unlikely(ret))
5047 ret = copy_from_user_fixup(to, from, size);
5048 return ret;
5049@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5050 static inline unsigned long __must_check
5051 copy_to_user(void __user *to, const void *from, unsigned long size)
5052 {
5053- unsigned long ret = ___copy_to_user(to, from, size);
5054+ unsigned long ret;
5055
5056+ if ((long)size < 0 || size > INT_MAX)
5057+ return size;
5058+
5059+ if (!__builtin_constant_p(size))
5060+ check_object_size(from, size, true);
5061+
5062+ ret = ___copy_to_user(to, from, size);
5063 if (unlikely(ret))
5064 ret = copy_to_user_fixup(to, from, size);
5065 return ret;
5066diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5067index 2782681..77ded84 100644
5068--- a/arch/sparc/kernel/Makefile
5069+++ b/arch/sparc/kernel/Makefile
5070@@ -3,7 +3,7 @@
5071 #
5072
5073 asflags-y := -ansi
5074-ccflags-y := -Werror
5075+#ccflags-y := -Werror
5076
5077 extra-y := head_$(BITS).o
5078 extra-y += init_task.o
5079diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5080index 7690cc2..ece64c9 100644
5081--- a/arch/sparc/kernel/iommu.c
5082+++ b/arch/sparc/kernel/iommu.c
5083@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5084 spin_unlock_irqrestore(&iommu->lock, flags);
5085 }
5086
5087-static struct dma_map_ops sun4u_dma_ops = {
5088+static const struct dma_map_ops sun4u_dma_ops = {
5089 .alloc_coherent = dma_4u_alloc_coherent,
5090 .free_coherent = dma_4u_free_coherent,
5091 .map_page = dma_4u_map_page,
5092@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5093 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5094 };
5095
5096-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5097+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5098 EXPORT_SYMBOL(dma_ops);
5099
5100 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5101diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5102index 9f61fd8..bd048db 100644
5103--- a/arch/sparc/kernel/ioport.c
5104+++ b/arch/sparc/kernel/ioport.c
5105@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5106 BUG();
5107 }
5108
5109-struct dma_map_ops sbus_dma_ops = {
5110+const struct dma_map_ops sbus_dma_ops = {
5111 .alloc_coherent = sbus_alloc_coherent,
5112 .free_coherent = sbus_free_coherent,
5113 .map_page = sbus_map_page,
5114@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5115 .sync_sg_for_device = sbus_sync_sg_for_device,
5116 };
5117
5118-struct dma_map_ops *dma_ops = &sbus_dma_ops;
5119+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5120 EXPORT_SYMBOL(dma_ops);
5121
5122 static int __init sparc_register_ioport(void)
5123@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5124 }
5125 }
5126
5127-struct dma_map_ops pci32_dma_ops = {
5128+const struct dma_map_ops pci32_dma_ops = {
5129 .alloc_coherent = pci32_alloc_coherent,
5130 .free_coherent = pci32_free_coherent,
5131 .map_page = pci32_map_page,
5132diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5133index 04df4ed..55c4b6e 100644
5134--- a/arch/sparc/kernel/kgdb_32.c
5135+++ b/arch/sparc/kernel/kgdb_32.c
5136@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5137 {
5138 }
5139
5140-struct kgdb_arch arch_kgdb_ops = {
5141+const struct kgdb_arch arch_kgdb_ops = {
5142 /* Breakpoint instruction: ta 0x7d */
5143 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5144 };
5145diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5146index f5a0fd4..d886f71 100644
5147--- a/arch/sparc/kernel/kgdb_64.c
5148+++ b/arch/sparc/kernel/kgdb_64.c
5149@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5150 {
5151 }
5152
5153-struct kgdb_arch arch_kgdb_ops = {
5154+const struct kgdb_arch arch_kgdb_ops = {
5155 /* Breakpoint instruction: ta 0x72 */
5156 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5157 };
5158diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5159index 23c33ff..d137fbd 100644
5160--- a/arch/sparc/kernel/pci_sun4v.c
5161+++ b/arch/sparc/kernel/pci_sun4v.c
5162@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5163 spin_unlock_irqrestore(&iommu->lock, flags);
5164 }
5165
5166-static struct dma_map_ops sun4v_dma_ops = {
5167+static const struct dma_map_ops sun4v_dma_ops = {
5168 .alloc_coherent = dma_4v_alloc_coherent,
5169 .free_coherent = dma_4v_free_coherent,
5170 .map_page = dma_4v_map_page,
5171diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5172index c49865b..b41a81b 100644
5173--- a/arch/sparc/kernel/process_32.c
5174+++ b/arch/sparc/kernel/process_32.c
5175@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
5176 rw->ins[4], rw->ins[5],
5177 rw->ins[6],
5178 rw->ins[7]);
5179- printk("%pS\n", (void *) rw->ins[7]);
5180+ printk("%pA\n", (void *) rw->ins[7]);
5181 rw = (struct reg_window32 *) rw->ins[6];
5182 }
5183 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5184@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
5185
5186 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5187 r->psr, r->pc, r->npc, r->y, print_tainted());
5188- printk("PC: <%pS>\n", (void *) r->pc);
5189+ printk("PC: <%pA>\n", (void *) r->pc);
5190 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5191 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5192 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5193 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5194 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5195 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5196- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5197+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5198
5199 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5200 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5201@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5202 rw = (struct reg_window32 *) fp;
5203 pc = rw->ins[7];
5204 printk("[%08lx : ", pc);
5205- printk("%pS ] ", (void *) pc);
5206+ printk("%pA ] ", (void *) pc);
5207 fp = rw->ins[6];
5208 } while (++count < 16);
5209 printk("\n");
5210diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5211index cb70476..3d0c191 100644
5212--- a/arch/sparc/kernel/process_64.c
5213+++ b/arch/sparc/kernel/process_64.c
5214@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
5215 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5216 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5217 if (regs->tstate & TSTATE_PRIV)
5218- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5219+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5220 }
5221
5222 void show_regs(struct pt_regs *regs)
5223 {
5224 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5225 regs->tpc, regs->tnpc, regs->y, print_tainted());
5226- printk("TPC: <%pS>\n", (void *) regs->tpc);
5227+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5228 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5229 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5230 regs->u_regs[3]);
5231@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5232 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5233 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5234 regs->u_regs[15]);
5235- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5236+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5237 show_regwindow(regs);
5238 }
5239
5240@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5241 ((tp && tp->task) ? tp->task->pid : -1));
5242
5243 if (gp->tstate & TSTATE_PRIV) {
5244- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5245+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5246 (void *) gp->tpc,
5247 (void *) gp->o7,
5248 (void *) gp->i7,
5249diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5250index 6edc4e5..06a69b4 100644
5251--- a/arch/sparc/kernel/sigutil_64.c
5252+++ b/arch/sparc/kernel/sigutil_64.c
5253@@ -2,6 +2,7 @@
5254 #include <linux/types.h>
5255 #include <linux/thread_info.h>
5256 #include <linux/uaccess.h>
5257+#include <linux/errno.h>
5258
5259 #include <asm/sigcontext.h>
5260 #include <asm/fpumacro.h>
5261diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5262index 3a82e65..ce0a53a 100644
5263--- a/arch/sparc/kernel/sys_sparc_32.c
5264+++ b/arch/sparc/kernel/sys_sparc_32.c
5265@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5266 if (ARCH_SUN4C && len > 0x20000000)
5267 return -ENOMEM;
5268 if (!addr)
5269- addr = TASK_UNMAPPED_BASE;
5270+ addr = current->mm->mmap_base;
5271
5272 if (flags & MAP_SHARED)
5273 addr = COLOUR_ALIGN(addr);
5274@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5275 }
5276 if (TASK_SIZE - PAGE_SIZE - len < addr)
5277 return -ENOMEM;
5278- if (!vmm || addr + len <= vmm->vm_start)
5279+ if (check_heap_stack_gap(vmm, addr, len))
5280 return addr;
5281 addr = vmm->vm_end;
5282 if (flags & MAP_SHARED)
5283diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5284index cfa0e19..98972ac 100644
5285--- a/arch/sparc/kernel/sys_sparc_64.c
5286+++ b/arch/sparc/kernel/sys_sparc_64.c
5287@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5288 /* We do not accept a shared mapping if it would violate
5289 * cache aliasing constraints.
5290 */
5291- if ((flags & MAP_SHARED) &&
5292+ if ((filp || (flags & MAP_SHARED)) &&
5293 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5294 return -EINVAL;
5295 return addr;
5296@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5297 if (filp || (flags & MAP_SHARED))
5298 do_color_align = 1;
5299
5300+#ifdef CONFIG_PAX_RANDMMAP
5301+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5302+#endif
5303+
5304 if (addr) {
5305 if (do_color_align)
5306 addr = COLOUR_ALIGN(addr, pgoff);
5307@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5308 addr = PAGE_ALIGN(addr);
5309
5310 vma = find_vma(mm, addr);
5311- if (task_size - len >= addr &&
5312- (!vma || addr + len <= vma->vm_start))
5313+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5314 return addr;
5315 }
5316
5317 if (len > mm->cached_hole_size) {
5318- start_addr = addr = mm->free_area_cache;
5319+ start_addr = addr = mm->free_area_cache;
5320 } else {
5321- start_addr = addr = TASK_UNMAPPED_BASE;
5322+ start_addr = addr = mm->mmap_base;
5323 mm->cached_hole_size = 0;
5324 }
5325
5326@@ -175,14 +178,14 @@ full_search:
5327 vma = find_vma(mm, VA_EXCLUDE_END);
5328 }
5329 if (unlikely(task_size < addr)) {
5330- if (start_addr != TASK_UNMAPPED_BASE) {
5331- start_addr = addr = TASK_UNMAPPED_BASE;
5332+ if (start_addr != mm->mmap_base) {
5333+ start_addr = addr = mm->mmap_base;
5334 mm->cached_hole_size = 0;
5335 goto full_search;
5336 }
5337 return -ENOMEM;
5338 }
5339- if (likely(!vma || addr + len <= vma->vm_start)) {
5340+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5341 /*
5342 * Remember the place where we stopped the search:
5343 */
5344@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5345 /* We do not accept a shared mapping if it would violate
5346 * cache aliasing constraints.
5347 */
5348- if ((flags & MAP_SHARED) &&
5349+ if ((filp || (flags & MAP_SHARED)) &&
5350 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5351 return -EINVAL;
5352 return addr;
5353@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5354 addr = PAGE_ALIGN(addr);
5355
5356 vma = find_vma(mm, addr);
5357- if (task_size - len >= addr &&
5358- (!vma || addr + len <= vma->vm_start))
5359+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5360 return addr;
5361 }
5362
5363@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5364 /* make sure it can fit in the remaining address space */
5365 if (likely(addr > len)) {
5366 vma = find_vma(mm, addr-len);
5367- if (!vma || addr <= vma->vm_start) {
5368+ if (check_heap_stack_gap(vma, addr - len, len)) {
5369 /* remember the address as a hint for next time */
5370 return (mm->free_area_cache = addr-len);
5371 }
5372@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5373 if (unlikely(mm->mmap_base < len))
5374 goto bottomup;
5375
5376- addr = mm->mmap_base-len;
5377- if (do_color_align)
5378- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5379+ addr = mm->mmap_base - len;
5380
5381 do {
5382+ if (do_color_align)
5383+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5384 /*
5385 * Lookup failure means no vma is above this address,
5386 * else if new region fits below vma->vm_start,
5387 * return with success:
5388 */
5389 vma = find_vma(mm, addr);
5390- if (likely(!vma || addr+len <= vma->vm_start)) {
5391+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5392 /* remember the address as a hint for next time */
5393 return (mm->free_area_cache = addr);
5394 }
5395@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5396 mm->cached_hole_size = vma->vm_start - addr;
5397
5398 /* try just below the current vma->vm_start */
5399- addr = vma->vm_start-len;
5400- if (do_color_align)
5401- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5402- } while (likely(len < vma->vm_start));
5403+ addr = skip_heap_stack_gap(vma, len);
5404+ } while (!IS_ERR_VALUE(addr));
5405
5406 bottomup:
5407 /*
5408@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5409 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5410 sysctl_legacy_va_layout) {
5411 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5412+
5413+#ifdef CONFIG_PAX_RANDMMAP
5414+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5415+ mm->mmap_base += mm->delta_mmap;
5416+#endif
5417+
5418 mm->get_unmapped_area = arch_get_unmapped_area;
5419 mm->unmap_area = arch_unmap_area;
5420 } else {
5421@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5422 gap = (task_size / 6 * 5);
5423
5424 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5425+
5426+#ifdef CONFIG_PAX_RANDMMAP
5427+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5428+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5429+#endif
5430+
5431 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5432 mm->unmap_area = arch_unmap_area_topdown;
5433 }
5434diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5435index c0490c7..84959d1 100644
5436--- a/arch/sparc/kernel/traps_32.c
5437+++ b/arch/sparc/kernel/traps_32.c
5438@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5439 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5440 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5441
5442+extern void gr_handle_kernel_exploit(void);
5443+
5444 void die_if_kernel(char *str, struct pt_regs *regs)
5445 {
5446 static int die_counter;
5447@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5448 count++ < 30 &&
5449 (((unsigned long) rw) >= PAGE_OFFSET) &&
5450 !(((unsigned long) rw) & 0x7)) {
5451- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5452+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5453 (void *) rw->ins[7]);
5454 rw = (struct reg_window32 *)rw->ins[6];
5455 }
5456 }
5457 printk("Instruction DUMP:");
5458 instruction_dump ((unsigned long *) regs->pc);
5459- if(regs->psr & PSR_PS)
5460+ if(regs->psr & PSR_PS) {
5461+ gr_handle_kernel_exploit();
5462 do_exit(SIGKILL);
5463+ }
5464 do_exit(SIGSEGV);
5465 }
5466
5467diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5468index 10f7bb9..cdb6793 100644
5469--- a/arch/sparc/kernel/traps_64.c
5470+++ b/arch/sparc/kernel/traps_64.c
5471@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5472 i + 1,
5473 p->trapstack[i].tstate, p->trapstack[i].tpc,
5474 p->trapstack[i].tnpc, p->trapstack[i].tt);
5475- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5476+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5477 }
5478 }
5479
5480@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5481
5482 lvl -= 0x100;
5483 if (regs->tstate & TSTATE_PRIV) {
5484+
5485+#ifdef CONFIG_PAX_REFCOUNT
5486+ if (lvl == 6)
5487+ pax_report_refcount_overflow(regs);
5488+#endif
5489+
5490 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5491 die_if_kernel(buffer, regs);
5492 }
5493@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5494 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5495 {
5496 char buffer[32];
5497-
5498+
5499 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5500 0, lvl, SIGTRAP) == NOTIFY_STOP)
5501 return;
5502
5503+#ifdef CONFIG_PAX_REFCOUNT
5504+ if (lvl == 6)
5505+ pax_report_refcount_overflow(regs);
5506+#endif
5507+
5508 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5509
5510 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5511@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5512 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5513 printk("%s" "ERROR(%d): ",
5514 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5515- printk("TPC<%pS>\n", (void *) regs->tpc);
5516+ printk("TPC<%pA>\n", (void *) regs->tpc);
5517 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5518 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5519 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5520@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5521 smp_processor_id(),
5522 (type & 0x1) ? 'I' : 'D',
5523 regs->tpc);
5524- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5525+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5526 panic("Irrecoverable Cheetah+ parity error.");
5527 }
5528
5529@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5530 smp_processor_id(),
5531 (type & 0x1) ? 'I' : 'D',
5532 regs->tpc);
5533- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5534+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5535 }
5536
5537 struct sun4v_error_entry {
5538@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5539
5540 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5541 regs->tpc, tl);
5542- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5543+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5544 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5545- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5546+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5547 (void *) regs->u_regs[UREG_I7]);
5548 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5549 "pte[%lx] error[%lx]\n",
5550@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5551
5552 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5553 regs->tpc, tl);
5554- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5555+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5556 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5557- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5558+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5559 (void *) regs->u_regs[UREG_I7]);
5560 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5561 "pte[%lx] error[%lx]\n",
5562@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5563 fp = (unsigned long)sf->fp + STACK_BIAS;
5564 }
5565
5566- printk(" [%016lx] %pS\n", pc, (void *) pc);
5567+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5568 } while (++count < 16);
5569 }
5570
5571@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5572 return (struct reg_window *) (fp + STACK_BIAS);
5573 }
5574
5575+extern void gr_handle_kernel_exploit(void);
5576+
5577 void die_if_kernel(char *str, struct pt_regs *regs)
5578 {
5579 static int die_counter;
5580@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5581 while (rw &&
5582 count++ < 30&&
5583 is_kernel_stack(current, rw)) {
5584- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5585+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5586 (void *) rw->ins[7]);
5587
5588 rw = kernel_stack_up(rw);
5589@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5590 }
5591 user_instruction_dump ((unsigned int __user *) regs->tpc);
5592 }
5593- if (regs->tstate & TSTATE_PRIV)
5594+ if (regs->tstate & TSTATE_PRIV) {
5595+ gr_handle_kernel_exploit();
5596 do_exit(SIGKILL);
5597+ }
5598+
5599 do_exit(SIGSEGV);
5600 }
5601 EXPORT_SYMBOL(die_if_kernel);
5602diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5603index be183fe..1c8d332 100644
5604--- a/arch/sparc/kernel/una_asm_64.S
5605+++ b/arch/sparc/kernel/una_asm_64.S
5606@@ -127,7 +127,7 @@ do_int_load:
5607 wr %o5, 0x0, %asi
5608 retl
5609 mov 0, %o0
5610- .size __do_int_load, .-__do_int_load
5611+ .size do_int_load, .-do_int_load
5612
5613 .section __ex_table,"a"
5614 .word 4b, __retl_efault
5615diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5616index 3792099..2af17d8 100644
5617--- a/arch/sparc/kernel/unaligned_64.c
5618+++ b/arch/sparc/kernel/unaligned_64.c
5619@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5620 if (count < 5) {
5621 last_time = jiffies;
5622 count++;
5623- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5624+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5625 regs->tpc, (void *) regs->tpc);
5626 }
5627 }
5628diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5629index e75faf0..24f12f9 100644
5630--- a/arch/sparc/lib/Makefile
5631+++ b/arch/sparc/lib/Makefile
5632@@ -2,7 +2,7 @@
5633 #
5634
5635 asflags-y := -ansi -DST_DIV0=0x02
5636-ccflags-y := -Werror
5637+#ccflags-y := -Werror
5638
5639 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5640 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5641diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5642index 0268210..f0291ca 100644
5643--- a/arch/sparc/lib/atomic_64.S
5644+++ b/arch/sparc/lib/atomic_64.S
5645@@ -18,7 +18,12 @@
5646 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5647 BACKOFF_SETUP(%o2)
5648 1: lduw [%o1], %g1
5649- add %g1, %o0, %g7
5650+ addcc %g1, %o0, %g7
5651+
5652+#ifdef CONFIG_PAX_REFCOUNT
5653+ tvs %icc, 6
5654+#endif
5655+
5656 cas [%o1], %g1, %g7
5657 cmp %g1, %g7
5658 bne,pn %icc, 2f
5659@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5660 2: BACKOFF_SPIN(%o2, %o3, 1b)
5661 .size atomic_add, .-atomic_add
5662
5663+ .globl atomic_add_unchecked
5664+ .type atomic_add_unchecked,#function
5665+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5666+ BACKOFF_SETUP(%o2)
5667+1: lduw [%o1], %g1
5668+ add %g1, %o0, %g7
5669+ cas [%o1], %g1, %g7
5670+ cmp %g1, %g7
5671+ bne,pn %icc, 2f
5672+ nop
5673+ retl
5674+ nop
5675+2: BACKOFF_SPIN(%o2, %o3, 1b)
5676+ .size atomic_add_unchecked, .-atomic_add_unchecked
5677+
5678 .globl atomic_sub
5679 .type atomic_sub,#function
5680 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5681 BACKOFF_SETUP(%o2)
5682 1: lduw [%o1], %g1
5683- sub %g1, %o0, %g7
5684+ subcc %g1, %o0, %g7
5685+
5686+#ifdef CONFIG_PAX_REFCOUNT
5687+ tvs %icc, 6
5688+#endif
5689+
5690 cas [%o1], %g1, %g7
5691 cmp %g1, %g7
5692 bne,pn %icc, 2f
5693@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5694 2: BACKOFF_SPIN(%o2, %o3, 1b)
5695 .size atomic_sub, .-atomic_sub
5696
5697+ .globl atomic_sub_unchecked
5698+ .type atomic_sub_unchecked,#function
5699+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5700+ BACKOFF_SETUP(%o2)
5701+1: lduw [%o1], %g1
5702+ sub %g1, %o0, %g7
5703+ cas [%o1], %g1, %g7
5704+ cmp %g1, %g7
5705+ bne,pn %icc, 2f
5706+ nop
5707+ retl
5708+ nop
5709+2: BACKOFF_SPIN(%o2, %o3, 1b)
5710+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5711+
5712 .globl atomic_add_ret
5713 .type atomic_add_ret,#function
5714 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5715 BACKOFF_SETUP(%o2)
5716 1: lduw [%o1], %g1
5717- add %g1, %o0, %g7
5718+ addcc %g1, %o0, %g7
5719+
5720+#ifdef CONFIG_PAX_REFCOUNT
5721+ tvs %icc, 6
5722+#endif
5723+
5724 cas [%o1], %g1, %g7
5725 cmp %g1, %g7
5726 bne,pn %icc, 2f
5727@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5728 2: BACKOFF_SPIN(%o2, %o3, 1b)
5729 .size atomic_add_ret, .-atomic_add_ret
5730
5731+ .globl atomic_add_ret_unchecked
5732+ .type atomic_add_ret_unchecked,#function
5733+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5734+ BACKOFF_SETUP(%o2)
5735+1: lduw [%o1], %g1
5736+ addcc %g1, %o0, %g7
5737+ cas [%o1], %g1, %g7
5738+ cmp %g1, %g7
5739+ bne,pn %icc, 2f
5740+ add %g7, %o0, %g7
5741+ sra %g7, 0, %o0
5742+ retl
5743+ nop
5744+2: BACKOFF_SPIN(%o2, %o3, 1b)
5745+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5746+
5747 .globl atomic_sub_ret
5748 .type atomic_sub_ret,#function
5749 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5750 BACKOFF_SETUP(%o2)
5751 1: lduw [%o1], %g1
5752- sub %g1, %o0, %g7
5753+ subcc %g1, %o0, %g7
5754+
5755+#ifdef CONFIG_PAX_REFCOUNT
5756+ tvs %icc, 6
5757+#endif
5758+
5759 cas [%o1], %g1, %g7
5760 cmp %g1, %g7
5761 bne,pn %icc, 2f
5762@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5763 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5764 BACKOFF_SETUP(%o2)
5765 1: ldx [%o1], %g1
5766- add %g1, %o0, %g7
5767+ addcc %g1, %o0, %g7
5768+
5769+#ifdef CONFIG_PAX_REFCOUNT
5770+ tvs %xcc, 6
5771+#endif
5772+
5773 casx [%o1], %g1, %g7
5774 cmp %g1, %g7
5775 bne,pn %xcc, 2f
5776@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5777 2: BACKOFF_SPIN(%o2, %o3, 1b)
5778 .size atomic64_add, .-atomic64_add
5779
5780+ .globl atomic64_add_unchecked
5781+ .type atomic64_add_unchecked,#function
5782+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5783+ BACKOFF_SETUP(%o2)
5784+1: ldx [%o1], %g1
5785+ addcc %g1, %o0, %g7
5786+ casx [%o1], %g1, %g7
5787+ cmp %g1, %g7
5788+ bne,pn %xcc, 2f
5789+ nop
5790+ retl
5791+ nop
5792+2: BACKOFF_SPIN(%o2, %o3, 1b)
5793+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5794+
5795 .globl atomic64_sub
5796 .type atomic64_sub,#function
5797 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5798 BACKOFF_SETUP(%o2)
5799 1: ldx [%o1], %g1
5800- sub %g1, %o0, %g7
5801+ subcc %g1, %o0, %g7
5802+
5803+#ifdef CONFIG_PAX_REFCOUNT
5804+ tvs %xcc, 6
5805+#endif
5806+
5807 casx [%o1], %g1, %g7
5808 cmp %g1, %g7
5809 bne,pn %xcc, 2f
5810@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5811 2: BACKOFF_SPIN(%o2, %o3, 1b)
5812 .size atomic64_sub, .-atomic64_sub
5813
5814+ .globl atomic64_sub_unchecked
5815+ .type atomic64_sub_unchecked,#function
5816+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5817+ BACKOFF_SETUP(%o2)
5818+1: ldx [%o1], %g1
5819+ subcc %g1, %o0, %g7
5820+ casx [%o1], %g1, %g7
5821+ cmp %g1, %g7
5822+ bne,pn %xcc, 2f
5823+ nop
5824+ retl
5825+ nop
5826+2: BACKOFF_SPIN(%o2, %o3, 1b)
5827+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5828+
5829 .globl atomic64_add_ret
5830 .type atomic64_add_ret,#function
5831 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5832 BACKOFF_SETUP(%o2)
5833 1: ldx [%o1], %g1
5834- add %g1, %o0, %g7
5835+ addcc %g1, %o0, %g7
5836+
5837+#ifdef CONFIG_PAX_REFCOUNT
5838+ tvs %xcc, 6
5839+#endif
5840+
5841 casx [%o1], %g1, %g7
5842 cmp %g1, %g7
5843 bne,pn %xcc, 2f
5844@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5845 2: BACKOFF_SPIN(%o2, %o3, 1b)
5846 .size atomic64_add_ret, .-atomic64_add_ret
5847
5848+ .globl atomic64_add_ret_unchecked
5849+ .type atomic64_add_ret_unchecked,#function
5850+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5851+ BACKOFF_SETUP(%o2)
5852+1: ldx [%o1], %g1
5853+ addcc %g1, %o0, %g7
5854+ casx [%o1], %g1, %g7
5855+ cmp %g1, %g7
5856+ bne,pn %xcc, 2f
5857+ add %g7, %o0, %g7
5858+ mov %g7, %o0
5859+ retl
5860+ nop
5861+2: BACKOFF_SPIN(%o2, %o3, 1b)
5862+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5863+
5864 .globl atomic64_sub_ret
5865 .type atomic64_sub_ret,#function
5866 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5867 BACKOFF_SETUP(%o2)
5868 1: ldx [%o1], %g1
5869- sub %g1, %o0, %g7
5870+ subcc %g1, %o0, %g7
5871+
5872+#ifdef CONFIG_PAX_REFCOUNT
5873+ tvs %xcc, 6
5874+#endif
5875+
5876 casx [%o1], %g1, %g7
5877 cmp %g1, %g7
5878 bne,pn %xcc, 2f
5879diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5880index 704b126..2e79d76 100644
5881--- a/arch/sparc/lib/ksyms.c
5882+++ b/arch/sparc/lib/ksyms.c
5883@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5884
5885 /* Atomic counter implementation. */
5886 EXPORT_SYMBOL(atomic_add);
5887+EXPORT_SYMBOL(atomic_add_unchecked);
5888 EXPORT_SYMBOL(atomic_add_ret);
5889+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5890 EXPORT_SYMBOL(atomic_sub);
5891+EXPORT_SYMBOL(atomic_sub_unchecked);
5892 EXPORT_SYMBOL(atomic_sub_ret);
5893 EXPORT_SYMBOL(atomic64_add);
5894+EXPORT_SYMBOL(atomic64_add_unchecked);
5895 EXPORT_SYMBOL(atomic64_add_ret);
5896+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5897 EXPORT_SYMBOL(atomic64_sub);
5898+EXPORT_SYMBOL(atomic64_sub_unchecked);
5899 EXPORT_SYMBOL(atomic64_sub_ret);
5900
5901 /* Atomic bit operations. */
5902diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5903index 91a7d29..ce75c29 100644
5904--- a/arch/sparc/lib/rwsem_64.S
5905+++ b/arch/sparc/lib/rwsem_64.S
5906@@ -11,7 +11,12 @@
5907 .globl __down_read
5908 __down_read:
5909 1: lduw [%o0], %g1
5910- add %g1, 1, %g7
5911+ addcc %g1, 1, %g7
5912+
5913+#ifdef CONFIG_PAX_REFCOUNT
5914+ tvs %icc, 6
5915+#endif
5916+
5917 cas [%o0], %g1, %g7
5918 cmp %g1, %g7
5919 bne,pn %icc, 1b
5920@@ -33,7 +38,12 @@ __down_read:
5921 .globl __down_read_trylock
5922 __down_read_trylock:
5923 1: lduw [%o0], %g1
5924- add %g1, 1, %g7
5925+ addcc %g1, 1, %g7
5926+
5927+#ifdef CONFIG_PAX_REFCOUNT
5928+ tvs %icc, 6
5929+#endif
5930+
5931 cmp %g7, 0
5932 bl,pn %icc, 2f
5933 mov 0, %o1
5934@@ -51,7 +61,12 @@ __down_write:
5935 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5936 1:
5937 lduw [%o0], %g3
5938- add %g3, %g1, %g7
5939+ addcc %g3, %g1, %g7
5940+
5941+#ifdef CONFIG_PAX_REFCOUNT
5942+ tvs %icc, 6
5943+#endif
5944+
5945 cas [%o0], %g3, %g7
5946 cmp %g3, %g7
5947 bne,pn %icc, 1b
5948@@ -77,7 +92,12 @@ __down_write_trylock:
5949 cmp %g3, 0
5950 bne,pn %icc, 2f
5951 mov 0, %o1
5952- add %g3, %g1, %g7
5953+ addcc %g3, %g1, %g7
5954+
5955+#ifdef CONFIG_PAX_REFCOUNT
5956+ tvs %icc, 6
5957+#endif
5958+
5959 cas [%o0], %g3, %g7
5960 cmp %g3, %g7
5961 bne,pn %icc, 1b
5962@@ -90,7 +110,12 @@ __down_write_trylock:
5963 __up_read:
5964 1:
5965 lduw [%o0], %g1
5966- sub %g1, 1, %g7
5967+ subcc %g1, 1, %g7
5968+
5969+#ifdef CONFIG_PAX_REFCOUNT
5970+ tvs %icc, 6
5971+#endif
5972+
5973 cas [%o0], %g1, %g7
5974 cmp %g1, %g7
5975 bne,pn %icc, 1b
5976@@ -118,7 +143,12 @@ __up_write:
5977 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5978 1:
5979 lduw [%o0], %g3
5980- sub %g3, %g1, %g7
5981+ subcc %g3, %g1, %g7
5982+
5983+#ifdef CONFIG_PAX_REFCOUNT
5984+ tvs %icc, 6
5985+#endif
5986+
5987 cas [%o0], %g3, %g7
5988 cmp %g3, %g7
5989 bne,pn %icc, 1b
5990@@ -143,7 +173,12 @@ __downgrade_write:
5991 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5992 1:
5993 lduw [%o0], %g3
5994- sub %g3, %g1, %g7
5995+ subcc %g3, %g1, %g7
5996+
5997+#ifdef CONFIG_PAX_REFCOUNT
5998+ tvs %icc, 6
5999+#endif
6000+
6001 cas [%o0], %g3, %g7
6002 cmp %g3, %g7
6003 bne,pn %icc, 1b
6004diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6005index 79836a7..62f47a2 100644
6006--- a/arch/sparc/mm/Makefile
6007+++ b/arch/sparc/mm/Makefile
6008@@ -2,7 +2,7 @@
6009 #
6010
6011 asflags-y := -ansi
6012-ccflags-y := -Werror
6013+#ccflags-y := -Werror
6014
6015 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6016 obj-y += fault_$(BITS).o
6017diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6018index b99f81c..3453e93 100644
6019--- a/arch/sparc/mm/fault_32.c
6020+++ b/arch/sparc/mm/fault_32.c
6021@@ -21,6 +21,9 @@
6022 #include <linux/interrupt.h>
6023 #include <linux/module.h>
6024 #include <linux/kdebug.h>
6025+#include <linux/slab.h>
6026+#include <linux/pagemap.h>
6027+#include <linux/compiler.h>
6028
6029 #include <asm/system.h>
6030 #include <asm/page.h>
6031@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6032 return safe_compute_effective_address(regs, insn);
6033 }
6034
6035+#ifdef CONFIG_PAX_PAGEEXEC
6036+#ifdef CONFIG_PAX_DLRESOLVE
6037+static void pax_emuplt_close(struct vm_area_struct *vma)
6038+{
6039+ vma->vm_mm->call_dl_resolve = 0UL;
6040+}
6041+
6042+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6043+{
6044+ unsigned int *kaddr;
6045+
6046+ vmf->page = alloc_page(GFP_HIGHUSER);
6047+ if (!vmf->page)
6048+ return VM_FAULT_OOM;
6049+
6050+ kaddr = kmap(vmf->page);
6051+ memset(kaddr, 0, PAGE_SIZE);
6052+ kaddr[0] = 0x9DE3BFA8U; /* save */
6053+ flush_dcache_page(vmf->page);
6054+ kunmap(vmf->page);
6055+ return VM_FAULT_MAJOR;
6056+}
6057+
6058+static const struct vm_operations_struct pax_vm_ops = {
6059+ .close = pax_emuplt_close,
6060+ .fault = pax_emuplt_fault
6061+};
6062+
6063+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6064+{
6065+ int ret;
6066+
6067+ vma->vm_mm = current->mm;
6068+ vma->vm_start = addr;
6069+ vma->vm_end = addr + PAGE_SIZE;
6070+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6071+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6072+ vma->vm_ops = &pax_vm_ops;
6073+
6074+ ret = insert_vm_struct(current->mm, vma);
6075+ if (ret)
6076+ return ret;
6077+
6078+ ++current->mm->total_vm;
6079+ return 0;
6080+}
6081+#endif
6082+
6083+/*
6084+ * PaX: decide what to do with offenders (regs->pc = fault address)
6085+ *
6086+ * returns 1 when task should be killed
6087+ * 2 when patched PLT trampoline was detected
6088+ * 3 when unpatched PLT trampoline was detected
6089+ */
6090+static int pax_handle_fetch_fault(struct pt_regs *regs)
6091+{
6092+
6093+#ifdef CONFIG_PAX_EMUPLT
6094+ int err;
6095+
6096+ do { /* PaX: patched PLT emulation #1 */
6097+ unsigned int sethi1, sethi2, jmpl;
6098+
6099+ err = get_user(sethi1, (unsigned int *)regs->pc);
6100+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6101+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6102+
6103+ if (err)
6104+ break;
6105+
6106+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6107+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6108+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6109+ {
6110+ unsigned int addr;
6111+
6112+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6113+ addr = regs->u_regs[UREG_G1];
6114+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6115+ regs->pc = addr;
6116+ regs->npc = addr+4;
6117+ return 2;
6118+ }
6119+ } while (0);
6120+
6121+ { /* PaX: patched PLT emulation #2 */
6122+ unsigned int ba;
6123+
6124+ err = get_user(ba, (unsigned int *)regs->pc);
6125+
6126+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6127+ unsigned int addr;
6128+
6129+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6130+ regs->pc = addr;
6131+ regs->npc = addr+4;
6132+ return 2;
6133+ }
6134+ }
6135+
6136+ do { /* PaX: patched PLT emulation #3 */
6137+ unsigned int sethi, jmpl, nop;
6138+
6139+ err = get_user(sethi, (unsigned int *)regs->pc);
6140+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6141+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6142+
6143+ if (err)
6144+ break;
6145+
6146+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6147+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6148+ nop == 0x01000000U)
6149+ {
6150+ unsigned int addr;
6151+
6152+ addr = (sethi & 0x003FFFFFU) << 10;
6153+ regs->u_regs[UREG_G1] = addr;
6154+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6155+ regs->pc = addr;
6156+ regs->npc = addr+4;
6157+ return 2;
6158+ }
6159+ } while (0);
6160+
6161+ do { /* PaX: unpatched PLT emulation step 1 */
6162+ unsigned int sethi, ba, nop;
6163+
6164+ err = get_user(sethi, (unsigned int *)regs->pc);
6165+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6166+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6167+
6168+ if (err)
6169+ break;
6170+
6171+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6172+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6173+ nop == 0x01000000U)
6174+ {
6175+ unsigned int addr, save, call;
6176+
6177+ if ((ba & 0xFFC00000U) == 0x30800000U)
6178+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6179+ else
6180+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6181+
6182+ err = get_user(save, (unsigned int *)addr);
6183+ err |= get_user(call, (unsigned int *)(addr+4));
6184+ err |= get_user(nop, (unsigned int *)(addr+8));
6185+ if (err)
6186+ break;
6187+
6188+#ifdef CONFIG_PAX_DLRESOLVE
6189+ if (save == 0x9DE3BFA8U &&
6190+ (call & 0xC0000000U) == 0x40000000U &&
6191+ nop == 0x01000000U)
6192+ {
6193+ struct vm_area_struct *vma;
6194+ unsigned long call_dl_resolve;
6195+
6196+ down_read(&current->mm->mmap_sem);
6197+ call_dl_resolve = current->mm->call_dl_resolve;
6198+ up_read(&current->mm->mmap_sem);
6199+ if (likely(call_dl_resolve))
6200+ goto emulate;
6201+
6202+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6203+
6204+ down_write(&current->mm->mmap_sem);
6205+ if (current->mm->call_dl_resolve) {
6206+ call_dl_resolve = current->mm->call_dl_resolve;
6207+ up_write(&current->mm->mmap_sem);
6208+ if (vma)
6209+ kmem_cache_free(vm_area_cachep, vma);
6210+ goto emulate;
6211+ }
6212+
6213+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6214+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6215+ up_write(&current->mm->mmap_sem);
6216+ if (vma)
6217+ kmem_cache_free(vm_area_cachep, vma);
6218+ return 1;
6219+ }
6220+
6221+ if (pax_insert_vma(vma, call_dl_resolve)) {
6222+ up_write(&current->mm->mmap_sem);
6223+ kmem_cache_free(vm_area_cachep, vma);
6224+ return 1;
6225+ }
6226+
6227+ current->mm->call_dl_resolve = call_dl_resolve;
6228+ up_write(&current->mm->mmap_sem);
6229+
6230+emulate:
6231+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6232+ regs->pc = call_dl_resolve;
6233+ regs->npc = addr+4;
6234+ return 3;
6235+ }
6236+#endif
6237+
6238+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6239+ if ((save & 0xFFC00000U) == 0x05000000U &&
6240+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6241+ nop == 0x01000000U)
6242+ {
6243+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6244+ regs->u_regs[UREG_G2] = addr + 4;
6245+ addr = (save & 0x003FFFFFU) << 10;
6246+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6247+ regs->pc = addr;
6248+ regs->npc = addr+4;
6249+ return 3;
6250+ }
6251+ }
6252+ } while (0);
6253+
6254+ do { /* PaX: unpatched PLT emulation step 2 */
6255+ unsigned int save, call, nop;
6256+
6257+ err = get_user(save, (unsigned int *)(regs->pc-4));
6258+ err |= get_user(call, (unsigned int *)regs->pc);
6259+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6260+ if (err)
6261+ break;
6262+
6263+ if (save == 0x9DE3BFA8U &&
6264+ (call & 0xC0000000U) == 0x40000000U &&
6265+ nop == 0x01000000U)
6266+ {
6267+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6268+
6269+ regs->u_regs[UREG_RETPC] = regs->pc;
6270+ regs->pc = dl_resolve;
6271+ regs->npc = dl_resolve+4;
6272+ return 3;
6273+ }
6274+ } while (0);
6275+#endif
6276+
6277+ return 1;
6278+}
6279+
6280+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6281+{
6282+ unsigned long i;
6283+
6284+ printk(KERN_ERR "PAX: bytes at PC: ");
6285+ for (i = 0; i < 8; i++) {
6286+ unsigned int c;
6287+ if (get_user(c, (unsigned int *)pc+i))
6288+ printk(KERN_CONT "???????? ");
6289+ else
6290+ printk(KERN_CONT "%08x ", c);
6291+ }
6292+ printk("\n");
6293+}
6294+#endif
6295+
6296 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6297 unsigned long address)
6298 {
6299@@ -231,6 +495,24 @@ good_area:
6300 if(!(vma->vm_flags & VM_WRITE))
6301 goto bad_area;
6302 } else {
6303+
6304+#ifdef CONFIG_PAX_PAGEEXEC
6305+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6306+ up_read(&mm->mmap_sem);
6307+ switch (pax_handle_fetch_fault(regs)) {
6308+
6309+#ifdef CONFIG_PAX_EMUPLT
6310+ case 2:
6311+ case 3:
6312+ return;
6313+#endif
6314+
6315+ }
6316+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6317+ do_group_exit(SIGKILL);
6318+ }
6319+#endif
6320+
6321 /* Allow reads even for write-only mappings */
6322 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6323 goto bad_area;
6324diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6325index 43b0da9..a0b78f9 100644
6326--- a/arch/sparc/mm/fault_64.c
6327+++ b/arch/sparc/mm/fault_64.c
6328@@ -20,6 +20,9 @@
6329 #include <linux/kprobes.h>
6330 #include <linux/kdebug.h>
6331 #include <linux/percpu.h>
6332+#include <linux/slab.h>
6333+#include <linux/pagemap.h>
6334+#include <linux/compiler.h>
6335
6336 #include <asm/page.h>
6337 #include <asm/pgtable.h>
6338@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6339 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6340 regs->tpc);
6341 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6342- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6343+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6344 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6345 dump_stack();
6346 unhandled_fault(regs->tpc, current, regs);
6347@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6348 show_regs(regs);
6349 }
6350
6351+#ifdef CONFIG_PAX_PAGEEXEC
6352+#ifdef CONFIG_PAX_DLRESOLVE
6353+static void pax_emuplt_close(struct vm_area_struct *vma)
6354+{
6355+ vma->vm_mm->call_dl_resolve = 0UL;
6356+}
6357+
6358+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6359+{
6360+ unsigned int *kaddr;
6361+
6362+ vmf->page = alloc_page(GFP_HIGHUSER);
6363+ if (!vmf->page)
6364+ return VM_FAULT_OOM;
6365+
6366+ kaddr = kmap(vmf->page);
6367+ memset(kaddr, 0, PAGE_SIZE);
6368+ kaddr[0] = 0x9DE3BFA8U; /* save */
6369+ flush_dcache_page(vmf->page);
6370+ kunmap(vmf->page);
6371+ return VM_FAULT_MAJOR;
6372+}
6373+
6374+static const struct vm_operations_struct pax_vm_ops = {
6375+ .close = pax_emuplt_close,
6376+ .fault = pax_emuplt_fault
6377+};
6378+
6379+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6380+{
6381+ int ret;
6382+
6383+ vma->vm_mm = current->mm;
6384+ vma->vm_start = addr;
6385+ vma->vm_end = addr + PAGE_SIZE;
6386+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6387+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6388+ vma->vm_ops = &pax_vm_ops;
6389+
6390+ ret = insert_vm_struct(current->mm, vma);
6391+ if (ret)
6392+ return ret;
6393+
6394+ ++current->mm->total_vm;
6395+ return 0;
6396+}
6397+#endif
6398+
6399+/*
6400+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6401+ *
6402+ * returns 1 when task should be killed
6403+ * 2 when patched PLT trampoline was detected
6404+ * 3 when unpatched PLT trampoline was detected
6405+ */
6406+static int pax_handle_fetch_fault(struct pt_regs *regs)
6407+{
6408+
6409+#ifdef CONFIG_PAX_EMUPLT
6410+ int err;
6411+
6412+ do { /* PaX: patched PLT emulation #1 */
6413+ unsigned int sethi1, sethi2, jmpl;
6414+
6415+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6416+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6417+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6418+
6419+ if (err)
6420+ break;
6421+
6422+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6423+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6424+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6425+ {
6426+ unsigned long addr;
6427+
6428+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6429+ addr = regs->u_regs[UREG_G1];
6430+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6431+
6432+ if (test_thread_flag(TIF_32BIT))
6433+ addr &= 0xFFFFFFFFUL;
6434+
6435+ regs->tpc = addr;
6436+ regs->tnpc = addr+4;
6437+ return 2;
6438+ }
6439+ } while (0);
6440+
6441+ { /* PaX: patched PLT emulation #2 */
6442+ unsigned int ba;
6443+
6444+ err = get_user(ba, (unsigned int *)regs->tpc);
6445+
6446+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6447+ unsigned long addr;
6448+
6449+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6450+
6451+ if (test_thread_flag(TIF_32BIT))
6452+ addr &= 0xFFFFFFFFUL;
6453+
6454+ regs->tpc = addr;
6455+ regs->tnpc = addr+4;
6456+ return 2;
6457+ }
6458+ }
6459+
6460+ do { /* PaX: patched PLT emulation #3 */
6461+ unsigned int sethi, jmpl, nop;
6462+
6463+ err = get_user(sethi, (unsigned int *)regs->tpc);
6464+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6465+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6466+
6467+ if (err)
6468+ break;
6469+
6470+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6471+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6472+ nop == 0x01000000U)
6473+ {
6474+ unsigned long addr;
6475+
6476+ addr = (sethi & 0x003FFFFFU) << 10;
6477+ regs->u_regs[UREG_G1] = addr;
6478+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6479+
6480+ if (test_thread_flag(TIF_32BIT))
6481+ addr &= 0xFFFFFFFFUL;
6482+
6483+ regs->tpc = addr;
6484+ regs->tnpc = addr+4;
6485+ return 2;
6486+ }
6487+ } while (0);
6488+
6489+ do { /* PaX: patched PLT emulation #4 */
6490+ unsigned int sethi, mov1, call, mov2;
6491+
6492+ err = get_user(sethi, (unsigned int *)regs->tpc);
6493+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6494+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6495+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6496+
6497+ if (err)
6498+ break;
6499+
6500+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6501+ mov1 == 0x8210000FU &&
6502+ (call & 0xC0000000U) == 0x40000000U &&
6503+ mov2 == 0x9E100001U)
6504+ {
6505+ unsigned long addr;
6506+
6507+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6508+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6509+
6510+ if (test_thread_flag(TIF_32BIT))
6511+ addr &= 0xFFFFFFFFUL;
6512+
6513+ regs->tpc = addr;
6514+ regs->tnpc = addr+4;
6515+ return 2;
6516+ }
6517+ } while (0);
6518+
6519+ do { /* PaX: patched PLT emulation #5 */
6520+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6521+
6522+ err = get_user(sethi, (unsigned int *)regs->tpc);
6523+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6524+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6525+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6526+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6527+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6528+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6529+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6530+
6531+ if (err)
6532+ break;
6533+
6534+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6535+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6536+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6537+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6538+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6539+ sllx == 0x83287020U &&
6540+ jmpl == 0x81C04005U &&
6541+ nop == 0x01000000U)
6542+ {
6543+ unsigned long addr;
6544+
6545+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6546+ regs->u_regs[UREG_G1] <<= 32;
6547+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6548+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6549+ regs->tpc = addr;
6550+ regs->tnpc = addr+4;
6551+ return 2;
6552+ }
6553+ } while (0);
6554+
6555+ do { /* PaX: patched PLT emulation #6 */
6556+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6557+
6558+ err = get_user(sethi, (unsigned int *)regs->tpc);
6559+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6560+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6561+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6562+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6563+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6564+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6565+
6566+ if (err)
6567+ break;
6568+
6569+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6570+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6571+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6572+ sllx == 0x83287020U &&
6573+ (or & 0xFFFFE000U) == 0x8A116000U &&
6574+ jmpl == 0x81C04005U &&
6575+ nop == 0x01000000U)
6576+ {
6577+ unsigned long addr;
6578+
6579+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6580+ regs->u_regs[UREG_G1] <<= 32;
6581+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6582+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6583+ regs->tpc = addr;
6584+ regs->tnpc = addr+4;
6585+ return 2;
6586+ }
6587+ } while (0);
6588+
6589+ do { /* PaX: unpatched PLT emulation step 1 */
6590+ unsigned int sethi, ba, nop;
6591+
6592+ err = get_user(sethi, (unsigned int *)regs->tpc);
6593+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6594+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6595+
6596+ if (err)
6597+ break;
6598+
6599+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6600+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6601+ nop == 0x01000000U)
6602+ {
6603+ unsigned long addr;
6604+ unsigned int save, call;
6605+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6606+
6607+ if ((ba & 0xFFC00000U) == 0x30800000U)
6608+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6609+ else
6610+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6611+
6612+ if (test_thread_flag(TIF_32BIT))
6613+ addr &= 0xFFFFFFFFUL;
6614+
6615+ err = get_user(save, (unsigned int *)addr);
6616+ err |= get_user(call, (unsigned int *)(addr+4));
6617+ err |= get_user(nop, (unsigned int *)(addr+8));
6618+ if (err)
6619+ break;
6620+
6621+#ifdef CONFIG_PAX_DLRESOLVE
6622+ if (save == 0x9DE3BFA8U &&
6623+ (call & 0xC0000000U) == 0x40000000U &&
6624+ nop == 0x01000000U)
6625+ {
6626+ struct vm_area_struct *vma;
6627+ unsigned long call_dl_resolve;
6628+
6629+ down_read(&current->mm->mmap_sem);
6630+ call_dl_resolve = current->mm->call_dl_resolve;
6631+ up_read(&current->mm->mmap_sem);
6632+ if (likely(call_dl_resolve))
6633+ goto emulate;
6634+
6635+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6636+
6637+ down_write(&current->mm->mmap_sem);
6638+ if (current->mm->call_dl_resolve) {
6639+ call_dl_resolve = current->mm->call_dl_resolve;
6640+ up_write(&current->mm->mmap_sem);
6641+ if (vma)
6642+ kmem_cache_free(vm_area_cachep, vma);
6643+ goto emulate;
6644+ }
6645+
6646+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6647+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6648+ up_write(&current->mm->mmap_sem);
6649+ if (vma)
6650+ kmem_cache_free(vm_area_cachep, vma);
6651+ return 1;
6652+ }
6653+
6654+ if (pax_insert_vma(vma, call_dl_resolve)) {
6655+ up_write(&current->mm->mmap_sem);
6656+ kmem_cache_free(vm_area_cachep, vma);
6657+ return 1;
6658+ }
6659+
6660+ current->mm->call_dl_resolve = call_dl_resolve;
6661+ up_write(&current->mm->mmap_sem);
6662+
6663+emulate:
6664+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6665+ regs->tpc = call_dl_resolve;
6666+ regs->tnpc = addr+4;
6667+ return 3;
6668+ }
6669+#endif
6670+
6671+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6672+ if ((save & 0xFFC00000U) == 0x05000000U &&
6673+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6674+ nop == 0x01000000U)
6675+ {
6676+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6677+ regs->u_regs[UREG_G2] = addr + 4;
6678+ addr = (save & 0x003FFFFFU) << 10;
6679+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6680+
6681+ if (test_thread_flag(TIF_32BIT))
6682+ addr &= 0xFFFFFFFFUL;
6683+
6684+ regs->tpc = addr;
6685+ regs->tnpc = addr+4;
6686+ return 3;
6687+ }
6688+
6689+ /* PaX: 64-bit PLT stub */
6690+ err = get_user(sethi1, (unsigned int *)addr);
6691+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6692+ err |= get_user(or1, (unsigned int *)(addr+8));
6693+ err |= get_user(or2, (unsigned int *)(addr+12));
6694+ err |= get_user(sllx, (unsigned int *)(addr+16));
6695+ err |= get_user(add, (unsigned int *)(addr+20));
6696+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6697+ err |= get_user(nop, (unsigned int *)(addr+28));
6698+ if (err)
6699+ break;
6700+
6701+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6702+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6703+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6704+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6705+ sllx == 0x89293020U &&
6706+ add == 0x8A010005U &&
6707+ jmpl == 0x89C14000U &&
6708+ nop == 0x01000000U)
6709+ {
6710+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6711+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6712+ regs->u_regs[UREG_G4] <<= 32;
6713+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6714+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6715+ regs->u_regs[UREG_G4] = addr + 24;
6716+ addr = regs->u_regs[UREG_G5];
6717+ regs->tpc = addr;
6718+ regs->tnpc = addr+4;
6719+ return 3;
6720+ }
6721+ }
6722+ } while (0);
6723+
6724+#ifdef CONFIG_PAX_DLRESOLVE
6725+ do { /* PaX: unpatched PLT emulation step 2 */
6726+ unsigned int save, call, nop;
6727+
6728+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6729+ err |= get_user(call, (unsigned int *)regs->tpc);
6730+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6731+ if (err)
6732+ break;
6733+
6734+ if (save == 0x9DE3BFA8U &&
6735+ (call & 0xC0000000U) == 0x40000000U &&
6736+ nop == 0x01000000U)
6737+ {
6738+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6739+
6740+ if (test_thread_flag(TIF_32BIT))
6741+ dl_resolve &= 0xFFFFFFFFUL;
6742+
6743+ regs->u_regs[UREG_RETPC] = regs->tpc;
6744+ regs->tpc = dl_resolve;
6745+ regs->tnpc = dl_resolve+4;
6746+ return 3;
6747+ }
6748+ } while (0);
6749+#endif
6750+
6751+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6752+ unsigned int sethi, ba, nop;
6753+
6754+ err = get_user(sethi, (unsigned int *)regs->tpc);
6755+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6756+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6757+
6758+ if (err)
6759+ break;
6760+
6761+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6762+ (ba & 0xFFF00000U) == 0x30600000U &&
6763+ nop == 0x01000000U)
6764+ {
6765+ unsigned long addr;
6766+
6767+ addr = (sethi & 0x003FFFFFU) << 10;
6768+ regs->u_regs[UREG_G1] = addr;
6769+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6770+
6771+ if (test_thread_flag(TIF_32BIT))
6772+ addr &= 0xFFFFFFFFUL;
6773+
6774+ regs->tpc = addr;
6775+ regs->tnpc = addr+4;
6776+ return 2;
6777+ }
6778+ } while (0);
6779+
6780+#endif
6781+
6782+ return 1;
6783+}
6784+
6785+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6786+{
6787+ unsigned long i;
6788+
6789+ printk(KERN_ERR "PAX: bytes at PC: ");
6790+ for (i = 0; i < 8; i++) {
6791+ unsigned int c;
6792+ if (get_user(c, (unsigned int *)pc+i))
6793+ printk(KERN_CONT "???????? ");
6794+ else
6795+ printk(KERN_CONT "%08x ", c);
6796+ }
6797+ printk("\n");
6798+}
6799+#endif
6800+
6801 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6802 {
6803 struct mm_struct *mm = current->mm;
6804@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6805 if (!vma)
6806 goto bad_area;
6807
6808+#ifdef CONFIG_PAX_PAGEEXEC
6809+ /* PaX: detect ITLB misses on non-exec pages */
6810+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6811+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6812+ {
6813+ if (address != regs->tpc)
6814+ goto good_area;
6815+
6816+ up_read(&mm->mmap_sem);
6817+ switch (pax_handle_fetch_fault(regs)) {
6818+
6819+#ifdef CONFIG_PAX_EMUPLT
6820+ case 2:
6821+ case 3:
6822+ return;
6823+#endif
6824+
6825+ }
6826+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6827+ do_group_exit(SIGKILL);
6828+ }
6829+#endif
6830+
6831 /* Pure DTLB misses do not tell us whether the fault causing
6832 * load/store/atomic was a write or not, it only says that there
6833 * was no match. So in such a case we (carefully) read the
6834diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6835index f27d103..1b06377 100644
6836--- a/arch/sparc/mm/hugetlbpage.c
6837+++ b/arch/sparc/mm/hugetlbpage.c
6838@@ -69,7 +69,7 @@ full_search:
6839 }
6840 return -ENOMEM;
6841 }
6842- if (likely(!vma || addr + len <= vma->vm_start)) {
6843+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6844 /*
6845 * Remember the place where we stopped the search:
6846 */
6847@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6848 /* make sure it can fit in the remaining address space */
6849 if (likely(addr > len)) {
6850 vma = find_vma(mm, addr-len);
6851- if (!vma || addr <= vma->vm_start) {
6852+ if (check_heap_stack_gap(vma, addr - len, len)) {
6853 /* remember the address as a hint for next time */
6854 return (mm->free_area_cache = addr-len);
6855 }
6856@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6857 if (unlikely(mm->mmap_base < len))
6858 goto bottomup;
6859
6860- addr = (mm->mmap_base-len) & HPAGE_MASK;
6861+ addr = mm->mmap_base - len;
6862
6863 do {
6864+ addr &= HPAGE_MASK;
6865 /*
6866 * Lookup failure means no vma is above this address,
6867 * else if new region fits below vma->vm_start,
6868 * return with success:
6869 */
6870 vma = find_vma(mm, addr);
6871- if (likely(!vma || addr+len <= vma->vm_start)) {
6872+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6873 /* remember the address as a hint for next time */
6874 return (mm->free_area_cache = addr);
6875 }
6876@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6877 mm->cached_hole_size = vma->vm_start - addr;
6878
6879 /* try just below the current vma->vm_start */
6880- addr = (vma->vm_start-len) & HPAGE_MASK;
6881- } while (likely(len < vma->vm_start));
6882+ addr = skip_heap_stack_gap(vma, len);
6883+ } while (!IS_ERR_VALUE(addr));
6884
6885 bottomup:
6886 /*
6887@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6888 if (addr) {
6889 addr = ALIGN(addr, HPAGE_SIZE);
6890 vma = find_vma(mm, addr);
6891- if (task_size - len >= addr &&
6892- (!vma || addr + len <= vma->vm_start))
6893+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6894 return addr;
6895 }
6896 if (mm->get_unmapped_area == arch_get_unmapped_area)
6897diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6898index dc7c3b1..34c0070 100644
6899--- a/arch/sparc/mm/init_32.c
6900+++ b/arch/sparc/mm/init_32.c
6901@@ -317,6 +317,9 @@ extern void device_scan(void);
6902 pgprot_t PAGE_SHARED __read_mostly;
6903 EXPORT_SYMBOL(PAGE_SHARED);
6904
6905+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6906+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6907+
6908 void __init paging_init(void)
6909 {
6910 switch(sparc_cpu_model) {
6911@@ -345,17 +348,17 @@ void __init paging_init(void)
6912
6913 /* Initialize the protection map with non-constant, MMU dependent values. */
6914 protection_map[0] = PAGE_NONE;
6915- protection_map[1] = PAGE_READONLY;
6916- protection_map[2] = PAGE_COPY;
6917- protection_map[3] = PAGE_COPY;
6918+ protection_map[1] = PAGE_READONLY_NOEXEC;
6919+ protection_map[2] = PAGE_COPY_NOEXEC;
6920+ protection_map[3] = PAGE_COPY_NOEXEC;
6921 protection_map[4] = PAGE_READONLY;
6922 protection_map[5] = PAGE_READONLY;
6923 protection_map[6] = PAGE_COPY;
6924 protection_map[7] = PAGE_COPY;
6925 protection_map[8] = PAGE_NONE;
6926- protection_map[9] = PAGE_READONLY;
6927- protection_map[10] = PAGE_SHARED;
6928- protection_map[11] = PAGE_SHARED;
6929+ protection_map[9] = PAGE_READONLY_NOEXEC;
6930+ protection_map[10] = PAGE_SHARED_NOEXEC;
6931+ protection_map[11] = PAGE_SHARED_NOEXEC;
6932 protection_map[12] = PAGE_READONLY;
6933 protection_map[13] = PAGE_READONLY;
6934 protection_map[14] = PAGE_SHARED;
6935diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6936index 509b1ff..bfd7118 100644
6937--- a/arch/sparc/mm/srmmu.c
6938+++ b/arch/sparc/mm/srmmu.c
6939@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6940 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6941 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6942 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6943+
6944+#ifdef CONFIG_PAX_PAGEEXEC
6945+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6946+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6947+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6948+#endif
6949+
6950 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6951 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6952
6953diff --git a/arch/um/Makefile b/arch/um/Makefile
6954index fc633db..5e1a1c2 100644
6955--- a/arch/um/Makefile
6956+++ b/arch/um/Makefile
6957@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6958 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6959 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6960
6961+ifdef CONSTIFY_PLUGIN
6962+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6963+endif
6964+
6965 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6966
6967 #This will adjust *FLAGS accordingly to the platform.
6968diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6969index 6c03acd..a5e0215 100644
6970--- a/arch/um/include/asm/kmap_types.h
6971+++ b/arch/um/include/asm/kmap_types.h
6972@@ -23,6 +23,7 @@ enum km_type {
6973 KM_IRQ1,
6974 KM_SOFTIRQ0,
6975 KM_SOFTIRQ1,
6976+ KM_CLEARPAGE,
6977 KM_TYPE_NR
6978 };
6979
6980diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6981index 4cc9b6c..02e5029 100644
6982--- a/arch/um/include/asm/page.h
6983+++ b/arch/um/include/asm/page.h
6984@@ -14,6 +14,9 @@
6985 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6986 #define PAGE_MASK (~(PAGE_SIZE-1))
6987
6988+#define ktla_ktva(addr) (addr)
6989+#define ktva_ktla(addr) (addr)
6990+
6991 #ifndef __ASSEMBLY__
6992
6993 struct page;
6994diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6995index 4a28a15..654dc2a 100644
6996--- a/arch/um/kernel/process.c
6997+++ b/arch/um/kernel/process.c
6998@@ -393,22 +393,6 @@ int singlestepping(void * t)
6999 return 2;
7000 }
7001
7002-/*
7003- * Only x86 and x86_64 have an arch_align_stack().
7004- * All other arches have "#define arch_align_stack(x) (x)"
7005- * in their asm/system.h
7006- * As this is included in UML from asm-um/system-generic.h,
7007- * we can use it to behave as the subarch does.
7008- */
7009-#ifndef arch_align_stack
7010-unsigned long arch_align_stack(unsigned long sp)
7011-{
7012- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7013- sp -= get_random_int() % 8192;
7014- return sp & ~0xf;
7015-}
7016-#endif
7017-
7018 unsigned long get_wchan(struct task_struct *p)
7019 {
7020 unsigned long stack_page, sp, ip;
7021diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7022index d1b93c4..ae1b7fd 100644
7023--- a/arch/um/sys-i386/shared/sysdep/system.h
7024+++ b/arch/um/sys-i386/shared/sysdep/system.h
7025@@ -17,7 +17,7 @@
7026 # define AT_VECTOR_SIZE_ARCH 1
7027 #endif
7028
7029-extern unsigned long arch_align_stack(unsigned long sp);
7030+#define arch_align_stack(x) ((x) & ~0xfUL)
7031
7032 void default_idle(void);
7033
7034diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7035index 857ca0b..9a2669d 100644
7036--- a/arch/um/sys-i386/syscalls.c
7037+++ b/arch/um/sys-i386/syscalls.c
7038@@ -11,6 +11,21 @@
7039 #include "asm/uaccess.h"
7040 #include "asm/unistd.h"
7041
7042+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7043+{
7044+ unsigned long pax_task_size = TASK_SIZE;
7045+
7046+#ifdef CONFIG_PAX_SEGMEXEC
7047+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7048+ pax_task_size = SEGMEXEC_TASK_SIZE;
7049+#endif
7050+
7051+ if (len > pax_task_size || addr > pax_task_size - len)
7052+ return -EINVAL;
7053+
7054+ return 0;
7055+}
7056+
7057 /*
7058 * Perform the select(nd, in, out, ex, tv) and mmap() system
7059 * calls. Linux/i386 didn't use to be able to handle more than
7060diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7061index d1b93c4..ae1b7fd 100644
7062--- a/arch/um/sys-x86_64/shared/sysdep/system.h
7063+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7064@@ -17,7 +17,7 @@
7065 # define AT_VECTOR_SIZE_ARCH 1
7066 #endif
7067
7068-extern unsigned long arch_align_stack(unsigned long sp);
7069+#define arch_align_stack(x) ((x) & ~0xfUL)
7070
7071 void default_idle(void);
7072
7073diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7074index 73ae02a..f932de5 100644
7075--- a/arch/x86/Kconfig
7076+++ b/arch/x86/Kconfig
7077@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7078
7079 config X86_32_LAZY_GS
7080 def_bool y
7081- depends on X86_32 && !CC_STACKPROTECTOR
7082+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7083
7084 config KTIME_SCALAR
7085 def_bool X86_32
7086@@ -1008,7 +1008,7 @@ choice
7087
7088 config NOHIGHMEM
7089 bool "off"
7090- depends on !X86_NUMAQ
7091+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7092 ---help---
7093 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7094 However, the address space of 32-bit x86 processors is only 4
7095@@ -1045,7 +1045,7 @@ config NOHIGHMEM
7096
7097 config HIGHMEM4G
7098 bool "4GB"
7099- depends on !X86_NUMAQ
7100+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7101 ---help---
7102 Select this if you have a 32-bit processor and between 1 and 4
7103 gigabytes of physical RAM.
7104@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7105 hex
7106 default 0xB0000000 if VMSPLIT_3G_OPT
7107 default 0x80000000 if VMSPLIT_2G
7108- default 0x78000000 if VMSPLIT_2G_OPT
7109+ default 0x70000000 if VMSPLIT_2G_OPT
7110 default 0x40000000 if VMSPLIT_1G
7111 default 0xC0000000
7112 depends on X86_32
7113@@ -1460,6 +1460,7 @@ config SECCOMP
7114
7115 config CC_STACKPROTECTOR
7116 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7117+ depends on X86_64 || !PAX_MEMORY_UDEREF
7118 ---help---
7119 This option turns on the -fstack-protector GCC feature. This
7120 feature puts, at the beginning of functions, a canary value on
7121@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7122 config PHYSICAL_START
7123 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7124 default "0x1000000"
7125+ range 0x400000 0x40000000
7126 ---help---
7127 This gives the physical address where the kernel is loaded.
7128
7129@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7130 hex
7131 prompt "Alignment value to which kernel should be aligned" if X86_32
7132 default "0x1000000"
7133+ range 0x400000 0x1000000 if PAX_KERNEXEC
7134 range 0x2000 0x1000000
7135 ---help---
7136 This value puts the alignment restrictions on physical address
7137@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7138 Say N if you want to disable CPU hotplug.
7139
7140 config COMPAT_VDSO
7141- def_bool y
7142+ def_bool n
7143 prompt "Compat VDSO support"
7144 depends on X86_32 || IA32_EMULATION
7145+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7146 ---help---
7147 Map the 32-bit VDSO to the predictable old-style address too.
7148 ---help---
7149diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7150index 0e566103..1a6b57e 100644
7151--- a/arch/x86/Kconfig.cpu
7152+++ b/arch/x86/Kconfig.cpu
7153@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
7154
7155 config X86_F00F_BUG
7156 def_bool y
7157- depends on M586MMX || M586TSC || M586 || M486 || M386
7158+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7159
7160 config X86_WP_WORKS_OK
7161 def_bool y
7162@@ -360,7 +360,7 @@ config X86_POPAD_OK
7163
7164 config X86_ALIGNMENT_16
7165 def_bool y
7166- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7167+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7168
7169 config X86_INTEL_USERCOPY
7170 def_bool y
7171@@ -406,7 +406,7 @@ config X86_CMPXCHG64
7172 # generates cmov.
7173 config X86_CMOV
7174 def_bool y
7175- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7176+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
7177
7178 config X86_MINIMUM_CPU_FAMILY
7179 int
7180diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7181index d105f29..c928727 100644
7182--- a/arch/x86/Kconfig.debug
7183+++ b/arch/x86/Kconfig.debug
7184@@ -99,7 +99,7 @@ config X86_PTDUMP
7185 config DEBUG_RODATA
7186 bool "Write protect kernel read-only data structures"
7187 default y
7188- depends on DEBUG_KERNEL
7189+ depends on DEBUG_KERNEL && BROKEN
7190 ---help---
7191 Mark the kernel read-only data as write-protected in the pagetables,
7192 in order to catch accidental (and incorrect) writes to such const
7193diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7194index d2d24c9..0f21f8d 100644
7195--- a/arch/x86/Makefile
7196+++ b/arch/x86/Makefile
7197@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
7198 else
7199 BITS := 64
7200 UTS_MACHINE := x86_64
7201+ biarch := $(call cc-option,-m64)
7202 CHECKFLAGS += -D__x86_64__ -m64
7203
7204 KBUILD_AFLAGS += -m64
7205@@ -189,3 +190,12 @@ define archhelp
7206 echo ' FDARGS="..." arguments for the booted kernel'
7207 echo ' FDINITRD=file initrd for the booted kernel'
7208 endef
7209+
7210+define OLD_LD
7211+
7212+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7213+*** Please upgrade your binutils to 2.18 or newer
7214+endef
7215+
7216+archprepare:
7217+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7218diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7219index ec749c2..bbb5319 100644
7220--- a/arch/x86/boot/Makefile
7221+++ b/arch/x86/boot/Makefile
7222@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7223 $(call cc-option, -fno-stack-protector) \
7224 $(call cc-option, -mpreferred-stack-boundary=2)
7225 KBUILD_CFLAGS += $(call cc-option, -m32)
7226+ifdef CONSTIFY_PLUGIN
7227+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7228+endif
7229 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7230 GCOV_PROFILE := n
7231
7232diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7233index 878e4b9..20537ab 100644
7234--- a/arch/x86/boot/bitops.h
7235+++ b/arch/x86/boot/bitops.h
7236@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7237 u8 v;
7238 const u32 *p = (const u32 *)addr;
7239
7240- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7241+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7242 return v;
7243 }
7244
7245@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7246
7247 static inline void set_bit(int nr, void *addr)
7248 {
7249- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7250+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7251 }
7252
7253 #endif /* BOOT_BITOPS_H */
7254diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7255index 98239d2..f40214c 100644
7256--- a/arch/x86/boot/boot.h
7257+++ b/arch/x86/boot/boot.h
7258@@ -82,7 +82,7 @@ static inline void io_delay(void)
7259 static inline u16 ds(void)
7260 {
7261 u16 seg;
7262- asm("movw %%ds,%0" : "=rm" (seg));
7263+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7264 return seg;
7265 }
7266
7267@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7268 static inline int memcmp(const void *s1, const void *s2, size_t len)
7269 {
7270 u8 diff;
7271- asm("repe; cmpsb; setnz %0"
7272+ asm volatile("repe; cmpsb; setnz %0"
7273 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7274 return diff;
7275 }
7276diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7277index f8ed065..5bf5ff3 100644
7278--- a/arch/x86/boot/compressed/Makefile
7279+++ b/arch/x86/boot/compressed/Makefile
7280@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7281 KBUILD_CFLAGS += $(cflags-y)
7282 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7283 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7284+ifdef CONSTIFY_PLUGIN
7285+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7286+endif
7287
7288 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7289 GCOV_PROFILE := n
7290diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7291index f543b70..b60fba8 100644
7292--- a/arch/x86/boot/compressed/head_32.S
7293+++ b/arch/x86/boot/compressed/head_32.S
7294@@ -76,7 +76,7 @@ ENTRY(startup_32)
7295 notl %eax
7296 andl %eax, %ebx
7297 #else
7298- movl $LOAD_PHYSICAL_ADDR, %ebx
7299+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7300 #endif
7301
7302 /* Target address to relocate to for decompression */
7303@@ -149,7 +149,7 @@ relocated:
7304 * and where it was actually loaded.
7305 */
7306 movl %ebp, %ebx
7307- subl $LOAD_PHYSICAL_ADDR, %ebx
7308+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7309 jz 2f /* Nothing to be done if loaded at compiled addr. */
7310 /*
7311 * Process relocations.
7312@@ -157,8 +157,7 @@ relocated:
7313
7314 1: subl $4, %edi
7315 movl (%edi), %ecx
7316- testl %ecx, %ecx
7317- jz 2f
7318+ jecxz 2f
7319 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7320 jmp 1b
7321 2:
7322diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7323index 077e1b6..2c6b13b 100644
7324--- a/arch/x86/boot/compressed/head_64.S
7325+++ b/arch/x86/boot/compressed/head_64.S
7326@@ -91,7 +91,7 @@ ENTRY(startup_32)
7327 notl %eax
7328 andl %eax, %ebx
7329 #else
7330- movl $LOAD_PHYSICAL_ADDR, %ebx
7331+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7332 #endif
7333
7334 /* Target address to relocate to for decompression */
7335@@ -183,7 +183,7 @@ no_longmode:
7336 hlt
7337 jmp 1b
7338
7339-#include "../../kernel/verify_cpu_64.S"
7340+#include "../../kernel/verify_cpu.S"
7341
7342 /*
7343 * Be careful here startup_64 needs to be at a predictable
7344@@ -234,7 +234,7 @@ ENTRY(startup_64)
7345 notq %rax
7346 andq %rax, %rbp
7347 #else
7348- movq $LOAD_PHYSICAL_ADDR, %rbp
7349+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7350 #endif
7351
7352 /* Target address to relocate to for decompression */
7353diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7354index 842b2a3..f00178b 100644
7355--- a/arch/x86/boot/compressed/misc.c
7356+++ b/arch/x86/boot/compressed/misc.c
7357@@ -288,7 +288,7 @@ static void parse_elf(void *output)
7358 case PT_LOAD:
7359 #ifdef CONFIG_RELOCATABLE
7360 dest = output;
7361- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7362+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7363 #else
7364 dest = (void *)(phdr->p_paddr);
7365 #endif
7366@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7367 error("Destination address too large");
7368 #endif
7369 #ifndef CONFIG_RELOCATABLE
7370- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7371+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7372 error("Wrong destination address");
7373 #endif
7374
7375diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7376index bcbd36c..b1754af 100644
7377--- a/arch/x86/boot/compressed/mkpiggy.c
7378+++ b/arch/x86/boot/compressed/mkpiggy.c
7379@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7380
7381 offs = (olen > ilen) ? olen - ilen : 0;
7382 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7383- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7384+ offs += 64*1024; /* Add 64K bytes slack */
7385 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7386
7387 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7388diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7389index bbeb0c3..f5167ab 100644
7390--- a/arch/x86/boot/compressed/relocs.c
7391+++ b/arch/x86/boot/compressed/relocs.c
7392@@ -10,8 +10,11 @@
7393 #define USE_BSD
7394 #include <endian.h>
7395
7396+#include "../../../../include/linux/autoconf.h"
7397+
7398 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7399 static Elf32_Ehdr ehdr;
7400+static Elf32_Phdr *phdr;
7401 static unsigned long reloc_count, reloc_idx;
7402 static unsigned long *relocs;
7403
7404@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7405
7406 static int is_safe_abs_reloc(const char* sym_name)
7407 {
7408- int i;
7409+ unsigned int i;
7410
7411 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7412 if (!strcmp(sym_name, safe_abs_relocs[i]))
7413@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7414 }
7415 }
7416
7417+static void read_phdrs(FILE *fp)
7418+{
7419+ unsigned int i;
7420+
7421+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7422+ if (!phdr) {
7423+ die("Unable to allocate %d program headers\n",
7424+ ehdr.e_phnum);
7425+ }
7426+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7427+ die("Seek to %d failed: %s\n",
7428+ ehdr.e_phoff, strerror(errno));
7429+ }
7430+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7431+ die("Cannot read ELF program headers: %s\n",
7432+ strerror(errno));
7433+ }
7434+ for(i = 0; i < ehdr.e_phnum; i++) {
7435+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7436+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7437+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7438+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7439+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7440+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7441+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7442+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7443+ }
7444+
7445+}
7446+
7447 static void read_shdrs(FILE *fp)
7448 {
7449- int i;
7450+ unsigned int i;
7451 Elf32_Shdr shdr;
7452
7453 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7454@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7455
7456 static void read_strtabs(FILE *fp)
7457 {
7458- int i;
7459+ unsigned int i;
7460 for (i = 0; i < ehdr.e_shnum; i++) {
7461 struct section *sec = &secs[i];
7462 if (sec->shdr.sh_type != SHT_STRTAB) {
7463@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7464
7465 static void read_symtabs(FILE *fp)
7466 {
7467- int i,j;
7468+ unsigned int i,j;
7469 for (i = 0; i < ehdr.e_shnum; i++) {
7470 struct section *sec = &secs[i];
7471 if (sec->shdr.sh_type != SHT_SYMTAB) {
7472@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7473
7474 static void read_relocs(FILE *fp)
7475 {
7476- int i,j;
7477+ unsigned int i,j;
7478+ uint32_t base;
7479+
7480 for (i = 0; i < ehdr.e_shnum; i++) {
7481 struct section *sec = &secs[i];
7482 if (sec->shdr.sh_type != SHT_REL) {
7483@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7484 die("Cannot read symbol table: %s\n",
7485 strerror(errno));
7486 }
7487+ base = 0;
7488+ for (j = 0; j < ehdr.e_phnum; j++) {
7489+ if (phdr[j].p_type != PT_LOAD )
7490+ continue;
7491+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7492+ continue;
7493+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7494+ break;
7495+ }
7496 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7497 Elf32_Rel *rel = &sec->reltab[j];
7498- rel->r_offset = elf32_to_cpu(rel->r_offset);
7499+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7500 rel->r_info = elf32_to_cpu(rel->r_info);
7501 }
7502 }
7503@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7504
7505 static void print_absolute_symbols(void)
7506 {
7507- int i;
7508+ unsigned int i;
7509 printf("Absolute symbols\n");
7510 printf(" Num: Value Size Type Bind Visibility Name\n");
7511 for (i = 0; i < ehdr.e_shnum; i++) {
7512 struct section *sec = &secs[i];
7513 char *sym_strtab;
7514 Elf32_Sym *sh_symtab;
7515- int j;
7516+ unsigned int j;
7517
7518 if (sec->shdr.sh_type != SHT_SYMTAB) {
7519 continue;
7520@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7521
7522 static void print_absolute_relocs(void)
7523 {
7524- int i, printed = 0;
7525+ unsigned int i, printed = 0;
7526
7527 for (i = 0; i < ehdr.e_shnum; i++) {
7528 struct section *sec = &secs[i];
7529 struct section *sec_applies, *sec_symtab;
7530 char *sym_strtab;
7531 Elf32_Sym *sh_symtab;
7532- int j;
7533+ unsigned int j;
7534 if (sec->shdr.sh_type != SHT_REL) {
7535 continue;
7536 }
7537@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7538
7539 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7540 {
7541- int i;
7542+ unsigned int i;
7543 /* Walk through the relocations */
7544 for (i = 0; i < ehdr.e_shnum; i++) {
7545 char *sym_strtab;
7546 Elf32_Sym *sh_symtab;
7547 struct section *sec_applies, *sec_symtab;
7548- int j;
7549+ unsigned int j;
7550 struct section *sec = &secs[i];
7551
7552 if (sec->shdr.sh_type != SHT_REL) {
7553@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7554 if (sym->st_shndx == SHN_ABS) {
7555 continue;
7556 }
7557+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7558+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7559+ continue;
7560+
7561+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7562+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7563+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7564+ continue;
7565+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7566+ continue;
7567+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7568+ continue;
7569+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7570+ continue;
7571+#endif
7572 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7573 /*
7574 * NONE can be ignored and and PC relative
7575@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7576
7577 static void emit_relocs(int as_text)
7578 {
7579- int i;
7580+ unsigned int i;
7581 /* Count how many relocations I have and allocate space for them. */
7582 reloc_count = 0;
7583 walk_relocs(count_reloc);
7584@@ -634,6 +693,7 @@ int main(int argc, char **argv)
7585 fname, strerror(errno));
7586 }
7587 read_ehdr(fp);
7588+ read_phdrs(fp);
7589 read_shdrs(fp);
7590 read_strtabs(fp);
7591 read_symtabs(fp);
7592diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7593index 4d3ff03..e4972ff 100644
7594--- a/arch/x86/boot/cpucheck.c
7595+++ b/arch/x86/boot/cpucheck.c
7596@@ -74,7 +74,7 @@ static int has_fpu(void)
7597 u16 fcw = -1, fsw = -1;
7598 u32 cr0;
7599
7600- asm("movl %%cr0,%0" : "=r" (cr0));
7601+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7602 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7603 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7604 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7605@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7606 {
7607 u32 f0, f1;
7608
7609- asm("pushfl ; "
7610+ asm volatile("pushfl ; "
7611 "pushfl ; "
7612 "popl %0 ; "
7613 "movl %0,%1 ; "
7614@@ -115,7 +115,7 @@ static void get_flags(void)
7615 set_bit(X86_FEATURE_FPU, cpu.flags);
7616
7617 if (has_eflag(X86_EFLAGS_ID)) {
7618- asm("cpuid"
7619+ asm volatile("cpuid"
7620 : "=a" (max_intel_level),
7621 "=b" (cpu_vendor[0]),
7622 "=d" (cpu_vendor[1]),
7623@@ -124,7 +124,7 @@ static void get_flags(void)
7624
7625 if (max_intel_level >= 0x00000001 &&
7626 max_intel_level <= 0x0000ffff) {
7627- asm("cpuid"
7628+ asm volatile("cpuid"
7629 : "=a" (tfms),
7630 "=c" (cpu.flags[4]),
7631 "=d" (cpu.flags[0])
7632@@ -136,7 +136,7 @@ static void get_flags(void)
7633 cpu.model += ((tfms >> 16) & 0xf) << 4;
7634 }
7635
7636- asm("cpuid"
7637+ asm volatile("cpuid"
7638 : "=a" (max_amd_level)
7639 : "a" (0x80000000)
7640 : "ebx", "ecx", "edx");
7641@@ -144,7 +144,7 @@ static void get_flags(void)
7642 if (max_amd_level >= 0x80000001 &&
7643 max_amd_level <= 0x8000ffff) {
7644 u32 eax = 0x80000001;
7645- asm("cpuid"
7646+ asm volatile("cpuid"
7647 : "+a" (eax),
7648 "=c" (cpu.flags[6]),
7649 "=d" (cpu.flags[1])
7650@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7651 u32 ecx = MSR_K7_HWCR;
7652 u32 eax, edx;
7653
7654- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7655+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7656 eax &= ~(1 << 15);
7657- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7658+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7659
7660 get_flags(); /* Make sure it really did something */
7661 err = check_flags();
7662@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7663 u32 ecx = MSR_VIA_FCR;
7664 u32 eax, edx;
7665
7666- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7667+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7668 eax |= (1<<1)|(1<<7);
7669- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7670+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7671
7672 set_bit(X86_FEATURE_CX8, cpu.flags);
7673 err = check_flags();
7674@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7675 u32 eax, edx;
7676 u32 level = 1;
7677
7678- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7679- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7680- asm("cpuid"
7681+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7682+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7683+ asm volatile("cpuid"
7684 : "+a" (level), "=d" (cpu.flags[0])
7685 : : "ecx", "ebx");
7686- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7687+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7688
7689 err = check_flags();
7690 }
7691diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7692index b31cc54..8d69237 100644
7693--- a/arch/x86/boot/header.S
7694+++ b/arch/x86/boot/header.S
7695@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7696 # single linked list of
7697 # struct setup_data
7698
7699-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7700+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7701
7702 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7703 #define VO_INIT_SIZE (VO__end - VO__text)
7704diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7705index cae3feb..ff8ff2a 100644
7706--- a/arch/x86/boot/memory.c
7707+++ b/arch/x86/boot/memory.c
7708@@ -19,7 +19,7 @@
7709
7710 static int detect_memory_e820(void)
7711 {
7712- int count = 0;
7713+ unsigned int count = 0;
7714 struct biosregs ireg, oreg;
7715 struct e820entry *desc = boot_params.e820_map;
7716 static struct e820entry buf; /* static so it is zeroed */
7717diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7718index 11e8c6e..fdbb1ed 100644
7719--- a/arch/x86/boot/video-vesa.c
7720+++ b/arch/x86/boot/video-vesa.c
7721@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7722
7723 boot_params.screen_info.vesapm_seg = oreg.es;
7724 boot_params.screen_info.vesapm_off = oreg.di;
7725+ boot_params.screen_info.vesapm_size = oreg.cx;
7726 }
7727
7728 /*
7729diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7730index d42da38..787cdf3 100644
7731--- a/arch/x86/boot/video.c
7732+++ b/arch/x86/boot/video.c
7733@@ -90,7 +90,7 @@ static void store_mode_params(void)
7734 static unsigned int get_entry(void)
7735 {
7736 char entry_buf[4];
7737- int i, len = 0;
7738+ unsigned int i, len = 0;
7739 int key;
7740 unsigned int v;
7741
7742diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7743index 5b577d5..3c1fed4 100644
7744--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7745+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7746@@ -8,6 +8,8 @@
7747 * including this sentence is retained in full.
7748 */
7749
7750+#include <asm/alternative-asm.h>
7751+
7752 .extern crypto_ft_tab
7753 .extern crypto_it_tab
7754 .extern crypto_fl_tab
7755@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7756 je B192; \
7757 leaq 32(r9),r9;
7758
7759+#define ret pax_force_retaddr 0, 1; ret
7760+
7761 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7762 movq r1,r2; \
7763 movq r3,r4; \
7764diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7765index eb0566e..e3ebad8 100644
7766--- a/arch/x86/crypto/aesni-intel_asm.S
7767+++ b/arch/x86/crypto/aesni-intel_asm.S
7768@@ -16,6 +16,7 @@
7769 */
7770
7771 #include <linux/linkage.h>
7772+#include <asm/alternative-asm.h>
7773
7774 .text
7775
7776@@ -52,6 +53,7 @@ _key_expansion_256a:
7777 pxor %xmm1, %xmm0
7778 movaps %xmm0, (%rcx)
7779 add $0x10, %rcx
7780+ pax_force_retaddr_bts
7781 ret
7782
7783 _key_expansion_192a:
7784@@ -75,6 +77,7 @@ _key_expansion_192a:
7785 shufps $0b01001110, %xmm2, %xmm1
7786 movaps %xmm1, 16(%rcx)
7787 add $0x20, %rcx
7788+ pax_force_retaddr_bts
7789 ret
7790
7791 _key_expansion_192b:
7792@@ -93,6 +96,7 @@ _key_expansion_192b:
7793
7794 movaps %xmm0, (%rcx)
7795 add $0x10, %rcx
7796+ pax_force_retaddr_bts
7797 ret
7798
7799 _key_expansion_256b:
7800@@ -104,6 +108,7 @@ _key_expansion_256b:
7801 pxor %xmm1, %xmm2
7802 movaps %xmm2, (%rcx)
7803 add $0x10, %rcx
7804+ pax_force_retaddr_bts
7805 ret
7806
7807 /*
7808@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7809 cmp %rcx, %rdi
7810 jb .Ldec_key_loop
7811 xor %rax, %rax
7812+ pax_force_retaddr 0, 1
7813 ret
7814+ENDPROC(aesni_set_key)
7815
7816 /*
7817 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7818@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7819 movups (INP), STATE # input
7820 call _aesni_enc1
7821 movups STATE, (OUTP) # output
7822+ pax_force_retaddr 0, 1
7823 ret
7824+ENDPROC(aesni_enc)
7825
7826 /*
7827 * _aesni_enc1: internal ABI
7828@@ -319,6 +328,7 @@ _aesni_enc1:
7829 movaps 0x70(TKEYP), KEY
7830 # aesenclast KEY, STATE # last round
7831 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7832+ pax_force_retaddr_bts
7833 ret
7834
7835 /*
7836@@ -482,6 +492,7 @@ _aesni_enc4:
7837 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7838 # aesenclast KEY, STATE4
7839 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7840+ pax_force_retaddr_bts
7841 ret
7842
7843 /*
7844@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7845 movups (INP), STATE # input
7846 call _aesni_dec1
7847 movups STATE, (OUTP) #output
7848+ pax_force_retaddr 0, 1
7849 ret
7850+ENDPROC(aesni_dec)
7851
7852 /*
7853 * _aesni_dec1: internal ABI
7854@@ -563,6 +576,7 @@ _aesni_dec1:
7855 movaps 0x70(TKEYP), KEY
7856 # aesdeclast KEY, STATE # last round
7857 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7858+ pax_force_retaddr_bts
7859 ret
7860
7861 /*
7862@@ -726,6 +740,7 @@ _aesni_dec4:
7863 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7864 # aesdeclast KEY, STATE4
7865 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7866+ pax_force_retaddr_bts
7867 ret
7868
7869 /*
7870@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7871 cmp $16, LEN
7872 jge .Lecb_enc_loop1
7873 .Lecb_enc_ret:
7874+ pax_force_retaddr 0, 1
7875 ret
7876+ENDPROC(aesni_ecb_enc)
7877
7878 /*
7879 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7880@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7881 cmp $16, LEN
7882 jge .Lecb_dec_loop1
7883 .Lecb_dec_ret:
7884+ pax_force_retaddr 0, 1
7885 ret
7886+ENDPROC(aesni_ecb_dec)
7887
7888 /*
7889 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7890@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7891 jge .Lcbc_enc_loop
7892 movups STATE, (IVP)
7893 .Lcbc_enc_ret:
7894+ pax_force_retaddr 0, 1
7895 ret
7896+ENDPROC(aesni_cbc_enc)
7897
7898 /*
7899 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7900@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7901 .Lcbc_dec_ret:
7902 movups IV, (IVP)
7903 .Lcbc_dec_just_ret:
7904+ pax_force_retaddr 0, 1
7905 ret
7906+ENDPROC(aesni_cbc_dec)
7907diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7908index 6214a9b..1f4fc9a 100644
7909--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7910+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7911@@ -1,3 +1,5 @@
7912+#include <asm/alternative-asm.h>
7913+
7914 # enter ECRYPT_encrypt_bytes
7915 .text
7916 .p2align 5
7917@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7918 add %r11,%rsp
7919 mov %rdi,%rax
7920 mov %rsi,%rdx
7921+ pax_force_retaddr 0, 1
7922 ret
7923 # bytesatleast65:
7924 ._bytesatleast65:
7925@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7926 add %r11,%rsp
7927 mov %rdi,%rax
7928 mov %rsi,%rdx
7929+ pax_force_retaddr
7930 ret
7931 # enter ECRYPT_ivsetup
7932 .text
7933@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7934 add %r11,%rsp
7935 mov %rdi,%rax
7936 mov %rsi,%rdx
7937+ pax_force_retaddr
7938 ret
7939diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7940index 35974a5..5662ae2 100644
7941--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7942+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7943@@ -21,6 +21,7 @@
7944 .text
7945
7946 #include <asm/asm-offsets.h>
7947+#include <asm/alternative-asm.h>
7948
7949 #define a_offset 0
7950 #define b_offset 4
7951@@ -269,6 +270,7 @@ twofish_enc_blk:
7952
7953 popq R1
7954 movq $1,%rax
7955+ pax_force_retaddr 0, 1
7956 ret
7957
7958 twofish_dec_blk:
7959@@ -321,4 +323,5 @@ twofish_dec_blk:
7960
7961 popq R1
7962 movq $1,%rax
7963+ pax_force_retaddr 0, 1
7964 ret
7965diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7966index 14531ab..a89a0c0 100644
7967--- a/arch/x86/ia32/ia32_aout.c
7968+++ b/arch/x86/ia32/ia32_aout.c
7969@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7970 unsigned long dump_start, dump_size;
7971 struct user32 dump;
7972
7973+ memset(&dump, 0, sizeof(dump));
7974+
7975 fs = get_fs();
7976 set_fs(KERNEL_DS);
7977 has_dumped = 1;
7978@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7979 dump_size = dump.u_ssize << PAGE_SHIFT;
7980 DUMP_WRITE(dump_start, dump_size);
7981 }
7982- /*
7983- * Finally dump the task struct. Not be used by gdb, but
7984- * could be useful
7985- */
7986- set_fs(KERNEL_DS);
7987- DUMP_WRITE(current, sizeof(*current));
7988 end_coredump:
7989 set_fs(fs);
7990 return has_dumped;
7991diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7992index 588a7aa..a3468b0 100644
7993--- a/arch/x86/ia32/ia32_signal.c
7994+++ b/arch/x86/ia32/ia32_signal.c
7995@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7996 }
7997 seg = get_fs();
7998 set_fs(KERNEL_DS);
7999- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8000+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8001 set_fs(seg);
8002 if (ret >= 0 && uoss_ptr) {
8003 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8004@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8005 */
8006 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8007 size_t frame_size,
8008- void **fpstate)
8009+ void __user **fpstate)
8010 {
8011 unsigned long sp;
8012
8013@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8014
8015 if (used_math()) {
8016 sp = sp - sig_xstate_ia32_size;
8017- *fpstate = (struct _fpstate_ia32 *) sp;
8018+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8019 if (save_i387_xstate_ia32(*fpstate) < 0)
8020 return (void __user *) -1L;
8021 }
8022@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8023 sp -= frame_size;
8024 /* Align the stack pointer according to the i386 ABI,
8025 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8026- sp = ((sp + 4) & -16ul) - 4;
8027+ sp = ((sp - 12) & -16ul) - 4;
8028 return (void __user *) sp;
8029 }
8030
8031@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8032 * These are actually not used anymore, but left because some
8033 * gdb versions depend on them as a marker.
8034 */
8035- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8036+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8037 } put_user_catch(err);
8038
8039 if (err)
8040@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8041 0xb8,
8042 __NR_ia32_rt_sigreturn,
8043 0x80cd,
8044- 0,
8045+ 0
8046 };
8047
8048 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8049@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8050
8051 if (ka->sa.sa_flags & SA_RESTORER)
8052 restorer = ka->sa.sa_restorer;
8053+ else if (current->mm->context.vdso)
8054+ /* Return stub is in 32bit vsyscall page */
8055+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8056 else
8057- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8058- rt_sigreturn);
8059+ restorer = &frame->retcode;
8060 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8061
8062 /*
8063 * Not actually used anymore, but left because some gdb
8064 * versions need it.
8065 */
8066- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8067+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8068 } put_user_catch(err);
8069
8070 if (err)
8071diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8072index 4edd8eb..29124b4 100644
8073--- a/arch/x86/ia32/ia32entry.S
8074+++ b/arch/x86/ia32/ia32entry.S
8075@@ -13,7 +13,9 @@
8076 #include <asm/thread_info.h>
8077 #include <asm/segment.h>
8078 #include <asm/irqflags.h>
8079+#include <asm/pgtable.h>
8080 #include <linux/linkage.h>
8081+#include <asm/alternative-asm.h>
8082
8083 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8084 #include <linux/elf-em.h>
8085@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8086 ENDPROC(native_irq_enable_sysexit)
8087 #endif
8088
8089+ .macro pax_enter_kernel_user
8090+ pax_set_fptr_mask
8091+#ifdef CONFIG_PAX_MEMORY_UDEREF
8092+ call pax_enter_kernel_user
8093+#endif
8094+ .endm
8095+
8096+ .macro pax_exit_kernel_user
8097+#ifdef CONFIG_PAX_MEMORY_UDEREF
8098+ call pax_exit_kernel_user
8099+#endif
8100+#ifdef CONFIG_PAX_RANDKSTACK
8101+ pushq %rax
8102+ pushq %r11
8103+ call pax_randomize_kstack
8104+ popq %r11
8105+ popq %rax
8106+#endif
8107+ .endm
8108+
8109+.macro pax_erase_kstack
8110+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8111+ call pax_erase_kstack
8112+#endif
8113+.endm
8114+
8115 /*
8116 * 32bit SYSENTER instruction entry.
8117 *
8118@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
8119 CFI_REGISTER rsp,rbp
8120 SWAPGS_UNSAFE_STACK
8121 movq PER_CPU_VAR(kernel_stack), %rsp
8122- addq $(KERNEL_STACK_OFFSET),%rsp
8123- /*
8124- * No need to follow this irqs on/off section: the syscall
8125- * disabled irqs, here we enable it straight after entry:
8126- */
8127- ENABLE_INTERRUPTS(CLBR_NONE)
8128 movl %ebp,%ebp /* zero extension */
8129 pushq $__USER32_DS
8130 CFI_ADJUST_CFA_OFFSET 8
8131@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
8132 pushfq
8133 CFI_ADJUST_CFA_OFFSET 8
8134 /*CFI_REL_OFFSET rflags,0*/
8135- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
8136- CFI_REGISTER rip,r10
8137+ orl $X86_EFLAGS_IF,(%rsp)
8138+ GET_THREAD_INFO(%r11)
8139+ movl TI_sysenter_return(%r11), %r11d
8140+ CFI_REGISTER rip,r11
8141 pushq $__USER32_CS
8142 CFI_ADJUST_CFA_OFFSET 8
8143 /*CFI_REL_OFFSET cs,0*/
8144 movl %eax, %eax
8145- pushq %r10
8146+ pushq %r11
8147 CFI_ADJUST_CFA_OFFSET 8
8148 CFI_REL_OFFSET rip,0
8149 pushq %rax
8150 CFI_ADJUST_CFA_OFFSET 8
8151 cld
8152 SAVE_ARGS 0,0,1
8153+ pax_enter_kernel_user
8154+ /*
8155+ * No need to follow this irqs on/off section: the syscall
8156+ * disabled irqs, here we enable it straight after entry:
8157+ */
8158+ ENABLE_INTERRUPTS(CLBR_NONE)
8159 /* no need to do an access_ok check here because rbp has been
8160 32bit zero extended */
8161+
8162+#ifdef CONFIG_PAX_MEMORY_UDEREF
8163+ mov $PAX_USER_SHADOW_BASE,%r11
8164+ add %r11,%rbp
8165+#endif
8166+
8167 1: movl (%rbp),%ebp
8168 .section __ex_table,"a"
8169 .quad 1b,ia32_badarg
8170 .previous
8171- GET_THREAD_INFO(%r10)
8172- orl $TS_COMPAT,TI_status(%r10)
8173- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8174+ GET_THREAD_INFO(%r11)
8175+ orl $TS_COMPAT,TI_status(%r11)
8176+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8177 CFI_REMEMBER_STATE
8178 jnz sysenter_tracesys
8179 cmpq $(IA32_NR_syscalls-1),%rax
8180@@ -166,13 +202,15 @@ sysenter_do_call:
8181 sysenter_dispatch:
8182 call *ia32_sys_call_table(,%rax,8)
8183 movq %rax,RAX-ARGOFFSET(%rsp)
8184- GET_THREAD_INFO(%r10)
8185+ GET_THREAD_INFO(%r11)
8186 DISABLE_INTERRUPTS(CLBR_NONE)
8187 TRACE_IRQS_OFF
8188- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8189+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8190 jnz sysexit_audit
8191 sysexit_from_sys_call:
8192- andl $~TS_COMPAT,TI_status(%r10)
8193+ pax_exit_kernel_user
8194+ pax_erase_kstack
8195+ andl $~TS_COMPAT,TI_status(%r11)
8196 /* clear IF, that popfq doesn't enable interrupts early */
8197 andl $~0x200,EFLAGS-R11(%rsp)
8198 movl RIP-R11(%rsp),%edx /* User %eip */
8199@@ -200,6 +238,9 @@ sysexit_from_sys_call:
8200 movl %eax,%esi /* 2nd arg: syscall number */
8201 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8202 call audit_syscall_entry
8203+
8204+ pax_erase_kstack
8205+
8206 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8207 cmpq $(IA32_NR_syscalls-1),%rax
8208 ja ia32_badsys
8209@@ -211,7 +252,7 @@ sysexit_from_sys_call:
8210 .endm
8211
8212 .macro auditsys_exit exit
8213- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8214+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8215 jnz ia32_ret_from_sys_call
8216 TRACE_IRQS_ON
8217 sti
8218@@ -221,12 +262,12 @@ sysexit_from_sys_call:
8219 movzbl %al,%edi /* zero-extend that into %edi */
8220 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8221 call audit_syscall_exit
8222- GET_THREAD_INFO(%r10)
8223+ GET_THREAD_INFO(%r11)
8224 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8225 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8226 cli
8227 TRACE_IRQS_OFF
8228- testl %edi,TI_flags(%r10)
8229+ testl %edi,TI_flags(%r11)
8230 jz \exit
8231 CLEAR_RREGS -ARGOFFSET
8232 jmp int_with_check
8233@@ -244,7 +285,7 @@ sysexit_audit:
8234
8235 sysenter_tracesys:
8236 #ifdef CONFIG_AUDITSYSCALL
8237- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8238+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8239 jz sysenter_auditsys
8240 #endif
8241 SAVE_REST
8242@@ -252,6 +293,9 @@ sysenter_tracesys:
8243 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8244 movq %rsp,%rdi /* &pt_regs -> arg1 */
8245 call syscall_trace_enter
8246+
8247+ pax_erase_kstack
8248+
8249 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8250 RESTORE_REST
8251 cmpq $(IA32_NR_syscalls-1),%rax
8252@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8253 ENTRY(ia32_cstar_target)
8254 CFI_STARTPROC32 simple
8255 CFI_SIGNAL_FRAME
8256- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8257+ CFI_DEF_CFA rsp,0
8258 CFI_REGISTER rip,rcx
8259 /*CFI_REGISTER rflags,r11*/
8260 SWAPGS_UNSAFE_STACK
8261 movl %esp,%r8d
8262 CFI_REGISTER rsp,r8
8263 movq PER_CPU_VAR(kernel_stack),%rsp
8264+ SAVE_ARGS 8*6,1,1
8265+ pax_enter_kernel_user
8266 /*
8267 * No need to follow this irqs on/off section: the syscall
8268 * disabled irqs and here we enable it straight after entry:
8269 */
8270 ENABLE_INTERRUPTS(CLBR_NONE)
8271- SAVE_ARGS 8,1,1
8272 movl %eax,%eax /* zero extension */
8273 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8274 movq %rcx,RIP-ARGOFFSET(%rsp)
8275@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8276 /* no need to do an access_ok check here because r8 has been
8277 32bit zero extended */
8278 /* hardware stack frame is complete now */
8279+
8280+#ifdef CONFIG_PAX_MEMORY_UDEREF
8281+ mov $PAX_USER_SHADOW_BASE,%r11
8282+ add %r11,%r8
8283+#endif
8284+
8285 1: movl (%r8),%r9d
8286 .section __ex_table,"a"
8287 .quad 1b,ia32_badarg
8288 .previous
8289- GET_THREAD_INFO(%r10)
8290- orl $TS_COMPAT,TI_status(%r10)
8291- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8292+ GET_THREAD_INFO(%r11)
8293+ orl $TS_COMPAT,TI_status(%r11)
8294+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8295 CFI_REMEMBER_STATE
8296 jnz cstar_tracesys
8297 cmpq $IA32_NR_syscalls-1,%rax
8298@@ -327,13 +378,15 @@ cstar_do_call:
8299 cstar_dispatch:
8300 call *ia32_sys_call_table(,%rax,8)
8301 movq %rax,RAX-ARGOFFSET(%rsp)
8302- GET_THREAD_INFO(%r10)
8303+ GET_THREAD_INFO(%r11)
8304 DISABLE_INTERRUPTS(CLBR_NONE)
8305 TRACE_IRQS_OFF
8306- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8307+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8308 jnz sysretl_audit
8309 sysretl_from_sys_call:
8310- andl $~TS_COMPAT,TI_status(%r10)
8311+ pax_exit_kernel_user
8312+ pax_erase_kstack
8313+ andl $~TS_COMPAT,TI_status(%r11)
8314 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8315 movl RIP-ARGOFFSET(%rsp),%ecx
8316 CFI_REGISTER rip,rcx
8317@@ -361,7 +414,7 @@ sysretl_audit:
8318
8319 cstar_tracesys:
8320 #ifdef CONFIG_AUDITSYSCALL
8321- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8322+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8323 jz cstar_auditsys
8324 #endif
8325 xchgl %r9d,%ebp
8326@@ -370,6 +423,9 @@ cstar_tracesys:
8327 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8328 movq %rsp,%rdi /* &pt_regs -> arg1 */
8329 call syscall_trace_enter
8330+
8331+ pax_erase_kstack
8332+
8333 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8334 RESTORE_REST
8335 xchgl %ebp,%r9d
8336@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8337 CFI_REL_OFFSET rip,RIP-RIP
8338 PARAVIRT_ADJUST_EXCEPTION_FRAME
8339 SWAPGS
8340- /*
8341- * No need to follow this irqs on/off section: the syscall
8342- * disabled irqs and here we enable it straight after entry:
8343- */
8344- ENABLE_INTERRUPTS(CLBR_NONE)
8345 movl %eax,%eax
8346 pushq %rax
8347 CFI_ADJUST_CFA_OFFSET 8
8348@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8349 /* note the registers are not zero extended to the sf.
8350 this could be a problem. */
8351 SAVE_ARGS 0,0,1
8352- GET_THREAD_INFO(%r10)
8353- orl $TS_COMPAT,TI_status(%r10)
8354- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8355+ pax_enter_kernel_user
8356+ /*
8357+ * No need to follow this irqs on/off section: the syscall
8358+ * disabled irqs and here we enable it straight after entry:
8359+ */
8360+ ENABLE_INTERRUPTS(CLBR_NONE)
8361+ GET_THREAD_INFO(%r11)
8362+ orl $TS_COMPAT,TI_status(%r11)
8363+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8364 jnz ia32_tracesys
8365 cmpq $(IA32_NR_syscalls-1),%rax
8366 ja ia32_badsys
8367@@ -448,6 +505,9 @@ ia32_tracesys:
8368 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8369 movq %rsp,%rdi /* &pt_regs -> arg1 */
8370 call syscall_trace_enter
8371+
8372+ pax_erase_kstack
8373+
8374 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8375 RESTORE_REST
8376 cmpq $(IA32_NR_syscalls-1),%rax
8377@@ -462,6 +522,7 @@ ia32_badsys:
8378
8379 quiet_ni_syscall:
8380 movq $-ENOSYS,%rax
8381+ pax_force_retaddr
8382 ret
8383 CFI_ENDPROC
8384
8385diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8386index 016218c..47ccbdd 100644
8387--- a/arch/x86/ia32/sys_ia32.c
8388+++ b/arch/x86/ia32/sys_ia32.c
8389@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8390 */
8391 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8392 {
8393- typeof(ubuf->st_uid) uid = 0;
8394- typeof(ubuf->st_gid) gid = 0;
8395+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8396+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8397 SET_UID(uid, stat->uid);
8398 SET_GID(gid, stat->gid);
8399 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8400@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8401 }
8402 set_fs(KERNEL_DS);
8403 ret = sys_rt_sigprocmask(how,
8404- set ? (sigset_t __user *)&s : NULL,
8405- oset ? (sigset_t __user *)&s : NULL,
8406+ set ? (sigset_t __force_user *)&s : NULL,
8407+ oset ? (sigset_t __force_user *)&s : NULL,
8408 sigsetsize);
8409 set_fs(old_fs);
8410 if (ret)
8411@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8412 mm_segment_t old_fs = get_fs();
8413
8414 set_fs(KERNEL_DS);
8415- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8416+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8417 set_fs(old_fs);
8418 if (put_compat_timespec(&t, interval))
8419 return -EFAULT;
8420@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8421 mm_segment_t old_fs = get_fs();
8422
8423 set_fs(KERNEL_DS);
8424- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8425+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8426 set_fs(old_fs);
8427 if (!ret) {
8428 switch (_NSIG_WORDS) {
8429@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8430 if (copy_siginfo_from_user32(&info, uinfo))
8431 return -EFAULT;
8432 set_fs(KERNEL_DS);
8433- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8434+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8435 set_fs(old_fs);
8436 return ret;
8437 }
8438@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8439 return -EFAULT;
8440
8441 set_fs(KERNEL_DS);
8442- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8443+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8444 count);
8445 set_fs(old_fs);
8446
8447diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8448index e2077d3..17d07ad 100644
8449--- a/arch/x86/include/asm/alternative-asm.h
8450+++ b/arch/x86/include/asm/alternative-asm.h
8451@@ -8,10 +8,10 @@
8452
8453 #ifdef CONFIG_SMP
8454 .macro LOCK_PREFIX
8455-1: lock
8456+672: lock
8457 .section .smp_locks,"a"
8458 .align 4
8459- X86_ALIGN 1b
8460+ X86_ALIGN 672b
8461 .previous
8462 .endm
8463 #else
8464@@ -19,4 +19,43 @@
8465 .endm
8466 #endif
8467
8468+#ifdef KERNEXEC_PLUGIN
8469+ .macro pax_force_retaddr_bts rip=0
8470+ btsq $63,\rip(%rsp)
8471+ .endm
8472+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8473+ .macro pax_force_retaddr rip=0, reload=0
8474+ btsq $63,\rip(%rsp)
8475+ .endm
8476+ .macro pax_force_fptr ptr
8477+ btsq $63,\ptr
8478+ .endm
8479+ .macro pax_set_fptr_mask
8480+ .endm
8481+#endif
8482+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8483+ .macro pax_force_retaddr rip=0, reload=0
8484+ .if \reload
8485+ pax_set_fptr_mask
8486+ .endif
8487+ orq %r10,\rip(%rsp)
8488+ .endm
8489+ .macro pax_force_fptr ptr
8490+ orq %r10,\ptr
8491+ .endm
8492+ .macro pax_set_fptr_mask
8493+ movabs $0x8000000000000000,%r10
8494+ .endm
8495+#endif
8496+#else
8497+ .macro pax_force_retaddr rip=0, reload=0
8498+ .endm
8499+ .macro pax_force_fptr ptr
8500+ .endm
8501+ .macro pax_force_retaddr_bts rip=0
8502+ .endm
8503+ .macro pax_set_fptr_mask
8504+ .endm
8505+#endif
8506+
8507 #endif /* __ASSEMBLY__ */
8508diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8509index c240efc..fdfadf3 100644
8510--- a/arch/x86/include/asm/alternative.h
8511+++ b/arch/x86/include/asm/alternative.h
8512@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8513 " .byte 662b-661b\n" /* sourcelen */ \
8514 " .byte 664f-663f\n" /* replacementlen */ \
8515 ".previous\n" \
8516- ".section .altinstr_replacement, \"ax\"\n" \
8517+ ".section .altinstr_replacement, \"a\"\n" \
8518 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8519 ".previous"
8520
8521diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8522index 474d80d..1f97d58 100644
8523--- a/arch/x86/include/asm/apic.h
8524+++ b/arch/x86/include/asm/apic.h
8525@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8526
8527 #ifdef CONFIG_X86_LOCAL_APIC
8528
8529-extern unsigned int apic_verbosity;
8530+extern int apic_verbosity;
8531 extern int local_apic_timer_c2_ok;
8532
8533 extern int disable_apic;
8534diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8535index 20370c6..a2eb9b0 100644
8536--- a/arch/x86/include/asm/apm.h
8537+++ b/arch/x86/include/asm/apm.h
8538@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8539 __asm__ __volatile__(APM_DO_ZERO_SEGS
8540 "pushl %%edi\n\t"
8541 "pushl %%ebp\n\t"
8542- "lcall *%%cs:apm_bios_entry\n\t"
8543+ "lcall *%%ss:apm_bios_entry\n\t"
8544 "setc %%al\n\t"
8545 "popl %%ebp\n\t"
8546 "popl %%edi\n\t"
8547@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8548 __asm__ __volatile__(APM_DO_ZERO_SEGS
8549 "pushl %%edi\n\t"
8550 "pushl %%ebp\n\t"
8551- "lcall *%%cs:apm_bios_entry\n\t"
8552+ "lcall *%%ss:apm_bios_entry\n\t"
8553 "setc %%bl\n\t"
8554 "popl %%ebp\n\t"
8555 "popl %%edi\n\t"
8556diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8557index dc5a667..939040c 100644
8558--- a/arch/x86/include/asm/atomic_32.h
8559+++ b/arch/x86/include/asm/atomic_32.h
8560@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8561 }
8562
8563 /**
8564+ * atomic_read_unchecked - read atomic variable
8565+ * @v: pointer of type atomic_unchecked_t
8566+ *
8567+ * Atomically reads the value of @v.
8568+ */
8569+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8570+{
8571+ return v->counter;
8572+}
8573+
8574+/**
8575 * atomic_set - set atomic variable
8576 * @v: pointer of type atomic_t
8577 * @i: required value
8578@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8579 }
8580
8581 /**
8582+ * atomic_set_unchecked - set atomic variable
8583+ * @v: pointer of type atomic_unchecked_t
8584+ * @i: required value
8585+ *
8586+ * Atomically sets the value of @v to @i.
8587+ */
8588+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8589+{
8590+ v->counter = i;
8591+}
8592+
8593+/**
8594 * atomic_add - add integer to atomic variable
8595 * @i: integer value to add
8596 * @v: pointer of type atomic_t
8597@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8598 */
8599 static inline void atomic_add(int i, atomic_t *v)
8600 {
8601- asm volatile(LOCK_PREFIX "addl %1,%0"
8602+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8603+
8604+#ifdef CONFIG_PAX_REFCOUNT
8605+ "jno 0f\n"
8606+ LOCK_PREFIX "subl %1,%0\n"
8607+ "int $4\n0:\n"
8608+ _ASM_EXTABLE(0b, 0b)
8609+#endif
8610+
8611+ : "+m" (v->counter)
8612+ : "ir" (i));
8613+}
8614+
8615+/**
8616+ * atomic_add_unchecked - add integer to atomic variable
8617+ * @i: integer value to add
8618+ * @v: pointer of type atomic_unchecked_t
8619+ *
8620+ * Atomically adds @i to @v.
8621+ */
8622+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8623+{
8624+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8625 : "+m" (v->counter)
8626 : "ir" (i));
8627 }
8628@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8629 */
8630 static inline void atomic_sub(int i, atomic_t *v)
8631 {
8632- asm volatile(LOCK_PREFIX "subl %1,%0"
8633+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8634+
8635+#ifdef CONFIG_PAX_REFCOUNT
8636+ "jno 0f\n"
8637+ LOCK_PREFIX "addl %1,%0\n"
8638+ "int $4\n0:\n"
8639+ _ASM_EXTABLE(0b, 0b)
8640+#endif
8641+
8642+ : "+m" (v->counter)
8643+ : "ir" (i));
8644+}
8645+
8646+/**
8647+ * atomic_sub_unchecked - subtract integer from atomic variable
8648+ * @i: integer value to subtract
8649+ * @v: pointer of type atomic_unchecked_t
8650+ *
8651+ * Atomically subtracts @i from @v.
8652+ */
8653+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8654+{
8655+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8656 : "+m" (v->counter)
8657 : "ir" (i));
8658 }
8659@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8660 {
8661 unsigned char c;
8662
8663- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8664+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8665+
8666+#ifdef CONFIG_PAX_REFCOUNT
8667+ "jno 0f\n"
8668+ LOCK_PREFIX "addl %2,%0\n"
8669+ "int $4\n0:\n"
8670+ _ASM_EXTABLE(0b, 0b)
8671+#endif
8672+
8673+ "sete %1\n"
8674 : "+m" (v->counter), "=qm" (c)
8675 : "ir" (i) : "memory");
8676 return c;
8677@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8678 */
8679 static inline void atomic_inc(atomic_t *v)
8680 {
8681- asm volatile(LOCK_PREFIX "incl %0"
8682+ asm volatile(LOCK_PREFIX "incl %0\n"
8683+
8684+#ifdef CONFIG_PAX_REFCOUNT
8685+ "jno 0f\n"
8686+ LOCK_PREFIX "decl %0\n"
8687+ "int $4\n0:\n"
8688+ _ASM_EXTABLE(0b, 0b)
8689+#endif
8690+
8691+ : "+m" (v->counter));
8692+}
8693+
8694+/**
8695+ * atomic_inc_unchecked - increment atomic variable
8696+ * @v: pointer of type atomic_unchecked_t
8697+ *
8698+ * Atomically increments @v by 1.
8699+ */
8700+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8701+{
8702+ asm volatile(LOCK_PREFIX "incl %0\n"
8703 : "+m" (v->counter));
8704 }
8705
8706@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8707 */
8708 static inline void atomic_dec(atomic_t *v)
8709 {
8710- asm volatile(LOCK_PREFIX "decl %0"
8711+ asm volatile(LOCK_PREFIX "decl %0\n"
8712+
8713+#ifdef CONFIG_PAX_REFCOUNT
8714+ "jno 0f\n"
8715+ LOCK_PREFIX "incl %0\n"
8716+ "int $4\n0:\n"
8717+ _ASM_EXTABLE(0b, 0b)
8718+#endif
8719+
8720+ : "+m" (v->counter));
8721+}
8722+
8723+/**
8724+ * atomic_dec_unchecked - decrement atomic variable
8725+ * @v: pointer of type atomic_unchecked_t
8726+ *
8727+ * Atomically decrements @v by 1.
8728+ */
8729+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8730+{
8731+ asm volatile(LOCK_PREFIX "decl %0\n"
8732 : "+m" (v->counter));
8733 }
8734
8735@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8736 {
8737 unsigned char c;
8738
8739- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8740+ asm volatile(LOCK_PREFIX "decl %0\n"
8741+
8742+#ifdef CONFIG_PAX_REFCOUNT
8743+ "jno 0f\n"
8744+ LOCK_PREFIX "incl %0\n"
8745+ "int $4\n0:\n"
8746+ _ASM_EXTABLE(0b, 0b)
8747+#endif
8748+
8749+ "sete %1\n"
8750 : "+m" (v->counter), "=qm" (c)
8751 : : "memory");
8752 return c != 0;
8753@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8754 {
8755 unsigned char c;
8756
8757- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8758+ asm volatile(LOCK_PREFIX "incl %0\n"
8759+
8760+#ifdef CONFIG_PAX_REFCOUNT
8761+ "jno 0f\n"
8762+ LOCK_PREFIX "decl %0\n"
8763+ "into\n0:\n"
8764+ _ASM_EXTABLE(0b, 0b)
8765+#endif
8766+
8767+ "sete %1\n"
8768+ : "+m" (v->counter), "=qm" (c)
8769+ : : "memory");
8770+ return c != 0;
8771+}
8772+
8773+/**
8774+ * atomic_inc_and_test_unchecked - increment and test
8775+ * @v: pointer of type atomic_unchecked_t
8776+ *
8777+ * Atomically increments @v by 1
8778+ * and returns true if the result is zero, or false for all
8779+ * other cases.
8780+ */
8781+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8782+{
8783+ unsigned char c;
8784+
8785+ asm volatile(LOCK_PREFIX "incl %0\n"
8786+ "sete %1\n"
8787 : "+m" (v->counter), "=qm" (c)
8788 : : "memory");
8789 return c != 0;
8790@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8791 {
8792 unsigned char c;
8793
8794- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8795+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8796+
8797+#ifdef CONFIG_PAX_REFCOUNT
8798+ "jno 0f\n"
8799+ LOCK_PREFIX "subl %2,%0\n"
8800+ "int $4\n0:\n"
8801+ _ASM_EXTABLE(0b, 0b)
8802+#endif
8803+
8804+ "sets %1\n"
8805 : "+m" (v->counter), "=qm" (c)
8806 : "ir" (i) : "memory");
8807 return c;
8808@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8809 #endif
8810 /* Modern 486+ processor */
8811 __i = i;
8812- asm volatile(LOCK_PREFIX "xaddl %0, %1"
8813+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8814+
8815+#ifdef CONFIG_PAX_REFCOUNT
8816+ "jno 0f\n"
8817+ "movl %0, %1\n"
8818+ "int $4\n0:\n"
8819+ _ASM_EXTABLE(0b, 0b)
8820+#endif
8821+
8822 : "+r" (i), "+m" (v->counter)
8823 : : "memory");
8824 return i + __i;
8825@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8826 }
8827
8828 /**
8829+ * atomic_add_return_unchecked - add integer and return
8830+ * @v: pointer of type atomic_unchecked_t
8831+ * @i: integer value to add
8832+ *
8833+ * Atomically adds @i to @v and returns @i + @v
8834+ */
8835+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8836+{
8837+ int __i;
8838+#ifdef CONFIG_M386
8839+ unsigned long flags;
8840+ if (unlikely(boot_cpu_data.x86 <= 3))
8841+ goto no_xadd;
8842+#endif
8843+ /* Modern 486+ processor */
8844+ __i = i;
8845+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
8846+ : "+r" (i), "+m" (v->counter)
8847+ : : "memory");
8848+ return i + __i;
8849+
8850+#ifdef CONFIG_M386
8851+no_xadd: /* Legacy 386 processor */
8852+ local_irq_save(flags);
8853+ __i = atomic_read_unchecked(v);
8854+ atomic_set_unchecked(v, i + __i);
8855+ local_irq_restore(flags);
8856+ return i + __i;
8857+#endif
8858+}
8859+
8860+/**
8861 * atomic_sub_return - subtract integer and return
8862 * @v: pointer of type atomic_t
8863 * @i: integer value to subtract
8864@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8865 return cmpxchg(&v->counter, old, new);
8866 }
8867
8868+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8869+{
8870+ return cmpxchg(&v->counter, old, new);
8871+}
8872+
8873 static inline int atomic_xchg(atomic_t *v, int new)
8874 {
8875 return xchg(&v->counter, new);
8876 }
8877
8878+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8879+{
8880+ return xchg(&v->counter, new);
8881+}
8882+
8883 /**
8884 * atomic_add_unless - add unless the number is already a given value
8885 * @v: pointer of type atomic_t
8886@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8887 */
8888 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8889 {
8890- int c, old;
8891+ int c, old, new;
8892 c = atomic_read(v);
8893 for (;;) {
8894- if (unlikely(c == (u)))
8895+ if (unlikely(c == u))
8896 break;
8897- old = atomic_cmpxchg((v), c, c + (a));
8898+
8899+ asm volatile("addl %2,%0\n"
8900+
8901+#ifdef CONFIG_PAX_REFCOUNT
8902+ "jno 0f\n"
8903+ "subl %2,%0\n"
8904+ "int $4\n0:\n"
8905+ _ASM_EXTABLE(0b, 0b)
8906+#endif
8907+
8908+ : "=r" (new)
8909+ : "0" (c), "ir" (a));
8910+
8911+ old = atomic_cmpxchg(v, c, new);
8912 if (likely(old == c))
8913 break;
8914 c = old;
8915 }
8916- return c != (u);
8917+ return c != u;
8918 }
8919
8920 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8921
8922 #define atomic_inc_return(v) (atomic_add_return(1, v))
8923+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8924+{
8925+ return atomic_add_return_unchecked(1, v);
8926+}
8927 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8928
8929 /* These are x86-specific, used by some header files */
8930@@ -266,9 +495,18 @@ typedef struct {
8931 u64 __aligned(8) counter;
8932 } atomic64_t;
8933
8934+#ifdef CONFIG_PAX_REFCOUNT
8935+typedef struct {
8936+ u64 __aligned(8) counter;
8937+} atomic64_unchecked_t;
8938+#else
8939+typedef atomic64_t atomic64_unchecked_t;
8940+#endif
8941+
8942 #define ATOMIC64_INIT(val) { (val) }
8943
8944 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8945+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8946
8947 /**
8948 * atomic64_xchg - xchg atomic64 variable
8949@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8950 * the old value.
8951 */
8952 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8953+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8954
8955 /**
8956 * atomic64_set - set atomic64 variable
8957@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8958 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8959
8960 /**
8961+ * atomic64_unchecked_set - set atomic64 variable
8962+ * @ptr: pointer to type atomic64_unchecked_t
8963+ * @new_val: value to assign
8964+ *
8965+ * Atomically sets the value of @ptr to @new_val.
8966+ */
8967+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8968+
8969+/**
8970 * atomic64_read - read atomic64 variable
8971 * @ptr: pointer to type atomic64_t
8972 *
8973@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8974 return res;
8975 }
8976
8977-extern u64 atomic64_read(atomic64_t *ptr);
8978+/**
8979+ * atomic64_read_unchecked - read atomic64 variable
8980+ * @ptr: pointer to type atomic64_unchecked_t
8981+ *
8982+ * Atomically reads the value of @ptr and returns it.
8983+ */
8984+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8985+{
8986+ u64 res;
8987+
8988+ /*
8989+ * Note, we inline this atomic64_unchecked_t primitive because
8990+ * it only clobbers EAX/EDX and leaves the others
8991+ * untouched. We also (somewhat subtly) rely on the
8992+ * fact that cmpxchg8b returns the current 64-bit value
8993+ * of the memory location we are touching:
8994+ */
8995+ asm volatile(
8996+ "mov %%ebx, %%eax\n\t"
8997+ "mov %%ecx, %%edx\n\t"
8998+ LOCK_PREFIX "cmpxchg8b %1\n"
8999+ : "=&A" (res)
9000+ : "m" (*ptr)
9001+ );
9002+
9003+ return res;
9004+}
9005
9006 /**
9007 * atomic64_add_return - add and return
9008@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9009 * Other variants with different arithmetic operators:
9010 */
9011 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9012+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9013 extern u64 atomic64_inc_return(atomic64_t *ptr);
9014+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9015 extern u64 atomic64_dec_return(atomic64_t *ptr);
9016+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9017
9018 /**
9019 * atomic64_add - add integer to atomic64 variable
9020@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9021 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9022
9023 /**
9024+ * atomic64_add_unchecked - add integer to atomic64 variable
9025+ * @delta: integer value to add
9026+ * @ptr: pointer to type atomic64_unchecked_t
9027+ *
9028+ * Atomically adds @delta to @ptr.
9029+ */
9030+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9031+
9032+/**
9033 * atomic64_sub - subtract the atomic64 variable
9034 * @delta: integer value to subtract
9035 * @ptr: pointer to type atomic64_t
9036@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9037 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9038
9039 /**
9040+ * atomic64_sub_unchecked - subtract the atomic64 variable
9041+ * @delta: integer value to subtract
9042+ * @ptr: pointer to type atomic64_unchecked_t
9043+ *
9044+ * Atomically subtracts @delta from @ptr.
9045+ */
9046+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9047+
9048+/**
9049 * atomic64_sub_and_test - subtract value from variable and test result
9050 * @delta: integer value to subtract
9051 * @ptr: pointer to type atomic64_t
9052@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9053 extern void atomic64_inc(atomic64_t *ptr);
9054
9055 /**
9056+ * atomic64_inc_unchecked - increment atomic64 variable
9057+ * @ptr: pointer to type atomic64_unchecked_t
9058+ *
9059+ * Atomically increments @ptr by 1.
9060+ */
9061+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9062+
9063+/**
9064 * atomic64_dec - decrement atomic64 variable
9065 * @ptr: pointer to type atomic64_t
9066 *
9067@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9068 extern void atomic64_dec(atomic64_t *ptr);
9069
9070 /**
9071+ * atomic64_dec_unchecked - decrement atomic64 variable
9072+ * @ptr: pointer to type atomic64_unchecked_t
9073+ *
9074+ * Atomically decrements @ptr by 1.
9075+ */
9076+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9077+
9078+/**
9079 * atomic64_dec_and_test - decrement and test
9080 * @ptr: pointer to type atomic64_t
9081 *
9082diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9083index d605dc2..fafd7bd 100644
9084--- a/arch/x86/include/asm/atomic_64.h
9085+++ b/arch/x86/include/asm/atomic_64.h
9086@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9087 }
9088
9089 /**
9090+ * atomic_read_unchecked - read atomic variable
9091+ * @v: pointer of type atomic_unchecked_t
9092+ *
9093+ * Atomically reads the value of @v.
9094+ */
9095+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9096+{
9097+ return v->counter;
9098+}
9099+
9100+/**
9101 * atomic_set - set atomic variable
9102 * @v: pointer of type atomic_t
9103 * @i: required value
9104@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9105 }
9106
9107 /**
9108+ * atomic_set_unchecked - set atomic variable
9109+ * @v: pointer of type atomic_unchecked_t
9110+ * @i: required value
9111+ *
9112+ * Atomically sets the value of @v to @i.
9113+ */
9114+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9115+{
9116+ v->counter = i;
9117+}
9118+
9119+/**
9120 * atomic_add - add integer to atomic variable
9121 * @i: integer value to add
9122 * @v: pointer of type atomic_t
9123@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
9124 */
9125 static inline void atomic_add(int i, atomic_t *v)
9126 {
9127- asm volatile(LOCK_PREFIX "addl %1,%0"
9128+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9129+
9130+#ifdef CONFIG_PAX_REFCOUNT
9131+ "jno 0f\n"
9132+ LOCK_PREFIX "subl %1,%0\n"
9133+ "int $4\n0:\n"
9134+ _ASM_EXTABLE(0b, 0b)
9135+#endif
9136+
9137+ : "=m" (v->counter)
9138+ : "ir" (i), "m" (v->counter));
9139+}
9140+
9141+/**
9142+ * atomic_add_unchecked - add integer to atomic variable
9143+ * @i: integer value to add
9144+ * @v: pointer of type atomic_unchecked_t
9145+ *
9146+ * Atomically adds @i to @v.
9147+ */
9148+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9149+{
9150+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9151 : "=m" (v->counter)
9152 : "ir" (i), "m" (v->counter));
9153 }
9154@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
9155 */
9156 static inline void atomic_sub(int i, atomic_t *v)
9157 {
9158- asm volatile(LOCK_PREFIX "subl %1,%0"
9159+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9160+
9161+#ifdef CONFIG_PAX_REFCOUNT
9162+ "jno 0f\n"
9163+ LOCK_PREFIX "addl %1,%0\n"
9164+ "int $4\n0:\n"
9165+ _ASM_EXTABLE(0b, 0b)
9166+#endif
9167+
9168+ : "=m" (v->counter)
9169+ : "ir" (i), "m" (v->counter));
9170+}
9171+
9172+/**
9173+ * atomic_sub_unchecked - subtract the atomic variable
9174+ * @i: integer value to subtract
9175+ * @v: pointer of type atomic_unchecked_t
9176+ *
9177+ * Atomically subtracts @i from @v.
9178+ */
9179+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9180+{
9181+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9182 : "=m" (v->counter)
9183 : "ir" (i), "m" (v->counter));
9184 }
9185@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9186 {
9187 unsigned char c;
9188
9189- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9190+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9191+
9192+#ifdef CONFIG_PAX_REFCOUNT
9193+ "jno 0f\n"
9194+ LOCK_PREFIX "addl %2,%0\n"
9195+ "int $4\n0:\n"
9196+ _ASM_EXTABLE(0b, 0b)
9197+#endif
9198+
9199+ "sete %1\n"
9200 : "=m" (v->counter), "=qm" (c)
9201 : "ir" (i), "m" (v->counter) : "memory");
9202 return c;
9203@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9204 */
9205 static inline void atomic_inc(atomic_t *v)
9206 {
9207- asm volatile(LOCK_PREFIX "incl %0"
9208+ asm volatile(LOCK_PREFIX "incl %0\n"
9209+
9210+#ifdef CONFIG_PAX_REFCOUNT
9211+ "jno 0f\n"
9212+ LOCK_PREFIX "decl %0\n"
9213+ "int $4\n0:\n"
9214+ _ASM_EXTABLE(0b, 0b)
9215+#endif
9216+
9217+ : "=m" (v->counter)
9218+ : "m" (v->counter));
9219+}
9220+
9221+/**
9222+ * atomic_inc_unchecked - increment atomic variable
9223+ * @v: pointer of type atomic_unchecked_t
9224+ *
9225+ * Atomically increments @v by 1.
9226+ */
9227+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9228+{
9229+ asm volatile(LOCK_PREFIX "incl %0\n"
9230 : "=m" (v->counter)
9231 : "m" (v->counter));
9232 }
9233@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9234 */
9235 static inline void atomic_dec(atomic_t *v)
9236 {
9237- asm volatile(LOCK_PREFIX "decl %0"
9238+ asm volatile(LOCK_PREFIX "decl %0\n"
9239+
9240+#ifdef CONFIG_PAX_REFCOUNT
9241+ "jno 0f\n"
9242+ LOCK_PREFIX "incl %0\n"
9243+ "int $4\n0:\n"
9244+ _ASM_EXTABLE(0b, 0b)
9245+#endif
9246+
9247+ : "=m" (v->counter)
9248+ : "m" (v->counter));
9249+}
9250+
9251+/**
9252+ * atomic_dec_unchecked - decrement atomic variable
9253+ * @v: pointer of type atomic_unchecked_t
9254+ *
9255+ * Atomically decrements @v by 1.
9256+ */
9257+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9258+{
9259+ asm volatile(LOCK_PREFIX "decl %0\n"
9260 : "=m" (v->counter)
9261 : "m" (v->counter));
9262 }
9263@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9264 {
9265 unsigned char c;
9266
9267- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9268+ asm volatile(LOCK_PREFIX "decl %0\n"
9269+
9270+#ifdef CONFIG_PAX_REFCOUNT
9271+ "jno 0f\n"
9272+ LOCK_PREFIX "incl %0\n"
9273+ "int $4\n0:\n"
9274+ _ASM_EXTABLE(0b, 0b)
9275+#endif
9276+
9277+ "sete %1\n"
9278 : "=m" (v->counter), "=qm" (c)
9279 : "m" (v->counter) : "memory");
9280 return c != 0;
9281@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9282 {
9283 unsigned char c;
9284
9285- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9286+ asm volatile(LOCK_PREFIX "incl %0\n"
9287+
9288+#ifdef CONFIG_PAX_REFCOUNT
9289+ "jno 0f\n"
9290+ LOCK_PREFIX "decl %0\n"
9291+ "int $4\n0:\n"
9292+ _ASM_EXTABLE(0b, 0b)
9293+#endif
9294+
9295+ "sete %1\n"
9296+ : "=m" (v->counter), "=qm" (c)
9297+ : "m" (v->counter) : "memory");
9298+ return c != 0;
9299+}
9300+
9301+/**
9302+ * atomic_inc_and_test_unchecked - increment and test
9303+ * @v: pointer of type atomic_unchecked_t
9304+ *
9305+ * Atomically increments @v by 1
9306+ * and returns true if the result is zero, or false for all
9307+ * other cases.
9308+ */
9309+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9310+{
9311+ unsigned char c;
9312+
9313+ asm volatile(LOCK_PREFIX "incl %0\n"
9314+ "sete %1\n"
9315 : "=m" (v->counter), "=qm" (c)
9316 : "m" (v->counter) : "memory");
9317 return c != 0;
9318@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9319 {
9320 unsigned char c;
9321
9322- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9323+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9324+
9325+#ifdef CONFIG_PAX_REFCOUNT
9326+ "jno 0f\n"
9327+ LOCK_PREFIX "subl %2,%0\n"
9328+ "int $4\n0:\n"
9329+ _ASM_EXTABLE(0b, 0b)
9330+#endif
9331+
9332+ "sets %1\n"
9333 : "=m" (v->counter), "=qm" (c)
9334 : "ir" (i), "m" (v->counter) : "memory");
9335 return c;
9336@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9337 static inline int atomic_add_return(int i, atomic_t *v)
9338 {
9339 int __i = i;
9340- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9341+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9342+
9343+#ifdef CONFIG_PAX_REFCOUNT
9344+ "jno 0f\n"
9345+ "movl %0, %1\n"
9346+ "int $4\n0:\n"
9347+ _ASM_EXTABLE(0b, 0b)
9348+#endif
9349+
9350+ : "+r" (i), "+m" (v->counter)
9351+ : : "memory");
9352+ return i + __i;
9353+}
9354+
9355+/**
9356+ * atomic_add_return_unchecked - add and return
9357+ * @i: integer value to add
9358+ * @v: pointer of type atomic_unchecked_t
9359+ *
9360+ * Atomically adds @i to @v and returns @i + @v
9361+ */
9362+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9363+{
9364+ int __i = i;
9365+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9366 : "+r" (i), "+m" (v->counter)
9367 : : "memory");
9368 return i + __i;
9369@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9370 }
9371
9372 #define atomic_inc_return(v) (atomic_add_return(1, v))
9373+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9374+{
9375+ return atomic_add_return_unchecked(1, v);
9376+}
9377 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9378
9379 /* The 64-bit atomic type */
9380@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9381 }
9382
9383 /**
9384+ * atomic64_read_unchecked - read atomic64 variable
9385+ * @v: pointer of type atomic64_unchecked_t
9386+ *
9387+ * Atomically reads the value of @v.
9388+ * Doesn't imply a read memory barrier.
9389+ */
9390+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9391+{
9392+ return v->counter;
9393+}
9394+
9395+/**
9396 * atomic64_set - set atomic64 variable
9397 * @v: pointer to type atomic64_t
9398 * @i: required value
9399@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9400 }
9401
9402 /**
9403+ * atomic64_set_unchecked - set atomic64 variable
9404+ * @v: pointer to type atomic64_unchecked_t
9405+ * @i: required value
9406+ *
9407+ * Atomically sets the value of @v to @i.
9408+ */
9409+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9410+{
9411+ v->counter = i;
9412+}
9413+
9414+/**
9415 * atomic64_add - add integer to atomic64 variable
9416 * @i: integer value to add
9417 * @v: pointer to type atomic64_t
9418@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9419 */
9420 static inline void atomic64_add(long i, atomic64_t *v)
9421 {
9422+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9423+
9424+#ifdef CONFIG_PAX_REFCOUNT
9425+ "jno 0f\n"
9426+ LOCK_PREFIX "subq %1,%0\n"
9427+ "int $4\n0:\n"
9428+ _ASM_EXTABLE(0b, 0b)
9429+#endif
9430+
9431+ : "=m" (v->counter)
9432+ : "er" (i), "m" (v->counter));
9433+}
9434+
9435+/**
9436+ * atomic64_add_unchecked - add integer to atomic64 variable
9437+ * @i: integer value to add
9438+ * @v: pointer to type atomic64_unchecked_t
9439+ *
9440+ * Atomically adds @i to @v.
9441+ */
9442+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9443+{
9444 asm volatile(LOCK_PREFIX "addq %1,%0"
9445 : "=m" (v->counter)
9446 : "er" (i), "m" (v->counter));
9447@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9448 */
9449 static inline void atomic64_sub(long i, atomic64_t *v)
9450 {
9451- asm volatile(LOCK_PREFIX "subq %1,%0"
9452+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9453+
9454+#ifdef CONFIG_PAX_REFCOUNT
9455+ "jno 0f\n"
9456+ LOCK_PREFIX "addq %1,%0\n"
9457+ "int $4\n0:\n"
9458+ _ASM_EXTABLE(0b, 0b)
9459+#endif
9460+
9461 : "=m" (v->counter)
9462 : "er" (i), "m" (v->counter));
9463 }
9464@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9465 {
9466 unsigned char c;
9467
9468- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9469+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9470+
9471+#ifdef CONFIG_PAX_REFCOUNT
9472+ "jno 0f\n"
9473+ LOCK_PREFIX "addq %2,%0\n"
9474+ "int $4\n0:\n"
9475+ _ASM_EXTABLE(0b, 0b)
9476+#endif
9477+
9478+ "sete %1\n"
9479 : "=m" (v->counter), "=qm" (c)
9480 : "er" (i), "m" (v->counter) : "memory");
9481 return c;
9482@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9483 */
9484 static inline void atomic64_inc(atomic64_t *v)
9485 {
9486+ asm volatile(LOCK_PREFIX "incq %0\n"
9487+
9488+#ifdef CONFIG_PAX_REFCOUNT
9489+ "jno 0f\n"
9490+ LOCK_PREFIX "decq %0\n"
9491+ "int $4\n0:\n"
9492+ _ASM_EXTABLE(0b, 0b)
9493+#endif
9494+
9495+ : "=m" (v->counter)
9496+ : "m" (v->counter));
9497+}
9498+
9499+/**
9500+ * atomic64_inc_unchecked - increment atomic64 variable
9501+ * @v: pointer to type atomic64_unchecked_t
9502+ *
9503+ * Atomically increments @v by 1.
9504+ */
9505+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9506+{
9507 asm volatile(LOCK_PREFIX "incq %0"
9508 : "=m" (v->counter)
9509 : "m" (v->counter));
9510@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9511 */
9512 static inline void atomic64_dec(atomic64_t *v)
9513 {
9514- asm volatile(LOCK_PREFIX "decq %0"
9515+ asm volatile(LOCK_PREFIX "decq %0\n"
9516+
9517+#ifdef CONFIG_PAX_REFCOUNT
9518+ "jno 0f\n"
9519+ LOCK_PREFIX "incq %0\n"
9520+ "int $4\n0:\n"
9521+ _ASM_EXTABLE(0b, 0b)
9522+#endif
9523+
9524+ : "=m" (v->counter)
9525+ : "m" (v->counter));
9526+}
9527+
9528+/**
9529+ * atomic64_dec_unchecked - decrement atomic64 variable
9530+ * @v: pointer to type atomic64_t
9531+ *
9532+ * Atomically decrements @v by 1.
9533+ */
9534+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9535+{
9536+ asm volatile(LOCK_PREFIX "decq %0\n"
9537 : "=m" (v->counter)
9538 : "m" (v->counter));
9539 }
9540@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9541 {
9542 unsigned char c;
9543
9544- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9545+ asm volatile(LOCK_PREFIX "decq %0\n"
9546+
9547+#ifdef CONFIG_PAX_REFCOUNT
9548+ "jno 0f\n"
9549+ LOCK_PREFIX "incq %0\n"
9550+ "int $4\n0:\n"
9551+ _ASM_EXTABLE(0b, 0b)
9552+#endif
9553+
9554+ "sete %1\n"
9555 : "=m" (v->counter), "=qm" (c)
9556 : "m" (v->counter) : "memory");
9557 return c != 0;
9558@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9559 {
9560 unsigned char c;
9561
9562- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9563+ asm volatile(LOCK_PREFIX "incq %0\n"
9564+
9565+#ifdef CONFIG_PAX_REFCOUNT
9566+ "jno 0f\n"
9567+ LOCK_PREFIX "decq %0\n"
9568+ "int $4\n0:\n"
9569+ _ASM_EXTABLE(0b, 0b)
9570+#endif
9571+
9572+ "sete %1\n"
9573 : "=m" (v->counter), "=qm" (c)
9574 : "m" (v->counter) : "memory");
9575 return c != 0;
9576@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9577 {
9578 unsigned char c;
9579
9580- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9581+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9582+
9583+#ifdef CONFIG_PAX_REFCOUNT
9584+ "jno 0f\n"
9585+ LOCK_PREFIX "subq %2,%0\n"
9586+ "int $4\n0:\n"
9587+ _ASM_EXTABLE(0b, 0b)
9588+#endif
9589+
9590+ "sets %1\n"
9591 : "=m" (v->counter), "=qm" (c)
9592 : "er" (i), "m" (v->counter) : "memory");
9593 return c;
9594@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9595 static inline long atomic64_add_return(long i, atomic64_t *v)
9596 {
9597 long __i = i;
9598- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9599+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9600+
9601+#ifdef CONFIG_PAX_REFCOUNT
9602+ "jno 0f\n"
9603+ "movq %0, %1\n"
9604+ "int $4\n0:\n"
9605+ _ASM_EXTABLE(0b, 0b)
9606+#endif
9607+
9608+ : "+r" (i), "+m" (v->counter)
9609+ : : "memory");
9610+ return i + __i;
9611+}
9612+
9613+/**
9614+ * atomic64_add_return_unchecked - add and return
9615+ * @i: integer value to add
9616+ * @v: pointer to type atomic64_unchecked_t
9617+ *
9618+ * Atomically adds @i to @v and returns @i + @v
9619+ */
9620+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9621+{
9622+ long __i = i;
9623+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
9624 : "+r" (i), "+m" (v->counter)
9625 : : "memory");
9626 return i + __i;
9627@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9628 }
9629
9630 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9631+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9632+{
9633+ return atomic64_add_return_unchecked(1, v);
9634+}
9635 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9636
9637 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9638@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9639 return cmpxchg(&v->counter, old, new);
9640 }
9641
9642+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9643+{
9644+ return cmpxchg(&v->counter, old, new);
9645+}
9646+
9647 static inline long atomic64_xchg(atomic64_t *v, long new)
9648 {
9649 return xchg(&v->counter, new);
9650 }
9651
9652+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9653+{
9654+ return xchg(&v->counter, new);
9655+}
9656+
9657 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9658 {
9659 return cmpxchg(&v->counter, old, new);
9660 }
9661
9662+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9663+{
9664+ return cmpxchg(&v->counter, old, new);
9665+}
9666+
9667 static inline long atomic_xchg(atomic_t *v, int new)
9668 {
9669 return xchg(&v->counter, new);
9670 }
9671
9672+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9673+{
9674+ return xchg(&v->counter, new);
9675+}
9676+
9677 /**
9678 * atomic_add_unless - add unless the number is a given value
9679 * @v: pointer of type atomic_t
9680@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9681 */
9682 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9683 {
9684- int c, old;
9685+ int c, old, new;
9686 c = atomic_read(v);
9687 for (;;) {
9688- if (unlikely(c == (u)))
9689+ if (unlikely(c == u))
9690 break;
9691- old = atomic_cmpxchg((v), c, c + (a));
9692+
9693+ asm volatile("addl %2,%0\n"
9694+
9695+#ifdef CONFIG_PAX_REFCOUNT
9696+ "jno 0f\n"
9697+ "subl %2,%0\n"
9698+ "int $4\n0:\n"
9699+ _ASM_EXTABLE(0b, 0b)
9700+#endif
9701+
9702+ : "=r" (new)
9703+ : "0" (c), "ir" (a));
9704+
9705+ old = atomic_cmpxchg(v, c, new);
9706 if (likely(old == c))
9707 break;
9708 c = old;
9709 }
9710- return c != (u);
9711+ return c != u;
9712 }
9713
9714 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9715@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9716 */
9717 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9718 {
9719- long c, old;
9720+ long c, old, new;
9721 c = atomic64_read(v);
9722 for (;;) {
9723- if (unlikely(c == (u)))
9724+ if (unlikely(c == u))
9725 break;
9726- old = atomic64_cmpxchg((v), c, c + (a));
9727+
9728+ asm volatile("addq %2,%0\n"
9729+
9730+#ifdef CONFIG_PAX_REFCOUNT
9731+ "jno 0f\n"
9732+ "subq %2,%0\n"
9733+ "int $4\n0:\n"
9734+ _ASM_EXTABLE(0b, 0b)
9735+#endif
9736+
9737+ : "=r" (new)
9738+ : "0" (c), "er" (a));
9739+
9740+ old = atomic64_cmpxchg(v, c, new);
9741 if (likely(old == c))
9742 break;
9743 c = old;
9744 }
9745- return c != (u);
9746+ return c != u;
9747 }
9748
9749 /**
9750diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9751index 02b47a6..d5c4b15 100644
9752--- a/arch/x86/include/asm/bitops.h
9753+++ b/arch/x86/include/asm/bitops.h
9754@@ -38,7 +38,7 @@
9755 * a mask operation on a byte.
9756 */
9757 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9758-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9759+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9760 #define CONST_MASK(nr) (1 << ((nr) & 7))
9761
9762 /**
9763diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9764index 7a10659..8bbf355 100644
9765--- a/arch/x86/include/asm/boot.h
9766+++ b/arch/x86/include/asm/boot.h
9767@@ -11,10 +11,15 @@
9768 #include <asm/pgtable_types.h>
9769
9770 /* Physical address where kernel should be loaded. */
9771-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9772+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9773 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9774 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9775
9776+#ifndef __ASSEMBLY__
9777+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9778+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9779+#endif
9780+
9781 /* Minimum kernel alignment, as a power of two */
9782 #ifdef CONFIG_X86_64
9783 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9784diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9785index 549860d..7d45f68 100644
9786--- a/arch/x86/include/asm/cache.h
9787+++ b/arch/x86/include/asm/cache.h
9788@@ -5,9 +5,10 @@
9789
9790 /* L1 cache line size */
9791 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9792-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9793+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9794
9795 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9796+#define __read_only __attribute__((__section__(".data.read_only")))
9797
9798 #ifdef CONFIG_X86_VSMP
9799 /* vSMP Internode cacheline shift */
9800diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9801index b54f6af..5b376a6 100644
9802--- a/arch/x86/include/asm/cacheflush.h
9803+++ b/arch/x86/include/asm/cacheflush.h
9804@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9805 static inline unsigned long get_page_memtype(struct page *pg)
9806 {
9807 if (!PageUncached(pg) && !PageWC(pg))
9808- return -1;
9809+ return ~0UL;
9810 else if (!PageUncached(pg) && PageWC(pg))
9811 return _PAGE_CACHE_WC;
9812 else if (PageUncached(pg) && !PageWC(pg))
9813@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9814 SetPageWC(pg);
9815 break;
9816 default:
9817- case -1:
9818+ case ~0UL:
9819 ClearPageUncached(pg);
9820 ClearPageWC(pg);
9821 break;
9822diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9823index 0e63c9a..ab8d972 100644
9824--- a/arch/x86/include/asm/calling.h
9825+++ b/arch/x86/include/asm/calling.h
9826@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9827 * for assembly code:
9828 */
9829
9830-#define R15 0
9831-#define R14 8
9832-#define R13 16
9833-#define R12 24
9834-#define RBP 32
9835-#define RBX 40
9836+#define R15 (0)
9837+#define R14 (8)
9838+#define R13 (16)
9839+#define R12 (24)
9840+#define RBP (32)
9841+#define RBX (40)
9842
9843 /* arguments: interrupts/non tracing syscalls only save up to here: */
9844-#define R11 48
9845-#define R10 56
9846-#define R9 64
9847-#define R8 72
9848-#define RAX 80
9849-#define RCX 88
9850-#define RDX 96
9851-#define RSI 104
9852-#define RDI 112
9853-#define ORIG_RAX 120 /* + error_code */
9854+#define R11 (48)
9855+#define R10 (56)
9856+#define R9 (64)
9857+#define R8 (72)
9858+#define RAX (80)
9859+#define RCX (88)
9860+#define RDX (96)
9861+#define RSI (104)
9862+#define RDI (112)
9863+#define ORIG_RAX (120) /* + error_code */
9864 /* end of arguments */
9865
9866 /* cpu exception frame or undefined in case of fast syscall: */
9867-#define RIP 128
9868-#define CS 136
9869-#define EFLAGS 144
9870-#define RSP 152
9871-#define SS 160
9872+#define RIP (128)
9873+#define CS (136)
9874+#define EFLAGS (144)
9875+#define RSP (152)
9876+#define SS (160)
9877
9878 #define ARGOFFSET R11
9879 #define SWFRAME ORIG_RAX
9880diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9881index 46fc474..b02b0f9 100644
9882--- a/arch/x86/include/asm/checksum_32.h
9883+++ b/arch/x86/include/asm/checksum_32.h
9884@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9885 int len, __wsum sum,
9886 int *src_err_ptr, int *dst_err_ptr);
9887
9888+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9889+ int len, __wsum sum,
9890+ int *src_err_ptr, int *dst_err_ptr);
9891+
9892+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9893+ int len, __wsum sum,
9894+ int *src_err_ptr, int *dst_err_ptr);
9895+
9896 /*
9897 * Note: when you get a NULL pointer exception here this means someone
9898 * passed in an incorrect kernel address to one of these functions.
9899@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9900 int *err_ptr)
9901 {
9902 might_sleep();
9903- return csum_partial_copy_generic((__force void *)src, dst,
9904+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9905 len, sum, err_ptr, NULL);
9906 }
9907
9908@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9909 {
9910 might_sleep();
9911 if (access_ok(VERIFY_WRITE, dst, len))
9912- return csum_partial_copy_generic(src, (__force void *)dst,
9913+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9914 len, sum, NULL, err_ptr);
9915
9916 if (len)
9917diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9918index 617bd56..7b047a1 100644
9919--- a/arch/x86/include/asm/desc.h
9920+++ b/arch/x86/include/asm/desc.h
9921@@ -4,6 +4,7 @@
9922 #include <asm/desc_defs.h>
9923 #include <asm/ldt.h>
9924 #include <asm/mmu.h>
9925+#include <asm/pgtable.h>
9926 #include <linux/smp.h>
9927
9928 static inline void fill_ldt(struct desc_struct *desc,
9929@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9930 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9931 desc->type = (info->read_exec_only ^ 1) << 1;
9932 desc->type |= info->contents << 2;
9933+ desc->type |= info->seg_not_present ^ 1;
9934 desc->s = 1;
9935 desc->dpl = 0x3;
9936 desc->p = info->seg_not_present ^ 1;
9937@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9938 }
9939
9940 extern struct desc_ptr idt_descr;
9941-extern gate_desc idt_table[];
9942-
9943-struct gdt_page {
9944- struct desc_struct gdt[GDT_ENTRIES];
9945-} __attribute__((aligned(PAGE_SIZE)));
9946-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9947+extern gate_desc idt_table[256];
9948
9949+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9950 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9951 {
9952- return per_cpu(gdt_page, cpu).gdt;
9953+ return cpu_gdt_table[cpu];
9954 }
9955
9956 #ifdef CONFIG_X86_64
9957@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9958 unsigned long base, unsigned dpl, unsigned flags,
9959 unsigned short seg)
9960 {
9961- gate->a = (seg << 16) | (base & 0xffff);
9962- gate->b = (base & 0xffff0000) |
9963- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9964+ gate->gate.offset_low = base;
9965+ gate->gate.seg = seg;
9966+ gate->gate.reserved = 0;
9967+ gate->gate.type = type;
9968+ gate->gate.s = 0;
9969+ gate->gate.dpl = dpl;
9970+ gate->gate.p = 1;
9971+ gate->gate.offset_high = base >> 16;
9972 }
9973
9974 #endif
9975@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9976 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9977 const gate_desc *gate)
9978 {
9979+ pax_open_kernel();
9980 memcpy(&idt[entry], gate, sizeof(*gate));
9981+ pax_close_kernel();
9982 }
9983
9984 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9985 const void *desc)
9986 {
9987+ pax_open_kernel();
9988 memcpy(&ldt[entry], desc, 8);
9989+ pax_close_kernel();
9990 }
9991
9992 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9993@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9994 size = sizeof(struct desc_struct);
9995 break;
9996 }
9997+
9998+ pax_open_kernel();
9999 memcpy(&gdt[entry], desc, size);
10000+ pax_close_kernel();
10001 }
10002
10003 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10004@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10005
10006 static inline void native_load_tr_desc(void)
10007 {
10008+ pax_open_kernel();
10009 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10010+ pax_close_kernel();
10011 }
10012
10013 static inline void native_load_gdt(const struct desc_ptr *dtr)
10014@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10015 unsigned int i;
10016 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10017
10018+ pax_open_kernel();
10019 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10020 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10021+ pax_close_kernel();
10022 }
10023
10024 #define _LDT_empty(info) \
10025@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10026 desc->limit = (limit >> 16) & 0xf;
10027 }
10028
10029-static inline void _set_gate(int gate, unsigned type, void *addr,
10030+static inline void _set_gate(int gate, unsigned type, const void *addr,
10031 unsigned dpl, unsigned ist, unsigned seg)
10032 {
10033 gate_desc s;
10034@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10035 * Pentium F0 0F bugfix can have resulted in the mapped
10036 * IDT being write-protected.
10037 */
10038-static inline void set_intr_gate(unsigned int n, void *addr)
10039+static inline void set_intr_gate(unsigned int n, const void *addr)
10040 {
10041 BUG_ON((unsigned)n > 0xFF);
10042 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10043@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10044 /*
10045 * This routine sets up an interrupt gate at directory privilege level 3.
10046 */
10047-static inline void set_system_intr_gate(unsigned int n, void *addr)
10048+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10049 {
10050 BUG_ON((unsigned)n > 0xFF);
10051 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10052 }
10053
10054-static inline void set_system_trap_gate(unsigned int n, void *addr)
10055+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10056 {
10057 BUG_ON((unsigned)n > 0xFF);
10058 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10059 }
10060
10061-static inline void set_trap_gate(unsigned int n, void *addr)
10062+static inline void set_trap_gate(unsigned int n, const void *addr)
10063 {
10064 BUG_ON((unsigned)n > 0xFF);
10065 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10066@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10067 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10068 {
10069 BUG_ON((unsigned)n > 0xFF);
10070- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10071+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10072 }
10073
10074-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10075+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10076 {
10077 BUG_ON((unsigned)n > 0xFF);
10078 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10079 }
10080
10081-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10082+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10083 {
10084 BUG_ON((unsigned)n > 0xFF);
10085 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10086 }
10087
10088+#ifdef CONFIG_X86_32
10089+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10090+{
10091+ struct desc_struct d;
10092+
10093+ if (likely(limit))
10094+ limit = (limit - 1UL) >> PAGE_SHIFT;
10095+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10096+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10097+}
10098+#endif
10099+
10100 #endif /* _ASM_X86_DESC_H */
10101diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10102index 9d66848..6b4a691 100644
10103--- a/arch/x86/include/asm/desc_defs.h
10104+++ b/arch/x86/include/asm/desc_defs.h
10105@@ -31,6 +31,12 @@ struct desc_struct {
10106 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10107 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10108 };
10109+ struct {
10110+ u16 offset_low;
10111+ u16 seg;
10112+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10113+ unsigned offset_high: 16;
10114+ } gate;
10115 };
10116 } __attribute__((packed));
10117
10118diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
10119index cee34e9..a7c3fa2 100644
10120--- a/arch/x86/include/asm/device.h
10121+++ b/arch/x86/include/asm/device.h
10122@@ -6,7 +6,7 @@ struct dev_archdata {
10123 void *acpi_handle;
10124 #endif
10125 #ifdef CONFIG_X86_64
10126-struct dma_map_ops *dma_ops;
10127+ const struct dma_map_ops *dma_ops;
10128 #endif
10129 #ifdef CONFIG_DMAR
10130 void *iommu; /* hook for IOMMU specific extension */
10131diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
10132index 6a25d5d..786b202 100644
10133--- a/arch/x86/include/asm/dma-mapping.h
10134+++ b/arch/x86/include/asm/dma-mapping.h
10135@@ -25,9 +25,9 @@ extern int iommu_merge;
10136 extern struct device x86_dma_fallback_dev;
10137 extern int panic_on_overflow;
10138
10139-extern struct dma_map_ops *dma_ops;
10140+extern const struct dma_map_ops *dma_ops;
10141
10142-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10143+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
10144 {
10145 #ifdef CONFIG_X86_32
10146 return dma_ops;
10147@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
10148 /* Make sure we keep the same behaviour */
10149 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
10150 {
10151- struct dma_map_ops *ops = get_dma_ops(dev);
10152+ const struct dma_map_ops *ops = get_dma_ops(dev);
10153 if (ops->mapping_error)
10154 return ops->mapping_error(dev, dma_addr);
10155
10156@@ -122,7 +122,7 @@ static inline void *
10157 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10158 gfp_t gfp)
10159 {
10160- struct dma_map_ops *ops = get_dma_ops(dev);
10161+ const struct dma_map_ops *ops = get_dma_ops(dev);
10162 void *memory;
10163
10164 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
10165@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
10166 static inline void dma_free_coherent(struct device *dev, size_t size,
10167 void *vaddr, dma_addr_t bus)
10168 {
10169- struct dma_map_ops *ops = get_dma_ops(dev);
10170+ const struct dma_map_ops *ops = get_dma_ops(dev);
10171
10172 WARN_ON(irqs_disabled()); /* for portability */
10173
10174diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10175index 40b4e61..40d8133 100644
10176--- a/arch/x86/include/asm/e820.h
10177+++ b/arch/x86/include/asm/e820.h
10178@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
10179 #define ISA_END_ADDRESS 0x100000
10180 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
10181
10182-#define BIOS_BEGIN 0x000a0000
10183+#define BIOS_BEGIN 0x000c0000
10184 #define BIOS_END 0x00100000
10185
10186 #ifdef __KERNEL__
10187diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10188index 8ac9d9a..0a6c96e 100644
10189--- a/arch/x86/include/asm/elf.h
10190+++ b/arch/x86/include/asm/elf.h
10191@@ -257,7 +257,25 @@ extern int force_personality32;
10192 the loader. We need to make sure that it is out of the way of the program
10193 that it will "exec", and that there is sufficient room for the brk. */
10194
10195+#ifdef CONFIG_PAX_SEGMEXEC
10196+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10197+#else
10198 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10199+#endif
10200+
10201+#ifdef CONFIG_PAX_ASLR
10202+#ifdef CONFIG_X86_32
10203+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10204+
10205+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10206+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10207+#else
10208+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10209+
10210+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10211+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10212+#endif
10213+#endif
10214
10215 /* This yields a mask that user programs can use to figure out what
10216 instruction set this CPU supports. This could be done in user space,
10217@@ -310,9 +328,7 @@ do { \
10218
10219 #define ARCH_DLINFO \
10220 do { \
10221- if (vdso_enabled) \
10222- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10223- (unsigned long)current->mm->context.vdso); \
10224+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10225 } while (0)
10226
10227 #define AT_SYSINFO 32
10228@@ -323,7 +339,7 @@ do { \
10229
10230 #endif /* !CONFIG_X86_32 */
10231
10232-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10233+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10234
10235 #define VDSO_ENTRY \
10236 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10237@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10238 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10239 #define compat_arch_setup_additional_pages syscall32_setup_pages
10240
10241-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10242-#define arch_randomize_brk arch_randomize_brk
10243-
10244 #endif /* _ASM_X86_ELF_H */
10245diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10246index cc70c1c..d96d011 100644
10247--- a/arch/x86/include/asm/emergency-restart.h
10248+++ b/arch/x86/include/asm/emergency-restart.h
10249@@ -15,6 +15,6 @@ enum reboot_type {
10250
10251 extern enum reboot_type reboot_type;
10252
10253-extern void machine_emergency_restart(void);
10254+extern void machine_emergency_restart(void) __noreturn;
10255
10256 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10257diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10258index 1f11ce4..7caabd1 100644
10259--- a/arch/x86/include/asm/futex.h
10260+++ b/arch/x86/include/asm/futex.h
10261@@ -12,16 +12,18 @@
10262 #include <asm/system.h>
10263
10264 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10265+ typecheck(u32 __user *, uaddr); \
10266 asm volatile("1:\t" insn "\n" \
10267 "2:\t.section .fixup,\"ax\"\n" \
10268 "3:\tmov\t%3, %1\n" \
10269 "\tjmp\t2b\n" \
10270 "\t.previous\n" \
10271 _ASM_EXTABLE(1b, 3b) \
10272- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10273+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10274 : "i" (-EFAULT), "0" (oparg), "1" (0))
10275
10276 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10277+ typecheck(u32 __user *, uaddr); \
10278 asm volatile("1:\tmovl %2, %0\n" \
10279 "\tmovl\t%0, %3\n" \
10280 "\t" insn "\n" \
10281@@ -34,10 +36,10 @@
10282 _ASM_EXTABLE(1b, 4b) \
10283 _ASM_EXTABLE(2b, 4b) \
10284 : "=&a" (oldval), "=&r" (ret), \
10285- "+m" (*uaddr), "=&r" (tem) \
10286+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10287 : "r" (oparg), "i" (-EFAULT), "1" (0))
10288
10289-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10290+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10291 {
10292 int op = (encoded_op >> 28) & 7;
10293 int cmp = (encoded_op >> 24) & 15;
10294@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10295
10296 switch (op) {
10297 case FUTEX_OP_SET:
10298- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10299+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10300 break;
10301 case FUTEX_OP_ADD:
10302- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10303+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10304 uaddr, oparg);
10305 break;
10306 case FUTEX_OP_OR:
10307@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10308 return ret;
10309 }
10310
10311-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10312+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10313 int newval)
10314 {
10315
10316@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10317 return -ENOSYS;
10318 #endif
10319
10320- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10321+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10322 return -EFAULT;
10323
10324- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10325+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10326 "2:\t.section .fixup, \"ax\"\n"
10327 "3:\tmov %2, %0\n"
10328 "\tjmp 2b\n"
10329 "\t.previous\n"
10330 _ASM_EXTABLE(1b, 3b)
10331- : "=a" (oldval), "+m" (*uaddr)
10332+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10333 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10334 : "memory"
10335 );
10336diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10337index ba180d9..3bad351 100644
10338--- a/arch/x86/include/asm/hw_irq.h
10339+++ b/arch/x86/include/asm/hw_irq.h
10340@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10341 extern void enable_IO_APIC(void);
10342
10343 /* Statistics */
10344-extern atomic_t irq_err_count;
10345-extern atomic_t irq_mis_count;
10346+extern atomic_unchecked_t irq_err_count;
10347+extern atomic_unchecked_t irq_mis_count;
10348
10349 /* EISA */
10350 extern void eisa_set_level_irq(unsigned int irq);
10351diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10352index 0b20bbb..4cb1396 100644
10353--- a/arch/x86/include/asm/i387.h
10354+++ b/arch/x86/include/asm/i387.h
10355@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10356 {
10357 int err;
10358
10359+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10360+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10361+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10362+#endif
10363+
10364 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10365 "2:\n"
10366 ".section .fixup,\"ax\"\n"
10367@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10368 {
10369 int err;
10370
10371+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10372+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10373+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10374+#endif
10375+
10376 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10377 "2:\n"
10378 ".section .fixup,\"ax\"\n"
10379@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10380 }
10381
10382 /* We need a safe address that is cheap to find and that is already
10383- in L1 during context switch. The best choices are unfortunately
10384- different for UP and SMP */
10385-#ifdef CONFIG_SMP
10386-#define safe_address (__per_cpu_offset[0])
10387-#else
10388-#define safe_address (kstat_cpu(0).cpustat.user)
10389-#endif
10390+ in L1 during context switch. */
10391+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10392
10393 /*
10394 * These must be called with preempt disabled
10395@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10396 struct thread_info *me = current_thread_info();
10397 preempt_disable();
10398 if (me->status & TS_USEDFPU)
10399- __save_init_fpu(me->task);
10400+ __save_init_fpu(current);
10401 else
10402 clts();
10403 }
10404diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10405index a299900..15c5410 100644
10406--- a/arch/x86/include/asm/io_32.h
10407+++ b/arch/x86/include/asm/io_32.h
10408@@ -3,6 +3,7 @@
10409
10410 #include <linux/string.h>
10411 #include <linux/compiler.h>
10412+#include <asm/processor.h>
10413
10414 /*
10415 * This file contains the definitions for the x86 IO instructions
10416@@ -42,6 +43,17 @@
10417
10418 #ifdef __KERNEL__
10419
10420+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10421+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10422+{
10423+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10424+}
10425+
10426+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10427+{
10428+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10429+}
10430+
10431 #include <asm-generic/iomap.h>
10432
10433 #include <linux/vmalloc.h>
10434diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10435index 2440678..c158b88 100644
10436--- a/arch/x86/include/asm/io_64.h
10437+++ b/arch/x86/include/asm/io_64.h
10438@@ -140,6 +140,17 @@ __OUTS(l)
10439
10440 #include <linux/vmalloc.h>
10441
10442+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10443+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10444+{
10445+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10446+}
10447+
10448+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10449+{
10450+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10451+}
10452+
10453 #include <asm-generic/iomap.h>
10454
10455 void __memcpy_fromio(void *, unsigned long, unsigned);
10456diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10457index fd6d21b..8b13915 100644
10458--- a/arch/x86/include/asm/iommu.h
10459+++ b/arch/x86/include/asm/iommu.h
10460@@ -3,7 +3,7 @@
10461
10462 extern void pci_iommu_shutdown(void);
10463 extern void no_iommu_init(void);
10464-extern struct dma_map_ops nommu_dma_ops;
10465+extern const struct dma_map_ops nommu_dma_ops;
10466 extern int force_iommu, no_iommu;
10467 extern int iommu_detected;
10468 extern int iommu_pass_through;
10469diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10470index 9e2b952..557206e 100644
10471--- a/arch/x86/include/asm/irqflags.h
10472+++ b/arch/x86/include/asm/irqflags.h
10473@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10474 sti; \
10475 sysexit
10476
10477+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10478+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10479+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10480+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10481+
10482 #else
10483 #define INTERRUPT_RETURN iret
10484 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10485diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10486index 4fe681d..bb6d40c 100644
10487--- a/arch/x86/include/asm/kprobes.h
10488+++ b/arch/x86/include/asm/kprobes.h
10489@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10490 #define BREAKPOINT_INSTRUCTION 0xcc
10491 #define RELATIVEJUMP_INSTRUCTION 0xe9
10492 #define MAX_INSN_SIZE 16
10493-#define MAX_STACK_SIZE 64
10494-#define MIN_STACK_SIZE(ADDR) \
10495- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10496- THREAD_SIZE - (unsigned long)(ADDR))) \
10497- ? (MAX_STACK_SIZE) \
10498- : (((unsigned long)current_thread_info()) + \
10499- THREAD_SIZE - (unsigned long)(ADDR)))
10500+#define MAX_STACK_SIZE 64UL
10501+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10502
10503 #define flush_insn_slot(p) do { } while (0)
10504
10505diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10506index 08bc2ff..2e88d1f 100644
10507--- a/arch/x86/include/asm/kvm_host.h
10508+++ b/arch/x86/include/asm/kvm_host.h
10509@@ -534,9 +534,9 @@ struct kvm_x86_ops {
10510 bool (*gb_page_enable)(void);
10511
10512 const struct trace_print_flags *exit_reasons_str;
10513-};
10514+} __do_const;
10515
10516-extern struct kvm_x86_ops *kvm_x86_ops;
10517+extern const struct kvm_x86_ops *kvm_x86_ops;
10518
10519 int kvm_mmu_module_init(void);
10520 void kvm_mmu_module_exit(void);
10521diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10522index 47b9b6f..815aaa1 100644
10523--- a/arch/x86/include/asm/local.h
10524+++ b/arch/x86/include/asm/local.h
10525@@ -18,26 +18,58 @@ typedef struct {
10526
10527 static inline void local_inc(local_t *l)
10528 {
10529- asm volatile(_ASM_INC "%0"
10530+ asm volatile(_ASM_INC "%0\n"
10531+
10532+#ifdef CONFIG_PAX_REFCOUNT
10533+ "jno 0f\n"
10534+ _ASM_DEC "%0\n"
10535+ "int $4\n0:\n"
10536+ _ASM_EXTABLE(0b, 0b)
10537+#endif
10538+
10539 : "+m" (l->a.counter));
10540 }
10541
10542 static inline void local_dec(local_t *l)
10543 {
10544- asm volatile(_ASM_DEC "%0"
10545+ asm volatile(_ASM_DEC "%0\n"
10546+
10547+#ifdef CONFIG_PAX_REFCOUNT
10548+ "jno 0f\n"
10549+ _ASM_INC "%0\n"
10550+ "int $4\n0:\n"
10551+ _ASM_EXTABLE(0b, 0b)
10552+#endif
10553+
10554 : "+m" (l->a.counter));
10555 }
10556
10557 static inline void local_add(long i, local_t *l)
10558 {
10559- asm volatile(_ASM_ADD "%1,%0"
10560+ asm volatile(_ASM_ADD "%1,%0\n"
10561+
10562+#ifdef CONFIG_PAX_REFCOUNT
10563+ "jno 0f\n"
10564+ _ASM_SUB "%1,%0\n"
10565+ "int $4\n0:\n"
10566+ _ASM_EXTABLE(0b, 0b)
10567+#endif
10568+
10569 : "+m" (l->a.counter)
10570 : "ir" (i));
10571 }
10572
10573 static inline void local_sub(long i, local_t *l)
10574 {
10575- asm volatile(_ASM_SUB "%1,%0"
10576+ asm volatile(_ASM_SUB "%1,%0\n"
10577+
10578+#ifdef CONFIG_PAX_REFCOUNT
10579+ "jno 0f\n"
10580+ _ASM_ADD "%1,%0\n"
10581+ "int $4\n0:\n"
10582+ _ASM_EXTABLE(0b, 0b)
10583+#endif
10584+
10585 : "+m" (l->a.counter)
10586 : "ir" (i));
10587 }
10588@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10589 {
10590 unsigned char c;
10591
10592- asm volatile(_ASM_SUB "%2,%0; sete %1"
10593+ asm volatile(_ASM_SUB "%2,%0\n"
10594+
10595+#ifdef CONFIG_PAX_REFCOUNT
10596+ "jno 0f\n"
10597+ _ASM_ADD "%2,%0\n"
10598+ "int $4\n0:\n"
10599+ _ASM_EXTABLE(0b, 0b)
10600+#endif
10601+
10602+ "sete %1\n"
10603 : "+m" (l->a.counter), "=qm" (c)
10604 : "ir" (i) : "memory");
10605 return c;
10606@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10607 {
10608 unsigned char c;
10609
10610- asm volatile(_ASM_DEC "%0; sete %1"
10611+ asm volatile(_ASM_DEC "%0\n"
10612+
10613+#ifdef CONFIG_PAX_REFCOUNT
10614+ "jno 0f\n"
10615+ _ASM_INC "%0\n"
10616+ "int $4\n0:\n"
10617+ _ASM_EXTABLE(0b, 0b)
10618+#endif
10619+
10620+ "sete %1\n"
10621 : "+m" (l->a.counter), "=qm" (c)
10622 : : "memory");
10623 return c != 0;
10624@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10625 {
10626 unsigned char c;
10627
10628- asm volatile(_ASM_INC "%0; sete %1"
10629+ asm volatile(_ASM_INC "%0\n"
10630+
10631+#ifdef CONFIG_PAX_REFCOUNT
10632+ "jno 0f\n"
10633+ _ASM_DEC "%0\n"
10634+ "int $4\n0:\n"
10635+ _ASM_EXTABLE(0b, 0b)
10636+#endif
10637+
10638+ "sete %1\n"
10639 : "+m" (l->a.counter), "=qm" (c)
10640 : : "memory");
10641 return c != 0;
10642@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10643 {
10644 unsigned char c;
10645
10646- asm volatile(_ASM_ADD "%2,%0; sets %1"
10647+ asm volatile(_ASM_ADD "%2,%0\n"
10648+
10649+#ifdef CONFIG_PAX_REFCOUNT
10650+ "jno 0f\n"
10651+ _ASM_SUB "%2,%0\n"
10652+ "int $4\n0:\n"
10653+ _ASM_EXTABLE(0b, 0b)
10654+#endif
10655+
10656+ "sets %1\n"
10657 : "+m" (l->a.counter), "=qm" (c)
10658 : "ir" (i) : "memory");
10659 return c;
10660@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10661 #endif
10662 /* Modern 486+ processor */
10663 __i = i;
10664- asm volatile(_ASM_XADD "%0, %1;"
10665+ asm volatile(_ASM_XADD "%0, %1\n"
10666+
10667+#ifdef CONFIG_PAX_REFCOUNT
10668+ "jno 0f\n"
10669+ _ASM_MOV "%0,%1\n"
10670+ "int $4\n0:\n"
10671+ _ASM_EXTABLE(0b, 0b)
10672+#endif
10673+
10674 : "+r" (i), "+m" (l->a.counter)
10675 : : "memory");
10676 return i + __i;
10677diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10678index ef51b50..514ba37 100644
10679--- a/arch/x86/include/asm/microcode.h
10680+++ b/arch/x86/include/asm/microcode.h
10681@@ -12,13 +12,13 @@ struct device;
10682 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10683
10684 struct microcode_ops {
10685- enum ucode_state (*request_microcode_user) (int cpu,
10686+ enum ucode_state (* const request_microcode_user) (int cpu,
10687 const void __user *buf, size_t size);
10688
10689- enum ucode_state (*request_microcode_fw) (int cpu,
10690+ enum ucode_state (* const request_microcode_fw) (int cpu,
10691 struct device *device);
10692
10693- void (*microcode_fini_cpu) (int cpu);
10694+ void (* const microcode_fini_cpu) (int cpu);
10695
10696 /*
10697 * The generic 'microcode_core' part guarantees that
10698@@ -38,18 +38,18 @@ struct ucode_cpu_info {
10699 extern struct ucode_cpu_info ucode_cpu_info[];
10700
10701 #ifdef CONFIG_MICROCODE_INTEL
10702-extern struct microcode_ops * __init init_intel_microcode(void);
10703+extern const struct microcode_ops * __init init_intel_microcode(void);
10704 #else
10705-static inline struct microcode_ops * __init init_intel_microcode(void)
10706+static inline const struct microcode_ops * __init init_intel_microcode(void)
10707 {
10708 return NULL;
10709 }
10710 #endif /* CONFIG_MICROCODE_INTEL */
10711
10712 #ifdef CONFIG_MICROCODE_AMD
10713-extern struct microcode_ops * __init init_amd_microcode(void);
10714+extern const struct microcode_ops * __init init_amd_microcode(void);
10715 #else
10716-static inline struct microcode_ops * __init init_amd_microcode(void)
10717+static inline const struct microcode_ops * __init init_amd_microcode(void)
10718 {
10719 return NULL;
10720 }
10721diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10722index 593e51d..fa69c9a 100644
10723--- a/arch/x86/include/asm/mman.h
10724+++ b/arch/x86/include/asm/mman.h
10725@@ -5,4 +5,14 @@
10726
10727 #include <asm-generic/mman.h>
10728
10729+#ifdef __KERNEL__
10730+#ifndef __ASSEMBLY__
10731+#ifdef CONFIG_X86_32
10732+#define arch_mmap_check i386_mmap_check
10733+int i386_mmap_check(unsigned long addr, unsigned long len,
10734+ unsigned long flags);
10735+#endif
10736+#endif
10737+#endif
10738+
10739 #endif /* _ASM_X86_MMAN_H */
10740diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10741index 80a1dee..239c67d 100644
10742--- a/arch/x86/include/asm/mmu.h
10743+++ b/arch/x86/include/asm/mmu.h
10744@@ -9,10 +9,23 @@
10745 * we put the segment information here.
10746 */
10747 typedef struct {
10748- void *ldt;
10749+ struct desc_struct *ldt;
10750 int size;
10751 struct mutex lock;
10752- void *vdso;
10753+ unsigned long vdso;
10754+
10755+#ifdef CONFIG_X86_32
10756+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10757+ unsigned long user_cs_base;
10758+ unsigned long user_cs_limit;
10759+
10760+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10761+ cpumask_t cpu_user_cs_mask;
10762+#endif
10763+
10764+#endif
10765+#endif
10766+
10767 } mm_context_t;
10768
10769 #ifdef CONFIG_SMP
10770diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10771index 8b5393e..8143173 100644
10772--- a/arch/x86/include/asm/mmu_context.h
10773+++ b/arch/x86/include/asm/mmu_context.h
10774@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10775
10776 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10777 {
10778+
10779+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10780+ unsigned int i;
10781+ pgd_t *pgd;
10782+
10783+ pax_open_kernel();
10784+ pgd = get_cpu_pgd(smp_processor_id());
10785+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10786+ set_pgd_batched(pgd+i, native_make_pgd(0));
10787+ pax_close_kernel();
10788+#endif
10789+
10790 #ifdef CONFIG_SMP
10791 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10792 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10793@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10794 struct task_struct *tsk)
10795 {
10796 unsigned cpu = smp_processor_id();
10797+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10798+ int tlbstate = TLBSTATE_OK;
10799+#endif
10800
10801 if (likely(prev != next)) {
10802 #ifdef CONFIG_SMP
10803+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10804+ tlbstate = percpu_read(cpu_tlbstate.state);
10805+#endif
10806 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10807 percpu_write(cpu_tlbstate.active_mm, next);
10808 #endif
10809 cpumask_set_cpu(cpu, mm_cpumask(next));
10810
10811 /* Re-load page tables */
10812+#ifdef CONFIG_PAX_PER_CPU_PGD
10813+ pax_open_kernel();
10814+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10815+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10816+ pax_close_kernel();
10817+ load_cr3(get_cpu_pgd(cpu));
10818+#else
10819 load_cr3(next->pgd);
10820+#endif
10821
10822 /* stop flush ipis for the previous mm */
10823 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10824@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10825 */
10826 if (unlikely(prev->context.ldt != next->context.ldt))
10827 load_LDT_nolock(&next->context);
10828- }
10829+
10830+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10831+ if (!nx_enabled) {
10832+ smp_mb__before_clear_bit();
10833+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10834+ smp_mb__after_clear_bit();
10835+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10836+ }
10837+#endif
10838+
10839+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10840+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10841+ prev->context.user_cs_limit != next->context.user_cs_limit))
10842+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10843 #ifdef CONFIG_SMP
10844+ else if (unlikely(tlbstate != TLBSTATE_OK))
10845+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10846+#endif
10847+#endif
10848+
10849+ }
10850 else {
10851+
10852+#ifdef CONFIG_PAX_PER_CPU_PGD
10853+ pax_open_kernel();
10854+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10855+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10856+ pax_close_kernel();
10857+ load_cr3(get_cpu_pgd(cpu));
10858+#endif
10859+
10860+#ifdef CONFIG_SMP
10861 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10862 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10863
10864@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10865 * tlb flush IPI delivery. We must reload CR3
10866 * to make sure to use no freed page tables.
10867 */
10868+
10869+#ifndef CONFIG_PAX_PER_CPU_PGD
10870 load_cr3(next->pgd);
10871+#endif
10872+
10873 load_LDT_nolock(&next->context);
10874+
10875+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10876+ if (!nx_enabled)
10877+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10878+#endif
10879+
10880+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10881+#ifdef CONFIG_PAX_PAGEEXEC
10882+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10883+#endif
10884+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10885+#endif
10886+
10887 }
10888+#endif
10889 }
10890-#endif
10891 }
10892
10893 #define activate_mm(prev, next) \
10894diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10895index 3e2ce58..caaf478 100644
10896--- a/arch/x86/include/asm/module.h
10897+++ b/arch/x86/include/asm/module.h
10898@@ -5,6 +5,7 @@
10899
10900 #ifdef CONFIG_X86_64
10901 /* X86_64 does not define MODULE_PROC_FAMILY */
10902+#define MODULE_PROC_FAMILY ""
10903 #elif defined CONFIG_M386
10904 #define MODULE_PROC_FAMILY "386 "
10905 #elif defined CONFIG_M486
10906@@ -59,13 +60,26 @@
10907 #error unknown processor family
10908 #endif
10909
10910-#ifdef CONFIG_X86_32
10911-# ifdef CONFIG_4KSTACKS
10912-# define MODULE_STACKSIZE "4KSTACKS "
10913-# else
10914-# define MODULE_STACKSIZE ""
10915-# endif
10916-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10917+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10918+#define MODULE_STACKSIZE "4KSTACKS "
10919+#else
10920+#define MODULE_STACKSIZE ""
10921 #endif
10922
10923+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10924+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10925+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10926+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10927+#else
10928+#define MODULE_PAX_KERNEXEC ""
10929+#endif
10930+
10931+#ifdef CONFIG_PAX_MEMORY_UDEREF
10932+#define MODULE_PAX_UDEREF "UDEREF "
10933+#else
10934+#define MODULE_PAX_UDEREF ""
10935+#endif
10936+
10937+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10938+
10939 #endif /* _ASM_X86_MODULE_H */
10940diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10941index 7639dbf..e08a58c 100644
10942--- a/arch/x86/include/asm/page_64_types.h
10943+++ b/arch/x86/include/asm/page_64_types.h
10944@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10945
10946 /* duplicated to the one in bootmem.h */
10947 extern unsigned long max_pfn;
10948-extern unsigned long phys_base;
10949+extern const unsigned long phys_base;
10950
10951 extern unsigned long __phys_addr(unsigned long);
10952 #define __phys_reloc_hide(x) (x)
10953diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10954index efb3899..ef30687 100644
10955--- a/arch/x86/include/asm/paravirt.h
10956+++ b/arch/x86/include/asm/paravirt.h
10957@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10958 val);
10959 }
10960
10961+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10962+{
10963+ pgdval_t val = native_pgd_val(pgd);
10964+
10965+ if (sizeof(pgdval_t) > sizeof(long))
10966+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10967+ val, (u64)val >> 32);
10968+ else
10969+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10970+ val);
10971+}
10972+
10973 static inline void pgd_clear(pgd_t *pgdp)
10974 {
10975 set_pgd(pgdp, __pgd(0));
10976@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10977 pv_mmu_ops.set_fixmap(idx, phys, flags);
10978 }
10979
10980+#ifdef CONFIG_PAX_KERNEXEC
10981+static inline unsigned long pax_open_kernel(void)
10982+{
10983+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10984+}
10985+
10986+static inline unsigned long pax_close_kernel(void)
10987+{
10988+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10989+}
10990+#else
10991+static inline unsigned long pax_open_kernel(void) { return 0; }
10992+static inline unsigned long pax_close_kernel(void) { return 0; }
10993+#endif
10994+
10995 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10996
10997 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10998@@ -945,7 +972,7 @@ extern void default_banner(void);
10999
11000 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11001 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11002-#define PARA_INDIRECT(addr) *%cs:addr
11003+#define PARA_INDIRECT(addr) *%ss:addr
11004 #endif
11005
11006 #define INTERRUPT_RETURN \
11007@@ -1022,6 +1049,21 @@ extern void default_banner(void);
11008 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11009 CLBR_NONE, \
11010 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11011+
11012+#define GET_CR0_INTO_RDI \
11013+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11014+ mov %rax,%rdi
11015+
11016+#define SET_RDI_INTO_CR0 \
11017+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11018+
11019+#define GET_CR3_INTO_RDI \
11020+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11021+ mov %rax,%rdi
11022+
11023+#define SET_RDI_INTO_CR3 \
11024+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11025+
11026 #endif /* CONFIG_X86_32 */
11027
11028 #endif /* __ASSEMBLY__ */
11029diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11030index 9357473..aeb2de5 100644
11031--- a/arch/x86/include/asm/paravirt_types.h
11032+++ b/arch/x86/include/asm/paravirt_types.h
11033@@ -78,19 +78,19 @@ struct pv_init_ops {
11034 */
11035 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11036 unsigned long addr, unsigned len);
11037-};
11038+} __no_const;
11039
11040
11041 struct pv_lazy_ops {
11042 /* Set deferred update mode, used for batching operations. */
11043 void (*enter)(void);
11044 void (*leave)(void);
11045-};
11046+} __no_const;
11047
11048 struct pv_time_ops {
11049 unsigned long long (*sched_clock)(void);
11050 unsigned long (*get_tsc_khz)(void);
11051-};
11052+} __no_const;
11053
11054 struct pv_cpu_ops {
11055 /* hooks for various privileged instructions */
11056@@ -186,7 +186,7 @@ struct pv_cpu_ops {
11057
11058 void (*start_context_switch)(struct task_struct *prev);
11059 void (*end_context_switch)(struct task_struct *next);
11060-};
11061+} __no_const;
11062
11063 struct pv_irq_ops {
11064 /*
11065@@ -217,7 +217,7 @@ struct pv_apic_ops {
11066 unsigned long start_eip,
11067 unsigned long start_esp);
11068 #endif
11069-};
11070+} __no_const;
11071
11072 struct pv_mmu_ops {
11073 unsigned long (*read_cr2)(void);
11074@@ -301,6 +301,7 @@ struct pv_mmu_ops {
11075 struct paravirt_callee_save make_pud;
11076
11077 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11078+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11079 #endif /* PAGETABLE_LEVELS == 4 */
11080 #endif /* PAGETABLE_LEVELS >= 3 */
11081
11082@@ -316,6 +317,12 @@ struct pv_mmu_ops {
11083 an mfn. We can tell which is which from the index. */
11084 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11085 phys_addr_t phys, pgprot_t flags);
11086+
11087+#ifdef CONFIG_PAX_KERNEXEC
11088+ unsigned long (*pax_open_kernel)(void);
11089+ unsigned long (*pax_close_kernel)(void);
11090+#endif
11091+
11092 };
11093
11094 struct raw_spinlock;
11095@@ -326,7 +333,7 @@ struct pv_lock_ops {
11096 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
11097 int (*spin_trylock)(struct raw_spinlock *lock);
11098 void (*spin_unlock)(struct raw_spinlock *lock);
11099-};
11100+} __no_const;
11101
11102 /* This contains all the paravirt structures: we get a convenient
11103 * number for each function using the offset which we use to indicate
11104diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
11105index b399988..3f47c38 100644
11106--- a/arch/x86/include/asm/pci_x86.h
11107+++ b/arch/x86/include/asm/pci_x86.h
11108@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
11109 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
11110
11111 struct pci_raw_ops {
11112- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11113+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11114 int reg, int len, u32 *val);
11115- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11116+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11117 int reg, int len, u32 val);
11118 };
11119
11120-extern struct pci_raw_ops *raw_pci_ops;
11121-extern struct pci_raw_ops *raw_pci_ext_ops;
11122+extern const struct pci_raw_ops *raw_pci_ops;
11123+extern const struct pci_raw_ops *raw_pci_ext_ops;
11124
11125-extern struct pci_raw_ops pci_direct_conf1;
11126+extern const struct pci_raw_ops pci_direct_conf1;
11127 extern bool port_cf9_safe;
11128
11129 /* arch_initcall level */
11130diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
11131index b65a36d..50345a4 100644
11132--- a/arch/x86/include/asm/percpu.h
11133+++ b/arch/x86/include/asm/percpu.h
11134@@ -78,6 +78,7 @@ do { \
11135 if (0) { \
11136 T__ tmp__; \
11137 tmp__ = (val); \
11138+ (void)tmp__; \
11139 } \
11140 switch (sizeof(var)) { \
11141 case 1: \
11142diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11143index 271de94..ef944d6 100644
11144--- a/arch/x86/include/asm/pgalloc.h
11145+++ b/arch/x86/include/asm/pgalloc.h
11146@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11147 pmd_t *pmd, pte_t *pte)
11148 {
11149 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11150+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11151+}
11152+
11153+static inline void pmd_populate_user(struct mm_struct *mm,
11154+ pmd_t *pmd, pte_t *pte)
11155+{
11156+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11157 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11158 }
11159
11160diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11161index 2334982..70bc412 100644
11162--- a/arch/x86/include/asm/pgtable-2level.h
11163+++ b/arch/x86/include/asm/pgtable-2level.h
11164@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11165
11166 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11167 {
11168+ pax_open_kernel();
11169 *pmdp = pmd;
11170+ pax_close_kernel();
11171 }
11172
11173 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11174diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11175index 33927d2..ccde329 100644
11176--- a/arch/x86/include/asm/pgtable-3level.h
11177+++ b/arch/x86/include/asm/pgtable-3level.h
11178@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11179
11180 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11181 {
11182+ pax_open_kernel();
11183 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11184+ pax_close_kernel();
11185 }
11186
11187 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11188 {
11189+ pax_open_kernel();
11190 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11191+ pax_close_kernel();
11192 }
11193
11194 /*
11195diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11196index af6fd36..867ff74 100644
11197--- a/arch/x86/include/asm/pgtable.h
11198+++ b/arch/x86/include/asm/pgtable.h
11199@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
11200
11201 #ifndef __PAGETABLE_PUD_FOLDED
11202 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11203+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11204 #define pgd_clear(pgd) native_pgd_clear(pgd)
11205 #endif
11206
11207@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
11208
11209 #define arch_end_context_switch(prev) do {} while(0)
11210
11211+#define pax_open_kernel() native_pax_open_kernel()
11212+#define pax_close_kernel() native_pax_close_kernel()
11213 #endif /* CONFIG_PARAVIRT */
11214
11215+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11216+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11217+
11218+#ifdef CONFIG_PAX_KERNEXEC
11219+static inline unsigned long native_pax_open_kernel(void)
11220+{
11221+ unsigned long cr0;
11222+
11223+ preempt_disable();
11224+ barrier();
11225+ cr0 = read_cr0() ^ X86_CR0_WP;
11226+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11227+ write_cr0(cr0);
11228+ return cr0 ^ X86_CR0_WP;
11229+}
11230+
11231+static inline unsigned long native_pax_close_kernel(void)
11232+{
11233+ unsigned long cr0;
11234+
11235+ cr0 = read_cr0() ^ X86_CR0_WP;
11236+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11237+ write_cr0(cr0);
11238+ barrier();
11239+ preempt_enable_no_resched();
11240+ return cr0 ^ X86_CR0_WP;
11241+}
11242+#else
11243+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11244+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11245+#endif
11246+
11247 /*
11248 * The following only work if pte_present() is true.
11249 * Undefined behaviour if not..
11250 */
11251+static inline int pte_user(pte_t pte)
11252+{
11253+ return pte_val(pte) & _PAGE_USER;
11254+}
11255+
11256 static inline int pte_dirty(pte_t pte)
11257 {
11258 return pte_flags(pte) & _PAGE_DIRTY;
11259@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11260 return pte_clear_flags(pte, _PAGE_RW);
11261 }
11262
11263+static inline pte_t pte_mkread(pte_t pte)
11264+{
11265+ return __pte(pte_val(pte) | _PAGE_USER);
11266+}
11267+
11268 static inline pte_t pte_mkexec(pte_t pte)
11269 {
11270- return pte_clear_flags(pte, _PAGE_NX);
11271+#ifdef CONFIG_X86_PAE
11272+ if (__supported_pte_mask & _PAGE_NX)
11273+ return pte_clear_flags(pte, _PAGE_NX);
11274+ else
11275+#endif
11276+ return pte_set_flags(pte, _PAGE_USER);
11277+}
11278+
11279+static inline pte_t pte_exprotect(pte_t pte)
11280+{
11281+#ifdef CONFIG_X86_PAE
11282+ if (__supported_pte_mask & _PAGE_NX)
11283+ return pte_set_flags(pte, _PAGE_NX);
11284+ else
11285+#endif
11286+ return pte_clear_flags(pte, _PAGE_USER);
11287 }
11288
11289 static inline pte_t pte_mkdirty(pte_t pte)
11290@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11291 #endif
11292
11293 #ifndef __ASSEMBLY__
11294+
11295+#ifdef CONFIG_PAX_PER_CPU_PGD
11296+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11297+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11298+{
11299+ return cpu_pgd[cpu];
11300+}
11301+#endif
11302+
11303 #include <linux/mm_types.h>
11304
11305 static inline int pte_none(pte_t pte)
11306@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11307
11308 static inline int pgd_bad(pgd_t pgd)
11309 {
11310- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11311+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11312 }
11313
11314 static inline int pgd_none(pgd_t pgd)
11315@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11316 * pgd_offset() returns a (pgd_t *)
11317 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11318 */
11319-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11320+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11321+
11322+#ifdef CONFIG_PAX_PER_CPU_PGD
11323+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11324+#endif
11325+
11326 /*
11327 * a shortcut which implies the use of the kernel's pgd, instead
11328 * of a process's
11329@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11330 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11331 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11332
11333+#ifdef CONFIG_X86_32
11334+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11335+#else
11336+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11337+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11338+
11339+#ifdef CONFIG_PAX_MEMORY_UDEREF
11340+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11341+#else
11342+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11343+#endif
11344+
11345+#endif
11346+
11347 #ifndef __ASSEMBLY__
11348
11349 extern int direct_gbpages;
11350@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11351 * dst and src can be on the same page, but the range must not overlap,
11352 * and must not cross a page boundary.
11353 */
11354-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11355+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11356 {
11357- memcpy(dst, src, count * sizeof(pgd_t));
11358+ pax_open_kernel();
11359+ while (count--)
11360+ *dst++ = *src++;
11361+ pax_close_kernel();
11362 }
11363
11364+#ifdef CONFIG_PAX_PER_CPU_PGD
11365+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11366+#endif
11367+
11368+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11369+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11370+#else
11371+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11372+#endif
11373
11374 #include <asm-generic/pgtable.h>
11375 #endif /* __ASSEMBLY__ */
11376diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11377index 750f1bf..971e839 100644
11378--- a/arch/x86/include/asm/pgtable_32.h
11379+++ b/arch/x86/include/asm/pgtable_32.h
11380@@ -26,9 +26,6 @@
11381 struct mm_struct;
11382 struct vm_area_struct;
11383
11384-extern pgd_t swapper_pg_dir[1024];
11385-extern pgd_t trampoline_pg_dir[1024];
11386-
11387 static inline void pgtable_cache_init(void) { }
11388 static inline void check_pgt_cache(void) { }
11389 void paging_init(void);
11390@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11391 # include <asm/pgtable-2level.h>
11392 #endif
11393
11394+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11395+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11396+#ifdef CONFIG_X86_PAE
11397+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11398+#endif
11399+
11400 #if defined(CONFIG_HIGHPTE)
11401 #define __KM_PTE \
11402 (in_nmi() ? KM_NMI_PTE : \
11403@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11404 /* Clear a kernel PTE and flush it from the TLB */
11405 #define kpte_clear_flush(ptep, vaddr) \
11406 do { \
11407+ pax_open_kernel(); \
11408 pte_clear(&init_mm, (vaddr), (ptep)); \
11409+ pax_close_kernel(); \
11410 __flush_tlb_one((vaddr)); \
11411 } while (0)
11412
11413@@ -85,6 +90,9 @@ do { \
11414
11415 #endif /* !__ASSEMBLY__ */
11416
11417+#define HAVE_ARCH_UNMAPPED_AREA
11418+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11419+
11420 /*
11421 * kern_addr_valid() is (1) for FLATMEM and (0) for
11422 * SPARSEMEM and DISCONTIGMEM
11423diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11424index 5e67c15..12d5c47 100644
11425--- a/arch/x86/include/asm/pgtable_32_types.h
11426+++ b/arch/x86/include/asm/pgtable_32_types.h
11427@@ -8,7 +8,7 @@
11428 */
11429 #ifdef CONFIG_X86_PAE
11430 # include <asm/pgtable-3level_types.h>
11431-# define PMD_SIZE (1UL << PMD_SHIFT)
11432+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11433 # define PMD_MASK (~(PMD_SIZE - 1))
11434 #else
11435 # include <asm/pgtable-2level_types.h>
11436@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11437 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11438 #endif
11439
11440+#ifdef CONFIG_PAX_KERNEXEC
11441+#ifndef __ASSEMBLY__
11442+extern unsigned char MODULES_EXEC_VADDR[];
11443+extern unsigned char MODULES_EXEC_END[];
11444+#endif
11445+#include <asm/boot.h>
11446+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11447+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11448+#else
11449+#define ktla_ktva(addr) (addr)
11450+#define ktva_ktla(addr) (addr)
11451+#endif
11452+
11453 #define MODULES_VADDR VMALLOC_START
11454 #define MODULES_END VMALLOC_END
11455 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11456diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11457index c57a301..6b414ff 100644
11458--- a/arch/x86/include/asm/pgtable_64.h
11459+++ b/arch/x86/include/asm/pgtable_64.h
11460@@ -16,10 +16,14 @@
11461
11462 extern pud_t level3_kernel_pgt[512];
11463 extern pud_t level3_ident_pgt[512];
11464+extern pud_t level3_vmalloc_start_pgt[512];
11465+extern pud_t level3_vmalloc_end_pgt[512];
11466+extern pud_t level3_vmemmap_pgt[512];
11467+extern pud_t level2_vmemmap_pgt[512];
11468 extern pmd_t level2_kernel_pgt[512];
11469 extern pmd_t level2_fixmap_pgt[512];
11470-extern pmd_t level2_ident_pgt[512];
11471-extern pgd_t init_level4_pgt[];
11472+extern pmd_t level2_ident_pgt[512*2];
11473+extern pgd_t init_level4_pgt[512];
11474
11475 #define swapper_pg_dir init_level4_pgt
11476
11477@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11478
11479 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11480 {
11481+ pax_open_kernel();
11482 *pmdp = pmd;
11483+ pax_close_kernel();
11484 }
11485
11486 static inline void native_pmd_clear(pmd_t *pmd)
11487@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11488
11489 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11490 {
11491+ pax_open_kernel();
11492+ *pgdp = pgd;
11493+ pax_close_kernel();
11494+}
11495+
11496+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11497+{
11498 *pgdp = pgd;
11499 }
11500
11501diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11502index 766ea16..5b96cb3 100644
11503--- a/arch/x86/include/asm/pgtable_64_types.h
11504+++ b/arch/x86/include/asm/pgtable_64_types.h
11505@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11506 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11507 #define MODULES_END _AC(0xffffffffff000000, UL)
11508 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11509+#define MODULES_EXEC_VADDR MODULES_VADDR
11510+#define MODULES_EXEC_END MODULES_END
11511+
11512+#define ktla_ktva(addr) (addr)
11513+#define ktva_ktla(addr) (addr)
11514
11515 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11516diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11517index d1f4a76..2f46ba1 100644
11518--- a/arch/x86/include/asm/pgtable_types.h
11519+++ b/arch/x86/include/asm/pgtable_types.h
11520@@ -16,12 +16,11 @@
11521 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11522 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11523 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11524-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11525+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11526 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11527 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11528 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11529-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11530-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11531+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11532 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11533
11534 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11535@@ -39,7 +38,6 @@
11536 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11537 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11538 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11539-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11540 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11541 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11542 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11543@@ -55,8 +53,10 @@
11544
11545 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11546 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11547-#else
11548+#elif defined(CONFIG_KMEMCHECK)
11549 #define _PAGE_NX (_AT(pteval_t, 0))
11550+#else
11551+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11552 #endif
11553
11554 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11555@@ -93,6 +93,9 @@
11556 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11557 _PAGE_ACCESSED)
11558
11559+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11560+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11561+
11562 #define __PAGE_KERNEL_EXEC \
11563 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11564 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11565@@ -103,8 +106,8 @@
11566 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11567 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11568 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11569-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11570-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11571+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11572+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11573 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11574 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11575 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11576@@ -163,8 +166,8 @@
11577 * bits are combined, this will alow user to access the high address mapped
11578 * VDSO in the presence of CONFIG_COMPAT_VDSO
11579 */
11580-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11581-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11582+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11583+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11584 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11585 #endif
11586
11587@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11588 {
11589 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11590 }
11591+#endif
11592
11593+#if PAGETABLE_LEVELS == 3
11594+#include <asm-generic/pgtable-nopud.h>
11595+#endif
11596+
11597+#if PAGETABLE_LEVELS == 2
11598+#include <asm-generic/pgtable-nopmd.h>
11599+#endif
11600+
11601+#ifndef __ASSEMBLY__
11602 #if PAGETABLE_LEVELS > 3
11603 typedef struct { pudval_t pud; } pud_t;
11604
11605@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11606 return pud.pud;
11607 }
11608 #else
11609-#include <asm-generic/pgtable-nopud.h>
11610-
11611 static inline pudval_t native_pud_val(pud_t pud)
11612 {
11613 return native_pgd_val(pud.pgd);
11614@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11615 return pmd.pmd;
11616 }
11617 #else
11618-#include <asm-generic/pgtable-nopmd.h>
11619-
11620 static inline pmdval_t native_pmd_val(pmd_t pmd)
11621 {
11622 return native_pgd_val(pmd.pud.pgd);
11623@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11624
11625 extern pteval_t __supported_pte_mask;
11626 extern void set_nx(void);
11627+
11628+#ifdef CONFIG_X86_32
11629+#ifdef CONFIG_X86_PAE
11630 extern int nx_enabled;
11631+#else
11632+#define nx_enabled (0)
11633+#endif
11634+#else
11635+#define nx_enabled (1)
11636+#endif
11637
11638 #define pgprot_writecombine pgprot_writecombine
11639 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11640diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11641index fa04dea..5f823fc 100644
11642--- a/arch/x86/include/asm/processor.h
11643+++ b/arch/x86/include/asm/processor.h
11644@@ -272,7 +272,7 @@ struct tss_struct {
11645
11646 } ____cacheline_aligned;
11647
11648-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11649+extern struct tss_struct init_tss[NR_CPUS];
11650
11651 /*
11652 * Save the original ist values for checking stack pointers during debugging
11653@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11654 */
11655 #define TASK_SIZE PAGE_OFFSET
11656 #define TASK_SIZE_MAX TASK_SIZE
11657+
11658+#ifdef CONFIG_PAX_SEGMEXEC
11659+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11660+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11661+#else
11662 #define STACK_TOP TASK_SIZE
11663-#define STACK_TOP_MAX STACK_TOP
11664+#endif
11665+
11666+#define STACK_TOP_MAX TASK_SIZE
11667
11668 #define INIT_THREAD { \
11669- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11670+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11671 .vm86_info = NULL, \
11672 .sysenter_cs = __KERNEL_CS, \
11673 .io_bitmap_ptr = NULL, \
11674@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11675 */
11676 #define INIT_TSS { \
11677 .x86_tss = { \
11678- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11679+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11680 .ss0 = __KERNEL_DS, \
11681 .ss1 = __KERNEL_CS, \
11682 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11683@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11684 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11685
11686 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11687-#define KSTK_TOP(info) \
11688-({ \
11689- unsigned long *__ptr = (unsigned long *)(info); \
11690- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11691-})
11692+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11693
11694 /*
11695 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11696@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11697 #define task_pt_regs(task) \
11698 ({ \
11699 struct pt_regs *__regs__; \
11700- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11701+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11702 __regs__ - 1; \
11703 })
11704
11705@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11706 /*
11707 * User space process size. 47bits minus one guard page.
11708 */
11709-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11710+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11711
11712 /* This decides where the kernel will search for a free chunk of vm
11713 * space during mmap's.
11714 */
11715 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11716- 0xc0000000 : 0xFFFFe000)
11717+ 0xc0000000 : 0xFFFFf000)
11718
11719 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11720 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11721@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11722 #define STACK_TOP_MAX TASK_SIZE_MAX
11723
11724 #define INIT_THREAD { \
11725- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11726+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11727 }
11728
11729 #define INIT_TSS { \
11730- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11731+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11732 }
11733
11734 /*
11735@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11736 */
11737 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11738
11739+#ifdef CONFIG_PAX_SEGMEXEC
11740+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11741+#endif
11742+
11743 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11744
11745 /* Get/set a process' ability to use the timestamp counter instruction */
11746diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11747index 0f0d908..f2e3da2 100644
11748--- a/arch/x86/include/asm/ptrace.h
11749+++ b/arch/x86/include/asm/ptrace.h
11750@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11751 }
11752
11753 /*
11754- * user_mode_vm(regs) determines whether a register set came from user mode.
11755+ * user_mode(regs) determines whether a register set came from user mode.
11756 * This is true if V8086 mode was enabled OR if the register set was from
11757 * protected mode with RPL-3 CS value. This tricky test checks that with
11758 * one comparison. Many places in the kernel can bypass this full check
11759- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11760+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11761+ * be used.
11762 */
11763-static inline int user_mode(struct pt_regs *regs)
11764+static inline int user_mode_novm(struct pt_regs *regs)
11765 {
11766 #ifdef CONFIG_X86_32
11767 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11768 #else
11769- return !!(regs->cs & 3);
11770+ return !!(regs->cs & SEGMENT_RPL_MASK);
11771 #endif
11772 }
11773
11774-static inline int user_mode_vm(struct pt_regs *regs)
11775+static inline int user_mode(struct pt_regs *regs)
11776 {
11777 #ifdef CONFIG_X86_32
11778 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11779 USER_RPL;
11780 #else
11781- return user_mode(regs);
11782+ return user_mode_novm(regs);
11783 #endif
11784 }
11785
11786diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11787index 562d4fd..6e39df1 100644
11788--- a/arch/x86/include/asm/reboot.h
11789+++ b/arch/x86/include/asm/reboot.h
11790@@ -6,19 +6,19 @@
11791 struct pt_regs;
11792
11793 struct machine_ops {
11794- void (*restart)(char *cmd);
11795- void (*halt)(void);
11796- void (*power_off)(void);
11797+ void (* __noreturn restart)(char *cmd);
11798+ void (* __noreturn halt)(void);
11799+ void (* __noreturn power_off)(void);
11800 void (*shutdown)(void);
11801 void (*crash_shutdown)(struct pt_regs *);
11802- void (*emergency_restart)(void);
11803-};
11804+ void (* __noreturn emergency_restart)(void);
11805+} __no_const;
11806
11807 extern struct machine_ops machine_ops;
11808
11809 void native_machine_crash_shutdown(struct pt_regs *regs);
11810 void native_machine_shutdown(void);
11811-void machine_real_restart(const unsigned char *code, int length);
11812+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11813
11814 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11815 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11816diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11817index 606ede1..dbfff37 100644
11818--- a/arch/x86/include/asm/rwsem.h
11819+++ b/arch/x86/include/asm/rwsem.h
11820@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11821 {
11822 asm volatile("# beginning down_read\n\t"
11823 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11824+
11825+#ifdef CONFIG_PAX_REFCOUNT
11826+ "jno 0f\n"
11827+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11828+ "int $4\n0:\n"
11829+ _ASM_EXTABLE(0b, 0b)
11830+#endif
11831+
11832 /* adds 0x00000001, returns the old value */
11833 " jns 1f\n"
11834 " call call_rwsem_down_read_failed\n"
11835@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11836 "1:\n\t"
11837 " mov %1,%2\n\t"
11838 " add %3,%2\n\t"
11839+
11840+#ifdef CONFIG_PAX_REFCOUNT
11841+ "jno 0f\n"
11842+ "sub %3,%2\n"
11843+ "int $4\n0:\n"
11844+ _ASM_EXTABLE(0b, 0b)
11845+#endif
11846+
11847 " jle 2f\n\t"
11848 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11849 " jnz 1b\n\t"
11850@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11851 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11852 asm volatile("# beginning down_write\n\t"
11853 LOCK_PREFIX " xadd %1,(%2)\n\t"
11854+
11855+#ifdef CONFIG_PAX_REFCOUNT
11856+ "jno 0f\n"
11857+ "mov %1,(%2)\n"
11858+ "int $4\n0:\n"
11859+ _ASM_EXTABLE(0b, 0b)
11860+#endif
11861+
11862 /* subtract 0x0000ffff, returns the old value */
11863 " test %1,%1\n\t"
11864 /* was the count 0 before? */
11865@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11866 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11867 asm volatile("# beginning __up_read\n\t"
11868 LOCK_PREFIX " xadd %1,(%2)\n\t"
11869+
11870+#ifdef CONFIG_PAX_REFCOUNT
11871+ "jno 0f\n"
11872+ "mov %1,(%2)\n"
11873+ "int $4\n0:\n"
11874+ _ASM_EXTABLE(0b, 0b)
11875+#endif
11876+
11877 /* subtracts 1, returns the old value */
11878 " jns 1f\n\t"
11879 " call call_rwsem_wake\n"
11880@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11881 rwsem_count_t tmp;
11882 asm volatile("# beginning __up_write\n\t"
11883 LOCK_PREFIX " xadd %1,(%2)\n\t"
11884+
11885+#ifdef CONFIG_PAX_REFCOUNT
11886+ "jno 0f\n"
11887+ "mov %1,(%2)\n"
11888+ "int $4\n0:\n"
11889+ _ASM_EXTABLE(0b, 0b)
11890+#endif
11891+
11892 /* tries to transition
11893 0xffff0001 -> 0x00000000 */
11894 " jz 1f\n"
11895@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11896 {
11897 asm volatile("# beginning __downgrade_write\n\t"
11898 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11899+
11900+#ifdef CONFIG_PAX_REFCOUNT
11901+ "jno 0f\n"
11902+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11903+ "int $4\n0:\n"
11904+ _ASM_EXTABLE(0b, 0b)
11905+#endif
11906+
11907 /*
11908 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11909 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11910@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11911 static inline void rwsem_atomic_add(rwsem_count_t delta,
11912 struct rw_semaphore *sem)
11913 {
11914- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11915+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11916+
11917+#ifdef CONFIG_PAX_REFCOUNT
11918+ "jno 0f\n"
11919+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11920+ "int $4\n0:\n"
11921+ _ASM_EXTABLE(0b, 0b)
11922+#endif
11923+
11924 : "+m" (sem->count)
11925 : "er" (delta));
11926 }
11927@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11928 {
11929 rwsem_count_t tmp = delta;
11930
11931- asm volatile(LOCK_PREFIX "xadd %0,%1"
11932+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11933+
11934+#ifdef CONFIG_PAX_REFCOUNT
11935+ "jno 0f\n"
11936+ "mov %0,%1\n"
11937+ "int $4\n0:\n"
11938+ _ASM_EXTABLE(0b, 0b)
11939+#endif
11940+
11941 : "+r" (tmp), "+m" (sem->count)
11942 : : "memory");
11943
11944diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11945index 14e0ed8..7f7dd5e 100644
11946--- a/arch/x86/include/asm/segment.h
11947+++ b/arch/x86/include/asm/segment.h
11948@@ -62,10 +62,15 @@
11949 * 26 - ESPFIX small SS
11950 * 27 - per-cpu [ offset to per-cpu data area ]
11951 * 28 - stack_canary-20 [ for stack protector ]
11952- * 29 - unused
11953- * 30 - unused
11954+ * 29 - PCI BIOS CS
11955+ * 30 - PCI BIOS DS
11956 * 31 - TSS for double fault handler
11957 */
11958+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11959+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11960+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11961+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11962+
11963 #define GDT_ENTRY_TLS_MIN 6
11964 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11965
11966@@ -77,6 +82,8 @@
11967
11968 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11969
11970+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11971+
11972 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11973
11974 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11975@@ -88,7 +95,7 @@
11976 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11977 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11978
11979-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11980+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11981 #ifdef CONFIG_SMP
11982 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11983 #else
11984@@ -102,6 +109,12 @@
11985 #define __KERNEL_STACK_CANARY 0
11986 #endif
11987
11988+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11989+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11990+
11991+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11992+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11993+
11994 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11995
11996 /*
11997@@ -139,7 +152,7 @@
11998 */
11999
12000 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12001-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12002+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12003
12004
12005 #else
12006@@ -163,6 +176,8 @@
12007 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12008 #define __USER32_DS __USER_DS
12009
12010+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12011+
12012 #define GDT_ENTRY_TSS 8 /* needs two entries */
12013 #define GDT_ENTRY_LDT 10 /* needs two entries */
12014 #define GDT_ENTRY_TLS_MIN 12
12015@@ -183,6 +198,7 @@
12016 #endif
12017
12018 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12019+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12020 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12021 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12022 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12023diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12024index 4c2f63c..5685db2 100644
12025--- a/arch/x86/include/asm/smp.h
12026+++ b/arch/x86/include/asm/smp.h
12027@@ -24,7 +24,7 @@ extern unsigned int num_processors;
12028 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12029 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12030 DECLARE_PER_CPU(u16, cpu_llc_id);
12031-DECLARE_PER_CPU(int, cpu_number);
12032+DECLARE_PER_CPU(unsigned int, cpu_number);
12033
12034 static inline struct cpumask *cpu_sibling_mask(int cpu)
12035 {
12036@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12037 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12038
12039 /* Static state in head.S used to set up a CPU */
12040-extern struct {
12041- void *sp;
12042- unsigned short ss;
12043-} stack_start;
12044+extern unsigned long stack_start; /* Initial stack pointer address */
12045
12046 struct smp_ops {
12047 void (*smp_prepare_boot_cpu)(void);
12048@@ -60,7 +57,7 @@ struct smp_ops {
12049
12050 void (*send_call_func_ipi)(const struct cpumask *mask);
12051 void (*send_call_func_single_ipi)(int cpu);
12052-};
12053+} __no_const;
12054
12055 /* Globals due to paravirt */
12056 extern void set_cpu_sibling_map(int cpu);
12057@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12058 extern int safe_smp_processor_id(void);
12059
12060 #elif defined(CONFIG_X86_64_SMP)
12061-#define raw_smp_processor_id() (percpu_read(cpu_number))
12062-
12063-#define stack_smp_processor_id() \
12064-({ \
12065- struct thread_info *ti; \
12066- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12067- ti->cpu; \
12068-})
12069+#define raw_smp_processor_id() (percpu_read(cpu_number))
12070+#define stack_smp_processor_id() raw_smp_processor_id()
12071 #define safe_smp_processor_id() smp_processor_id()
12072
12073 #endif
12074diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12075index 4e77853..4359783 100644
12076--- a/arch/x86/include/asm/spinlock.h
12077+++ b/arch/x86/include/asm/spinlock.h
12078@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
12079 static inline void __raw_read_lock(raw_rwlock_t *rw)
12080 {
12081 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
12082+
12083+#ifdef CONFIG_PAX_REFCOUNT
12084+ "jno 0f\n"
12085+ LOCK_PREFIX " addl $1,(%0)\n"
12086+ "int $4\n0:\n"
12087+ _ASM_EXTABLE(0b, 0b)
12088+#endif
12089+
12090 "jns 1f\n"
12091 "call __read_lock_failed\n\t"
12092 "1:\n"
12093@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
12094 static inline void __raw_write_lock(raw_rwlock_t *rw)
12095 {
12096 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
12097+
12098+#ifdef CONFIG_PAX_REFCOUNT
12099+ "jno 0f\n"
12100+ LOCK_PREFIX " addl %1,(%0)\n"
12101+ "int $4\n0:\n"
12102+ _ASM_EXTABLE(0b, 0b)
12103+#endif
12104+
12105 "jz 1f\n"
12106 "call __write_lock_failed\n\t"
12107 "1:\n"
12108@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
12109
12110 static inline void __raw_read_unlock(raw_rwlock_t *rw)
12111 {
12112- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
12113+ asm volatile(LOCK_PREFIX "incl %0\n"
12114+
12115+#ifdef CONFIG_PAX_REFCOUNT
12116+ "jno 0f\n"
12117+ LOCK_PREFIX "decl %0\n"
12118+ "int $4\n0:\n"
12119+ _ASM_EXTABLE(0b, 0b)
12120+#endif
12121+
12122+ :"+m" (rw->lock) : : "memory");
12123 }
12124
12125 static inline void __raw_write_unlock(raw_rwlock_t *rw)
12126 {
12127- asm volatile(LOCK_PREFIX "addl %1, %0"
12128+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
12129+
12130+#ifdef CONFIG_PAX_REFCOUNT
12131+ "jno 0f\n"
12132+ LOCK_PREFIX "subl %1, %0\n"
12133+ "int $4\n0:\n"
12134+ _ASM_EXTABLE(0b, 0b)
12135+#endif
12136+
12137 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
12138 }
12139
12140diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12141index 1575177..cb23f52 100644
12142--- a/arch/x86/include/asm/stackprotector.h
12143+++ b/arch/x86/include/asm/stackprotector.h
12144@@ -48,7 +48,7 @@
12145 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12146 */
12147 #define GDT_STACK_CANARY_INIT \
12148- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12149+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12150
12151 /*
12152 * Initialize the stackprotector canary value.
12153@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
12154
12155 static inline void load_stack_canary_segment(void)
12156 {
12157-#ifdef CONFIG_X86_32
12158+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12159 asm volatile ("mov %0, %%gs" : : "r" (0));
12160 #endif
12161 }
12162diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12163index e0fbf29..858ef4a 100644
12164--- a/arch/x86/include/asm/system.h
12165+++ b/arch/x86/include/asm/system.h
12166@@ -132,7 +132,7 @@ do { \
12167 "thread_return:\n\t" \
12168 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12169 __switch_canary \
12170- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12171+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12172 "movq %%rax,%%rdi\n\t" \
12173 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12174 "jnz ret_from_fork\n\t" \
12175@@ -143,7 +143,7 @@ do { \
12176 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12177 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12178 [_tif_fork] "i" (_TIF_FORK), \
12179- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12180+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
12181 [current_task] "m" (per_cpu_var(current_task)) \
12182 __switch_canary_iparam \
12183 : "memory", "cc" __EXTRA_CLOBBER)
12184@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12185 {
12186 unsigned long __limit;
12187 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12188- return __limit + 1;
12189+ return __limit;
12190 }
12191
12192 static inline void native_clts(void)
12193@@ -340,12 +340,12 @@ void enable_hlt(void);
12194
12195 void cpu_idle_wait(void);
12196
12197-extern unsigned long arch_align_stack(unsigned long sp);
12198+#define arch_align_stack(x) ((x) & ~0xfUL)
12199 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12200
12201 void default_idle(void);
12202
12203-void stop_this_cpu(void *dummy);
12204+void stop_this_cpu(void *dummy) __noreturn;
12205
12206 /*
12207 * Force strict CPU ordering.
12208diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12209index 19c3ce4..8962535 100644
12210--- a/arch/x86/include/asm/thread_info.h
12211+++ b/arch/x86/include/asm/thread_info.h
12212@@ -10,6 +10,7 @@
12213 #include <linux/compiler.h>
12214 #include <asm/page.h>
12215 #include <asm/types.h>
12216+#include <asm/percpu.h>
12217
12218 /*
12219 * low level task data that entry.S needs immediate access to
12220@@ -24,7 +25,6 @@ struct exec_domain;
12221 #include <asm/atomic.h>
12222
12223 struct thread_info {
12224- struct task_struct *task; /* main task structure */
12225 struct exec_domain *exec_domain; /* execution domain */
12226 __u32 flags; /* low level flags */
12227 __u32 status; /* thread synchronous flags */
12228@@ -34,18 +34,12 @@ struct thread_info {
12229 mm_segment_t addr_limit;
12230 struct restart_block restart_block;
12231 void __user *sysenter_return;
12232-#ifdef CONFIG_X86_32
12233- unsigned long previous_esp; /* ESP of the previous stack in
12234- case of nested (IRQ) stacks
12235- */
12236- __u8 supervisor_stack[0];
12237-#endif
12238+ unsigned long lowest_stack;
12239 int uaccess_err;
12240 };
12241
12242-#define INIT_THREAD_INFO(tsk) \
12243+#define INIT_THREAD_INFO \
12244 { \
12245- .task = &tsk, \
12246 .exec_domain = &default_exec_domain, \
12247 .flags = 0, \
12248 .cpu = 0, \
12249@@ -56,7 +50,7 @@ struct thread_info {
12250 }, \
12251 }
12252
12253-#define init_thread_info (init_thread_union.thread_info)
12254+#define init_thread_info (init_thread_union.stack)
12255 #define init_stack (init_thread_union.stack)
12256
12257 #else /* !__ASSEMBLY__ */
12258@@ -163,45 +157,40 @@ struct thread_info {
12259 #define alloc_thread_info(tsk) \
12260 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12261
12262-#ifdef CONFIG_X86_32
12263-
12264-#define STACK_WARN (THREAD_SIZE/8)
12265-/*
12266- * macros/functions for gaining access to the thread information structure
12267- *
12268- * preempt_count needs to be 1 initially, until the scheduler is functional.
12269- */
12270-#ifndef __ASSEMBLY__
12271-
12272-
12273-/* how to get the current stack pointer from C */
12274-register unsigned long current_stack_pointer asm("esp") __used;
12275-
12276-/* how to get the thread information struct from C */
12277-static inline struct thread_info *current_thread_info(void)
12278-{
12279- return (struct thread_info *)
12280- (current_stack_pointer & ~(THREAD_SIZE - 1));
12281-}
12282-
12283-#else /* !__ASSEMBLY__ */
12284-
12285+#ifdef __ASSEMBLY__
12286 /* how to get the thread information struct from ASM */
12287 #define GET_THREAD_INFO(reg) \
12288- movl $-THREAD_SIZE, reg; \
12289- andl %esp, reg
12290+ mov PER_CPU_VAR(current_tinfo), reg
12291
12292 /* use this one if reg already contains %esp */
12293-#define GET_THREAD_INFO_WITH_ESP(reg) \
12294- andl $-THREAD_SIZE, reg
12295+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12296+#else
12297+/* how to get the thread information struct from C */
12298+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12299+
12300+static __always_inline struct thread_info *current_thread_info(void)
12301+{
12302+ return percpu_read_stable(current_tinfo);
12303+}
12304+#endif
12305+
12306+#ifdef CONFIG_X86_32
12307+
12308+#define STACK_WARN (THREAD_SIZE/8)
12309+/*
12310+ * macros/functions for gaining access to the thread information structure
12311+ *
12312+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12313+ */
12314+#ifndef __ASSEMBLY__
12315+
12316+/* how to get the current stack pointer from C */
12317+register unsigned long current_stack_pointer asm("esp") __used;
12318
12319 #endif
12320
12321 #else /* X86_32 */
12322
12323-#include <asm/percpu.h>
12324-#define KERNEL_STACK_OFFSET (5*8)
12325-
12326 /*
12327 * macros/functions for gaining access to the thread information structure
12328 * preempt_count needs to be 1 initially, until the scheduler is functional.
12329@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12330 #ifndef __ASSEMBLY__
12331 DECLARE_PER_CPU(unsigned long, kernel_stack);
12332
12333-static inline struct thread_info *current_thread_info(void)
12334-{
12335- struct thread_info *ti;
12336- ti = (void *)(percpu_read_stable(kernel_stack) +
12337- KERNEL_STACK_OFFSET - THREAD_SIZE);
12338- return ti;
12339-}
12340-
12341-#else /* !__ASSEMBLY__ */
12342-
12343-/* how to get the thread information struct from ASM */
12344-#define GET_THREAD_INFO(reg) \
12345- movq PER_CPU_VAR(kernel_stack),reg ; \
12346- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12347-
12348+/* how to get the current stack pointer from C */
12349+register unsigned long current_stack_pointer asm("rsp") __used;
12350 #endif
12351
12352 #endif /* !X86_32 */
12353@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12354 extern void free_thread_info(struct thread_info *ti);
12355 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12356 #define arch_task_cache_init arch_task_cache_init
12357+
12358+#define __HAVE_THREAD_FUNCTIONS
12359+#define task_thread_info(task) (&(task)->tinfo)
12360+#define task_stack_page(task) ((task)->stack)
12361+#define setup_thread_stack(p, org) do {} while (0)
12362+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12363+
12364+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12365+extern struct task_struct *alloc_task_struct(void);
12366+extern void free_task_struct(struct task_struct *);
12367+
12368 #endif
12369 #endif /* _ASM_X86_THREAD_INFO_H */
12370diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12371index 61c5874..8a046e9 100644
12372--- a/arch/x86/include/asm/uaccess.h
12373+++ b/arch/x86/include/asm/uaccess.h
12374@@ -8,12 +8,15 @@
12375 #include <linux/thread_info.h>
12376 #include <linux/prefetch.h>
12377 #include <linux/string.h>
12378+#include <linux/sched.h>
12379 #include <asm/asm.h>
12380 #include <asm/page.h>
12381
12382 #define VERIFY_READ 0
12383 #define VERIFY_WRITE 1
12384
12385+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12386+
12387 /*
12388 * The fs value determines whether argument validity checking should be
12389 * performed or not. If get_fs() == USER_DS, checking is performed, with
12390@@ -29,7 +32,12 @@
12391
12392 #define get_ds() (KERNEL_DS)
12393 #define get_fs() (current_thread_info()->addr_limit)
12394+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12395+void __set_fs(mm_segment_t x);
12396+void set_fs(mm_segment_t x);
12397+#else
12398 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12399+#endif
12400
12401 #define segment_eq(a, b) ((a).seg == (b).seg)
12402
12403@@ -77,7 +85,33 @@
12404 * checks that the pointer is in the user space range - after calling
12405 * this function, memory access functions may still return -EFAULT.
12406 */
12407-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12408+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12409+#define access_ok(type, addr, size) \
12410+({ \
12411+ long __size = size; \
12412+ unsigned long __addr = (unsigned long)addr; \
12413+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12414+ unsigned long __end_ao = __addr + __size - 1; \
12415+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12416+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12417+ while(__addr_ao <= __end_ao) { \
12418+ char __c_ao; \
12419+ __addr_ao += PAGE_SIZE; \
12420+ if (__size > PAGE_SIZE) \
12421+ cond_resched(); \
12422+ if (__get_user(__c_ao, (char __user *)__addr)) \
12423+ break; \
12424+ if (type != VERIFY_WRITE) { \
12425+ __addr = __addr_ao; \
12426+ continue; \
12427+ } \
12428+ if (__put_user(__c_ao, (char __user *)__addr)) \
12429+ break; \
12430+ __addr = __addr_ao; \
12431+ } \
12432+ } \
12433+ __ret_ao; \
12434+})
12435
12436 /*
12437 * The exception table consists of pairs of addresses: the first is the
12438@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12439 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12440 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12441
12442-
12443+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12444+#define __copyuser_seg "gs;"
12445+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12446+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12447+#else
12448+#define __copyuser_seg
12449+#define __COPYUSER_SET_ES
12450+#define __COPYUSER_RESTORE_ES
12451+#endif
12452
12453 #ifdef CONFIG_X86_32
12454 #define __put_user_asm_u64(x, addr, err, errret) \
12455- asm volatile("1: movl %%eax,0(%2)\n" \
12456- "2: movl %%edx,4(%2)\n" \
12457+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12458+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12459 "3:\n" \
12460 ".section .fixup,\"ax\"\n" \
12461 "4: movl %3,%0\n" \
12462@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12463 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12464
12465 #define __put_user_asm_ex_u64(x, addr) \
12466- asm volatile("1: movl %%eax,0(%1)\n" \
12467- "2: movl %%edx,4(%1)\n" \
12468+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12469+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12470 "3:\n" \
12471 _ASM_EXTABLE(1b, 2b - 1b) \
12472 _ASM_EXTABLE(2b, 3b - 2b) \
12473@@ -253,7 +295,7 @@ extern void __put_user_8(void);
12474 __typeof__(*(ptr)) __pu_val; \
12475 __chk_user_ptr(ptr); \
12476 might_fault(); \
12477- __pu_val = x; \
12478+ __pu_val = (x); \
12479 switch (sizeof(*(ptr))) { \
12480 case 1: \
12481 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12482@@ -374,7 +416,7 @@ do { \
12483 } while (0)
12484
12485 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12486- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12487+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12488 "2:\n" \
12489 ".section .fixup,\"ax\"\n" \
12490 "3: mov %3,%0\n" \
12491@@ -382,7 +424,7 @@ do { \
12492 " jmp 2b\n" \
12493 ".previous\n" \
12494 _ASM_EXTABLE(1b, 3b) \
12495- : "=r" (err), ltype(x) \
12496+ : "=r" (err), ltype (x) \
12497 : "m" (__m(addr)), "i" (errret), "0" (err))
12498
12499 #define __get_user_size_ex(x, ptr, size) \
12500@@ -407,7 +449,7 @@ do { \
12501 } while (0)
12502
12503 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12504- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12505+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12506 "2:\n" \
12507 _ASM_EXTABLE(1b, 2b - 1b) \
12508 : ltype(x) : "m" (__m(addr)))
12509@@ -424,13 +466,24 @@ do { \
12510 int __gu_err; \
12511 unsigned long __gu_val; \
12512 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12513- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12514+ (x) = (__typeof__(*(ptr)))__gu_val; \
12515 __gu_err; \
12516 })
12517
12518 /* FIXME: this hack is definitely wrong -AK */
12519 struct __large_struct { unsigned long buf[100]; };
12520-#define __m(x) (*(struct __large_struct __user *)(x))
12521+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12522+#define ____m(x) \
12523+({ \
12524+ unsigned long ____x = (unsigned long)(x); \
12525+ if (____x < PAX_USER_SHADOW_BASE) \
12526+ ____x += PAX_USER_SHADOW_BASE; \
12527+ (void __user *)____x; \
12528+})
12529+#else
12530+#define ____m(x) (x)
12531+#endif
12532+#define __m(x) (*(struct __large_struct __user *)____m(x))
12533
12534 /*
12535 * Tell gcc we read from memory instead of writing: this is because
12536@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12537 * aliasing issues.
12538 */
12539 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12540- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12541+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12542 "2:\n" \
12543 ".section .fixup,\"ax\"\n" \
12544 "3: mov %3,%0\n" \
12545@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12546 ".previous\n" \
12547 _ASM_EXTABLE(1b, 3b) \
12548 : "=r"(err) \
12549- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12550+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12551
12552 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12553- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12554+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12555 "2:\n" \
12556 _ASM_EXTABLE(1b, 2b - 1b) \
12557 : : ltype(x), "m" (__m(addr)))
12558@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12559 * On error, the variable @x is set to zero.
12560 */
12561
12562+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12563+#define __get_user(x, ptr) get_user((x), (ptr))
12564+#else
12565 #define __get_user(x, ptr) \
12566 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12567+#endif
12568
12569 /**
12570 * __put_user: - Write a simple value into user space, with less checking.
12571@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12572 * Returns zero on success, or -EFAULT on error.
12573 */
12574
12575+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12576+#define __put_user(x, ptr) put_user((x), (ptr))
12577+#else
12578 #define __put_user(x, ptr) \
12579 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12580+#endif
12581
12582 #define __get_user_unaligned __get_user
12583 #define __put_user_unaligned __put_user
12584@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12585 #define get_user_ex(x, ptr) do { \
12586 unsigned long __gue_val; \
12587 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12588- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12589+ (x) = (__typeof__(*(ptr)))__gue_val; \
12590 } while (0)
12591
12592 #ifdef CONFIG_X86_WP_WORKS_OK
12593@@ -567,6 +628,7 @@ extern struct movsl_mask {
12594
12595 #define ARCH_HAS_NOCACHE_UACCESS 1
12596
12597+#define ARCH_HAS_SORT_EXTABLE
12598 #ifdef CONFIG_X86_32
12599 # include "uaccess_32.h"
12600 #else
12601diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12602index 632fb44..e30e334 100644
12603--- a/arch/x86/include/asm/uaccess_32.h
12604+++ b/arch/x86/include/asm/uaccess_32.h
12605@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12606 static __always_inline unsigned long __must_check
12607 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12608 {
12609+ pax_track_stack();
12610+
12611+ if ((long)n < 0)
12612+ return n;
12613+
12614 if (__builtin_constant_p(n)) {
12615 unsigned long ret;
12616
12617@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12618 return ret;
12619 }
12620 }
12621+ if (!__builtin_constant_p(n))
12622+ check_object_size(from, n, true);
12623 return __copy_to_user_ll(to, from, n);
12624 }
12625
12626@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12627 __copy_to_user(void __user *to, const void *from, unsigned long n)
12628 {
12629 might_fault();
12630+
12631 return __copy_to_user_inatomic(to, from, n);
12632 }
12633
12634 static __always_inline unsigned long
12635 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12636 {
12637+ if ((long)n < 0)
12638+ return n;
12639+
12640 /* Avoid zeroing the tail if the copy fails..
12641 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12642 * but as the zeroing behaviour is only significant when n is not
12643@@ -138,6 +149,12 @@ static __always_inline unsigned long
12644 __copy_from_user(void *to, const void __user *from, unsigned long n)
12645 {
12646 might_fault();
12647+
12648+ pax_track_stack();
12649+
12650+ if ((long)n < 0)
12651+ return n;
12652+
12653 if (__builtin_constant_p(n)) {
12654 unsigned long ret;
12655
12656@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12657 return ret;
12658 }
12659 }
12660+ if (!__builtin_constant_p(n))
12661+ check_object_size(to, n, false);
12662 return __copy_from_user_ll(to, from, n);
12663 }
12664
12665@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12666 const void __user *from, unsigned long n)
12667 {
12668 might_fault();
12669+
12670+ if ((long)n < 0)
12671+ return n;
12672+
12673 if (__builtin_constant_p(n)) {
12674 unsigned long ret;
12675
12676@@ -182,14 +205,62 @@ static __always_inline unsigned long
12677 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12678 unsigned long n)
12679 {
12680- return __copy_from_user_ll_nocache_nozero(to, from, n);
12681+ if ((long)n < 0)
12682+ return n;
12683+
12684+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12685+}
12686+
12687+/**
12688+ * copy_to_user: - Copy a block of data into user space.
12689+ * @to: Destination address, in user space.
12690+ * @from: Source address, in kernel space.
12691+ * @n: Number of bytes to copy.
12692+ *
12693+ * Context: User context only. This function may sleep.
12694+ *
12695+ * Copy data from kernel space to user space.
12696+ *
12697+ * Returns number of bytes that could not be copied.
12698+ * On success, this will be zero.
12699+ */
12700+static __always_inline unsigned long __must_check
12701+copy_to_user(void __user *to, const void *from, unsigned long n)
12702+{
12703+ if (access_ok(VERIFY_WRITE, to, n))
12704+ n = __copy_to_user(to, from, n);
12705+ return n;
12706+}
12707+
12708+/**
12709+ * copy_from_user: - Copy a block of data from user space.
12710+ * @to: Destination address, in kernel space.
12711+ * @from: Source address, in user space.
12712+ * @n: Number of bytes to copy.
12713+ *
12714+ * Context: User context only. This function may sleep.
12715+ *
12716+ * Copy data from user space to kernel space.
12717+ *
12718+ * Returns number of bytes that could not be copied.
12719+ * On success, this will be zero.
12720+ *
12721+ * If some data could not be copied, this function will pad the copied
12722+ * data to the requested size using zero bytes.
12723+ */
12724+static __always_inline unsigned long __must_check
12725+copy_from_user(void *to, const void __user *from, unsigned long n)
12726+{
12727+ if (access_ok(VERIFY_READ, from, n))
12728+ n = __copy_from_user(to, from, n);
12729+ else if ((long)n > 0) {
12730+ if (!__builtin_constant_p(n))
12731+ check_object_size(to, n, false);
12732+ memset(to, 0, n);
12733+ }
12734+ return n;
12735 }
12736
12737-unsigned long __must_check copy_to_user(void __user *to,
12738- const void *from, unsigned long n);
12739-unsigned long __must_check copy_from_user(void *to,
12740- const void __user *from,
12741- unsigned long n);
12742 long __must_check strncpy_from_user(char *dst, const char __user *src,
12743 long count);
12744 long __must_check __strncpy_from_user(char *dst,
12745diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12746index db24b21..f595ae7 100644
12747--- a/arch/x86/include/asm/uaccess_64.h
12748+++ b/arch/x86/include/asm/uaccess_64.h
12749@@ -9,6 +9,9 @@
12750 #include <linux/prefetch.h>
12751 #include <linux/lockdep.h>
12752 #include <asm/page.h>
12753+#include <asm/pgtable.h>
12754+
12755+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12756
12757 /*
12758 * Copy To/From Userspace
12759@@ -16,116 +19,205 @@
12760
12761 /* Handles exceptions in both to and from, but doesn't do access_ok */
12762 __must_check unsigned long
12763-copy_user_generic(void *to, const void *from, unsigned len);
12764+copy_user_generic(void *to, const void *from, unsigned long len);
12765
12766 __must_check unsigned long
12767-copy_to_user(void __user *to, const void *from, unsigned len);
12768-__must_check unsigned long
12769-copy_from_user(void *to, const void __user *from, unsigned len);
12770-__must_check unsigned long
12771-copy_in_user(void __user *to, const void __user *from, unsigned len);
12772+copy_in_user(void __user *to, const void __user *from, unsigned long len);
12773
12774 static __always_inline __must_check
12775-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12776+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12777 {
12778- int ret = 0;
12779+ unsigned ret = 0;
12780
12781 might_fault();
12782- if (!__builtin_constant_p(size))
12783- return copy_user_generic(dst, (__force void *)src, size);
12784+
12785+ if (size > INT_MAX)
12786+ return size;
12787+
12788+#ifdef CONFIG_PAX_MEMORY_UDEREF
12789+ if (!__access_ok(VERIFY_READ, src, size))
12790+ return size;
12791+#endif
12792+
12793+ if (!__builtin_constant_p(size)) {
12794+ check_object_size(dst, size, false);
12795+
12796+#ifdef CONFIG_PAX_MEMORY_UDEREF
12797+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12798+ src += PAX_USER_SHADOW_BASE;
12799+#endif
12800+
12801+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12802+ }
12803 switch (size) {
12804- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12805+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12806 ret, "b", "b", "=q", 1);
12807 return ret;
12808- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12809+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12810 ret, "w", "w", "=r", 2);
12811 return ret;
12812- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12813+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12814 ret, "l", "k", "=r", 4);
12815 return ret;
12816- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12817+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12818 ret, "q", "", "=r", 8);
12819 return ret;
12820 case 10:
12821- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12822+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12823 ret, "q", "", "=r", 10);
12824 if (unlikely(ret))
12825 return ret;
12826 __get_user_asm(*(u16 *)(8 + (char *)dst),
12827- (u16 __user *)(8 + (char __user *)src),
12828+ (const u16 __user *)(8 + (const char __user *)src),
12829 ret, "w", "w", "=r", 2);
12830 return ret;
12831 case 16:
12832- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12833+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12834 ret, "q", "", "=r", 16);
12835 if (unlikely(ret))
12836 return ret;
12837 __get_user_asm(*(u64 *)(8 + (char *)dst),
12838- (u64 __user *)(8 + (char __user *)src),
12839+ (const u64 __user *)(8 + (const char __user *)src),
12840 ret, "q", "", "=r", 8);
12841 return ret;
12842 default:
12843- return copy_user_generic(dst, (__force void *)src, size);
12844+
12845+#ifdef CONFIG_PAX_MEMORY_UDEREF
12846+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12847+ src += PAX_USER_SHADOW_BASE;
12848+#endif
12849+
12850+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12851 }
12852 }
12853
12854 static __always_inline __must_check
12855-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12856+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12857 {
12858- int ret = 0;
12859+ unsigned ret = 0;
12860
12861 might_fault();
12862- if (!__builtin_constant_p(size))
12863- return copy_user_generic((__force void *)dst, src, size);
12864+
12865+ pax_track_stack();
12866+
12867+ if (size > INT_MAX)
12868+ return size;
12869+
12870+#ifdef CONFIG_PAX_MEMORY_UDEREF
12871+ if (!__access_ok(VERIFY_WRITE, dst, size))
12872+ return size;
12873+#endif
12874+
12875+ if (!__builtin_constant_p(size)) {
12876+ check_object_size(src, size, true);
12877+
12878+#ifdef CONFIG_PAX_MEMORY_UDEREF
12879+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12880+ dst += PAX_USER_SHADOW_BASE;
12881+#endif
12882+
12883+ return copy_user_generic((__force_kernel void *)dst, src, size);
12884+ }
12885 switch (size) {
12886- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12887+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12888 ret, "b", "b", "iq", 1);
12889 return ret;
12890- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12891+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12892 ret, "w", "w", "ir", 2);
12893 return ret;
12894- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12895+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12896 ret, "l", "k", "ir", 4);
12897 return ret;
12898- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12899+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12900 ret, "q", "", "er", 8);
12901 return ret;
12902 case 10:
12903- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12904+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12905 ret, "q", "", "er", 10);
12906 if (unlikely(ret))
12907 return ret;
12908 asm("":::"memory");
12909- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12910+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12911 ret, "w", "w", "ir", 2);
12912 return ret;
12913 case 16:
12914- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12915+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12916 ret, "q", "", "er", 16);
12917 if (unlikely(ret))
12918 return ret;
12919 asm("":::"memory");
12920- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12921+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12922 ret, "q", "", "er", 8);
12923 return ret;
12924 default:
12925- return copy_user_generic((__force void *)dst, src, size);
12926+
12927+#ifdef CONFIG_PAX_MEMORY_UDEREF
12928+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12929+ dst += PAX_USER_SHADOW_BASE;
12930+#endif
12931+
12932+ return copy_user_generic((__force_kernel void *)dst, src, size);
12933+ }
12934+}
12935+
12936+static __always_inline __must_check
12937+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12938+{
12939+ if (access_ok(VERIFY_WRITE, to, len))
12940+ len = __copy_to_user(to, from, len);
12941+ return len;
12942+}
12943+
12944+static __always_inline __must_check
12945+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12946+{
12947+ might_fault();
12948+
12949+ if (access_ok(VERIFY_READ, from, len))
12950+ len = __copy_from_user(to, from, len);
12951+ else if (len < INT_MAX) {
12952+ if (!__builtin_constant_p(len))
12953+ check_object_size(to, len, false);
12954+ memset(to, 0, len);
12955 }
12956+ return len;
12957 }
12958
12959 static __always_inline __must_check
12960-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12961+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12962 {
12963- int ret = 0;
12964+ unsigned ret = 0;
12965
12966 might_fault();
12967- if (!__builtin_constant_p(size))
12968- return copy_user_generic((__force void *)dst,
12969- (__force void *)src, size);
12970+
12971+ pax_track_stack();
12972+
12973+ if (size > INT_MAX)
12974+ return size;
12975+
12976+#ifdef CONFIG_PAX_MEMORY_UDEREF
12977+ if (!__access_ok(VERIFY_READ, src, size))
12978+ return size;
12979+ if (!__access_ok(VERIFY_WRITE, dst, size))
12980+ return size;
12981+#endif
12982+
12983+ if (!__builtin_constant_p(size)) {
12984+
12985+#ifdef CONFIG_PAX_MEMORY_UDEREF
12986+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12987+ src += PAX_USER_SHADOW_BASE;
12988+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12989+ dst += PAX_USER_SHADOW_BASE;
12990+#endif
12991+
12992+ return copy_user_generic((__force_kernel void *)dst,
12993+ (__force_kernel const void *)src, size);
12994+ }
12995 switch (size) {
12996 case 1: {
12997 u8 tmp;
12998- __get_user_asm(tmp, (u8 __user *)src,
12999+ __get_user_asm(tmp, (const u8 __user *)src,
13000 ret, "b", "b", "=q", 1);
13001 if (likely(!ret))
13002 __put_user_asm(tmp, (u8 __user *)dst,
13003@@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13004 }
13005 case 2: {
13006 u16 tmp;
13007- __get_user_asm(tmp, (u16 __user *)src,
13008+ __get_user_asm(tmp, (const u16 __user *)src,
13009 ret, "w", "w", "=r", 2);
13010 if (likely(!ret))
13011 __put_user_asm(tmp, (u16 __user *)dst,
13012@@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13013
13014 case 4: {
13015 u32 tmp;
13016- __get_user_asm(tmp, (u32 __user *)src,
13017+ __get_user_asm(tmp, (const u32 __user *)src,
13018 ret, "l", "k", "=r", 4);
13019 if (likely(!ret))
13020 __put_user_asm(tmp, (u32 __user *)dst,
13021@@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13022 }
13023 case 8: {
13024 u64 tmp;
13025- __get_user_asm(tmp, (u64 __user *)src,
13026+ __get_user_asm(tmp, (const u64 __user *)src,
13027 ret, "q", "", "=r", 8);
13028 if (likely(!ret))
13029 __put_user_asm(tmp, (u64 __user *)dst,
13030@@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13031 return ret;
13032 }
13033 default:
13034- return copy_user_generic((__force void *)dst,
13035- (__force void *)src, size);
13036+
13037+#ifdef CONFIG_PAX_MEMORY_UDEREF
13038+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13039+ src += PAX_USER_SHADOW_BASE;
13040+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13041+ dst += PAX_USER_SHADOW_BASE;
13042+#endif
13043+
13044+ return copy_user_generic((__force_kernel void *)dst,
13045+ (__force_kernel const void *)src, size);
13046 }
13047 }
13048
13049@@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
13050 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
13051 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13052
13053-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
13054- unsigned size);
13055+static __must_check __always_inline unsigned long
13056+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13057+{
13058+ pax_track_stack();
13059+
13060+ if (size > INT_MAX)
13061+ return size;
13062+
13063+#ifdef CONFIG_PAX_MEMORY_UDEREF
13064+ if (!__access_ok(VERIFY_READ, src, size))
13065+ return size;
13066
13067-static __must_check __always_inline int
13068-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13069+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13070+ src += PAX_USER_SHADOW_BASE;
13071+#endif
13072+
13073+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13074+}
13075+
13076+static __must_check __always_inline unsigned long
13077+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13078 {
13079- return copy_user_generic((__force void *)dst, src, size);
13080+ if (size > INT_MAX)
13081+ return size;
13082+
13083+#ifdef CONFIG_PAX_MEMORY_UDEREF
13084+ if (!__access_ok(VERIFY_WRITE, dst, size))
13085+ return size;
13086+
13087+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13088+ dst += PAX_USER_SHADOW_BASE;
13089+#endif
13090+
13091+ return copy_user_generic((__force_kernel void *)dst, src, size);
13092 }
13093
13094-extern long __copy_user_nocache(void *dst, const void __user *src,
13095- unsigned size, int zerorest);
13096+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13097+ unsigned long size, int zerorest);
13098
13099-static inline int
13100-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13101+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13102 {
13103 might_sleep();
13104+
13105+ if (size > INT_MAX)
13106+ return size;
13107+
13108+#ifdef CONFIG_PAX_MEMORY_UDEREF
13109+ if (!__access_ok(VERIFY_READ, src, size))
13110+ return size;
13111+#endif
13112+
13113 return __copy_user_nocache(dst, src, size, 1);
13114 }
13115
13116-static inline int
13117-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13118- unsigned size)
13119+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13120+ unsigned long size)
13121 {
13122+ if (size > INT_MAX)
13123+ return size;
13124+
13125+#ifdef CONFIG_PAX_MEMORY_UDEREF
13126+ if (!__access_ok(VERIFY_READ, src, size))
13127+ return size;
13128+#endif
13129+
13130 return __copy_user_nocache(dst, src, size, 0);
13131 }
13132
13133-unsigned long
13134-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13135+extern unsigned long
13136+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
13137
13138 #endif /* _ASM_X86_UACCESS_64_H */
13139diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13140index 9064052..786cfbc 100644
13141--- a/arch/x86/include/asm/vdso.h
13142+++ b/arch/x86/include/asm/vdso.h
13143@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
13144 #define VDSO32_SYMBOL(base, name) \
13145 ({ \
13146 extern const char VDSO32_##name[]; \
13147- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13148+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13149 })
13150 #endif
13151
13152diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
13153index 3d61e20..9507180 100644
13154--- a/arch/x86/include/asm/vgtod.h
13155+++ b/arch/x86/include/asm/vgtod.h
13156@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
13157 int sysctl_enabled;
13158 struct timezone sys_tz;
13159 struct { /* extract of a clocksource struct */
13160+ char name[8];
13161 cycle_t (*vread)(void);
13162 cycle_t cycle_last;
13163 cycle_t mask;
13164diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
13165index 61e08c0..b0da582 100644
13166--- a/arch/x86/include/asm/vmi.h
13167+++ b/arch/x86/include/asm/vmi.h
13168@@ -191,6 +191,7 @@ struct vrom_header {
13169 u8 reserved[96]; /* Reserved for headers */
13170 char vmi_init[8]; /* VMI_Init jump point */
13171 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
13172+ char rom_data[8048]; /* rest of the option ROM */
13173 } __attribute__((packed));
13174
13175 struct pnp_header {
13176diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
13177index c6e0bee..fcb9f74 100644
13178--- a/arch/x86/include/asm/vmi_time.h
13179+++ b/arch/x86/include/asm/vmi_time.h
13180@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
13181 int (*wallclock_updated)(void);
13182 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
13183 void (*cancel_alarm)(u32 flags);
13184-} vmi_timer_ops;
13185+} __no_const vmi_timer_ops;
13186
13187 /* Prototypes */
13188 extern void __init vmi_time_init(void);
13189diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
13190index d0983d2..1f7c9e9 100644
13191--- a/arch/x86/include/asm/vsyscall.h
13192+++ b/arch/x86/include/asm/vsyscall.h
13193@@ -15,9 +15,10 @@ enum vsyscall_num {
13194
13195 #ifdef __KERNEL__
13196 #include <linux/seqlock.h>
13197+#include <linux/getcpu.h>
13198+#include <linux/time.h>
13199
13200 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
13201-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
13202
13203 /* Definitions for CONFIG_GENERIC_TIME definitions */
13204 #define __section_vsyscall_gtod_data __attribute__ \
13205@@ -31,7 +32,6 @@ enum vsyscall_num {
13206 #define VGETCPU_LSL 2
13207
13208 extern int __vgetcpu_mode;
13209-extern volatile unsigned long __jiffies;
13210
13211 /* kernel space (writeable) */
13212 extern int vgetcpu_mode;
13213@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
13214
13215 extern void map_vsyscall(void);
13216
13217+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
13218+extern time_t vtime(time_t *t);
13219+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
13220 #endif /* __KERNEL__ */
13221
13222 #endif /* _ASM_X86_VSYSCALL_H */
13223diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13224index 2c756fd..3377e37 100644
13225--- a/arch/x86/include/asm/x86_init.h
13226+++ b/arch/x86/include/asm/x86_init.h
13227@@ -28,7 +28,7 @@ struct x86_init_mpparse {
13228 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13229 void (*find_smp_config)(unsigned int reserve);
13230 void (*get_smp_config)(unsigned int early);
13231-};
13232+} __no_const;
13233
13234 /**
13235 * struct x86_init_resources - platform specific resource related ops
13236@@ -42,7 +42,7 @@ struct x86_init_resources {
13237 void (*probe_roms)(void);
13238 void (*reserve_resources)(void);
13239 char *(*memory_setup)(void);
13240-};
13241+} __no_const;
13242
13243 /**
13244 * struct x86_init_irqs - platform specific interrupt setup
13245@@ -55,7 +55,7 @@ struct x86_init_irqs {
13246 void (*pre_vector_init)(void);
13247 void (*intr_init)(void);
13248 void (*trap_init)(void);
13249-};
13250+} __no_const;
13251
13252 /**
13253 * struct x86_init_oem - oem platform specific customizing functions
13254@@ -65,7 +65,7 @@ struct x86_init_irqs {
13255 struct x86_init_oem {
13256 void (*arch_setup)(void);
13257 void (*banner)(void);
13258-};
13259+} __no_const;
13260
13261 /**
13262 * struct x86_init_paging - platform specific paging functions
13263@@ -75,7 +75,7 @@ struct x86_init_oem {
13264 struct x86_init_paging {
13265 void (*pagetable_setup_start)(pgd_t *base);
13266 void (*pagetable_setup_done)(pgd_t *base);
13267-};
13268+} __no_const;
13269
13270 /**
13271 * struct x86_init_timers - platform specific timer setup
13272@@ -88,7 +88,7 @@ struct x86_init_timers {
13273 void (*setup_percpu_clockev)(void);
13274 void (*tsc_pre_init)(void);
13275 void (*timer_init)(void);
13276-};
13277+} __no_const;
13278
13279 /**
13280 * struct x86_init_ops - functions for platform specific setup
13281@@ -101,7 +101,7 @@ struct x86_init_ops {
13282 struct x86_init_oem oem;
13283 struct x86_init_paging paging;
13284 struct x86_init_timers timers;
13285-};
13286+} __no_const;
13287
13288 /**
13289 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13290@@ -109,7 +109,7 @@ struct x86_init_ops {
13291 */
13292 struct x86_cpuinit_ops {
13293 void (*setup_percpu_clockev)(void);
13294-};
13295+} __no_const;
13296
13297 /**
13298 * struct x86_platform_ops - platform specific runtime functions
13299@@ -121,7 +121,7 @@ struct x86_platform_ops {
13300 unsigned long (*calibrate_tsc)(void);
13301 unsigned long (*get_wallclock)(void);
13302 int (*set_wallclock)(unsigned long nowtime);
13303-};
13304+} __no_const;
13305
13306 extern struct x86_init_ops x86_init;
13307 extern struct x86_cpuinit_ops x86_cpuinit;
13308diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13309index 727acc1..554f3eb 100644
13310--- a/arch/x86/include/asm/xsave.h
13311+++ b/arch/x86/include/asm/xsave.h
13312@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13313 static inline int xsave_user(struct xsave_struct __user *buf)
13314 {
13315 int err;
13316+
13317+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13318+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13319+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13320+#endif
13321+
13322 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13323 "2:\n"
13324 ".section .fixup,\"ax\"\n"
13325@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13326 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13327 {
13328 int err;
13329- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13330+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13331 u32 lmask = mask;
13332 u32 hmask = mask >> 32;
13333
13334+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13335+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13336+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13337+#endif
13338+
13339 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13340 "2:\n"
13341 ".section .fixup,\"ax\"\n"
13342diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13343index 6a564ac..9b1340c 100644
13344--- a/arch/x86/kernel/acpi/realmode/Makefile
13345+++ b/arch/x86/kernel/acpi/realmode/Makefile
13346@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13347 $(call cc-option, -fno-stack-protector) \
13348 $(call cc-option, -mpreferred-stack-boundary=2)
13349 KBUILD_CFLAGS += $(call cc-option, -m32)
13350+ifdef CONSTIFY_PLUGIN
13351+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13352+endif
13353 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13354 GCOV_PROFILE := n
13355
13356diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13357index 580b4e2..d4129e4 100644
13358--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13359+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13360@@ -91,6 +91,9 @@ _start:
13361 /* Do any other stuff... */
13362
13363 #ifndef CONFIG_64BIT
13364+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
13365+ call verify_cpu
13366+
13367 /* This could also be done in C code... */
13368 movl pmode_cr3, %eax
13369 movl %eax, %cr3
13370@@ -104,7 +107,7 @@ _start:
13371 movl %eax, %ecx
13372 orl %edx, %ecx
13373 jz 1f
13374- movl $0xc0000080, %ecx
13375+ mov $MSR_EFER, %ecx
13376 wrmsr
13377 1:
13378
13379@@ -114,6 +117,7 @@ _start:
13380 movl pmode_cr0, %eax
13381 movl %eax, %cr0
13382 jmp pmode_return
13383+# include "../../verify_cpu.S"
13384 #else
13385 pushw $0
13386 pushw trampoline_segment
13387diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13388index ca93638..7042f24 100644
13389--- a/arch/x86/kernel/acpi/sleep.c
13390+++ b/arch/x86/kernel/acpi/sleep.c
13391@@ -11,11 +11,12 @@
13392 #include <linux/cpumask.h>
13393 #include <asm/segment.h>
13394 #include <asm/desc.h>
13395+#include <asm/e820.h>
13396
13397 #include "realmode/wakeup.h"
13398 #include "sleep.h"
13399
13400-unsigned long acpi_wakeup_address;
13401+unsigned long acpi_wakeup_address = 0x2000;
13402 unsigned long acpi_realmode_flags;
13403
13404 /* address in low memory of the wakeup routine. */
13405@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13406 #else /* CONFIG_64BIT */
13407 header->trampoline_segment = setup_trampoline() >> 4;
13408 #ifdef CONFIG_SMP
13409- stack_start.sp = temp_stack + sizeof(temp_stack);
13410+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13411+
13412+ pax_open_kernel();
13413 early_gdt_descr.address =
13414 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13415+ pax_close_kernel();
13416+
13417 initial_gs = per_cpu_offset(smp_processor_id());
13418 #endif
13419 initial_code = (unsigned long)wakeup_long64;
13420@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13421 return;
13422 }
13423
13424- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13425-
13426- if (!acpi_realmode) {
13427- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13428- return;
13429- }
13430-
13431- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13432+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13433+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13434 }
13435
13436
13437diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13438index 8ded418..079961e 100644
13439--- a/arch/x86/kernel/acpi/wakeup_32.S
13440+++ b/arch/x86/kernel/acpi/wakeup_32.S
13441@@ -30,13 +30,11 @@ wakeup_pmode_return:
13442 # and restore the stack ... but you need gdt for this to work
13443 movl saved_context_esp, %esp
13444
13445- movl %cs:saved_magic, %eax
13446- cmpl $0x12345678, %eax
13447+ cmpl $0x12345678, saved_magic
13448 jne bogus_magic
13449
13450 # jump to place where we left off
13451- movl saved_eip, %eax
13452- jmp *%eax
13453+ jmp *(saved_eip)
13454
13455 bogus_magic:
13456 jmp bogus_magic
13457diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13458index de7353c..075da5f 100644
13459--- a/arch/x86/kernel/alternative.c
13460+++ b/arch/x86/kernel/alternative.c
13461@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13462
13463 BUG_ON(p->len > MAX_PATCH_LEN);
13464 /* prep the buffer with the original instructions */
13465- memcpy(insnbuf, p->instr, p->len);
13466+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13467 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13468 (unsigned long)p->instr, p->len);
13469
13470@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13471 if (smp_alt_once)
13472 free_init_pages("SMP alternatives",
13473 (unsigned long)__smp_locks,
13474- (unsigned long)__smp_locks_end);
13475+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13476
13477 restart_nmi();
13478 }
13479@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13480 * instructions. And on the local CPU you need to be protected again NMI or MCE
13481 * handlers seeing an inconsistent instruction while you patch.
13482 */
13483-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13484+static void *__kprobes text_poke_early(void *addr, const void *opcode,
13485 size_t len)
13486 {
13487 unsigned long flags;
13488 local_irq_save(flags);
13489- memcpy(addr, opcode, len);
13490+
13491+ pax_open_kernel();
13492+ memcpy(ktla_ktva(addr), opcode, len);
13493 sync_core();
13494+ pax_close_kernel();
13495+
13496 local_irq_restore(flags);
13497 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13498 that causes hangs on some VIA CPUs. */
13499@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13500 */
13501 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13502 {
13503- unsigned long flags;
13504- char *vaddr;
13505+ unsigned char *vaddr = ktla_ktva(addr);
13506 struct page *pages[2];
13507- int i;
13508+ size_t i;
13509
13510 if (!core_kernel_text((unsigned long)addr)) {
13511- pages[0] = vmalloc_to_page(addr);
13512- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13513+ pages[0] = vmalloc_to_page(vaddr);
13514+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13515 } else {
13516- pages[0] = virt_to_page(addr);
13517+ pages[0] = virt_to_page(vaddr);
13518 WARN_ON(!PageReserved(pages[0]));
13519- pages[1] = virt_to_page(addr + PAGE_SIZE);
13520+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13521 }
13522 BUG_ON(!pages[0]);
13523- local_irq_save(flags);
13524- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13525- if (pages[1])
13526- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13527- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13528- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13529- clear_fixmap(FIX_TEXT_POKE0);
13530- if (pages[1])
13531- clear_fixmap(FIX_TEXT_POKE1);
13532- local_flush_tlb();
13533- sync_core();
13534- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13535- that causes hangs on some VIA CPUs. */
13536+ text_poke_early(addr, opcode, len);
13537 for (i = 0; i < len; i++)
13538- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13539- local_irq_restore(flags);
13540+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13541 return addr;
13542 }
13543diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13544index 3a44b75..1601800 100644
13545--- a/arch/x86/kernel/amd_iommu.c
13546+++ b/arch/x86/kernel/amd_iommu.c
13547@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13548 }
13549 }
13550
13551-static struct dma_map_ops amd_iommu_dma_ops = {
13552+static const struct dma_map_ops amd_iommu_dma_ops = {
13553 .alloc_coherent = alloc_coherent,
13554 .free_coherent = free_coherent,
13555 .map_page = map_page,
13556diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13557index 1d2d670..8e3f477 100644
13558--- a/arch/x86/kernel/apic/apic.c
13559+++ b/arch/x86/kernel/apic/apic.c
13560@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13561 /*
13562 * Debug level, exported for io_apic.c
13563 */
13564-unsigned int apic_verbosity;
13565+int apic_verbosity;
13566
13567 int pic_mode;
13568
13569@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13570 apic_write(APIC_ESR, 0);
13571 v1 = apic_read(APIC_ESR);
13572 ack_APIC_irq();
13573- atomic_inc(&irq_err_count);
13574+ atomic_inc_unchecked(&irq_err_count);
13575
13576 /*
13577 * Here is what the APIC error bits mean:
13578@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13579 u16 *bios_cpu_apicid;
13580 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13581
13582+ pax_track_stack();
13583+
13584 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13585 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13586
13587diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13588index 8928d97..f799cea 100644
13589--- a/arch/x86/kernel/apic/io_apic.c
13590+++ b/arch/x86/kernel/apic/io_apic.c
13591@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13592 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13593 GFP_ATOMIC);
13594 if (!ioapic_entries)
13595- return 0;
13596+ return NULL;
13597
13598 for (apic = 0; apic < nr_ioapics; apic++) {
13599 ioapic_entries[apic] =
13600@@ -733,7 +733,7 @@ nomem:
13601 kfree(ioapic_entries[apic]);
13602 kfree(ioapic_entries);
13603
13604- return 0;
13605+ return NULL;
13606 }
13607
13608 /*
13609@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13610 }
13611 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13612
13613-void lock_vector_lock(void)
13614+void lock_vector_lock(void) __acquires(vector_lock)
13615 {
13616 /* Used to the online set of cpus does not change
13617 * during assign_irq_vector.
13618@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13619 spin_lock(&vector_lock);
13620 }
13621
13622-void unlock_vector_lock(void)
13623+void unlock_vector_lock(void) __releases(vector_lock)
13624 {
13625 spin_unlock(&vector_lock);
13626 }
13627@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13628 ack_APIC_irq();
13629 }
13630
13631-atomic_t irq_mis_count;
13632+atomic_unchecked_t irq_mis_count;
13633
13634 static void ack_apic_level(unsigned int irq)
13635 {
13636@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13637
13638 /* Tail end of version 0x11 I/O APIC bug workaround */
13639 if (!(v & (1 << (i & 0x1f)))) {
13640- atomic_inc(&irq_mis_count);
13641+ atomic_inc_unchecked(&irq_mis_count);
13642 spin_lock(&ioapic_lock);
13643 __mask_and_edge_IO_APIC_irq(cfg);
13644 __unmask_and_level_IO_APIC_irq(cfg);
13645diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13646index 151ace6..f317474 100644
13647--- a/arch/x86/kernel/apm_32.c
13648+++ b/arch/x86/kernel/apm_32.c
13649@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13650 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13651 * even though they are called in protected mode.
13652 */
13653-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13654+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13655 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13656
13657 static const char driver_version[] = "1.16ac"; /* no spaces */
13658@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13659 BUG_ON(cpu != 0);
13660 gdt = get_cpu_gdt_table(cpu);
13661 save_desc_40 = gdt[0x40 / 8];
13662+
13663+ pax_open_kernel();
13664 gdt[0x40 / 8] = bad_bios_desc;
13665+ pax_close_kernel();
13666
13667 apm_irq_save(flags);
13668 APM_DO_SAVE_SEGS;
13669@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13670 &call->esi);
13671 APM_DO_RESTORE_SEGS;
13672 apm_irq_restore(flags);
13673+
13674+ pax_open_kernel();
13675 gdt[0x40 / 8] = save_desc_40;
13676+ pax_close_kernel();
13677+
13678 put_cpu();
13679
13680 return call->eax & 0xff;
13681@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13682 BUG_ON(cpu != 0);
13683 gdt = get_cpu_gdt_table(cpu);
13684 save_desc_40 = gdt[0x40 / 8];
13685+
13686+ pax_open_kernel();
13687 gdt[0x40 / 8] = bad_bios_desc;
13688+ pax_close_kernel();
13689
13690 apm_irq_save(flags);
13691 APM_DO_SAVE_SEGS;
13692@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13693 &call->eax);
13694 APM_DO_RESTORE_SEGS;
13695 apm_irq_restore(flags);
13696+
13697+ pax_open_kernel();
13698 gdt[0x40 / 8] = save_desc_40;
13699+ pax_close_kernel();
13700+
13701 put_cpu();
13702 return error;
13703 }
13704@@ -975,7 +989,7 @@ recalc:
13705
13706 static void apm_power_off(void)
13707 {
13708- unsigned char po_bios_call[] = {
13709+ const unsigned char po_bios_call[] = {
13710 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13711 0x8e, 0xd0, /* movw ax,ss */
13712 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13713@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13714 * code to that CPU.
13715 */
13716 gdt = get_cpu_gdt_table(0);
13717+
13718+ pax_open_kernel();
13719 set_desc_base(&gdt[APM_CS >> 3],
13720 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13721 set_desc_base(&gdt[APM_CS_16 >> 3],
13722 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13723 set_desc_base(&gdt[APM_DS >> 3],
13724 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13725+ pax_close_kernel();
13726
13727 proc_create("apm", 0, NULL, &apm_file_ops);
13728
13729diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13730index dfdbf64..9b2b6ce 100644
13731--- a/arch/x86/kernel/asm-offsets_32.c
13732+++ b/arch/x86/kernel/asm-offsets_32.c
13733@@ -51,7 +51,6 @@ void foo(void)
13734 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13735 BLANK();
13736
13737- OFFSET(TI_task, thread_info, task);
13738 OFFSET(TI_exec_domain, thread_info, exec_domain);
13739 OFFSET(TI_flags, thread_info, flags);
13740 OFFSET(TI_status, thread_info, status);
13741@@ -60,6 +59,8 @@ void foo(void)
13742 OFFSET(TI_restart_block, thread_info, restart_block);
13743 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13744 OFFSET(TI_cpu, thread_info, cpu);
13745+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13746+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13747 BLANK();
13748
13749 OFFSET(GDS_size, desc_ptr, size);
13750@@ -99,6 +100,7 @@ void foo(void)
13751
13752 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13753 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13754+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13755 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13756 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13757 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13758@@ -115,6 +117,11 @@ void foo(void)
13759 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13760 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13761 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13762+
13763+#ifdef CONFIG_PAX_KERNEXEC
13764+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13765+#endif
13766+
13767 #endif
13768
13769 #ifdef CONFIG_XEN
13770diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13771index 4a6aeed..371de20 100644
13772--- a/arch/x86/kernel/asm-offsets_64.c
13773+++ b/arch/x86/kernel/asm-offsets_64.c
13774@@ -44,6 +44,8 @@ int main(void)
13775 ENTRY(addr_limit);
13776 ENTRY(preempt_count);
13777 ENTRY(status);
13778+ ENTRY(lowest_stack);
13779+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13780 #ifdef CONFIG_IA32_EMULATION
13781 ENTRY(sysenter_return);
13782 #endif
13783@@ -63,6 +65,18 @@ int main(void)
13784 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13785 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13786 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13787+
13788+#ifdef CONFIG_PAX_KERNEXEC
13789+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13790+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13791+#endif
13792+
13793+#ifdef CONFIG_PAX_MEMORY_UDEREF
13794+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13795+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13796+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13797+#endif
13798+
13799 #endif
13800
13801
13802@@ -115,6 +129,7 @@ int main(void)
13803 ENTRY(cr8);
13804 BLANK();
13805 #undef ENTRY
13806+ DEFINE(TSS_size, sizeof(struct tss_struct));
13807 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13808 BLANK();
13809 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13810@@ -130,6 +145,7 @@ int main(void)
13811
13812 BLANK();
13813 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13814+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13815 #ifdef CONFIG_XEN
13816 BLANK();
13817 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13818diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13819index ff502cc..dc5133e 100644
13820--- a/arch/x86/kernel/cpu/Makefile
13821+++ b/arch/x86/kernel/cpu/Makefile
13822@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13823 CFLAGS_REMOVE_common.o = -pg
13824 endif
13825
13826-# Make sure load_percpu_segment has no stackprotector
13827-nostackp := $(call cc-option, -fno-stack-protector)
13828-CFLAGS_common.o := $(nostackp)
13829-
13830 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13831 obj-y += proc.o capflags.o powerflags.o common.o
13832 obj-y += vmware.o hypervisor.o sched.o
13833diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13834index 6e082dc..a0b5f36 100644
13835--- a/arch/x86/kernel/cpu/amd.c
13836+++ b/arch/x86/kernel/cpu/amd.c
13837@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13838 unsigned int size)
13839 {
13840 /* AMD errata T13 (order #21922) */
13841- if ((c->x86 == 6)) {
13842+ if (c->x86 == 6) {
13843 /* Duron Rev A0 */
13844 if (c->x86_model == 3 && c->x86_mask == 0)
13845 size = 64;
13846diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13847index 4e34d10..ba6bc97 100644
13848--- a/arch/x86/kernel/cpu/common.c
13849+++ b/arch/x86/kernel/cpu/common.c
13850@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13851
13852 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13853
13854-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13855-#ifdef CONFIG_X86_64
13856- /*
13857- * We need valid kernel segments for data and code in long mode too
13858- * IRET will check the segment types kkeil 2000/10/28
13859- * Also sysret mandates a special GDT layout
13860- *
13861- * TLS descriptors are currently at a different place compared to i386.
13862- * Hopefully nobody expects them at a fixed place (Wine?)
13863- */
13864- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13865- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13866- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13867- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13868- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13869- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13870-#else
13871- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13872- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13873- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13874- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13875- /*
13876- * Segments used for calling PnP BIOS have byte granularity.
13877- * They code segments and data segments have fixed 64k limits,
13878- * the transfer segment sizes are set at run time.
13879- */
13880- /* 32-bit code */
13881- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13882- /* 16-bit code */
13883- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13884- /* 16-bit data */
13885- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13886- /* 16-bit data */
13887- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13888- /* 16-bit data */
13889- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13890- /*
13891- * The APM segments have byte granularity and their bases
13892- * are set at run time. All have 64k limits.
13893- */
13894- /* 32-bit code */
13895- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13896- /* 16-bit code */
13897- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13898- /* data */
13899- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13900-
13901- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13902- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13903- GDT_STACK_CANARY_INIT
13904-#endif
13905-} };
13906-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13907-
13908 static int __init x86_xsave_setup(char *s)
13909 {
13910 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13911@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13912 {
13913 struct desc_ptr gdt_descr;
13914
13915- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13916+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13917 gdt_descr.size = GDT_SIZE - 1;
13918 load_gdt(&gdt_descr);
13919 /* Reload the per-cpu base */
13920@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13921 /* Filter out anything that depends on CPUID levels we don't have */
13922 filter_cpuid_features(c, true);
13923
13924+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13925+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13926+#endif
13927+
13928 /* If the model name is still unset, do table lookup. */
13929 if (!c->x86_model_id[0]) {
13930 const char *p;
13931@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13932 }
13933 __setup("clearcpuid=", setup_disablecpuid);
13934
13935+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13936+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13937+
13938 #ifdef CONFIG_X86_64
13939 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13940
13941@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13942 EXPORT_PER_CPU_SYMBOL(current_task);
13943
13944 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13945- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13946+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13947 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13948
13949 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13950@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13951 {
13952 memset(regs, 0, sizeof(struct pt_regs));
13953 regs->fs = __KERNEL_PERCPU;
13954- regs->gs = __KERNEL_STACK_CANARY;
13955+ savesegment(gs, regs->gs);
13956
13957 return regs;
13958 }
13959@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13960 int i;
13961
13962 cpu = stack_smp_processor_id();
13963- t = &per_cpu(init_tss, cpu);
13964+ t = init_tss + cpu;
13965 orig_ist = &per_cpu(orig_ist, cpu);
13966
13967 #ifdef CONFIG_NUMA
13968@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13969 switch_to_new_gdt(cpu);
13970 loadsegment(fs, 0);
13971
13972- load_idt((const struct desc_ptr *)&idt_descr);
13973+ load_idt(&idt_descr);
13974
13975 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13976 syscall_init();
13977@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13978 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13979 barrier();
13980
13981- check_efer();
13982 if (cpu != 0)
13983 enable_x2apic();
13984
13985@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13986 {
13987 int cpu = smp_processor_id();
13988 struct task_struct *curr = current;
13989- struct tss_struct *t = &per_cpu(init_tss, cpu);
13990+ struct tss_struct *t = init_tss + cpu;
13991 struct thread_struct *thread = &curr->thread;
13992
13993 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13994diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13995index 6a77cca..4f4fca0 100644
13996--- a/arch/x86/kernel/cpu/intel.c
13997+++ b/arch/x86/kernel/cpu/intel.c
13998@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13999 * Update the IDT descriptor and reload the IDT so that
14000 * it uses the read-only mapped virtual address.
14001 */
14002- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14003+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14004 load_idt(&idt_descr);
14005 }
14006 #endif
14007diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
14008index 417990f..96dc36b 100644
14009--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
14010+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
14011@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14012 return ret;
14013 }
14014
14015-static struct sysfs_ops sysfs_ops = {
14016+static const struct sysfs_ops sysfs_ops = {
14017 .show = show,
14018 .store = store,
14019 };
14020diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14021index 472763d..9831e11 100644
14022--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14023+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14024@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14025 static int inject_init(void)
14026 {
14027 printk(KERN_INFO "Machine check injector initialized\n");
14028- mce_chrdev_ops.write = mce_write;
14029+ pax_open_kernel();
14030+ *(void **)&mce_chrdev_ops.write = mce_write;
14031+ pax_close_kernel();
14032 register_die_notifier(&mce_raise_nb);
14033 return 0;
14034 }
14035diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14036index 0f16a2b..21740f5 100644
14037--- a/arch/x86/kernel/cpu/mcheck/mce.c
14038+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14039@@ -43,6 +43,7 @@
14040 #include <asm/ipi.h>
14041 #include <asm/mce.h>
14042 #include <asm/msr.h>
14043+#include <asm/local.h>
14044
14045 #include "mce-internal.h"
14046
14047@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
14048 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14049 m->cs, m->ip);
14050
14051- if (m->cs == __KERNEL_CS)
14052+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14053 print_symbol("{%s}", m->ip);
14054 pr_cont("\n");
14055 }
14056@@ -221,10 +222,10 @@ static void print_mce_tail(void)
14057
14058 #define PANIC_TIMEOUT 5 /* 5 seconds */
14059
14060-static atomic_t mce_paniced;
14061+static atomic_unchecked_t mce_paniced;
14062
14063 static int fake_panic;
14064-static atomic_t mce_fake_paniced;
14065+static atomic_unchecked_t mce_fake_paniced;
14066
14067 /* Panic in progress. Enable interrupts and wait for final IPI */
14068 static void wait_for_panic(void)
14069@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14070 /*
14071 * Make sure only one CPU runs in machine check panic
14072 */
14073- if (atomic_inc_return(&mce_paniced) > 1)
14074+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14075 wait_for_panic();
14076 barrier();
14077
14078@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14079 console_verbose();
14080 } else {
14081 /* Don't log too much for fake panic */
14082- if (atomic_inc_return(&mce_fake_paniced) > 1)
14083+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14084 return;
14085 }
14086 print_mce_head();
14087@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
14088 * might have been modified by someone else.
14089 */
14090 rmb();
14091- if (atomic_read(&mce_paniced))
14092+ if (atomic_read_unchecked(&mce_paniced))
14093 wait_for_panic();
14094 if (!monarch_timeout)
14095 goto out;
14096@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14097 }
14098
14099 /* Call the installed machine check handler for this CPU setup. */
14100-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14101+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14102 unexpected_machine_check;
14103
14104 /*
14105@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14106 return;
14107 }
14108
14109+ pax_open_kernel();
14110 machine_check_vector = do_machine_check;
14111+ pax_close_kernel();
14112
14113 mce_init();
14114 mce_cpu_features(c);
14115@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14116 */
14117
14118 static DEFINE_SPINLOCK(mce_state_lock);
14119-static int open_count; /* #times opened */
14120+static local_t open_count; /* #times opened */
14121 static int open_exclu; /* already open exclusive? */
14122
14123 static int mce_open(struct inode *inode, struct file *file)
14124 {
14125 spin_lock(&mce_state_lock);
14126
14127- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
14128+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
14129 spin_unlock(&mce_state_lock);
14130
14131 return -EBUSY;
14132@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
14133
14134 if (file->f_flags & O_EXCL)
14135 open_exclu = 1;
14136- open_count++;
14137+ local_inc(&open_count);
14138
14139 spin_unlock(&mce_state_lock);
14140
14141@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
14142 {
14143 spin_lock(&mce_state_lock);
14144
14145- open_count--;
14146+ local_dec(&open_count);
14147 open_exclu = 0;
14148
14149 spin_unlock(&mce_state_lock);
14150@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
14151 static void mce_reset(void)
14152 {
14153 cpu_missing = 0;
14154- atomic_set(&mce_fake_paniced, 0);
14155+ atomic_set_unchecked(&mce_fake_paniced, 0);
14156 atomic_set(&mce_executing, 0);
14157 atomic_set(&mce_callin, 0);
14158 atomic_set(&global_nwo, 0);
14159diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14160index ef3cd31..9d2f6ab 100644
14161--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
14162+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
14163@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14164 return ret;
14165 }
14166
14167-static struct sysfs_ops threshold_ops = {
14168+static const struct sysfs_ops threshold_ops = {
14169 .show = show,
14170 .store = store,
14171 };
14172diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14173index 5c0e653..0882b0a 100644
14174--- a/arch/x86/kernel/cpu/mcheck/p5.c
14175+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14176@@ -12,6 +12,7 @@
14177 #include <asm/system.h>
14178 #include <asm/mce.h>
14179 #include <asm/msr.h>
14180+#include <asm/pgtable.h>
14181
14182 /* By default disabled */
14183 int mce_p5_enabled __read_mostly;
14184@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14185 if (!cpu_has(c, X86_FEATURE_MCE))
14186 return;
14187
14188+ pax_open_kernel();
14189 machine_check_vector = pentium_machine_check;
14190+ pax_close_kernel();
14191 /* Make sure the vector pointer is visible before we enable MCEs: */
14192 wmb();
14193
14194diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14195index 54060f5..c1a7577 100644
14196--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14197+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14198@@ -11,6 +11,7 @@
14199 #include <asm/system.h>
14200 #include <asm/mce.h>
14201 #include <asm/msr.h>
14202+#include <asm/pgtable.h>
14203
14204 /* Machine check handler for WinChip C6: */
14205 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14206@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14207 {
14208 u32 lo, hi;
14209
14210+ pax_open_kernel();
14211 machine_check_vector = winchip_machine_check;
14212+ pax_close_kernel();
14213 /* Make sure the vector pointer is visible before we enable MCEs: */
14214 wmb();
14215
14216diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
14217index 33af141..92ba9cd 100644
14218--- a/arch/x86/kernel/cpu/mtrr/amd.c
14219+++ b/arch/x86/kernel/cpu/mtrr/amd.c
14220@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14221 return 0;
14222 }
14223
14224-static struct mtrr_ops amd_mtrr_ops = {
14225+static const struct mtrr_ops amd_mtrr_ops = {
14226 .vendor = X86_VENDOR_AMD,
14227 .set = amd_set_mtrr,
14228 .get = amd_get_mtrr,
14229diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14230index de89f14..316fe3e 100644
14231--- a/arch/x86/kernel/cpu/mtrr/centaur.c
14232+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14233@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14234 return 0;
14235 }
14236
14237-static struct mtrr_ops centaur_mtrr_ops = {
14238+static const struct mtrr_ops centaur_mtrr_ops = {
14239 .vendor = X86_VENDOR_CENTAUR,
14240 .set = centaur_set_mcr,
14241 .get = centaur_get_mcr,
14242diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14243index 228d982..68a3343 100644
14244--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14245+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14246@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14247 post_set();
14248 }
14249
14250-static struct mtrr_ops cyrix_mtrr_ops = {
14251+static const struct mtrr_ops cyrix_mtrr_ops = {
14252 .vendor = X86_VENDOR_CYRIX,
14253 .set_all = cyrix_set_all,
14254 .set = cyrix_set_arr,
14255diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14256index 55da0c5..4d75584 100644
14257--- a/arch/x86/kernel/cpu/mtrr/generic.c
14258+++ b/arch/x86/kernel/cpu/mtrr/generic.c
14259@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14260 /*
14261 * Generic structure...
14262 */
14263-struct mtrr_ops generic_mtrr_ops = {
14264+const struct mtrr_ops generic_mtrr_ops = {
14265 .use_intel_if = 1,
14266 .set_all = generic_set_all,
14267 .get = generic_get_mtrr,
14268diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14269index fd60f09..c94ef52 100644
14270--- a/arch/x86/kernel/cpu/mtrr/main.c
14271+++ b/arch/x86/kernel/cpu/mtrr/main.c
14272@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14273 u64 size_or_mask, size_and_mask;
14274 static bool mtrr_aps_delayed_init;
14275
14276-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14277+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14278
14279-struct mtrr_ops *mtrr_if;
14280+const struct mtrr_ops *mtrr_if;
14281
14282 static void set_mtrr(unsigned int reg, unsigned long base,
14283 unsigned long size, mtrr_type type);
14284
14285-void set_mtrr_ops(struct mtrr_ops *ops)
14286+void set_mtrr_ops(const struct mtrr_ops *ops)
14287 {
14288 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14289 mtrr_ops[ops->vendor] = ops;
14290diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14291index a501dee..816c719 100644
14292--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14293+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14294@@ -25,14 +25,14 @@ struct mtrr_ops {
14295 int (*validate_add_page)(unsigned long base, unsigned long size,
14296 unsigned int type);
14297 int (*have_wrcomb)(void);
14298-};
14299+} __do_const;
14300
14301 extern int generic_get_free_region(unsigned long base, unsigned long size,
14302 int replace_reg);
14303 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14304 unsigned int type);
14305
14306-extern struct mtrr_ops generic_mtrr_ops;
14307+extern const struct mtrr_ops generic_mtrr_ops;
14308
14309 extern int positive_have_wrcomb(void);
14310
14311@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14312 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14313 void get_mtrr_state(void);
14314
14315-extern void set_mtrr_ops(struct mtrr_ops *ops);
14316+extern void set_mtrr_ops(const struct mtrr_ops *ops);
14317
14318 extern u64 size_or_mask, size_and_mask;
14319-extern struct mtrr_ops *mtrr_if;
14320+extern const struct mtrr_ops *mtrr_if;
14321
14322 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14323 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14324diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14325index 0ff02ca..fc49a60 100644
14326--- a/arch/x86/kernel/cpu/perf_event.c
14327+++ b/arch/x86/kernel/cpu/perf_event.c
14328@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14329 * count to the generic event atomically:
14330 */
14331 again:
14332- prev_raw_count = atomic64_read(&hwc->prev_count);
14333+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14334 rdmsrl(hwc->event_base + idx, new_raw_count);
14335
14336- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14337+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14338 new_raw_count) != prev_raw_count)
14339 goto again;
14340
14341@@ -741,7 +741,7 @@ again:
14342 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14343 delta >>= shift;
14344
14345- atomic64_add(delta, &event->count);
14346+ atomic64_add_unchecked(delta, &event->count);
14347 atomic64_sub(delta, &hwc->period_left);
14348
14349 return new_raw_count;
14350@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14351 * The hw event starts counting from this event offset,
14352 * mark it to be able to extra future deltas:
14353 */
14354- atomic64_set(&hwc->prev_count, (u64)-left);
14355+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14356
14357 err = checking_wrmsrl(hwc->event_base + idx,
14358 (u64)(-left) & x86_pmu.event_mask);
14359@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14360 break;
14361
14362 callchain_store(entry, frame.return_address);
14363- fp = frame.next_frame;
14364+ fp = (__force const void __user *)frame.next_frame;
14365 }
14366 }
14367
14368diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14369index 898df97..9e82503 100644
14370--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14371+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14372@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14373
14374 /* Interface defining a CPU specific perfctr watchdog */
14375 struct wd_ops {
14376- int (*reserve)(void);
14377- void (*unreserve)(void);
14378- int (*setup)(unsigned nmi_hz);
14379- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14380- void (*stop)(void);
14381+ int (* const reserve)(void);
14382+ void (* const unreserve)(void);
14383+ int (* const setup)(unsigned nmi_hz);
14384+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14385+ void (* const stop)(void);
14386 unsigned perfctr;
14387 unsigned evntsel;
14388 u64 checkbit;
14389@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14390 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14391 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14392
14393+/* cannot be const */
14394 static struct wd_ops intel_arch_wd_ops;
14395
14396 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14397@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14398 return 1;
14399 }
14400
14401+/* cannot be const */
14402 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14403 .reserve = single_msr_reserve,
14404 .unreserve = single_msr_unreserve,
14405diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14406index ff95824..2ffdcb5 100644
14407--- a/arch/x86/kernel/crash.c
14408+++ b/arch/x86/kernel/crash.c
14409@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14410 regs = args->regs;
14411
14412 #ifdef CONFIG_X86_32
14413- if (!user_mode_vm(regs)) {
14414+ if (!user_mode(regs)) {
14415 crash_fixup_ss_esp(&fixed_regs, regs);
14416 regs = &fixed_regs;
14417 }
14418diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14419index 37250fe..bf2ec74 100644
14420--- a/arch/x86/kernel/doublefault_32.c
14421+++ b/arch/x86/kernel/doublefault_32.c
14422@@ -11,7 +11,7 @@
14423
14424 #define DOUBLEFAULT_STACKSIZE (1024)
14425 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14426-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14427+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14428
14429 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14430
14431@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14432 unsigned long gdt, tss;
14433
14434 store_gdt(&gdt_desc);
14435- gdt = gdt_desc.address;
14436+ gdt = (unsigned long)gdt_desc.address;
14437
14438 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14439
14440@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14441 /* 0x2 bit is always set */
14442 .flags = X86_EFLAGS_SF | 0x2,
14443 .sp = STACK_START,
14444- .es = __USER_DS,
14445+ .es = __KERNEL_DS,
14446 .cs = __KERNEL_CS,
14447 .ss = __KERNEL_DS,
14448- .ds = __USER_DS,
14449+ .ds = __KERNEL_DS,
14450 .fs = __KERNEL_PERCPU,
14451
14452 .__cr3 = __pa_nodebug(swapper_pg_dir),
14453diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14454index 2d8a371..4fa6ae6 100644
14455--- a/arch/x86/kernel/dumpstack.c
14456+++ b/arch/x86/kernel/dumpstack.c
14457@@ -2,6 +2,9 @@
14458 * Copyright (C) 1991, 1992 Linus Torvalds
14459 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14460 */
14461+#ifdef CONFIG_GRKERNSEC_HIDESYM
14462+#define __INCLUDED_BY_HIDESYM 1
14463+#endif
14464 #include <linux/kallsyms.h>
14465 #include <linux/kprobes.h>
14466 #include <linux/uaccess.h>
14467@@ -28,7 +31,7 @@ static int die_counter;
14468
14469 void printk_address(unsigned long address, int reliable)
14470 {
14471- printk(" [<%p>] %s%pS\n", (void *) address,
14472+ printk(" [<%p>] %s%pA\n", (void *) address,
14473 reliable ? "" : "? ", (void *) address);
14474 }
14475
14476@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14477 static void
14478 print_ftrace_graph_addr(unsigned long addr, void *data,
14479 const struct stacktrace_ops *ops,
14480- struct thread_info *tinfo, int *graph)
14481+ struct task_struct *task, int *graph)
14482 {
14483- struct task_struct *task = tinfo->task;
14484 unsigned long ret_addr;
14485 int index = task->curr_ret_stack;
14486
14487@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14488 static inline void
14489 print_ftrace_graph_addr(unsigned long addr, void *data,
14490 const struct stacktrace_ops *ops,
14491- struct thread_info *tinfo, int *graph)
14492+ struct task_struct *task, int *graph)
14493 { }
14494 #endif
14495
14496@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14497 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14498 */
14499
14500-static inline int valid_stack_ptr(struct thread_info *tinfo,
14501- void *p, unsigned int size, void *end)
14502+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14503 {
14504- void *t = tinfo;
14505 if (end) {
14506 if (p < end && p >= (end-THREAD_SIZE))
14507 return 1;
14508@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14509 }
14510
14511 unsigned long
14512-print_context_stack(struct thread_info *tinfo,
14513+print_context_stack(struct task_struct *task, void *stack_start,
14514 unsigned long *stack, unsigned long bp,
14515 const struct stacktrace_ops *ops, void *data,
14516 unsigned long *end, int *graph)
14517 {
14518 struct stack_frame *frame = (struct stack_frame *)bp;
14519
14520- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14521+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14522 unsigned long addr;
14523
14524 addr = *stack;
14525@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14526 } else {
14527 ops->address(data, addr, 0);
14528 }
14529- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14530+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14531 }
14532 stack++;
14533 }
14534@@ -180,7 +180,7 @@ void dump_stack(void)
14535 #endif
14536
14537 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14538- current->pid, current->comm, print_tainted(),
14539+ task_pid_nr(current), current->comm, print_tainted(),
14540 init_utsname()->release,
14541 (int)strcspn(init_utsname()->version, " "),
14542 init_utsname()->version);
14543@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14544 return flags;
14545 }
14546
14547+extern void gr_handle_kernel_exploit(void);
14548+
14549 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14550 {
14551 if (regs && kexec_should_crash(current))
14552@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14553 panic("Fatal exception in interrupt");
14554 if (panic_on_oops)
14555 panic("Fatal exception");
14556- do_exit(signr);
14557+
14558+ gr_handle_kernel_exploit();
14559+
14560+ do_group_exit(signr);
14561 }
14562
14563 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14564@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14565 unsigned long flags = oops_begin();
14566 int sig = SIGSEGV;
14567
14568- if (!user_mode_vm(regs))
14569+ if (!user_mode(regs))
14570 report_bug(regs->ip, regs);
14571
14572 if (__die(str, regs, err))
14573diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14574index 81086c2..13e8b17 100644
14575--- a/arch/x86/kernel/dumpstack.h
14576+++ b/arch/x86/kernel/dumpstack.h
14577@@ -15,7 +15,7 @@
14578 #endif
14579
14580 extern unsigned long
14581-print_context_stack(struct thread_info *tinfo,
14582+print_context_stack(struct task_struct *task, void *stack_start,
14583 unsigned long *stack, unsigned long bp,
14584 const struct stacktrace_ops *ops, void *data,
14585 unsigned long *end, int *graph);
14586diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14587index f7dd2a7..504f53b 100644
14588--- a/arch/x86/kernel/dumpstack_32.c
14589+++ b/arch/x86/kernel/dumpstack_32.c
14590@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14591 #endif
14592
14593 for (;;) {
14594- struct thread_info *context;
14595+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14596+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14597
14598- context = (struct thread_info *)
14599- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14600- bp = print_context_stack(context, stack, bp, ops,
14601- data, NULL, &graph);
14602-
14603- stack = (unsigned long *)context->previous_esp;
14604- if (!stack)
14605+ if (stack_start == task_stack_page(task))
14606 break;
14607+ stack = *(unsigned long **)stack_start;
14608 if (ops->stack(data, "IRQ") < 0)
14609 break;
14610 touch_nmi_watchdog();
14611@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14612 * When in-kernel, we also print out the stack and code at the
14613 * time of the fault..
14614 */
14615- if (!user_mode_vm(regs)) {
14616+ if (!user_mode(regs)) {
14617 unsigned int code_prologue = code_bytes * 43 / 64;
14618 unsigned int code_len = code_bytes;
14619 unsigned char c;
14620 u8 *ip;
14621+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14622
14623 printk(KERN_EMERG "Stack:\n");
14624 show_stack_log_lvl(NULL, regs, &regs->sp,
14625@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14626
14627 printk(KERN_EMERG "Code: ");
14628
14629- ip = (u8 *)regs->ip - code_prologue;
14630+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14631 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14632 /* try starting at IP */
14633- ip = (u8 *)regs->ip;
14634+ ip = (u8 *)regs->ip + cs_base;
14635 code_len = code_len - code_prologue + 1;
14636 }
14637 for (i = 0; i < code_len; i++, ip++) {
14638@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14639 printk(" Bad EIP value.");
14640 break;
14641 }
14642- if (ip == (u8 *)regs->ip)
14643+ if (ip == (u8 *)regs->ip + cs_base)
14644 printk("<%02x> ", c);
14645 else
14646 printk("%02x ", c);
14647@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14648 printk("\n");
14649 }
14650
14651+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14652+void pax_check_alloca(unsigned long size)
14653+{
14654+ unsigned long sp = (unsigned long)&sp, stack_left;
14655+
14656+ /* all kernel stacks are of the same size */
14657+ stack_left = sp & (THREAD_SIZE - 1);
14658+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14659+}
14660+EXPORT_SYMBOL(pax_check_alloca);
14661+#endif
14662+
14663 int is_valid_bugaddr(unsigned long ip)
14664 {
14665 unsigned short ud2;
14666
14667+ ip = ktla_ktva(ip);
14668 if (ip < PAGE_OFFSET)
14669 return 0;
14670 if (probe_kernel_address((unsigned short *)ip, ud2))
14671diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14672index a071e6b..36cd585 100644
14673--- a/arch/x86/kernel/dumpstack_64.c
14674+++ b/arch/x86/kernel/dumpstack_64.c
14675@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14676 unsigned long *irq_stack_end =
14677 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14678 unsigned used = 0;
14679- struct thread_info *tinfo;
14680 int graph = 0;
14681+ void *stack_start;
14682
14683 if (!task)
14684 task = current;
14685@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14686 * current stack address. If the stacks consist of nested
14687 * exceptions
14688 */
14689- tinfo = task_thread_info(task);
14690 for (;;) {
14691 char *id;
14692 unsigned long *estack_end;
14693+
14694 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14695 &used, &id);
14696
14697@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14698 if (ops->stack(data, id) < 0)
14699 break;
14700
14701- bp = print_context_stack(tinfo, stack, bp, ops,
14702+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14703 data, estack_end, &graph);
14704 ops->stack(data, "<EOE>");
14705 /*
14706@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14707 if (stack >= irq_stack && stack < irq_stack_end) {
14708 if (ops->stack(data, "IRQ") < 0)
14709 break;
14710- bp = print_context_stack(tinfo, stack, bp,
14711+ bp = print_context_stack(task, irq_stack, stack, bp,
14712 ops, data, irq_stack_end, &graph);
14713 /*
14714 * We link to the next stack (which would be
14715@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14716 /*
14717 * This handles the process stack:
14718 */
14719- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14720+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14721+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14722 put_cpu();
14723 }
14724 EXPORT_SYMBOL(dump_trace);
14725@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14726 return ud2 == 0x0b0f;
14727 }
14728
14729+
14730+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14731+void pax_check_alloca(unsigned long size)
14732+{
14733+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14734+ unsigned cpu, used;
14735+ char *id;
14736+
14737+ /* check the process stack first */
14738+ stack_start = (unsigned long)task_stack_page(current);
14739+ stack_end = stack_start + THREAD_SIZE;
14740+ if (likely(stack_start <= sp && sp < stack_end)) {
14741+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14742+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14743+ return;
14744+ }
14745+
14746+ cpu = get_cpu();
14747+
14748+ /* check the irq stacks */
14749+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14750+ stack_start = stack_end - IRQ_STACK_SIZE;
14751+ if (stack_start <= sp && sp < stack_end) {
14752+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14753+ put_cpu();
14754+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14755+ return;
14756+ }
14757+
14758+ /* check the exception stacks */
14759+ used = 0;
14760+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14761+ stack_start = stack_end - EXCEPTION_STKSZ;
14762+ if (stack_end && stack_start <= sp && sp < stack_end) {
14763+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14764+ put_cpu();
14765+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14766+ return;
14767+ }
14768+
14769+ put_cpu();
14770+
14771+ /* unknown stack */
14772+ BUG();
14773+}
14774+EXPORT_SYMBOL(pax_check_alloca);
14775+#endif
14776diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14777index a89739a..95e0c48 100644
14778--- a/arch/x86/kernel/e820.c
14779+++ b/arch/x86/kernel/e820.c
14780@@ -733,7 +733,7 @@ struct early_res {
14781 };
14782 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14783 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14784- {}
14785+ { 0, 0, {0}, 0 }
14786 };
14787
14788 static int __init find_overlapped_early(u64 start, u64 end)
14789diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14790index b9c830c..1e41a96 100644
14791--- a/arch/x86/kernel/early_printk.c
14792+++ b/arch/x86/kernel/early_printk.c
14793@@ -7,6 +7,7 @@
14794 #include <linux/pci_regs.h>
14795 #include <linux/pci_ids.h>
14796 #include <linux/errno.h>
14797+#include <linux/sched.h>
14798 #include <asm/io.h>
14799 #include <asm/processor.h>
14800 #include <asm/fcntl.h>
14801@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14802 int n;
14803 va_list ap;
14804
14805+ pax_track_stack();
14806+
14807 va_start(ap, fmt);
14808 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14809 early_console->write(early_console, buf, n);
14810diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14811index 5cab48e..b025f9b 100644
14812--- a/arch/x86/kernel/efi_32.c
14813+++ b/arch/x86/kernel/efi_32.c
14814@@ -38,70 +38,56 @@
14815 */
14816
14817 static unsigned long efi_rt_eflags;
14818-static pgd_t efi_bak_pg_dir_pointer[2];
14819+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14820
14821-void efi_call_phys_prelog(void)
14822+void __init efi_call_phys_prelog(void)
14823 {
14824- unsigned long cr4;
14825- unsigned long temp;
14826 struct desc_ptr gdt_descr;
14827
14828+#ifdef CONFIG_PAX_KERNEXEC
14829+ struct desc_struct d;
14830+#endif
14831+
14832 local_irq_save(efi_rt_eflags);
14833
14834- /*
14835- * If I don't have PAE, I should just duplicate two entries in page
14836- * directory. If I have PAE, I just need to duplicate one entry in
14837- * page directory.
14838- */
14839- cr4 = read_cr4_safe();
14840-
14841- if (cr4 & X86_CR4_PAE) {
14842- efi_bak_pg_dir_pointer[0].pgd =
14843- swapper_pg_dir[pgd_index(0)].pgd;
14844- swapper_pg_dir[0].pgd =
14845- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14846- } else {
14847- efi_bak_pg_dir_pointer[0].pgd =
14848- swapper_pg_dir[pgd_index(0)].pgd;
14849- efi_bak_pg_dir_pointer[1].pgd =
14850- swapper_pg_dir[pgd_index(0x400000)].pgd;
14851- swapper_pg_dir[pgd_index(0)].pgd =
14852- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14853- temp = PAGE_OFFSET + 0x400000;
14854- swapper_pg_dir[pgd_index(0x400000)].pgd =
14855- swapper_pg_dir[pgd_index(temp)].pgd;
14856- }
14857+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14858+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14859+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14860
14861 /*
14862 * After the lock is released, the original page table is restored.
14863 */
14864 __flush_tlb_all();
14865
14866+#ifdef CONFIG_PAX_KERNEXEC
14867+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14868+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14869+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14870+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14871+#endif
14872+
14873 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14874 gdt_descr.size = GDT_SIZE - 1;
14875 load_gdt(&gdt_descr);
14876 }
14877
14878-void efi_call_phys_epilog(void)
14879+void __init efi_call_phys_epilog(void)
14880 {
14881- unsigned long cr4;
14882 struct desc_ptr gdt_descr;
14883
14884+#ifdef CONFIG_PAX_KERNEXEC
14885+ struct desc_struct d;
14886+
14887+ memset(&d, 0, sizeof d);
14888+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14889+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14890+#endif
14891+
14892 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14893 gdt_descr.size = GDT_SIZE - 1;
14894 load_gdt(&gdt_descr);
14895
14896- cr4 = read_cr4_safe();
14897-
14898- if (cr4 & X86_CR4_PAE) {
14899- swapper_pg_dir[pgd_index(0)].pgd =
14900- efi_bak_pg_dir_pointer[0].pgd;
14901- } else {
14902- swapper_pg_dir[pgd_index(0)].pgd =
14903- efi_bak_pg_dir_pointer[0].pgd;
14904- swapper_pg_dir[pgd_index(0x400000)].pgd =
14905- efi_bak_pg_dir_pointer[1].pgd;
14906- }
14907+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14908
14909 /*
14910 * After the lock is released, the original page table is restored.
14911diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14912index fbe66e6..c5c0dd2 100644
14913--- a/arch/x86/kernel/efi_stub_32.S
14914+++ b/arch/x86/kernel/efi_stub_32.S
14915@@ -6,7 +6,9 @@
14916 */
14917
14918 #include <linux/linkage.h>
14919+#include <linux/init.h>
14920 #include <asm/page_types.h>
14921+#include <asm/segment.h>
14922
14923 /*
14924 * efi_call_phys(void *, ...) is a function with variable parameters.
14925@@ -20,7 +22,7 @@
14926 * service functions will comply with gcc calling convention, too.
14927 */
14928
14929-.text
14930+__INIT
14931 ENTRY(efi_call_phys)
14932 /*
14933 * 0. The function can only be called in Linux kernel. So CS has been
14934@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14935 * The mapping of lower virtual memory has been created in prelog and
14936 * epilog.
14937 */
14938- movl $1f, %edx
14939- subl $__PAGE_OFFSET, %edx
14940- jmp *%edx
14941+ movl $(__KERNEXEC_EFI_DS), %edx
14942+ mov %edx, %ds
14943+ mov %edx, %es
14944+ mov %edx, %ss
14945+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14946 1:
14947
14948 /*
14949@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14950 * parameter 2, ..., param n. To make things easy, we save the return
14951 * address of efi_call_phys in a global variable.
14952 */
14953- popl %edx
14954- movl %edx, saved_return_addr
14955- /* get the function pointer into ECX*/
14956- popl %ecx
14957- movl %ecx, efi_rt_function_ptr
14958- movl $2f, %edx
14959- subl $__PAGE_OFFSET, %edx
14960- pushl %edx
14961+ popl (saved_return_addr)
14962+ popl (efi_rt_function_ptr)
14963
14964 /*
14965 * 3. Clear PG bit in %CR0.
14966@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14967 /*
14968 * 5. Call the physical function.
14969 */
14970- jmp *%ecx
14971+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
14972
14973-2:
14974 /*
14975 * 6. After EFI runtime service returns, control will return to
14976 * following instruction. We'd better readjust stack pointer first.
14977@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14978 movl %cr0, %edx
14979 orl $0x80000000, %edx
14980 movl %edx, %cr0
14981- jmp 1f
14982-1:
14983+
14984 /*
14985 * 8. Now restore the virtual mode from flat mode by
14986 * adding EIP with PAGE_OFFSET.
14987 */
14988- movl $1f, %edx
14989- jmp *%edx
14990+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14991 1:
14992+ movl $(__KERNEL_DS), %edx
14993+ mov %edx, %ds
14994+ mov %edx, %es
14995+ mov %edx, %ss
14996
14997 /*
14998 * 9. Balance the stack. And because EAX contain the return value,
14999 * we'd better not clobber it.
15000 */
15001- leal efi_rt_function_ptr, %edx
15002- movl (%edx), %ecx
15003- pushl %ecx
15004+ pushl (efi_rt_function_ptr)
15005
15006 /*
15007- * 10. Push the saved return address onto the stack and return.
15008+ * 10. Return to the saved return address.
15009 */
15010- leal saved_return_addr, %edx
15011- movl (%edx), %ecx
15012- pushl %ecx
15013- ret
15014+ jmpl *(saved_return_addr)
15015 ENDPROC(efi_call_phys)
15016 .previous
15017
15018-.data
15019+__INITDATA
15020 saved_return_addr:
15021 .long 0
15022 efi_rt_function_ptr:
15023diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
15024index 4c07cca..2c8427d 100644
15025--- a/arch/x86/kernel/efi_stub_64.S
15026+++ b/arch/x86/kernel/efi_stub_64.S
15027@@ -7,6 +7,7 @@
15028 */
15029
15030 #include <linux/linkage.h>
15031+#include <asm/alternative-asm.h>
15032
15033 #define SAVE_XMM \
15034 mov %rsp, %rax; \
15035@@ -40,6 +41,7 @@ ENTRY(efi_call0)
15036 call *%rdi
15037 addq $32, %rsp
15038 RESTORE_XMM
15039+ pax_force_retaddr 0, 1
15040 ret
15041 ENDPROC(efi_call0)
15042
15043@@ -50,6 +52,7 @@ ENTRY(efi_call1)
15044 call *%rdi
15045 addq $32, %rsp
15046 RESTORE_XMM
15047+ pax_force_retaddr 0, 1
15048 ret
15049 ENDPROC(efi_call1)
15050
15051@@ -60,6 +63,7 @@ ENTRY(efi_call2)
15052 call *%rdi
15053 addq $32, %rsp
15054 RESTORE_XMM
15055+ pax_force_retaddr 0, 1
15056 ret
15057 ENDPROC(efi_call2)
15058
15059@@ -71,6 +75,7 @@ ENTRY(efi_call3)
15060 call *%rdi
15061 addq $32, %rsp
15062 RESTORE_XMM
15063+ pax_force_retaddr 0, 1
15064 ret
15065 ENDPROC(efi_call3)
15066
15067@@ -83,6 +88,7 @@ ENTRY(efi_call4)
15068 call *%rdi
15069 addq $32, %rsp
15070 RESTORE_XMM
15071+ pax_force_retaddr 0, 1
15072 ret
15073 ENDPROC(efi_call4)
15074
15075@@ -96,6 +102,7 @@ ENTRY(efi_call5)
15076 call *%rdi
15077 addq $48, %rsp
15078 RESTORE_XMM
15079+ pax_force_retaddr 0, 1
15080 ret
15081 ENDPROC(efi_call5)
15082
15083@@ -112,5 +119,6 @@ ENTRY(efi_call6)
15084 call *%rdi
15085 addq $48, %rsp
15086 RESTORE_XMM
15087+ pax_force_retaddr 0, 1
15088 ret
15089 ENDPROC(efi_call6)
15090diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15091index c097e7d..c689cf4 100644
15092--- a/arch/x86/kernel/entry_32.S
15093+++ b/arch/x86/kernel/entry_32.S
15094@@ -185,13 +185,146 @@
15095 /*CFI_REL_OFFSET gs, PT_GS*/
15096 .endm
15097 .macro SET_KERNEL_GS reg
15098+
15099+#ifdef CONFIG_CC_STACKPROTECTOR
15100 movl $(__KERNEL_STACK_CANARY), \reg
15101+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15102+ movl $(__USER_DS), \reg
15103+#else
15104+ xorl \reg, \reg
15105+#endif
15106+
15107 movl \reg, %gs
15108 .endm
15109
15110 #endif /* CONFIG_X86_32_LAZY_GS */
15111
15112-.macro SAVE_ALL
15113+.macro pax_enter_kernel
15114+#ifdef CONFIG_PAX_KERNEXEC
15115+ call pax_enter_kernel
15116+#endif
15117+.endm
15118+
15119+.macro pax_exit_kernel
15120+#ifdef CONFIG_PAX_KERNEXEC
15121+ call pax_exit_kernel
15122+#endif
15123+.endm
15124+
15125+#ifdef CONFIG_PAX_KERNEXEC
15126+ENTRY(pax_enter_kernel)
15127+#ifdef CONFIG_PARAVIRT
15128+ pushl %eax
15129+ pushl %ecx
15130+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
15131+ mov %eax, %esi
15132+#else
15133+ mov %cr0, %esi
15134+#endif
15135+ bts $16, %esi
15136+ jnc 1f
15137+ mov %cs, %esi
15138+ cmp $__KERNEL_CS, %esi
15139+ jz 3f
15140+ ljmp $__KERNEL_CS, $3f
15141+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15142+2:
15143+#ifdef CONFIG_PARAVIRT
15144+ mov %esi, %eax
15145+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15146+#else
15147+ mov %esi, %cr0
15148+#endif
15149+3:
15150+#ifdef CONFIG_PARAVIRT
15151+ popl %ecx
15152+ popl %eax
15153+#endif
15154+ ret
15155+ENDPROC(pax_enter_kernel)
15156+
15157+ENTRY(pax_exit_kernel)
15158+#ifdef CONFIG_PARAVIRT
15159+ pushl %eax
15160+ pushl %ecx
15161+#endif
15162+ mov %cs, %esi
15163+ cmp $__KERNEXEC_KERNEL_CS, %esi
15164+ jnz 2f
15165+#ifdef CONFIG_PARAVIRT
15166+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15167+ mov %eax, %esi
15168+#else
15169+ mov %cr0, %esi
15170+#endif
15171+ btr $16, %esi
15172+ ljmp $__KERNEL_CS, $1f
15173+1:
15174+#ifdef CONFIG_PARAVIRT
15175+ mov %esi, %eax
15176+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15177+#else
15178+ mov %esi, %cr0
15179+#endif
15180+2:
15181+#ifdef CONFIG_PARAVIRT
15182+ popl %ecx
15183+ popl %eax
15184+#endif
15185+ ret
15186+ENDPROC(pax_exit_kernel)
15187+#endif
15188+
15189+.macro pax_erase_kstack
15190+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15191+ call pax_erase_kstack
15192+#endif
15193+.endm
15194+
15195+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15196+/*
15197+ * ebp: thread_info
15198+ * ecx, edx: can be clobbered
15199+ */
15200+ENTRY(pax_erase_kstack)
15201+ pushl %edi
15202+ pushl %eax
15203+
15204+ mov TI_lowest_stack(%ebp), %edi
15205+ mov $-0xBEEF, %eax
15206+ std
15207+
15208+1: mov %edi, %ecx
15209+ and $THREAD_SIZE_asm - 1, %ecx
15210+ shr $2, %ecx
15211+ repne scasl
15212+ jecxz 2f
15213+
15214+ cmp $2*16, %ecx
15215+ jc 2f
15216+
15217+ mov $2*16, %ecx
15218+ repe scasl
15219+ jecxz 2f
15220+ jne 1b
15221+
15222+2: cld
15223+ mov %esp, %ecx
15224+ sub %edi, %ecx
15225+ shr $2, %ecx
15226+ rep stosl
15227+
15228+ mov TI_task_thread_sp0(%ebp), %edi
15229+ sub $128, %edi
15230+ mov %edi, TI_lowest_stack(%ebp)
15231+
15232+ popl %eax
15233+ popl %edi
15234+ ret
15235+ENDPROC(pax_erase_kstack)
15236+#endif
15237+
15238+.macro __SAVE_ALL _DS
15239 cld
15240 PUSH_GS
15241 pushl %fs
15242@@ -224,7 +357,7 @@
15243 pushl %ebx
15244 CFI_ADJUST_CFA_OFFSET 4
15245 CFI_REL_OFFSET ebx, 0
15246- movl $(__USER_DS), %edx
15247+ movl $\_DS, %edx
15248 movl %edx, %ds
15249 movl %edx, %es
15250 movl $(__KERNEL_PERCPU), %edx
15251@@ -232,6 +365,15 @@
15252 SET_KERNEL_GS %edx
15253 .endm
15254
15255+.macro SAVE_ALL
15256+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15257+ __SAVE_ALL __KERNEL_DS
15258+ pax_enter_kernel
15259+#else
15260+ __SAVE_ALL __USER_DS
15261+#endif
15262+.endm
15263+
15264 .macro RESTORE_INT_REGS
15265 popl %ebx
15266 CFI_ADJUST_CFA_OFFSET -4
15267@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15268 CFI_ADJUST_CFA_OFFSET -4
15269 jmp syscall_exit
15270 CFI_ENDPROC
15271-END(ret_from_fork)
15272+ENDPROC(ret_from_fork)
15273
15274 /*
15275 * Return to user mode is not as complex as all this looks,
15276@@ -352,7 +494,15 @@ check_userspace:
15277 movb PT_CS(%esp), %al
15278 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15279 cmpl $USER_RPL, %eax
15280+
15281+#ifdef CONFIG_PAX_KERNEXEC
15282+ jae resume_userspace
15283+
15284+ PAX_EXIT_KERNEL
15285+ jmp resume_kernel
15286+#else
15287 jb resume_kernel # not returning to v8086 or userspace
15288+#endif
15289
15290 ENTRY(resume_userspace)
15291 LOCKDEP_SYS_EXIT
15292@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15293 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15294 # int/exception return?
15295 jne work_pending
15296- jmp restore_all
15297-END(ret_from_exception)
15298+ jmp restore_all_pax
15299+ENDPROC(ret_from_exception)
15300
15301 #ifdef CONFIG_PREEMPT
15302 ENTRY(resume_kernel)
15303@@ -380,7 +530,7 @@ need_resched:
15304 jz restore_all
15305 call preempt_schedule_irq
15306 jmp need_resched
15307-END(resume_kernel)
15308+ENDPROC(resume_kernel)
15309 #endif
15310 CFI_ENDPROC
15311
15312@@ -414,25 +564,36 @@ sysenter_past_esp:
15313 /*CFI_REL_OFFSET cs, 0*/
15314 /*
15315 * Push current_thread_info()->sysenter_return to the stack.
15316- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15317- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15318 */
15319- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15320+ pushl $0
15321 CFI_ADJUST_CFA_OFFSET 4
15322 CFI_REL_OFFSET eip, 0
15323
15324 pushl %eax
15325 CFI_ADJUST_CFA_OFFSET 4
15326 SAVE_ALL
15327+ GET_THREAD_INFO(%ebp)
15328+ movl TI_sysenter_return(%ebp),%ebp
15329+ movl %ebp,PT_EIP(%esp)
15330 ENABLE_INTERRUPTS(CLBR_NONE)
15331
15332 /*
15333 * Load the potential sixth argument from user stack.
15334 * Careful about security.
15335 */
15336+ movl PT_OLDESP(%esp),%ebp
15337+
15338+#ifdef CONFIG_PAX_MEMORY_UDEREF
15339+ mov PT_OLDSS(%esp),%ds
15340+1: movl %ds:(%ebp),%ebp
15341+ push %ss
15342+ pop %ds
15343+#else
15344 cmpl $__PAGE_OFFSET-3,%ebp
15345 jae syscall_fault
15346 1: movl (%ebp),%ebp
15347+#endif
15348+
15349 movl %ebp,PT_EBP(%esp)
15350 .section __ex_table,"a"
15351 .align 4
15352@@ -455,12 +616,24 @@ sysenter_do_call:
15353 testl $_TIF_ALLWORK_MASK, %ecx
15354 jne sysexit_audit
15355 sysenter_exit:
15356+
15357+#ifdef CONFIG_PAX_RANDKSTACK
15358+ pushl_cfi %eax
15359+ movl %esp, %eax
15360+ call pax_randomize_kstack
15361+ popl_cfi %eax
15362+#endif
15363+
15364+ pax_erase_kstack
15365+
15366 /* if something modifies registers it must also disable sysexit */
15367 movl PT_EIP(%esp), %edx
15368 movl PT_OLDESP(%esp), %ecx
15369 xorl %ebp,%ebp
15370 TRACE_IRQS_ON
15371 1: mov PT_FS(%esp), %fs
15372+2: mov PT_DS(%esp), %ds
15373+3: mov PT_ES(%esp), %es
15374 PTGS_TO_GS
15375 ENABLE_INTERRUPTS_SYSEXIT
15376
15377@@ -477,6 +650,9 @@ sysenter_audit:
15378 movl %eax,%edx /* 2nd arg: syscall number */
15379 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15380 call audit_syscall_entry
15381+
15382+ pax_erase_kstack
15383+
15384 pushl %ebx
15385 CFI_ADJUST_CFA_OFFSET 4
15386 movl PT_EAX(%esp),%eax /* reload syscall number */
15387@@ -504,11 +680,17 @@ sysexit_audit:
15388
15389 CFI_ENDPROC
15390 .pushsection .fixup,"ax"
15391-2: movl $0,PT_FS(%esp)
15392+4: movl $0,PT_FS(%esp)
15393+ jmp 1b
15394+5: movl $0,PT_DS(%esp)
15395+ jmp 1b
15396+6: movl $0,PT_ES(%esp)
15397 jmp 1b
15398 .section __ex_table,"a"
15399 .align 4
15400- .long 1b,2b
15401+ .long 1b,4b
15402+ .long 2b,5b
15403+ .long 3b,6b
15404 .popsection
15405 PTGS_TO_GS_EX
15406 ENDPROC(ia32_sysenter_target)
15407@@ -538,6 +720,15 @@ syscall_exit:
15408 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15409 jne syscall_exit_work
15410
15411+restore_all_pax:
15412+
15413+#ifdef CONFIG_PAX_RANDKSTACK
15414+ movl %esp, %eax
15415+ call pax_randomize_kstack
15416+#endif
15417+
15418+ pax_erase_kstack
15419+
15420 restore_all:
15421 TRACE_IRQS_IRET
15422 restore_all_notrace:
15423@@ -602,10 +793,29 @@ ldt_ss:
15424 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15425 mov %dx, %ax /* eax: new kernel esp */
15426 sub %eax, %edx /* offset (low word is 0) */
15427- PER_CPU(gdt_page, %ebx)
15428+#ifdef CONFIG_SMP
15429+ movl PER_CPU_VAR(cpu_number), %ebx
15430+ shll $PAGE_SHIFT_asm, %ebx
15431+ addl $cpu_gdt_table, %ebx
15432+#else
15433+ movl $cpu_gdt_table, %ebx
15434+#endif
15435 shr $16, %edx
15436+
15437+#ifdef CONFIG_PAX_KERNEXEC
15438+ mov %cr0, %esi
15439+ btr $16, %esi
15440+ mov %esi, %cr0
15441+#endif
15442+
15443 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15444 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15445+
15446+#ifdef CONFIG_PAX_KERNEXEC
15447+ bts $16, %esi
15448+ mov %esi, %cr0
15449+#endif
15450+
15451 pushl $__ESPFIX_SS
15452 CFI_ADJUST_CFA_OFFSET 4
15453 push %eax /* new kernel esp */
15454@@ -636,36 +846,30 @@ work_resched:
15455 movl TI_flags(%ebp), %ecx
15456 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15457 # than syscall tracing?
15458- jz restore_all
15459+ jz restore_all_pax
15460 testb $_TIF_NEED_RESCHED, %cl
15461 jnz work_resched
15462
15463 work_notifysig: # deal with pending signals and
15464 # notify-resume requests
15465+ movl %esp, %eax
15466 #ifdef CONFIG_VM86
15467 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15468- movl %esp, %eax
15469- jne work_notifysig_v86 # returning to kernel-space or
15470+ jz 1f # returning to kernel-space or
15471 # vm86-space
15472- xorl %edx, %edx
15473- call do_notify_resume
15474- jmp resume_userspace_sig
15475
15476- ALIGN
15477-work_notifysig_v86:
15478 pushl %ecx # save ti_flags for do_notify_resume
15479 CFI_ADJUST_CFA_OFFSET 4
15480 call save_v86_state # %eax contains pt_regs pointer
15481 popl %ecx
15482 CFI_ADJUST_CFA_OFFSET -4
15483 movl %eax, %esp
15484-#else
15485- movl %esp, %eax
15486+1:
15487 #endif
15488 xorl %edx, %edx
15489 call do_notify_resume
15490 jmp resume_userspace_sig
15491-END(work_pending)
15492+ENDPROC(work_pending)
15493
15494 # perform syscall exit tracing
15495 ALIGN
15496@@ -673,11 +877,14 @@ syscall_trace_entry:
15497 movl $-ENOSYS,PT_EAX(%esp)
15498 movl %esp, %eax
15499 call syscall_trace_enter
15500+
15501+ pax_erase_kstack
15502+
15503 /* What it returned is what we'll actually use. */
15504 cmpl $(nr_syscalls), %eax
15505 jnae syscall_call
15506 jmp syscall_exit
15507-END(syscall_trace_entry)
15508+ENDPROC(syscall_trace_entry)
15509
15510 # perform syscall exit tracing
15511 ALIGN
15512@@ -690,20 +897,24 @@ syscall_exit_work:
15513 movl %esp, %eax
15514 call syscall_trace_leave
15515 jmp resume_userspace
15516-END(syscall_exit_work)
15517+ENDPROC(syscall_exit_work)
15518 CFI_ENDPROC
15519
15520 RING0_INT_FRAME # can't unwind into user space anyway
15521 syscall_fault:
15522+#ifdef CONFIG_PAX_MEMORY_UDEREF
15523+ push %ss
15524+ pop %ds
15525+#endif
15526 GET_THREAD_INFO(%ebp)
15527 movl $-EFAULT,PT_EAX(%esp)
15528 jmp resume_userspace
15529-END(syscall_fault)
15530+ENDPROC(syscall_fault)
15531
15532 syscall_badsys:
15533 movl $-ENOSYS,PT_EAX(%esp)
15534 jmp resume_userspace
15535-END(syscall_badsys)
15536+ENDPROC(syscall_badsys)
15537 CFI_ENDPROC
15538
15539 /*
15540@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15541 PTREGSCALL(vm86)
15542 PTREGSCALL(vm86old)
15543
15544+ ALIGN;
15545+ENTRY(kernel_execve)
15546+ push %ebp
15547+ sub $PT_OLDSS+4,%esp
15548+ push %edi
15549+ push %ecx
15550+ push %eax
15551+ lea 3*4(%esp),%edi
15552+ mov $PT_OLDSS/4+1,%ecx
15553+ xorl %eax,%eax
15554+ rep stosl
15555+ pop %eax
15556+ pop %ecx
15557+ pop %edi
15558+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15559+ mov %eax,PT_EBX(%esp)
15560+ mov %edx,PT_ECX(%esp)
15561+ mov %ecx,PT_EDX(%esp)
15562+ mov %esp,%eax
15563+ call sys_execve
15564+ GET_THREAD_INFO(%ebp)
15565+ test %eax,%eax
15566+ jz syscall_exit
15567+ add $PT_OLDSS+4,%esp
15568+ pop %ebp
15569+ ret
15570+
15571 .macro FIXUP_ESPFIX_STACK
15572 /*
15573 * Switch back for ESPFIX stack to the normal zerobased stack
15574@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15575 * normal stack and adjusts ESP with the matching offset.
15576 */
15577 /* fixup the stack */
15578- PER_CPU(gdt_page, %ebx)
15579+#ifdef CONFIG_SMP
15580+ movl PER_CPU_VAR(cpu_number), %ebx
15581+ shll $PAGE_SHIFT_asm, %ebx
15582+ addl $cpu_gdt_table, %ebx
15583+#else
15584+ movl $cpu_gdt_table, %ebx
15585+#endif
15586 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15587 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15588 shl $16, %eax
15589@@ -793,7 +1037,7 @@ vector=vector+1
15590 .endr
15591 2: jmp common_interrupt
15592 .endr
15593-END(irq_entries_start)
15594+ENDPROC(irq_entries_start)
15595
15596 .previous
15597 END(interrupt)
15598@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15599 CFI_ADJUST_CFA_OFFSET 4
15600 jmp error_code
15601 CFI_ENDPROC
15602-END(coprocessor_error)
15603+ENDPROC(coprocessor_error)
15604
15605 ENTRY(simd_coprocessor_error)
15606 RING0_INT_FRAME
15607@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15608 CFI_ADJUST_CFA_OFFSET 4
15609 jmp error_code
15610 CFI_ENDPROC
15611-END(simd_coprocessor_error)
15612+ENDPROC(simd_coprocessor_error)
15613
15614 ENTRY(device_not_available)
15615 RING0_INT_FRAME
15616@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15617 CFI_ADJUST_CFA_OFFSET 4
15618 jmp error_code
15619 CFI_ENDPROC
15620-END(device_not_available)
15621+ENDPROC(device_not_available)
15622
15623 #ifdef CONFIG_PARAVIRT
15624 ENTRY(native_iret)
15625@@ -869,12 +1113,12 @@ ENTRY(native_iret)
15626 .align 4
15627 .long native_iret, iret_exc
15628 .previous
15629-END(native_iret)
15630+ENDPROC(native_iret)
15631
15632 ENTRY(native_irq_enable_sysexit)
15633 sti
15634 sysexit
15635-END(native_irq_enable_sysexit)
15636+ENDPROC(native_irq_enable_sysexit)
15637 #endif
15638
15639 ENTRY(overflow)
15640@@ -885,7 +1129,7 @@ ENTRY(overflow)
15641 CFI_ADJUST_CFA_OFFSET 4
15642 jmp error_code
15643 CFI_ENDPROC
15644-END(overflow)
15645+ENDPROC(overflow)
15646
15647 ENTRY(bounds)
15648 RING0_INT_FRAME
15649@@ -895,7 +1139,7 @@ ENTRY(bounds)
15650 CFI_ADJUST_CFA_OFFSET 4
15651 jmp error_code
15652 CFI_ENDPROC
15653-END(bounds)
15654+ENDPROC(bounds)
15655
15656 ENTRY(invalid_op)
15657 RING0_INT_FRAME
15658@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15659 CFI_ADJUST_CFA_OFFSET 4
15660 jmp error_code
15661 CFI_ENDPROC
15662-END(invalid_op)
15663+ENDPROC(invalid_op)
15664
15665 ENTRY(coprocessor_segment_overrun)
15666 RING0_INT_FRAME
15667@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15668 CFI_ADJUST_CFA_OFFSET 4
15669 jmp error_code
15670 CFI_ENDPROC
15671-END(coprocessor_segment_overrun)
15672+ENDPROC(coprocessor_segment_overrun)
15673
15674 ENTRY(invalid_TSS)
15675 RING0_EC_FRAME
15676@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15677 CFI_ADJUST_CFA_OFFSET 4
15678 jmp error_code
15679 CFI_ENDPROC
15680-END(invalid_TSS)
15681+ENDPROC(invalid_TSS)
15682
15683 ENTRY(segment_not_present)
15684 RING0_EC_FRAME
15685@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15686 CFI_ADJUST_CFA_OFFSET 4
15687 jmp error_code
15688 CFI_ENDPROC
15689-END(segment_not_present)
15690+ENDPROC(segment_not_present)
15691
15692 ENTRY(stack_segment)
15693 RING0_EC_FRAME
15694@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15695 CFI_ADJUST_CFA_OFFSET 4
15696 jmp error_code
15697 CFI_ENDPROC
15698-END(stack_segment)
15699+ENDPROC(stack_segment)
15700
15701 ENTRY(alignment_check)
15702 RING0_EC_FRAME
15703@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15704 CFI_ADJUST_CFA_OFFSET 4
15705 jmp error_code
15706 CFI_ENDPROC
15707-END(alignment_check)
15708+ENDPROC(alignment_check)
15709
15710 ENTRY(divide_error)
15711 RING0_INT_FRAME
15712@@ -957,7 +1201,7 @@ ENTRY(divide_error)
15713 CFI_ADJUST_CFA_OFFSET 4
15714 jmp error_code
15715 CFI_ENDPROC
15716-END(divide_error)
15717+ENDPROC(divide_error)
15718
15719 #ifdef CONFIG_X86_MCE
15720 ENTRY(machine_check)
15721@@ -968,7 +1212,7 @@ ENTRY(machine_check)
15722 CFI_ADJUST_CFA_OFFSET 4
15723 jmp error_code
15724 CFI_ENDPROC
15725-END(machine_check)
15726+ENDPROC(machine_check)
15727 #endif
15728
15729 ENTRY(spurious_interrupt_bug)
15730@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15731 CFI_ADJUST_CFA_OFFSET 4
15732 jmp error_code
15733 CFI_ENDPROC
15734-END(spurious_interrupt_bug)
15735+ENDPROC(spurious_interrupt_bug)
15736
15737 ENTRY(kernel_thread_helper)
15738 pushl $0 # fake return address for unwinder
15739@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15740
15741 ENTRY(mcount)
15742 ret
15743-END(mcount)
15744+ENDPROC(mcount)
15745
15746 ENTRY(ftrace_caller)
15747 cmpl $0, function_trace_stop
15748@@ -1124,7 +1368,7 @@ ftrace_graph_call:
15749 .globl ftrace_stub
15750 ftrace_stub:
15751 ret
15752-END(ftrace_caller)
15753+ENDPROC(ftrace_caller)
15754
15755 #else /* ! CONFIG_DYNAMIC_FTRACE */
15756
15757@@ -1160,7 +1404,7 @@ trace:
15758 popl %ecx
15759 popl %eax
15760 jmp ftrace_stub
15761-END(mcount)
15762+ENDPROC(mcount)
15763 #endif /* CONFIG_DYNAMIC_FTRACE */
15764 #endif /* CONFIG_FUNCTION_TRACER */
15765
15766@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15767 popl %ecx
15768 popl %eax
15769 ret
15770-END(ftrace_graph_caller)
15771+ENDPROC(ftrace_graph_caller)
15772
15773 .globl return_to_handler
15774 return_to_handler:
15775@@ -1198,7 +1442,6 @@ return_to_handler:
15776 ret
15777 #endif
15778
15779-.section .rodata,"a"
15780 #include "syscall_table_32.S"
15781
15782 syscall_table_size=(.-sys_call_table)
15783@@ -1255,15 +1498,18 @@ error_code:
15784 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15785 REG_TO_PTGS %ecx
15786 SET_KERNEL_GS %ecx
15787- movl $(__USER_DS), %ecx
15788+ movl $(__KERNEL_DS), %ecx
15789 movl %ecx, %ds
15790 movl %ecx, %es
15791+
15792+ pax_enter_kernel
15793+
15794 TRACE_IRQS_OFF
15795 movl %esp,%eax # pt_regs pointer
15796 call *%edi
15797 jmp ret_from_exception
15798 CFI_ENDPROC
15799-END(page_fault)
15800+ENDPROC(page_fault)
15801
15802 /*
15803 * Debug traps and NMI can happen at the one SYSENTER instruction
15804@@ -1309,7 +1555,7 @@ debug_stack_correct:
15805 call do_debug
15806 jmp ret_from_exception
15807 CFI_ENDPROC
15808-END(debug)
15809+ENDPROC(debug)
15810
15811 /*
15812 * NMI is doubly nasty. It can happen _while_ we're handling
15813@@ -1351,6 +1597,9 @@ nmi_stack_correct:
15814 xorl %edx,%edx # zero error code
15815 movl %esp,%eax # pt_regs pointer
15816 call do_nmi
15817+
15818+ pax_exit_kernel
15819+
15820 jmp restore_all_notrace
15821 CFI_ENDPROC
15822
15823@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15824 FIXUP_ESPFIX_STACK # %eax == %esp
15825 xorl %edx,%edx # zero error code
15826 call do_nmi
15827+
15828+ pax_exit_kernel
15829+
15830 RESTORE_REGS
15831 lss 12+4(%esp), %esp # back to espfix stack
15832 CFI_ADJUST_CFA_OFFSET -24
15833 jmp irq_return
15834 CFI_ENDPROC
15835-END(nmi)
15836+ENDPROC(nmi)
15837
15838 ENTRY(int3)
15839 RING0_INT_FRAME
15840@@ -1409,7 +1661,7 @@ ENTRY(int3)
15841 call do_int3
15842 jmp ret_from_exception
15843 CFI_ENDPROC
15844-END(int3)
15845+ENDPROC(int3)
15846
15847 ENTRY(general_protection)
15848 RING0_EC_FRAME
15849@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15850 CFI_ADJUST_CFA_OFFSET 4
15851 jmp error_code
15852 CFI_ENDPROC
15853-END(general_protection)
15854+ENDPROC(general_protection)
15855
15856 /*
15857 * End of kprobes section
15858diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15859index 34a56a9..87790b4 100644
15860--- a/arch/x86/kernel/entry_64.S
15861+++ b/arch/x86/kernel/entry_64.S
15862@@ -53,6 +53,8 @@
15863 #include <asm/paravirt.h>
15864 #include <asm/ftrace.h>
15865 #include <asm/percpu.h>
15866+#include <asm/pgtable.h>
15867+#include <asm/alternative-asm.h>
15868
15869 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15870 #include <linux/elf-em.h>
15871@@ -64,8 +66,9 @@
15872 #ifdef CONFIG_FUNCTION_TRACER
15873 #ifdef CONFIG_DYNAMIC_FTRACE
15874 ENTRY(mcount)
15875+ pax_force_retaddr
15876 retq
15877-END(mcount)
15878+ENDPROC(mcount)
15879
15880 ENTRY(ftrace_caller)
15881 cmpl $0, function_trace_stop
15882@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15883 #endif
15884
15885 GLOBAL(ftrace_stub)
15886+ pax_force_retaddr
15887 retq
15888-END(ftrace_caller)
15889+ENDPROC(ftrace_caller)
15890
15891 #else /* ! CONFIG_DYNAMIC_FTRACE */
15892 ENTRY(mcount)
15893@@ -108,6 +112,7 @@ ENTRY(mcount)
15894 #endif
15895
15896 GLOBAL(ftrace_stub)
15897+ pax_force_retaddr
15898 retq
15899
15900 trace:
15901@@ -117,12 +122,13 @@ trace:
15902 movq 8(%rbp), %rsi
15903 subq $MCOUNT_INSN_SIZE, %rdi
15904
15905+ pax_force_fptr ftrace_trace_function
15906 call *ftrace_trace_function
15907
15908 MCOUNT_RESTORE_FRAME
15909
15910 jmp ftrace_stub
15911-END(mcount)
15912+ENDPROC(mcount)
15913 #endif /* CONFIG_DYNAMIC_FTRACE */
15914 #endif /* CONFIG_FUNCTION_TRACER */
15915
15916@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15917
15918 MCOUNT_RESTORE_FRAME
15919
15920+ pax_force_retaddr
15921 retq
15922-END(ftrace_graph_caller)
15923+ENDPROC(ftrace_graph_caller)
15924
15925 GLOBAL(return_to_handler)
15926 subq $24, %rsp
15927@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15928 movq 8(%rsp), %rdx
15929 movq (%rsp), %rax
15930 addq $16, %rsp
15931+ pax_force_retaddr
15932 retq
15933 #endif
15934
15935@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15936 ENDPROC(native_usergs_sysret64)
15937 #endif /* CONFIG_PARAVIRT */
15938
15939+ .macro ljmpq sel, off
15940+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15941+ .byte 0x48; ljmp *1234f(%rip)
15942+ .pushsection .rodata
15943+ .align 16
15944+ 1234: .quad \off; .word \sel
15945+ .popsection
15946+#else
15947+ pushq $\sel
15948+ pushq $\off
15949+ lretq
15950+#endif
15951+ .endm
15952+
15953+ .macro pax_enter_kernel
15954+ pax_set_fptr_mask
15955+#ifdef CONFIG_PAX_KERNEXEC
15956+ call pax_enter_kernel
15957+#endif
15958+ .endm
15959+
15960+ .macro pax_exit_kernel
15961+#ifdef CONFIG_PAX_KERNEXEC
15962+ call pax_exit_kernel
15963+#endif
15964+ .endm
15965+
15966+#ifdef CONFIG_PAX_KERNEXEC
15967+ENTRY(pax_enter_kernel)
15968+ pushq %rdi
15969+
15970+#ifdef CONFIG_PARAVIRT
15971+ PV_SAVE_REGS(CLBR_RDI)
15972+#endif
15973+
15974+ GET_CR0_INTO_RDI
15975+ bts $16,%rdi
15976+ jnc 3f
15977+ mov %cs,%edi
15978+ cmp $__KERNEL_CS,%edi
15979+ jnz 2f
15980+1:
15981+
15982+#ifdef CONFIG_PARAVIRT
15983+ PV_RESTORE_REGS(CLBR_RDI)
15984+#endif
15985+
15986+ popq %rdi
15987+ pax_force_retaddr
15988+ retq
15989+
15990+2: ljmpq __KERNEL_CS,1f
15991+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15992+4: SET_RDI_INTO_CR0
15993+ jmp 1b
15994+ENDPROC(pax_enter_kernel)
15995+
15996+ENTRY(pax_exit_kernel)
15997+ pushq %rdi
15998+
15999+#ifdef CONFIG_PARAVIRT
16000+ PV_SAVE_REGS(CLBR_RDI)
16001+#endif
16002+
16003+ mov %cs,%rdi
16004+ cmp $__KERNEXEC_KERNEL_CS,%edi
16005+ jz 2f
16006+1:
16007+
16008+#ifdef CONFIG_PARAVIRT
16009+ PV_RESTORE_REGS(CLBR_RDI);
16010+#endif
16011+
16012+ popq %rdi
16013+ pax_force_retaddr
16014+ retq
16015+
16016+2: GET_CR0_INTO_RDI
16017+ btr $16,%rdi
16018+ ljmpq __KERNEL_CS,3f
16019+3: SET_RDI_INTO_CR0
16020+ jmp 1b
16021+#ifdef CONFIG_PARAVIRT
16022+ PV_RESTORE_REGS(CLBR_RDI);
16023+#endif
16024+
16025+ popq %rdi
16026+ pax_force_retaddr
16027+ retq
16028+ENDPROC(pax_exit_kernel)
16029+#endif
16030+
16031+ .macro pax_enter_kernel_user
16032+ pax_set_fptr_mask
16033+#ifdef CONFIG_PAX_MEMORY_UDEREF
16034+ call pax_enter_kernel_user
16035+#endif
16036+ .endm
16037+
16038+ .macro pax_exit_kernel_user
16039+#ifdef CONFIG_PAX_MEMORY_UDEREF
16040+ call pax_exit_kernel_user
16041+#endif
16042+#ifdef CONFIG_PAX_RANDKSTACK
16043+ pushq %rax
16044+ call pax_randomize_kstack
16045+ popq %rax
16046+#endif
16047+ .endm
16048+
16049+#ifdef CONFIG_PAX_MEMORY_UDEREF
16050+ENTRY(pax_enter_kernel_user)
16051+ pushq %rdi
16052+ pushq %rbx
16053+
16054+#ifdef CONFIG_PARAVIRT
16055+ PV_SAVE_REGS(CLBR_RDI)
16056+#endif
16057+
16058+ GET_CR3_INTO_RDI
16059+ mov %rdi,%rbx
16060+ add $__START_KERNEL_map,%rbx
16061+ sub phys_base(%rip),%rbx
16062+
16063+#ifdef CONFIG_PARAVIRT
16064+ pushq %rdi
16065+ cmpl $0, pv_info+PARAVIRT_enabled
16066+ jz 1f
16067+ i = 0
16068+ .rept USER_PGD_PTRS
16069+ mov i*8(%rbx),%rsi
16070+ mov $0,%sil
16071+ lea i*8(%rbx),%rdi
16072+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16073+ i = i + 1
16074+ .endr
16075+ jmp 2f
16076+1:
16077+#endif
16078+
16079+ i = 0
16080+ .rept USER_PGD_PTRS
16081+ movb $0,i*8(%rbx)
16082+ i = i + 1
16083+ .endr
16084+
16085+#ifdef CONFIG_PARAVIRT
16086+2: popq %rdi
16087+#endif
16088+ SET_RDI_INTO_CR3
16089+
16090+#ifdef CONFIG_PAX_KERNEXEC
16091+ GET_CR0_INTO_RDI
16092+ bts $16,%rdi
16093+ SET_RDI_INTO_CR0
16094+#endif
16095+
16096+#ifdef CONFIG_PARAVIRT
16097+ PV_RESTORE_REGS(CLBR_RDI)
16098+#endif
16099+
16100+ popq %rbx
16101+ popq %rdi
16102+ pax_force_retaddr
16103+ retq
16104+ENDPROC(pax_enter_kernel_user)
16105+
16106+ENTRY(pax_exit_kernel_user)
16107+ push %rdi
16108+
16109+#ifdef CONFIG_PARAVIRT
16110+ pushq %rbx
16111+ PV_SAVE_REGS(CLBR_RDI)
16112+#endif
16113+
16114+#ifdef CONFIG_PAX_KERNEXEC
16115+ GET_CR0_INTO_RDI
16116+ btr $16,%rdi
16117+ SET_RDI_INTO_CR0
16118+#endif
16119+
16120+ GET_CR3_INTO_RDI
16121+ add $__START_KERNEL_map,%rdi
16122+ sub phys_base(%rip),%rdi
16123+
16124+#ifdef CONFIG_PARAVIRT
16125+ cmpl $0, pv_info+PARAVIRT_enabled
16126+ jz 1f
16127+ mov %rdi,%rbx
16128+ i = 0
16129+ .rept USER_PGD_PTRS
16130+ mov i*8(%rbx),%rsi
16131+ mov $0x67,%sil
16132+ lea i*8(%rbx),%rdi
16133+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16134+ i = i + 1
16135+ .endr
16136+ jmp 2f
16137+1:
16138+#endif
16139+
16140+ i = 0
16141+ .rept USER_PGD_PTRS
16142+ movb $0x67,i*8(%rdi)
16143+ i = i + 1
16144+ .endr
16145+
16146+#ifdef CONFIG_PARAVIRT
16147+2: PV_RESTORE_REGS(CLBR_RDI)
16148+ popq %rbx
16149+#endif
16150+
16151+ popq %rdi
16152+ pax_force_retaddr
16153+ retq
16154+ENDPROC(pax_exit_kernel_user)
16155+#endif
16156+
16157+.macro pax_erase_kstack
16158+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16159+ call pax_erase_kstack
16160+#endif
16161+.endm
16162+
16163+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16164+/*
16165+ * r11: thread_info
16166+ * rcx, rdx: can be clobbered
16167+ */
16168+ENTRY(pax_erase_kstack)
16169+ pushq %rdi
16170+ pushq %rax
16171+ pushq %r11
16172+
16173+ GET_THREAD_INFO(%r11)
16174+ mov TI_lowest_stack(%r11), %rdi
16175+ mov $-0xBEEF, %rax
16176+ std
16177+
16178+1: mov %edi, %ecx
16179+ and $THREAD_SIZE_asm - 1, %ecx
16180+ shr $3, %ecx
16181+ repne scasq
16182+ jecxz 2f
16183+
16184+ cmp $2*8, %ecx
16185+ jc 2f
16186+
16187+ mov $2*8, %ecx
16188+ repe scasq
16189+ jecxz 2f
16190+ jne 1b
16191+
16192+2: cld
16193+ mov %esp, %ecx
16194+ sub %edi, %ecx
16195+
16196+ cmp $THREAD_SIZE_asm, %rcx
16197+ jb 3f
16198+ ud2
16199+3:
16200+
16201+ shr $3, %ecx
16202+ rep stosq
16203+
16204+ mov TI_task_thread_sp0(%r11), %rdi
16205+ sub $256, %rdi
16206+ mov %rdi, TI_lowest_stack(%r11)
16207+
16208+ popq %r11
16209+ popq %rax
16210+ popq %rdi
16211+ pax_force_retaddr
16212+ ret
16213+ENDPROC(pax_erase_kstack)
16214+#endif
16215
16216 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16217 #ifdef CONFIG_TRACE_IRQFLAGS
16218@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
16219 .endm
16220
16221 .macro UNFAKE_STACK_FRAME
16222- addq $8*6, %rsp
16223- CFI_ADJUST_CFA_OFFSET -(6*8)
16224+ addq $8*6 + ARG_SKIP, %rsp
16225+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16226 .endm
16227
16228 /*
16229@@ -317,7 +601,7 @@ ENTRY(save_args)
16230 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16231 movq_cfi rbp, 8 /* push %rbp */
16232 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16233- testl $3, CS(%rdi)
16234+ testb $3, CS(%rdi)
16235 je 1f
16236 SWAPGS
16237 /*
16238@@ -337,9 +621,10 @@ ENTRY(save_args)
16239 * We entered an interrupt context - irqs are off:
16240 */
16241 2: TRACE_IRQS_OFF
16242+ pax_force_retaddr
16243 ret
16244 CFI_ENDPROC
16245-END(save_args)
16246+ENDPROC(save_args)
16247
16248 ENTRY(save_rest)
16249 PARTIAL_FRAME 1 REST_SKIP+8
16250@@ -352,9 +637,10 @@ ENTRY(save_rest)
16251 movq_cfi r15, R15+16
16252 movq %r11, 8(%rsp) /* return address */
16253 FIXUP_TOP_OF_STACK %r11, 16
16254+ pax_force_retaddr
16255 ret
16256 CFI_ENDPROC
16257-END(save_rest)
16258+ENDPROC(save_rest)
16259
16260 /* save complete stack frame */
16261 .pushsection .kprobes.text, "ax"
16262@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16263 js 1f /* negative -> in kernel */
16264 SWAPGS
16265 xorl %ebx,%ebx
16266-1: ret
16267+1: pax_force_retaddr_bts
16268+ ret
16269 CFI_ENDPROC
16270-END(save_paranoid)
16271+ENDPROC(save_paranoid)
16272 .popsection
16273
16274 /*
16275@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16276
16277 RESTORE_REST
16278
16279- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16280+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16281 je int_ret_from_sys_call
16282
16283 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16284@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16285 jmp ret_from_sys_call # go to the SYSRET fastpath
16286
16287 CFI_ENDPROC
16288-END(ret_from_fork)
16289+ENDPROC(ret_from_fork)
16290
16291 /*
16292 * System call entry. Upto 6 arguments in registers are supported.
16293@@ -455,7 +742,7 @@ END(ret_from_fork)
16294 ENTRY(system_call)
16295 CFI_STARTPROC simple
16296 CFI_SIGNAL_FRAME
16297- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16298+ CFI_DEF_CFA rsp,0
16299 CFI_REGISTER rip,rcx
16300 /*CFI_REGISTER rflags,r11*/
16301 SWAPGS_UNSAFE_STACK
16302@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16303
16304 movq %rsp,PER_CPU_VAR(old_rsp)
16305 movq PER_CPU_VAR(kernel_stack),%rsp
16306+ SAVE_ARGS 8*6,1
16307+ pax_enter_kernel_user
16308 /*
16309 * No need to follow this irqs off/on section - it's straight
16310 * and short:
16311 */
16312 ENABLE_INTERRUPTS(CLBR_NONE)
16313- SAVE_ARGS 8,1
16314 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16315 movq %rcx,RIP-ARGOFFSET(%rsp)
16316 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16317@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16318 system_call_fastpath:
16319 cmpq $__NR_syscall_max,%rax
16320 ja badsys
16321- movq %r10,%rcx
16322+ movq R10-ARGOFFSET(%rsp),%rcx
16323 call *sys_call_table(,%rax,8) # XXX: rip relative
16324 movq %rax,RAX-ARGOFFSET(%rsp)
16325 /*
16326@@ -502,6 +790,8 @@ sysret_check:
16327 andl %edi,%edx
16328 jnz sysret_careful
16329 CFI_REMEMBER_STATE
16330+ pax_exit_kernel_user
16331+ pax_erase_kstack
16332 /*
16333 * sysretq will re-enable interrupts:
16334 */
16335@@ -555,14 +845,18 @@ badsys:
16336 * jump back to the normal fast path.
16337 */
16338 auditsys:
16339- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16340+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16341 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16342 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16343 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16344 movq %rax,%rsi /* 2nd arg: syscall number */
16345 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16346 call audit_syscall_entry
16347+
16348+ pax_erase_kstack
16349+
16350 LOAD_ARGS 0 /* reload call-clobbered registers */
16351+ pax_set_fptr_mask
16352 jmp system_call_fastpath
16353
16354 /*
16355@@ -592,16 +886,20 @@ tracesys:
16356 FIXUP_TOP_OF_STACK %rdi
16357 movq %rsp,%rdi
16358 call syscall_trace_enter
16359+
16360+ pax_erase_kstack
16361+
16362 /*
16363 * Reload arg registers from stack in case ptrace changed them.
16364 * We don't reload %rax because syscall_trace_enter() returned
16365 * the value it wants us to use in the table lookup.
16366 */
16367 LOAD_ARGS ARGOFFSET, 1
16368+ pax_set_fptr_mask
16369 RESTORE_REST
16370 cmpq $__NR_syscall_max,%rax
16371 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16372- movq %r10,%rcx /* fixup for C */
16373+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16374 call *sys_call_table(,%rax,8)
16375 movq %rax,RAX-ARGOFFSET(%rsp)
16376 /* Use IRET because user could have changed frame */
16377@@ -613,7 +911,7 @@ tracesys:
16378 GLOBAL(int_ret_from_sys_call)
16379 DISABLE_INTERRUPTS(CLBR_NONE)
16380 TRACE_IRQS_OFF
16381- testl $3,CS-ARGOFFSET(%rsp)
16382+ testb $3,CS-ARGOFFSET(%rsp)
16383 je retint_restore_args
16384 movl $_TIF_ALLWORK_MASK,%edi
16385 /* edi: mask to check */
16386@@ -624,6 +922,7 @@ GLOBAL(int_with_check)
16387 andl %edi,%edx
16388 jnz int_careful
16389 andl $~TS_COMPAT,TI_status(%rcx)
16390+ pax_erase_kstack
16391 jmp retint_swapgs
16392
16393 /* Either reschedule or signal or syscall exit tracking needed. */
16394@@ -674,7 +973,7 @@ int_restore_rest:
16395 TRACE_IRQS_OFF
16396 jmp int_with_check
16397 CFI_ENDPROC
16398-END(system_call)
16399+ENDPROC(system_call)
16400
16401 /*
16402 * Certain special system calls that need to save a complete full stack frame.
16403@@ -690,7 +989,7 @@ ENTRY(\label)
16404 call \func
16405 jmp ptregscall_common
16406 CFI_ENDPROC
16407-END(\label)
16408+ENDPROC(\label)
16409 .endm
16410
16411 PTREGSCALL stub_clone, sys_clone, %r8
16412@@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
16413 movq_cfi_restore R12+8, r12
16414 movq_cfi_restore RBP+8, rbp
16415 movq_cfi_restore RBX+8, rbx
16416+ pax_force_retaddr
16417 ret $REST_SKIP /* pop extended registers */
16418 CFI_ENDPROC
16419-END(ptregscall_common)
16420+ENDPROC(ptregscall_common)
16421
16422 ENTRY(stub_execve)
16423 CFI_STARTPROC
16424@@ -726,7 +1026,7 @@ ENTRY(stub_execve)
16425 RESTORE_REST
16426 jmp int_ret_from_sys_call
16427 CFI_ENDPROC
16428-END(stub_execve)
16429+ENDPROC(stub_execve)
16430
16431 /*
16432 * sigreturn is special because it needs to restore all registers on return.
16433@@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16434 RESTORE_REST
16435 jmp int_ret_from_sys_call
16436 CFI_ENDPROC
16437-END(stub_rt_sigreturn)
16438+ENDPROC(stub_rt_sigreturn)
16439
16440 /*
16441 * Build the entry stubs and pointer table with some assembler magic.
16442@@ -780,7 +1080,7 @@ vector=vector+1
16443 2: jmp common_interrupt
16444 .endr
16445 CFI_ENDPROC
16446-END(irq_entries_start)
16447+ENDPROC(irq_entries_start)
16448
16449 .previous
16450 END(interrupt)
16451@@ -800,6 +1100,16 @@ END(interrupt)
16452 CFI_ADJUST_CFA_OFFSET 10*8
16453 call save_args
16454 PARTIAL_FRAME 0
16455+#ifdef CONFIG_PAX_MEMORY_UDEREF
16456+ testb $3, CS(%rdi)
16457+ jnz 1f
16458+ pax_enter_kernel
16459+ jmp 2f
16460+1: pax_enter_kernel_user
16461+2:
16462+#else
16463+ pax_enter_kernel
16464+#endif
16465 call \func
16466 .endm
16467
16468@@ -822,7 +1132,7 @@ ret_from_intr:
16469 CFI_ADJUST_CFA_OFFSET -8
16470 exit_intr:
16471 GET_THREAD_INFO(%rcx)
16472- testl $3,CS-ARGOFFSET(%rsp)
16473+ testb $3,CS-ARGOFFSET(%rsp)
16474 je retint_kernel
16475
16476 /* Interrupt came from user space */
16477@@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
16478 * The iretq could re-enable interrupts:
16479 */
16480 DISABLE_INTERRUPTS(CLBR_ANY)
16481+ pax_exit_kernel_user
16482 TRACE_IRQS_IRETQ
16483 SWAPGS
16484 jmp restore_args
16485
16486 retint_restore_args: /* return to kernel space */
16487 DISABLE_INTERRUPTS(CLBR_ANY)
16488+ pax_exit_kernel
16489+ pax_force_retaddr RIP-ARGOFFSET
16490 /*
16491 * The iretq could re-enable interrupts:
16492 */
16493@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16494 #endif
16495
16496 CFI_ENDPROC
16497-END(common_interrupt)
16498+ENDPROC(common_interrupt)
16499
16500 /*
16501 * APIC interrupts.
16502@@ -953,7 +1266,7 @@ ENTRY(\sym)
16503 interrupt \do_sym
16504 jmp ret_from_intr
16505 CFI_ENDPROC
16506-END(\sym)
16507+ENDPROC(\sym)
16508 .endm
16509
16510 #ifdef CONFIG_SMP
16511@@ -1032,12 +1345,22 @@ ENTRY(\sym)
16512 CFI_ADJUST_CFA_OFFSET 15*8
16513 call error_entry
16514 DEFAULT_FRAME 0
16515+#ifdef CONFIG_PAX_MEMORY_UDEREF
16516+ testb $3, CS(%rsp)
16517+ jnz 1f
16518+ pax_enter_kernel
16519+ jmp 2f
16520+1: pax_enter_kernel_user
16521+2:
16522+#else
16523+ pax_enter_kernel
16524+#endif
16525 movq %rsp,%rdi /* pt_regs pointer */
16526 xorl %esi,%esi /* no error code */
16527 call \do_sym
16528 jmp error_exit /* %ebx: no swapgs flag */
16529 CFI_ENDPROC
16530-END(\sym)
16531+ENDPROC(\sym)
16532 .endm
16533
16534 .macro paranoidzeroentry sym do_sym
16535@@ -1049,12 +1372,22 @@ ENTRY(\sym)
16536 subq $15*8, %rsp
16537 call save_paranoid
16538 TRACE_IRQS_OFF
16539+#ifdef CONFIG_PAX_MEMORY_UDEREF
16540+ testb $3, CS(%rsp)
16541+ jnz 1f
16542+ pax_enter_kernel
16543+ jmp 2f
16544+1: pax_enter_kernel_user
16545+2:
16546+#else
16547+ pax_enter_kernel
16548+#endif
16549 movq %rsp,%rdi /* pt_regs pointer */
16550 xorl %esi,%esi /* no error code */
16551 call \do_sym
16552 jmp paranoid_exit /* %ebx: no swapgs flag */
16553 CFI_ENDPROC
16554-END(\sym)
16555+ENDPROC(\sym)
16556 .endm
16557
16558 .macro paranoidzeroentry_ist sym do_sym ist
16559@@ -1066,15 +1399,30 @@ ENTRY(\sym)
16560 subq $15*8, %rsp
16561 call save_paranoid
16562 TRACE_IRQS_OFF
16563+#ifdef CONFIG_PAX_MEMORY_UDEREF
16564+ testb $3, CS(%rsp)
16565+ jnz 1f
16566+ pax_enter_kernel
16567+ jmp 2f
16568+1: pax_enter_kernel_user
16569+2:
16570+#else
16571+ pax_enter_kernel
16572+#endif
16573 movq %rsp,%rdi /* pt_regs pointer */
16574 xorl %esi,%esi /* no error code */
16575- PER_CPU(init_tss, %rbp)
16576+#ifdef CONFIG_SMP
16577+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16578+ lea init_tss(%rbp), %rbp
16579+#else
16580+ lea init_tss(%rip), %rbp
16581+#endif
16582 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16583 call \do_sym
16584 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16585 jmp paranoid_exit /* %ebx: no swapgs flag */
16586 CFI_ENDPROC
16587-END(\sym)
16588+ENDPROC(\sym)
16589 .endm
16590
16591 .macro errorentry sym do_sym
16592@@ -1085,13 +1433,23 @@ ENTRY(\sym)
16593 CFI_ADJUST_CFA_OFFSET 15*8
16594 call error_entry
16595 DEFAULT_FRAME 0
16596+#ifdef CONFIG_PAX_MEMORY_UDEREF
16597+ testb $3, CS(%rsp)
16598+ jnz 1f
16599+ pax_enter_kernel
16600+ jmp 2f
16601+1: pax_enter_kernel_user
16602+2:
16603+#else
16604+ pax_enter_kernel
16605+#endif
16606 movq %rsp,%rdi /* pt_regs pointer */
16607 movq ORIG_RAX(%rsp),%rsi /* get error code */
16608 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16609 call \do_sym
16610 jmp error_exit /* %ebx: no swapgs flag */
16611 CFI_ENDPROC
16612-END(\sym)
16613+ENDPROC(\sym)
16614 .endm
16615
16616 /* error code is on the stack already */
16617@@ -1104,13 +1462,23 @@ ENTRY(\sym)
16618 call save_paranoid
16619 DEFAULT_FRAME 0
16620 TRACE_IRQS_OFF
16621+#ifdef CONFIG_PAX_MEMORY_UDEREF
16622+ testb $3, CS(%rsp)
16623+ jnz 1f
16624+ pax_enter_kernel
16625+ jmp 2f
16626+1: pax_enter_kernel_user
16627+2:
16628+#else
16629+ pax_enter_kernel
16630+#endif
16631 movq %rsp,%rdi /* pt_regs pointer */
16632 movq ORIG_RAX(%rsp),%rsi /* get error code */
16633 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16634 call \do_sym
16635 jmp paranoid_exit /* %ebx: no swapgs flag */
16636 CFI_ENDPROC
16637-END(\sym)
16638+ENDPROC(\sym)
16639 .endm
16640
16641 zeroentry divide_error do_divide_error
16642@@ -1141,9 +1509,10 @@ gs_change:
16643 SWAPGS
16644 popf
16645 CFI_ADJUST_CFA_OFFSET -8
16646+ pax_force_retaddr
16647 ret
16648 CFI_ENDPROC
16649-END(native_load_gs_index)
16650+ENDPROC(native_load_gs_index)
16651
16652 .section __ex_table,"a"
16653 .align 8
16654@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16655 * of hacks for example to fork off the per-CPU idle tasks.
16656 * [Hopefully no generic code relies on the reschedule -AK]
16657 */
16658- RESTORE_ALL
16659+ RESTORE_REST
16660 UNFAKE_STACK_FRAME
16661+ pax_force_retaddr
16662 ret
16663 CFI_ENDPROC
16664-END(kernel_thread)
16665+ENDPROC(kernel_thread)
16666
16667 ENTRY(child_rip)
16668 pushq $0 # fake return address
16669@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16670 */
16671 movq %rdi, %rax
16672 movq %rsi, %rdi
16673+ pax_force_fptr %rax
16674 call *%rax
16675 # exit
16676 mov %eax, %edi
16677 call do_exit
16678 ud2 # padding for call trace
16679 CFI_ENDPROC
16680-END(child_rip)
16681+ENDPROC(child_rip)
16682
16683 /*
16684 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16685@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16686 RESTORE_REST
16687 testq %rax,%rax
16688 je int_ret_from_sys_call
16689- RESTORE_ARGS
16690 UNFAKE_STACK_FRAME
16691+ pax_force_retaddr
16692 ret
16693 CFI_ENDPROC
16694-END(kernel_execve)
16695+ENDPROC(kernel_execve)
16696
16697 /* Call softirq on interrupt stack. Interrupts are off. */
16698 ENTRY(call_softirq)
16699@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16700 CFI_DEF_CFA_REGISTER rsp
16701 CFI_ADJUST_CFA_OFFSET -8
16702 decl PER_CPU_VAR(irq_count)
16703+ pax_force_retaddr
16704 ret
16705 CFI_ENDPROC
16706-END(call_softirq)
16707+ENDPROC(call_softirq)
16708
16709 #ifdef CONFIG_XEN
16710 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16711@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16712 decl PER_CPU_VAR(irq_count)
16713 jmp error_exit
16714 CFI_ENDPROC
16715-END(xen_do_hypervisor_callback)
16716+ENDPROC(xen_do_hypervisor_callback)
16717
16718 /*
16719 * Hypervisor uses this for application faults while it executes.
16720@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16721 SAVE_ALL
16722 jmp error_exit
16723 CFI_ENDPROC
16724-END(xen_failsafe_callback)
16725+ENDPROC(xen_failsafe_callback)
16726
16727 #endif /* CONFIG_XEN */
16728
16729@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16730 TRACE_IRQS_OFF
16731 testl %ebx,%ebx /* swapgs needed? */
16732 jnz paranoid_restore
16733- testl $3,CS(%rsp)
16734+ testb $3,CS(%rsp)
16735 jnz paranoid_userspace
16736+#ifdef CONFIG_PAX_MEMORY_UDEREF
16737+ pax_exit_kernel
16738+ TRACE_IRQS_IRETQ 0
16739+ SWAPGS_UNSAFE_STACK
16740+ RESTORE_ALL 8
16741+ pax_force_retaddr_bts
16742+ jmp irq_return
16743+#endif
16744 paranoid_swapgs:
16745+#ifdef CONFIG_PAX_MEMORY_UDEREF
16746+ pax_exit_kernel_user
16747+#else
16748+ pax_exit_kernel
16749+#endif
16750 TRACE_IRQS_IRETQ 0
16751 SWAPGS_UNSAFE_STACK
16752 RESTORE_ALL 8
16753 jmp irq_return
16754 paranoid_restore:
16755+ pax_exit_kernel
16756 TRACE_IRQS_IRETQ 0
16757 RESTORE_ALL 8
16758+ pax_force_retaddr_bts
16759 jmp irq_return
16760 paranoid_userspace:
16761 GET_THREAD_INFO(%rcx)
16762@@ -1443,7 +1830,7 @@ paranoid_schedule:
16763 TRACE_IRQS_OFF
16764 jmp paranoid_userspace
16765 CFI_ENDPROC
16766-END(paranoid_exit)
16767+ENDPROC(paranoid_exit)
16768
16769 /*
16770 * Exception entry point. This expects an error code/orig_rax on the stack.
16771@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16772 movq_cfi r14, R14+8
16773 movq_cfi r15, R15+8
16774 xorl %ebx,%ebx
16775- testl $3,CS+8(%rsp)
16776+ testb $3,CS+8(%rsp)
16777 je error_kernelspace
16778 error_swapgs:
16779 SWAPGS
16780 error_sti:
16781 TRACE_IRQS_OFF
16782+ pax_force_retaddr_bts
16783 ret
16784 CFI_ENDPROC
16785
16786@@ -1497,7 +1885,7 @@ error_kernelspace:
16787 cmpq $gs_change,RIP+8(%rsp)
16788 je error_swapgs
16789 jmp error_sti
16790-END(error_entry)
16791+ENDPROC(error_entry)
16792
16793
16794 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16795@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16796 jnz retint_careful
16797 jmp retint_swapgs
16798 CFI_ENDPROC
16799-END(error_exit)
16800+ENDPROC(error_exit)
16801
16802
16803 /* runs on exception stack */
16804@@ -1529,6 +1917,16 @@ ENTRY(nmi)
16805 CFI_ADJUST_CFA_OFFSET 15*8
16806 call save_paranoid
16807 DEFAULT_FRAME 0
16808+#ifdef CONFIG_PAX_MEMORY_UDEREF
16809+ testb $3, CS(%rsp)
16810+ jnz 1f
16811+ pax_enter_kernel
16812+ jmp 2f
16813+1: pax_enter_kernel_user
16814+2:
16815+#else
16816+ pax_enter_kernel
16817+#endif
16818 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16819 movq %rsp,%rdi
16820 movq $-1,%rsi
16821@@ -1539,12 +1937,28 @@ ENTRY(nmi)
16822 DISABLE_INTERRUPTS(CLBR_NONE)
16823 testl %ebx,%ebx /* swapgs needed? */
16824 jnz nmi_restore
16825- testl $3,CS(%rsp)
16826+ testb $3,CS(%rsp)
16827 jnz nmi_userspace
16828+#ifdef CONFIG_PAX_MEMORY_UDEREF
16829+ pax_exit_kernel
16830+ SWAPGS_UNSAFE_STACK
16831+ RESTORE_ALL 8
16832+ pax_force_retaddr_bts
16833+ jmp irq_return
16834+#endif
16835 nmi_swapgs:
16836+#ifdef CONFIG_PAX_MEMORY_UDEREF
16837+ pax_exit_kernel_user
16838+#else
16839+ pax_exit_kernel
16840+#endif
16841 SWAPGS_UNSAFE_STACK
16842+ RESTORE_ALL 8
16843+ jmp irq_return
16844 nmi_restore:
16845+ pax_exit_kernel
16846 RESTORE_ALL 8
16847+ pax_force_retaddr_bts
16848 jmp irq_return
16849 nmi_userspace:
16850 GET_THREAD_INFO(%rcx)
16851@@ -1573,14 +1987,14 @@ nmi_schedule:
16852 jmp paranoid_exit
16853 CFI_ENDPROC
16854 #endif
16855-END(nmi)
16856+ENDPROC(nmi)
16857
16858 ENTRY(ignore_sysret)
16859 CFI_STARTPROC
16860 mov $-ENOSYS,%eax
16861 sysret
16862 CFI_ENDPROC
16863-END(ignore_sysret)
16864+ENDPROC(ignore_sysret)
16865
16866 /*
16867 * End of kprobes section
16868diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16869index 9dbb527..7b3615a 100644
16870--- a/arch/x86/kernel/ftrace.c
16871+++ b/arch/x86/kernel/ftrace.c
16872@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16873 static void *mod_code_newcode; /* holds the text to write to the IP */
16874
16875 static unsigned nmi_wait_count;
16876-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16877+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16878
16879 int ftrace_arch_read_dyn_info(char *buf, int size)
16880 {
16881@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16882
16883 r = snprintf(buf, size, "%u %u",
16884 nmi_wait_count,
16885- atomic_read(&nmi_update_count));
16886+ atomic_read_unchecked(&nmi_update_count));
16887 return r;
16888 }
16889
16890@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16891 {
16892 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16893 smp_rmb();
16894+ pax_open_kernel();
16895 ftrace_mod_code();
16896- atomic_inc(&nmi_update_count);
16897+ pax_close_kernel();
16898+ atomic_inc_unchecked(&nmi_update_count);
16899 }
16900 /* Must have previous changes seen before executions */
16901 smp_mb();
16902@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16903
16904
16905
16906-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16907+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16908
16909 static unsigned char *ftrace_nop_replace(void)
16910 {
16911@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16912 {
16913 unsigned char replaced[MCOUNT_INSN_SIZE];
16914
16915+ ip = ktla_ktva(ip);
16916+
16917 /*
16918 * Note: Due to modules and __init, code can
16919 * disappear and change, we need to protect against faulting
16920@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16921 unsigned char old[MCOUNT_INSN_SIZE], *new;
16922 int ret;
16923
16924- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16925+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16926 new = ftrace_call_replace(ip, (unsigned long)func);
16927 ret = ftrace_modify_code(ip, old, new);
16928
16929@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16930 switch (faulted) {
16931 case 0:
16932 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16933- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16934+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16935 break;
16936 case 1:
16937 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16938- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16939+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16940 break;
16941 case 2:
16942 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16943- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16944+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16945 break;
16946 }
16947
16948@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16949 {
16950 unsigned char code[MCOUNT_INSN_SIZE];
16951
16952+ ip = ktla_ktva(ip);
16953+
16954 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16955 return -EFAULT;
16956
16957diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16958index 4f8e250..df24706 100644
16959--- a/arch/x86/kernel/head32.c
16960+++ b/arch/x86/kernel/head32.c
16961@@ -16,6 +16,7 @@
16962 #include <asm/apic.h>
16963 #include <asm/io_apic.h>
16964 #include <asm/bios_ebda.h>
16965+#include <asm/boot.h>
16966
16967 static void __init i386_default_early_setup(void)
16968 {
16969@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16970 {
16971 reserve_trampoline_memory();
16972
16973- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16974+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16975
16976 #ifdef CONFIG_BLK_DEV_INITRD
16977 /* Reserve INITRD */
16978diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16979index 34c3308..6fc4e76 100644
16980--- a/arch/x86/kernel/head_32.S
16981+++ b/arch/x86/kernel/head_32.S
16982@@ -19,10 +19,17 @@
16983 #include <asm/setup.h>
16984 #include <asm/processor-flags.h>
16985 #include <asm/percpu.h>
16986+#include <asm/msr-index.h>
16987
16988 /* Physical address */
16989 #define pa(X) ((X) - __PAGE_OFFSET)
16990
16991+#ifdef CONFIG_PAX_KERNEXEC
16992+#define ta(X) (X)
16993+#else
16994+#define ta(X) ((X) - __PAGE_OFFSET)
16995+#endif
16996+
16997 /*
16998 * References to members of the new_cpu_data structure.
16999 */
17000@@ -52,11 +59,7 @@
17001 * and small than max_low_pfn, otherwise will waste some page table entries
17002 */
17003
17004-#if PTRS_PER_PMD > 1
17005-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17006-#else
17007-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17008-#endif
17009+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17010
17011 /* Enough space to fit pagetables for the low memory linear map */
17012 MAPPING_BEYOND_END = \
17013@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
17014 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17015
17016 /*
17017+ * Real beginning of normal "text" segment
17018+ */
17019+ENTRY(stext)
17020+ENTRY(_stext)
17021+
17022+/*
17023 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17024 * %esi points to the real-mode code as a 32-bit pointer.
17025 * CS and DS must be 4 GB flat segments, but we don't depend on
17026@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17027 * can.
17028 */
17029 __HEAD
17030+
17031+#ifdef CONFIG_PAX_KERNEXEC
17032+ jmp startup_32
17033+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17034+.fill PAGE_SIZE-5,1,0xcc
17035+#endif
17036+
17037 ENTRY(startup_32)
17038+ movl pa(stack_start),%ecx
17039+
17040 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
17041 us to not reload segments */
17042 testb $(1<<6), BP_loadflags(%esi)
17043@@ -95,7 +113,60 @@ ENTRY(startup_32)
17044 movl %eax,%es
17045 movl %eax,%fs
17046 movl %eax,%gs
17047+ movl %eax,%ss
17048 2:
17049+ leal -__PAGE_OFFSET(%ecx),%esp
17050+
17051+#ifdef CONFIG_SMP
17052+ movl $pa(cpu_gdt_table),%edi
17053+ movl $__per_cpu_load,%eax
17054+ movw %ax,__KERNEL_PERCPU + 2(%edi)
17055+ rorl $16,%eax
17056+ movb %al,__KERNEL_PERCPU + 4(%edi)
17057+ movb %ah,__KERNEL_PERCPU + 7(%edi)
17058+ movl $__per_cpu_end - 1,%eax
17059+ subl $__per_cpu_start,%eax
17060+ movw %ax,__KERNEL_PERCPU + 0(%edi)
17061+#endif
17062+
17063+#ifdef CONFIG_PAX_MEMORY_UDEREF
17064+ movl $NR_CPUS,%ecx
17065+ movl $pa(cpu_gdt_table),%edi
17066+1:
17067+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17068+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17069+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17070+ addl $PAGE_SIZE_asm,%edi
17071+ loop 1b
17072+#endif
17073+
17074+#ifdef CONFIG_PAX_KERNEXEC
17075+ movl $pa(boot_gdt),%edi
17076+ movl $__LOAD_PHYSICAL_ADDR,%eax
17077+ movw %ax,__BOOT_CS + 2(%edi)
17078+ rorl $16,%eax
17079+ movb %al,__BOOT_CS + 4(%edi)
17080+ movb %ah,__BOOT_CS + 7(%edi)
17081+ rorl $16,%eax
17082+
17083+ ljmp $(__BOOT_CS),$1f
17084+1:
17085+
17086+ movl $NR_CPUS,%ecx
17087+ movl $pa(cpu_gdt_table),%edi
17088+ addl $__PAGE_OFFSET,%eax
17089+1:
17090+ movw %ax,__KERNEL_CS + 2(%edi)
17091+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
17092+ rorl $16,%eax
17093+ movb %al,__KERNEL_CS + 4(%edi)
17094+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
17095+ movb %ah,__KERNEL_CS + 7(%edi)
17096+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
17097+ rorl $16,%eax
17098+ addl $PAGE_SIZE_asm,%edi
17099+ loop 1b
17100+#endif
17101
17102 /*
17103 * Clear BSS first so that there are no surprises...
17104@@ -140,9 +211,7 @@ ENTRY(startup_32)
17105 cmpl $num_subarch_entries, %eax
17106 jae bad_subarch
17107
17108- movl pa(subarch_entries)(,%eax,4), %eax
17109- subl $__PAGE_OFFSET, %eax
17110- jmp *%eax
17111+ jmp *pa(subarch_entries)(,%eax,4)
17112
17113 bad_subarch:
17114 WEAK(lguest_entry)
17115@@ -154,10 +223,10 @@ WEAK(xen_entry)
17116 __INITDATA
17117
17118 subarch_entries:
17119- .long default_entry /* normal x86/PC */
17120- .long lguest_entry /* lguest hypervisor */
17121- .long xen_entry /* Xen hypervisor */
17122- .long default_entry /* Moorestown MID */
17123+ .long ta(default_entry) /* normal x86/PC */
17124+ .long ta(lguest_entry) /* lguest hypervisor */
17125+ .long ta(xen_entry) /* Xen hypervisor */
17126+ .long ta(default_entry) /* Moorestown MID */
17127 num_subarch_entries = (. - subarch_entries) / 4
17128 .previous
17129 #endif /* CONFIG_PARAVIRT */
17130@@ -218,8 +287,11 @@ default_entry:
17131 movl %eax, pa(max_pfn_mapped)
17132
17133 /* Do early initialization of the fixmap area */
17134- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17135- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17136+#ifdef CONFIG_COMPAT_VDSO
17137+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17138+#else
17139+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
17140+#endif
17141 #else /* Not PAE */
17142
17143 page_pde_offset = (__PAGE_OFFSET >> 20);
17144@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
17145 movl %eax, pa(max_pfn_mapped)
17146
17147 /* Do early initialization of the fixmap area */
17148- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
17149- movl %eax,pa(swapper_pg_dir+0xffc)
17150+#ifdef CONFIG_COMPAT_VDSO
17151+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
17152+#else
17153+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
17154+#endif
17155 #endif
17156 jmp 3f
17157 /*
17158@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
17159 movl %eax,%es
17160 movl %eax,%fs
17161 movl %eax,%gs
17162+ movl pa(stack_start),%ecx
17163+ movl %eax,%ss
17164+ leal -__PAGE_OFFSET(%ecx),%esp
17165 #endif /* CONFIG_SMP */
17166 3:
17167
17168@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
17169 orl %edx,%eax
17170 movl %eax,%cr4
17171
17172+#ifdef CONFIG_X86_PAE
17173 btl $5, %eax # check if PAE is enabled
17174 jnc 6f
17175
17176@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
17177 cpuid
17178 cmpl $0x80000000, %eax
17179 jbe 6f
17180+
17181+ /* Clear bogus XD_DISABLE bits */
17182+ call verify_cpu
17183+
17184 mov $0x80000001, %eax
17185 cpuid
17186 /* Execute Disable bit supported? */
17187@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
17188 jnc 6f
17189
17190 /* Setup EFER (Extended Feature Enable Register) */
17191- movl $0xc0000080, %ecx
17192+ movl $MSR_EFER, %ecx
17193 rdmsr
17194
17195 btsl $11, %eax
17196 /* Make changes effective */
17197 wrmsr
17198
17199+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
17200+ movl $1,pa(nx_enabled)
17201+#endif
17202+
17203 6:
17204
17205 /*
17206@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
17207 movl %eax,%cr0 /* ..and set paging (PG) bit */
17208 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
17209 1:
17210- /* Set up the stack pointer */
17211- lss stack_start,%esp
17212+ /* Shift the stack pointer to a virtual address */
17213+ addl $__PAGE_OFFSET, %esp
17214
17215 /*
17216 * Initialize eflags. Some BIOS's leave bits like NT set. This would
17217@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
17218
17219 #ifdef CONFIG_SMP
17220 cmpb $0, ready
17221- jz 1f /* Initial CPU cleans BSS */
17222- jmp checkCPUtype
17223-1:
17224+ jnz checkCPUtype
17225 #endif /* CONFIG_SMP */
17226
17227 /*
17228@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17229 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17230 movl %eax,%ss # after changing gdt.
17231
17232- movl $(__USER_DS),%eax # DS/ES contains default USER segment
17233+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17234 movl %eax,%ds
17235 movl %eax,%es
17236
17237@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17238 */
17239 cmpb $0,ready
17240 jne 1f
17241- movl $per_cpu__gdt_page,%eax
17242+ movl $cpu_gdt_table,%eax
17243 movl $per_cpu__stack_canary,%ecx
17244+#ifdef CONFIG_SMP
17245+ addl $__per_cpu_load,%ecx
17246+#endif
17247 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17248 shrl $16, %ecx
17249 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17250 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17251 1:
17252-#endif
17253 movl $(__KERNEL_STACK_CANARY),%eax
17254+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17255+ movl $(__USER_DS),%eax
17256+#else
17257+ xorl %eax,%eax
17258+#endif
17259 movl %eax,%gs
17260
17261 xorl %eax,%eax # Clear LDT
17262@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17263
17264 cld # gcc2 wants the direction flag cleared at all times
17265 pushl $0 # fake return address for unwinder
17266-#ifdef CONFIG_SMP
17267- movb ready, %cl
17268 movb $1, ready
17269- cmpb $0,%cl # the first CPU calls start_kernel
17270- je 1f
17271- movl (stack_start), %esp
17272-1:
17273-#endif /* CONFIG_SMP */
17274 jmp *(initial_code)
17275
17276 /*
17277@@ -546,22 +631,22 @@ early_page_fault:
17278 jmp early_fault
17279
17280 early_fault:
17281- cld
17282 #ifdef CONFIG_PRINTK
17283+ cmpl $1,%ss:early_recursion_flag
17284+ je hlt_loop
17285+ incl %ss:early_recursion_flag
17286+ cld
17287 pusha
17288 movl $(__KERNEL_DS),%eax
17289 movl %eax,%ds
17290 movl %eax,%es
17291- cmpl $2,early_recursion_flag
17292- je hlt_loop
17293- incl early_recursion_flag
17294 movl %cr2,%eax
17295 pushl %eax
17296 pushl %edx /* trapno */
17297 pushl $fault_msg
17298 call printk
17299+; call dump_stack
17300 #endif
17301- call dump_stack
17302 hlt_loop:
17303 hlt
17304 jmp hlt_loop
17305@@ -569,8 +654,11 @@ hlt_loop:
17306 /* This is the default interrupt "handler" :-) */
17307 ALIGN
17308 ignore_int:
17309- cld
17310 #ifdef CONFIG_PRINTK
17311+ cmpl $2,%ss:early_recursion_flag
17312+ je hlt_loop
17313+ incl %ss:early_recursion_flag
17314+ cld
17315 pushl %eax
17316 pushl %ecx
17317 pushl %edx
17318@@ -579,9 +667,6 @@ ignore_int:
17319 movl $(__KERNEL_DS),%eax
17320 movl %eax,%ds
17321 movl %eax,%es
17322- cmpl $2,early_recursion_flag
17323- je hlt_loop
17324- incl early_recursion_flag
17325 pushl 16(%esp)
17326 pushl 24(%esp)
17327 pushl 32(%esp)
17328@@ -600,6 +685,8 @@ ignore_int:
17329 #endif
17330 iret
17331
17332+#include "verify_cpu.S"
17333+
17334 __REFDATA
17335 .align 4
17336 ENTRY(initial_code)
17337@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17338 /*
17339 * BSS section
17340 */
17341-__PAGE_ALIGNED_BSS
17342- .align PAGE_SIZE_asm
17343 #ifdef CONFIG_X86_PAE
17344+.section .swapper_pg_pmd,"a",@progbits
17345 swapper_pg_pmd:
17346 .fill 1024*KPMDS,4,0
17347 #else
17348+.section .swapper_pg_dir,"a",@progbits
17349 ENTRY(swapper_pg_dir)
17350 .fill 1024,4,0
17351 #endif
17352+.section .swapper_pg_fixmap,"a",@progbits
17353 swapper_pg_fixmap:
17354 .fill 1024,4,0
17355 #ifdef CONFIG_X86_TRAMPOLINE
17356+.section .trampoline_pg_dir,"a",@progbits
17357 ENTRY(trampoline_pg_dir)
17358+#ifdef CONFIG_X86_PAE
17359+ .fill 4,8,0
17360+#else
17361 .fill 1024,4,0
17362 #endif
17363+#endif
17364+
17365+.section .empty_zero_page,"a",@progbits
17366 ENTRY(empty_zero_page)
17367 .fill 4096,1,0
17368
17369 /*
17370+ * The IDT has to be page-aligned to simplify the Pentium
17371+ * F0 0F bug workaround.. We have a special link segment
17372+ * for this.
17373+ */
17374+.section .idt,"a",@progbits
17375+ENTRY(idt_table)
17376+ .fill 256,8,0
17377+
17378+/*
17379 * This starts the data section.
17380 */
17381 #ifdef CONFIG_X86_PAE
17382-__PAGE_ALIGNED_DATA
17383- /* Page-aligned for the benefit of paravirt? */
17384- .align PAGE_SIZE_asm
17385+.section .swapper_pg_dir,"a",@progbits
17386+
17387 ENTRY(swapper_pg_dir)
17388 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17389 # if KPMDS == 3
17390@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17391 # error "Kernel PMDs should be 1, 2 or 3"
17392 # endif
17393 .align PAGE_SIZE_asm /* needs to be page-sized too */
17394+
17395+#ifdef CONFIG_PAX_PER_CPU_PGD
17396+ENTRY(cpu_pgd)
17397+ .rept NR_CPUS
17398+ .fill 4,8,0
17399+ .endr
17400+#endif
17401+
17402 #endif
17403
17404 .data
17405+.balign 4
17406 ENTRY(stack_start)
17407- .long init_thread_union+THREAD_SIZE
17408- .long __BOOT_DS
17409+ .long init_thread_union+THREAD_SIZE-8
17410
17411 ready: .byte 0
17412
17413+.section .rodata,"a",@progbits
17414 early_recursion_flag:
17415 .long 0
17416
17417@@ -697,7 +809,7 @@ fault_msg:
17418 .word 0 # 32 bit align gdt_desc.address
17419 boot_gdt_descr:
17420 .word __BOOT_DS+7
17421- .long boot_gdt - __PAGE_OFFSET
17422+ .long pa(boot_gdt)
17423
17424 .word 0 # 32-bit align idt_desc.address
17425 idt_descr:
17426@@ -708,7 +820,7 @@ idt_descr:
17427 .word 0 # 32 bit align gdt_desc.address
17428 ENTRY(early_gdt_descr)
17429 .word GDT_ENTRIES*8-1
17430- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17431+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17432
17433 /*
17434 * The boot_gdt must mirror the equivalent in setup.S and is
17435@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17436 .align L1_CACHE_BYTES
17437 ENTRY(boot_gdt)
17438 .fill GDT_ENTRY_BOOT_CS,8,0
17439- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17440- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17441+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17442+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17443+
17444+ .align PAGE_SIZE_asm
17445+ENTRY(cpu_gdt_table)
17446+ .rept NR_CPUS
17447+ .quad 0x0000000000000000 /* NULL descriptor */
17448+ .quad 0x0000000000000000 /* 0x0b reserved */
17449+ .quad 0x0000000000000000 /* 0x13 reserved */
17450+ .quad 0x0000000000000000 /* 0x1b reserved */
17451+
17452+#ifdef CONFIG_PAX_KERNEXEC
17453+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17454+#else
17455+ .quad 0x0000000000000000 /* 0x20 unused */
17456+#endif
17457+
17458+ .quad 0x0000000000000000 /* 0x28 unused */
17459+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17460+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17461+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17462+ .quad 0x0000000000000000 /* 0x4b reserved */
17463+ .quad 0x0000000000000000 /* 0x53 reserved */
17464+ .quad 0x0000000000000000 /* 0x5b reserved */
17465+
17466+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17467+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17468+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17469+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17470+
17471+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17472+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17473+
17474+ /*
17475+ * Segments used for calling PnP BIOS have byte granularity.
17476+ * The code segments and data segments have fixed 64k limits,
17477+ * the transfer segment sizes are set at run time.
17478+ */
17479+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17480+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17481+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17482+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17483+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17484+
17485+ /*
17486+ * The APM segments have byte granularity and their bases
17487+ * are set at run time. All have 64k limits.
17488+ */
17489+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17490+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17491+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17492+
17493+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17494+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17495+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17496+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17497+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17498+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17499+
17500+ /* Be sure this is zeroed to avoid false validations in Xen */
17501+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17502+ .endr
17503diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17504index 780cd92..758b2a6 100644
17505--- a/arch/x86/kernel/head_64.S
17506+++ b/arch/x86/kernel/head_64.S
17507@@ -19,6 +19,8 @@
17508 #include <asm/cache.h>
17509 #include <asm/processor-flags.h>
17510 #include <asm/percpu.h>
17511+#include <asm/cpufeature.h>
17512+#include <asm/alternative-asm.h>
17513
17514 #ifdef CONFIG_PARAVIRT
17515 #include <asm/asm-offsets.h>
17516@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17517 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17518 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17519 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17520+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17521+L3_VMALLOC_START = pud_index(VMALLOC_START)
17522+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17523+L3_VMALLOC_END = pud_index(VMALLOC_END)
17524+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17525+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17526
17527 .text
17528 __HEAD
17529@@ -85,35 +93,23 @@ startup_64:
17530 */
17531 addq %rbp, init_level4_pgt + 0(%rip)
17532 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17533+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17534+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17535+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17536 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17537
17538 addq %rbp, level3_ident_pgt + 0(%rip)
17539+#ifndef CONFIG_XEN
17540+ addq %rbp, level3_ident_pgt + 8(%rip)
17541+#endif
17542
17543- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17544- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17545+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17546+
17547+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17548+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17549
17550 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17551-
17552- /* Add an Identity mapping if I am above 1G */
17553- leaq _text(%rip), %rdi
17554- andq $PMD_PAGE_MASK, %rdi
17555-
17556- movq %rdi, %rax
17557- shrq $PUD_SHIFT, %rax
17558- andq $(PTRS_PER_PUD - 1), %rax
17559- jz ident_complete
17560-
17561- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17562- leaq level3_ident_pgt(%rip), %rbx
17563- movq %rdx, 0(%rbx, %rax, 8)
17564-
17565- movq %rdi, %rax
17566- shrq $PMD_SHIFT, %rax
17567- andq $(PTRS_PER_PMD - 1), %rax
17568- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17569- leaq level2_spare_pgt(%rip), %rbx
17570- movq %rdx, 0(%rbx, %rax, 8)
17571-ident_complete:
17572+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17573
17574 /*
17575 * Fixup the kernel text+data virtual addresses. Note that
17576@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17577 * after the boot processor executes this code.
17578 */
17579
17580- /* Enable PAE mode and PGE */
17581- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17582+ /* Enable PAE mode and PSE/PGE */
17583+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17584 movq %rax, %cr4
17585
17586 /* Setup early boot stage 4 level pagetables. */
17587@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17588 movl $MSR_EFER, %ecx
17589 rdmsr
17590 btsl $_EFER_SCE, %eax /* Enable System Call */
17591- btl $20,%edi /* No Execute supported? */
17592+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17593 jnc 1f
17594 btsl $_EFER_NX, %eax
17595+ leaq init_level4_pgt(%rip), %rdi
17596+#ifndef CONFIG_EFI
17597+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17598+#endif
17599+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17600+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17601+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17602 1: wrmsr /* Make changes effective */
17603
17604 /* Setup cr0 */
17605@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17606 * jump. In addition we need to ensure %cs is set so we make this
17607 * a far return.
17608 */
17609+ pax_set_fptr_mask
17610 movq initial_code(%rip),%rax
17611 pushq $0 # fake return address to stop unwinder
17612 pushq $__KERNEL_CS # set correct cs
17613@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17614 .quad x86_64_start_kernel
17615 ENTRY(initial_gs)
17616 .quad INIT_PER_CPU_VAR(irq_stack_union)
17617- __FINITDATA
17618
17619 ENTRY(stack_start)
17620 .quad init_thread_union+THREAD_SIZE-8
17621 .word 0
17622+ __FINITDATA
17623
17624 bad_address:
17625 jmp bad_address
17626
17627- .section ".init.text","ax"
17628+ __INIT
17629 #ifdef CONFIG_EARLY_PRINTK
17630 .globl early_idt_handlers
17631 early_idt_handlers:
17632@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17633 #endif /* EARLY_PRINTK */
17634 1: hlt
17635 jmp 1b
17636+ .previous
17637
17638 #ifdef CONFIG_EARLY_PRINTK
17639+ __INITDATA
17640 early_recursion_flag:
17641 .long 0
17642+ .previous
17643
17644+ .section .rodata,"a",@progbits
17645 early_idt_msg:
17646 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17647 early_idt_ripmsg:
17648 .asciz "RIP %s\n"
17649+ .previous
17650 #endif /* CONFIG_EARLY_PRINTK */
17651- .previous
17652
17653+ .section .rodata,"a",@progbits
17654 #define NEXT_PAGE(name) \
17655 .balign PAGE_SIZE; \
17656 ENTRY(name)
17657@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17658 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17659 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17660 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17661+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17662+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17663+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17664+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17665+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17666+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17667 .org init_level4_pgt + L4_START_KERNEL*8, 0
17668 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17669 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17670
17671+#ifdef CONFIG_PAX_PER_CPU_PGD
17672+NEXT_PAGE(cpu_pgd)
17673+ .rept NR_CPUS
17674+ .fill 512,8,0
17675+ .endr
17676+#endif
17677+
17678 NEXT_PAGE(level3_ident_pgt)
17679 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17680+#ifdef CONFIG_XEN
17681 .fill 511,8,0
17682+#else
17683+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17684+ .fill 510,8,0
17685+#endif
17686+
17687+NEXT_PAGE(level3_vmalloc_start_pgt)
17688+ .fill 512,8,0
17689+
17690+NEXT_PAGE(level3_vmalloc_end_pgt)
17691+ .fill 512,8,0
17692+
17693+NEXT_PAGE(level3_vmemmap_pgt)
17694+ .fill L3_VMEMMAP_START,8,0
17695+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17696
17697 NEXT_PAGE(level3_kernel_pgt)
17698 .fill L3_START_KERNEL,8,0
17699@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17700 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17701 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17702
17703+NEXT_PAGE(level2_vmemmap_pgt)
17704+ .fill 512,8,0
17705+
17706 NEXT_PAGE(level2_fixmap_pgt)
17707- .fill 506,8,0
17708- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17709- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17710- .fill 5,8,0
17711+ .fill 507,8,0
17712+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17713+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17714+ .fill 4,8,0
17715
17716-NEXT_PAGE(level1_fixmap_pgt)
17717+NEXT_PAGE(level1_vsyscall_pgt)
17718 .fill 512,8,0
17719
17720-NEXT_PAGE(level2_ident_pgt)
17721- /* Since I easily can, map the first 1G.
17722+ /* Since I easily can, map the first 2G.
17723 * Don't set NX because code runs from these pages.
17724 */
17725- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17726+NEXT_PAGE(level2_ident_pgt)
17727+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17728
17729 NEXT_PAGE(level2_kernel_pgt)
17730 /*
17731@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17732 * If you want to increase this then increase MODULES_VADDR
17733 * too.)
17734 */
17735- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17736- KERNEL_IMAGE_SIZE/PMD_SIZE)
17737-
17738-NEXT_PAGE(level2_spare_pgt)
17739- .fill 512, 8, 0
17740+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17741
17742 #undef PMDS
17743 #undef NEXT_PAGE
17744
17745- .data
17746+ .align PAGE_SIZE
17747+ENTRY(cpu_gdt_table)
17748+ .rept NR_CPUS
17749+ .quad 0x0000000000000000 /* NULL descriptor */
17750+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17751+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17752+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17753+ .quad 0x00cffb000000ffff /* __USER32_CS */
17754+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17755+ .quad 0x00affb000000ffff /* __USER_CS */
17756+
17757+#ifdef CONFIG_PAX_KERNEXEC
17758+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17759+#else
17760+ .quad 0x0 /* unused */
17761+#endif
17762+
17763+ .quad 0,0 /* TSS */
17764+ .quad 0,0 /* LDT */
17765+ .quad 0,0,0 /* three TLS descriptors */
17766+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17767+ /* asm/segment.h:GDT_ENTRIES must match this */
17768+
17769+ /* zero the remaining page */
17770+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17771+ .endr
17772+
17773 .align 16
17774 .globl early_gdt_descr
17775 early_gdt_descr:
17776 .word GDT_ENTRIES*8-1
17777 early_gdt_descr_base:
17778- .quad INIT_PER_CPU_VAR(gdt_page)
17779+ .quad cpu_gdt_table
17780
17781 ENTRY(phys_base)
17782 /* This must match the first entry in level2_kernel_pgt */
17783 .quad 0x0000000000000000
17784
17785 #include "../../x86/xen/xen-head.S"
17786-
17787- .section .bss, "aw", @nobits
17788+
17789+ .section .rodata,"a",@progbits
17790 .align L1_CACHE_BYTES
17791 ENTRY(idt_table)
17792- .skip IDT_ENTRIES * 16
17793+ .fill 512,8,0
17794
17795 __PAGE_ALIGNED_BSS
17796 .align PAGE_SIZE
17797diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17798index 9c3bd4a..e1d9b35 100644
17799--- a/arch/x86/kernel/i386_ksyms_32.c
17800+++ b/arch/x86/kernel/i386_ksyms_32.c
17801@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17802 EXPORT_SYMBOL(cmpxchg8b_emu);
17803 #endif
17804
17805+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17806+
17807 /* Networking helper routines. */
17808 EXPORT_SYMBOL(csum_partial_copy_generic);
17809+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17810+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17811
17812 EXPORT_SYMBOL(__get_user_1);
17813 EXPORT_SYMBOL(__get_user_2);
17814@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17815
17816 EXPORT_SYMBOL(csum_partial);
17817 EXPORT_SYMBOL(empty_zero_page);
17818+
17819+#ifdef CONFIG_PAX_KERNEXEC
17820+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17821+#endif
17822diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17823index df89102..a244320 100644
17824--- a/arch/x86/kernel/i8259.c
17825+++ b/arch/x86/kernel/i8259.c
17826@@ -208,7 +208,7 @@ spurious_8259A_irq:
17827 "spurious 8259A interrupt: IRQ%d.\n", irq);
17828 spurious_irq_mask |= irqmask;
17829 }
17830- atomic_inc(&irq_err_count);
17831+ atomic_inc_unchecked(&irq_err_count);
17832 /*
17833 * Theoretically we do not have to handle this IRQ,
17834 * but in Linux this does not cause problems and is
17835diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17836index 3a54dcb..1c22348 100644
17837--- a/arch/x86/kernel/init_task.c
17838+++ b/arch/x86/kernel/init_task.c
17839@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17840 * way process stacks are handled. This is done by having a special
17841 * "init_task" linker map entry..
17842 */
17843-union thread_union init_thread_union __init_task_data =
17844- { INIT_THREAD_INFO(init_task) };
17845+union thread_union init_thread_union __init_task_data;
17846
17847 /*
17848 * Initial task structure.
17849@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17850 * section. Since TSS's are completely CPU-local, we want them
17851 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17852 */
17853-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17854-
17855+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17856+EXPORT_SYMBOL(init_tss);
17857diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17858index 99c4d30..74c84e9 100644
17859--- a/arch/x86/kernel/ioport.c
17860+++ b/arch/x86/kernel/ioport.c
17861@@ -6,6 +6,7 @@
17862 #include <linux/sched.h>
17863 #include <linux/kernel.h>
17864 #include <linux/capability.h>
17865+#include <linux/security.h>
17866 #include <linux/errno.h>
17867 #include <linux/types.h>
17868 #include <linux/ioport.h>
17869@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17870
17871 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17872 return -EINVAL;
17873+#ifdef CONFIG_GRKERNSEC_IO
17874+ if (turn_on && grsec_disable_privio) {
17875+ gr_handle_ioperm();
17876+ return -EPERM;
17877+ }
17878+#endif
17879 if (turn_on && !capable(CAP_SYS_RAWIO))
17880 return -EPERM;
17881
17882@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17883 * because the ->io_bitmap_max value must match the bitmap
17884 * contents:
17885 */
17886- tss = &per_cpu(init_tss, get_cpu());
17887+ tss = init_tss + get_cpu();
17888
17889 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17890
17891@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17892 return -EINVAL;
17893 /* Trying to gain more privileges? */
17894 if (level > old) {
17895+#ifdef CONFIG_GRKERNSEC_IO
17896+ if (grsec_disable_privio) {
17897+ gr_handle_iopl();
17898+ return -EPERM;
17899+ }
17900+#endif
17901 if (!capable(CAP_SYS_RAWIO))
17902 return -EPERM;
17903 }
17904diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17905index 04bbd52..83a07d9 100644
17906--- a/arch/x86/kernel/irq.c
17907+++ b/arch/x86/kernel/irq.c
17908@@ -15,7 +15,7 @@
17909 #include <asm/mce.h>
17910 #include <asm/hw_irq.h>
17911
17912-atomic_t irq_err_count;
17913+atomic_unchecked_t irq_err_count;
17914
17915 /* Function pointer for generic interrupt vector handling */
17916 void (*generic_interrupt_extension)(void) = NULL;
17917@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17918 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17919 seq_printf(p, " Machine check polls\n");
17920 #endif
17921- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17922+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17923 #if defined(CONFIG_X86_IO_APIC)
17924- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17925+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17926 #endif
17927 return 0;
17928 }
17929@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17930
17931 u64 arch_irq_stat(void)
17932 {
17933- u64 sum = atomic_read(&irq_err_count);
17934+ u64 sum = atomic_read_unchecked(&irq_err_count);
17935
17936 #ifdef CONFIG_X86_IO_APIC
17937- sum += atomic_read(&irq_mis_count);
17938+ sum += atomic_read_unchecked(&irq_mis_count);
17939 #endif
17940 return sum;
17941 }
17942diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17943index 7d35d0f..03f1d52 100644
17944--- a/arch/x86/kernel/irq_32.c
17945+++ b/arch/x86/kernel/irq_32.c
17946@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17947 __asm__ __volatile__("andl %%esp,%0" :
17948 "=r" (sp) : "0" (THREAD_SIZE - 1));
17949
17950- return sp < (sizeof(struct thread_info) + STACK_WARN);
17951+ return sp < STACK_WARN;
17952 }
17953
17954 static void print_stack_overflow(void)
17955@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17956 * per-CPU IRQ handling contexts (thread information and stack)
17957 */
17958 union irq_ctx {
17959- struct thread_info tinfo;
17960- u32 stack[THREAD_SIZE/sizeof(u32)];
17961-} __attribute__((aligned(PAGE_SIZE)));
17962+ unsigned long previous_esp;
17963+ u32 stack[THREAD_SIZE/sizeof(u32)];
17964+} __attribute__((aligned(THREAD_SIZE)));
17965
17966 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17967 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17968@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17969 static inline int
17970 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17971 {
17972- union irq_ctx *curctx, *irqctx;
17973+ union irq_ctx *irqctx;
17974 u32 *isp, arg1, arg2;
17975
17976- curctx = (union irq_ctx *) current_thread_info();
17977 irqctx = __get_cpu_var(hardirq_ctx);
17978
17979 /*
17980@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17981 * handler) we can't do that and just have to keep using the
17982 * current stack (which is the irq stack already after all)
17983 */
17984- if (unlikely(curctx == irqctx))
17985+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17986 return 0;
17987
17988 /* build the stack frame on the IRQ stack */
17989- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17990- irqctx->tinfo.task = curctx->tinfo.task;
17991- irqctx->tinfo.previous_esp = current_stack_pointer;
17992+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17993+ irqctx->previous_esp = current_stack_pointer;
17994
17995- /*
17996- * Copy the softirq bits in preempt_count so that the
17997- * softirq checks work in the hardirq context.
17998- */
17999- irqctx->tinfo.preempt_count =
18000- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
18001- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
18002+#ifdef CONFIG_PAX_MEMORY_UDEREF
18003+ __set_fs(MAKE_MM_SEG(0));
18004+#endif
18005
18006 if (unlikely(overflow))
18007 call_on_stack(print_stack_overflow, isp);
18008@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18009 : "0" (irq), "1" (desc), "2" (isp),
18010 "D" (desc->handle_irq)
18011 : "memory", "cc", "ecx");
18012+
18013+#ifdef CONFIG_PAX_MEMORY_UDEREF
18014+ __set_fs(current_thread_info()->addr_limit);
18015+#endif
18016+
18017 return 1;
18018 }
18019
18020@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18021 */
18022 void __cpuinit irq_ctx_init(int cpu)
18023 {
18024- union irq_ctx *irqctx;
18025-
18026 if (per_cpu(hardirq_ctx, cpu))
18027 return;
18028
18029- irqctx = &per_cpu(hardirq_stack, cpu);
18030- irqctx->tinfo.task = NULL;
18031- irqctx->tinfo.exec_domain = NULL;
18032- irqctx->tinfo.cpu = cpu;
18033- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18034- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18035-
18036- per_cpu(hardirq_ctx, cpu) = irqctx;
18037-
18038- irqctx = &per_cpu(softirq_stack, cpu);
18039- irqctx->tinfo.task = NULL;
18040- irqctx->tinfo.exec_domain = NULL;
18041- irqctx->tinfo.cpu = cpu;
18042- irqctx->tinfo.preempt_count = 0;
18043- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18044-
18045- per_cpu(softirq_ctx, cpu) = irqctx;
18046+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
18047+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
18048
18049 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18050 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18051@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
18052 asmlinkage void do_softirq(void)
18053 {
18054 unsigned long flags;
18055- struct thread_info *curctx;
18056 union irq_ctx *irqctx;
18057 u32 *isp;
18058
18059@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
18060 local_irq_save(flags);
18061
18062 if (local_softirq_pending()) {
18063- curctx = current_thread_info();
18064 irqctx = __get_cpu_var(softirq_ctx);
18065- irqctx->tinfo.task = curctx->task;
18066- irqctx->tinfo.previous_esp = current_stack_pointer;
18067+ irqctx->previous_esp = current_stack_pointer;
18068
18069 /* build the stack frame on the softirq stack */
18070- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18071+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18072+
18073+#ifdef CONFIG_PAX_MEMORY_UDEREF
18074+ __set_fs(MAKE_MM_SEG(0));
18075+#endif
18076
18077 call_on_stack(__do_softirq, isp);
18078+
18079+#ifdef CONFIG_PAX_MEMORY_UDEREF
18080+ __set_fs(current_thread_info()->addr_limit);
18081+#endif
18082+
18083 /*
18084 * Shouldnt happen, we returned above if in_interrupt():
18085 */
18086diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18087index 8d82a77..0baf312 100644
18088--- a/arch/x86/kernel/kgdb.c
18089+++ b/arch/x86/kernel/kgdb.c
18090@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18091
18092 /* clear the trace bit */
18093 linux_regs->flags &= ~X86_EFLAGS_TF;
18094- atomic_set(&kgdb_cpu_doing_single_step, -1);
18095+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18096
18097 /* set the trace bit if we're stepping */
18098 if (remcomInBuffer[0] == 's') {
18099 linux_regs->flags |= X86_EFLAGS_TF;
18100 kgdb_single_step = 1;
18101- atomic_set(&kgdb_cpu_doing_single_step,
18102+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18103 raw_smp_processor_id());
18104 }
18105
18106@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18107 break;
18108
18109 case DIE_DEBUG:
18110- if (atomic_read(&kgdb_cpu_doing_single_step) ==
18111+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
18112 raw_smp_processor_id()) {
18113 if (user_mode(regs))
18114 return single_step_cont(regs, args);
18115@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
18116 return instruction_pointer(regs);
18117 }
18118
18119-struct kgdb_arch arch_kgdb_ops = {
18120+const struct kgdb_arch arch_kgdb_ops = {
18121 /* Breakpoint instruction: */
18122 .gdb_bpt_instr = { 0xcc },
18123 .flags = KGDB_HW_BREAKPOINT,
18124diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
18125index 7a67820..70ea187 100644
18126--- a/arch/x86/kernel/kprobes.c
18127+++ b/arch/x86/kernel/kprobes.c
18128@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
18129 char op;
18130 s32 raddr;
18131 } __attribute__((packed)) * jop;
18132- jop = (struct __arch_jmp_op *)from;
18133+
18134+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
18135+
18136+ pax_open_kernel();
18137 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
18138 jop->op = RELATIVEJUMP_INSTRUCTION;
18139+ pax_close_kernel();
18140 }
18141
18142 /*
18143@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
18144 kprobe_opcode_t opcode;
18145 kprobe_opcode_t *orig_opcodes = opcodes;
18146
18147- if (search_exception_tables((unsigned long)opcodes))
18148+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
18149 return 0; /* Page fault may occur on this address. */
18150
18151 retry:
18152@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
18153 disp = (u8 *) p->addr + *((s32 *) insn) -
18154 (u8 *) p->ainsn.insn;
18155 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
18156+ pax_open_kernel();
18157 *(s32 *)insn = (s32) disp;
18158+ pax_close_kernel();
18159 }
18160 }
18161 #endif
18162@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
18163
18164 static void __kprobes arch_copy_kprobe(struct kprobe *p)
18165 {
18166- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18167+ pax_open_kernel();
18168+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
18169+ pax_close_kernel();
18170
18171 fix_riprel(p);
18172
18173- if (can_boost(p->addr))
18174+ if (can_boost(ktla_ktva(p->addr)))
18175 p->ainsn.boostable = 0;
18176 else
18177 p->ainsn.boostable = -1;
18178
18179- p->opcode = *p->addr;
18180+ p->opcode = *(ktla_ktva(p->addr));
18181 }
18182
18183 int __kprobes arch_prepare_kprobe(struct kprobe *p)
18184@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
18185 if (p->opcode == BREAKPOINT_INSTRUCTION)
18186 regs->ip = (unsigned long)p->addr;
18187 else
18188- regs->ip = (unsigned long)p->ainsn.insn;
18189+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18190 }
18191
18192 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
18193@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
18194 if (p->ainsn.boostable == 1 && !p->post_handler) {
18195 /* Boost up -- we can execute copied instructions directly */
18196 reset_current_kprobe();
18197- regs->ip = (unsigned long)p->ainsn.insn;
18198+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18199 preempt_enable_no_resched();
18200 return;
18201 }
18202@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18203 struct kprobe_ctlblk *kcb;
18204
18205 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
18206- if (*addr != BREAKPOINT_INSTRUCTION) {
18207+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18208 /*
18209 * The breakpoint instruction was removed right
18210 * after we hit it. Another cpu has removed
18211@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18212 /* Skip orig_ax, ip, cs */
18213 " addq $24, %rsp\n"
18214 " popfq\n"
18215+#ifdef KERNEXEC_PLUGIN
18216+ " btsq $63,(%rsp)\n"
18217+#endif
18218 #else
18219 " pushf\n"
18220 /*
18221@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18222 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18223 {
18224 unsigned long *tos = stack_addr(regs);
18225- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18226+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18227 unsigned long orig_ip = (unsigned long)p->addr;
18228 kprobe_opcode_t *insn = p->ainsn.insn;
18229
18230@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18231 struct die_args *args = data;
18232 int ret = NOTIFY_DONE;
18233
18234- if (args->regs && user_mode_vm(args->regs))
18235+ if (args->regs && user_mode(args->regs))
18236 return ret;
18237
18238 switch (val) {
18239diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18240index 63b0ec8..6d92227 100644
18241--- a/arch/x86/kernel/kvm.c
18242+++ b/arch/x86/kernel/kvm.c
18243@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18244 pv_mmu_ops.set_pud = kvm_set_pud;
18245 #if PAGETABLE_LEVELS == 4
18246 pv_mmu_ops.set_pgd = kvm_set_pgd;
18247+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18248 #endif
18249 #endif
18250 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18251diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18252index ec6ef60..ab2c824 100644
18253--- a/arch/x86/kernel/ldt.c
18254+++ b/arch/x86/kernel/ldt.c
18255@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18256 if (reload) {
18257 #ifdef CONFIG_SMP
18258 preempt_disable();
18259- load_LDT(pc);
18260+ load_LDT_nolock(pc);
18261 if (!cpumask_equal(mm_cpumask(current->mm),
18262 cpumask_of(smp_processor_id())))
18263 smp_call_function(flush_ldt, current->mm, 1);
18264 preempt_enable();
18265 #else
18266- load_LDT(pc);
18267+ load_LDT_nolock(pc);
18268 #endif
18269 }
18270 if (oldsize) {
18271@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18272 return err;
18273
18274 for (i = 0; i < old->size; i++)
18275- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18276+ write_ldt_entry(new->ldt, i, old->ldt + i);
18277 return 0;
18278 }
18279
18280@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18281 retval = copy_ldt(&mm->context, &old_mm->context);
18282 mutex_unlock(&old_mm->context.lock);
18283 }
18284+
18285+ if (tsk == current) {
18286+ mm->context.vdso = 0;
18287+
18288+#ifdef CONFIG_X86_32
18289+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18290+ mm->context.user_cs_base = 0UL;
18291+ mm->context.user_cs_limit = ~0UL;
18292+
18293+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18294+ cpus_clear(mm->context.cpu_user_cs_mask);
18295+#endif
18296+
18297+#endif
18298+#endif
18299+
18300+ }
18301+
18302 return retval;
18303 }
18304
18305@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18306 }
18307 }
18308
18309+#ifdef CONFIG_PAX_SEGMEXEC
18310+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18311+ error = -EINVAL;
18312+ goto out_unlock;
18313+ }
18314+#endif
18315+
18316 fill_ldt(&ldt, &ldt_info);
18317 if (oldmode)
18318 ldt.avl = 0;
18319diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18320index c1c429d..f02eaf9 100644
18321--- a/arch/x86/kernel/machine_kexec_32.c
18322+++ b/arch/x86/kernel/machine_kexec_32.c
18323@@ -26,7 +26,7 @@
18324 #include <asm/system.h>
18325 #include <asm/cacheflush.h>
18326
18327-static void set_idt(void *newidt, __u16 limit)
18328+static void set_idt(struct desc_struct *newidt, __u16 limit)
18329 {
18330 struct desc_ptr curidt;
18331
18332@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18333 }
18334
18335
18336-static void set_gdt(void *newgdt, __u16 limit)
18337+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18338 {
18339 struct desc_ptr curgdt;
18340
18341@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18342 }
18343
18344 control_page = page_address(image->control_code_page);
18345- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18346+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18347
18348 relocate_kernel_ptr = control_page;
18349 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18350diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18351index 1e47679..e73449d 100644
18352--- a/arch/x86/kernel/microcode_amd.c
18353+++ b/arch/x86/kernel/microcode_amd.c
18354@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18355 uci->mc = NULL;
18356 }
18357
18358-static struct microcode_ops microcode_amd_ops = {
18359+static const struct microcode_ops microcode_amd_ops = {
18360 .request_microcode_user = request_microcode_user,
18361 .request_microcode_fw = request_microcode_fw,
18362 .collect_cpu_info = collect_cpu_info_amd,
18363@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18364 .microcode_fini_cpu = microcode_fini_cpu_amd,
18365 };
18366
18367-struct microcode_ops * __init init_amd_microcode(void)
18368+const struct microcode_ops * __init init_amd_microcode(void)
18369 {
18370 return &microcode_amd_ops;
18371 }
18372diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18373index 378e9a8..b5a6ea9 100644
18374--- a/arch/x86/kernel/microcode_core.c
18375+++ b/arch/x86/kernel/microcode_core.c
18376@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18377
18378 #define MICROCODE_VERSION "2.00"
18379
18380-static struct microcode_ops *microcode_ops;
18381+static const struct microcode_ops *microcode_ops;
18382
18383 /*
18384 * Synchronization.
18385diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18386index 0d334dd..14cedaf 100644
18387--- a/arch/x86/kernel/microcode_intel.c
18388+++ b/arch/x86/kernel/microcode_intel.c
18389@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18390
18391 static int get_ucode_user(void *to, const void *from, size_t n)
18392 {
18393- return copy_from_user(to, from, n);
18394+ return copy_from_user(to, (const void __force_user *)from, n);
18395 }
18396
18397 static enum ucode_state
18398 request_microcode_user(int cpu, const void __user *buf, size_t size)
18399 {
18400- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18401+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18402 }
18403
18404 static void microcode_fini_cpu(int cpu)
18405@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18406 uci->mc = NULL;
18407 }
18408
18409-static struct microcode_ops microcode_intel_ops = {
18410+static const struct microcode_ops microcode_intel_ops = {
18411 .request_microcode_user = request_microcode_user,
18412 .request_microcode_fw = request_microcode_fw,
18413 .collect_cpu_info = collect_cpu_info,
18414@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18415 .microcode_fini_cpu = microcode_fini_cpu,
18416 };
18417
18418-struct microcode_ops * __init init_intel_microcode(void)
18419+const struct microcode_ops * __init init_intel_microcode(void)
18420 {
18421 return &microcode_intel_ops;
18422 }
18423diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18424index 89f386f..9028f51 100644
18425--- a/arch/x86/kernel/module.c
18426+++ b/arch/x86/kernel/module.c
18427@@ -34,7 +34,7 @@
18428 #define DEBUGP(fmt...)
18429 #endif
18430
18431-void *module_alloc(unsigned long size)
18432+static void *__module_alloc(unsigned long size, pgprot_t prot)
18433 {
18434 struct vm_struct *area;
18435
18436@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18437 if (!area)
18438 return NULL;
18439
18440- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18441- PAGE_KERNEL_EXEC);
18442+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18443+}
18444+
18445+void *module_alloc(unsigned long size)
18446+{
18447+
18448+#ifdef CONFIG_PAX_KERNEXEC
18449+ return __module_alloc(size, PAGE_KERNEL);
18450+#else
18451+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18452+#endif
18453+
18454 }
18455
18456 /* Free memory returned from module_alloc */
18457@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18458 vfree(module_region);
18459 }
18460
18461+#ifdef CONFIG_PAX_KERNEXEC
18462+#ifdef CONFIG_X86_32
18463+void *module_alloc_exec(unsigned long size)
18464+{
18465+ struct vm_struct *area;
18466+
18467+ if (size == 0)
18468+ return NULL;
18469+
18470+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18471+ return area ? area->addr : NULL;
18472+}
18473+EXPORT_SYMBOL(module_alloc_exec);
18474+
18475+void module_free_exec(struct module *mod, void *module_region)
18476+{
18477+ vunmap(module_region);
18478+}
18479+EXPORT_SYMBOL(module_free_exec);
18480+#else
18481+void module_free_exec(struct module *mod, void *module_region)
18482+{
18483+ module_free(mod, module_region);
18484+}
18485+EXPORT_SYMBOL(module_free_exec);
18486+
18487+void *module_alloc_exec(unsigned long size)
18488+{
18489+ return __module_alloc(size, PAGE_KERNEL_RX);
18490+}
18491+EXPORT_SYMBOL(module_alloc_exec);
18492+#endif
18493+#endif
18494+
18495 /* We don't need anything special. */
18496 int module_frob_arch_sections(Elf_Ehdr *hdr,
18497 Elf_Shdr *sechdrs,
18498@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18499 unsigned int i;
18500 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18501 Elf32_Sym *sym;
18502- uint32_t *location;
18503+ uint32_t *plocation, location;
18504
18505 DEBUGP("Applying relocate section %u to %u\n", relsec,
18506 sechdrs[relsec].sh_info);
18507 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18508 /* This is where to make the change */
18509- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18510- + rel[i].r_offset;
18511+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18512+ location = (uint32_t)plocation;
18513+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18514+ plocation = ktla_ktva((void *)plocation);
18515 /* This is the symbol it is referring to. Note that all
18516 undefined symbols have been resolved. */
18517 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18518@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18519 switch (ELF32_R_TYPE(rel[i].r_info)) {
18520 case R_386_32:
18521 /* We add the value into the location given */
18522- *location += sym->st_value;
18523+ pax_open_kernel();
18524+ *plocation += sym->st_value;
18525+ pax_close_kernel();
18526 break;
18527 case R_386_PC32:
18528 /* Add the value, subtract its postition */
18529- *location += sym->st_value - (uint32_t)location;
18530+ pax_open_kernel();
18531+ *plocation += sym->st_value - location;
18532+ pax_close_kernel();
18533 break;
18534 default:
18535 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18536@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18537 case R_X86_64_NONE:
18538 break;
18539 case R_X86_64_64:
18540+ pax_open_kernel();
18541 *(u64 *)loc = val;
18542+ pax_close_kernel();
18543 break;
18544 case R_X86_64_32:
18545+ pax_open_kernel();
18546 *(u32 *)loc = val;
18547+ pax_close_kernel();
18548 if (val != *(u32 *)loc)
18549 goto overflow;
18550 break;
18551 case R_X86_64_32S:
18552+ pax_open_kernel();
18553 *(s32 *)loc = val;
18554+ pax_close_kernel();
18555 if ((s64)val != *(s32 *)loc)
18556 goto overflow;
18557 break;
18558 case R_X86_64_PC32:
18559 val -= (u64)loc;
18560+ pax_open_kernel();
18561 *(u32 *)loc = val;
18562+ pax_close_kernel();
18563+
18564 #if 0
18565 if ((s64)val != *(s32 *)loc)
18566 goto overflow;
18567diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18568index 3a7c5a4..9191528 100644
18569--- a/arch/x86/kernel/paravirt-spinlocks.c
18570+++ b/arch/x86/kernel/paravirt-spinlocks.c
18571@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18572 __raw_spin_lock(lock);
18573 }
18574
18575-struct pv_lock_ops pv_lock_ops = {
18576+struct pv_lock_ops pv_lock_ops __read_only = {
18577 #ifdef CONFIG_SMP
18578 .spin_is_locked = __ticket_spin_is_locked,
18579 .spin_is_contended = __ticket_spin_is_contended,
18580diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18581index 1b1739d..dea6077 100644
18582--- a/arch/x86/kernel/paravirt.c
18583+++ b/arch/x86/kernel/paravirt.c
18584@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18585 {
18586 return x;
18587 }
18588+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18589+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18590+#endif
18591
18592 void __init default_banner(void)
18593 {
18594@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18595 * corresponding structure. */
18596 static void *get_call_destination(u8 type)
18597 {
18598- struct paravirt_patch_template tmpl = {
18599+ const struct paravirt_patch_template tmpl = {
18600 .pv_init_ops = pv_init_ops,
18601 .pv_time_ops = pv_time_ops,
18602 .pv_cpu_ops = pv_cpu_ops,
18603@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18604 .pv_lock_ops = pv_lock_ops,
18605 #endif
18606 };
18607+
18608+ pax_track_stack();
18609 return *((void **)&tmpl + type);
18610 }
18611
18612@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18613 if (opfunc == NULL)
18614 /* If there's no function, patch it with a ud2a (BUG) */
18615 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18616- else if (opfunc == _paravirt_nop)
18617+ else if (opfunc == (void *)_paravirt_nop)
18618 /* If the operation is a nop, then nop the callsite */
18619 ret = paravirt_patch_nop();
18620
18621 /* identity functions just return their single argument */
18622- else if (opfunc == _paravirt_ident_32)
18623+ else if (opfunc == (void *)_paravirt_ident_32)
18624 ret = paravirt_patch_ident_32(insnbuf, len);
18625- else if (opfunc == _paravirt_ident_64)
18626+ else if (opfunc == (void *)_paravirt_ident_64)
18627 ret = paravirt_patch_ident_64(insnbuf, len);
18628+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18629+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18630+ ret = paravirt_patch_ident_64(insnbuf, len);
18631+#endif
18632
18633 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18634 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18635@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18636 if (insn_len > len || start == NULL)
18637 insn_len = len;
18638 else
18639- memcpy(insnbuf, start, insn_len);
18640+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18641
18642 return insn_len;
18643 }
18644@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18645 preempt_enable();
18646 }
18647
18648-struct pv_info pv_info = {
18649+struct pv_info pv_info __read_only = {
18650 .name = "bare hardware",
18651 .paravirt_enabled = 0,
18652 .kernel_rpl = 0,
18653 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18654 };
18655
18656-struct pv_init_ops pv_init_ops = {
18657+struct pv_init_ops pv_init_ops __read_only = {
18658 .patch = native_patch,
18659 };
18660
18661-struct pv_time_ops pv_time_ops = {
18662+struct pv_time_ops pv_time_ops __read_only = {
18663 .sched_clock = native_sched_clock,
18664 };
18665
18666-struct pv_irq_ops pv_irq_ops = {
18667+struct pv_irq_ops pv_irq_ops __read_only = {
18668 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18669 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18670 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18671@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18672 #endif
18673 };
18674
18675-struct pv_cpu_ops pv_cpu_ops = {
18676+struct pv_cpu_ops pv_cpu_ops __read_only = {
18677 .cpuid = native_cpuid,
18678 .get_debugreg = native_get_debugreg,
18679 .set_debugreg = native_set_debugreg,
18680@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18681 .end_context_switch = paravirt_nop,
18682 };
18683
18684-struct pv_apic_ops pv_apic_ops = {
18685+struct pv_apic_ops pv_apic_ops __read_only = {
18686 #ifdef CONFIG_X86_LOCAL_APIC
18687 .startup_ipi_hook = paravirt_nop,
18688 #endif
18689 };
18690
18691-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18692+#ifdef CONFIG_X86_32
18693+#ifdef CONFIG_X86_PAE
18694+/* 64-bit pagetable entries */
18695+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18696+#else
18697 /* 32-bit pagetable entries */
18698 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18699+#endif
18700 #else
18701 /* 64-bit pagetable entries */
18702 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18703 #endif
18704
18705-struct pv_mmu_ops pv_mmu_ops = {
18706+struct pv_mmu_ops pv_mmu_ops __read_only = {
18707
18708 .read_cr2 = native_read_cr2,
18709 .write_cr2 = native_write_cr2,
18710@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18711 .make_pud = PTE_IDENT,
18712
18713 .set_pgd = native_set_pgd,
18714+ .set_pgd_batched = native_set_pgd_batched,
18715 #endif
18716 #endif /* PAGETABLE_LEVELS >= 3 */
18717
18718@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18719 },
18720
18721 .set_fixmap = native_set_fixmap,
18722+
18723+#ifdef CONFIG_PAX_KERNEXEC
18724+ .pax_open_kernel = native_pax_open_kernel,
18725+ .pax_close_kernel = native_pax_close_kernel,
18726+#endif
18727+
18728 };
18729
18730 EXPORT_SYMBOL_GPL(pv_time_ops);
18731diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18732index 1a2d4b1..6a0dd55 100644
18733--- a/arch/x86/kernel/pci-calgary_64.c
18734+++ b/arch/x86/kernel/pci-calgary_64.c
18735@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18736 free_pages((unsigned long)vaddr, get_order(size));
18737 }
18738
18739-static struct dma_map_ops calgary_dma_ops = {
18740+static const struct dma_map_ops calgary_dma_ops = {
18741 .alloc_coherent = calgary_alloc_coherent,
18742 .free_coherent = calgary_free_coherent,
18743 .map_sg = calgary_map_sg,
18744diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18745index 6ac3931..42b4414 100644
18746--- a/arch/x86/kernel/pci-dma.c
18747+++ b/arch/x86/kernel/pci-dma.c
18748@@ -14,7 +14,7 @@
18749
18750 static int forbid_dac __read_mostly;
18751
18752-struct dma_map_ops *dma_ops;
18753+const struct dma_map_ops *dma_ops;
18754 EXPORT_SYMBOL(dma_ops);
18755
18756 static int iommu_sac_force __read_mostly;
18757@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18758
18759 int dma_supported(struct device *dev, u64 mask)
18760 {
18761- struct dma_map_ops *ops = get_dma_ops(dev);
18762+ const struct dma_map_ops *ops = get_dma_ops(dev);
18763
18764 #ifdef CONFIG_PCI
18765 if (mask > 0xffffffff && forbid_dac > 0) {
18766diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18767index 1c76691..e3632db 100644
18768--- a/arch/x86/kernel/pci-gart_64.c
18769+++ b/arch/x86/kernel/pci-gart_64.c
18770@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18771 return -1;
18772 }
18773
18774-static struct dma_map_ops gart_dma_ops = {
18775+static const struct dma_map_ops gart_dma_ops = {
18776 .map_sg = gart_map_sg,
18777 .unmap_sg = gart_unmap_sg,
18778 .map_page = gart_map_page,
18779diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18780index a3933d4..c898869 100644
18781--- a/arch/x86/kernel/pci-nommu.c
18782+++ b/arch/x86/kernel/pci-nommu.c
18783@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18784 flush_write_buffers();
18785 }
18786
18787-struct dma_map_ops nommu_dma_ops = {
18788+const struct dma_map_ops nommu_dma_ops = {
18789 .alloc_coherent = dma_generic_alloc_coherent,
18790 .free_coherent = nommu_free_coherent,
18791 .map_sg = nommu_map_sg,
18792diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18793index aaa6b78..4de1881 100644
18794--- a/arch/x86/kernel/pci-swiotlb.c
18795+++ b/arch/x86/kernel/pci-swiotlb.c
18796@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18797 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18798 }
18799
18800-static struct dma_map_ops swiotlb_dma_ops = {
18801+static const struct dma_map_ops swiotlb_dma_ops = {
18802 .mapping_error = swiotlb_dma_mapping_error,
18803 .alloc_coherent = x86_swiotlb_alloc_coherent,
18804 .free_coherent = swiotlb_free_coherent,
18805diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18806index fc6c84d..0312ca2 100644
18807--- a/arch/x86/kernel/process.c
18808+++ b/arch/x86/kernel/process.c
18809@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18810
18811 void free_thread_info(struct thread_info *ti)
18812 {
18813- free_thread_xstate(ti->task);
18814 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18815 }
18816
18817+static struct kmem_cache *task_struct_cachep;
18818+
18819 void arch_task_cache_init(void)
18820 {
18821- task_xstate_cachep =
18822- kmem_cache_create("task_xstate", xstate_size,
18823+ /* create a slab on which task_structs can be allocated */
18824+ task_struct_cachep =
18825+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18826+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18827+
18828+ task_xstate_cachep =
18829+ kmem_cache_create("task_xstate", xstate_size,
18830 __alignof__(union thread_xstate),
18831- SLAB_PANIC | SLAB_NOTRACK, NULL);
18832+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18833+}
18834+
18835+struct task_struct *alloc_task_struct(void)
18836+{
18837+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18838+}
18839+
18840+void free_task_struct(struct task_struct *task)
18841+{
18842+ free_thread_xstate(task);
18843+ kmem_cache_free(task_struct_cachep, task);
18844 }
18845
18846 /*
18847@@ -73,7 +90,7 @@ void exit_thread(void)
18848 unsigned long *bp = t->io_bitmap_ptr;
18849
18850 if (bp) {
18851- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18852+ struct tss_struct *tss = init_tss + get_cpu();
18853
18854 t->io_bitmap_ptr = NULL;
18855 clear_thread_flag(TIF_IO_BITMAP);
18856@@ -93,6 +110,9 @@ void flush_thread(void)
18857
18858 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18859
18860+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18861+ loadsegment(gs, 0);
18862+#endif
18863 tsk->thread.debugreg0 = 0;
18864 tsk->thread.debugreg1 = 0;
18865 tsk->thread.debugreg2 = 0;
18866@@ -307,7 +327,7 @@ void default_idle(void)
18867 EXPORT_SYMBOL(default_idle);
18868 #endif
18869
18870-void stop_this_cpu(void *dummy)
18871+__noreturn void stop_this_cpu(void *dummy)
18872 {
18873 local_irq_disable();
18874 /*
18875@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18876 }
18877 early_param("idle", idle_setup);
18878
18879-unsigned long arch_align_stack(unsigned long sp)
18880+#ifdef CONFIG_PAX_RANDKSTACK
18881+void pax_randomize_kstack(struct pt_regs *regs)
18882 {
18883- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18884- sp -= get_random_int() % 8192;
18885- return sp & ~0xf;
18886-}
18887+ struct thread_struct *thread = &current->thread;
18888+ unsigned long time;
18889
18890-unsigned long arch_randomize_brk(struct mm_struct *mm)
18891-{
18892- unsigned long range_end = mm->brk + 0x02000000;
18893- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18894+ if (!randomize_va_space)
18895+ return;
18896+
18897+ if (v8086_mode(regs))
18898+ return;
18899+
18900+ rdtscl(time);
18901+
18902+ /* P4 seems to return a 0 LSB, ignore it */
18903+#ifdef CONFIG_MPENTIUM4
18904+ time &= 0x3EUL;
18905+ time <<= 2;
18906+#elif defined(CONFIG_X86_64)
18907+ time &= 0xFUL;
18908+ time <<= 4;
18909+#else
18910+ time &= 0x1FUL;
18911+ time <<= 3;
18912+#endif
18913+
18914+ thread->sp0 ^= time;
18915+ load_sp0(init_tss + smp_processor_id(), thread);
18916+
18917+#ifdef CONFIG_X86_64
18918+ percpu_write(kernel_stack, thread->sp0);
18919+#endif
18920 }
18921+#endif
18922
18923diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18924index c40c432..6e1df72 100644
18925--- a/arch/x86/kernel/process_32.c
18926+++ b/arch/x86/kernel/process_32.c
18927@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18928 unsigned long thread_saved_pc(struct task_struct *tsk)
18929 {
18930 return ((unsigned long *)tsk->thread.sp)[3];
18931+//XXX return tsk->thread.eip;
18932 }
18933
18934 #ifndef CONFIG_SMP
18935@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18936 unsigned short ss, gs;
18937 const char *board;
18938
18939- if (user_mode_vm(regs)) {
18940+ if (user_mode(regs)) {
18941 sp = regs->sp;
18942 ss = regs->ss & 0xffff;
18943- gs = get_user_gs(regs);
18944 } else {
18945 sp = (unsigned long) (&regs->sp);
18946 savesegment(ss, ss);
18947- savesegment(gs, gs);
18948 }
18949+ gs = get_user_gs(regs);
18950
18951 printk("\n");
18952
18953@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18954 regs.bx = (unsigned long) fn;
18955 regs.dx = (unsigned long) arg;
18956
18957- regs.ds = __USER_DS;
18958- regs.es = __USER_DS;
18959+ regs.ds = __KERNEL_DS;
18960+ regs.es = __KERNEL_DS;
18961 regs.fs = __KERNEL_PERCPU;
18962- regs.gs = __KERNEL_STACK_CANARY;
18963+ savesegment(gs, regs.gs);
18964 regs.orig_ax = -1;
18965 regs.ip = (unsigned long) kernel_thread_helper;
18966 regs.cs = __KERNEL_CS | get_kernel_rpl();
18967@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18968 struct task_struct *tsk;
18969 int err;
18970
18971- childregs = task_pt_regs(p);
18972+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18973 *childregs = *regs;
18974 childregs->ax = 0;
18975 childregs->sp = sp;
18976
18977 p->thread.sp = (unsigned long) childregs;
18978 p->thread.sp0 = (unsigned long) (childregs+1);
18979+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18980
18981 p->thread.ip = (unsigned long) ret_from_fork;
18982
18983@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18984 struct thread_struct *prev = &prev_p->thread,
18985 *next = &next_p->thread;
18986 int cpu = smp_processor_id();
18987- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18988+ struct tss_struct *tss = init_tss + cpu;
18989 bool preload_fpu;
18990
18991 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18992@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18993 */
18994 lazy_save_gs(prev->gs);
18995
18996+#ifdef CONFIG_PAX_MEMORY_UDEREF
18997+ __set_fs(task_thread_info(next_p)->addr_limit);
18998+#endif
18999+
19000 /*
19001 * Load the per-thread Thread-Local Storage descriptor.
19002 */
19003@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19004 */
19005 arch_end_context_switch(next_p);
19006
19007+ percpu_write(current_task, next_p);
19008+ percpu_write(current_tinfo, &next_p->tinfo);
19009+
19010 if (preload_fpu)
19011 __math_state_restore();
19012
19013@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19014 if (prev->gs | next->gs)
19015 lazy_load_gs(next->gs);
19016
19017- percpu_write(current_task, next_p);
19018-
19019 return prev_p;
19020 }
19021
19022@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
19023 } while (count++ < 16);
19024 return 0;
19025 }
19026-
19027diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19028index 39493bc..196816d 100644
19029--- a/arch/x86/kernel/process_64.c
19030+++ b/arch/x86/kernel/process_64.c
19031@@ -91,7 +91,7 @@ static void __exit_idle(void)
19032 void exit_idle(void)
19033 {
19034 /* idle loop has pid 0 */
19035- if (current->pid)
19036+ if (task_pid_nr(current))
19037 return;
19038 __exit_idle();
19039 }
19040@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
19041 if (!board)
19042 board = "";
19043 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
19044- current->pid, current->comm, print_tainted(),
19045+ task_pid_nr(current), current->comm, print_tainted(),
19046 init_utsname()->release,
19047 (int)strcspn(init_utsname()->version, " "),
19048 init_utsname()->version, board);
19049@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19050 struct pt_regs *childregs;
19051 struct task_struct *me = current;
19052
19053- childregs = ((struct pt_regs *)
19054- (THREAD_SIZE + task_stack_page(p))) - 1;
19055+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
19056 *childregs = *regs;
19057
19058 childregs->ax = 0;
19059@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19060 p->thread.sp = (unsigned long) childregs;
19061 p->thread.sp0 = (unsigned long) (childregs+1);
19062 p->thread.usersp = me->thread.usersp;
19063+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19064
19065 set_tsk_thread_flag(p, TIF_FORK);
19066
19067@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19068 struct thread_struct *prev = &prev_p->thread;
19069 struct thread_struct *next = &next_p->thread;
19070 int cpu = smp_processor_id();
19071- struct tss_struct *tss = &per_cpu(init_tss, cpu);
19072+ struct tss_struct *tss = init_tss + cpu;
19073 unsigned fsindex, gsindex;
19074 bool preload_fpu;
19075
19076@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19077 prev->usersp = percpu_read(old_rsp);
19078 percpu_write(old_rsp, next->usersp);
19079 percpu_write(current_task, next_p);
19080+ percpu_write(current_tinfo, &next_p->tinfo);
19081
19082- percpu_write(kernel_stack,
19083- (unsigned long)task_stack_page(next_p) +
19084- THREAD_SIZE - KERNEL_STACK_OFFSET);
19085+ percpu_write(kernel_stack, next->sp0);
19086
19087 /*
19088 * Now maybe reload the debug registers and handle I/O bitmaps
19089@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
19090 if (!p || p == current || p->state == TASK_RUNNING)
19091 return 0;
19092 stack = (unsigned long)task_stack_page(p);
19093- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19094+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19095 return 0;
19096 fp = *(u64 *)(p->thread.sp);
19097 do {
19098- if (fp < (unsigned long)stack ||
19099- fp >= (unsigned long)stack+THREAD_SIZE)
19100+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19101 return 0;
19102 ip = *(u64 *)(fp+8);
19103 if (!in_sched_functions(ip))
19104diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19105index c06acdd..3f5fff5 100644
19106--- a/arch/x86/kernel/ptrace.c
19107+++ b/arch/x86/kernel/ptrace.c
19108@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
19109 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19110 {
19111 int ret;
19112- unsigned long __user *datap = (unsigned long __user *)data;
19113+ unsigned long __user *datap = (__force unsigned long __user *)data;
19114
19115 switch (request) {
19116 /* read the word at location addr in the USER area. */
19117@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19118 if (addr < 0)
19119 return -EIO;
19120 ret = do_get_thread_area(child, addr,
19121- (struct user_desc __user *) data);
19122+ (__force struct user_desc __user *) data);
19123 break;
19124
19125 case PTRACE_SET_THREAD_AREA:
19126 if (addr < 0)
19127 return -EIO;
19128 ret = do_set_thread_area(child, addr,
19129- (struct user_desc __user *) data, 0);
19130+ (__force struct user_desc __user *) data, 0);
19131 break;
19132 #endif
19133
19134@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19135 #ifdef CONFIG_X86_PTRACE_BTS
19136 case PTRACE_BTS_CONFIG:
19137 ret = ptrace_bts_config
19138- (child, data, (struct ptrace_bts_config __user *)addr);
19139+ (child, data, (__force struct ptrace_bts_config __user *)addr);
19140 break;
19141
19142 case PTRACE_BTS_STATUS:
19143 ret = ptrace_bts_status
19144- (child, data, (struct ptrace_bts_config __user *)addr);
19145+ (child, data, (__force struct ptrace_bts_config __user *)addr);
19146 break;
19147
19148 case PTRACE_BTS_SIZE:
19149@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19150
19151 case PTRACE_BTS_GET:
19152 ret = ptrace_bts_read_record
19153- (child, data, (struct bts_struct __user *) addr);
19154+ (child, data, (__force struct bts_struct __user *) addr);
19155 break;
19156
19157 case PTRACE_BTS_CLEAR:
19158@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19159
19160 case PTRACE_BTS_DRAIN:
19161 ret = ptrace_bts_drain
19162- (child, data, (struct bts_struct __user *) addr);
19163+ (child, data, (__force struct bts_struct __user *) addr);
19164 break;
19165 #endif /* CONFIG_X86_PTRACE_BTS */
19166
19167@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19168 info.si_code = si_code;
19169
19170 /* User-mode ip? */
19171- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
19172+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
19173
19174 /* Send us the fake SIGTRAP */
19175 force_sig_info(SIGTRAP, &info, tsk);
19176@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
19177 * We must return the syscall number to actually look up in the table.
19178 * This can be -1L to skip running any syscall at all.
19179 */
19180-asmregparm long syscall_trace_enter(struct pt_regs *regs)
19181+long syscall_trace_enter(struct pt_regs *regs)
19182 {
19183 long ret = 0;
19184
19185@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
19186 return ret ?: regs->orig_ax;
19187 }
19188
19189-asmregparm void syscall_trace_leave(struct pt_regs *regs)
19190+void syscall_trace_leave(struct pt_regs *regs)
19191 {
19192 if (unlikely(current->audit_context))
19193 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
19194diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
19195index cf98100..e76e03d 100644
19196--- a/arch/x86/kernel/reboot.c
19197+++ b/arch/x86/kernel/reboot.c
19198@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
19199 EXPORT_SYMBOL(pm_power_off);
19200
19201 static const struct desc_ptr no_idt = {};
19202-static int reboot_mode;
19203+static unsigned short reboot_mode;
19204 enum reboot_type reboot_type = BOOT_KBD;
19205 int reboot_force;
19206
19207@@ -292,12 +292,12 @@ core_initcall(reboot_init);
19208 controller to pulse the CPU reset line, which is more thorough, but
19209 doesn't work with at least one type of 486 motherboard. It is easy
19210 to stop this code working; hence the copious comments. */
19211-static const unsigned long long
19212-real_mode_gdt_entries [3] =
19213+static struct desc_struct
19214+real_mode_gdt_entries [3] __read_only =
19215 {
19216- 0x0000000000000000ULL, /* Null descriptor */
19217- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
19218- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
19219+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
19220+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
19221+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
19222 };
19223
19224 static const struct desc_ptr
19225@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
19226 * specified by the code and length parameters.
19227 * We assume that length will aways be less that 100!
19228 */
19229-void machine_real_restart(const unsigned char *code, int length)
19230+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19231 {
19232 local_irq_disable();
19233
19234@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19235 /* Remap the kernel at virtual address zero, as well as offset zero
19236 from the kernel segment. This assumes the kernel segment starts at
19237 virtual address PAGE_OFFSET. */
19238- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19239- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19240+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19241+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19242
19243 /*
19244 * Use `swapper_pg_dir' as our page directory.
19245@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19246 boot)". This seems like a fairly standard thing that gets set by
19247 REBOOT.COM programs, and the previous reset routine did this
19248 too. */
19249- *((unsigned short *)0x472) = reboot_mode;
19250+ *(unsigned short *)(__va(0x472)) = reboot_mode;
19251
19252 /* For the switch to real mode, copy some code to low memory. It has
19253 to be in the first 64k because it is running in 16-bit mode, and it
19254 has to have the same physical and virtual address, because it turns
19255 off paging. Copy it near the end of the first page, out of the way
19256 of BIOS variables. */
19257- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19258- real_mode_switch, sizeof (real_mode_switch));
19259- memcpy((void *)(0x1000 - 100), code, length);
19260+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19261+ memcpy(__va(0x1000 - 100), code, length);
19262
19263 /* Set up the IDT for real mode. */
19264 load_idt(&real_mode_idt);
19265@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19266 __asm__ __volatile__ ("ljmp $0x0008,%0"
19267 :
19268 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19269+ do { } while (1);
19270 }
19271 #ifdef CONFIG_APM_MODULE
19272 EXPORT_SYMBOL(machine_real_restart);
19273@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19274 {
19275 }
19276
19277-static void native_machine_emergency_restart(void)
19278+__noreturn static void native_machine_emergency_restart(void)
19279 {
19280 int i;
19281
19282@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19283 #endif
19284 }
19285
19286-static void __machine_emergency_restart(int emergency)
19287+static __noreturn void __machine_emergency_restart(int emergency)
19288 {
19289 reboot_emergency = emergency;
19290 machine_ops.emergency_restart();
19291 }
19292
19293-static void native_machine_restart(char *__unused)
19294+static __noreturn void native_machine_restart(char *__unused)
19295 {
19296 printk("machine restart\n");
19297
19298@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19299 __machine_emergency_restart(0);
19300 }
19301
19302-static void native_machine_halt(void)
19303+static __noreturn void native_machine_halt(void)
19304 {
19305 /* stop other cpus and apics */
19306 machine_shutdown();
19307@@ -685,7 +685,7 @@ static void native_machine_halt(void)
19308 stop_this_cpu(NULL);
19309 }
19310
19311-static void native_machine_power_off(void)
19312+__noreturn static void native_machine_power_off(void)
19313 {
19314 if (pm_power_off) {
19315 if (!reboot_force)
19316@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19317 }
19318 /* a fallback in case there is no PM info available */
19319 tboot_shutdown(TB_SHUTDOWN_HALT);
19320+ do { } while (1);
19321 }
19322
19323 struct machine_ops machine_ops = {
19324diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19325index 7a6f3b3..976a959 100644
19326--- a/arch/x86/kernel/relocate_kernel_64.S
19327+++ b/arch/x86/kernel/relocate_kernel_64.S
19328@@ -11,6 +11,7 @@
19329 #include <asm/kexec.h>
19330 #include <asm/processor-flags.h>
19331 #include <asm/pgtable_types.h>
19332+#include <asm/alternative-asm.h>
19333
19334 /*
19335 * Must be relocatable PIC code callable as a C function
19336@@ -167,6 +168,7 @@ identity_mapped:
19337 xorq %r14, %r14
19338 xorq %r15, %r15
19339
19340+ pax_force_retaddr 0, 1
19341 ret
19342
19343 1:
19344diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19345index 5449a26..0b6c759 100644
19346--- a/arch/x86/kernel/setup.c
19347+++ b/arch/x86/kernel/setup.c
19348@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19349
19350 if (!boot_params.hdr.root_flags)
19351 root_mountflags &= ~MS_RDONLY;
19352- init_mm.start_code = (unsigned long) _text;
19353- init_mm.end_code = (unsigned long) _etext;
19354+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19355+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19356 init_mm.end_data = (unsigned long) _edata;
19357 init_mm.brk = _brk_end;
19358
19359- code_resource.start = virt_to_phys(_text);
19360- code_resource.end = virt_to_phys(_etext)-1;
19361- data_resource.start = virt_to_phys(_etext);
19362+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19363+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19364+ data_resource.start = virt_to_phys(_sdata);
19365 data_resource.end = virt_to_phys(_edata)-1;
19366 bss_resource.start = virt_to_phys(&__bss_start);
19367 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19368diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19369index d559af9..524c6ad 100644
19370--- a/arch/x86/kernel/setup_percpu.c
19371+++ b/arch/x86/kernel/setup_percpu.c
19372@@ -25,19 +25,17 @@
19373 # define DBG(x...)
19374 #endif
19375
19376-DEFINE_PER_CPU(int, cpu_number);
19377+#ifdef CONFIG_SMP
19378+DEFINE_PER_CPU(unsigned int, cpu_number);
19379 EXPORT_PER_CPU_SYMBOL(cpu_number);
19380+#endif
19381
19382-#ifdef CONFIG_X86_64
19383 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19384-#else
19385-#define BOOT_PERCPU_OFFSET 0
19386-#endif
19387
19388 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19389 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19390
19391-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19392+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19393 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19394 };
19395 EXPORT_SYMBOL(__per_cpu_offset);
19396@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19397 {
19398 #ifdef CONFIG_X86_32
19399 struct desc_struct gdt;
19400+ unsigned long base = per_cpu_offset(cpu);
19401
19402- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19403- 0x2 | DESCTYPE_S, 0x8);
19404- gdt.s = 1;
19405+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19406+ 0x83 | DESCTYPE_S, 0xC);
19407 write_gdt_entry(get_cpu_gdt_table(cpu),
19408 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19409 #endif
19410@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19411 /* alrighty, percpu areas up and running */
19412 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19413 for_each_possible_cpu(cpu) {
19414+#ifdef CONFIG_CC_STACKPROTECTOR
19415+#ifdef CONFIG_X86_32
19416+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19417+#endif
19418+#endif
19419 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19420 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19421 per_cpu(cpu_number, cpu) = cpu;
19422@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19423 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19424 #endif
19425 #endif
19426+#ifdef CONFIG_CC_STACKPROTECTOR
19427+#ifdef CONFIG_X86_32
19428+ if (!cpu)
19429+ per_cpu(stack_canary.canary, cpu) = canary;
19430+#endif
19431+#endif
19432 /*
19433 * Up to this point, the boot CPU has been using .data.init
19434 * area. Reload any changed state for the boot CPU.
19435diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19436index 6a44a76..a9287a1 100644
19437--- a/arch/x86/kernel/signal.c
19438+++ b/arch/x86/kernel/signal.c
19439@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19440 * Align the stack pointer according to the i386 ABI,
19441 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19442 */
19443- sp = ((sp + 4) & -16ul) - 4;
19444+ sp = ((sp - 12) & -16ul) - 4;
19445 #else /* !CONFIG_X86_32 */
19446 sp = round_down(sp, 16) - 8;
19447 #endif
19448@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19449 * Return an always-bogus address instead so we will die with SIGSEGV.
19450 */
19451 if (onsigstack && !likely(on_sig_stack(sp)))
19452- return (void __user *)-1L;
19453+ return (__force void __user *)-1L;
19454
19455 /* save i387 state */
19456 if (used_math() && save_i387_xstate(*fpstate) < 0)
19457- return (void __user *)-1L;
19458+ return (__force void __user *)-1L;
19459
19460 return (void __user *)sp;
19461 }
19462@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19463 }
19464
19465 if (current->mm->context.vdso)
19466- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19467+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19468 else
19469- restorer = &frame->retcode;
19470+ restorer = (void __user *)&frame->retcode;
19471 if (ka->sa.sa_flags & SA_RESTORER)
19472 restorer = ka->sa.sa_restorer;
19473
19474@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19475 * reasons and because gdb uses it as a signature to notice
19476 * signal handler stack frames.
19477 */
19478- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19479+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19480
19481 if (err)
19482 return -EFAULT;
19483@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19484 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19485
19486 /* Set up to return from userspace. */
19487- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19488+ if (current->mm->context.vdso)
19489+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19490+ else
19491+ restorer = (void __user *)&frame->retcode;
19492 if (ka->sa.sa_flags & SA_RESTORER)
19493 restorer = ka->sa.sa_restorer;
19494 put_user_ex(restorer, &frame->pretcode);
19495@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19496 * reasons and because gdb uses it as a signature to notice
19497 * signal handler stack frames.
19498 */
19499- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19500+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19501 } put_user_catch(err);
19502
19503 if (err)
19504@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19505 int signr;
19506 sigset_t *oldset;
19507
19508+ pax_track_stack();
19509+
19510 /*
19511 * We want the common case to go fast, which is why we may in certain
19512 * cases get here from kernel mode. Just return without doing anything
19513@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19514 * X86_32: vm86 regs switched out by assembly code before reaching
19515 * here, so testing against kernel CS suffices.
19516 */
19517- if (!user_mode(regs))
19518+ if (!user_mode_novm(regs))
19519 return;
19520
19521 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19522diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19523index 7e8e905..64d5c32 100644
19524--- a/arch/x86/kernel/smpboot.c
19525+++ b/arch/x86/kernel/smpboot.c
19526@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19527 */
19528 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19529
19530-void cpu_hotplug_driver_lock()
19531+void cpu_hotplug_driver_lock(void)
19532 {
19533- mutex_lock(&x86_cpu_hotplug_driver_mutex);
19534+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
19535 }
19536
19537-void cpu_hotplug_driver_unlock()
19538+void cpu_hotplug_driver_unlock(void)
19539 {
19540- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19541+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19542 }
19543
19544 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19545@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19546 * target processor state.
19547 */
19548 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19549- (unsigned long)stack_start.sp);
19550+ stack_start);
19551
19552 /*
19553 * Run STARTUP IPI loop.
19554@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19555 set_idle_for_cpu(cpu, c_idle.idle);
19556 do_rest:
19557 per_cpu(current_task, cpu) = c_idle.idle;
19558+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19559 #ifdef CONFIG_X86_32
19560 /* Stack for startup_32 can be just as for start_secondary onwards */
19561 irq_ctx_init(cpu);
19562@@ -750,13 +751,15 @@ do_rest:
19563 #else
19564 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19565 initial_gs = per_cpu_offset(cpu);
19566- per_cpu(kernel_stack, cpu) =
19567- (unsigned long)task_stack_page(c_idle.idle) -
19568- KERNEL_STACK_OFFSET + THREAD_SIZE;
19569+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19570 #endif
19571+
19572+ pax_open_kernel();
19573 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19574+ pax_close_kernel();
19575+
19576 initial_code = (unsigned long)start_secondary;
19577- stack_start.sp = (void *) c_idle.idle->thread.sp;
19578+ stack_start = c_idle.idle->thread.sp;
19579
19580 /* start_ip had better be page-aligned! */
19581 start_ip = setup_trampoline();
19582@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19583
19584 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19585
19586+#ifdef CONFIG_PAX_PER_CPU_PGD
19587+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19588+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19589+ KERNEL_PGD_PTRS);
19590+#endif
19591+
19592 err = do_boot_cpu(apicid, cpu);
19593
19594 if (err) {
19595diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19596index 3149032..14f1053 100644
19597--- a/arch/x86/kernel/step.c
19598+++ b/arch/x86/kernel/step.c
19599@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19600 struct desc_struct *desc;
19601 unsigned long base;
19602
19603- seg &= ~7UL;
19604+ seg >>= 3;
19605
19606 mutex_lock(&child->mm->context.lock);
19607- if (unlikely((seg >> 3) >= child->mm->context.size))
19608+ if (unlikely(seg >= child->mm->context.size))
19609 addr = -1L; /* bogus selector, access would fault */
19610 else {
19611 desc = child->mm->context.ldt + seg;
19612@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19613 addr += base;
19614 }
19615 mutex_unlock(&child->mm->context.lock);
19616- }
19617+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19618+ addr = ktla_ktva(addr);
19619
19620 return addr;
19621 }
19622@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19623 unsigned char opcode[15];
19624 unsigned long addr = convert_ip_to_linear(child, regs);
19625
19626+ if (addr == -EINVAL)
19627+ return 0;
19628+
19629 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19630 for (i = 0; i < copied; i++) {
19631 switch (opcode[i]) {
19632@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19633
19634 #ifdef CONFIG_X86_64
19635 case 0x40 ... 0x4f:
19636- if (regs->cs != __USER_CS)
19637+ if ((regs->cs & 0xffff) != __USER_CS)
19638 /* 32-bit mode: register increment */
19639 return 0;
19640 /* 64-bit mode: REX prefix */
19641diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19642index dee1ff7..a397f7f 100644
19643--- a/arch/x86/kernel/sys_i386_32.c
19644+++ b/arch/x86/kernel/sys_i386_32.c
19645@@ -24,6 +24,21 @@
19646
19647 #include <asm/syscalls.h>
19648
19649+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19650+{
19651+ unsigned long pax_task_size = TASK_SIZE;
19652+
19653+#ifdef CONFIG_PAX_SEGMEXEC
19654+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19655+ pax_task_size = SEGMEXEC_TASK_SIZE;
19656+#endif
19657+
19658+ if (len > pax_task_size || addr > pax_task_size - len)
19659+ return -EINVAL;
19660+
19661+ return 0;
19662+}
19663+
19664 /*
19665 * Perform the select(nd, in, out, ex, tv) and mmap() system
19666 * calls. Linux/i386 didn't use to be able to handle more than
19667@@ -58,6 +73,212 @@ out:
19668 return err;
19669 }
19670
19671+unsigned long
19672+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19673+ unsigned long len, unsigned long pgoff, unsigned long flags)
19674+{
19675+ struct mm_struct *mm = current->mm;
19676+ struct vm_area_struct *vma;
19677+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19678+
19679+#ifdef CONFIG_PAX_SEGMEXEC
19680+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19681+ pax_task_size = SEGMEXEC_TASK_SIZE;
19682+#endif
19683+
19684+ pax_task_size -= PAGE_SIZE;
19685+
19686+ if (len > pax_task_size)
19687+ return -ENOMEM;
19688+
19689+ if (flags & MAP_FIXED)
19690+ return addr;
19691+
19692+#ifdef CONFIG_PAX_RANDMMAP
19693+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19694+#endif
19695+
19696+ if (addr) {
19697+ addr = PAGE_ALIGN(addr);
19698+ if (pax_task_size - len >= addr) {
19699+ vma = find_vma(mm, addr);
19700+ if (check_heap_stack_gap(vma, addr, len))
19701+ return addr;
19702+ }
19703+ }
19704+ if (len > mm->cached_hole_size) {
19705+ start_addr = addr = mm->free_area_cache;
19706+ } else {
19707+ start_addr = addr = mm->mmap_base;
19708+ mm->cached_hole_size = 0;
19709+ }
19710+
19711+#ifdef CONFIG_PAX_PAGEEXEC
19712+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19713+ start_addr = 0x00110000UL;
19714+
19715+#ifdef CONFIG_PAX_RANDMMAP
19716+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19717+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19718+#endif
19719+
19720+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19721+ start_addr = addr = mm->mmap_base;
19722+ else
19723+ addr = start_addr;
19724+ }
19725+#endif
19726+
19727+full_search:
19728+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19729+ /* At this point: (!vma || addr < vma->vm_end). */
19730+ if (pax_task_size - len < addr) {
19731+ /*
19732+ * Start a new search - just in case we missed
19733+ * some holes.
19734+ */
19735+ if (start_addr != mm->mmap_base) {
19736+ start_addr = addr = mm->mmap_base;
19737+ mm->cached_hole_size = 0;
19738+ goto full_search;
19739+ }
19740+ return -ENOMEM;
19741+ }
19742+ if (check_heap_stack_gap(vma, addr, len))
19743+ break;
19744+ if (addr + mm->cached_hole_size < vma->vm_start)
19745+ mm->cached_hole_size = vma->vm_start - addr;
19746+ addr = vma->vm_end;
19747+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19748+ start_addr = addr = mm->mmap_base;
19749+ mm->cached_hole_size = 0;
19750+ goto full_search;
19751+ }
19752+ }
19753+
19754+ /*
19755+ * Remember the place where we stopped the search:
19756+ */
19757+ mm->free_area_cache = addr + len;
19758+ return addr;
19759+}
19760+
19761+unsigned long
19762+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19763+ const unsigned long len, const unsigned long pgoff,
19764+ const unsigned long flags)
19765+{
19766+ struct vm_area_struct *vma;
19767+ struct mm_struct *mm = current->mm;
19768+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19769+
19770+#ifdef CONFIG_PAX_SEGMEXEC
19771+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19772+ pax_task_size = SEGMEXEC_TASK_SIZE;
19773+#endif
19774+
19775+ pax_task_size -= PAGE_SIZE;
19776+
19777+ /* requested length too big for entire address space */
19778+ if (len > pax_task_size)
19779+ return -ENOMEM;
19780+
19781+ if (flags & MAP_FIXED)
19782+ return addr;
19783+
19784+#ifdef CONFIG_PAX_PAGEEXEC
19785+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19786+ goto bottomup;
19787+#endif
19788+
19789+#ifdef CONFIG_PAX_RANDMMAP
19790+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19791+#endif
19792+
19793+ /* requesting a specific address */
19794+ if (addr) {
19795+ addr = PAGE_ALIGN(addr);
19796+ if (pax_task_size - len >= addr) {
19797+ vma = find_vma(mm, addr);
19798+ if (check_heap_stack_gap(vma, addr, len))
19799+ return addr;
19800+ }
19801+ }
19802+
19803+ /* check if free_area_cache is useful for us */
19804+ if (len <= mm->cached_hole_size) {
19805+ mm->cached_hole_size = 0;
19806+ mm->free_area_cache = mm->mmap_base;
19807+ }
19808+
19809+ /* either no address requested or can't fit in requested address hole */
19810+ addr = mm->free_area_cache;
19811+
19812+ /* make sure it can fit in the remaining address space */
19813+ if (addr > len) {
19814+ vma = find_vma(mm, addr-len);
19815+ if (check_heap_stack_gap(vma, addr - len, len))
19816+ /* remember the address as a hint for next time */
19817+ return (mm->free_area_cache = addr-len);
19818+ }
19819+
19820+ if (mm->mmap_base < len)
19821+ goto bottomup;
19822+
19823+ addr = mm->mmap_base-len;
19824+
19825+ do {
19826+ /*
19827+ * Lookup failure means no vma is above this address,
19828+ * else if new region fits below vma->vm_start,
19829+ * return with success:
19830+ */
19831+ vma = find_vma(mm, addr);
19832+ if (check_heap_stack_gap(vma, addr, len))
19833+ /* remember the address as a hint for next time */
19834+ return (mm->free_area_cache = addr);
19835+
19836+ /* remember the largest hole we saw so far */
19837+ if (addr + mm->cached_hole_size < vma->vm_start)
19838+ mm->cached_hole_size = vma->vm_start - addr;
19839+
19840+ /* try just below the current vma->vm_start */
19841+ addr = skip_heap_stack_gap(vma, len);
19842+ } while (!IS_ERR_VALUE(addr));
19843+
19844+bottomup:
19845+ /*
19846+ * A failed mmap() very likely causes application failure,
19847+ * so fall back to the bottom-up function here. This scenario
19848+ * can happen with large stack limits and large mmap()
19849+ * allocations.
19850+ */
19851+
19852+#ifdef CONFIG_PAX_SEGMEXEC
19853+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19854+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19855+ else
19856+#endif
19857+
19858+ mm->mmap_base = TASK_UNMAPPED_BASE;
19859+
19860+#ifdef CONFIG_PAX_RANDMMAP
19861+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19862+ mm->mmap_base += mm->delta_mmap;
19863+#endif
19864+
19865+ mm->free_area_cache = mm->mmap_base;
19866+ mm->cached_hole_size = ~0UL;
19867+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19868+ /*
19869+ * Restore the topdown base:
19870+ */
19871+ mm->mmap_base = base;
19872+ mm->free_area_cache = base;
19873+ mm->cached_hole_size = ~0UL;
19874+
19875+ return addr;
19876+}
19877
19878 struct sel_arg_struct {
19879 unsigned long n;
19880@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19881 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19882 case SEMTIMEDOP:
19883 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19884- (const struct timespec __user *)fifth);
19885+ (__force const struct timespec __user *)fifth);
19886
19887 case SEMGET:
19888 return sys_semget(first, second, third);
19889@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19890 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19891 if (ret)
19892 return ret;
19893- return put_user(raddr, (ulong __user *) third);
19894+ return put_user(raddr, (__force ulong __user *) third);
19895 }
19896 case 1: /* iBCS2 emulator entry point */
19897 if (!segment_eq(get_fs(), get_ds()))
19898@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19899
19900 return error;
19901 }
19902-
19903-
19904-/*
19905- * Do a system call from kernel instead of calling sys_execve so we
19906- * end up with proper pt_regs.
19907- */
19908-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19909-{
19910- long __res;
19911- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19912- : "=a" (__res)
19913- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19914- return __res;
19915-}
19916diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19917index 8aa2057..b604bc1 100644
19918--- a/arch/x86/kernel/sys_x86_64.c
19919+++ b/arch/x86/kernel/sys_x86_64.c
19920@@ -32,8 +32,8 @@ out:
19921 return error;
19922 }
19923
19924-static void find_start_end(unsigned long flags, unsigned long *begin,
19925- unsigned long *end)
19926+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19927+ unsigned long *begin, unsigned long *end)
19928 {
19929 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19930 unsigned long new_begin;
19931@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19932 *begin = new_begin;
19933 }
19934 } else {
19935- *begin = TASK_UNMAPPED_BASE;
19936+ *begin = mm->mmap_base;
19937 *end = TASK_SIZE;
19938 }
19939 }
19940@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19941 if (flags & MAP_FIXED)
19942 return addr;
19943
19944- find_start_end(flags, &begin, &end);
19945+ find_start_end(mm, flags, &begin, &end);
19946
19947 if (len > end)
19948 return -ENOMEM;
19949
19950+#ifdef CONFIG_PAX_RANDMMAP
19951+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19952+#endif
19953+
19954 if (addr) {
19955 addr = PAGE_ALIGN(addr);
19956 vma = find_vma(mm, addr);
19957- if (end - len >= addr &&
19958- (!vma || addr + len <= vma->vm_start))
19959+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19960 return addr;
19961 }
19962 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19963@@ -106,7 +109,7 @@ full_search:
19964 }
19965 return -ENOMEM;
19966 }
19967- if (!vma || addr + len <= vma->vm_start) {
19968+ if (check_heap_stack_gap(vma, addr, len)) {
19969 /*
19970 * Remember the place where we stopped the search:
19971 */
19972@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19973 {
19974 struct vm_area_struct *vma;
19975 struct mm_struct *mm = current->mm;
19976- unsigned long addr = addr0;
19977+ unsigned long base = mm->mmap_base, addr = addr0;
19978
19979 /* requested length too big for entire address space */
19980 if (len > TASK_SIZE)
19981@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19982 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19983 goto bottomup;
19984
19985+#ifdef CONFIG_PAX_RANDMMAP
19986+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19987+#endif
19988+
19989 /* requesting a specific address */
19990 if (addr) {
19991 addr = PAGE_ALIGN(addr);
19992- vma = find_vma(mm, addr);
19993- if (TASK_SIZE - len >= addr &&
19994- (!vma || addr + len <= vma->vm_start))
19995- return addr;
19996+ if (TASK_SIZE - len >= addr) {
19997+ vma = find_vma(mm, addr);
19998+ if (check_heap_stack_gap(vma, addr, len))
19999+ return addr;
20000+ }
20001 }
20002
20003 /* check if free_area_cache is useful for us */
20004@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20005 /* make sure it can fit in the remaining address space */
20006 if (addr > len) {
20007 vma = find_vma(mm, addr-len);
20008- if (!vma || addr <= vma->vm_start)
20009+ if (check_heap_stack_gap(vma, addr - len, len))
20010 /* remember the address as a hint for next time */
20011 return mm->free_area_cache = addr-len;
20012 }
20013@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20014 * return with success:
20015 */
20016 vma = find_vma(mm, addr);
20017- if (!vma || addr+len <= vma->vm_start)
20018+ if (check_heap_stack_gap(vma, addr, len))
20019 /* remember the address as a hint for next time */
20020 return mm->free_area_cache = addr;
20021
20022@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20023 mm->cached_hole_size = vma->vm_start - addr;
20024
20025 /* try just below the current vma->vm_start */
20026- addr = vma->vm_start-len;
20027- } while (len < vma->vm_start);
20028+ addr = skip_heap_stack_gap(vma, len);
20029+ } while (!IS_ERR_VALUE(addr));
20030
20031 bottomup:
20032 /*
20033@@ -198,13 +206,21 @@ bottomup:
20034 * can happen with large stack limits and large mmap()
20035 * allocations.
20036 */
20037+ mm->mmap_base = TASK_UNMAPPED_BASE;
20038+
20039+#ifdef CONFIG_PAX_RANDMMAP
20040+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20041+ mm->mmap_base += mm->delta_mmap;
20042+#endif
20043+
20044+ mm->free_area_cache = mm->mmap_base;
20045 mm->cached_hole_size = ~0UL;
20046- mm->free_area_cache = TASK_UNMAPPED_BASE;
20047 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20048 /*
20049 * Restore the topdown base:
20050 */
20051- mm->free_area_cache = mm->mmap_base;
20052+ mm->mmap_base = base;
20053+ mm->free_area_cache = base;
20054 mm->cached_hole_size = ~0UL;
20055
20056 return addr;
20057diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
20058index 76d70a4..4c94a44 100644
20059--- a/arch/x86/kernel/syscall_table_32.S
20060+++ b/arch/x86/kernel/syscall_table_32.S
20061@@ -1,3 +1,4 @@
20062+.section .rodata,"a",@progbits
20063 ENTRY(sys_call_table)
20064 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
20065 .long sys_exit
20066diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20067index 46b8277..3349d55 100644
20068--- a/arch/x86/kernel/tboot.c
20069+++ b/arch/x86/kernel/tboot.c
20070@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
20071
20072 void tboot_shutdown(u32 shutdown_type)
20073 {
20074- void (*shutdown)(void);
20075+ void (* __noreturn shutdown)(void);
20076
20077 if (!tboot_enabled())
20078 return;
20079@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
20080
20081 switch_to_tboot_pt();
20082
20083- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20084+ shutdown = (void *)tboot->shutdown_entry;
20085 shutdown();
20086
20087 /* should not reach here */
20088@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20089 tboot_shutdown(acpi_shutdown_map[sleep_state]);
20090 }
20091
20092-static atomic_t ap_wfs_count;
20093+static atomic_unchecked_t ap_wfs_count;
20094
20095 static int tboot_wait_for_aps(int num_aps)
20096 {
20097@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20098 {
20099 switch (action) {
20100 case CPU_DYING:
20101- atomic_inc(&ap_wfs_count);
20102+ atomic_inc_unchecked(&ap_wfs_count);
20103 if (num_online_cpus() == 1)
20104- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20105+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20106 return NOTIFY_BAD;
20107 break;
20108 }
20109@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
20110
20111 tboot_create_trampoline();
20112
20113- atomic_set(&ap_wfs_count, 0);
20114+ atomic_set_unchecked(&ap_wfs_count, 0);
20115 register_hotcpu_notifier(&tboot_cpu_notifier);
20116 return 0;
20117 }
20118diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20119index be25734..87fe232 100644
20120--- a/arch/x86/kernel/time.c
20121+++ b/arch/x86/kernel/time.c
20122@@ -26,17 +26,13 @@
20123 int timer_ack;
20124 #endif
20125
20126-#ifdef CONFIG_X86_64
20127-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
20128-#endif
20129-
20130 unsigned long profile_pc(struct pt_regs *regs)
20131 {
20132 unsigned long pc = instruction_pointer(regs);
20133
20134- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
20135+ if (!user_mode(regs) && in_lock_functions(pc)) {
20136 #ifdef CONFIG_FRAME_POINTER
20137- return *(unsigned long *)(regs->bp + sizeof(long));
20138+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
20139 #else
20140 unsigned long *sp =
20141 (unsigned long *)kernel_stack_pointer(regs);
20142@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
20143 * or above a saved flags. Eflags has bits 22-31 zero,
20144 * kernel addresses don't.
20145 */
20146+
20147+#ifdef CONFIG_PAX_KERNEXEC
20148+ return ktla_ktva(sp[0]);
20149+#else
20150 if (sp[0] >> 22)
20151 return sp[0];
20152 if (sp[1] >> 22)
20153 return sp[1];
20154 #endif
20155+
20156+#endif
20157 }
20158 return pc;
20159 }
20160diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
20161index 6bb7b85..dd853e1 100644
20162--- a/arch/x86/kernel/tls.c
20163+++ b/arch/x86/kernel/tls.c
20164@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
20165 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
20166 return -EINVAL;
20167
20168+#ifdef CONFIG_PAX_SEGMEXEC
20169+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
20170+ return -EINVAL;
20171+#endif
20172+
20173 set_tls_desc(p, idx, &info, 1);
20174
20175 return 0;
20176diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
20177index 8508237..229b664 100644
20178--- a/arch/x86/kernel/trampoline_32.S
20179+++ b/arch/x86/kernel/trampoline_32.S
20180@@ -32,6 +32,12 @@
20181 #include <asm/segment.h>
20182 #include <asm/page_types.h>
20183
20184+#ifdef CONFIG_PAX_KERNEXEC
20185+#define ta(X) (X)
20186+#else
20187+#define ta(X) ((X) - __PAGE_OFFSET)
20188+#endif
20189+
20190 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
20191 __CPUINITRODATA
20192 .code16
20193@@ -60,7 +66,7 @@ r_base = .
20194 inc %ax # protected mode (PE) bit
20195 lmsw %ax # into protected mode
20196 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
20197- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
20198+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
20199
20200 # These need to be in the same 64K segment as the above;
20201 # hence we don't use the boot_gdt_descr defined in head.S
20202diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
20203index 3af2dff..ba8aa49 100644
20204--- a/arch/x86/kernel/trampoline_64.S
20205+++ b/arch/x86/kernel/trampoline_64.S
20206@@ -91,7 +91,7 @@ startup_32:
20207 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
20208 movl %eax, %ds
20209
20210- movl $X86_CR4_PAE, %eax
20211+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20212 movl %eax, %cr4 # Enable PAE mode
20213
20214 # Setup trampoline 4 level pagetables
20215@@ -127,7 +127,7 @@ startup_64:
20216 no_longmode:
20217 hlt
20218 jmp no_longmode
20219-#include "verify_cpu_64.S"
20220+#include "verify_cpu.S"
20221
20222 # Careful these need to be in the same 64K segment as the above;
20223 tidt:
20224@@ -138,7 +138,7 @@ tidt:
20225 # so the kernel can live anywhere
20226 .balign 4
20227 tgdt:
20228- .short tgdt_end - tgdt # gdt limit
20229+ .short tgdt_end - tgdt - 1 # gdt limit
20230 .long tgdt - r_base
20231 .short 0
20232 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20233diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20234index 7e37dce..ec3f8e5 100644
20235--- a/arch/x86/kernel/traps.c
20236+++ b/arch/x86/kernel/traps.c
20237@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20238
20239 /* Do we ignore FPU interrupts ? */
20240 char ignore_fpu_irq;
20241-
20242-/*
20243- * The IDT has to be page-aligned to simplify the Pentium
20244- * F0 0F bug workaround.
20245- */
20246-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20247 #endif
20248
20249 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20250@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20251 static inline void
20252 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20253 {
20254- if (!user_mode_vm(regs))
20255+ if (!user_mode(regs))
20256 die(str, regs, err);
20257 }
20258 #endif
20259
20260 static void __kprobes
20261-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20262+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20263 long error_code, siginfo_t *info)
20264 {
20265 struct task_struct *tsk = current;
20266
20267 #ifdef CONFIG_X86_32
20268- if (regs->flags & X86_VM_MASK) {
20269+ if (v8086_mode(regs)) {
20270 /*
20271 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20272 * On nmi (interrupt 2), do_trap should not be called.
20273@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20274 }
20275 #endif
20276
20277- if (!user_mode(regs))
20278+ if (!user_mode_novm(regs))
20279 goto kernel_trap;
20280
20281 #ifdef CONFIG_X86_32
20282@@ -158,7 +152,7 @@ trap_signal:
20283 printk_ratelimit()) {
20284 printk(KERN_INFO
20285 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20286- tsk->comm, tsk->pid, str,
20287+ tsk->comm, task_pid_nr(tsk), str,
20288 regs->ip, regs->sp, error_code);
20289 print_vma_addr(" in ", regs->ip);
20290 printk("\n");
20291@@ -175,8 +169,20 @@ kernel_trap:
20292 if (!fixup_exception(regs)) {
20293 tsk->thread.error_code = error_code;
20294 tsk->thread.trap_no = trapnr;
20295+
20296+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20297+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20298+ str = "PAX: suspicious stack segment fault";
20299+#endif
20300+
20301 die(str, regs, error_code);
20302 }
20303+
20304+#ifdef CONFIG_PAX_REFCOUNT
20305+ if (trapnr == 4)
20306+ pax_report_refcount_overflow(regs);
20307+#endif
20308+
20309 return;
20310
20311 #ifdef CONFIG_X86_32
20312@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20313 conditional_sti(regs);
20314
20315 #ifdef CONFIG_X86_32
20316- if (regs->flags & X86_VM_MASK)
20317+ if (v8086_mode(regs))
20318 goto gp_in_vm86;
20319 #endif
20320
20321 tsk = current;
20322- if (!user_mode(regs))
20323+ if (!user_mode_novm(regs))
20324 goto gp_in_kernel;
20325
20326+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20327+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20328+ struct mm_struct *mm = tsk->mm;
20329+ unsigned long limit;
20330+
20331+ down_write(&mm->mmap_sem);
20332+ limit = mm->context.user_cs_limit;
20333+ if (limit < TASK_SIZE) {
20334+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20335+ up_write(&mm->mmap_sem);
20336+ return;
20337+ }
20338+ up_write(&mm->mmap_sem);
20339+ }
20340+#endif
20341+
20342 tsk->thread.error_code = error_code;
20343 tsk->thread.trap_no = 13;
20344
20345@@ -305,6 +327,13 @@ gp_in_kernel:
20346 if (notify_die(DIE_GPF, "general protection fault", regs,
20347 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20348 return;
20349+
20350+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20351+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20352+ die("PAX: suspicious general protection fault", regs, error_code);
20353+ else
20354+#endif
20355+
20356 die("general protection fault", regs, error_code);
20357 }
20358
20359@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20360 dotraplinkage notrace __kprobes void
20361 do_nmi(struct pt_regs *regs, long error_code)
20362 {
20363+
20364+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20365+ if (!user_mode(regs)) {
20366+ unsigned long cs = regs->cs & 0xFFFF;
20367+ unsigned long ip = ktva_ktla(regs->ip);
20368+
20369+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20370+ regs->ip = ip;
20371+ }
20372+#endif
20373+
20374 nmi_enter();
20375
20376 inc_irq_stat(__nmi_count);
20377@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20378 }
20379
20380 #ifdef CONFIG_X86_32
20381- if (regs->flags & X86_VM_MASK)
20382+ if (v8086_mode(regs))
20383 goto debug_vm86;
20384 #endif
20385
20386@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20387 * kernel space (but re-enable TF when returning to user mode).
20388 */
20389 if (condition & DR_STEP) {
20390- if (!user_mode(regs))
20391+ if (!user_mode_novm(regs))
20392 goto clear_TF_reenable;
20393 }
20394
20395@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20396 * Handle strange cache flush from user space exception
20397 * in all other cases. This is undocumented behaviour.
20398 */
20399- if (regs->flags & X86_VM_MASK) {
20400+ if (v8086_mode(regs)) {
20401 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20402 return;
20403 }
20404@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20405 void __math_state_restore(void)
20406 {
20407 struct thread_info *thread = current_thread_info();
20408- struct task_struct *tsk = thread->task;
20409+ struct task_struct *tsk = current;
20410
20411 /*
20412 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20413@@ -825,8 +865,7 @@ void __math_state_restore(void)
20414 */
20415 asmlinkage void math_state_restore(void)
20416 {
20417- struct thread_info *thread = current_thread_info();
20418- struct task_struct *tsk = thread->task;
20419+ struct task_struct *tsk = current;
20420
20421 if (!tsk_used_math(tsk)) {
20422 local_irq_enable();
20423diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20424new file mode 100644
20425index 0000000..50c5edd
20426--- /dev/null
20427+++ b/arch/x86/kernel/verify_cpu.S
20428@@ -0,0 +1,140 @@
20429+/*
20430+ *
20431+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
20432+ * code has been borrowed from boot/setup.S and was introduced by
20433+ * Andi Kleen.
20434+ *
20435+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20436+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20437+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20438+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20439+ *
20440+ * This source code is licensed under the GNU General Public License,
20441+ * Version 2. See the file COPYING for more details.
20442+ *
20443+ * This is a common code for verification whether CPU supports
20444+ * long mode and SSE or not. It is not called directly instead this
20445+ * file is included at various places and compiled in that context.
20446+ * This file is expected to run in 32bit code. Currently:
20447+ *
20448+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20449+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
20450+ * arch/x86/kernel/head_32.S: processor startup
20451+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20452+ *
20453+ * verify_cpu, returns the status of longmode and SSE in register %eax.
20454+ * 0: Success 1: Failure
20455+ *
20456+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20457+ *
20458+ * The caller needs to check for the error code and take the action
20459+ * appropriately. Either display a message or halt.
20460+ */
20461+
20462+#include <asm/cpufeature.h>
20463+#include <asm/msr-index.h>
20464+
20465+verify_cpu:
20466+ pushfl # Save caller passed flags
20467+ pushl $0 # Kill any dangerous flags
20468+ popfl
20469+
20470+ pushfl # standard way to check for cpuid
20471+ popl %eax
20472+ movl %eax,%ebx
20473+ xorl $0x200000,%eax
20474+ pushl %eax
20475+ popfl
20476+ pushfl
20477+ popl %eax
20478+ cmpl %eax,%ebx
20479+ jz verify_cpu_no_longmode # cpu has no cpuid
20480+
20481+ movl $0x0,%eax # See if cpuid 1 is implemented
20482+ cpuid
20483+ cmpl $0x1,%eax
20484+ jb verify_cpu_no_longmode # no cpuid 1
20485+
20486+ xor %di,%di
20487+ cmpl $0x68747541,%ebx # AuthenticAMD
20488+ jnz verify_cpu_noamd
20489+ cmpl $0x69746e65,%edx
20490+ jnz verify_cpu_noamd
20491+ cmpl $0x444d4163,%ecx
20492+ jnz verify_cpu_noamd
20493+ mov $1,%di # cpu is from AMD
20494+ jmp verify_cpu_check
20495+
20496+verify_cpu_noamd:
20497+ cmpl $0x756e6547,%ebx # GenuineIntel?
20498+ jnz verify_cpu_check
20499+ cmpl $0x49656e69,%edx
20500+ jnz verify_cpu_check
20501+ cmpl $0x6c65746e,%ecx
20502+ jnz verify_cpu_check
20503+
20504+ # only call IA32_MISC_ENABLE when:
20505+ # family > 6 || (family == 6 && model >= 0xd)
20506+ movl $0x1, %eax # check CPU family and model
20507+ cpuid
20508+ movl %eax, %ecx
20509+
20510+ andl $0x0ff00f00, %eax # mask family and extended family
20511+ shrl $8, %eax
20512+ cmpl $6, %eax
20513+ ja verify_cpu_clear_xd # family > 6, ok
20514+ jb verify_cpu_check # family < 6, skip
20515+
20516+ andl $0x000f00f0, %ecx # mask model and extended model
20517+ shrl $4, %ecx
20518+ cmpl $0xd, %ecx
20519+ jb verify_cpu_check # family == 6, model < 0xd, skip
20520+
20521+verify_cpu_clear_xd:
20522+ movl $MSR_IA32_MISC_ENABLE, %ecx
20523+ rdmsr
20524+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20525+ jnc verify_cpu_check # only write MSR if bit was changed
20526+ wrmsr
20527+
20528+verify_cpu_check:
20529+ movl $0x1,%eax # Does the cpu have what it takes
20530+ cpuid
20531+ andl $REQUIRED_MASK0,%edx
20532+ xorl $REQUIRED_MASK0,%edx
20533+ jnz verify_cpu_no_longmode
20534+
20535+ movl $0x80000000,%eax # See if extended cpuid is implemented
20536+ cpuid
20537+ cmpl $0x80000001,%eax
20538+ jb verify_cpu_no_longmode # no extended cpuid
20539+
20540+ movl $0x80000001,%eax # Does the cpu have what it takes
20541+ cpuid
20542+ andl $REQUIRED_MASK1,%edx
20543+ xorl $REQUIRED_MASK1,%edx
20544+ jnz verify_cpu_no_longmode
20545+
20546+verify_cpu_sse_test:
20547+ movl $1,%eax
20548+ cpuid
20549+ andl $SSE_MASK,%edx
20550+ cmpl $SSE_MASK,%edx
20551+ je verify_cpu_sse_ok
20552+ test %di,%di
20553+ jz verify_cpu_no_longmode # only try to force SSE on AMD
20554+ movl $MSR_K7_HWCR,%ecx
20555+ rdmsr
20556+ btr $15,%eax # enable SSE
20557+ wrmsr
20558+ xor %di,%di # don't loop
20559+ jmp verify_cpu_sse_test # try again
20560+
20561+verify_cpu_no_longmode:
20562+ popfl # Restore caller passed flags
20563+ movl $1,%eax
20564+ ret
20565+verify_cpu_sse_ok:
20566+ popfl # Restore caller passed flags
20567+ xorl %eax, %eax
20568+ ret
20569diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20570deleted file mode 100644
20571index 45b6f8a..0000000
20572--- a/arch/x86/kernel/verify_cpu_64.S
20573+++ /dev/null
20574@@ -1,105 +0,0 @@
20575-/*
20576- *
20577- * verify_cpu.S - Code for cpu long mode and SSE verification. This
20578- * code has been borrowed from boot/setup.S and was introduced by
20579- * Andi Kleen.
20580- *
20581- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20582- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20583- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20584- *
20585- * This source code is licensed under the GNU General Public License,
20586- * Version 2. See the file COPYING for more details.
20587- *
20588- * This is a common code for verification whether CPU supports
20589- * long mode and SSE or not. It is not called directly instead this
20590- * file is included at various places and compiled in that context.
20591- * Following are the current usage.
20592- *
20593- * This file is included by both 16bit and 32bit code.
20594- *
20595- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20596- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20597- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20598- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20599- *
20600- * verify_cpu, returns the status of cpu check in register %eax.
20601- * 0: Success 1: Failure
20602- *
20603- * The caller needs to check for the error code and take the action
20604- * appropriately. Either display a message or halt.
20605- */
20606-
20607-#include <asm/cpufeature.h>
20608-
20609-verify_cpu:
20610- pushfl # Save caller passed flags
20611- pushl $0 # Kill any dangerous flags
20612- popfl
20613-
20614- pushfl # standard way to check for cpuid
20615- popl %eax
20616- movl %eax,%ebx
20617- xorl $0x200000,%eax
20618- pushl %eax
20619- popfl
20620- pushfl
20621- popl %eax
20622- cmpl %eax,%ebx
20623- jz verify_cpu_no_longmode # cpu has no cpuid
20624-
20625- movl $0x0,%eax # See if cpuid 1 is implemented
20626- cpuid
20627- cmpl $0x1,%eax
20628- jb verify_cpu_no_longmode # no cpuid 1
20629-
20630- xor %di,%di
20631- cmpl $0x68747541,%ebx # AuthenticAMD
20632- jnz verify_cpu_noamd
20633- cmpl $0x69746e65,%edx
20634- jnz verify_cpu_noamd
20635- cmpl $0x444d4163,%ecx
20636- jnz verify_cpu_noamd
20637- mov $1,%di # cpu is from AMD
20638-
20639-verify_cpu_noamd:
20640- movl $0x1,%eax # Does the cpu have what it takes
20641- cpuid
20642- andl $REQUIRED_MASK0,%edx
20643- xorl $REQUIRED_MASK0,%edx
20644- jnz verify_cpu_no_longmode
20645-
20646- movl $0x80000000,%eax # See if extended cpuid is implemented
20647- cpuid
20648- cmpl $0x80000001,%eax
20649- jb verify_cpu_no_longmode # no extended cpuid
20650-
20651- movl $0x80000001,%eax # Does the cpu have what it takes
20652- cpuid
20653- andl $REQUIRED_MASK1,%edx
20654- xorl $REQUIRED_MASK1,%edx
20655- jnz verify_cpu_no_longmode
20656-
20657-verify_cpu_sse_test:
20658- movl $1,%eax
20659- cpuid
20660- andl $SSE_MASK,%edx
20661- cmpl $SSE_MASK,%edx
20662- je verify_cpu_sse_ok
20663- test %di,%di
20664- jz verify_cpu_no_longmode # only try to force SSE on AMD
20665- movl $0xc0010015,%ecx # HWCR
20666- rdmsr
20667- btr $15,%eax # enable SSE
20668- wrmsr
20669- xor %di,%di # don't loop
20670- jmp verify_cpu_sse_test # try again
20671-
20672-verify_cpu_no_longmode:
20673- popfl # Restore caller passed flags
20674- movl $1,%eax
20675- ret
20676-verify_cpu_sse_ok:
20677- popfl # Restore caller passed flags
20678- xorl %eax, %eax
20679- ret
20680diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20681index 9c4e625..c992817 100644
20682--- a/arch/x86/kernel/vm86_32.c
20683+++ b/arch/x86/kernel/vm86_32.c
20684@@ -41,6 +41,7 @@
20685 #include <linux/ptrace.h>
20686 #include <linux/audit.h>
20687 #include <linux/stddef.h>
20688+#include <linux/grsecurity.h>
20689
20690 #include <asm/uaccess.h>
20691 #include <asm/io.h>
20692@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20693 do_exit(SIGSEGV);
20694 }
20695
20696- tss = &per_cpu(init_tss, get_cpu());
20697+ tss = init_tss + get_cpu();
20698 current->thread.sp0 = current->thread.saved_sp0;
20699 current->thread.sysenter_cs = __KERNEL_CS;
20700 load_sp0(tss, &current->thread);
20701@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20702 struct task_struct *tsk;
20703 int tmp, ret = -EPERM;
20704
20705+#ifdef CONFIG_GRKERNSEC_VM86
20706+ if (!capable(CAP_SYS_RAWIO)) {
20707+ gr_handle_vm86();
20708+ goto out;
20709+ }
20710+#endif
20711+
20712 tsk = current;
20713 if (tsk->thread.saved_sp0)
20714 goto out;
20715@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20716 int tmp, ret;
20717 struct vm86plus_struct __user *v86;
20718
20719+#ifdef CONFIG_GRKERNSEC_VM86
20720+ if (!capable(CAP_SYS_RAWIO)) {
20721+ gr_handle_vm86();
20722+ ret = -EPERM;
20723+ goto out;
20724+ }
20725+#endif
20726+
20727 tsk = current;
20728 switch (regs->bx) {
20729 case VM86_REQUEST_IRQ:
20730@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20731 tsk->thread.saved_fs = info->regs32->fs;
20732 tsk->thread.saved_gs = get_user_gs(info->regs32);
20733
20734- tss = &per_cpu(init_tss, get_cpu());
20735+ tss = init_tss + get_cpu();
20736 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20737 if (cpu_has_sep)
20738 tsk->thread.sysenter_cs = 0;
20739@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20740 goto cannot_handle;
20741 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20742 goto cannot_handle;
20743- intr_ptr = (unsigned long __user *) (i << 2);
20744+ intr_ptr = (__force unsigned long __user *) (i << 2);
20745 if (get_user(segoffs, intr_ptr))
20746 goto cannot_handle;
20747 if ((segoffs >> 16) == BIOSSEG)
20748diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20749index d430e4c..831f817 100644
20750--- a/arch/x86/kernel/vmi_32.c
20751+++ b/arch/x86/kernel/vmi_32.c
20752@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20753 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20754
20755 #define call_vrom_func(rom,func) \
20756- (((VROMFUNC *)(rom->func))())
20757+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
20758
20759 #define call_vrom_long_func(rom,func,arg) \
20760- (((VROMLONGFUNC *)(rom->func)) (arg))
20761+({\
20762+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20763+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20764+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20765+ __reloc;\
20766+})
20767
20768-static struct vrom_header *vmi_rom;
20769+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20770 static int disable_pge;
20771 static int disable_pse;
20772 static int disable_sep;
20773@@ -76,10 +81,10 @@ static struct {
20774 void (*set_initial_ap_state)(int, int);
20775 void (*halt)(void);
20776 void (*set_lazy_mode)(int mode);
20777-} vmi_ops;
20778+} __no_const vmi_ops __read_only;
20779
20780 /* Cached VMI operations */
20781-struct vmi_timer_ops vmi_timer_ops;
20782+struct vmi_timer_ops vmi_timer_ops __read_only;
20783
20784 /*
20785 * VMI patching routines.
20786@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20787 static inline void patch_offset(void *insnbuf,
20788 unsigned long ip, unsigned long dest)
20789 {
20790- *(unsigned long *)(insnbuf+1) = dest-ip-5;
20791+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
20792 }
20793
20794 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20795@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20796 {
20797 u64 reloc;
20798 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20799+
20800 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20801 switch(rel->type) {
20802 case VMI_RELOCATION_CALL_REL:
20803@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20804
20805 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20806 {
20807- const pte_t pte = { .pte = 0 };
20808+ const pte_t pte = __pte(0ULL);
20809 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20810 }
20811
20812 static void vmi_pmd_clear(pmd_t *pmd)
20813 {
20814- const pte_t pte = { .pte = 0 };
20815+ const pte_t pte = __pte(0ULL);
20816 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20817 }
20818 #endif
20819@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20820 ap.ss = __KERNEL_DS;
20821 ap.esp = (unsigned long) start_esp;
20822
20823- ap.ds = __USER_DS;
20824- ap.es = __USER_DS;
20825+ ap.ds = __KERNEL_DS;
20826+ ap.es = __KERNEL_DS;
20827 ap.fs = __KERNEL_PERCPU;
20828- ap.gs = __KERNEL_STACK_CANARY;
20829+ savesegment(gs, ap.gs);
20830
20831 ap.eflags = 0;
20832
20833@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20834 paravirt_leave_lazy_mmu();
20835 }
20836
20837+#ifdef CONFIG_PAX_KERNEXEC
20838+static unsigned long vmi_pax_open_kernel(void)
20839+{
20840+ return 0;
20841+}
20842+
20843+static unsigned long vmi_pax_close_kernel(void)
20844+{
20845+ return 0;
20846+}
20847+#endif
20848+
20849 static inline int __init check_vmi_rom(struct vrom_header *rom)
20850 {
20851 struct pci_header *pci;
20852@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20853 return 0;
20854 if (rom->vrom_signature != VMI_SIGNATURE)
20855 return 0;
20856+ if (rom->rom_length * 512 > sizeof(*rom)) {
20857+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20858+ return 0;
20859+ }
20860 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20861 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20862 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20863@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20864 struct vrom_header *romstart;
20865 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20866 if (check_vmi_rom(romstart)) {
20867- vmi_rom = romstart;
20868+ vmi_rom = *romstart;
20869 return 1;
20870 }
20871 }
20872@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20873
20874 para_fill(pv_irq_ops.safe_halt, Halt);
20875
20876+#ifdef CONFIG_PAX_KERNEXEC
20877+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20878+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20879+#endif
20880+
20881 /*
20882 * Alternative instruction rewriting doesn't happen soon enough
20883 * to convert VMI_IRET to a call instead of a jump; so we have
20884@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20885
20886 void __init vmi_init(void)
20887 {
20888- if (!vmi_rom)
20889+ if (!vmi_rom.rom_signature)
20890 probe_vmi_rom();
20891 else
20892- check_vmi_rom(vmi_rom);
20893+ check_vmi_rom(&vmi_rom);
20894
20895 /* In case probing for or validating the ROM failed, basil */
20896- if (!vmi_rom)
20897+ if (!vmi_rom.rom_signature)
20898 return;
20899
20900- reserve_top_address(-vmi_rom->virtual_top);
20901+ reserve_top_address(-vmi_rom.virtual_top);
20902
20903 #ifdef CONFIG_X86_IO_APIC
20904 /* This is virtual hardware; timer routing is wired correctly */
20905@@ -874,7 +901,7 @@ void __init vmi_activate(void)
20906 {
20907 unsigned long flags;
20908
20909- if (!vmi_rom)
20910+ if (!vmi_rom.rom_signature)
20911 return;
20912
20913 local_irq_save(flags);
20914diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20915index 3c68fe2..12c8280 100644
20916--- a/arch/x86/kernel/vmlinux.lds.S
20917+++ b/arch/x86/kernel/vmlinux.lds.S
20918@@ -26,6 +26,13 @@
20919 #include <asm/page_types.h>
20920 #include <asm/cache.h>
20921 #include <asm/boot.h>
20922+#include <asm/segment.h>
20923+
20924+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20925+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20926+#else
20927+#define __KERNEL_TEXT_OFFSET 0
20928+#endif
20929
20930 #undef i386 /* in case the preprocessor is a 32bit one */
20931
20932@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20933 #ifdef CONFIG_X86_32
20934 OUTPUT_ARCH(i386)
20935 ENTRY(phys_startup_32)
20936-jiffies = jiffies_64;
20937 #else
20938 OUTPUT_ARCH(i386:x86-64)
20939 ENTRY(phys_startup_64)
20940-jiffies_64 = jiffies;
20941 #endif
20942
20943 PHDRS {
20944 text PT_LOAD FLAGS(5); /* R_E */
20945- data PT_LOAD FLAGS(7); /* RWE */
20946+#ifdef CONFIG_X86_32
20947+ module PT_LOAD FLAGS(5); /* R_E */
20948+#endif
20949+#ifdef CONFIG_XEN
20950+ rodata PT_LOAD FLAGS(5); /* R_E */
20951+#else
20952+ rodata PT_LOAD FLAGS(4); /* R__ */
20953+#endif
20954+ data PT_LOAD FLAGS(6); /* RW_ */
20955 #ifdef CONFIG_X86_64
20956 user PT_LOAD FLAGS(5); /* R_E */
20957+#endif
20958+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20959 #ifdef CONFIG_SMP
20960 percpu PT_LOAD FLAGS(6); /* RW_ */
20961 #endif
20962+ text.init PT_LOAD FLAGS(5); /* R_E */
20963+ text.exit PT_LOAD FLAGS(5); /* R_E */
20964 init PT_LOAD FLAGS(7); /* RWE */
20965-#endif
20966 note PT_NOTE FLAGS(0); /* ___ */
20967 }
20968
20969 SECTIONS
20970 {
20971 #ifdef CONFIG_X86_32
20972- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20973- phys_startup_32 = startup_32 - LOAD_OFFSET;
20974+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20975 #else
20976- . = __START_KERNEL;
20977- phys_startup_64 = startup_64 - LOAD_OFFSET;
20978+ . = __START_KERNEL;
20979 #endif
20980
20981 /* Text and read-only data */
20982- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20983- _text = .;
20984+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20985 /* bootstrapping code */
20986+#ifdef CONFIG_X86_32
20987+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20988+#else
20989+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20990+#endif
20991+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20992+ _text = .;
20993 HEAD_TEXT
20994 #ifdef CONFIG_X86_32
20995 . = ALIGN(PAGE_SIZE);
20996@@ -82,28 +102,71 @@ SECTIONS
20997 IRQENTRY_TEXT
20998 *(.fixup)
20999 *(.gnu.warning)
21000- /* End of text section */
21001- _etext = .;
21002 } :text = 0x9090
21003
21004- NOTES :text :note
21005+ . += __KERNEL_TEXT_OFFSET;
21006
21007- EXCEPTION_TABLE(16) :text = 0x9090
21008+#ifdef CONFIG_X86_32
21009+ . = ALIGN(PAGE_SIZE);
21010+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
21011+ *(.vmi.rom)
21012+ } :module
21013+
21014+ . = ALIGN(PAGE_SIZE);
21015+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21016+
21017+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21018+ MODULES_EXEC_VADDR = .;
21019+ BYTE(0)
21020+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21021+ . = ALIGN(HPAGE_SIZE);
21022+ MODULES_EXEC_END = . - 1;
21023+#endif
21024+
21025+ } :module
21026+#endif
21027+
21028+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21029+ /* End of text section */
21030+ _etext = . - __KERNEL_TEXT_OFFSET;
21031+ }
21032+
21033+#ifdef CONFIG_X86_32
21034+ . = ALIGN(PAGE_SIZE);
21035+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21036+ *(.idt)
21037+ . = ALIGN(PAGE_SIZE);
21038+ *(.empty_zero_page)
21039+ *(.swapper_pg_fixmap)
21040+ *(.swapper_pg_pmd)
21041+ *(.swapper_pg_dir)
21042+ *(.trampoline_pg_dir)
21043+ } :rodata
21044+#endif
21045+
21046+ . = ALIGN(PAGE_SIZE);
21047+ NOTES :rodata :note
21048+
21049+ EXCEPTION_TABLE(16) :rodata
21050
21051 RO_DATA(PAGE_SIZE)
21052
21053 /* Data */
21054 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21055+
21056+#ifdef CONFIG_PAX_KERNEXEC
21057+ . = ALIGN(HPAGE_SIZE);
21058+#else
21059+ . = ALIGN(PAGE_SIZE);
21060+#endif
21061+
21062 /* Start of data section */
21063 _sdata = .;
21064
21065 /* init_task */
21066 INIT_TASK_DATA(THREAD_SIZE)
21067
21068-#ifdef CONFIG_X86_32
21069- /* 32 bit has nosave before _edata */
21070 NOSAVE_DATA
21071-#endif
21072
21073 PAGE_ALIGNED_DATA(PAGE_SIZE)
21074
21075@@ -112,6 +175,8 @@ SECTIONS
21076 DATA_DATA
21077 CONSTRUCTORS
21078
21079+ jiffies = jiffies_64;
21080+
21081 /* rarely changed data like cpu maps */
21082 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
21083
21084@@ -166,12 +231,6 @@ SECTIONS
21085 }
21086 vgetcpu_mode = VVIRT(.vgetcpu_mode);
21087
21088- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
21089- .jiffies : AT(VLOAD(.jiffies)) {
21090- *(.jiffies)
21091- }
21092- jiffies = VVIRT(.jiffies);
21093-
21094 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
21095 *(.vsyscall_3)
21096 }
21097@@ -187,12 +246,19 @@ SECTIONS
21098 #endif /* CONFIG_X86_64 */
21099
21100 /* Init code and data - will be freed after init */
21101- . = ALIGN(PAGE_SIZE);
21102 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21103+ BYTE(0)
21104+
21105+#ifdef CONFIG_PAX_KERNEXEC
21106+ . = ALIGN(HPAGE_SIZE);
21107+#else
21108+ . = ALIGN(PAGE_SIZE);
21109+#endif
21110+
21111 __init_begin = .; /* paired with __init_end */
21112- }
21113+ } :init.begin
21114
21115-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21116+#ifdef CONFIG_SMP
21117 /*
21118 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21119 * output PHDR, so the next output section - .init.text - should
21120@@ -201,12 +267,27 @@ SECTIONS
21121 PERCPU_VADDR(0, :percpu)
21122 #endif
21123
21124- INIT_TEXT_SECTION(PAGE_SIZE)
21125-#ifdef CONFIG_X86_64
21126- :init
21127-#endif
21128+ . = ALIGN(PAGE_SIZE);
21129+ init_begin = .;
21130+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
21131+ VMLINUX_SYMBOL(_sinittext) = .;
21132+ INIT_TEXT
21133+ VMLINUX_SYMBOL(_einittext) = .;
21134+ . = ALIGN(PAGE_SIZE);
21135+ } :text.init
21136
21137- INIT_DATA_SECTION(16)
21138+ /*
21139+ * .exit.text is discard at runtime, not link time, to deal with
21140+ * references from .altinstructions and .eh_frame
21141+ */
21142+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21143+ EXIT_TEXT
21144+ . = ALIGN(16);
21145+ } :text.exit
21146+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
21147+
21148+ . = ALIGN(PAGE_SIZE);
21149+ INIT_DATA_SECTION(16) :init
21150
21151 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
21152 __x86_cpu_dev_start = .;
21153@@ -232,19 +313,11 @@ SECTIONS
21154 *(.altinstr_replacement)
21155 }
21156
21157- /*
21158- * .exit.text is discard at runtime, not link time, to deal with
21159- * references from .altinstructions and .eh_frame
21160- */
21161- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
21162- EXIT_TEXT
21163- }
21164-
21165 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
21166 EXIT_DATA
21167 }
21168
21169-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
21170+#ifndef CONFIG_SMP
21171 PERCPU(PAGE_SIZE)
21172 #endif
21173
21174@@ -267,12 +340,6 @@ SECTIONS
21175 . = ALIGN(PAGE_SIZE);
21176 }
21177
21178-#ifdef CONFIG_X86_64
21179- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
21180- NOSAVE_DATA
21181- }
21182-#endif
21183-
21184 /* BSS */
21185 . = ALIGN(PAGE_SIZE);
21186 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
21187@@ -288,6 +355,7 @@ SECTIONS
21188 __brk_base = .;
21189 . += 64 * 1024; /* 64k alignment slop space */
21190 *(.brk_reservation) /* areas brk users have reserved */
21191+ . = ALIGN(HPAGE_SIZE);
21192 __brk_limit = .;
21193 }
21194
21195@@ -316,13 +384,12 @@ SECTIONS
21196 * for the boot processor.
21197 */
21198 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
21199-INIT_PER_CPU(gdt_page);
21200 INIT_PER_CPU(irq_stack_union);
21201
21202 /*
21203 * Build-time check on the image size:
21204 */
21205-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
21206+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
21207 "kernel image bigger than KERNEL_IMAGE_SIZE");
21208
21209 #ifdef CONFIG_SMP
21210diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
21211index 62f39d7..3bc46a1 100644
21212--- a/arch/x86/kernel/vsyscall_64.c
21213+++ b/arch/x86/kernel/vsyscall_64.c
21214@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
21215
21216 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
21217 /* copy vsyscall data */
21218+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
21219 vsyscall_gtod_data.clock.vread = clock->vread;
21220 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
21221 vsyscall_gtod_data.clock.mask = clock->mask;
21222@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
21223 We do this here because otherwise user space would do it on
21224 its own in a likely inferior way (no access to jiffies).
21225 If you don't like it pass NULL. */
21226- if (tcache && tcache->blob[0] == (j = __jiffies)) {
21227+ if (tcache && tcache->blob[0] == (j = jiffies)) {
21228 p = tcache->blob[1];
21229 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21230 /* Load per CPU data from RDTSCP */
21231diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21232index 3909e3b..5433a97 100644
21233--- a/arch/x86/kernel/x8664_ksyms_64.c
21234+++ b/arch/x86/kernel/x8664_ksyms_64.c
21235@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21236
21237 EXPORT_SYMBOL(copy_user_generic);
21238 EXPORT_SYMBOL(__copy_user_nocache);
21239-EXPORT_SYMBOL(copy_from_user);
21240-EXPORT_SYMBOL(copy_to_user);
21241 EXPORT_SYMBOL(__copy_from_user_inatomic);
21242
21243 EXPORT_SYMBOL(copy_page);
21244diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21245index c5ee17e..d63218f 100644
21246--- a/arch/x86/kernel/xsave.c
21247+++ b/arch/x86/kernel/xsave.c
21248@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21249 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21250 return -1;
21251
21252- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21253+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21254 fx_sw_user->extended_size -
21255 FP_XSTATE_MAGIC2_SIZE));
21256 /*
21257@@ -196,7 +196,7 @@ fx_only:
21258 * the other extended state.
21259 */
21260 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21261- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21262+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21263 }
21264
21265 /*
21266@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21267 if (task_thread_info(tsk)->status & TS_XSAVE)
21268 err = restore_user_xstate(buf);
21269 else
21270- err = fxrstor_checking((__force struct i387_fxsave_struct *)
21271+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
21272 buf);
21273 if (unlikely(err)) {
21274 /*
21275diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21276index 1350e43..a94b011 100644
21277--- a/arch/x86/kvm/emulate.c
21278+++ b/arch/x86/kvm/emulate.c
21279@@ -81,8 +81,8 @@
21280 #define Src2CL (1<<29)
21281 #define Src2ImmByte (2<<29)
21282 #define Src2One (3<<29)
21283-#define Src2Imm16 (4<<29)
21284-#define Src2Mask (7<<29)
21285+#define Src2Imm16 (4U<<29)
21286+#define Src2Mask (7U<<29)
21287
21288 enum {
21289 Group1_80, Group1_81, Group1_82, Group1_83,
21290@@ -411,6 +411,7 @@ static u32 group2_table[] = {
21291
21292 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21293 do { \
21294+ unsigned long _tmp; \
21295 __asm__ __volatile__ ( \
21296 _PRE_EFLAGS("0", "4", "2") \
21297 _op _suffix " %"_x"3,%1; " \
21298@@ -424,8 +425,6 @@ static u32 group2_table[] = {
21299 /* Raw emulation: instruction has two explicit operands. */
21300 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21301 do { \
21302- unsigned long _tmp; \
21303- \
21304 switch ((_dst).bytes) { \
21305 case 2: \
21306 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21307@@ -441,7 +440,6 @@ static u32 group2_table[] = {
21308
21309 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21310 do { \
21311- unsigned long _tmp; \
21312 switch ((_dst).bytes) { \
21313 case 1: \
21314 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21315diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21316index 8dfeaaa..4daa395 100644
21317--- a/arch/x86/kvm/lapic.c
21318+++ b/arch/x86/kvm/lapic.c
21319@@ -52,7 +52,7 @@
21320 #define APIC_BUS_CYCLE_NS 1
21321
21322 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21323-#define apic_debug(fmt, arg...)
21324+#define apic_debug(fmt, arg...) do {} while (0)
21325
21326 #define APIC_LVT_NUM 6
21327 /* 14 is the version for Xeon and Pentium 8.4.8*/
21328diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21329index 3bc2707..dd157e2 100644
21330--- a/arch/x86/kvm/paging_tmpl.h
21331+++ b/arch/x86/kvm/paging_tmpl.h
21332@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21333 int level = PT_PAGE_TABLE_LEVEL;
21334 unsigned long mmu_seq;
21335
21336+ pax_track_stack();
21337+
21338 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21339 kvm_mmu_audit(vcpu, "pre page fault");
21340
21341@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21342 kvm_mmu_free_some_pages(vcpu);
21343 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21344 level, &write_pt, pfn);
21345+ (void)sptep;
21346 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21347 sptep, *sptep, write_pt);
21348
21349diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21350index 7c6e63e..c5d92c1 100644
21351--- a/arch/x86/kvm/svm.c
21352+++ b/arch/x86/kvm/svm.c
21353@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21354 int cpu = raw_smp_processor_id();
21355
21356 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21357+
21358+ pax_open_kernel();
21359 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21360+ pax_close_kernel();
21361+
21362 load_TR_desc();
21363 }
21364
21365@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21366 return true;
21367 }
21368
21369-static struct kvm_x86_ops svm_x86_ops = {
21370+static const struct kvm_x86_ops svm_x86_ops = {
21371 .cpu_has_kvm_support = has_svm,
21372 .disabled_by_bios = is_disabled,
21373 .hardware_setup = svm_hardware_setup,
21374diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21375index e6d925f..e7a4af8 100644
21376--- a/arch/x86/kvm/vmx.c
21377+++ b/arch/x86/kvm/vmx.c
21378@@ -570,7 +570,11 @@ static void reload_tss(void)
21379
21380 kvm_get_gdt(&gdt);
21381 descs = (void *)gdt.base;
21382+
21383+ pax_open_kernel();
21384 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21385+ pax_close_kernel();
21386+
21387 load_TR_desc();
21388 }
21389
21390@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21391 if (!cpu_has_vmx_flexpriority())
21392 flexpriority_enabled = 0;
21393
21394- if (!cpu_has_vmx_tpr_shadow())
21395- kvm_x86_ops->update_cr8_intercept = NULL;
21396+ if (!cpu_has_vmx_tpr_shadow()) {
21397+ pax_open_kernel();
21398+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21399+ pax_close_kernel();
21400+ }
21401
21402 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21403 kvm_disable_largepages();
21404@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21405 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21406
21407 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21408- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21409+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21410 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21411 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21412 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21413@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21414 "jmp .Lkvm_vmx_return \n\t"
21415 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21416 ".Lkvm_vmx_return: "
21417+
21418+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21419+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21420+ ".Lkvm_vmx_return2: "
21421+#endif
21422+
21423 /* Save guest registers, load host registers, keep flags */
21424 "xchg %0, (%%"R"sp) \n\t"
21425 "mov %%"R"ax, %c[rax](%0) \n\t"
21426@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21427 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21428 #endif
21429 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21430+
21431+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21432+ ,[cs]"i"(__KERNEL_CS)
21433+#endif
21434+
21435 : "cc", "memory"
21436- , R"bx", R"di", R"si"
21437+ , R"ax", R"bx", R"di", R"si"
21438 #ifdef CONFIG_X86_64
21439 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21440 #endif
21441@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21442 if (vmx->rmode.irq.pending)
21443 fixup_rmode_irq(vmx);
21444
21445- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21446+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21447+
21448+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21449+ loadsegment(fs, __KERNEL_PERCPU);
21450+#endif
21451+
21452+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21453+ __set_fs(current_thread_info()->addr_limit);
21454+#endif
21455+
21456 vmx->launched = 1;
21457
21458 vmx_complete_interrupts(vmx);
21459@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21460 return false;
21461 }
21462
21463-static struct kvm_x86_ops vmx_x86_ops = {
21464+static const struct kvm_x86_ops vmx_x86_ops = {
21465 .cpu_has_kvm_support = cpu_has_kvm_support,
21466 .disabled_by_bios = vmx_disabled_by_bios,
21467 .hardware_setup = hardware_setup,
21468diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21469index df1cefb..5e882ad 100644
21470--- a/arch/x86/kvm/x86.c
21471+++ b/arch/x86/kvm/x86.c
21472@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21473 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21474 struct kvm_cpuid_entry2 __user *entries);
21475
21476-struct kvm_x86_ops *kvm_x86_ops;
21477+const struct kvm_x86_ops *kvm_x86_ops;
21478 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21479
21480 int ignore_msrs = 0;
21481@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21482 struct kvm_cpuid2 *cpuid,
21483 struct kvm_cpuid_entry2 __user *entries)
21484 {
21485- int r;
21486+ int r, i;
21487
21488 r = -E2BIG;
21489 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21490 goto out;
21491 r = -EFAULT;
21492- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21493- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21494+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21495 goto out;
21496+ for (i = 0; i < cpuid->nent; ++i) {
21497+ struct kvm_cpuid_entry2 cpuid_entry;
21498+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21499+ goto out;
21500+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21501+ }
21502 vcpu->arch.cpuid_nent = cpuid->nent;
21503 kvm_apic_set_version(vcpu);
21504 return 0;
21505@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21506 struct kvm_cpuid2 *cpuid,
21507 struct kvm_cpuid_entry2 __user *entries)
21508 {
21509- int r;
21510+ int r, i;
21511
21512 vcpu_load(vcpu);
21513 r = -E2BIG;
21514 if (cpuid->nent < vcpu->arch.cpuid_nent)
21515 goto out;
21516 r = -EFAULT;
21517- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21518- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21519+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21520 goto out;
21521+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21522+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21523+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21524+ goto out;
21525+ }
21526 return 0;
21527
21528 out:
21529@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21530 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21531 struct kvm_interrupt *irq)
21532 {
21533- if (irq->irq < 0 || irq->irq >= 256)
21534+ if (irq->irq >= 256)
21535 return -EINVAL;
21536 if (irqchip_in_kernel(vcpu->kvm))
21537 return -ENXIO;
21538@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21539 .notifier_call = kvmclock_cpufreq_notifier
21540 };
21541
21542-int kvm_arch_init(void *opaque)
21543+int kvm_arch_init(const void *opaque)
21544 {
21545 int r, cpu;
21546- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21547+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21548
21549 if (kvm_x86_ops) {
21550 printk(KERN_ERR "kvm: already loaded the other module\n");
21551diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21552index 7e59dc1..b88c98f 100644
21553--- a/arch/x86/lguest/boot.c
21554+++ b/arch/x86/lguest/boot.c
21555@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21556 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21557 * Launcher to reboot us.
21558 */
21559-static void lguest_restart(char *reason)
21560+static __noreturn void lguest_restart(char *reason)
21561 {
21562 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21563+ BUG();
21564 }
21565
21566 /*G:050
21567diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21568index 824fa0b..c619e96 100644
21569--- a/arch/x86/lib/atomic64_32.c
21570+++ b/arch/x86/lib/atomic64_32.c
21571@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21572 }
21573 EXPORT_SYMBOL(atomic64_cmpxchg);
21574
21575+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21576+{
21577+ return cmpxchg8b(&ptr->counter, old_val, new_val);
21578+}
21579+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21580+
21581 /**
21582 * atomic64_xchg - xchg atomic64 variable
21583 * @ptr: pointer to type atomic64_t
21584@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21585 EXPORT_SYMBOL(atomic64_xchg);
21586
21587 /**
21588+ * atomic64_xchg_unchecked - xchg atomic64 variable
21589+ * @ptr: pointer to type atomic64_unchecked_t
21590+ * @new_val: value to assign
21591+ *
21592+ * Atomically xchgs the value of @ptr to @new_val and returns
21593+ * the old value.
21594+ */
21595+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21596+{
21597+ /*
21598+ * Try first with a (possibly incorrect) assumption about
21599+ * what we have there. We'll do two loops most likely,
21600+ * but we'll get an ownership MESI transaction straight away
21601+ * instead of a read transaction followed by a
21602+ * flush-for-ownership transaction:
21603+ */
21604+ u64 old_val, real_val = 0;
21605+
21606+ do {
21607+ old_val = real_val;
21608+
21609+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21610+
21611+ } while (real_val != old_val);
21612+
21613+ return old_val;
21614+}
21615+EXPORT_SYMBOL(atomic64_xchg_unchecked);
21616+
21617+/**
21618 * atomic64_set - set atomic64 variable
21619 * @ptr: pointer to type atomic64_t
21620 * @new_val: value to assign
21621@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21622 EXPORT_SYMBOL(atomic64_set);
21623
21624 /**
21625-EXPORT_SYMBOL(atomic64_read);
21626+ * atomic64_unchecked_set - set atomic64 variable
21627+ * @ptr: pointer to type atomic64_unchecked_t
21628+ * @new_val: value to assign
21629+ *
21630+ * Atomically sets the value of @ptr to @new_val.
21631+ */
21632+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21633+{
21634+ atomic64_xchg_unchecked(ptr, new_val);
21635+}
21636+EXPORT_SYMBOL(atomic64_set_unchecked);
21637+
21638+/**
21639 * atomic64_add_return - add and return
21640 * @delta: integer value to add
21641 * @ptr: pointer to type atomic64_t
21642@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21643 }
21644 EXPORT_SYMBOL(atomic64_add_return);
21645
21646+/**
21647+ * atomic64_add_return_unchecked - add and return
21648+ * @delta: integer value to add
21649+ * @ptr: pointer to type atomic64_unchecked_t
21650+ *
21651+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
21652+ */
21653+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21654+{
21655+ /*
21656+ * Try first with a (possibly incorrect) assumption about
21657+ * what we have there. We'll do two loops most likely,
21658+ * but we'll get an ownership MESI transaction straight away
21659+ * instead of a read transaction followed by a
21660+ * flush-for-ownership transaction:
21661+ */
21662+ u64 old_val, new_val, real_val = 0;
21663+
21664+ do {
21665+ old_val = real_val;
21666+ new_val = old_val + delta;
21667+
21668+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21669+
21670+ } while (real_val != old_val);
21671+
21672+ return new_val;
21673+}
21674+EXPORT_SYMBOL(atomic64_add_return_unchecked);
21675+
21676 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21677 {
21678 return atomic64_add_return(-delta, ptr);
21679 }
21680 EXPORT_SYMBOL(atomic64_sub_return);
21681
21682+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21683+{
21684+ return atomic64_add_return_unchecked(-delta, ptr);
21685+}
21686+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21687+
21688 u64 atomic64_inc_return(atomic64_t *ptr)
21689 {
21690 return atomic64_add_return(1, ptr);
21691 }
21692 EXPORT_SYMBOL(atomic64_inc_return);
21693
21694+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21695+{
21696+ return atomic64_add_return_unchecked(1, ptr);
21697+}
21698+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21699+
21700 u64 atomic64_dec_return(atomic64_t *ptr)
21701 {
21702 return atomic64_sub_return(1, ptr);
21703 }
21704 EXPORT_SYMBOL(atomic64_dec_return);
21705
21706+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21707+{
21708+ return atomic64_sub_return_unchecked(1, ptr);
21709+}
21710+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21711+
21712 /**
21713 * atomic64_add - add integer to atomic64 variable
21714 * @delta: integer value to add
21715@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21716 EXPORT_SYMBOL(atomic64_add);
21717
21718 /**
21719+ * atomic64_add_unchecked - add integer to atomic64 variable
21720+ * @delta: integer value to add
21721+ * @ptr: pointer to type atomic64_unchecked_t
21722+ *
21723+ * Atomically adds @delta to @ptr.
21724+ */
21725+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21726+{
21727+ atomic64_add_return_unchecked(delta, ptr);
21728+}
21729+EXPORT_SYMBOL(atomic64_add_unchecked);
21730+
21731+/**
21732 * atomic64_sub - subtract the atomic64 variable
21733 * @delta: integer value to subtract
21734 * @ptr: pointer to type atomic64_t
21735@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21736 EXPORT_SYMBOL(atomic64_sub);
21737
21738 /**
21739+ * atomic64_sub_unchecked - subtract the atomic64 variable
21740+ * @delta: integer value to subtract
21741+ * @ptr: pointer to type atomic64_unchecked_t
21742+ *
21743+ * Atomically subtracts @delta from @ptr.
21744+ */
21745+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21746+{
21747+ atomic64_add_unchecked(-delta, ptr);
21748+}
21749+EXPORT_SYMBOL(atomic64_sub_unchecked);
21750+
21751+/**
21752 * atomic64_sub_and_test - subtract value from variable and test result
21753 * @delta: integer value to subtract
21754 * @ptr: pointer to type atomic64_t
21755@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21756 EXPORT_SYMBOL(atomic64_inc);
21757
21758 /**
21759+ * atomic64_inc_unchecked - increment atomic64 variable
21760+ * @ptr: pointer to type atomic64_unchecked_t
21761+ *
21762+ * Atomically increments @ptr by 1.
21763+ */
21764+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21765+{
21766+ atomic64_add_unchecked(1, ptr);
21767+}
21768+EXPORT_SYMBOL(atomic64_inc_unchecked);
21769+
21770+/**
21771 * atomic64_dec - decrement atomic64 variable
21772 * @ptr: pointer to type atomic64_t
21773 *
21774@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21775 EXPORT_SYMBOL(atomic64_dec);
21776
21777 /**
21778+ * atomic64_dec_unchecked - decrement atomic64 variable
21779+ * @ptr: pointer to type atomic64_unchecked_t
21780+ *
21781+ * Atomically decrements @ptr by 1.
21782+ */
21783+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21784+{
21785+ atomic64_sub_unchecked(1, ptr);
21786+}
21787+EXPORT_SYMBOL(atomic64_dec_unchecked);
21788+
21789+/**
21790 * atomic64_dec_and_test - decrement and test
21791 * @ptr: pointer to type atomic64_t
21792 *
21793diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21794index adbccd0..98f96c8 100644
21795--- a/arch/x86/lib/checksum_32.S
21796+++ b/arch/x86/lib/checksum_32.S
21797@@ -28,7 +28,8 @@
21798 #include <linux/linkage.h>
21799 #include <asm/dwarf2.h>
21800 #include <asm/errno.h>
21801-
21802+#include <asm/segment.h>
21803+
21804 /*
21805 * computes a partial checksum, e.g. for TCP/UDP fragments
21806 */
21807@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21808
21809 #define ARGBASE 16
21810 #define FP 12
21811-
21812-ENTRY(csum_partial_copy_generic)
21813+
21814+ENTRY(csum_partial_copy_generic_to_user)
21815 CFI_STARTPROC
21816+
21817+#ifdef CONFIG_PAX_MEMORY_UDEREF
21818+ pushl %gs
21819+ CFI_ADJUST_CFA_OFFSET 4
21820+ popl %es
21821+ CFI_ADJUST_CFA_OFFSET -4
21822+ jmp csum_partial_copy_generic
21823+#endif
21824+
21825+ENTRY(csum_partial_copy_generic_from_user)
21826+
21827+#ifdef CONFIG_PAX_MEMORY_UDEREF
21828+ pushl %gs
21829+ CFI_ADJUST_CFA_OFFSET 4
21830+ popl %ds
21831+ CFI_ADJUST_CFA_OFFSET -4
21832+#endif
21833+
21834+ENTRY(csum_partial_copy_generic)
21835 subl $4,%esp
21836 CFI_ADJUST_CFA_OFFSET 4
21837 pushl %edi
21838@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21839 jmp 4f
21840 SRC(1: movw (%esi), %bx )
21841 addl $2, %esi
21842-DST( movw %bx, (%edi) )
21843+DST( movw %bx, %es:(%edi) )
21844 addl $2, %edi
21845 addw %bx, %ax
21846 adcl $0, %eax
21847@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21848 SRC(1: movl (%esi), %ebx )
21849 SRC( movl 4(%esi), %edx )
21850 adcl %ebx, %eax
21851-DST( movl %ebx, (%edi) )
21852+DST( movl %ebx, %es:(%edi) )
21853 adcl %edx, %eax
21854-DST( movl %edx, 4(%edi) )
21855+DST( movl %edx, %es:4(%edi) )
21856
21857 SRC( movl 8(%esi), %ebx )
21858 SRC( movl 12(%esi), %edx )
21859 adcl %ebx, %eax
21860-DST( movl %ebx, 8(%edi) )
21861+DST( movl %ebx, %es:8(%edi) )
21862 adcl %edx, %eax
21863-DST( movl %edx, 12(%edi) )
21864+DST( movl %edx, %es:12(%edi) )
21865
21866 SRC( movl 16(%esi), %ebx )
21867 SRC( movl 20(%esi), %edx )
21868 adcl %ebx, %eax
21869-DST( movl %ebx, 16(%edi) )
21870+DST( movl %ebx, %es:16(%edi) )
21871 adcl %edx, %eax
21872-DST( movl %edx, 20(%edi) )
21873+DST( movl %edx, %es:20(%edi) )
21874
21875 SRC( movl 24(%esi), %ebx )
21876 SRC( movl 28(%esi), %edx )
21877 adcl %ebx, %eax
21878-DST( movl %ebx, 24(%edi) )
21879+DST( movl %ebx, %es:24(%edi) )
21880 adcl %edx, %eax
21881-DST( movl %edx, 28(%edi) )
21882+DST( movl %edx, %es:28(%edi) )
21883
21884 lea 32(%esi), %esi
21885 lea 32(%edi), %edi
21886@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21887 shrl $2, %edx # This clears CF
21888 SRC(3: movl (%esi), %ebx )
21889 adcl %ebx, %eax
21890-DST( movl %ebx, (%edi) )
21891+DST( movl %ebx, %es:(%edi) )
21892 lea 4(%esi), %esi
21893 lea 4(%edi), %edi
21894 dec %edx
21895@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21896 jb 5f
21897 SRC( movw (%esi), %cx )
21898 leal 2(%esi), %esi
21899-DST( movw %cx, (%edi) )
21900+DST( movw %cx, %es:(%edi) )
21901 leal 2(%edi), %edi
21902 je 6f
21903 shll $16,%ecx
21904 SRC(5: movb (%esi), %cl )
21905-DST( movb %cl, (%edi) )
21906+DST( movb %cl, %es:(%edi) )
21907 6: addl %ecx, %eax
21908 adcl $0, %eax
21909 7:
21910@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21911
21912 6001:
21913 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21914- movl $-EFAULT, (%ebx)
21915+ movl $-EFAULT, %ss:(%ebx)
21916
21917 # zero the complete destination - computing the rest
21918 # is too much work
21919@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21920
21921 6002:
21922 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21923- movl $-EFAULT,(%ebx)
21924+ movl $-EFAULT,%ss:(%ebx)
21925 jmp 5000b
21926
21927 .previous
21928
21929+ pushl %ss
21930+ CFI_ADJUST_CFA_OFFSET 4
21931+ popl %ds
21932+ CFI_ADJUST_CFA_OFFSET -4
21933+ pushl %ss
21934+ CFI_ADJUST_CFA_OFFSET 4
21935+ popl %es
21936+ CFI_ADJUST_CFA_OFFSET -4
21937 popl %ebx
21938 CFI_ADJUST_CFA_OFFSET -4
21939 CFI_RESTORE ebx
21940@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21941 CFI_ADJUST_CFA_OFFSET -4
21942 ret
21943 CFI_ENDPROC
21944-ENDPROC(csum_partial_copy_generic)
21945+ENDPROC(csum_partial_copy_generic_to_user)
21946
21947 #else
21948
21949 /* Version for PentiumII/PPro */
21950
21951 #define ROUND1(x) \
21952+ nop; nop; nop; \
21953 SRC(movl x(%esi), %ebx ) ; \
21954 addl %ebx, %eax ; \
21955- DST(movl %ebx, x(%edi) ) ;
21956+ DST(movl %ebx, %es:x(%edi)) ;
21957
21958 #define ROUND(x) \
21959+ nop; nop; nop; \
21960 SRC(movl x(%esi), %ebx ) ; \
21961 adcl %ebx, %eax ; \
21962- DST(movl %ebx, x(%edi) ) ;
21963+ DST(movl %ebx, %es:x(%edi)) ;
21964
21965 #define ARGBASE 12
21966-
21967-ENTRY(csum_partial_copy_generic)
21968+
21969+ENTRY(csum_partial_copy_generic_to_user)
21970 CFI_STARTPROC
21971+
21972+#ifdef CONFIG_PAX_MEMORY_UDEREF
21973+ pushl %gs
21974+ CFI_ADJUST_CFA_OFFSET 4
21975+ popl %es
21976+ CFI_ADJUST_CFA_OFFSET -4
21977+ jmp csum_partial_copy_generic
21978+#endif
21979+
21980+ENTRY(csum_partial_copy_generic_from_user)
21981+
21982+#ifdef CONFIG_PAX_MEMORY_UDEREF
21983+ pushl %gs
21984+ CFI_ADJUST_CFA_OFFSET 4
21985+ popl %ds
21986+ CFI_ADJUST_CFA_OFFSET -4
21987+#endif
21988+
21989+ENTRY(csum_partial_copy_generic)
21990 pushl %ebx
21991 CFI_ADJUST_CFA_OFFSET 4
21992 CFI_REL_OFFSET ebx, 0
21993@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21994 subl %ebx, %edi
21995 lea -1(%esi),%edx
21996 andl $-32,%edx
21997- lea 3f(%ebx,%ebx), %ebx
21998+ lea 3f(%ebx,%ebx,2), %ebx
21999 testl %esi, %esi
22000 jmp *%ebx
22001 1: addl $64,%esi
22002@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
22003 jb 5f
22004 SRC( movw (%esi), %dx )
22005 leal 2(%esi), %esi
22006-DST( movw %dx, (%edi) )
22007+DST( movw %dx, %es:(%edi) )
22008 leal 2(%edi), %edi
22009 je 6f
22010 shll $16,%edx
22011 5:
22012 SRC( movb (%esi), %dl )
22013-DST( movb %dl, (%edi) )
22014+DST( movb %dl, %es:(%edi) )
22015 6: addl %edx, %eax
22016 adcl $0, %eax
22017 7:
22018 .section .fixup, "ax"
22019 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22020- movl $-EFAULT, (%ebx)
22021+ movl $-EFAULT, %ss:(%ebx)
22022 # zero the complete destination (computing the rest is too much work)
22023 movl ARGBASE+8(%esp),%edi # dst
22024 movl ARGBASE+12(%esp),%ecx # len
22025@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
22026 rep; stosb
22027 jmp 7b
22028 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22029- movl $-EFAULT, (%ebx)
22030+ movl $-EFAULT, %ss:(%ebx)
22031 jmp 7b
22032 .previous
22033
22034+#ifdef CONFIG_PAX_MEMORY_UDEREF
22035+ pushl %ss
22036+ CFI_ADJUST_CFA_OFFSET 4
22037+ popl %ds
22038+ CFI_ADJUST_CFA_OFFSET -4
22039+ pushl %ss
22040+ CFI_ADJUST_CFA_OFFSET 4
22041+ popl %es
22042+ CFI_ADJUST_CFA_OFFSET -4
22043+#endif
22044+
22045 popl %esi
22046 CFI_ADJUST_CFA_OFFSET -4
22047 CFI_RESTORE esi
22048@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
22049 CFI_RESTORE ebx
22050 ret
22051 CFI_ENDPROC
22052-ENDPROC(csum_partial_copy_generic)
22053+ENDPROC(csum_partial_copy_generic_to_user)
22054
22055 #undef ROUND
22056 #undef ROUND1
22057diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22058index ebeafcc..1e3a402 100644
22059--- a/arch/x86/lib/clear_page_64.S
22060+++ b/arch/x86/lib/clear_page_64.S
22061@@ -1,5 +1,6 @@
22062 #include <linux/linkage.h>
22063 #include <asm/dwarf2.h>
22064+#include <asm/alternative-asm.h>
22065
22066 /*
22067 * Zero a page.
22068@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
22069 movl $4096/8,%ecx
22070 xorl %eax,%eax
22071 rep stosq
22072+ pax_force_retaddr
22073 ret
22074 CFI_ENDPROC
22075 ENDPROC(clear_page_c)
22076@@ -33,6 +35,7 @@ ENTRY(clear_page)
22077 leaq 64(%rdi),%rdi
22078 jnz .Lloop
22079 nop
22080+ pax_force_retaddr
22081 ret
22082 CFI_ENDPROC
22083 .Lclear_page_end:
22084@@ -43,7 +46,7 @@ ENDPROC(clear_page)
22085
22086 #include <asm/cpufeature.h>
22087
22088- .section .altinstr_replacement,"ax"
22089+ .section .altinstr_replacement,"a"
22090 1: .byte 0xeb /* jmp <disp8> */
22091 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22092 2:
22093diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22094index 727a5d4..333818a 100644
22095--- a/arch/x86/lib/copy_page_64.S
22096+++ b/arch/x86/lib/copy_page_64.S
22097@@ -2,12 +2,14 @@
22098
22099 #include <linux/linkage.h>
22100 #include <asm/dwarf2.h>
22101+#include <asm/alternative-asm.h>
22102
22103 ALIGN
22104 copy_page_c:
22105 CFI_STARTPROC
22106 movl $4096/8,%ecx
22107 rep movsq
22108+ pax_force_retaddr
22109 ret
22110 CFI_ENDPROC
22111 ENDPROC(copy_page_c)
22112@@ -38,7 +40,7 @@ ENTRY(copy_page)
22113 movq 16 (%rsi), %rdx
22114 movq 24 (%rsi), %r8
22115 movq 32 (%rsi), %r9
22116- movq 40 (%rsi), %r10
22117+ movq 40 (%rsi), %r13
22118 movq 48 (%rsi), %r11
22119 movq 56 (%rsi), %r12
22120
22121@@ -49,7 +51,7 @@ ENTRY(copy_page)
22122 movq %rdx, 16 (%rdi)
22123 movq %r8, 24 (%rdi)
22124 movq %r9, 32 (%rdi)
22125- movq %r10, 40 (%rdi)
22126+ movq %r13, 40 (%rdi)
22127 movq %r11, 48 (%rdi)
22128 movq %r12, 56 (%rdi)
22129
22130@@ -68,7 +70,7 @@ ENTRY(copy_page)
22131 movq 16 (%rsi), %rdx
22132 movq 24 (%rsi), %r8
22133 movq 32 (%rsi), %r9
22134- movq 40 (%rsi), %r10
22135+ movq 40 (%rsi), %r13
22136 movq 48 (%rsi), %r11
22137 movq 56 (%rsi), %r12
22138
22139@@ -77,7 +79,7 @@ ENTRY(copy_page)
22140 movq %rdx, 16 (%rdi)
22141 movq %r8, 24 (%rdi)
22142 movq %r9, 32 (%rdi)
22143- movq %r10, 40 (%rdi)
22144+ movq %r13, 40 (%rdi)
22145 movq %r11, 48 (%rdi)
22146 movq %r12, 56 (%rdi)
22147
22148@@ -94,6 +96,7 @@ ENTRY(copy_page)
22149 CFI_RESTORE r13
22150 addq $3*8,%rsp
22151 CFI_ADJUST_CFA_OFFSET -3*8
22152+ pax_force_retaddr
22153 ret
22154 .Lcopy_page_end:
22155 CFI_ENDPROC
22156@@ -104,7 +107,7 @@ ENDPROC(copy_page)
22157
22158 #include <asm/cpufeature.h>
22159
22160- .section .altinstr_replacement,"ax"
22161+ .section .altinstr_replacement,"a"
22162 1: .byte 0xeb /* jmp <disp8> */
22163 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
22164 2:
22165diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
22166index af8debd..40c75f3 100644
22167--- a/arch/x86/lib/copy_user_64.S
22168+++ b/arch/x86/lib/copy_user_64.S
22169@@ -15,13 +15,15 @@
22170 #include <asm/asm-offsets.h>
22171 #include <asm/thread_info.h>
22172 #include <asm/cpufeature.h>
22173+#include <asm/pgtable.h>
22174+#include <asm/alternative-asm.h>
22175
22176 .macro ALTERNATIVE_JUMP feature,orig,alt
22177 0:
22178 .byte 0xe9 /* 32bit jump */
22179 .long \orig-1f /* by default jump to orig */
22180 1:
22181- .section .altinstr_replacement,"ax"
22182+ .section .altinstr_replacement,"a"
22183 2: .byte 0xe9 /* near jump with 32bit immediate */
22184 .long \alt-1b /* offset */ /* or alternatively to alt */
22185 .previous
22186@@ -64,55 +66,26 @@
22187 #endif
22188 .endm
22189
22190-/* Standard copy_to_user with segment limit checking */
22191-ENTRY(copy_to_user)
22192- CFI_STARTPROC
22193- GET_THREAD_INFO(%rax)
22194- movq %rdi,%rcx
22195- addq %rdx,%rcx
22196- jc bad_to_user
22197- cmpq TI_addr_limit(%rax),%rcx
22198- ja bad_to_user
22199- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22200- CFI_ENDPROC
22201-ENDPROC(copy_to_user)
22202-
22203-/* Standard copy_from_user with segment limit checking */
22204-ENTRY(copy_from_user)
22205- CFI_STARTPROC
22206- GET_THREAD_INFO(%rax)
22207- movq %rsi,%rcx
22208- addq %rdx,%rcx
22209- jc bad_from_user
22210- cmpq TI_addr_limit(%rax),%rcx
22211- ja bad_from_user
22212- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22213- CFI_ENDPROC
22214-ENDPROC(copy_from_user)
22215-
22216 ENTRY(copy_user_generic)
22217 CFI_STARTPROC
22218 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22219 CFI_ENDPROC
22220 ENDPROC(copy_user_generic)
22221
22222-ENTRY(__copy_from_user_inatomic)
22223- CFI_STARTPROC
22224- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22225- CFI_ENDPROC
22226-ENDPROC(__copy_from_user_inatomic)
22227-
22228 .section .fixup,"ax"
22229 /* must zero dest */
22230 ENTRY(bad_from_user)
22231 bad_from_user:
22232 CFI_STARTPROC
22233+ testl %edx,%edx
22234+ js bad_to_user
22235 movl %edx,%ecx
22236 xorl %eax,%eax
22237 rep
22238 stosb
22239 bad_to_user:
22240 movl %edx,%eax
22241+ pax_force_retaddr
22242 ret
22243 CFI_ENDPROC
22244 ENDPROC(bad_from_user)
22245@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22246 jz 17f
22247 1: movq (%rsi),%r8
22248 2: movq 1*8(%rsi),%r9
22249-3: movq 2*8(%rsi),%r10
22250+3: movq 2*8(%rsi),%rax
22251 4: movq 3*8(%rsi),%r11
22252 5: movq %r8,(%rdi)
22253 6: movq %r9,1*8(%rdi)
22254-7: movq %r10,2*8(%rdi)
22255+7: movq %rax,2*8(%rdi)
22256 8: movq %r11,3*8(%rdi)
22257 9: movq 4*8(%rsi),%r8
22258 10: movq 5*8(%rsi),%r9
22259-11: movq 6*8(%rsi),%r10
22260+11: movq 6*8(%rsi),%rax
22261 12: movq 7*8(%rsi),%r11
22262 13: movq %r8,4*8(%rdi)
22263 14: movq %r9,5*8(%rdi)
22264-15: movq %r10,6*8(%rdi)
22265+15: movq %rax,6*8(%rdi)
22266 16: movq %r11,7*8(%rdi)
22267 leaq 64(%rsi),%rsi
22268 leaq 64(%rdi),%rdi
22269@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22270 decl %ecx
22271 jnz 21b
22272 23: xor %eax,%eax
22273+ pax_force_retaddr
22274 ret
22275
22276 .section .fixup,"ax"
22277@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22278 3: rep
22279 movsb
22280 4: xorl %eax,%eax
22281+ pax_force_retaddr
22282 ret
22283
22284 .section .fixup,"ax"
22285diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22286index cb0c112..e3a6895 100644
22287--- a/arch/x86/lib/copy_user_nocache_64.S
22288+++ b/arch/x86/lib/copy_user_nocache_64.S
22289@@ -8,12 +8,14 @@
22290
22291 #include <linux/linkage.h>
22292 #include <asm/dwarf2.h>
22293+#include <asm/alternative-asm.h>
22294
22295 #define FIX_ALIGNMENT 1
22296
22297 #include <asm/current.h>
22298 #include <asm/asm-offsets.h>
22299 #include <asm/thread_info.h>
22300+#include <asm/pgtable.h>
22301
22302 .macro ALIGN_DESTINATION
22303 #ifdef FIX_ALIGNMENT
22304@@ -50,6 +52,15 @@
22305 */
22306 ENTRY(__copy_user_nocache)
22307 CFI_STARTPROC
22308+
22309+#ifdef CONFIG_PAX_MEMORY_UDEREF
22310+ mov $PAX_USER_SHADOW_BASE,%rcx
22311+ cmp %rcx,%rsi
22312+ jae 1f
22313+ add %rcx,%rsi
22314+1:
22315+#endif
22316+
22317 cmpl $8,%edx
22318 jb 20f /* less then 8 bytes, go to byte copy loop */
22319 ALIGN_DESTINATION
22320@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22321 jz 17f
22322 1: movq (%rsi),%r8
22323 2: movq 1*8(%rsi),%r9
22324-3: movq 2*8(%rsi),%r10
22325+3: movq 2*8(%rsi),%rax
22326 4: movq 3*8(%rsi),%r11
22327 5: movnti %r8,(%rdi)
22328 6: movnti %r9,1*8(%rdi)
22329-7: movnti %r10,2*8(%rdi)
22330+7: movnti %rax,2*8(%rdi)
22331 8: movnti %r11,3*8(%rdi)
22332 9: movq 4*8(%rsi),%r8
22333 10: movq 5*8(%rsi),%r9
22334-11: movq 6*8(%rsi),%r10
22335+11: movq 6*8(%rsi),%rax
22336 12: movq 7*8(%rsi),%r11
22337 13: movnti %r8,4*8(%rdi)
22338 14: movnti %r9,5*8(%rdi)
22339-15: movnti %r10,6*8(%rdi)
22340+15: movnti %rax,6*8(%rdi)
22341 16: movnti %r11,7*8(%rdi)
22342 leaq 64(%rsi),%rsi
22343 leaq 64(%rdi),%rdi
22344@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22345 jnz 21b
22346 23: xorl %eax,%eax
22347 sfence
22348+ pax_force_retaddr
22349 ret
22350
22351 .section .fixup,"ax"
22352diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22353index f0dba36..48cb4d6 100644
22354--- a/arch/x86/lib/csum-copy_64.S
22355+++ b/arch/x86/lib/csum-copy_64.S
22356@@ -8,6 +8,7 @@
22357 #include <linux/linkage.h>
22358 #include <asm/dwarf2.h>
22359 #include <asm/errno.h>
22360+#include <asm/alternative-asm.h>
22361
22362 /*
22363 * Checksum copy with exception handling.
22364@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22365 CFI_RESTORE rbp
22366 addq $7*8,%rsp
22367 CFI_ADJUST_CFA_OFFSET -7*8
22368+ pax_force_retaddr 0, 1
22369 ret
22370 CFI_RESTORE_STATE
22371
22372diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22373index 459b58a..9570bc7 100644
22374--- a/arch/x86/lib/csum-wrappers_64.c
22375+++ b/arch/x86/lib/csum-wrappers_64.c
22376@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22377 len -= 2;
22378 }
22379 }
22380- isum = csum_partial_copy_generic((__force const void *)src,
22381+
22382+#ifdef CONFIG_PAX_MEMORY_UDEREF
22383+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22384+ src += PAX_USER_SHADOW_BASE;
22385+#endif
22386+
22387+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22388 dst, len, isum, errp, NULL);
22389 if (unlikely(*errp))
22390 goto out_err;
22391@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22392 }
22393
22394 *errp = 0;
22395- return csum_partial_copy_generic(src, (void __force *)dst,
22396+
22397+#ifdef CONFIG_PAX_MEMORY_UDEREF
22398+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22399+ dst += PAX_USER_SHADOW_BASE;
22400+#endif
22401+
22402+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22403 len, isum, NULL, errp);
22404 }
22405 EXPORT_SYMBOL(csum_partial_copy_to_user);
22406diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22407index 51f1504..ddac4c1 100644
22408--- a/arch/x86/lib/getuser.S
22409+++ b/arch/x86/lib/getuser.S
22410@@ -33,15 +33,38 @@
22411 #include <asm/asm-offsets.h>
22412 #include <asm/thread_info.h>
22413 #include <asm/asm.h>
22414+#include <asm/segment.h>
22415+#include <asm/pgtable.h>
22416+#include <asm/alternative-asm.h>
22417+
22418+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22419+#define __copyuser_seg gs;
22420+#else
22421+#define __copyuser_seg
22422+#endif
22423
22424 .text
22425 ENTRY(__get_user_1)
22426 CFI_STARTPROC
22427+
22428+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22429 GET_THREAD_INFO(%_ASM_DX)
22430 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22431 jae bad_get_user
22432-1: movzb (%_ASM_AX),%edx
22433+
22434+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22435+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22436+ cmp %_ASM_DX,%_ASM_AX
22437+ jae 1234f
22438+ add %_ASM_DX,%_ASM_AX
22439+1234:
22440+#endif
22441+
22442+#endif
22443+
22444+1: __copyuser_seg movzb (%_ASM_AX),%edx
22445 xor %eax,%eax
22446+ pax_force_retaddr
22447 ret
22448 CFI_ENDPROC
22449 ENDPROC(__get_user_1)
22450@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22451 ENTRY(__get_user_2)
22452 CFI_STARTPROC
22453 add $1,%_ASM_AX
22454+
22455+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22456 jc bad_get_user
22457 GET_THREAD_INFO(%_ASM_DX)
22458 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22459 jae bad_get_user
22460-2: movzwl -1(%_ASM_AX),%edx
22461+
22462+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22463+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22464+ cmp %_ASM_DX,%_ASM_AX
22465+ jae 1234f
22466+ add %_ASM_DX,%_ASM_AX
22467+1234:
22468+#endif
22469+
22470+#endif
22471+
22472+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22473 xor %eax,%eax
22474+ pax_force_retaddr
22475 ret
22476 CFI_ENDPROC
22477 ENDPROC(__get_user_2)
22478@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22479 ENTRY(__get_user_4)
22480 CFI_STARTPROC
22481 add $3,%_ASM_AX
22482+
22483+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22484 jc bad_get_user
22485 GET_THREAD_INFO(%_ASM_DX)
22486 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22487 jae bad_get_user
22488-3: mov -3(%_ASM_AX),%edx
22489+
22490+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22491+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22492+ cmp %_ASM_DX,%_ASM_AX
22493+ jae 1234f
22494+ add %_ASM_DX,%_ASM_AX
22495+1234:
22496+#endif
22497+
22498+#endif
22499+
22500+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22501 xor %eax,%eax
22502+ pax_force_retaddr
22503 ret
22504 CFI_ENDPROC
22505 ENDPROC(__get_user_4)
22506@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22507 GET_THREAD_INFO(%_ASM_DX)
22508 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22509 jae bad_get_user
22510+
22511+#ifdef CONFIG_PAX_MEMORY_UDEREF
22512+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22513+ cmp %_ASM_DX,%_ASM_AX
22514+ jae 1234f
22515+ add %_ASM_DX,%_ASM_AX
22516+1234:
22517+#endif
22518+
22519 4: movq -7(%_ASM_AX),%_ASM_DX
22520 xor %eax,%eax
22521+ pax_force_retaddr
22522 ret
22523 CFI_ENDPROC
22524 ENDPROC(__get_user_8)
22525@@ -91,6 +152,7 @@ bad_get_user:
22526 CFI_STARTPROC
22527 xor %edx,%edx
22528 mov $(-EFAULT),%_ASM_AX
22529+ pax_force_retaddr
22530 ret
22531 CFI_ENDPROC
22532 END(bad_get_user)
22533diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22534index 05a95e7..326f2fa 100644
22535--- a/arch/x86/lib/iomap_copy_64.S
22536+++ b/arch/x86/lib/iomap_copy_64.S
22537@@ -17,6 +17,7 @@
22538
22539 #include <linux/linkage.h>
22540 #include <asm/dwarf2.h>
22541+#include <asm/alternative-asm.h>
22542
22543 /*
22544 * override generic version in lib/iomap_copy.c
22545@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22546 CFI_STARTPROC
22547 movl %edx,%ecx
22548 rep movsd
22549+ pax_force_retaddr
22550 ret
22551 CFI_ENDPROC
22552 ENDPROC(__iowrite32_copy)
22553diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22554index ad5441e..610e351 100644
22555--- a/arch/x86/lib/memcpy_64.S
22556+++ b/arch/x86/lib/memcpy_64.S
22557@@ -4,6 +4,7 @@
22558
22559 #include <asm/cpufeature.h>
22560 #include <asm/dwarf2.h>
22561+#include <asm/alternative-asm.h>
22562
22563 /*
22564 * memcpy - Copy a memory block.
22565@@ -34,6 +35,7 @@ memcpy_c:
22566 rep movsq
22567 movl %edx, %ecx
22568 rep movsb
22569+ pax_force_retaddr
22570 ret
22571 CFI_ENDPROC
22572 ENDPROC(memcpy_c)
22573@@ -118,6 +120,7 @@ ENTRY(memcpy)
22574 jnz .Lloop_1
22575
22576 .Lend:
22577+ pax_force_retaddr 0, 1
22578 ret
22579 CFI_ENDPROC
22580 ENDPROC(memcpy)
22581@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22582 * It is also a lot simpler. Use this when possible:
22583 */
22584
22585- .section .altinstr_replacement, "ax"
22586+ .section .altinstr_replacement, "a"
22587 1: .byte 0xeb /* jmp <disp8> */
22588 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22589 2:
22590diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22591index 2c59481..7e9ba4e 100644
22592--- a/arch/x86/lib/memset_64.S
22593+++ b/arch/x86/lib/memset_64.S
22594@@ -2,6 +2,7 @@
22595
22596 #include <linux/linkage.h>
22597 #include <asm/dwarf2.h>
22598+#include <asm/alternative-asm.h>
22599
22600 /*
22601 * ISO C memset - set a memory block to a byte value.
22602@@ -28,6 +29,7 @@ memset_c:
22603 movl %r8d,%ecx
22604 rep stosb
22605 movq %r9,%rax
22606+ pax_force_retaddr
22607 ret
22608 CFI_ENDPROC
22609 ENDPROC(memset_c)
22610@@ -35,13 +37,13 @@ ENDPROC(memset_c)
22611 ENTRY(memset)
22612 ENTRY(__memset)
22613 CFI_STARTPROC
22614- movq %rdi,%r10
22615 movq %rdx,%r11
22616
22617 /* expand byte value */
22618 movzbl %sil,%ecx
22619 movabs $0x0101010101010101,%rax
22620 mul %rcx /* with rax, clobbers rdx */
22621+ movq %rdi,%rdx
22622
22623 /* align dst */
22624 movl %edi,%r9d
22625@@ -95,7 +97,8 @@ ENTRY(__memset)
22626 jnz .Lloop_1
22627
22628 .Lende:
22629- movq %r10,%rax
22630+ movq %rdx,%rax
22631+ pax_force_retaddr
22632 ret
22633
22634 CFI_RESTORE_STATE
22635@@ -118,7 +121,7 @@ ENDPROC(__memset)
22636
22637 #include <asm/cpufeature.h>
22638
22639- .section .altinstr_replacement,"ax"
22640+ .section .altinstr_replacement,"a"
22641 1: .byte 0xeb /* jmp <disp8> */
22642 .byte (memset_c - memset) - (2f - 1b) /* offset */
22643 2:
22644diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22645index c9f2d9b..e7fd2c0 100644
22646--- a/arch/x86/lib/mmx_32.c
22647+++ b/arch/x86/lib/mmx_32.c
22648@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22649 {
22650 void *p;
22651 int i;
22652+ unsigned long cr0;
22653
22654 if (unlikely(in_interrupt()))
22655 return __memcpy(to, from, len);
22656@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22657 kernel_fpu_begin();
22658
22659 __asm__ __volatile__ (
22660- "1: prefetch (%0)\n" /* This set is 28 bytes */
22661- " prefetch 64(%0)\n"
22662- " prefetch 128(%0)\n"
22663- " prefetch 192(%0)\n"
22664- " prefetch 256(%0)\n"
22665+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22666+ " prefetch 64(%1)\n"
22667+ " prefetch 128(%1)\n"
22668+ " prefetch 192(%1)\n"
22669+ " prefetch 256(%1)\n"
22670 "2: \n"
22671 ".section .fixup, \"ax\"\n"
22672- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22673+ "3: \n"
22674+
22675+#ifdef CONFIG_PAX_KERNEXEC
22676+ " movl %%cr0, %0\n"
22677+ " movl %0, %%eax\n"
22678+ " andl $0xFFFEFFFF, %%eax\n"
22679+ " movl %%eax, %%cr0\n"
22680+#endif
22681+
22682+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22683+
22684+#ifdef CONFIG_PAX_KERNEXEC
22685+ " movl %0, %%cr0\n"
22686+#endif
22687+
22688 " jmp 2b\n"
22689 ".previous\n"
22690 _ASM_EXTABLE(1b, 3b)
22691- : : "r" (from));
22692+ : "=&r" (cr0) : "r" (from) : "ax");
22693
22694 for ( ; i > 5; i--) {
22695 __asm__ __volatile__ (
22696- "1: prefetch 320(%0)\n"
22697- "2: movq (%0), %%mm0\n"
22698- " movq 8(%0), %%mm1\n"
22699- " movq 16(%0), %%mm2\n"
22700- " movq 24(%0), %%mm3\n"
22701- " movq %%mm0, (%1)\n"
22702- " movq %%mm1, 8(%1)\n"
22703- " movq %%mm2, 16(%1)\n"
22704- " movq %%mm3, 24(%1)\n"
22705- " movq 32(%0), %%mm0\n"
22706- " movq 40(%0), %%mm1\n"
22707- " movq 48(%0), %%mm2\n"
22708- " movq 56(%0), %%mm3\n"
22709- " movq %%mm0, 32(%1)\n"
22710- " movq %%mm1, 40(%1)\n"
22711- " movq %%mm2, 48(%1)\n"
22712- " movq %%mm3, 56(%1)\n"
22713+ "1: prefetch 320(%1)\n"
22714+ "2: movq (%1), %%mm0\n"
22715+ " movq 8(%1), %%mm1\n"
22716+ " movq 16(%1), %%mm2\n"
22717+ " movq 24(%1), %%mm3\n"
22718+ " movq %%mm0, (%2)\n"
22719+ " movq %%mm1, 8(%2)\n"
22720+ " movq %%mm2, 16(%2)\n"
22721+ " movq %%mm3, 24(%2)\n"
22722+ " movq 32(%1), %%mm0\n"
22723+ " movq 40(%1), %%mm1\n"
22724+ " movq 48(%1), %%mm2\n"
22725+ " movq 56(%1), %%mm3\n"
22726+ " movq %%mm0, 32(%2)\n"
22727+ " movq %%mm1, 40(%2)\n"
22728+ " movq %%mm2, 48(%2)\n"
22729+ " movq %%mm3, 56(%2)\n"
22730 ".section .fixup, \"ax\"\n"
22731- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22732+ "3:\n"
22733+
22734+#ifdef CONFIG_PAX_KERNEXEC
22735+ " movl %%cr0, %0\n"
22736+ " movl %0, %%eax\n"
22737+ " andl $0xFFFEFFFF, %%eax\n"
22738+ " movl %%eax, %%cr0\n"
22739+#endif
22740+
22741+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22742+
22743+#ifdef CONFIG_PAX_KERNEXEC
22744+ " movl %0, %%cr0\n"
22745+#endif
22746+
22747 " jmp 2b\n"
22748 ".previous\n"
22749 _ASM_EXTABLE(1b, 3b)
22750- : : "r" (from), "r" (to) : "memory");
22751+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22752
22753 from += 64;
22754 to += 64;
22755@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22756 static void fast_copy_page(void *to, void *from)
22757 {
22758 int i;
22759+ unsigned long cr0;
22760
22761 kernel_fpu_begin();
22762
22763@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22764 * but that is for later. -AV
22765 */
22766 __asm__ __volatile__(
22767- "1: prefetch (%0)\n"
22768- " prefetch 64(%0)\n"
22769- " prefetch 128(%0)\n"
22770- " prefetch 192(%0)\n"
22771- " prefetch 256(%0)\n"
22772+ "1: prefetch (%1)\n"
22773+ " prefetch 64(%1)\n"
22774+ " prefetch 128(%1)\n"
22775+ " prefetch 192(%1)\n"
22776+ " prefetch 256(%1)\n"
22777 "2: \n"
22778 ".section .fixup, \"ax\"\n"
22779- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22780+ "3: \n"
22781+
22782+#ifdef CONFIG_PAX_KERNEXEC
22783+ " movl %%cr0, %0\n"
22784+ " movl %0, %%eax\n"
22785+ " andl $0xFFFEFFFF, %%eax\n"
22786+ " movl %%eax, %%cr0\n"
22787+#endif
22788+
22789+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22790+
22791+#ifdef CONFIG_PAX_KERNEXEC
22792+ " movl %0, %%cr0\n"
22793+#endif
22794+
22795 " jmp 2b\n"
22796 ".previous\n"
22797- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22798+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22799
22800 for (i = 0; i < (4096-320)/64; i++) {
22801 __asm__ __volatile__ (
22802- "1: prefetch 320(%0)\n"
22803- "2: movq (%0), %%mm0\n"
22804- " movntq %%mm0, (%1)\n"
22805- " movq 8(%0), %%mm1\n"
22806- " movntq %%mm1, 8(%1)\n"
22807- " movq 16(%0), %%mm2\n"
22808- " movntq %%mm2, 16(%1)\n"
22809- " movq 24(%0), %%mm3\n"
22810- " movntq %%mm3, 24(%1)\n"
22811- " movq 32(%0), %%mm4\n"
22812- " movntq %%mm4, 32(%1)\n"
22813- " movq 40(%0), %%mm5\n"
22814- " movntq %%mm5, 40(%1)\n"
22815- " movq 48(%0), %%mm6\n"
22816- " movntq %%mm6, 48(%1)\n"
22817- " movq 56(%0), %%mm7\n"
22818- " movntq %%mm7, 56(%1)\n"
22819+ "1: prefetch 320(%1)\n"
22820+ "2: movq (%1), %%mm0\n"
22821+ " movntq %%mm0, (%2)\n"
22822+ " movq 8(%1), %%mm1\n"
22823+ " movntq %%mm1, 8(%2)\n"
22824+ " movq 16(%1), %%mm2\n"
22825+ " movntq %%mm2, 16(%2)\n"
22826+ " movq 24(%1), %%mm3\n"
22827+ " movntq %%mm3, 24(%2)\n"
22828+ " movq 32(%1), %%mm4\n"
22829+ " movntq %%mm4, 32(%2)\n"
22830+ " movq 40(%1), %%mm5\n"
22831+ " movntq %%mm5, 40(%2)\n"
22832+ " movq 48(%1), %%mm6\n"
22833+ " movntq %%mm6, 48(%2)\n"
22834+ " movq 56(%1), %%mm7\n"
22835+ " movntq %%mm7, 56(%2)\n"
22836 ".section .fixup, \"ax\"\n"
22837- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22838+ "3:\n"
22839+
22840+#ifdef CONFIG_PAX_KERNEXEC
22841+ " movl %%cr0, %0\n"
22842+ " movl %0, %%eax\n"
22843+ " andl $0xFFFEFFFF, %%eax\n"
22844+ " movl %%eax, %%cr0\n"
22845+#endif
22846+
22847+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22848+
22849+#ifdef CONFIG_PAX_KERNEXEC
22850+ " movl %0, %%cr0\n"
22851+#endif
22852+
22853 " jmp 2b\n"
22854 ".previous\n"
22855- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22856+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22857
22858 from += 64;
22859 to += 64;
22860@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22861 static void fast_copy_page(void *to, void *from)
22862 {
22863 int i;
22864+ unsigned long cr0;
22865
22866 kernel_fpu_begin();
22867
22868 __asm__ __volatile__ (
22869- "1: prefetch (%0)\n"
22870- " prefetch 64(%0)\n"
22871- " prefetch 128(%0)\n"
22872- " prefetch 192(%0)\n"
22873- " prefetch 256(%0)\n"
22874+ "1: prefetch (%1)\n"
22875+ " prefetch 64(%1)\n"
22876+ " prefetch 128(%1)\n"
22877+ " prefetch 192(%1)\n"
22878+ " prefetch 256(%1)\n"
22879 "2: \n"
22880 ".section .fixup, \"ax\"\n"
22881- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22882+ "3: \n"
22883+
22884+#ifdef CONFIG_PAX_KERNEXEC
22885+ " movl %%cr0, %0\n"
22886+ " movl %0, %%eax\n"
22887+ " andl $0xFFFEFFFF, %%eax\n"
22888+ " movl %%eax, %%cr0\n"
22889+#endif
22890+
22891+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22892+
22893+#ifdef CONFIG_PAX_KERNEXEC
22894+ " movl %0, %%cr0\n"
22895+#endif
22896+
22897 " jmp 2b\n"
22898 ".previous\n"
22899- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22900+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22901
22902 for (i = 0; i < 4096/64; i++) {
22903 __asm__ __volatile__ (
22904- "1: prefetch 320(%0)\n"
22905- "2: movq (%0), %%mm0\n"
22906- " movq 8(%0), %%mm1\n"
22907- " movq 16(%0), %%mm2\n"
22908- " movq 24(%0), %%mm3\n"
22909- " movq %%mm0, (%1)\n"
22910- " movq %%mm1, 8(%1)\n"
22911- " movq %%mm2, 16(%1)\n"
22912- " movq %%mm3, 24(%1)\n"
22913- " movq 32(%0), %%mm0\n"
22914- " movq 40(%0), %%mm1\n"
22915- " movq 48(%0), %%mm2\n"
22916- " movq 56(%0), %%mm3\n"
22917- " movq %%mm0, 32(%1)\n"
22918- " movq %%mm1, 40(%1)\n"
22919- " movq %%mm2, 48(%1)\n"
22920- " movq %%mm3, 56(%1)\n"
22921+ "1: prefetch 320(%1)\n"
22922+ "2: movq (%1), %%mm0\n"
22923+ " movq 8(%1), %%mm1\n"
22924+ " movq 16(%1), %%mm2\n"
22925+ " movq 24(%1), %%mm3\n"
22926+ " movq %%mm0, (%2)\n"
22927+ " movq %%mm1, 8(%2)\n"
22928+ " movq %%mm2, 16(%2)\n"
22929+ " movq %%mm3, 24(%2)\n"
22930+ " movq 32(%1), %%mm0\n"
22931+ " movq 40(%1), %%mm1\n"
22932+ " movq 48(%1), %%mm2\n"
22933+ " movq 56(%1), %%mm3\n"
22934+ " movq %%mm0, 32(%2)\n"
22935+ " movq %%mm1, 40(%2)\n"
22936+ " movq %%mm2, 48(%2)\n"
22937+ " movq %%mm3, 56(%2)\n"
22938 ".section .fixup, \"ax\"\n"
22939- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22940+ "3:\n"
22941+
22942+#ifdef CONFIG_PAX_KERNEXEC
22943+ " movl %%cr0, %0\n"
22944+ " movl %0, %%eax\n"
22945+ " andl $0xFFFEFFFF, %%eax\n"
22946+ " movl %%eax, %%cr0\n"
22947+#endif
22948+
22949+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22950+
22951+#ifdef CONFIG_PAX_KERNEXEC
22952+ " movl %0, %%cr0\n"
22953+#endif
22954+
22955 " jmp 2b\n"
22956 ".previous\n"
22957 _ASM_EXTABLE(1b, 3b)
22958- : : "r" (from), "r" (to) : "memory");
22959+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22960
22961 from += 64;
22962 to += 64;
22963diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22964index 69fa106..adda88b 100644
22965--- a/arch/x86/lib/msr-reg.S
22966+++ b/arch/x86/lib/msr-reg.S
22967@@ -3,6 +3,7 @@
22968 #include <asm/dwarf2.h>
22969 #include <asm/asm.h>
22970 #include <asm/msr.h>
22971+#include <asm/alternative-asm.h>
22972
22973 #ifdef CONFIG_X86_64
22974 /*
22975@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22976 CFI_STARTPROC
22977 pushq_cfi %rbx
22978 pushq_cfi %rbp
22979- movq %rdi, %r10 /* Save pointer */
22980+ movq %rdi, %r9 /* Save pointer */
22981 xorl %r11d, %r11d /* Return value */
22982 movl (%rdi), %eax
22983 movl 4(%rdi), %ecx
22984@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22985 movl 28(%rdi), %edi
22986 CFI_REMEMBER_STATE
22987 1: \op
22988-2: movl %eax, (%r10)
22989+2: movl %eax, (%r9)
22990 movl %r11d, %eax /* Return value */
22991- movl %ecx, 4(%r10)
22992- movl %edx, 8(%r10)
22993- movl %ebx, 12(%r10)
22994- movl %ebp, 20(%r10)
22995- movl %esi, 24(%r10)
22996- movl %edi, 28(%r10)
22997+ movl %ecx, 4(%r9)
22998+ movl %edx, 8(%r9)
22999+ movl %ebx, 12(%r9)
23000+ movl %ebp, 20(%r9)
23001+ movl %esi, 24(%r9)
23002+ movl %edi, 28(%r9)
23003 popq_cfi %rbp
23004 popq_cfi %rbx
23005+ pax_force_retaddr
23006 ret
23007 3:
23008 CFI_RESTORE_STATE
23009diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23010index 36b0d15..d381858 100644
23011--- a/arch/x86/lib/putuser.S
23012+++ b/arch/x86/lib/putuser.S
23013@@ -15,7 +15,9 @@
23014 #include <asm/thread_info.h>
23015 #include <asm/errno.h>
23016 #include <asm/asm.h>
23017-
23018+#include <asm/segment.h>
23019+#include <asm/pgtable.h>
23020+#include <asm/alternative-asm.h>
23021
23022 /*
23023 * __put_user_X
23024@@ -29,52 +31,119 @@
23025 * as they get called from within inline assembly.
23026 */
23027
23028-#define ENTER CFI_STARTPROC ; \
23029- GET_THREAD_INFO(%_ASM_BX)
23030-#define EXIT ret ; \
23031+#define ENTER CFI_STARTPROC
23032+#define EXIT pax_force_retaddr; ret ; \
23033 CFI_ENDPROC
23034
23035+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23036+#define _DEST %_ASM_CX,%_ASM_BX
23037+#else
23038+#define _DEST %_ASM_CX
23039+#endif
23040+
23041+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23042+#define __copyuser_seg gs;
23043+#else
23044+#define __copyuser_seg
23045+#endif
23046+
23047 .text
23048 ENTRY(__put_user_1)
23049 ENTER
23050+
23051+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23052+ GET_THREAD_INFO(%_ASM_BX)
23053 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23054 jae bad_put_user
23055-1: movb %al,(%_ASM_CX)
23056+
23057+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23058+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23059+ cmp %_ASM_BX,%_ASM_CX
23060+ jb 1234f
23061+ xor %ebx,%ebx
23062+1234:
23063+#endif
23064+
23065+#endif
23066+
23067+1: __copyuser_seg movb %al,(_DEST)
23068 xor %eax,%eax
23069 EXIT
23070 ENDPROC(__put_user_1)
23071
23072 ENTRY(__put_user_2)
23073 ENTER
23074+
23075+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23076+ GET_THREAD_INFO(%_ASM_BX)
23077 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23078 sub $1,%_ASM_BX
23079 cmp %_ASM_BX,%_ASM_CX
23080 jae bad_put_user
23081-2: movw %ax,(%_ASM_CX)
23082+
23083+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23084+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23085+ cmp %_ASM_BX,%_ASM_CX
23086+ jb 1234f
23087+ xor %ebx,%ebx
23088+1234:
23089+#endif
23090+
23091+#endif
23092+
23093+2: __copyuser_seg movw %ax,(_DEST)
23094 xor %eax,%eax
23095 EXIT
23096 ENDPROC(__put_user_2)
23097
23098 ENTRY(__put_user_4)
23099 ENTER
23100+
23101+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23102+ GET_THREAD_INFO(%_ASM_BX)
23103 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23104 sub $3,%_ASM_BX
23105 cmp %_ASM_BX,%_ASM_CX
23106 jae bad_put_user
23107-3: movl %eax,(%_ASM_CX)
23108+
23109+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23110+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23111+ cmp %_ASM_BX,%_ASM_CX
23112+ jb 1234f
23113+ xor %ebx,%ebx
23114+1234:
23115+#endif
23116+
23117+#endif
23118+
23119+3: __copyuser_seg movl %eax,(_DEST)
23120 xor %eax,%eax
23121 EXIT
23122 ENDPROC(__put_user_4)
23123
23124 ENTRY(__put_user_8)
23125 ENTER
23126+
23127+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23128+ GET_THREAD_INFO(%_ASM_BX)
23129 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23130 sub $7,%_ASM_BX
23131 cmp %_ASM_BX,%_ASM_CX
23132 jae bad_put_user
23133-4: mov %_ASM_AX,(%_ASM_CX)
23134+
23135+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23136+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23137+ cmp %_ASM_BX,%_ASM_CX
23138+ jb 1234f
23139+ xor %ebx,%ebx
23140+1234:
23141+#endif
23142+
23143+#endif
23144+
23145+4: __copyuser_seg mov %_ASM_AX,(_DEST)
23146 #ifdef CONFIG_X86_32
23147-5: movl %edx,4(%_ASM_CX)
23148+5: __copyuser_seg movl %edx,4(_DEST)
23149 #endif
23150 xor %eax,%eax
23151 EXIT
23152diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
23153index 05ea55f..6345b9a 100644
23154--- a/arch/x86/lib/rwlock_64.S
23155+++ b/arch/x86/lib/rwlock_64.S
23156@@ -2,6 +2,7 @@
23157
23158 #include <linux/linkage.h>
23159 #include <asm/rwlock.h>
23160+#include <asm/asm.h>
23161 #include <asm/alternative-asm.h>
23162 #include <asm/dwarf2.h>
23163
23164@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
23165 CFI_STARTPROC
23166 LOCK_PREFIX
23167 addl $RW_LOCK_BIAS,(%rdi)
23168+
23169+#ifdef CONFIG_PAX_REFCOUNT
23170+ jno 1234f
23171+ LOCK_PREFIX
23172+ subl $RW_LOCK_BIAS,(%rdi)
23173+ int $4
23174+1234:
23175+ _ASM_EXTABLE(1234b, 1234b)
23176+#endif
23177+
23178 1: rep
23179 nop
23180 cmpl $RW_LOCK_BIAS,(%rdi)
23181 jne 1b
23182 LOCK_PREFIX
23183 subl $RW_LOCK_BIAS,(%rdi)
23184+
23185+#ifdef CONFIG_PAX_REFCOUNT
23186+ jno 1234f
23187+ LOCK_PREFIX
23188+ addl $RW_LOCK_BIAS,(%rdi)
23189+ int $4
23190+1234:
23191+ _ASM_EXTABLE(1234b, 1234b)
23192+#endif
23193+
23194 jnz __write_lock_failed
23195+ pax_force_retaddr
23196 ret
23197 CFI_ENDPROC
23198 END(__write_lock_failed)
23199@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
23200 CFI_STARTPROC
23201 LOCK_PREFIX
23202 incl (%rdi)
23203+
23204+#ifdef CONFIG_PAX_REFCOUNT
23205+ jno 1234f
23206+ LOCK_PREFIX
23207+ decl (%rdi)
23208+ int $4
23209+1234:
23210+ _ASM_EXTABLE(1234b, 1234b)
23211+#endif
23212+
23213 1: rep
23214 nop
23215 cmpl $1,(%rdi)
23216 js 1b
23217 LOCK_PREFIX
23218 decl (%rdi)
23219+
23220+#ifdef CONFIG_PAX_REFCOUNT
23221+ jno 1234f
23222+ LOCK_PREFIX
23223+ incl (%rdi)
23224+ int $4
23225+1234:
23226+ _ASM_EXTABLE(1234b, 1234b)
23227+#endif
23228+
23229 js __read_lock_failed
23230+ pax_force_retaddr
23231 ret
23232 CFI_ENDPROC
23233 END(__read_lock_failed)
23234diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23235index 15acecf..f768b10 100644
23236--- a/arch/x86/lib/rwsem_64.S
23237+++ b/arch/x86/lib/rwsem_64.S
23238@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23239 call rwsem_down_read_failed
23240 popq %rdx
23241 restore_common_regs
23242+ pax_force_retaddr
23243 ret
23244 ENDPROC(call_rwsem_down_read_failed)
23245
23246@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23247 movq %rax,%rdi
23248 call rwsem_down_write_failed
23249 restore_common_regs
23250+ pax_force_retaddr
23251 ret
23252 ENDPROC(call_rwsem_down_write_failed)
23253
23254@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23255 movq %rax,%rdi
23256 call rwsem_wake
23257 restore_common_regs
23258-1: ret
23259+1: pax_force_retaddr
23260+ ret
23261 ENDPROC(call_rwsem_wake)
23262
23263 /* Fix up special calling conventions */
23264@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23265 call rwsem_downgrade_wake
23266 popq %rdx
23267 restore_common_regs
23268+ pax_force_retaddr
23269 ret
23270 ENDPROC(call_rwsem_downgrade_wake)
23271diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23272index bf9a7d5..fb06ab5 100644
23273--- a/arch/x86/lib/thunk_64.S
23274+++ b/arch/x86/lib/thunk_64.S
23275@@ -10,7 +10,8 @@
23276 #include <asm/dwarf2.h>
23277 #include <asm/calling.h>
23278 #include <asm/rwlock.h>
23279-
23280+ #include <asm/alternative-asm.h>
23281+
23282 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23283 .macro thunk name,func
23284 .globl \name
23285@@ -70,6 +71,7 @@
23286 SAVE_ARGS
23287 restore:
23288 RESTORE_ARGS
23289+ pax_force_retaddr
23290 ret
23291 CFI_ENDPROC
23292
23293@@ -77,5 +79,6 @@ restore:
23294 SAVE_ARGS
23295 restore_norax:
23296 RESTORE_ARGS 1
23297+ pax_force_retaddr
23298 ret
23299 CFI_ENDPROC
23300diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23301index 1f118d4..ec4a953 100644
23302--- a/arch/x86/lib/usercopy_32.c
23303+++ b/arch/x86/lib/usercopy_32.c
23304@@ -43,7 +43,7 @@ do { \
23305 __asm__ __volatile__( \
23306 " testl %1,%1\n" \
23307 " jz 2f\n" \
23308- "0: lodsb\n" \
23309+ "0: "__copyuser_seg"lodsb\n" \
23310 " stosb\n" \
23311 " testb %%al,%%al\n" \
23312 " jz 1f\n" \
23313@@ -128,10 +128,12 @@ do { \
23314 int __d0; \
23315 might_fault(); \
23316 __asm__ __volatile__( \
23317+ __COPYUSER_SET_ES \
23318 "0: rep; stosl\n" \
23319 " movl %2,%0\n" \
23320 "1: rep; stosb\n" \
23321 "2:\n" \
23322+ __COPYUSER_RESTORE_ES \
23323 ".section .fixup,\"ax\"\n" \
23324 "3: lea 0(%2,%0,4),%0\n" \
23325 " jmp 2b\n" \
23326@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23327 might_fault();
23328
23329 __asm__ __volatile__(
23330+ __COPYUSER_SET_ES
23331 " testl %0, %0\n"
23332 " jz 3f\n"
23333 " andl %0,%%ecx\n"
23334@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23335 " subl %%ecx,%0\n"
23336 " addl %0,%%eax\n"
23337 "1:\n"
23338+ __COPYUSER_RESTORE_ES
23339 ".section .fixup,\"ax\"\n"
23340 "2: xorl %%eax,%%eax\n"
23341 " jmp 1b\n"
23342@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23343
23344 #ifdef CONFIG_X86_INTEL_USERCOPY
23345 static unsigned long
23346-__copy_user_intel(void __user *to, const void *from, unsigned long size)
23347+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23348 {
23349 int d0, d1;
23350 __asm__ __volatile__(
23351@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23352 " .align 2,0x90\n"
23353 "3: movl 0(%4), %%eax\n"
23354 "4: movl 4(%4), %%edx\n"
23355- "5: movl %%eax, 0(%3)\n"
23356- "6: movl %%edx, 4(%3)\n"
23357+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23358+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23359 "7: movl 8(%4), %%eax\n"
23360 "8: movl 12(%4),%%edx\n"
23361- "9: movl %%eax, 8(%3)\n"
23362- "10: movl %%edx, 12(%3)\n"
23363+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23364+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23365 "11: movl 16(%4), %%eax\n"
23366 "12: movl 20(%4), %%edx\n"
23367- "13: movl %%eax, 16(%3)\n"
23368- "14: movl %%edx, 20(%3)\n"
23369+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23370+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23371 "15: movl 24(%4), %%eax\n"
23372 "16: movl 28(%4), %%edx\n"
23373- "17: movl %%eax, 24(%3)\n"
23374- "18: movl %%edx, 28(%3)\n"
23375+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23376+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23377 "19: movl 32(%4), %%eax\n"
23378 "20: movl 36(%4), %%edx\n"
23379- "21: movl %%eax, 32(%3)\n"
23380- "22: movl %%edx, 36(%3)\n"
23381+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23382+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23383 "23: movl 40(%4), %%eax\n"
23384 "24: movl 44(%4), %%edx\n"
23385- "25: movl %%eax, 40(%3)\n"
23386- "26: movl %%edx, 44(%3)\n"
23387+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23388+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23389 "27: movl 48(%4), %%eax\n"
23390 "28: movl 52(%4), %%edx\n"
23391- "29: movl %%eax, 48(%3)\n"
23392- "30: movl %%edx, 52(%3)\n"
23393+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23394+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23395 "31: movl 56(%4), %%eax\n"
23396 "32: movl 60(%4), %%edx\n"
23397- "33: movl %%eax, 56(%3)\n"
23398- "34: movl %%edx, 60(%3)\n"
23399+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23400+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23401 " addl $-64, %0\n"
23402 " addl $64, %4\n"
23403 " addl $64, %3\n"
23404@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23405 " shrl $2, %0\n"
23406 " andl $3, %%eax\n"
23407 " cld\n"
23408+ __COPYUSER_SET_ES
23409 "99: rep; movsl\n"
23410 "36: movl %%eax, %0\n"
23411 "37: rep; movsb\n"
23412 "100:\n"
23413+ __COPYUSER_RESTORE_ES
23414+ ".section .fixup,\"ax\"\n"
23415+ "101: lea 0(%%eax,%0,4),%0\n"
23416+ " jmp 100b\n"
23417+ ".previous\n"
23418+ ".section __ex_table,\"a\"\n"
23419+ " .align 4\n"
23420+ " .long 1b,100b\n"
23421+ " .long 2b,100b\n"
23422+ " .long 3b,100b\n"
23423+ " .long 4b,100b\n"
23424+ " .long 5b,100b\n"
23425+ " .long 6b,100b\n"
23426+ " .long 7b,100b\n"
23427+ " .long 8b,100b\n"
23428+ " .long 9b,100b\n"
23429+ " .long 10b,100b\n"
23430+ " .long 11b,100b\n"
23431+ " .long 12b,100b\n"
23432+ " .long 13b,100b\n"
23433+ " .long 14b,100b\n"
23434+ " .long 15b,100b\n"
23435+ " .long 16b,100b\n"
23436+ " .long 17b,100b\n"
23437+ " .long 18b,100b\n"
23438+ " .long 19b,100b\n"
23439+ " .long 20b,100b\n"
23440+ " .long 21b,100b\n"
23441+ " .long 22b,100b\n"
23442+ " .long 23b,100b\n"
23443+ " .long 24b,100b\n"
23444+ " .long 25b,100b\n"
23445+ " .long 26b,100b\n"
23446+ " .long 27b,100b\n"
23447+ " .long 28b,100b\n"
23448+ " .long 29b,100b\n"
23449+ " .long 30b,100b\n"
23450+ " .long 31b,100b\n"
23451+ " .long 32b,100b\n"
23452+ " .long 33b,100b\n"
23453+ " .long 34b,100b\n"
23454+ " .long 35b,100b\n"
23455+ " .long 36b,100b\n"
23456+ " .long 37b,100b\n"
23457+ " .long 99b,101b\n"
23458+ ".previous"
23459+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23460+ : "1"(to), "2"(from), "0"(size)
23461+ : "eax", "edx", "memory");
23462+ return size;
23463+}
23464+
23465+static unsigned long
23466+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23467+{
23468+ int d0, d1;
23469+ __asm__ __volatile__(
23470+ " .align 2,0x90\n"
23471+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23472+ " cmpl $67, %0\n"
23473+ " jbe 3f\n"
23474+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23475+ " .align 2,0x90\n"
23476+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23477+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23478+ "5: movl %%eax, 0(%3)\n"
23479+ "6: movl %%edx, 4(%3)\n"
23480+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23481+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23482+ "9: movl %%eax, 8(%3)\n"
23483+ "10: movl %%edx, 12(%3)\n"
23484+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23485+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23486+ "13: movl %%eax, 16(%3)\n"
23487+ "14: movl %%edx, 20(%3)\n"
23488+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23489+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23490+ "17: movl %%eax, 24(%3)\n"
23491+ "18: movl %%edx, 28(%3)\n"
23492+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23493+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23494+ "21: movl %%eax, 32(%3)\n"
23495+ "22: movl %%edx, 36(%3)\n"
23496+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23497+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23498+ "25: movl %%eax, 40(%3)\n"
23499+ "26: movl %%edx, 44(%3)\n"
23500+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23501+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23502+ "29: movl %%eax, 48(%3)\n"
23503+ "30: movl %%edx, 52(%3)\n"
23504+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23505+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23506+ "33: movl %%eax, 56(%3)\n"
23507+ "34: movl %%edx, 60(%3)\n"
23508+ " addl $-64, %0\n"
23509+ " addl $64, %4\n"
23510+ " addl $64, %3\n"
23511+ " cmpl $63, %0\n"
23512+ " ja 1b\n"
23513+ "35: movl %0, %%eax\n"
23514+ " shrl $2, %0\n"
23515+ " andl $3, %%eax\n"
23516+ " cld\n"
23517+ "99: rep; "__copyuser_seg" movsl\n"
23518+ "36: movl %%eax, %0\n"
23519+ "37: rep; "__copyuser_seg" movsb\n"
23520+ "100:\n"
23521 ".section .fixup,\"ax\"\n"
23522 "101: lea 0(%%eax,%0,4),%0\n"
23523 " jmp 100b\n"
23524@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23525 int d0, d1;
23526 __asm__ __volatile__(
23527 " .align 2,0x90\n"
23528- "0: movl 32(%4), %%eax\n"
23529+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23530 " cmpl $67, %0\n"
23531 " jbe 2f\n"
23532- "1: movl 64(%4), %%eax\n"
23533+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23534 " .align 2,0x90\n"
23535- "2: movl 0(%4), %%eax\n"
23536- "21: movl 4(%4), %%edx\n"
23537+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23538+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23539 " movl %%eax, 0(%3)\n"
23540 " movl %%edx, 4(%3)\n"
23541- "3: movl 8(%4), %%eax\n"
23542- "31: movl 12(%4),%%edx\n"
23543+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23544+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23545 " movl %%eax, 8(%3)\n"
23546 " movl %%edx, 12(%3)\n"
23547- "4: movl 16(%4), %%eax\n"
23548- "41: movl 20(%4), %%edx\n"
23549+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23550+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23551 " movl %%eax, 16(%3)\n"
23552 " movl %%edx, 20(%3)\n"
23553- "10: movl 24(%4), %%eax\n"
23554- "51: movl 28(%4), %%edx\n"
23555+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23556+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23557 " movl %%eax, 24(%3)\n"
23558 " movl %%edx, 28(%3)\n"
23559- "11: movl 32(%4), %%eax\n"
23560- "61: movl 36(%4), %%edx\n"
23561+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23562+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23563 " movl %%eax, 32(%3)\n"
23564 " movl %%edx, 36(%3)\n"
23565- "12: movl 40(%4), %%eax\n"
23566- "71: movl 44(%4), %%edx\n"
23567+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23568+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23569 " movl %%eax, 40(%3)\n"
23570 " movl %%edx, 44(%3)\n"
23571- "13: movl 48(%4), %%eax\n"
23572- "81: movl 52(%4), %%edx\n"
23573+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23574+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23575 " movl %%eax, 48(%3)\n"
23576 " movl %%edx, 52(%3)\n"
23577- "14: movl 56(%4), %%eax\n"
23578- "91: movl 60(%4), %%edx\n"
23579+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23580+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23581 " movl %%eax, 56(%3)\n"
23582 " movl %%edx, 60(%3)\n"
23583 " addl $-64, %0\n"
23584@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23585 " shrl $2, %0\n"
23586 " andl $3, %%eax\n"
23587 " cld\n"
23588- "6: rep; movsl\n"
23589+ "6: rep; "__copyuser_seg" movsl\n"
23590 " movl %%eax,%0\n"
23591- "7: rep; movsb\n"
23592+ "7: rep; "__copyuser_seg" movsb\n"
23593 "8:\n"
23594 ".section .fixup,\"ax\"\n"
23595 "9: lea 0(%%eax,%0,4),%0\n"
23596@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23597
23598 __asm__ __volatile__(
23599 " .align 2,0x90\n"
23600- "0: movl 32(%4), %%eax\n"
23601+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23602 " cmpl $67, %0\n"
23603 " jbe 2f\n"
23604- "1: movl 64(%4), %%eax\n"
23605+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23606 " .align 2,0x90\n"
23607- "2: movl 0(%4), %%eax\n"
23608- "21: movl 4(%4), %%edx\n"
23609+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23610+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23611 " movnti %%eax, 0(%3)\n"
23612 " movnti %%edx, 4(%3)\n"
23613- "3: movl 8(%4), %%eax\n"
23614- "31: movl 12(%4),%%edx\n"
23615+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23616+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23617 " movnti %%eax, 8(%3)\n"
23618 " movnti %%edx, 12(%3)\n"
23619- "4: movl 16(%4), %%eax\n"
23620- "41: movl 20(%4), %%edx\n"
23621+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23622+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23623 " movnti %%eax, 16(%3)\n"
23624 " movnti %%edx, 20(%3)\n"
23625- "10: movl 24(%4), %%eax\n"
23626- "51: movl 28(%4), %%edx\n"
23627+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23628+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23629 " movnti %%eax, 24(%3)\n"
23630 " movnti %%edx, 28(%3)\n"
23631- "11: movl 32(%4), %%eax\n"
23632- "61: movl 36(%4), %%edx\n"
23633+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23634+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23635 " movnti %%eax, 32(%3)\n"
23636 " movnti %%edx, 36(%3)\n"
23637- "12: movl 40(%4), %%eax\n"
23638- "71: movl 44(%4), %%edx\n"
23639+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23640+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23641 " movnti %%eax, 40(%3)\n"
23642 " movnti %%edx, 44(%3)\n"
23643- "13: movl 48(%4), %%eax\n"
23644- "81: movl 52(%4), %%edx\n"
23645+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23646+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23647 " movnti %%eax, 48(%3)\n"
23648 " movnti %%edx, 52(%3)\n"
23649- "14: movl 56(%4), %%eax\n"
23650- "91: movl 60(%4), %%edx\n"
23651+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23652+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23653 " movnti %%eax, 56(%3)\n"
23654 " movnti %%edx, 60(%3)\n"
23655 " addl $-64, %0\n"
23656@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23657 " shrl $2, %0\n"
23658 " andl $3, %%eax\n"
23659 " cld\n"
23660- "6: rep; movsl\n"
23661+ "6: rep; "__copyuser_seg" movsl\n"
23662 " movl %%eax,%0\n"
23663- "7: rep; movsb\n"
23664+ "7: rep; "__copyuser_seg" movsb\n"
23665 "8:\n"
23666 ".section .fixup,\"ax\"\n"
23667 "9: lea 0(%%eax,%0,4),%0\n"
23668@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23669
23670 __asm__ __volatile__(
23671 " .align 2,0x90\n"
23672- "0: movl 32(%4), %%eax\n"
23673+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23674 " cmpl $67, %0\n"
23675 " jbe 2f\n"
23676- "1: movl 64(%4), %%eax\n"
23677+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23678 " .align 2,0x90\n"
23679- "2: movl 0(%4), %%eax\n"
23680- "21: movl 4(%4), %%edx\n"
23681+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23682+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23683 " movnti %%eax, 0(%3)\n"
23684 " movnti %%edx, 4(%3)\n"
23685- "3: movl 8(%4), %%eax\n"
23686- "31: movl 12(%4),%%edx\n"
23687+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23688+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23689 " movnti %%eax, 8(%3)\n"
23690 " movnti %%edx, 12(%3)\n"
23691- "4: movl 16(%4), %%eax\n"
23692- "41: movl 20(%4), %%edx\n"
23693+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23694+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23695 " movnti %%eax, 16(%3)\n"
23696 " movnti %%edx, 20(%3)\n"
23697- "10: movl 24(%4), %%eax\n"
23698- "51: movl 28(%4), %%edx\n"
23699+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23700+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23701 " movnti %%eax, 24(%3)\n"
23702 " movnti %%edx, 28(%3)\n"
23703- "11: movl 32(%4), %%eax\n"
23704- "61: movl 36(%4), %%edx\n"
23705+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23706+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23707 " movnti %%eax, 32(%3)\n"
23708 " movnti %%edx, 36(%3)\n"
23709- "12: movl 40(%4), %%eax\n"
23710- "71: movl 44(%4), %%edx\n"
23711+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23712+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23713 " movnti %%eax, 40(%3)\n"
23714 " movnti %%edx, 44(%3)\n"
23715- "13: movl 48(%4), %%eax\n"
23716- "81: movl 52(%4), %%edx\n"
23717+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23718+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23719 " movnti %%eax, 48(%3)\n"
23720 " movnti %%edx, 52(%3)\n"
23721- "14: movl 56(%4), %%eax\n"
23722- "91: movl 60(%4), %%edx\n"
23723+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23724+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23725 " movnti %%eax, 56(%3)\n"
23726 " movnti %%edx, 60(%3)\n"
23727 " addl $-64, %0\n"
23728@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23729 " shrl $2, %0\n"
23730 " andl $3, %%eax\n"
23731 " cld\n"
23732- "6: rep; movsl\n"
23733+ "6: rep; "__copyuser_seg" movsl\n"
23734 " movl %%eax,%0\n"
23735- "7: rep; movsb\n"
23736+ "7: rep; "__copyuser_seg" movsb\n"
23737 "8:\n"
23738 ".section .fixup,\"ax\"\n"
23739 "9: lea 0(%%eax,%0,4),%0\n"
23740@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23741 */
23742 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23743 unsigned long size);
23744-unsigned long __copy_user_intel(void __user *to, const void *from,
23745+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23746+ unsigned long size);
23747+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23748 unsigned long size);
23749 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23750 const void __user *from, unsigned long size);
23751 #endif /* CONFIG_X86_INTEL_USERCOPY */
23752
23753 /* Generic arbitrary sized copy. */
23754-#define __copy_user(to, from, size) \
23755+#define __copy_user(to, from, size, prefix, set, restore) \
23756 do { \
23757 int __d0, __d1, __d2; \
23758 __asm__ __volatile__( \
23759+ set \
23760 " cmp $7,%0\n" \
23761 " jbe 1f\n" \
23762 " movl %1,%0\n" \
23763 " negl %0\n" \
23764 " andl $7,%0\n" \
23765 " subl %0,%3\n" \
23766- "4: rep; movsb\n" \
23767+ "4: rep; "prefix"movsb\n" \
23768 " movl %3,%0\n" \
23769 " shrl $2,%0\n" \
23770 " andl $3,%3\n" \
23771 " .align 2,0x90\n" \
23772- "0: rep; movsl\n" \
23773+ "0: rep; "prefix"movsl\n" \
23774 " movl %3,%0\n" \
23775- "1: rep; movsb\n" \
23776+ "1: rep; "prefix"movsb\n" \
23777 "2:\n" \
23778+ restore \
23779 ".section .fixup,\"ax\"\n" \
23780 "5: addl %3,%0\n" \
23781 " jmp 2b\n" \
23782@@ -682,14 +799,14 @@ do { \
23783 " negl %0\n" \
23784 " andl $7,%0\n" \
23785 " subl %0,%3\n" \
23786- "4: rep; movsb\n" \
23787+ "4: rep; "__copyuser_seg"movsb\n" \
23788 " movl %3,%0\n" \
23789 " shrl $2,%0\n" \
23790 " andl $3,%3\n" \
23791 " .align 2,0x90\n" \
23792- "0: rep; movsl\n" \
23793+ "0: rep; "__copyuser_seg"movsl\n" \
23794 " movl %3,%0\n" \
23795- "1: rep; movsb\n" \
23796+ "1: rep; "__copyuser_seg"movsb\n" \
23797 "2:\n" \
23798 ".section .fixup,\"ax\"\n" \
23799 "5: addl %3,%0\n" \
23800@@ -775,9 +892,9 @@ survive:
23801 }
23802 #endif
23803 if (movsl_is_ok(to, from, n))
23804- __copy_user(to, from, n);
23805+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23806 else
23807- n = __copy_user_intel(to, from, n);
23808+ n = __generic_copy_to_user_intel(to, from, n);
23809 return n;
23810 }
23811 EXPORT_SYMBOL(__copy_to_user_ll);
23812@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23813 unsigned long n)
23814 {
23815 if (movsl_is_ok(to, from, n))
23816- __copy_user(to, from, n);
23817+ __copy_user(to, from, n, __copyuser_seg, "", "");
23818 else
23819- n = __copy_user_intel((void __user *)to,
23820- (const void *)from, n);
23821+ n = __generic_copy_from_user_intel(to, from, n);
23822 return n;
23823 }
23824 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23825@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23826 if (n > 64 && cpu_has_xmm2)
23827 n = __copy_user_intel_nocache(to, from, n);
23828 else
23829- __copy_user(to, from, n);
23830+ __copy_user(to, from, n, __copyuser_seg, "", "");
23831 #else
23832- __copy_user(to, from, n);
23833+ __copy_user(to, from, n, __copyuser_seg, "", "");
23834 #endif
23835 return n;
23836 }
23837 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23838
23839-/**
23840- * copy_to_user: - Copy a block of data into user space.
23841- * @to: Destination address, in user space.
23842- * @from: Source address, in kernel space.
23843- * @n: Number of bytes to copy.
23844- *
23845- * Context: User context only. This function may sleep.
23846- *
23847- * Copy data from kernel space to user space.
23848- *
23849- * Returns number of bytes that could not be copied.
23850- * On success, this will be zero.
23851- */
23852-unsigned long
23853-copy_to_user(void __user *to, const void *from, unsigned long n)
23854+#ifdef CONFIG_PAX_MEMORY_UDEREF
23855+void __set_fs(mm_segment_t x)
23856 {
23857- if (access_ok(VERIFY_WRITE, to, n))
23858- n = __copy_to_user(to, from, n);
23859- return n;
23860+ switch (x.seg) {
23861+ case 0:
23862+ loadsegment(gs, 0);
23863+ break;
23864+ case TASK_SIZE_MAX:
23865+ loadsegment(gs, __USER_DS);
23866+ break;
23867+ case -1UL:
23868+ loadsegment(gs, __KERNEL_DS);
23869+ break;
23870+ default:
23871+ BUG();
23872+ }
23873+ return;
23874 }
23875-EXPORT_SYMBOL(copy_to_user);
23876+EXPORT_SYMBOL(__set_fs);
23877
23878-/**
23879- * copy_from_user: - Copy a block of data from user space.
23880- * @to: Destination address, in kernel space.
23881- * @from: Source address, in user space.
23882- * @n: Number of bytes to copy.
23883- *
23884- * Context: User context only. This function may sleep.
23885- *
23886- * Copy data from user space to kernel space.
23887- *
23888- * Returns number of bytes that could not be copied.
23889- * On success, this will be zero.
23890- *
23891- * If some data could not be copied, this function will pad the copied
23892- * data to the requested size using zero bytes.
23893- */
23894-unsigned long
23895-copy_from_user(void *to, const void __user *from, unsigned long n)
23896+void set_fs(mm_segment_t x)
23897 {
23898- if (access_ok(VERIFY_READ, from, n))
23899- n = __copy_from_user(to, from, n);
23900- else
23901- memset(to, 0, n);
23902- return n;
23903+ current_thread_info()->addr_limit = x;
23904+ __set_fs(x);
23905 }
23906-EXPORT_SYMBOL(copy_from_user);
23907+EXPORT_SYMBOL(set_fs);
23908+#endif
23909diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23910index b7c2849..8633ad8 100644
23911--- a/arch/x86/lib/usercopy_64.c
23912+++ b/arch/x86/lib/usercopy_64.c
23913@@ -42,6 +42,12 @@ long
23914 __strncpy_from_user(char *dst, const char __user *src, long count)
23915 {
23916 long res;
23917+
23918+#ifdef CONFIG_PAX_MEMORY_UDEREF
23919+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23920+ src += PAX_USER_SHADOW_BASE;
23921+#endif
23922+
23923 __do_strncpy_from_user(dst, src, count, res);
23924 return res;
23925 }
23926@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23927 {
23928 long __d0;
23929 might_fault();
23930+
23931+#ifdef CONFIG_PAX_MEMORY_UDEREF
23932+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23933+ addr += PAX_USER_SHADOW_BASE;
23934+#endif
23935+
23936 /* no memory constraint because it doesn't change any memory gcc knows
23937 about */
23938 asm volatile(
23939@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23940 }
23941 EXPORT_SYMBOL(strlen_user);
23942
23943-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23944+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23945 {
23946- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23947- return copy_user_generic((__force void *)to, (__force void *)from, len);
23948- }
23949- return len;
23950+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23951+
23952+#ifdef CONFIG_PAX_MEMORY_UDEREF
23953+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23954+ to += PAX_USER_SHADOW_BASE;
23955+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23956+ from += PAX_USER_SHADOW_BASE;
23957+#endif
23958+
23959+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23960+ }
23961+ return len;
23962 }
23963 EXPORT_SYMBOL(copy_in_user);
23964
23965@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23966 * it is not necessary to optimize tail handling.
23967 */
23968 unsigned long
23969-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23970+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23971 {
23972 char c;
23973 unsigned zero_len;
23974diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23975index 61b41ca..5fef66a 100644
23976--- a/arch/x86/mm/extable.c
23977+++ b/arch/x86/mm/extable.c
23978@@ -1,14 +1,71 @@
23979 #include <linux/module.h>
23980 #include <linux/spinlock.h>
23981+#include <linux/sort.h>
23982 #include <asm/uaccess.h>
23983+#include <asm/pgtable.h>
23984
23985+/*
23986+ * The exception table needs to be sorted so that the binary
23987+ * search that we use to find entries in it works properly.
23988+ * This is used both for the kernel exception table and for
23989+ * the exception tables of modules that get loaded.
23990+ */
23991+static int cmp_ex(const void *a, const void *b)
23992+{
23993+ const struct exception_table_entry *x = a, *y = b;
23994+
23995+ /* avoid overflow */
23996+ if (x->insn > y->insn)
23997+ return 1;
23998+ if (x->insn < y->insn)
23999+ return -1;
24000+ return 0;
24001+}
24002+
24003+static void swap_ex(void *a, void *b, int size)
24004+{
24005+ struct exception_table_entry t, *x = a, *y = b;
24006+
24007+ t = *x;
24008+
24009+ pax_open_kernel();
24010+ *x = *y;
24011+ *y = t;
24012+ pax_close_kernel();
24013+}
24014+
24015+void sort_extable(struct exception_table_entry *start,
24016+ struct exception_table_entry *finish)
24017+{
24018+ sort(start, finish - start, sizeof(struct exception_table_entry),
24019+ cmp_ex, swap_ex);
24020+}
24021+
24022+#ifdef CONFIG_MODULES
24023+/*
24024+ * If the exception table is sorted, any referring to the module init
24025+ * will be at the beginning or the end.
24026+ */
24027+void trim_init_extable(struct module *m)
24028+{
24029+ /*trim the beginning*/
24030+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
24031+ m->extable++;
24032+ m->num_exentries--;
24033+ }
24034+ /*trim the end*/
24035+ while (m->num_exentries &&
24036+ within_module_init(m->extable[m->num_exentries-1].insn, m))
24037+ m->num_exentries--;
24038+}
24039+#endif /* CONFIG_MODULES */
24040
24041 int fixup_exception(struct pt_regs *regs)
24042 {
24043 const struct exception_table_entry *fixup;
24044
24045 #ifdef CONFIG_PNPBIOS
24046- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24047+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24048 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24049 extern u32 pnp_bios_is_utter_crap;
24050 pnp_bios_is_utter_crap = 1;
24051diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24052index 8ac0d76..ca501e2 100644
24053--- a/arch/x86/mm/fault.c
24054+++ b/arch/x86/mm/fault.c
24055@@ -11,10 +11,19 @@
24056 #include <linux/kprobes.h> /* __kprobes, ... */
24057 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
24058 #include <linux/perf_event.h> /* perf_sw_event */
24059+#include <linux/unistd.h>
24060+#include <linux/compiler.h>
24061
24062 #include <asm/traps.h> /* dotraplinkage, ... */
24063 #include <asm/pgalloc.h> /* pgd_*(), ... */
24064 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24065+#include <asm/vsyscall.h>
24066+#include <asm/tlbflush.h>
24067+
24068+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24069+#include <asm/stacktrace.h>
24070+#include "../kernel/dumpstack.h"
24071+#endif
24072
24073 /*
24074 * Page fault error code bits:
24075@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
24076 int ret = 0;
24077
24078 /* kprobe_running() needs smp_processor_id() */
24079- if (kprobes_built_in() && !user_mode_vm(regs)) {
24080+ if (kprobes_built_in() && !user_mode(regs)) {
24081 preempt_disable();
24082 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24083 ret = 1;
24084@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24085 return !instr_lo || (instr_lo>>1) == 1;
24086 case 0x00:
24087 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24088- if (probe_kernel_address(instr, opcode))
24089+ if (user_mode(regs)) {
24090+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24091+ return 0;
24092+ } else if (probe_kernel_address(instr, opcode))
24093 return 0;
24094
24095 *prefetch = (instr_lo == 0xF) &&
24096@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24097 while (instr < max_instr) {
24098 unsigned char opcode;
24099
24100- if (probe_kernel_address(instr, opcode))
24101+ if (user_mode(regs)) {
24102+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24103+ break;
24104+ } else if (probe_kernel_address(instr, opcode))
24105 break;
24106
24107 instr++;
24108@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24109 force_sig_info(si_signo, &info, tsk);
24110 }
24111
24112+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24113+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24114+#endif
24115+
24116+#ifdef CONFIG_PAX_EMUTRAMP
24117+static int pax_handle_fetch_fault(struct pt_regs *regs);
24118+#endif
24119+
24120+#ifdef CONFIG_PAX_PAGEEXEC
24121+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
24122+{
24123+ pgd_t *pgd;
24124+ pud_t *pud;
24125+ pmd_t *pmd;
24126+
24127+ pgd = pgd_offset(mm, address);
24128+ if (!pgd_present(*pgd))
24129+ return NULL;
24130+ pud = pud_offset(pgd, address);
24131+ if (!pud_present(*pud))
24132+ return NULL;
24133+ pmd = pmd_offset(pud, address);
24134+ if (!pmd_present(*pmd))
24135+ return NULL;
24136+ return pmd;
24137+}
24138+#endif
24139+
24140 DEFINE_SPINLOCK(pgd_lock);
24141 LIST_HEAD(pgd_list);
24142
24143@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
24144 address += PMD_SIZE) {
24145
24146 unsigned long flags;
24147+
24148+#ifdef CONFIG_PAX_PER_CPU_PGD
24149+ unsigned long cpu;
24150+#else
24151 struct page *page;
24152+#endif
24153
24154 spin_lock_irqsave(&pgd_lock, flags);
24155+
24156+#ifdef CONFIG_PAX_PER_CPU_PGD
24157+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24158+ pgd_t *pgd = get_cpu_pgd(cpu);
24159+#else
24160 list_for_each_entry(page, &pgd_list, lru) {
24161- if (!vmalloc_sync_one(page_address(page), address))
24162+ pgd_t *pgd = page_address(page);
24163+#endif
24164+
24165+ if (!vmalloc_sync_one(pgd, address))
24166 break;
24167 }
24168 spin_unlock_irqrestore(&pgd_lock, flags);
24169@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
24170 * an interrupt in the middle of a task switch..
24171 */
24172 pgd_paddr = read_cr3();
24173+
24174+#ifdef CONFIG_PAX_PER_CPU_PGD
24175+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
24176+#endif
24177+
24178 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
24179 if (!pmd_k)
24180 return -1;
24181@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
24182
24183 const pgd_t *pgd_ref = pgd_offset_k(address);
24184 unsigned long flags;
24185+
24186+#ifdef CONFIG_PAX_PER_CPU_PGD
24187+ unsigned long cpu;
24188+#else
24189 struct page *page;
24190+#endif
24191
24192 if (pgd_none(*pgd_ref))
24193 continue;
24194
24195 spin_lock_irqsave(&pgd_lock, flags);
24196+
24197+#ifdef CONFIG_PAX_PER_CPU_PGD
24198+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
24199+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
24200+#else
24201 list_for_each_entry(page, &pgd_list, lru) {
24202 pgd_t *pgd;
24203 pgd = (pgd_t *)page_address(page) + pgd_index(address);
24204+#endif
24205+
24206 if (pgd_none(*pgd))
24207 set_pgd(pgd, *pgd_ref);
24208 else
24209@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
24210 * happen within a race in page table update. In the later
24211 * case just flush:
24212 */
24213+
24214+#ifdef CONFIG_PAX_PER_CPU_PGD
24215+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
24216+ pgd = pgd_offset_cpu(smp_processor_id(), address);
24217+#else
24218 pgd = pgd_offset(current->active_mm, address);
24219+#endif
24220+
24221 pgd_ref = pgd_offset_k(address);
24222 if (pgd_none(*pgd_ref))
24223 return -1;
24224@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24225 static int is_errata100(struct pt_regs *regs, unsigned long address)
24226 {
24227 #ifdef CONFIG_X86_64
24228- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24229+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24230 return 1;
24231 #endif
24232 return 0;
24233@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24234 }
24235
24236 static const char nx_warning[] = KERN_CRIT
24237-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24238+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24239
24240 static void
24241 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24242@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24243 if (!oops_may_print())
24244 return;
24245
24246- if (error_code & PF_INSTR) {
24247+ if (nx_enabled && (error_code & PF_INSTR)) {
24248 unsigned int level;
24249
24250 pte_t *pte = lookup_address(address, &level);
24251
24252 if (pte && pte_present(*pte) && !pte_exec(*pte))
24253- printk(nx_warning, current_uid());
24254+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24255 }
24256
24257+#ifdef CONFIG_PAX_KERNEXEC
24258+ if (init_mm.start_code <= address && address < init_mm.end_code) {
24259+ if (current->signal->curr_ip)
24260+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24261+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24262+ else
24263+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24264+ current->comm, task_pid_nr(current), current_uid(), current_euid());
24265+ }
24266+#endif
24267+
24268 printk(KERN_ALERT "BUG: unable to handle kernel ");
24269 if (address < PAGE_SIZE)
24270 printk(KERN_CONT "NULL pointer dereference");
24271@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24272 {
24273 struct task_struct *tsk = current;
24274
24275+#ifdef CONFIG_X86_64
24276+ struct mm_struct *mm = tsk->mm;
24277+
24278+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24279+ if (regs->ip == (unsigned long)vgettimeofday) {
24280+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24281+ return;
24282+ } else if (regs->ip == (unsigned long)vtime) {
24283+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24284+ return;
24285+ } else if (regs->ip == (unsigned long)vgetcpu) {
24286+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24287+ return;
24288+ }
24289+ }
24290+#endif
24291+
24292 /* User mode accesses just cause a SIGSEGV */
24293 if (error_code & PF_USER) {
24294 /*
24295@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24296 if (is_errata100(regs, address))
24297 return;
24298
24299+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24300+ if (pax_is_fetch_fault(regs, error_code, address)) {
24301+
24302+#ifdef CONFIG_PAX_EMUTRAMP
24303+ switch (pax_handle_fetch_fault(regs)) {
24304+ case 2:
24305+ return;
24306+ }
24307+#endif
24308+
24309+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24310+ do_group_exit(SIGKILL);
24311+ }
24312+#endif
24313+
24314 if (unlikely(show_unhandled_signals))
24315 show_signal_msg(regs, error_code, address, tsk);
24316
24317@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24318 if (fault & VM_FAULT_HWPOISON) {
24319 printk(KERN_ERR
24320 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24321- tsk->comm, tsk->pid, address);
24322+ tsk->comm, task_pid_nr(tsk), address);
24323 code = BUS_MCEERR_AR;
24324 }
24325 #endif
24326@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24327 return 1;
24328 }
24329
24330+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24331+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24332+{
24333+ pte_t *pte;
24334+ pmd_t *pmd;
24335+ spinlock_t *ptl;
24336+ unsigned char pte_mask;
24337+
24338+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24339+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
24340+ return 0;
24341+
24342+ /* PaX: it's our fault, let's handle it if we can */
24343+
24344+ /* PaX: take a look at read faults before acquiring any locks */
24345+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24346+ /* instruction fetch attempt from a protected page in user mode */
24347+ up_read(&mm->mmap_sem);
24348+
24349+#ifdef CONFIG_PAX_EMUTRAMP
24350+ switch (pax_handle_fetch_fault(regs)) {
24351+ case 2:
24352+ return 1;
24353+ }
24354+#endif
24355+
24356+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24357+ do_group_exit(SIGKILL);
24358+ }
24359+
24360+ pmd = pax_get_pmd(mm, address);
24361+ if (unlikely(!pmd))
24362+ return 0;
24363+
24364+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24365+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24366+ pte_unmap_unlock(pte, ptl);
24367+ return 0;
24368+ }
24369+
24370+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24371+ /* write attempt to a protected page in user mode */
24372+ pte_unmap_unlock(pte, ptl);
24373+ return 0;
24374+ }
24375+
24376+#ifdef CONFIG_SMP
24377+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24378+#else
24379+ if (likely(address > get_limit(regs->cs)))
24380+#endif
24381+ {
24382+ set_pte(pte, pte_mkread(*pte));
24383+ __flush_tlb_one(address);
24384+ pte_unmap_unlock(pte, ptl);
24385+ up_read(&mm->mmap_sem);
24386+ return 1;
24387+ }
24388+
24389+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24390+
24391+ /*
24392+ * PaX: fill DTLB with user rights and retry
24393+ */
24394+ __asm__ __volatile__ (
24395+ "orb %2,(%1)\n"
24396+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24397+/*
24398+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24399+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24400+ * page fault when examined during a TLB load attempt. this is true not only
24401+ * for PTEs holding a non-present entry but also present entries that will
24402+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24403+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24404+ * for our target pages since their PTEs are simply not in the TLBs at all.
24405+
24406+ * the best thing in omitting it is that we gain around 15-20% speed in the
24407+ * fast path of the page fault handler and can get rid of tracing since we
24408+ * can no longer flush unintended entries.
24409+ */
24410+ "invlpg (%0)\n"
24411+#endif
24412+ __copyuser_seg"testb $0,(%0)\n"
24413+ "xorb %3,(%1)\n"
24414+ :
24415+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24416+ : "memory", "cc");
24417+ pte_unmap_unlock(pte, ptl);
24418+ up_read(&mm->mmap_sem);
24419+ return 1;
24420+}
24421+#endif
24422+
24423 /*
24424 * Handle a spurious fault caused by a stale TLB entry.
24425 *
24426@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24427 static inline int
24428 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24429 {
24430+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24431+ return 1;
24432+
24433 if (write) {
24434 /* write, present and write, not present: */
24435 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24436@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24437 {
24438 struct vm_area_struct *vma;
24439 struct task_struct *tsk;
24440- unsigned long address;
24441 struct mm_struct *mm;
24442 int write;
24443 int fault;
24444
24445- tsk = current;
24446- mm = tsk->mm;
24447-
24448 /* Get the faulting address: */
24449- address = read_cr2();
24450+ unsigned long address = read_cr2();
24451+
24452+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24453+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24454+ if (!search_exception_tables(regs->ip)) {
24455+ bad_area_nosemaphore(regs, error_code, address);
24456+ return;
24457+ }
24458+ if (address < PAX_USER_SHADOW_BASE) {
24459+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24460+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24461+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24462+ } else
24463+ address -= PAX_USER_SHADOW_BASE;
24464+ }
24465+#endif
24466+
24467+ tsk = current;
24468+ mm = tsk->mm;
24469
24470 /*
24471 * Detect and handle instructions that would cause a page fault for
24472@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24473 * User-mode registers count as a user access even for any
24474 * potential system fault or CPU buglet:
24475 */
24476- if (user_mode_vm(regs)) {
24477+ if (user_mode(regs)) {
24478 local_irq_enable();
24479 error_code |= PF_USER;
24480 } else {
24481@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24482 might_sleep();
24483 }
24484
24485+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24486+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24487+ return;
24488+#endif
24489+
24490 vma = find_vma(mm, address);
24491 if (unlikely(!vma)) {
24492 bad_area(regs, error_code, address);
24493@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24494 bad_area(regs, error_code, address);
24495 return;
24496 }
24497- if (error_code & PF_USER) {
24498- /*
24499- * Accessing the stack below %sp is always a bug.
24500- * The large cushion allows instructions like enter
24501- * and pusha to work. ("enter $65535, $31" pushes
24502- * 32 pointers and then decrements %sp by 65535.)
24503- */
24504- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24505- bad_area(regs, error_code, address);
24506- return;
24507- }
24508+ /*
24509+ * Accessing the stack below %sp is always a bug.
24510+ * The large cushion allows instructions like enter
24511+ * and pusha to work. ("enter $65535, $31" pushes
24512+ * 32 pointers and then decrements %sp by 65535.)
24513+ */
24514+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24515+ bad_area(regs, error_code, address);
24516+ return;
24517 }
24518+
24519+#ifdef CONFIG_PAX_SEGMEXEC
24520+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24521+ bad_area(regs, error_code, address);
24522+ return;
24523+ }
24524+#endif
24525+
24526 if (unlikely(expand_stack(vma, address))) {
24527 bad_area(regs, error_code, address);
24528 return;
24529@@ -1146,3 +1390,292 @@ good_area:
24530
24531 up_read(&mm->mmap_sem);
24532 }
24533+
24534+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24535+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24536+{
24537+ struct mm_struct *mm = current->mm;
24538+ unsigned long ip = regs->ip;
24539+
24540+ if (v8086_mode(regs))
24541+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24542+
24543+#ifdef CONFIG_PAX_PAGEEXEC
24544+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24545+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24546+ return true;
24547+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24548+ return true;
24549+ return false;
24550+ }
24551+#endif
24552+
24553+#ifdef CONFIG_PAX_SEGMEXEC
24554+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24555+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24556+ return true;
24557+ return false;
24558+ }
24559+#endif
24560+
24561+ return false;
24562+}
24563+#endif
24564+
24565+#ifdef CONFIG_PAX_EMUTRAMP
24566+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24567+{
24568+ int err;
24569+
24570+ do { /* PaX: libffi trampoline emulation */
24571+ unsigned char mov, jmp;
24572+ unsigned int addr1, addr2;
24573+
24574+#ifdef CONFIG_X86_64
24575+ if ((regs->ip + 9) >> 32)
24576+ break;
24577+#endif
24578+
24579+ err = get_user(mov, (unsigned char __user *)regs->ip);
24580+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24581+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24582+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24583+
24584+ if (err)
24585+ break;
24586+
24587+ if (mov == 0xB8 && jmp == 0xE9) {
24588+ regs->ax = addr1;
24589+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24590+ return 2;
24591+ }
24592+ } while (0);
24593+
24594+ do { /* PaX: gcc trampoline emulation #1 */
24595+ unsigned char mov1, mov2;
24596+ unsigned short jmp;
24597+ unsigned int addr1, addr2;
24598+
24599+#ifdef CONFIG_X86_64
24600+ if ((regs->ip + 11) >> 32)
24601+ break;
24602+#endif
24603+
24604+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24605+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24606+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24607+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24608+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24609+
24610+ if (err)
24611+ break;
24612+
24613+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24614+ regs->cx = addr1;
24615+ regs->ax = addr2;
24616+ regs->ip = addr2;
24617+ return 2;
24618+ }
24619+ } while (0);
24620+
24621+ do { /* PaX: gcc trampoline emulation #2 */
24622+ unsigned char mov, jmp;
24623+ unsigned int addr1, addr2;
24624+
24625+#ifdef CONFIG_X86_64
24626+ if ((regs->ip + 9) >> 32)
24627+ break;
24628+#endif
24629+
24630+ err = get_user(mov, (unsigned char __user *)regs->ip);
24631+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24632+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24633+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24634+
24635+ if (err)
24636+ break;
24637+
24638+ if (mov == 0xB9 && jmp == 0xE9) {
24639+ regs->cx = addr1;
24640+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24641+ return 2;
24642+ }
24643+ } while (0);
24644+
24645+ return 1; /* PaX in action */
24646+}
24647+
24648+#ifdef CONFIG_X86_64
24649+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24650+{
24651+ int err;
24652+
24653+ do { /* PaX: libffi trampoline emulation */
24654+ unsigned short mov1, mov2, jmp1;
24655+ unsigned char stcclc, jmp2;
24656+ unsigned long addr1, addr2;
24657+
24658+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24659+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24660+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24661+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24662+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24663+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24664+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24665+
24666+ if (err)
24667+ break;
24668+
24669+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24670+ regs->r11 = addr1;
24671+ regs->r10 = addr2;
24672+ if (stcclc == 0xF8)
24673+ regs->flags &= ~X86_EFLAGS_CF;
24674+ else
24675+ regs->flags |= X86_EFLAGS_CF;
24676+ regs->ip = addr1;
24677+ return 2;
24678+ }
24679+ } while (0);
24680+
24681+ do { /* PaX: gcc trampoline emulation #1 */
24682+ unsigned short mov1, mov2, jmp1;
24683+ unsigned char jmp2;
24684+ unsigned int addr1;
24685+ unsigned long addr2;
24686+
24687+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24688+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24689+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24690+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24691+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24692+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24693+
24694+ if (err)
24695+ break;
24696+
24697+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24698+ regs->r11 = addr1;
24699+ regs->r10 = addr2;
24700+ regs->ip = addr1;
24701+ return 2;
24702+ }
24703+ } while (0);
24704+
24705+ do { /* PaX: gcc trampoline emulation #2 */
24706+ unsigned short mov1, mov2, jmp1;
24707+ unsigned char jmp2;
24708+ unsigned long addr1, addr2;
24709+
24710+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24711+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24712+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24713+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24714+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24715+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24716+
24717+ if (err)
24718+ break;
24719+
24720+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24721+ regs->r11 = addr1;
24722+ regs->r10 = addr2;
24723+ regs->ip = addr1;
24724+ return 2;
24725+ }
24726+ } while (0);
24727+
24728+ return 1; /* PaX in action */
24729+}
24730+#endif
24731+
24732+/*
24733+ * PaX: decide what to do with offenders (regs->ip = fault address)
24734+ *
24735+ * returns 1 when task should be killed
24736+ * 2 when gcc trampoline was detected
24737+ */
24738+static int pax_handle_fetch_fault(struct pt_regs *regs)
24739+{
24740+ if (v8086_mode(regs))
24741+ return 1;
24742+
24743+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24744+ return 1;
24745+
24746+#ifdef CONFIG_X86_32
24747+ return pax_handle_fetch_fault_32(regs);
24748+#else
24749+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24750+ return pax_handle_fetch_fault_32(regs);
24751+ else
24752+ return pax_handle_fetch_fault_64(regs);
24753+#endif
24754+}
24755+#endif
24756+
24757+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24758+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24759+{
24760+ long i;
24761+
24762+ printk(KERN_ERR "PAX: bytes at PC: ");
24763+ for (i = 0; i < 20; i++) {
24764+ unsigned char c;
24765+ if (get_user(c, (unsigned char __force_user *)pc+i))
24766+ printk(KERN_CONT "?? ");
24767+ else
24768+ printk(KERN_CONT "%02x ", c);
24769+ }
24770+ printk("\n");
24771+
24772+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24773+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24774+ unsigned long c;
24775+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24776+#ifdef CONFIG_X86_32
24777+ printk(KERN_CONT "???????? ");
24778+#else
24779+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24780+ printk(KERN_CONT "???????? ???????? ");
24781+ else
24782+ printk(KERN_CONT "???????????????? ");
24783+#endif
24784+ } else {
24785+#ifdef CONFIG_X86_64
24786+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24787+ printk(KERN_CONT "%08x ", (unsigned int)c);
24788+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24789+ } else
24790+#endif
24791+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24792+ }
24793+ }
24794+ printk("\n");
24795+}
24796+#endif
24797+
24798+/**
24799+ * probe_kernel_write(): safely attempt to write to a location
24800+ * @dst: address to write to
24801+ * @src: pointer to the data that shall be written
24802+ * @size: size of the data chunk
24803+ *
24804+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24805+ * happens, handle that and return -EFAULT.
24806+ */
24807+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24808+{
24809+ long ret;
24810+ mm_segment_t old_fs = get_fs();
24811+
24812+ set_fs(KERNEL_DS);
24813+ pagefault_disable();
24814+ pax_open_kernel();
24815+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24816+ pax_close_kernel();
24817+ pagefault_enable();
24818+ set_fs(old_fs);
24819+
24820+ return ret ? -EFAULT : 0;
24821+}
24822diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24823index 71da1bc..7a16bf4 100644
24824--- a/arch/x86/mm/gup.c
24825+++ b/arch/x86/mm/gup.c
24826@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24827 addr = start;
24828 len = (unsigned long) nr_pages << PAGE_SHIFT;
24829 end = start + len;
24830- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24831+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24832 (void __user *)start, len)))
24833 return 0;
24834
24835diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24836index 63a6ba6..79abd7a 100644
24837--- a/arch/x86/mm/highmem_32.c
24838+++ b/arch/x86/mm/highmem_32.c
24839@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24840 idx = type + KM_TYPE_NR*smp_processor_id();
24841 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24842 BUG_ON(!pte_none(*(kmap_pte-idx)));
24843+
24844+ pax_open_kernel();
24845 set_pte(kmap_pte-idx, mk_pte(page, prot));
24846+ pax_close_kernel();
24847
24848 return (void *)vaddr;
24849 }
24850diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24851index f46c3407..6ff9a26 100644
24852--- a/arch/x86/mm/hugetlbpage.c
24853+++ b/arch/x86/mm/hugetlbpage.c
24854@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24855 struct hstate *h = hstate_file(file);
24856 struct mm_struct *mm = current->mm;
24857 struct vm_area_struct *vma;
24858- unsigned long start_addr;
24859+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24860+
24861+#ifdef CONFIG_PAX_SEGMEXEC
24862+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24863+ pax_task_size = SEGMEXEC_TASK_SIZE;
24864+#endif
24865+
24866+ pax_task_size -= PAGE_SIZE;
24867
24868 if (len > mm->cached_hole_size) {
24869- start_addr = mm->free_area_cache;
24870+ start_addr = mm->free_area_cache;
24871 } else {
24872- start_addr = TASK_UNMAPPED_BASE;
24873- mm->cached_hole_size = 0;
24874+ start_addr = mm->mmap_base;
24875+ mm->cached_hole_size = 0;
24876 }
24877
24878 full_search:
24879@@ -281,26 +288,27 @@ full_search:
24880
24881 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24882 /* At this point: (!vma || addr < vma->vm_end). */
24883- if (TASK_SIZE - len < addr) {
24884+ if (pax_task_size - len < addr) {
24885 /*
24886 * Start a new search - just in case we missed
24887 * some holes.
24888 */
24889- if (start_addr != TASK_UNMAPPED_BASE) {
24890- start_addr = TASK_UNMAPPED_BASE;
24891+ if (start_addr != mm->mmap_base) {
24892+ start_addr = mm->mmap_base;
24893 mm->cached_hole_size = 0;
24894 goto full_search;
24895 }
24896 return -ENOMEM;
24897 }
24898- if (!vma || addr + len <= vma->vm_start) {
24899- mm->free_area_cache = addr + len;
24900- return addr;
24901- }
24902+ if (check_heap_stack_gap(vma, addr, len))
24903+ break;
24904 if (addr + mm->cached_hole_size < vma->vm_start)
24905 mm->cached_hole_size = vma->vm_start - addr;
24906 addr = ALIGN(vma->vm_end, huge_page_size(h));
24907 }
24908+
24909+ mm->free_area_cache = addr + len;
24910+ return addr;
24911 }
24912
24913 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24914@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24915 {
24916 struct hstate *h = hstate_file(file);
24917 struct mm_struct *mm = current->mm;
24918- struct vm_area_struct *vma, *prev_vma;
24919- unsigned long base = mm->mmap_base, addr = addr0;
24920+ struct vm_area_struct *vma;
24921+ unsigned long base = mm->mmap_base, addr;
24922 unsigned long largest_hole = mm->cached_hole_size;
24923- int first_time = 1;
24924
24925 /* don't allow allocations above current base */
24926 if (mm->free_area_cache > base)
24927@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24928 largest_hole = 0;
24929 mm->free_area_cache = base;
24930 }
24931-try_again:
24932+
24933 /* make sure it can fit in the remaining address space */
24934 if (mm->free_area_cache < len)
24935 goto fail;
24936
24937 /* either no address requested or cant fit in requested address hole */
24938- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24939+ addr = (mm->free_area_cache - len);
24940 do {
24941+ addr &= huge_page_mask(h);
24942+ vma = find_vma(mm, addr);
24943 /*
24944 * Lookup failure means no vma is above this address,
24945 * i.e. return with success:
24946- */
24947- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24948- return addr;
24949-
24950- /*
24951 * new region fits between prev_vma->vm_end and
24952 * vma->vm_start, use it:
24953 */
24954- if (addr + len <= vma->vm_start &&
24955- (!prev_vma || (addr >= prev_vma->vm_end))) {
24956+ if (check_heap_stack_gap(vma, addr, len)) {
24957 /* remember the address as a hint for next time */
24958- mm->cached_hole_size = largest_hole;
24959- return (mm->free_area_cache = addr);
24960- } else {
24961- /* pull free_area_cache down to the first hole */
24962- if (mm->free_area_cache == vma->vm_end) {
24963- mm->free_area_cache = vma->vm_start;
24964- mm->cached_hole_size = largest_hole;
24965- }
24966+ mm->cached_hole_size = largest_hole;
24967+ return (mm->free_area_cache = addr);
24968+ }
24969+ /* pull free_area_cache down to the first hole */
24970+ if (mm->free_area_cache == vma->vm_end) {
24971+ mm->free_area_cache = vma->vm_start;
24972+ mm->cached_hole_size = largest_hole;
24973 }
24974
24975 /* remember the largest hole we saw so far */
24976 if (addr + largest_hole < vma->vm_start)
24977- largest_hole = vma->vm_start - addr;
24978+ largest_hole = vma->vm_start - addr;
24979
24980 /* try just below the current vma->vm_start */
24981- addr = (vma->vm_start - len) & huge_page_mask(h);
24982- } while (len <= vma->vm_start);
24983+ addr = skip_heap_stack_gap(vma, len);
24984+ } while (!IS_ERR_VALUE(addr));
24985
24986 fail:
24987 /*
24988- * if hint left us with no space for the requested
24989- * mapping then try again:
24990- */
24991- if (first_time) {
24992- mm->free_area_cache = base;
24993- largest_hole = 0;
24994- first_time = 0;
24995- goto try_again;
24996- }
24997- /*
24998 * A failed mmap() very likely causes application failure,
24999 * so fall back to the bottom-up function here. This scenario
25000 * can happen with large stack limits and large mmap()
25001 * allocations.
25002 */
25003- mm->free_area_cache = TASK_UNMAPPED_BASE;
25004+
25005+#ifdef CONFIG_PAX_SEGMEXEC
25006+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25007+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25008+ else
25009+#endif
25010+
25011+ mm->mmap_base = TASK_UNMAPPED_BASE;
25012+
25013+#ifdef CONFIG_PAX_RANDMMAP
25014+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25015+ mm->mmap_base += mm->delta_mmap;
25016+#endif
25017+
25018+ mm->free_area_cache = mm->mmap_base;
25019 mm->cached_hole_size = ~0UL;
25020 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25021 len, pgoff, flags);
25022@@ -387,6 +393,7 @@ fail:
25023 /*
25024 * Restore the topdown base:
25025 */
25026+ mm->mmap_base = base;
25027 mm->free_area_cache = base;
25028 mm->cached_hole_size = ~0UL;
25029
25030@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25031 struct hstate *h = hstate_file(file);
25032 struct mm_struct *mm = current->mm;
25033 struct vm_area_struct *vma;
25034+ unsigned long pax_task_size = TASK_SIZE;
25035
25036 if (len & ~huge_page_mask(h))
25037 return -EINVAL;
25038- if (len > TASK_SIZE)
25039+
25040+#ifdef CONFIG_PAX_SEGMEXEC
25041+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25042+ pax_task_size = SEGMEXEC_TASK_SIZE;
25043+#endif
25044+
25045+ pax_task_size -= PAGE_SIZE;
25046+
25047+ if (len > pax_task_size)
25048 return -ENOMEM;
25049
25050 if (flags & MAP_FIXED) {
25051@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25052 if (addr) {
25053 addr = ALIGN(addr, huge_page_size(h));
25054 vma = find_vma(mm, addr);
25055- if (TASK_SIZE - len >= addr &&
25056- (!vma || addr + len <= vma->vm_start))
25057+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25058 return addr;
25059 }
25060 if (mm->get_unmapped_area == arch_get_unmapped_area)
25061diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25062index 73ffd55..f61c2a7 100644
25063--- a/arch/x86/mm/init.c
25064+++ b/arch/x86/mm/init.c
25065@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
25066 * cause a hotspot and fill up ZONE_DMA. The page tables
25067 * need roughly 0.5KB per GB.
25068 */
25069-#ifdef CONFIG_X86_32
25070- start = 0x7000;
25071-#else
25072- start = 0x8000;
25073-#endif
25074+ start = 0x100000;
25075 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
25076 tables, PAGE_SIZE);
25077 if (e820_table_start == -1UL)
25078@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25079 #endif
25080
25081 set_nx();
25082- if (nx_enabled)
25083+ if (nx_enabled && cpu_has_nx)
25084 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
25085
25086 /* Enable PSE if available */
25087@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25088 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25089 * mmio resources as well as potential bios/acpi data regions.
25090 */
25091+
25092 int devmem_is_allowed(unsigned long pagenr)
25093 {
25094+#ifdef CONFIG_GRKERNSEC_KMEM
25095+ /* allow BDA */
25096+ if (!pagenr)
25097+ return 1;
25098+ /* allow EBDA */
25099+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
25100+ return 1;
25101+ /* allow ISA/video mem */
25102+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25103+ return 1;
25104+ /* throw out everything else below 1MB */
25105+ if (pagenr <= 256)
25106+ return 0;
25107+#else
25108 if (pagenr <= 256)
25109 return 1;
25110+#endif
25111+
25112 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25113 return 0;
25114 if (!page_is_ram(pagenr))
25115@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25116
25117 void free_initmem(void)
25118 {
25119+
25120+#ifdef CONFIG_PAX_KERNEXEC
25121+#ifdef CONFIG_X86_32
25122+ /* PaX: limit KERNEL_CS to actual size */
25123+ unsigned long addr, limit;
25124+ struct desc_struct d;
25125+ int cpu;
25126+
25127+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
25128+ limit = (limit - 1UL) >> PAGE_SHIFT;
25129+
25130+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
25131+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
25132+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
25133+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
25134+ }
25135+
25136+ /* PaX: make KERNEL_CS read-only */
25137+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
25138+ if (!paravirt_enabled())
25139+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
25140+/*
25141+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
25142+ pgd = pgd_offset_k(addr);
25143+ pud = pud_offset(pgd, addr);
25144+ pmd = pmd_offset(pud, addr);
25145+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25146+ }
25147+*/
25148+#ifdef CONFIG_X86_PAE
25149+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
25150+/*
25151+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
25152+ pgd = pgd_offset_k(addr);
25153+ pud = pud_offset(pgd, addr);
25154+ pmd = pmd_offset(pud, addr);
25155+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25156+ }
25157+*/
25158+#endif
25159+
25160+#ifdef CONFIG_MODULES
25161+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
25162+#endif
25163+
25164+#else
25165+ pgd_t *pgd;
25166+ pud_t *pud;
25167+ pmd_t *pmd;
25168+ unsigned long addr, end;
25169+
25170+ /* PaX: make kernel code/rodata read-only, rest non-executable */
25171+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
25172+ pgd = pgd_offset_k(addr);
25173+ pud = pud_offset(pgd, addr);
25174+ pmd = pmd_offset(pud, addr);
25175+ if (!pmd_present(*pmd))
25176+ continue;
25177+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
25178+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25179+ else
25180+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
25181+ }
25182+
25183+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
25184+ end = addr + KERNEL_IMAGE_SIZE;
25185+ for (; addr < end; addr += PMD_SIZE) {
25186+ pgd = pgd_offset_k(addr);
25187+ pud = pud_offset(pgd, addr);
25188+ pmd = pmd_offset(pud, addr);
25189+ if (!pmd_present(*pmd))
25190+ continue;
25191+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
25192+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
25193+ }
25194+#endif
25195+
25196+ flush_tlb_all();
25197+#endif
25198+
25199 free_init_pages("unused kernel memory",
25200 (unsigned long)(&__init_begin),
25201 (unsigned long)(&__init_end));
25202diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
25203index 30938c1..bda3d5d 100644
25204--- a/arch/x86/mm/init_32.c
25205+++ b/arch/x86/mm/init_32.c
25206@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
25207 }
25208
25209 /*
25210- * Creates a middle page table and puts a pointer to it in the
25211- * given global directory entry. This only returns the gd entry
25212- * in non-PAE compilation mode, since the middle layer is folded.
25213- */
25214-static pmd_t * __init one_md_table_init(pgd_t *pgd)
25215-{
25216- pud_t *pud;
25217- pmd_t *pmd_table;
25218-
25219-#ifdef CONFIG_X86_PAE
25220- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25221- if (after_bootmem)
25222- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25223- else
25224- pmd_table = (pmd_t *)alloc_low_page();
25225- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25226- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25227- pud = pud_offset(pgd, 0);
25228- BUG_ON(pmd_table != pmd_offset(pud, 0));
25229-
25230- return pmd_table;
25231- }
25232-#endif
25233- pud = pud_offset(pgd, 0);
25234- pmd_table = pmd_offset(pud, 0);
25235-
25236- return pmd_table;
25237-}
25238-
25239-/*
25240 * Create a page table and place a pointer to it in a middle page
25241 * directory entry:
25242 */
25243@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25244 page_table = (pte_t *)alloc_low_page();
25245
25246 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25247+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25248+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25249+#else
25250 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25251+#endif
25252 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25253 }
25254
25255 return pte_offset_kernel(pmd, 0);
25256 }
25257
25258+static pmd_t * __init one_md_table_init(pgd_t *pgd)
25259+{
25260+ pud_t *pud;
25261+ pmd_t *pmd_table;
25262+
25263+ pud = pud_offset(pgd, 0);
25264+ pmd_table = pmd_offset(pud, 0);
25265+
25266+ return pmd_table;
25267+}
25268+
25269 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25270 {
25271 int pgd_idx = pgd_index(vaddr);
25272@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25273 int pgd_idx, pmd_idx;
25274 unsigned long vaddr;
25275 pgd_t *pgd;
25276+ pud_t *pud;
25277 pmd_t *pmd;
25278 pte_t *pte = NULL;
25279
25280@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25281 pgd = pgd_base + pgd_idx;
25282
25283 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25284- pmd = one_md_table_init(pgd);
25285- pmd = pmd + pmd_index(vaddr);
25286+ pud = pud_offset(pgd, vaddr);
25287+ pmd = pmd_offset(pud, vaddr);
25288+
25289+#ifdef CONFIG_X86_PAE
25290+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25291+#endif
25292+
25293 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25294 pmd++, pmd_idx++) {
25295 pte = page_table_kmap_check(one_page_table_init(pmd),
25296@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25297 }
25298 }
25299
25300-static inline int is_kernel_text(unsigned long addr)
25301+static inline int is_kernel_text(unsigned long start, unsigned long end)
25302 {
25303- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25304- return 1;
25305- return 0;
25306+ if ((start > ktla_ktva((unsigned long)_etext) ||
25307+ end <= ktla_ktva((unsigned long)_stext)) &&
25308+ (start > ktla_ktva((unsigned long)_einittext) ||
25309+ end <= ktla_ktva((unsigned long)_sinittext)) &&
25310+
25311+#ifdef CONFIG_ACPI_SLEEP
25312+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25313+#endif
25314+
25315+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25316+ return 0;
25317+ return 1;
25318 }
25319
25320 /*
25321@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25322 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25323 unsigned long start_pfn, end_pfn;
25324 pgd_t *pgd_base = swapper_pg_dir;
25325- int pgd_idx, pmd_idx, pte_ofs;
25326+ unsigned int pgd_idx, pmd_idx, pte_ofs;
25327 unsigned long pfn;
25328 pgd_t *pgd;
25329+ pud_t *pud;
25330 pmd_t *pmd;
25331 pte_t *pte;
25332 unsigned pages_2m, pages_4k;
25333@@ -278,8 +279,13 @@ repeat:
25334 pfn = start_pfn;
25335 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25336 pgd = pgd_base + pgd_idx;
25337- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25338- pmd = one_md_table_init(pgd);
25339+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25340+ pud = pud_offset(pgd, 0);
25341+ pmd = pmd_offset(pud, 0);
25342+
25343+#ifdef CONFIG_X86_PAE
25344+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25345+#endif
25346
25347 if (pfn >= end_pfn)
25348 continue;
25349@@ -291,14 +297,13 @@ repeat:
25350 #endif
25351 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25352 pmd++, pmd_idx++) {
25353- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25354+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25355
25356 /*
25357 * Map with big pages if possible, otherwise
25358 * create normal page tables:
25359 */
25360 if (use_pse) {
25361- unsigned int addr2;
25362 pgprot_t prot = PAGE_KERNEL_LARGE;
25363 /*
25364 * first pass will use the same initial
25365@@ -308,11 +313,7 @@ repeat:
25366 __pgprot(PTE_IDENT_ATTR |
25367 _PAGE_PSE);
25368
25369- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25370- PAGE_OFFSET + PAGE_SIZE-1;
25371-
25372- if (is_kernel_text(addr) ||
25373- is_kernel_text(addr2))
25374+ if (is_kernel_text(address, address + PMD_SIZE))
25375 prot = PAGE_KERNEL_LARGE_EXEC;
25376
25377 pages_2m++;
25378@@ -329,7 +330,7 @@ repeat:
25379 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25380 pte += pte_ofs;
25381 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25382- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25383+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25384 pgprot_t prot = PAGE_KERNEL;
25385 /*
25386 * first pass will use the same initial
25387@@ -337,7 +338,7 @@ repeat:
25388 */
25389 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25390
25391- if (is_kernel_text(addr))
25392+ if (is_kernel_text(address, address + PAGE_SIZE))
25393 prot = PAGE_KERNEL_EXEC;
25394
25395 pages_4k++;
25396@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25397
25398 pud = pud_offset(pgd, va);
25399 pmd = pmd_offset(pud, va);
25400- if (!pmd_present(*pmd))
25401+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
25402 break;
25403
25404 pte = pte_offset_kernel(pmd, va);
25405@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25406
25407 static void __init pagetable_init(void)
25408 {
25409- pgd_t *pgd_base = swapper_pg_dir;
25410-
25411- permanent_kmaps_init(pgd_base);
25412+ permanent_kmaps_init(swapper_pg_dir);
25413 }
25414
25415 #ifdef CONFIG_ACPI_SLEEP
25416@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25417 * ACPI suspend needs this for resume, because things like the intel-agp
25418 * driver might have split up a kernel 4MB mapping.
25419 */
25420-char swsusp_pg_dir[PAGE_SIZE]
25421+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25422 __attribute__ ((aligned(PAGE_SIZE)));
25423
25424 static inline void save_pg_dir(void)
25425 {
25426- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25427+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25428 }
25429 #else /* !CONFIG_ACPI_SLEEP */
25430 static inline void save_pg_dir(void)
25431@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25432 flush_tlb_all();
25433 }
25434
25435-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25436+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25437 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25438
25439 /* user-defined highmem size */
25440@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25441 * Initialize the boot-time allocator (with low memory only):
25442 */
25443 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25444- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25445+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25446 PAGE_SIZE);
25447 if (bootmap == -1L)
25448 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25449@@ -864,6 +863,12 @@ void __init mem_init(void)
25450
25451 pci_iommu_alloc();
25452
25453+#ifdef CONFIG_PAX_PER_CPU_PGD
25454+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25455+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25456+ KERNEL_PGD_PTRS);
25457+#endif
25458+
25459 #ifdef CONFIG_FLATMEM
25460 BUG_ON(!mem_map);
25461 #endif
25462@@ -881,7 +886,7 @@ void __init mem_init(void)
25463 set_highmem_pages_init();
25464
25465 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25466- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25467+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25468 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25469
25470 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25471@@ -923,10 +928,10 @@ void __init mem_init(void)
25472 ((unsigned long)&__init_end -
25473 (unsigned long)&__init_begin) >> 10,
25474
25475- (unsigned long)&_etext, (unsigned long)&_edata,
25476- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25477+ (unsigned long)&_sdata, (unsigned long)&_edata,
25478+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25479
25480- (unsigned long)&_text, (unsigned long)&_etext,
25481+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25482 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25483
25484 /*
25485@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25486 if (!kernel_set_to_readonly)
25487 return;
25488
25489+ start = ktla_ktva(start);
25490 pr_debug("Set kernel text: %lx - %lx for read write\n",
25491 start, start+size);
25492
25493@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25494 if (!kernel_set_to_readonly)
25495 return;
25496
25497+ start = ktla_ktva(start);
25498 pr_debug("Set kernel text: %lx - %lx for read only\n",
25499 start, start+size);
25500
25501@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25502 unsigned long start = PFN_ALIGN(_text);
25503 unsigned long size = PFN_ALIGN(_etext) - start;
25504
25505+ start = ktla_ktva(start);
25506 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25507 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25508 size >> 10);
25509diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25510index 7d095ad..25d2549 100644
25511--- a/arch/x86/mm/init_64.c
25512+++ b/arch/x86/mm/init_64.c
25513@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25514 pmd = fill_pmd(pud, vaddr);
25515 pte = fill_pte(pmd, vaddr);
25516
25517+ pax_open_kernel();
25518 set_pte(pte, new_pte);
25519+ pax_close_kernel();
25520
25521 /*
25522 * It's enough to flush this one mapping.
25523@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25524 pgd = pgd_offset_k((unsigned long)__va(phys));
25525 if (pgd_none(*pgd)) {
25526 pud = (pud_t *) spp_getpage();
25527- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25528- _PAGE_USER));
25529+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25530 }
25531 pud = pud_offset(pgd, (unsigned long)__va(phys));
25532 if (pud_none(*pud)) {
25533 pmd = (pmd_t *) spp_getpage();
25534- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25535- _PAGE_USER));
25536+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25537 }
25538 pmd = pmd_offset(pud, phys);
25539 BUG_ON(!pmd_none(*pmd));
25540@@ -675,6 +675,12 @@ void __init mem_init(void)
25541
25542 pci_iommu_alloc();
25543
25544+#ifdef CONFIG_PAX_PER_CPU_PGD
25545+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25546+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25547+ KERNEL_PGD_PTRS);
25548+#endif
25549+
25550 /* clear_bss() already clear the empty_zero_page */
25551
25552 reservedpages = 0;
25553@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25554 static struct vm_area_struct gate_vma = {
25555 .vm_start = VSYSCALL_START,
25556 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25557- .vm_page_prot = PAGE_READONLY_EXEC,
25558- .vm_flags = VM_READ | VM_EXEC
25559+ .vm_page_prot = PAGE_READONLY,
25560+ .vm_flags = VM_READ
25561 };
25562
25563 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25564@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25565
25566 const char *arch_vma_name(struct vm_area_struct *vma)
25567 {
25568- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25569+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25570 return "[vdso]";
25571 if (vma == &gate_vma)
25572 return "[vsyscall]";
25573diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25574index 84e236c..69bd3f6 100644
25575--- a/arch/x86/mm/iomap_32.c
25576+++ b/arch/x86/mm/iomap_32.c
25577@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25578 debug_kmap_atomic(type);
25579 idx = type + KM_TYPE_NR * smp_processor_id();
25580 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25581+
25582+ pax_open_kernel();
25583 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25584+ pax_close_kernel();
25585+
25586 arch_flush_lazy_mmu_mode();
25587
25588 return (void *)vaddr;
25589diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25590index 2feb9bd..ab91e7b 100644
25591--- a/arch/x86/mm/ioremap.c
25592+++ b/arch/x86/mm/ioremap.c
25593@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25594 * Second special case: Some BIOSen report the PC BIOS
25595 * area (640->1Mb) as ram even though it is not.
25596 */
25597- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25598- pagenr < (BIOS_END >> PAGE_SHIFT))
25599+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25600+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25601 return 0;
25602
25603 for (i = 0; i < e820.nr_map; i++) {
25604@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25605 /*
25606 * Don't allow anybody to remap normal RAM that we're using..
25607 */
25608- for (pfn = phys_addr >> PAGE_SHIFT;
25609- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25610- pfn++) {
25611-
25612+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25613 int is_ram = page_is_ram(pfn);
25614
25615- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25616+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25617 return NULL;
25618 WARN_ON_ONCE(is_ram);
25619 }
25620@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25621
25622 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25623 if (page_is_ram(start >> PAGE_SHIFT))
25624+#ifdef CONFIG_HIGHMEM
25625+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25626+#endif
25627 return __va(phys);
25628
25629 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25630@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25631 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25632
25633 static __initdata int after_paging_init;
25634-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25635+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25636
25637 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25638 {
25639@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25640 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25641
25642 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25643- memset(bm_pte, 0, sizeof(bm_pte));
25644- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25645+ pmd_populate_user(&init_mm, pmd, bm_pte);
25646
25647 /*
25648 * The boot-ioremap range spans multiple pmds, for which
25649diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25650index 8cc1833..1abbc5b 100644
25651--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25652+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25653@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25654 * memory (e.g. tracked pages)? For now, we need this to avoid
25655 * invoking kmemcheck for PnP BIOS calls.
25656 */
25657- if (regs->flags & X86_VM_MASK)
25658+ if (v8086_mode(regs))
25659 return false;
25660- if (regs->cs != __KERNEL_CS)
25661+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25662 return false;
25663
25664 pte = kmemcheck_pte_lookup(address);
25665diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25666index c9e57af..07a321b 100644
25667--- a/arch/x86/mm/mmap.c
25668+++ b/arch/x86/mm/mmap.c
25669@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25670 * Leave an at least ~128 MB hole with possible stack randomization.
25671 */
25672 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25673-#define MAX_GAP (TASK_SIZE/6*5)
25674+#define MAX_GAP (pax_task_size/6*5)
25675
25676 /*
25677 * True on X86_32 or when emulating IA32 on X86_64
25678@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25679 return rnd << PAGE_SHIFT;
25680 }
25681
25682-static unsigned long mmap_base(void)
25683+static unsigned long mmap_base(struct mm_struct *mm)
25684 {
25685 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25686+ unsigned long pax_task_size = TASK_SIZE;
25687+
25688+#ifdef CONFIG_PAX_SEGMEXEC
25689+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25690+ pax_task_size = SEGMEXEC_TASK_SIZE;
25691+#endif
25692
25693 if (gap < MIN_GAP)
25694 gap = MIN_GAP;
25695 else if (gap > MAX_GAP)
25696 gap = MAX_GAP;
25697
25698- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25699+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25700 }
25701
25702 /*
25703 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25704 * does, but not when emulating X86_32
25705 */
25706-static unsigned long mmap_legacy_base(void)
25707+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25708 {
25709- if (mmap_is_ia32())
25710+ if (mmap_is_ia32()) {
25711+
25712+#ifdef CONFIG_PAX_SEGMEXEC
25713+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25714+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25715+ else
25716+#endif
25717+
25718 return TASK_UNMAPPED_BASE;
25719- else
25720+ } else
25721 return TASK_UNMAPPED_BASE + mmap_rnd();
25722 }
25723
25724@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25725 void arch_pick_mmap_layout(struct mm_struct *mm)
25726 {
25727 if (mmap_is_legacy()) {
25728- mm->mmap_base = mmap_legacy_base();
25729+ mm->mmap_base = mmap_legacy_base(mm);
25730+
25731+#ifdef CONFIG_PAX_RANDMMAP
25732+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25733+ mm->mmap_base += mm->delta_mmap;
25734+#endif
25735+
25736 mm->get_unmapped_area = arch_get_unmapped_area;
25737 mm->unmap_area = arch_unmap_area;
25738 } else {
25739- mm->mmap_base = mmap_base();
25740+ mm->mmap_base = mmap_base(mm);
25741+
25742+#ifdef CONFIG_PAX_RANDMMAP
25743+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25744+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25745+#endif
25746+
25747 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25748 mm->unmap_area = arch_unmap_area_topdown;
25749 }
25750diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25751index 132772a..b961f11 100644
25752--- a/arch/x86/mm/mmio-mod.c
25753+++ b/arch/x86/mm/mmio-mod.c
25754@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25755 break;
25756 default:
25757 {
25758- unsigned char *ip = (unsigned char *)instptr;
25759+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25760 my_trace->opcode = MMIO_UNKNOWN_OP;
25761 my_trace->width = 0;
25762 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25763@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25764 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25765 void __iomem *addr)
25766 {
25767- static atomic_t next_id;
25768+ static atomic_unchecked_t next_id;
25769 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25770 /* These are page-unaligned. */
25771 struct mmiotrace_map map = {
25772@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25773 .private = trace
25774 },
25775 .phys = offset,
25776- .id = atomic_inc_return(&next_id)
25777+ .id = atomic_inc_return_unchecked(&next_id)
25778 };
25779 map.map_id = trace->id;
25780
25781diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25782index d253006..e56dd6a 100644
25783--- a/arch/x86/mm/numa_32.c
25784+++ b/arch/x86/mm/numa_32.c
25785@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25786 }
25787 #endif
25788
25789-extern unsigned long find_max_low_pfn(void);
25790 extern unsigned long highend_pfn, highstart_pfn;
25791
25792 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25793diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25794index e1d1069..2251ff3 100644
25795--- a/arch/x86/mm/pageattr-test.c
25796+++ b/arch/x86/mm/pageattr-test.c
25797@@ -36,7 +36,7 @@ enum {
25798
25799 static int pte_testbit(pte_t pte)
25800 {
25801- return pte_flags(pte) & _PAGE_UNUSED1;
25802+ return pte_flags(pte) & _PAGE_CPA_TEST;
25803 }
25804
25805 struct split_state {
25806diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25807index dd38bfb..b72c63e 100644
25808--- a/arch/x86/mm/pageattr.c
25809+++ b/arch/x86/mm/pageattr.c
25810@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25811 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25812 */
25813 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25814- pgprot_val(forbidden) |= _PAGE_NX;
25815+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25816
25817 /*
25818 * The kernel text needs to be executable for obvious reasons
25819 * Does not cover __inittext since that is gone later on. On
25820 * 64bit we do not enforce !NX on the low mapping
25821 */
25822- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25823- pgprot_val(forbidden) |= _PAGE_NX;
25824+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25825+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25826
25827+#ifdef CONFIG_DEBUG_RODATA
25828 /*
25829 * The .rodata section needs to be read-only. Using the pfn
25830 * catches all aliases.
25831@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25832 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25833 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25834 pgprot_val(forbidden) |= _PAGE_RW;
25835+#endif
25836+
25837+#ifdef CONFIG_PAX_KERNEXEC
25838+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25839+ pgprot_val(forbidden) |= _PAGE_RW;
25840+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25841+ }
25842+#endif
25843
25844 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25845
25846@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25847 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25848 {
25849 /* change init_mm */
25850+ pax_open_kernel();
25851 set_pte_atomic(kpte, pte);
25852+
25853 #ifdef CONFIG_X86_32
25854 if (!SHARED_KERNEL_PMD) {
25855+
25856+#ifdef CONFIG_PAX_PER_CPU_PGD
25857+ unsigned long cpu;
25858+#else
25859 struct page *page;
25860+#endif
25861
25862+#ifdef CONFIG_PAX_PER_CPU_PGD
25863+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25864+ pgd_t *pgd = get_cpu_pgd(cpu);
25865+#else
25866 list_for_each_entry(page, &pgd_list, lru) {
25867- pgd_t *pgd;
25868+ pgd_t *pgd = (pgd_t *)page_address(page);
25869+#endif
25870+
25871 pud_t *pud;
25872 pmd_t *pmd;
25873
25874- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25875+ pgd += pgd_index(address);
25876 pud = pud_offset(pgd, address);
25877 pmd = pmd_offset(pud, address);
25878 set_pte_atomic((pte_t *)pmd, pte);
25879 }
25880 }
25881 #endif
25882+ pax_close_kernel();
25883 }
25884
25885 static int
25886diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25887index e78cd0e..de0a817 100644
25888--- a/arch/x86/mm/pat.c
25889+++ b/arch/x86/mm/pat.c
25890@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25891
25892 conflict:
25893 printk(KERN_INFO "%s:%d conflicting memory types "
25894- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25895+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25896 new->end, cattr_name(new->type), cattr_name(entry->type));
25897 return -EBUSY;
25898 }
25899@@ -559,7 +559,7 @@ unlock_ret:
25900
25901 if (err) {
25902 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25903- current->comm, current->pid, start, end);
25904+ current->comm, task_pid_nr(current), start, end);
25905 }
25906
25907 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25908@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25909 while (cursor < to) {
25910 if (!devmem_is_allowed(pfn)) {
25911 printk(KERN_INFO
25912- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25913- current->comm, from, to);
25914+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25915+ current->comm, from, to, cursor);
25916 return 0;
25917 }
25918 cursor += PAGE_SIZE;
25919@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25920 printk(KERN_INFO
25921 "%s:%d ioremap_change_attr failed %s "
25922 "for %Lx-%Lx\n",
25923- current->comm, current->pid,
25924+ current->comm, task_pid_nr(current),
25925 cattr_name(flags),
25926 base, (unsigned long long)(base + size));
25927 return -EINVAL;
25928@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25929 free_memtype(paddr, paddr + size);
25930 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25931 " for %Lx-%Lx, got %s\n",
25932- current->comm, current->pid,
25933+ current->comm, task_pid_nr(current),
25934 cattr_name(want_flags),
25935 (unsigned long long)paddr,
25936 (unsigned long long)(paddr + size),
25937diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25938index df3d5c8..c2223e1 100644
25939--- a/arch/x86/mm/pf_in.c
25940+++ b/arch/x86/mm/pf_in.c
25941@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25942 int i;
25943 enum reason_type rv = OTHERS;
25944
25945- p = (unsigned char *)ins_addr;
25946+ p = (unsigned char *)ktla_ktva(ins_addr);
25947 p += skip_prefix(p, &prf);
25948 p += get_opcode(p, &opcode);
25949
25950@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25951 struct prefix_bits prf;
25952 int i;
25953
25954- p = (unsigned char *)ins_addr;
25955+ p = (unsigned char *)ktla_ktva(ins_addr);
25956 p += skip_prefix(p, &prf);
25957 p += get_opcode(p, &opcode);
25958
25959@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25960 struct prefix_bits prf;
25961 int i;
25962
25963- p = (unsigned char *)ins_addr;
25964+ p = (unsigned char *)ktla_ktva(ins_addr);
25965 p += skip_prefix(p, &prf);
25966 p += get_opcode(p, &opcode);
25967
25968@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25969 int i;
25970 unsigned long rv;
25971
25972- p = (unsigned char *)ins_addr;
25973+ p = (unsigned char *)ktla_ktva(ins_addr);
25974 p += skip_prefix(p, &prf);
25975 p += get_opcode(p, &opcode);
25976 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25977@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25978 int i;
25979 unsigned long rv;
25980
25981- p = (unsigned char *)ins_addr;
25982+ p = (unsigned char *)ktla_ktva(ins_addr);
25983 p += skip_prefix(p, &prf);
25984 p += get_opcode(p, &opcode);
25985 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25986diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25987index e0e6fad..c56b495 100644
25988--- a/arch/x86/mm/pgtable.c
25989+++ b/arch/x86/mm/pgtable.c
25990@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25991 list_del(&page->lru);
25992 }
25993
25994-#define UNSHARED_PTRS_PER_PGD \
25995- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25996+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25997+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25998
25999+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26000+{
26001+ while (count--)
26002+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
26003+}
26004+#endif
26005+
26006+#ifdef CONFIG_PAX_PER_CPU_PGD
26007+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26008+{
26009+ while (count--)
26010+
26011+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26012+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
26013+#else
26014+ *dst++ = *src++;
26015+#endif
26016+
26017+}
26018+#endif
26019+
26020+#ifdef CONFIG_X86_64
26021+#define pxd_t pud_t
26022+#define pyd_t pgd_t
26023+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26024+#define pxd_free(mm, pud) pud_free((mm), (pud))
26025+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26026+#define pyd_offset(mm, address) pgd_offset((mm), (address))
26027+#define PYD_SIZE PGDIR_SIZE
26028+#else
26029+#define pxd_t pmd_t
26030+#define pyd_t pud_t
26031+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26032+#define pxd_free(mm, pud) pmd_free((mm), (pud))
26033+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26034+#define pyd_offset(mm, address) pud_offset((mm), (address))
26035+#define PYD_SIZE PUD_SIZE
26036+#endif
26037+
26038+#ifdef CONFIG_PAX_PER_CPU_PGD
26039+static inline void pgd_ctor(pgd_t *pgd) {}
26040+static inline void pgd_dtor(pgd_t *pgd) {}
26041+#else
26042 static void pgd_ctor(pgd_t *pgd)
26043 {
26044 /* If the pgd points to a shared pagetable level (either the
26045@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
26046 pgd_list_del(pgd);
26047 spin_unlock_irqrestore(&pgd_lock, flags);
26048 }
26049+#endif
26050
26051 /*
26052 * List of all pgd's needed for non-PAE so it can invalidate entries
26053@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
26054 * -- wli
26055 */
26056
26057-#ifdef CONFIG_X86_PAE
26058+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26059 /*
26060 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26061 * updating the top-level pagetable entries to guarantee the
26062@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
26063 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26064 * and initialize the kernel pmds here.
26065 */
26066-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26067+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26068
26069 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26070 {
26071@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26072 */
26073 flush_tlb_mm(mm);
26074 }
26075+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26076+#define PREALLOCATED_PXDS USER_PGD_PTRS
26077 #else /* !CONFIG_X86_PAE */
26078
26079 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26080-#define PREALLOCATED_PMDS 0
26081+#define PREALLOCATED_PXDS 0
26082
26083 #endif /* CONFIG_X86_PAE */
26084
26085-static void free_pmds(pmd_t *pmds[])
26086+static void free_pxds(pxd_t *pxds[])
26087 {
26088 int i;
26089
26090- for(i = 0; i < PREALLOCATED_PMDS; i++)
26091- if (pmds[i])
26092- free_page((unsigned long)pmds[i]);
26093+ for(i = 0; i < PREALLOCATED_PXDS; i++)
26094+ if (pxds[i])
26095+ free_page((unsigned long)pxds[i]);
26096 }
26097
26098-static int preallocate_pmds(pmd_t *pmds[])
26099+static int preallocate_pxds(pxd_t *pxds[])
26100 {
26101 int i;
26102 bool failed = false;
26103
26104- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26105- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26106- if (pmd == NULL)
26107+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26108+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26109+ if (pxd == NULL)
26110 failed = true;
26111- pmds[i] = pmd;
26112+ pxds[i] = pxd;
26113 }
26114
26115 if (failed) {
26116- free_pmds(pmds);
26117+ free_pxds(pxds);
26118 return -ENOMEM;
26119 }
26120
26121@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
26122 * preallocate which never got a corresponding vma will need to be
26123 * freed manually.
26124 */
26125-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
26126+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
26127 {
26128 int i;
26129
26130- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26131+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26132 pgd_t pgd = pgdp[i];
26133
26134 if (pgd_val(pgd) != 0) {
26135- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
26136+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
26137
26138- pgdp[i] = native_make_pgd(0);
26139+ set_pgd(pgdp + i, native_make_pgd(0));
26140
26141- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
26142- pmd_free(mm, pmd);
26143+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
26144+ pxd_free(mm, pxd);
26145 }
26146 }
26147 }
26148
26149-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
26150+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
26151 {
26152- pud_t *pud;
26153+ pyd_t *pyd;
26154 unsigned long addr;
26155 int i;
26156
26157- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
26158+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
26159 return;
26160
26161- pud = pud_offset(pgd, 0);
26162+#ifdef CONFIG_X86_64
26163+ pyd = pyd_offset(mm, 0L);
26164+#else
26165+ pyd = pyd_offset(pgd, 0L);
26166+#endif
26167
26168- for (addr = i = 0; i < PREALLOCATED_PMDS;
26169- i++, pud++, addr += PUD_SIZE) {
26170- pmd_t *pmd = pmds[i];
26171+ for (addr = i = 0; i < PREALLOCATED_PXDS;
26172+ i++, pyd++, addr += PYD_SIZE) {
26173+ pxd_t *pxd = pxds[i];
26174
26175 if (i >= KERNEL_PGD_BOUNDARY)
26176- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26177- sizeof(pmd_t) * PTRS_PER_PMD);
26178+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
26179+ sizeof(pxd_t) * PTRS_PER_PMD);
26180
26181- pud_populate(mm, pud, pmd);
26182+ pyd_populate(mm, pyd, pxd);
26183 }
26184 }
26185
26186 pgd_t *pgd_alloc(struct mm_struct *mm)
26187 {
26188 pgd_t *pgd;
26189- pmd_t *pmds[PREALLOCATED_PMDS];
26190+ pxd_t *pxds[PREALLOCATED_PXDS];
26191+
26192 unsigned long flags;
26193
26194 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
26195@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26196
26197 mm->pgd = pgd;
26198
26199- if (preallocate_pmds(pmds) != 0)
26200+ if (preallocate_pxds(pxds) != 0)
26201 goto out_free_pgd;
26202
26203 if (paravirt_pgd_alloc(mm) != 0)
26204- goto out_free_pmds;
26205+ goto out_free_pxds;
26206
26207 /*
26208 * Make sure that pre-populating the pmds is atomic with
26209@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
26210 spin_lock_irqsave(&pgd_lock, flags);
26211
26212 pgd_ctor(pgd);
26213- pgd_prepopulate_pmd(mm, pgd, pmds);
26214+ pgd_prepopulate_pxd(mm, pgd, pxds);
26215
26216 spin_unlock_irqrestore(&pgd_lock, flags);
26217
26218 return pgd;
26219
26220-out_free_pmds:
26221- free_pmds(pmds);
26222+out_free_pxds:
26223+ free_pxds(pxds);
26224 out_free_pgd:
26225 free_page((unsigned long)pgd);
26226 out:
26227@@ -287,7 +338,7 @@ out:
26228
26229 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26230 {
26231- pgd_mop_up_pmds(mm, pgd);
26232+ pgd_mop_up_pxds(mm, pgd);
26233 pgd_dtor(pgd);
26234 paravirt_pgd_free(mm, pgd);
26235 free_page((unsigned long)pgd);
26236diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26237index 46c8834..fcab43d 100644
26238--- a/arch/x86/mm/pgtable_32.c
26239+++ b/arch/x86/mm/pgtable_32.c
26240@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26241 return;
26242 }
26243 pte = pte_offset_kernel(pmd, vaddr);
26244+
26245+ pax_open_kernel();
26246 if (pte_val(pteval))
26247 set_pte_at(&init_mm, vaddr, pte, pteval);
26248 else
26249 pte_clear(&init_mm, vaddr, pte);
26250+ pax_close_kernel();
26251
26252 /*
26253 * It's enough to flush this one mapping.
26254diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26255index 513d8ed..978c161 100644
26256--- a/arch/x86/mm/setup_nx.c
26257+++ b/arch/x86/mm/setup_nx.c
26258@@ -4,11 +4,10 @@
26259
26260 #include <asm/pgtable.h>
26261
26262+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26263 int nx_enabled;
26264
26265-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26266-static int disable_nx __cpuinitdata;
26267-
26268+#ifndef CONFIG_PAX_PAGEEXEC
26269 /*
26270 * noexec = on|off
26271 *
26272@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26273 if (!str)
26274 return -EINVAL;
26275 if (!strncmp(str, "on", 2)) {
26276- __supported_pte_mask |= _PAGE_NX;
26277- disable_nx = 0;
26278+ nx_enabled = 1;
26279 } else if (!strncmp(str, "off", 3)) {
26280- disable_nx = 1;
26281- __supported_pte_mask &= ~_PAGE_NX;
26282+ nx_enabled = 0;
26283 }
26284 return 0;
26285 }
26286 early_param("noexec", noexec_setup);
26287 #endif
26288+#endif
26289
26290 #ifdef CONFIG_X86_PAE
26291 void __init set_nx(void)
26292 {
26293- unsigned int v[4], l, h;
26294+ if (!nx_enabled && cpu_has_nx) {
26295+ unsigned l, h;
26296
26297- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26298- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26299-
26300- if ((v[3] & (1 << 20)) && !disable_nx) {
26301- rdmsr(MSR_EFER, l, h);
26302- l |= EFER_NX;
26303- wrmsr(MSR_EFER, l, h);
26304- nx_enabled = 1;
26305- __supported_pte_mask |= _PAGE_NX;
26306- }
26307+ __supported_pte_mask &= ~_PAGE_NX;
26308+ rdmsr(MSR_EFER, l, h);
26309+ l &= ~EFER_NX;
26310+ wrmsr(MSR_EFER, l, h);
26311 }
26312 }
26313 #else
26314@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26315 unsigned long efer;
26316
26317 rdmsrl(MSR_EFER, efer);
26318- if (!(efer & EFER_NX) || disable_nx)
26319+ if (!(efer & EFER_NX) || !nx_enabled)
26320 __supported_pte_mask &= ~_PAGE_NX;
26321 }
26322 #endif
26323diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26324index 36fe08e..b123d3a 100644
26325--- a/arch/x86/mm/tlb.c
26326+++ b/arch/x86/mm/tlb.c
26327@@ -61,7 +61,11 @@ void leave_mm(int cpu)
26328 BUG();
26329 cpumask_clear_cpu(cpu,
26330 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26331+
26332+#ifndef CONFIG_PAX_PER_CPU_PGD
26333 load_cr3(swapper_pg_dir);
26334+#endif
26335+
26336 }
26337 EXPORT_SYMBOL_GPL(leave_mm);
26338
26339diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26340index 829edf0..672adb3 100644
26341--- a/arch/x86/oprofile/backtrace.c
26342+++ b/arch/x86/oprofile/backtrace.c
26343@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26344 {
26345 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26346
26347- if (!user_mode_vm(regs)) {
26348+ if (!user_mode(regs)) {
26349 unsigned long stack = kernel_stack_pointer(regs);
26350 if (depth)
26351 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26352diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26353index e6a160a..36deff6 100644
26354--- a/arch/x86/oprofile/op_model_p4.c
26355+++ b/arch/x86/oprofile/op_model_p4.c
26356@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26357 #endif
26358 }
26359
26360-static int inline addr_increment(void)
26361+static inline int addr_increment(void)
26362 {
26363 #ifdef CONFIG_SMP
26364 return smp_num_siblings == 2 ? 2 : 1;
26365diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26366index 1331fcf..03901b2 100644
26367--- a/arch/x86/pci/common.c
26368+++ b/arch/x86/pci/common.c
26369@@ -31,8 +31,8 @@ int noioapicreroute = 1;
26370 int pcibios_last_bus = -1;
26371 unsigned long pirq_table_addr;
26372 struct pci_bus *pci_root_bus;
26373-struct pci_raw_ops *raw_pci_ops;
26374-struct pci_raw_ops *raw_pci_ext_ops;
26375+const struct pci_raw_ops *raw_pci_ops;
26376+const struct pci_raw_ops *raw_pci_ext_ops;
26377
26378 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26379 int reg, int len, u32 *val)
26380diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26381index 347d882..4baf6b6 100644
26382--- a/arch/x86/pci/direct.c
26383+++ b/arch/x86/pci/direct.c
26384@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26385
26386 #undef PCI_CONF1_ADDRESS
26387
26388-struct pci_raw_ops pci_direct_conf1 = {
26389+const struct pci_raw_ops pci_direct_conf1 = {
26390 .read = pci_conf1_read,
26391 .write = pci_conf1_write,
26392 };
26393@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26394
26395 #undef PCI_CONF2_ADDRESS
26396
26397-struct pci_raw_ops pci_direct_conf2 = {
26398+const struct pci_raw_ops pci_direct_conf2 = {
26399 .read = pci_conf2_read,
26400 .write = pci_conf2_write,
26401 };
26402@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26403 * This should be close to trivial, but it isn't, because there are buggy
26404 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26405 */
26406-static int __init pci_sanity_check(struct pci_raw_ops *o)
26407+static int __init pci_sanity_check(const struct pci_raw_ops *o)
26408 {
26409 u32 x = 0;
26410 int year, devfn;
26411diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26412index f10a7e9..0425342 100644
26413--- a/arch/x86/pci/mmconfig_32.c
26414+++ b/arch/x86/pci/mmconfig_32.c
26415@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26416 return 0;
26417 }
26418
26419-static struct pci_raw_ops pci_mmcfg = {
26420+static const struct pci_raw_ops pci_mmcfg = {
26421 .read = pci_mmcfg_read,
26422 .write = pci_mmcfg_write,
26423 };
26424diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26425index 94349f8..41600a7 100644
26426--- a/arch/x86/pci/mmconfig_64.c
26427+++ b/arch/x86/pci/mmconfig_64.c
26428@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26429 return 0;
26430 }
26431
26432-static struct pci_raw_ops pci_mmcfg = {
26433+static const struct pci_raw_ops pci_mmcfg = {
26434 .read = pci_mmcfg_read,
26435 .write = pci_mmcfg_write,
26436 };
26437diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26438index 8eb295e..86bd657 100644
26439--- a/arch/x86/pci/numaq_32.c
26440+++ b/arch/x86/pci/numaq_32.c
26441@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26442
26443 #undef PCI_CONF1_MQ_ADDRESS
26444
26445-static struct pci_raw_ops pci_direct_conf1_mq = {
26446+static const struct pci_raw_ops pci_direct_conf1_mq = {
26447 .read = pci_conf1_mq_read,
26448 .write = pci_conf1_mq_write
26449 };
26450diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26451index b889d82..5a58a0a 100644
26452--- a/arch/x86/pci/olpc.c
26453+++ b/arch/x86/pci/olpc.c
26454@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26455 return 0;
26456 }
26457
26458-static struct pci_raw_ops pci_olpc_conf = {
26459+static const struct pci_raw_ops pci_olpc_conf = {
26460 .read = pci_olpc_read,
26461 .write = pci_olpc_write,
26462 };
26463diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26464index 1c975cc..b8e16c2 100644
26465--- a/arch/x86/pci/pcbios.c
26466+++ b/arch/x86/pci/pcbios.c
26467@@ -56,50 +56,93 @@ union bios32 {
26468 static struct {
26469 unsigned long address;
26470 unsigned short segment;
26471-} bios32_indirect = { 0, __KERNEL_CS };
26472+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26473
26474 /*
26475 * Returns the entry point for the given service, NULL on error
26476 */
26477
26478-static unsigned long bios32_service(unsigned long service)
26479+static unsigned long __devinit bios32_service(unsigned long service)
26480 {
26481 unsigned char return_code; /* %al */
26482 unsigned long address; /* %ebx */
26483 unsigned long length; /* %ecx */
26484 unsigned long entry; /* %edx */
26485 unsigned long flags;
26486+ struct desc_struct d, *gdt;
26487
26488 local_irq_save(flags);
26489- __asm__("lcall *(%%edi); cld"
26490+
26491+ gdt = get_cpu_gdt_table(smp_processor_id());
26492+
26493+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26494+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26495+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26496+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26497+
26498+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26499 : "=a" (return_code),
26500 "=b" (address),
26501 "=c" (length),
26502 "=d" (entry)
26503 : "0" (service),
26504 "1" (0),
26505- "D" (&bios32_indirect));
26506+ "D" (&bios32_indirect),
26507+ "r"(__PCIBIOS_DS)
26508+ : "memory");
26509+
26510+ pax_open_kernel();
26511+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26512+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26513+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26514+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26515+ pax_close_kernel();
26516+
26517 local_irq_restore(flags);
26518
26519 switch (return_code) {
26520- case 0:
26521- return address + entry;
26522- case 0x80: /* Not present */
26523- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26524- return 0;
26525- default: /* Shouldn't happen */
26526- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26527- service, return_code);
26528+ case 0: {
26529+ int cpu;
26530+ unsigned char flags;
26531+
26532+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26533+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26534+ printk(KERN_WARNING "bios32_service: not valid\n");
26535 return 0;
26536+ }
26537+ address = address + PAGE_OFFSET;
26538+ length += 16UL; /* some BIOSs underreport this... */
26539+ flags = 4;
26540+ if (length >= 64*1024*1024) {
26541+ length >>= PAGE_SHIFT;
26542+ flags |= 8;
26543+ }
26544+
26545+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26546+ gdt = get_cpu_gdt_table(cpu);
26547+ pack_descriptor(&d, address, length, 0x9b, flags);
26548+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26549+ pack_descriptor(&d, address, length, 0x93, flags);
26550+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26551+ }
26552+ return entry;
26553+ }
26554+ case 0x80: /* Not present */
26555+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26556+ return 0;
26557+ default: /* Shouldn't happen */
26558+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26559+ service, return_code);
26560+ return 0;
26561 }
26562 }
26563
26564 static struct {
26565 unsigned long address;
26566 unsigned short segment;
26567-} pci_indirect = { 0, __KERNEL_CS };
26568+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26569
26570-static int pci_bios_present;
26571+static int pci_bios_present __read_only;
26572
26573 static int __devinit check_pcibios(void)
26574 {
26575@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26576 unsigned long flags, pcibios_entry;
26577
26578 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26579- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26580+ pci_indirect.address = pcibios_entry;
26581
26582 local_irq_save(flags);
26583- __asm__(
26584- "lcall *(%%edi); cld\n\t"
26585+ __asm__("movw %w6, %%ds\n\t"
26586+ "lcall *%%ss:(%%edi); cld\n\t"
26587+ "push %%ss\n\t"
26588+ "pop %%ds\n\t"
26589 "jc 1f\n\t"
26590 "xor %%ah, %%ah\n"
26591 "1:"
26592@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26593 "=b" (ebx),
26594 "=c" (ecx)
26595 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26596- "D" (&pci_indirect)
26597+ "D" (&pci_indirect),
26598+ "r" (__PCIBIOS_DS)
26599 : "memory");
26600 local_irq_restore(flags);
26601
26602@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26603
26604 switch (len) {
26605 case 1:
26606- __asm__("lcall *(%%esi); cld\n\t"
26607+ __asm__("movw %w6, %%ds\n\t"
26608+ "lcall *%%ss:(%%esi); cld\n\t"
26609+ "push %%ss\n\t"
26610+ "pop %%ds\n\t"
26611 "jc 1f\n\t"
26612 "xor %%ah, %%ah\n"
26613 "1:"
26614@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26615 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26616 "b" (bx),
26617 "D" ((long)reg),
26618- "S" (&pci_indirect));
26619+ "S" (&pci_indirect),
26620+ "r" (__PCIBIOS_DS));
26621 /*
26622 * Zero-extend the result beyond 8 bits, do not trust the
26623 * BIOS having done it:
26624@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26625 *value &= 0xff;
26626 break;
26627 case 2:
26628- __asm__("lcall *(%%esi); cld\n\t"
26629+ __asm__("movw %w6, %%ds\n\t"
26630+ "lcall *%%ss:(%%esi); cld\n\t"
26631+ "push %%ss\n\t"
26632+ "pop %%ds\n\t"
26633 "jc 1f\n\t"
26634 "xor %%ah, %%ah\n"
26635 "1:"
26636@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26637 : "1" (PCIBIOS_READ_CONFIG_WORD),
26638 "b" (bx),
26639 "D" ((long)reg),
26640- "S" (&pci_indirect));
26641+ "S" (&pci_indirect),
26642+ "r" (__PCIBIOS_DS));
26643 /*
26644 * Zero-extend the result beyond 16 bits, do not trust the
26645 * BIOS having done it:
26646@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26647 *value &= 0xffff;
26648 break;
26649 case 4:
26650- __asm__("lcall *(%%esi); cld\n\t"
26651+ __asm__("movw %w6, %%ds\n\t"
26652+ "lcall *%%ss:(%%esi); cld\n\t"
26653+ "push %%ss\n\t"
26654+ "pop %%ds\n\t"
26655 "jc 1f\n\t"
26656 "xor %%ah, %%ah\n"
26657 "1:"
26658@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26659 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26660 "b" (bx),
26661 "D" ((long)reg),
26662- "S" (&pci_indirect));
26663+ "S" (&pci_indirect),
26664+ "r" (__PCIBIOS_DS));
26665 break;
26666 }
26667
26668@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26669
26670 switch (len) {
26671 case 1:
26672- __asm__("lcall *(%%esi); cld\n\t"
26673+ __asm__("movw %w6, %%ds\n\t"
26674+ "lcall *%%ss:(%%esi); cld\n\t"
26675+ "push %%ss\n\t"
26676+ "pop %%ds\n\t"
26677 "jc 1f\n\t"
26678 "xor %%ah, %%ah\n"
26679 "1:"
26680@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26681 "c" (value),
26682 "b" (bx),
26683 "D" ((long)reg),
26684- "S" (&pci_indirect));
26685+ "S" (&pci_indirect),
26686+ "r" (__PCIBIOS_DS));
26687 break;
26688 case 2:
26689- __asm__("lcall *(%%esi); cld\n\t"
26690+ __asm__("movw %w6, %%ds\n\t"
26691+ "lcall *%%ss:(%%esi); cld\n\t"
26692+ "push %%ss\n\t"
26693+ "pop %%ds\n\t"
26694 "jc 1f\n\t"
26695 "xor %%ah, %%ah\n"
26696 "1:"
26697@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26698 "c" (value),
26699 "b" (bx),
26700 "D" ((long)reg),
26701- "S" (&pci_indirect));
26702+ "S" (&pci_indirect),
26703+ "r" (__PCIBIOS_DS));
26704 break;
26705 case 4:
26706- __asm__("lcall *(%%esi); cld\n\t"
26707+ __asm__("movw %w6, %%ds\n\t"
26708+ "lcall *%%ss:(%%esi); cld\n\t"
26709+ "push %%ss\n\t"
26710+ "pop %%ds\n\t"
26711 "jc 1f\n\t"
26712 "xor %%ah, %%ah\n"
26713 "1:"
26714@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26715 "c" (value),
26716 "b" (bx),
26717 "D" ((long)reg),
26718- "S" (&pci_indirect));
26719+ "S" (&pci_indirect),
26720+ "r" (__PCIBIOS_DS));
26721 break;
26722 }
26723
26724@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26725 * Function table for BIOS32 access
26726 */
26727
26728-static struct pci_raw_ops pci_bios_access = {
26729+static const struct pci_raw_ops pci_bios_access = {
26730 .read = pci_bios_read,
26731 .write = pci_bios_write
26732 };
26733@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26734 * Try to find PCI BIOS.
26735 */
26736
26737-static struct pci_raw_ops * __devinit pci_find_bios(void)
26738+static const struct pci_raw_ops * __devinit pci_find_bios(void)
26739 {
26740 union bios32 *check;
26741 unsigned char sum;
26742@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26743
26744 DBG("PCI: Fetching IRQ routing table... ");
26745 __asm__("push %%es\n\t"
26746+ "movw %w8, %%ds\n\t"
26747 "push %%ds\n\t"
26748 "pop %%es\n\t"
26749- "lcall *(%%esi); cld\n\t"
26750+ "lcall *%%ss:(%%esi); cld\n\t"
26751 "pop %%es\n\t"
26752+ "push %%ss\n\t"
26753+ "pop %%ds\n"
26754 "jc 1f\n\t"
26755 "xor %%ah, %%ah\n"
26756 "1:"
26757@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26758 "1" (0),
26759 "D" ((long) &opt),
26760 "S" (&pci_indirect),
26761- "m" (opt)
26762+ "m" (opt),
26763+ "r" (__PCIBIOS_DS)
26764 : "memory");
26765 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26766 if (ret & 0xff00)
26767@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26768 {
26769 int ret;
26770
26771- __asm__("lcall *(%%esi); cld\n\t"
26772+ __asm__("movw %w5, %%ds\n\t"
26773+ "lcall *%%ss:(%%esi); cld\n\t"
26774+ "push %%ss\n\t"
26775+ "pop %%ds\n"
26776 "jc 1f\n\t"
26777 "xor %%ah, %%ah\n"
26778 "1:"
26779@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26780 : "0" (PCIBIOS_SET_PCI_HW_INT),
26781 "b" ((dev->bus->number << 8) | dev->devfn),
26782 "c" ((irq << 8) | (pin + 10)),
26783- "S" (&pci_indirect));
26784+ "S" (&pci_indirect),
26785+ "r" (__PCIBIOS_DS));
26786 return !(ret & 0xff00);
26787 }
26788 EXPORT_SYMBOL(pcibios_set_irq_routing);
26789diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26790index fa0f651..9d8f3d9 100644
26791--- a/arch/x86/power/cpu.c
26792+++ b/arch/x86/power/cpu.c
26793@@ -129,7 +129,7 @@ static void do_fpu_end(void)
26794 static void fix_processor_context(void)
26795 {
26796 int cpu = smp_processor_id();
26797- struct tss_struct *t = &per_cpu(init_tss, cpu);
26798+ struct tss_struct *t = init_tss + cpu;
26799
26800 set_tss_desc(cpu, t); /*
26801 * This just modifies memory; should not be
26802@@ -139,7 +139,9 @@ static void fix_processor_context(void)
26803 */
26804
26805 #ifdef CONFIG_X86_64
26806+ pax_open_kernel();
26807 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26808+ pax_close_kernel();
26809
26810 syscall_init(); /* This sets MSR_*STAR and related */
26811 #endif
26812diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26813index dd78ef6..f9d928d 100644
26814--- a/arch/x86/vdso/Makefile
26815+++ b/arch/x86/vdso/Makefile
26816@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26817 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26818 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26819
26820-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26821+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26822 GCOV_PROFILE := n
26823
26824 #
26825diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26826index ee55754..0013b2e 100644
26827--- a/arch/x86/vdso/vclock_gettime.c
26828+++ b/arch/x86/vdso/vclock_gettime.c
26829@@ -22,24 +22,48 @@
26830 #include <asm/hpet.h>
26831 #include <asm/unistd.h>
26832 #include <asm/io.h>
26833+#include <asm/fixmap.h>
26834 #include "vextern.h"
26835
26836 #define gtod vdso_vsyscall_gtod_data
26837
26838+notrace noinline long __vdso_fallback_time(long *t)
26839+{
26840+ long secs;
26841+ asm volatile("syscall"
26842+ : "=a" (secs)
26843+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26844+ return secs;
26845+}
26846+
26847 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26848 {
26849 long ret;
26850 asm("syscall" : "=a" (ret) :
26851- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26852+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26853 return ret;
26854 }
26855
26856+notrace static inline cycle_t __vdso_vread_hpet(void)
26857+{
26858+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26859+}
26860+
26861+notrace static inline cycle_t __vdso_vread_tsc(void)
26862+{
26863+ cycle_t ret = (cycle_t)vget_cycles();
26864+
26865+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26866+}
26867+
26868 notrace static inline long vgetns(void)
26869 {
26870 long v;
26871- cycles_t (*vread)(void);
26872- vread = gtod->clock.vread;
26873- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26874+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26875+ v = __vdso_vread_tsc();
26876+ else
26877+ v = __vdso_vread_hpet();
26878+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26879 return (v * gtod->clock.mult) >> gtod->clock.shift;
26880 }
26881
26882@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26883
26884 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26885 {
26886- if (likely(gtod->sysctl_enabled))
26887+ if (likely(gtod->sysctl_enabled &&
26888+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26889+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26890 switch (clock) {
26891 case CLOCK_REALTIME:
26892 if (likely(gtod->clock.vread))
26893@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26894 int clock_gettime(clockid_t, struct timespec *)
26895 __attribute__((weak, alias("__vdso_clock_gettime")));
26896
26897+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26898+{
26899+ long ret;
26900+ asm("syscall" : "=a" (ret) :
26901+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26902+ return ret;
26903+}
26904+
26905 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26906 {
26907- long ret;
26908- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26909+ if (likely(gtod->sysctl_enabled &&
26910+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26911+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26912+ {
26913 if (likely(tv != NULL)) {
26914 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26915 offsetof(struct timespec, tv_nsec) ||
26916@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26917 }
26918 return 0;
26919 }
26920- asm("syscall" : "=a" (ret) :
26921- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26922- return ret;
26923+ return __vdso_fallback_gettimeofday(tv, tz);
26924 }
26925 int gettimeofday(struct timeval *, struct timezone *)
26926 __attribute__((weak, alias("__vdso_gettimeofday")));
26927diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26928index 4e5dd3b..00ba15e 100644
26929--- a/arch/x86/vdso/vdso.lds.S
26930+++ b/arch/x86/vdso/vdso.lds.S
26931@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26932 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26933 #include "vextern.h"
26934 #undef VEXTERN
26935+
26936+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26937+VEXTERN(fallback_gettimeofday)
26938+VEXTERN(fallback_time)
26939+VEXTERN(getcpu)
26940+#undef VEXTERN
26941diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26942index 58bc00f..d53fb48 100644
26943--- a/arch/x86/vdso/vdso32-setup.c
26944+++ b/arch/x86/vdso/vdso32-setup.c
26945@@ -25,6 +25,7 @@
26946 #include <asm/tlbflush.h>
26947 #include <asm/vdso.h>
26948 #include <asm/proto.h>
26949+#include <asm/mman.h>
26950
26951 enum {
26952 VDSO_DISABLED = 0,
26953@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26954 void enable_sep_cpu(void)
26955 {
26956 int cpu = get_cpu();
26957- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26958+ struct tss_struct *tss = init_tss + cpu;
26959
26960 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26961 put_cpu();
26962@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26963 gate_vma.vm_start = FIXADDR_USER_START;
26964 gate_vma.vm_end = FIXADDR_USER_END;
26965 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26966- gate_vma.vm_page_prot = __P101;
26967+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26968 /*
26969 * Make sure the vDSO gets into every core dump.
26970 * Dumping its contents makes post-mortem fully interpretable later
26971@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26972 if (compat)
26973 addr = VDSO_HIGH_BASE;
26974 else {
26975- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26976+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26977 if (IS_ERR_VALUE(addr)) {
26978 ret = addr;
26979 goto up_fail;
26980 }
26981 }
26982
26983- current->mm->context.vdso = (void *)addr;
26984+ current->mm->context.vdso = addr;
26985
26986 if (compat_uses_vma || !compat) {
26987 /*
26988@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26989 }
26990
26991 current_thread_info()->sysenter_return =
26992- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26993+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26994
26995 up_fail:
26996 if (ret)
26997- current->mm->context.vdso = NULL;
26998+ current->mm->context.vdso = 0;
26999
27000 up_write(&mm->mmap_sem);
27001
27002@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
27003
27004 const char *arch_vma_name(struct vm_area_struct *vma)
27005 {
27006- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27007+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27008 return "[vdso]";
27009+
27010+#ifdef CONFIG_PAX_SEGMEXEC
27011+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27012+ return "[vdso]";
27013+#endif
27014+
27015 return NULL;
27016 }
27017
27018@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
27019 struct mm_struct *mm = tsk->mm;
27020
27021 /* Check to see if this task was created in compat vdso mode */
27022- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27023+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27024 return &gate_vma;
27025 return NULL;
27026 }
27027diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
27028index 1683ba2..48d07f3 100644
27029--- a/arch/x86/vdso/vextern.h
27030+++ b/arch/x86/vdso/vextern.h
27031@@ -11,6 +11,5 @@
27032 put into vextern.h and be referenced as a pointer with vdso prefix.
27033 The main kernel later fills in the values. */
27034
27035-VEXTERN(jiffies)
27036 VEXTERN(vgetcpu_mode)
27037 VEXTERN(vsyscall_gtod_data)
27038diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27039index 21e1aeb..2c0b3c4 100644
27040--- a/arch/x86/vdso/vma.c
27041+++ b/arch/x86/vdso/vma.c
27042@@ -17,8 +17,6 @@
27043 #include "vextern.h" /* Just for VMAGIC. */
27044 #undef VEXTERN
27045
27046-unsigned int __read_mostly vdso_enabled = 1;
27047-
27048 extern char vdso_start[], vdso_end[];
27049 extern unsigned short vdso_sync_cpuid;
27050
27051@@ -27,10 +25,8 @@ static unsigned vdso_size;
27052
27053 static inline void *var_ref(void *p, char *name)
27054 {
27055- if (*(void **)p != (void *)VMAGIC) {
27056- printk("VDSO: variable %s broken\n", name);
27057- vdso_enabled = 0;
27058- }
27059+ if (*(void **)p != (void *)VMAGIC)
27060+ panic("VDSO: variable %s broken\n", name);
27061 return p;
27062 }
27063
27064@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
27065 if (!vbase)
27066 goto oom;
27067
27068- if (memcmp(vbase, "\177ELF", 4)) {
27069- printk("VDSO: I'm broken; not ELF\n");
27070- vdso_enabled = 0;
27071- }
27072+ if (memcmp(vbase, ELFMAG, SELFMAG))
27073+ panic("VDSO: I'm broken; not ELF\n");
27074
27075 #define VEXTERN(x) \
27076 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
27077 #include "vextern.h"
27078 #undef VEXTERN
27079+ vunmap(vbase);
27080 return 0;
27081
27082 oom:
27083- printk("Cannot allocate vdso\n");
27084- vdso_enabled = 0;
27085- return -ENOMEM;
27086+ panic("Cannot allocate vdso\n");
27087 }
27088 __initcall(init_vdso_vars);
27089
27090@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27091 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27092 {
27093 struct mm_struct *mm = current->mm;
27094- unsigned long addr;
27095+ unsigned long addr = 0;
27096 int ret;
27097
27098- if (!vdso_enabled)
27099- return 0;
27100-
27101 down_write(&mm->mmap_sem);
27102+
27103+#ifdef CONFIG_PAX_RANDMMAP
27104+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27105+#endif
27106+
27107 addr = vdso_addr(mm->start_stack, vdso_size);
27108 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27109 if (IS_ERR_VALUE(addr)) {
27110@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27111 goto up_fail;
27112 }
27113
27114- current->mm->context.vdso = (void *)addr;
27115+ current->mm->context.vdso = addr;
27116
27117 ret = install_special_mapping(mm, addr, vdso_size,
27118 VM_READ|VM_EXEC|
27119@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27120 VM_ALWAYSDUMP,
27121 vdso_pages);
27122 if (ret) {
27123- current->mm->context.vdso = NULL;
27124+ current->mm->context.vdso = 0;
27125 goto up_fail;
27126 }
27127
27128@@ -132,10 +127,3 @@ up_fail:
27129 up_write(&mm->mmap_sem);
27130 return ret;
27131 }
27132-
27133-static __init int vdso_setup(char *s)
27134-{
27135- vdso_enabled = simple_strtoul(s, NULL, 0);
27136- return 0;
27137-}
27138-__setup("vdso=", vdso_setup);
27139diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27140index 0087b00..eecb34f 100644
27141--- a/arch/x86/xen/enlighten.c
27142+++ b/arch/x86/xen/enlighten.c
27143@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27144
27145 struct shared_info xen_dummy_shared_info;
27146
27147-void *xen_initial_gdt;
27148-
27149 /*
27150 * Point at some empty memory to start with. We map the real shared_info
27151 * page as soon as fixmap is up and running.
27152@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
27153
27154 preempt_disable();
27155
27156- start = __get_cpu_var(idt_desc).address;
27157+ start = (unsigned long)__get_cpu_var(idt_desc).address;
27158 end = start + __get_cpu_var(idt_desc).size + 1;
27159
27160 xen_mc_flush();
27161@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
27162 #endif
27163 };
27164
27165-static void xen_reboot(int reason)
27166+static __noreturn void xen_reboot(int reason)
27167 {
27168 struct sched_shutdown r = { .reason = reason };
27169
27170@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
27171 BUG();
27172 }
27173
27174-static void xen_restart(char *msg)
27175+static __noreturn void xen_restart(char *msg)
27176 {
27177 xen_reboot(SHUTDOWN_reboot);
27178 }
27179
27180-static void xen_emergency_restart(void)
27181+static __noreturn void xen_emergency_restart(void)
27182 {
27183 xen_reboot(SHUTDOWN_reboot);
27184 }
27185
27186-static void xen_machine_halt(void)
27187+static __noreturn void xen_machine_halt(void)
27188 {
27189 xen_reboot(SHUTDOWN_poweroff);
27190 }
27191@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
27192 */
27193 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27194
27195-#ifdef CONFIG_X86_64
27196 /* Work out if we support NX */
27197- check_efer();
27198+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27199+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27200+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27201+ unsigned l, h;
27202+
27203+#ifdef CONFIG_X86_PAE
27204+ nx_enabled = 1;
27205+#endif
27206+ __supported_pte_mask |= _PAGE_NX;
27207+ rdmsr(MSR_EFER, l, h);
27208+ l |= EFER_NX;
27209+ wrmsr(MSR_EFER, l, h);
27210+ }
27211 #endif
27212
27213 xen_setup_features();
27214@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
27215
27216 machine_ops = xen_machine_ops;
27217
27218- /*
27219- * The only reliable way to retain the initial address of the
27220- * percpu gdt_page is to remember it here, so we can go and
27221- * mark it RW later, when the initial percpu area is freed.
27222- */
27223- xen_initial_gdt = &per_cpu(gdt_page, 0);
27224-
27225 xen_smp_init();
27226
27227 pgd = (pgd_t *)xen_start_info->pt_base;
27228diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27229index 3f90a2c..2c2ad84 100644
27230--- a/arch/x86/xen/mmu.c
27231+++ b/arch/x86/xen/mmu.c
27232@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27233 convert_pfn_mfn(init_level4_pgt);
27234 convert_pfn_mfn(level3_ident_pgt);
27235 convert_pfn_mfn(level3_kernel_pgt);
27236+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27237+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27238+ convert_pfn_mfn(level3_vmemmap_pgt);
27239
27240 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27241 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27242@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27243 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27244 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27245 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27246+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27247+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27248+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27249 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27250+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27251 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27252 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27253
27254@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27255 pv_mmu_ops.set_pud = xen_set_pud;
27256 #if PAGETABLE_LEVELS == 4
27257 pv_mmu_ops.set_pgd = xen_set_pgd;
27258+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27259 #endif
27260
27261 /* This will work as long as patching hasn't happened yet
27262@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27263 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27264 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27265 .set_pgd = xen_set_pgd_hyper,
27266+ .set_pgd_batched = xen_set_pgd_hyper,
27267
27268 .alloc_pud = xen_alloc_pmd_init,
27269 .release_pud = xen_release_pmd_init,
27270diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27271index a96204a..fca9b8e 100644
27272--- a/arch/x86/xen/smp.c
27273+++ b/arch/x86/xen/smp.c
27274@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27275 {
27276 BUG_ON(smp_processor_id() != 0);
27277 native_smp_prepare_boot_cpu();
27278-
27279- /* We've switched to the "real" per-cpu gdt, so make sure the
27280- old memory can be recycled */
27281- make_lowmem_page_readwrite(xen_initial_gdt);
27282-
27283 xen_setup_vcpu_info_placement();
27284 }
27285
27286@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27287 gdt = get_cpu_gdt_table(cpu);
27288
27289 ctxt->flags = VGCF_IN_KERNEL;
27290- ctxt->user_regs.ds = __USER_DS;
27291- ctxt->user_regs.es = __USER_DS;
27292+ ctxt->user_regs.ds = __KERNEL_DS;
27293+ ctxt->user_regs.es = __KERNEL_DS;
27294 ctxt->user_regs.ss = __KERNEL_DS;
27295 #ifdef CONFIG_X86_32
27296 ctxt->user_regs.fs = __KERNEL_PERCPU;
27297- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27298+ savesegment(gs, ctxt->user_regs.gs);
27299 #else
27300 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27301 #endif
27302@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27303 int rc;
27304
27305 per_cpu(current_task, cpu) = idle;
27306+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27307 #ifdef CONFIG_X86_32
27308 irq_ctx_init(cpu);
27309 #else
27310 clear_tsk_thread_flag(idle, TIF_FORK);
27311- per_cpu(kernel_stack, cpu) =
27312- (unsigned long)task_stack_page(idle) -
27313- KERNEL_STACK_OFFSET + THREAD_SIZE;
27314+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27315 #endif
27316 xen_setup_runstate_info(cpu);
27317 xen_setup_timer(cpu);
27318diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27319index 9a95a9c..4f39e774 100644
27320--- a/arch/x86/xen/xen-asm_32.S
27321+++ b/arch/x86/xen/xen-asm_32.S
27322@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27323 ESP_OFFSET=4 # bytes pushed onto stack
27324
27325 /*
27326- * Store vcpu_info pointer for easy access. Do it this way to
27327- * avoid having to reload %fs
27328+ * Store vcpu_info pointer for easy access.
27329 */
27330 #ifdef CONFIG_SMP
27331- GET_THREAD_INFO(%eax)
27332- movl TI_cpu(%eax), %eax
27333- movl __per_cpu_offset(,%eax,4), %eax
27334- mov per_cpu__xen_vcpu(%eax), %eax
27335+ push %fs
27336+ mov $(__KERNEL_PERCPU), %eax
27337+ mov %eax, %fs
27338+ mov PER_CPU_VAR(xen_vcpu), %eax
27339+ pop %fs
27340 #else
27341 movl per_cpu__xen_vcpu, %eax
27342 #endif
27343diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27344index 1a5ff24..a187d40 100644
27345--- a/arch/x86/xen/xen-head.S
27346+++ b/arch/x86/xen/xen-head.S
27347@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27348 #ifdef CONFIG_X86_32
27349 mov %esi,xen_start_info
27350 mov $init_thread_union+THREAD_SIZE,%esp
27351+#ifdef CONFIG_SMP
27352+ movl $cpu_gdt_table,%edi
27353+ movl $__per_cpu_load,%eax
27354+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27355+ rorl $16,%eax
27356+ movb %al,__KERNEL_PERCPU + 4(%edi)
27357+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27358+ movl $__per_cpu_end - 1,%eax
27359+ subl $__per_cpu_start,%eax
27360+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27361+#endif
27362 #else
27363 mov %rsi,xen_start_info
27364 mov $init_thread_union+THREAD_SIZE,%rsp
27365diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27366index f9153a3..51eab3d 100644
27367--- a/arch/x86/xen/xen-ops.h
27368+++ b/arch/x86/xen/xen-ops.h
27369@@ -10,8 +10,6 @@
27370 extern const char xen_hypervisor_callback[];
27371 extern const char xen_failsafe_callback[];
27372
27373-extern void *xen_initial_gdt;
27374-
27375 struct trap_info;
27376 void xen_copy_trap_info(struct trap_info *traps);
27377
27378diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27379index 15c6308..96e83c2 100644
27380--- a/block/blk-integrity.c
27381+++ b/block/blk-integrity.c
27382@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27383 NULL,
27384 };
27385
27386-static struct sysfs_ops integrity_ops = {
27387+static const struct sysfs_ops integrity_ops = {
27388 .show = &integrity_attr_show,
27389 .store = &integrity_attr_store,
27390 };
27391diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27392index ca56420..f2fc409 100644
27393--- a/block/blk-iopoll.c
27394+++ b/block/blk-iopoll.c
27395@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27396 }
27397 EXPORT_SYMBOL(blk_iopoll_complete);
27398
27399-static void blk_iopoll_softirq(struct softirq_action *h)
27400+static void blk_iopoll_softirq(void)
27401 {
27402 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27403 int rearm = 0, budget = blk_iopoll_budget;
27404diff --git a/block/blk-map.c b/block/blk-map.c
27405index 30a7e51..0aeec6a 100644
27406--- a/block/blk-map.c
27407+++ b/block/blk-map.c
27408@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27409 * direct dma. else, set up kernel bounce buffers
27410 */
27411 uaddr = (unsigned long) ubuf;
27412- if (blk_rq_aligned(q, ubuf, len) && !map_data)
27413+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27414 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27415 else
27416 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27417@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27418 for (i = 0; i < iov_count; i++) {
27419 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27420
27421+ if (!iov[i].iov_len)
27422+ return -EINVAL;
27423+
27424 if (uaddr & queue_dma_alignment(q)) {
27425 unaligned = 1;
27426 break;
27427 }
27428- if (!iov[i].iov_len)
27429- return -EINVAL;
27430 }
27431
27432 if (unaligned || (q->dma_pad_mask & len) || map_data)
27433@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27434 if (!len || !kbuf)
27435 return -EINVAL;
27436
27437- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27438+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27439 if (do_copy)
27440 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27441 else
27442diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27443index ee9c216..58d410a 100644
27444--- a/block/blk-softirq.c
27445+++ b/block/blk-softirq.c
27446@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27447 * Softirq action handler - move entries to local list and loop over them
27448 * while passing them to the queue registered handler.
27449 */
27450-static void blk_done_softirq(struct softirq_action *h)
27451+static void blk_done_softirq(void)
27452 {
27453 struct list_head *cpu_list, local_list;
27454
27455diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27456index bb9c5ea..5330d48 100644
27457--- a/block/blk-sysfs.c
27458+++ b/block/blk-sysfs.c
27459@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27460 kmem_cache_free(blk_requestq_cachep, q);
27461 }
27462
27463-static struct sysfs_ops queue_sysfs_ops = {
27464+static const struct sysfs_ops queue_sysfs_ops = {
27465 .show = queue_attr_show,
27466 .store = queue_attr_store,
27467 };
27468diff --git a/block/bsg.c b/block/bsg.c
27469index 7154a7a..08ac2f0 100644
27470--- a/block/bsg.c
27471+++ b/block/bsg.c
27472@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27473 struct sg_io_v4 *hdr, struct bsg_device *bd,
27474 fmode_t has_write_perm)
27475 {
27476+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27477+ unsigned char *cmdptr;
27478+
27479 if (hdr->request_len > BLK_MAX_CDB) {
27480 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27481 if (!rq->cmd)
27482 return -ENOMEM;
27483- }
27484+ cmdptr = rq->cmd;
27485+ } else
27486+ cmdptr = tmpcmd;
27487
27488- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27489+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27490 hdr->request_len))
27491 return -EFAULT;
27492
27493+ if (cmdptr != rq->cmd)
27494+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27495+
27496 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27497 if (blk_verify_command(rq->cmd, has_write_perm))
27498 return -EPERM;
27499@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27500 rq->next_rq = next_rq;
27501 next_rq->cmd_type = rq->cmd_type;
27502
27503- dxferp = (void*)(unsigned long)hdr->din_xferp;
27504+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27505 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27506 hdr->din_xfer_len, GFP_KERNEL);
27507 if (ret)
27508@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27509
27510 if (hdr->dout_xfer_len) {
27511 dxfer_len = hdr->dout_xfer_len;
27512- dxferp = (void*)(unsigned long)hdr->dout_xferp;
27513+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27514 } else if (hdr->din_xfer_len) {
27515 dxfer_len = hdr->din_xfer_len;
27516- dxferp = (void*)(unsigned long)hdr->din_xferp;
27517+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27518 } else
27519 dxfer_len = 0;
27520
27521@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27522 int len = min_t(unsigned int, hdr->max_response_len,
27523 rq->sense_len);
27524
27525- ret = copy_to_user((void*)(unsigned long)hdr->response,
27526+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27527 rq->sense, len);
27528 if (!ret)
27529 hdr->response_len = len;
27530diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27531index 9bd086c..ca1fc22 100644
27532--- a/block/compat_ioctl.c
27533+++ b/block/compat_ioctl.c
27534@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27535 err |= __get_user(f->spec1, &uf->spec1);
27536 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27537 err |= __get_user(name, &uf->name);
27538- f->name = compat_ptr(name);
27539+ f->name = (void __force_kernel *)compat_ptr(name);
27540 if (err) {
27541 err = -EFAULT;
27542 goto out;
27543diff --git a/block/elevator.c b/block/elevator.c
27544index a847046..75a1746 100644
27545--- a/block/elevator.c
27546+++ b/block/elevator.c
27547@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27548 return error;
27549 }
27550
27551-static struct sysfs_ops elv_sysfs_ops = {
27552+static const struct sysfs_ops elv_sysfs_ops = {
27553 .show = elv_attr_show,
27554 .store = elv_attr_store,
27555 };
27556diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27557index 2be0a97..bded3fd 100644
27558--- a/block/scsi_ioctl.c
27559+++ b/block/scsi_ioctl.c
27560@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27561 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27562 struct sg_io_hdr *hdr, fmode_t mode)
27563 {
27564- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27565+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27566+ unsigned char *cmdptr;
27567+
27568+ if (rq->cmd != rq->__cmd)
27569+ cmdptr = rq->cmd;
27570+ else
27571+ cmdptr = tmpcmd;
27572+
27573+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27574 return -EFAULT;
27575+
27576+ if (cmdptr != rq->cmd)
27577+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27578+
27579 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27580 return -EPERM;
27581
27582@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27583 int err;
27584 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27585 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27586+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27587+ unsigned char *cmdptr;
27588
27589 if (!sic)
27590 return -EINVAL;
27591@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27592 */
27593 err = -EFAULT;
27594 rq->cmd_len = cmdlen;
27595- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27596+
27597+ if (rq->cmd != rq->__cmd)
27598+ cmdptr = rq->cmd;
27599+ else
27600+ cmdptr = tmpcmd;
27601+
27602+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27603 goto error;
27604
27605+ if (rq->cmd != cmdptr)
27606+ memcpy(rq->cmd, cmdptr, cmdlen);
27607+
27608 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27609 goto error;
27610
27611diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27612index 3533582..f143117 100644
27613--- a/crypto/cryptd.c
27614+++ b/crypto/cryptd.c
27615@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27616
27617 struct cryptd_blkcipher_request_ctx {
27618 crypto_completion_t complete;
27619-};
27620+} __no_const;
27621
27622 struct cryptd_hash_ctx {
27623 struct crypto_shash *child;
27624diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27625index a90d260..7a9765e 100644
27626--- a/crypto/gf128mul.c
27627+++ b/crypto/gf128mul.c
27628@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27629 for (i = 0; i < 7; ++i)
27630 gf128mul_x_lle(&p[i + 1], &p[i]);
27631
27632- memset(r, 0, sizeof(r));
27633+ memset(r, 0, sizeof(*r));
27634 for (i = 0;;) {
27635 u8 ch = ((u8 *)b)[15 - i];
27636
27637@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27638 for (i = 0; i < 7; ++i)
27639 gf128mul_x_bbe(&p[i + 1], &p[i]);
27640
27641- memset(r, 0, sizeof(r));
27642+ memset(r, 0, sizeof(*r));
27643 for (i = 0;;) {
27644 u8 ch = ((u8 *)b)[i];
27645
27646diff --git a/crypto/serpent.c b/crypto/serpent.c
27647index b651a55..023297d 100644
27648--- a/crypto/serpent.c
27649+++ b/crypto/serpent.c
27650@@ -21,6 +21,7 @@
27651 #include <asm/byteorder.h>
27652 #include <linux/crypto.h>
27653 #include <linux/types.h>
27654+#include <linux/sched.h>
27655
27656 /* Key is padded to the maximum of 256 bits before round key generation.
27657 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27658@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27659 u32 r0,r1,r2,r3,r4;
27660 int i;
27661
27662+ pax_track_stack();
27663+
27664 /* Copy key, add padding */
27665
27666 for (i = 0; i < keylen; ++i)
27667diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27668index 0d2cdb8..d8de48d 100644
27669--- a/drivers/acpi/acpi_pad.c
27670+++ b/drivers/acpi/acpi_pad.c
27671@@ -30,7 +30,7 @@
27672 #include <acpi/acpi_bus.h>
27673 #include <acpi/acpi_drivers.h>
27674
27675-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27676+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27677 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27678 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27679 static DEFINE_MUTEX(isolated_cpus_lock);
27680diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27681index 3f4602b..2e41d36 100644
27682--- a/drivers/acpi/battery.c
27683+++ b/drivers/acpi/battery.c
27684@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27685 }
27686
27687 static struct battery_file {
27688- struct file_operations ops;
27689+ const struct file_operations ops;
27690 mode_t mode;
27691 const char *name;
27692 } acpi_battery_file[] = {
27693diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27694index 7338b6a..82f0257 100644
27695--- a/drivers/acpi/dock.c
27696+++ b/drivers/acpi/dock.c
27697@@ -77,7 +77,7 @@ struct dock_dependent_device {
27698 struct list_head list;
27699 struct list_head hotplug_list;
27700 acpi_handle handle;
27701- struct acpi_dock_ops *ops;
27702+ const struct acpi_dock_ops *ops;
27703 void *context;
27704 };
27705
27706@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27707 * the dock driver after _DCK is executed.
27708 */
27709 int
27710-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27711+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27712 void *context)
27713 {
27714 struct dock_dependent_device *dd;
27715diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27716index 7c1c59e..2993595 100644
27717--- a/drivers/acpi/osl.c
27718+++ b/drivers/acpi/osl.c
27719@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27720 void __iomem *virt_addr;
27721
27722 virt_addr = ioremap(phys_addr, width);
27723+ if (!virt_addr)
27724+ return AE_NO_MEMORY;
27725 if (!value)
27726 value = &dummy;
27727
27728@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27729 void __iomem *virt_addr;
27730
27731 virt_addr = ioremap(phys_addr, width);
27732+ if (!virt_addr)
27733+ return AE_NO_MEMORY;
27734
27735 switch (width) {
27736 case 8:
27737diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27738index c216062..eec10d2 100644
27739--- a/drivers/acpi/power_meter.c
27740+++ b/drivers/acpi/power_meter.c
27741@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27742 return res;
27743
27744 temp /= 1000;
27745- if (temp < 0)
27746- return -EINVAL;
27747
27748 mutex_lock(&resource->lock);
27749 resource->trip[attr->index - 7] = temp;
27750diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27751index d0d25e2..961643d 100644
27752--- a/drivers/acpi/proc.c
27753+++ b/drivers/acpi/proc.c
27754@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27755 size_t count, loff_t * ppos)
27756 {
27757 struct list_head *node, *next;
27758- char strbuf[5];
27759- char str[5] = "";
27760- unsigned int len = count;
27761+ char strbuf[5] = {0};
27762 struct acpi_device *found_dev = NULL;
27763
27764- if (len > 4)
27765- len = 4;
27766- if (len < 0)
27767- return -EFAULT;
27768+ if (count > 4)
27769+ count = 4;
27770
27771- if (copy_from_user(strbuf, buffer, len))
27772+ if (copy_from_user(strbuf, buffer, count))
27773 return -EFAULT;
27774- strbuf[len] = '\0';
27775- sscanf(strbuf, "%s", str);
27776+ strbuf[count] = '\0';
27777
27778 mutex_lock(&acpi_device_lock);
27779 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27780@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27781 if (!dev->wakeup.flags.valid)
27782 continue;
27783
27784- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27785+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27786 dev->wakeup.state.enabled =
27787 dev->wakeup.state.enabled ? 0 : 1;
27788 found_dev = dev;
27789diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27790index 7102474..de8ad22 100644
27791--- a/drivers/acpi/processor_core.c
27792+++ b/drivers/acpi/processor_core.c
27793@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27794 return 0;
27795 }
27796
27797- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27798+ BUG_ON(pr->id >= nr_cpu_ids);
27799
27800 /*
27801 * Buggy BIOS check
27802diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27803index d933980..5761f13 100644
27804--- a/drivers/acpi/sbshc.c
27805+++ b/drivers/acpi/sbshc.c
27806@@ -17,7 +17,7 @@
27807
27808 #define PREFIX "ACPI: "
27809
27810-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27811+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27812 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27813
27814 struct acpi_smb_hc {
27815diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27816index 0458094..6978e7b 100644
27817--- a/drivers/acpi/sleep.c
27818+++ b/drivers/acpi/sleep.c
27819@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27820 }
27821 }
27822
27823-static struct platform_suspend_ops acpi_suspend_ops = {
27824+static const struct platform_suspend_ops acpi_suspend_ops = {
27825 .valid = acpi_suspend_state_valid,
27826 .begin = acpi_suspend_begin,
27827 .prepare_late = acpi_pm_prepare,
27828@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27829 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27830 * been requested.
27831 */
27832-static struct platform_suspend_ops acpi_suspend_ops_old = {
27833+static const struct platform_suspend_ops acpi_suspend_ops_old = {
27834 .valid = acpi_suspend_state_valid,
27835 .begin = acpi_suspend_begin_old,
27836 .prepare_late = acpi_pm_disable_gpes,
27837@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27838 acpi_enable_all_runtime_gpes();
27839 }
27840
27841-static struct platform_hibernation_ops acpi_hibernation_ops = {
27842+static const struct platform_hibernation_ops acpi_hibernation_ops = {
27843 .begin = acpi_hibernation_begin,
27844 .end = acpi_pm_end,
27845 .pre_snapshot = acpi_hibernation_pre_snapshot,
27846@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27847 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27848 * been requested.
27849 */
27850-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27851+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27852 .begin = acpi_hibernation_begin_old,
27853 .end = acpi_pm_end,
27854 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27855diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27856index 05dff63..b662ab7 100644
27857--- a/drivers/acpi/video.c
27858+++ b/drivers/acpi/video.c
27859@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27860 vd->brightness->levels[request_level]);
27861 }
27862
27863-static struct backlight_ops acpi_backlight_ops = {
27864+static const struct backlight_ops acpi_backlight_ops = {
27865 .get_brightness = acpi_video_get_brightness,
27866 .update_status = acpi_video_set_brightness,
27867 };
27868diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27869index 6787aab..23ffb0e 100644
27870--- a/drivers/ata/ahci.c
27871+++ b/drivers/ata/ahci.c
27872@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27873 .sdev_attrs = ahci_sdev_attrs,
27874 };
27875
27876-static struct ata_port_operations ahci_ops = {
27877+static const struct ata_port_operations ahci_ops = {
27878 .inherits = &sata_pmp_port_ops,
27879
27880 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27881@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27882 .port_stop = ahci_port_stop,
27883 };
27884
27885-static struct ata_port_operations ahci_vt8251_ops = {
27886+static const struct ata_port_operations ahci_vt8251_ops = {
27887 .inherits = &ahci_ops,
27888 .hardreset = ahci_vt8251_hardreset,
27889 };
27890
27891-static struct ata_port_operations ahci_p5wdh_ops = {
27892+static const struct ata_port_operations ahci_p5wdh_ops = {
27893 .inherits = &ahci_ops,
27894 .hardreset = ahci_p5wdh_hardreset,
27895 };
27896
27897-static struct ata_port_operations ahci_sb600_ops = {
27898+static const struct ata_port_operations ahci_sb600_ops = {
27899 .inherits = &ahci_ops,
27900 .softreset = ahci_sb600_softreset,
27901 .pmp_softreset = ahci_sb600_softreset,
27902diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27903index 99e7196..4968c77 100644
27904--- a/drivers/ata/ata_generic.c
27905+++ b/drivers/ata/ata_generic.c
27906@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27907 ATA_BMDMA_SHT(DRV_NAME),
27908 };
27909
27910-static struct ata_port_operations generic_port_ops = {
27911+static const struct ata_port_operations generic_port_ops = {
27912 .inherits = &ata_bmdma_port_ops,
27913 .cable_detect = ata_cable_unknown,
27914 .set_mode = generic_set_mode,
27915diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27916index c33591d..000c121 100644
27917--- a/drivers/ata/ata_piix.c
27918+++ b/drivers/ata/ata_piix.c
27919@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27920 ATA_BMDMA_SHT(DRV_NAME),
27921 };
27922
27923-static struct ata_port_operations piix_pata_ops = {
27924+static const struct ata_port_operations piix_pata_ops = {
27925 .inherits = &ata_bmdma32_port_ops,
27926 .cable_detect = ata_cable_40wire,
27927 .set_piomode = piix_set_piomode,
27928@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27929 .prereset = piix_pata_prereset,
27930 };
27931
27932-static struct ata_port_operations piix_vmw_ops = {
27933+static const struct ata_port_operations piix_vmw_ops = {
27934 .inherits = &piix_pata_ops,
27935 .bmdma_status = piix_vmw_bmdma_status,
27936 };
27937
27938-static struct ata_port_operations ich_pata_ops = {
27939+static const struct ata_port_operations ich_pata_ops = {
27940 .inherits = &piix_pata_ops,
27941 .cable_detect = ich_pata_cable_detect,
27942 .set_dmamode = ich_set_dmamode,
27943 };
27944
27945-static struct ata_port_operations piix_sata_ops = {
27946+static const struct ata_port_operations piix_sata_ops = {
27947 .inherits = &ata_bmdma_port_ops,
27948 };
27949
27950-static struct ata_port_operations piix_sidpr_sata_ops = {
27951+static const struct ata_port_operations piix_sidpr_sata_ops = {
27952 .inherits = &piix_sata_ops,
27953 .hardreset = sata_std_hardreset,
27954 .scr_read = piix_sidpr_scr_read,
27955diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27956index b0882cd..c295d65 100644
27957--- a/drivers/ata/libata-acpi.c
27958+++ b/drivers/ata/libata-acpi.c
27959@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27960 ata_acpi_uevent(dev->link->ap, dev, event);
27961 }
27962
27963-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27964+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27965 .handler = ata_acpi_dev_notify_dock,
27966 .uevent = ata_acpi_dev_uevent,
27967 };
27968
27969-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27970+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27971 .handler = ata_acpi_ap_notify_dock,
27972 .uevent = ata_acpi_ap_uevent,
27973 };
27974diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27975index d4f7f99..94f603e 100644
27976--- a/drivers/ata/libata-core.c
27977+++ b/drivers/ata/libata-core.c
27978@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27979 struct ata_port *ap;
27980 unsigned int tag;
27981
27982- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27983+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27984 ap = qc->ap;
27985
27986 qc->flags = 0;
27987@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27988 struct ata_port *ap;
27989 struct ata_link *link;
27990
27991- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27992+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27993 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27994 ap = qc->ap;
27995 link = qc->dev->link;
27996@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27997 * LOCKING:
27998 * None.
27999 */
28000-static void ata_finalize_port_ops(struct ata_port_operations *ops)
28001+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
28002 {
28003 static DEFINE_SPINLOCK(lock);
28004 const struct ata_port_operations *cur;
28005@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28006 return;
28007
28008 spin_lock(&lock);
28009+ pax_open_kernel();
28010
28011 for (cur = ops->inherits; cur; cur = cur->inherits) {
28012 void **inherit = (void **)cur;
28013@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28014 if (IS_ERR(*pp))
28015 *pp = NULL;
28016
28017- ops->inherits = NULL;
28018+ *(struct ata_port_operations **)&ops->inherits = NULL;
28019
28020+ pax_close_kernel();
28021 spin_unlock(&lock);
28022 }
28023
28024@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
28025 */
28026 /* KILLME - the only user left is ipr */
28027 void ata_host_init(struct ata_host *host, struct device *dev,
28028- unsigned long flags, struct ata_port_operations *ops)
28029+ unsigned long flags, const struct ata_port_operations *ops)
28030 {
28031 spin_lock_init(&host->lock);
28032 host->dev = dev;
28033@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
28034 /* truly dummy */
28035 }
28036
28037-struct ata_port_operations ata_dummy_port_ops = {
28038+const struct ata_port_operations ata_dummy_port_ops = {
28039 .qc_prep = ata_noop_qc_prep,
28040 .qc_issue = ata_dummy_qc_issue,
28041 .error_handler = ata_dummy_error_handler,
28042diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
28043index e5bdb9b..45a8e72 100644
28044--- a/drivers/ata/libata-eh.c
28045+++ b/drivers/ata/libata-eh.c
28046@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
28047 {
28048 struct ata_link *link;
28049
28050+ pax_track_stack();
28051+
28052 ata_for_each_link(link, ap, HOST_FIRST)
28053 ata_eh_link_report(link);
28054 }
28055@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
28056 */
28057 void ata_std_error_handler(struct ata_port *ap)
28058 {
28059- struct ata_port_operations *ops = ap->ops;
28060+ const struct ata_port_operations *ops = ap->ops;
28061 ata_reset_fn_t hardreset = ops->hardreset;
28062
28063 /* ignore built-in hardreset if SCR access is not available */
28064diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
28065index 51f0ffb..19ce3e3 100644
28066--- a/drivers/ata/libata-pmp.c
28067+++ b/drivers/ata/libata-pmp.c
28068@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
28069 */
28070 static int sata_pmp_eh_recover(struct ata_port *ap)
28071 {
28072- struct ata_port_operations *ops = ap->ops;
28073+ const struct ata_port_operations *ops = ap->ops;
28074 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
28075 struct ata_link *pmp_link = &ap->link;
28076 struct ata_device *pmp_dev = pmp_link->device;
28077diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
28078index d8f35fe..288180a 100644
28079--- a/drivers/ata/pata_acpi.c
28080+++ b/drivers/ata/pata_acpi.c
28081@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
28082 ATA_BMDMA_SHT(DRV_NAME),
28083 };
28084
28085-static struct ata_port_operations pacpi_ops = {
28086+static const struct ata_port_operations pacpi_ops = {
28087 .inherits = &ata_bmdma_port_ops,
28088 .qc_issue = pacpi_qc_issue,
28089 .cable_detect = pacpi_cable_detect,
28090diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
28091index 9434114..1f2f364 100644
28092--- a/drivers/ata/pata_ali.c
28093+++ b/drivers/ata/pata_ali.c
28094@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
28095 * Port operations for PIO only ALi
28096 */
28097
28098-static struct ata_port_operations ali_early_port_ops = {
28099+static const struct ata_port_operations ali_early_port_ops = {
28100 .inherits = &ata_sff_port_ops,
28101 .cable_detect = ata_cable_40wire,
28102 .set_piomode = ali_set_piomode,
28103@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
28104 * Port operations for DMA capable ALi without cable
28105 * detect
28106 */
28107-static struct ata_port_operations ali_20_port_ops = {
28108+static const struct ata_port_operations ali_20_port_ops = {
28109 .inherits = &ali_dma_base_ops,
28110 .cable_detect = ata_cable_40wire,
28111 .mode_filter = ali_20_filter,
28112@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
28113 /*
28114 * Port operations for DMA capable ALi with cable detect
28115 */
28116-static struct ata_port_operations ali_c2_port_ops = {
28117+static const struct ata_port_operations ali_c2_port_ops = {
28118 .inherits = &ali_dma_base_ops,
28119 .check_atapi_dma = ali_check_atapi_dma,
28120 .cable_detect = ali_c2_cable_detect,
28121@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
28122 /*
28123 * Port operations for DMA capable ALi with cable detect
28124 */
28125-static struct ata_port_operations ali_c4_port_ops = {
28126+static const struct ata_port_operations ali_c4_port_ops = {
28127 .inherits = &ali_dma_base_ops,
28128 .check_atapi_dma = ali_check_atapi_dma,
28129 .cable_detect = ali_c2_cable_detect,
28130@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
28131 /*
28132 * Port operations for DMA capable ALi with cable detect and LBA48
28133 */
28134-static struct ata_port_operations ali_c5_port_ops = {
28135+static const struct ata_port_operations ali_c5_port_ops = {
28136 .inherits = &ali_dma_base_ops,
28137 .check_atapi_dma = ali_check_atapi_dma,
28138 .dev_config = ali_warn_atapi_dma,
28139diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
28140index 567f3f7..c8ee0da 100644
28141--- a/drivers/ata/pata_amd.c
28142+++ b/drivers/ata/pata_amd.c
28143@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
28144 .prereset = amd_pre_reset,
28145 };
28146
28147-static struct ata_port_operations amd33_port_ops = {
28148+static const struct ata_port_operations amd33_port_ops = {
28149 .inherits = &amd_base_port_ops,
28150 .cable_detect = ata_cable_40wire,
28151 .set_piomode = amd33_set_piomode,
28152 .set_dmamode = amd33_set_dmamode,
28153 };
28154
28155-static struct ata_port_operations amd66_port_ops = {
28156+static const struct ata_port_operations amd66_port_ops = {
28157 .inherits = &amd_base_port_ops,
28158 .cable_detect = ata_cable_unknown,
28159 .set_piomode = amd66_set_piomode,
28160 .set_dmamode = amd66_set_dmamode,
28161 };
28162
28163-static struct ata_port_operations amd100_port_ops = {
28164+static const struct ata_port_operations amd100_port_ops = {
28165 .inherits = &amd_base_port_ops,
28166 .cable_detect = ata_cable_unknown,
28167 .set_piomode = amd100_set_piomode,
28168 .set_dmamode = amd100_set_dmamode,
28169 };
28170
28171-static struct ata_port_operations amd133_port_ops = {
28172+static const struct ata_port_operations amd133_port_ops = {
28173 .inherits = &amd_base_port_ops,
28174 .cable_detect = amd_cable_detect,
28175 .set_piomode = amd133_set_piomode,
28176@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
28177 .host_stop = nv_host_stop,
28178 };
28179
28180-static struct ata_port_operations nv100_port_ops = {
28181+static const struct ata_port_operations nv100_port_ops = {
28182 .inherits = &nv_base_port_ops,
28183 .set_piomode = nv100_set_piomode,
28184 .set_dmamode = nv100_set_dmamode,
28185 };
28186
28187-static struct ata_port_operations nv133_port_ops = {
28188+static const struct ata_port_operations nv133_port_ops = {
28189 .inherits = &nv_base_port_ops,
28190 .set_piomode = nv133_set_piomode,
28191 .set_dmamode = nv133_set_dmamode,
28192diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
28193index d332cfd..4b7eaae 100644
28194--- a/drivers/ata/pata_artop.c
28195+++ b/drivers/ata/pata_artop.c
28196@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
28197 ATA_BMDMA_SHT(DRV_NAME),
28198 };
28199
28200-static struct ata_port_operations artop6210_ops = {
28201+static const struct ata_port_operations artop6210_ops = {
28202 .inherits = &ata_bmdma_port_ops,
28203 .cable_detect = ata_cable_40wire,
28204 .set_piomode = artop6210_set_piomode,
28205@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
28206 .qc_defer = artop6210_qc_defer,
28207 };
28208
28209-static struct ata_port_operations artop6260_ops = {
28210+static const struct ata_port_operations artop6260_ops = {
28211 .inherits = &ata_bmdma_port_ops,
28212 .cable_detect = artop6260_cable_detect,
28213 .set_piomode = artop6260_set_piomode,
28214diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
28215index 5c129f9..7bb7ccb 100644
28216--- a/drivers/ata/pata_at32.c
28217+++ b/drivers/ata/pata_at32.c
28218@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
28219 ATA_PIO_SHT(DRV_NAME),
28220 };
28221
28222-static struct ata_port_operations at32_port_ops = {
28223+static const struct ata_port_operations at32_port_ops = {
28224 .inherits = &ata_sff_port_ops,
28225 .cable_detect = ata_cable_40wire,
28226 .set_piomode = pata_at32_set_piomode,
28227diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28228index 41c94b1..829006d 100644
28229--- a/drivers/ata/pata_at91.c
28230+++ b/drivers/ata/pata_at91.c
28231@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28232 ATA_PIO_SHT(DRV_NAME),
28233 };
28234
28235-static struct ata_port_operations pata_at91_port_ops = {
28236+static const struct ata_port_operations pata_at91_port_ops = {
28237 .inherits = &ata_sff_port_ops,
28238
28239 .sff_data_xfer = pata_at91_data_xfer_noirq,
28240diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28241index ae4454d..d391eb4 100644
28242--- a/drivers/ata/pata_atiixp.c
28243+++ b/drivers/ata/pata_atiixp.c
28244@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28245 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28246 };
28247
28248-static struct ata_port_operations atiixp_port_ops = {
28249+static const struct ata_port_operations atiixp_port_ops = {
28250 .inherits = &ata_bmdma_port_ops,
28251
28252 .qc_prep = ata_sff_dumb_qc_prep,
28253diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28254index 6fe7ded..2a425dc 100644
28255--- a/drivers/ata/pata_atp867x.c
28256+++ b/drivers/ata/pata_atp867x.c
28257@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28258 ATA_BMDMA_SHT(DRV_NAME),
28259 };
28260
28261-static struct ata_port_operations atp867x_ops = {
28262+static const struct ata_port_operations atp867x_ops = {
28263 .inherits = &ata_bmdma_port_ops,
28264 .cable_detect = atp867x_cable_detect,
28265 .set_piomode = atp867x_set_piomode,
28266diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28267index c4b47a3..b27a367 100644
28268--- a/drivers/ata/pata_bf54x.c
28269+++ b/drivers/ata/pata_bf54x.c
28270@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28271 .dma_boundary = ATA_DMA_BOUNDARY,
28272 };
28273
28274-static struct ata_port_operations bfin_pata_ops = {
28275+static const struct ata_port_operations bfin_pata_ops = {
28276 .inherits = &ata_sff_port_ops,
28277
28278 .set_piomode = bfin_set_piomode,
28279diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28280index 5acf9fa..84248be 100644
28281--- a/drivers/ata/pata_cmd640.c
28282+++ b/drivers/ata/pata_cmd640.c
28283@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28284 ATA_BMDMA_SHT(DRV_NAME),
28285 };
28286
28287-static struct ata_port_operations cmd640_port_ops = {
28288+static const struct ata_port_operations cmd640_port_ops = {
28289 .inherits = &ata_bmdma_port_ops,
28290 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28291 .sff_data_xfer = ata_sff_data_xfer_noirq,
28292diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28293index ccd2694..c869c3d 100644
28294--- a/drivers/ata/pata_cmd64x.c
28295+++ b/drivers/ata/pata_cmd64x.c
28296@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28297 .set_dmamode = cmd64x_set_dmamode,
28298 };
28299
28300-static struct ata_port_operations cmd64x_port_ops = {
28301+static const struct ata_port_operations cmd64x_port_ops = {
28302 .inherits = &cmd64x_base_ops,
28303 .cable_detect = ata_cable_40wire,
28304 };
28305
28306-static struct ata_port_operations cmd646r1_port_ops = {
28307+static const struct ata_port_operations cmd646r1_port_ops = {
28308 .inherits = &cmd64x_base_ops,
28309 .bmdma_stop = cmd646r1_bmdma_stop,
28310 .cable_detect = ata_cable_40wire,
28311 };
28312
28313-static struct ata_port_operations cmd648_port_ops = {
28314+static const struct ata_port_operations cmd648_port_ops = {
28315 .inherits = &cmd64x_base_ops,
28316 .bmdma_stop = cmd648_bmdma_stop,
28317 .cable_detect = cmd648_cable_detect,
28318diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28319index 0df83cf..d7595b0 100644
28320--- a/drivers/ata/pata_cs5520.c
28321+++ b/drivers/ata/pata_cs5520.c
28322@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28323 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28324 };
28325
28326-static struct ata_port_operations cs5520_port_ops = {
28327+static const struct ata_port_operations cs5520_port_ops = {
28328 .inherits = &ata_bmdma_port_ops,
28329 .qc_prep = ata_sff_dumb_qc_prep,
28330 .cable_detect = ata_cable_40wire,
28331diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28332index c974b05..6d26b11 100644
28333--- a/drivers/ata/pata_cs5530.c
28334+++ b/drivers/ata/pata_cs5530.c
28335@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28336 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28337 };
28338
28339-static struct ata_port_operations cs5530_port_ops = {
28340+static const struct ata_port_operations cs5530_port_ops = {
28341 .inherits = &ata_bmdma_port_ops,
28342
28343 .qc_prep = ata_sff_dumb_qc_prep,
28344diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28345index 403f561..aacd26b 100644
28346--- a/drivers/ata/pata_cs5535.c
28347+++ b/drivers/ata/pata_cs5535.c
28348@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28349 ATA_BMDMA_SHT(DRV_NAME),
28350 };
28351
28352-static struct ata_port_operations cs5535_port_ops = {
28353+static const struct ata_port_operations cs5535_port_ops = {
28354 .inherits = &ata_bmdma_port_ops,
28355 .cable_detect = cs5535_cable_detect,
28356 .set_piomode = cs5535_set_piomode,
28357diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28358index 6da4cb4..de24a25 100644
28359--- a/drivers/ata/pata_cs5536.c
28360+++ b/drivers/ata/pata_cs5536.c
28361@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28362 ATA_BMDMA_SHT(DRV_NAME),
28363 };
28364
28365-static struct ata_port_operations cs5536_port_ops = {
28366+static const struct ata_port_operations cs5536_port_ops = {
28367 .inherits = &ata_bmdma_port_ops,
28368 .cable_detect = cs5536_cable_detect,
28369 .set_piomode = cs5536_set_piomode,
28370diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28371index 8fb040b..b16a9c9 100644
28372--- a/drivers/ata/pata_cypress.c
28373+++ b/drivers/ata/pata_cypress.c
28374@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28375 ATA_BMDMA_SHT(DRV_NAME),
28376 };
28377
28378-static struct ata_port_operations cy82c693_port_ops = {
28379+static const struct ata_port_operations cy82c693_port_ops = {
28380 .inherits = &ata_bmdma_port_ops,
28381 .cable_detect = ata_cable_40wire,
28382 .set_piomode = cy82c693_set_piomode,
28383diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28384index 2a6412f..555ee11 100644
28385--- a/drivers/ata/pata_efar.c
28386+++ b/drivers/ata/pata_efar.c
28387@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28388 ATA_BMDMA_SHT(DRV_NAME),
28389 };
28390
28391-static struct ata_port_operations efar_ops = {
28392+static const struct ata_port_operations efar_ops = {
28393 .inherits = &ata_bmdma_port_ops,
28394 .cable_detect = efar_cable_detect,
28395 .set_piomode = efar_set_piomode,
28396diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28397index b9d8836..0b92030 100644
28398--- a/drivers/ata/pata_hpt366.c
28399+++ b/drivers/ata/pata_hpt366.c
28400@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28401 * Configuration for HPT366/68
28402 */
28403
28404-static struct ata_port_operations hpt366_port_ops = {
28405+static const struct ata_port_operations hpt366_port_ops = {
28406 .inherits = &ata_bmdma_port_ops,
28407 .cable_detect = hpt36x_cable_detect,
28408 .mode_filter = hpt366_filter,
28409diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28410index 5af7f19..00c4980 100644
28411--- a/drivers/ata/pata_hpt37x.c
28412+++ b/drivers/ata/pata_hpt37x.c
28413@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28414 * Configuration for HPT370
28415 */
28416
28417-static struct ata_port_operations hpt370_port_ops = {
28418+static const struct ata_port_operations hpt370_port_ops = {
28419 .inherits = &ata_bmdma_port_ops,
28420
28421 .bmdma_stop = hpt370_bmdma_stop,
28422@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28423 * Configuration for HPT370A. Close to 370 but less filters
28424 */
28425
28426-static struct ata_port_operations hpt370a_port_ops = {
28427+static const struct ata_port_operations hpt370a_port_ops = {
28428 .inherits = &hpt370_port_ops,
28429 .mode_filter = hpt370a_filter,
28430 };
28431@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28432 * and DMA mode setting functionality.
28433 */
28434
28435-static struct ata_port_operations hpt372_port_ops = {
28436+static const struct ata_port_operations hpt372_port_ops = {
28437 .inherits = &ata_bmdma_port_ops,
28438
28439 .bmdma_stop = hpt37x_bmdma_stop,
28440@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28441 * but we have a different cable detection procedure for function 1.
28442 */
28443
28444-static struct ata_port_operations hpt374_fn1_port_ops = {
28445+static const struct ata_port_operations hpt374_fn1_port_ops = {
28446 .inherits = &hpt372_port_ops,
28447 .prereset = hpt374_fn1_pre_reset,
28448 };
28449diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28450index 100f227..2e39382 100644
28451--- a/drivers/ata/pata_hpt3x2n.c
28452+++ b/drivers/ata/pata_hpt3x2n.c
28453@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28454 * Configuration for HPT3x2n.
28455 */
28456
28457-static struct ata_port_operations hpt3x2n_port_ops = {
28458+static const struct ata_port_operations hpt3x2n_port_ops = {
28459 .inherits = &ata_bmdma_port_ops,
28460
28461 .bmdma_stop = hpt3x2n_bmdma_stop,
28462diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28463index 7e31025..6fca8f4 100644
28464--- a/drivers/ata/pata_hpt3x3.c
28465+++ b/drivers/ata/pata_hpt3x3.c
28466@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28467 ATA_BMDMA_SHT(DRV_NAME),
28468 };
28469
28470-static struct ata_port_operations hpt3x3_port_ops = {
28471+static const struct ata_port_operations hpt3x3_port_ops = {
28472 .inherits = &ata_bmdma_port_ops,
28473 .cable_detect = ata_cable_40wire,
28474 .set_piomode = hpt3x3_set_piomode,
28475diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28476index b663b7f..9a26c2a 100644
28477--- a/drivers/ata/pata_icside.c
28478+++ b/drivers/ata/pata_icside.c
28479@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28480 }
28481 }
28482
28483-static struct ata_port_operations pata_icside_port_ops = {
28484+static const struct ata_port_operations pata_icside_port_ops = {
28485 .inherits = &ata_sff_port_ops,
28486 /* no need to build any PRD tables for DMA */
28487 .qc_prep = ata_noop_qc_prep,
28488diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28489index 4bceb88..457dfb6 100644
28490--- a/drivers/ata/pata_isapnp.c
28491+++ b/drivers/ata/pata_isapnp.c
28492@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28493 ATA_PIO_SHT(DRV_NAME),
28494 };
28495
28496-static struct ata_port_operations isapnp_port_ops = {
28497+static const struct ata_port_operations isapnp_port_ops = {
28498 .inherits = &ata_sff_port_ops,
28499 .cable_detect = ata_cable_40wire,
28500 };
28501
28502-static struct ata_port_operations isapnp_noalt_port_ops = {
28503+static const struct ata_port_operations isapnp_noalt_port_ops = {
28504 .inherits = &ata_sff_port_ops,
28505 .cable_detect = ata_cable_40wire,
28506 /* No altstatus so we don't want to use the lost interrupt poll */
28507diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28508index f156da8..24976e2 100644
28509--- a/drivers/ata/pata_it8213.c
28510+++ b/drivers/ata/pata_it8213.c
28511@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28512 };
28513
28514
28515-static struct ata_port_operations it8213_ops = {
28516+static const struct ata_port_operations it8213_ops = {
28517 .inherits = &ata_bmdma_port_ops,
28518 .cable_detect = it8213_cable_detect,
28519 .set_piomode = it8213_set_piomode,
28520diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28521index 188bc2f..ca9e785 100644
28522--- a/drivers/ata/pata_it821x.c
28523+++ b/drivers/ata/pata_it821x.c
28524@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28525 ATA_BMDMA_SHT(DRV_NAME),
28526 };
28527
28528-static struct ata_port_operations it821x_smart_port_ops = {
28529+static const struct ata_port_operations it821x_smart_port_ops = {
28530 .inherits = &ata_bmdma_port_ops,
28531
28532 .check_atapi_dma= it821x_check_atapi_dma,
28533@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28534 .port_start = it821x_port_start,
28535 };
28536
28537-static struct ata_port_operations it821x_passthru_port_ops = {
28538+static const struct ata_port_operations it821x_passthru_port_ops = {
28539 .inherits = &ata_bmdma_port_ops,
28540
28541 .check_atapi_dma= it821x_check_atapi_dma,
28542@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28543 .port_start = it821x_port_start,
28544 };
28545
28546-static struct ata_port_operations it821x_rdc_port_ops = {
28547+static const struct ata_port_operations it821x_rdc_port_ops = {
28548 .inherits = &ata_bmdma_port_ops,
28549
28550 .check_atapi_dma= it821x_check_atapi_dma,
28551diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28552index ba54b08..4b952b7 100644
28553--- a/drivers/ata/pata_ixp4xx_cf.c
28554+++ b/drivers/ata/pata_ixp4xx_cf.c
28555@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28556 ATA_PIO_SHT(DRV_NAME),
28557 };
28558
28559-static struct ata_port_operations ixp4xx_port_ops = {
28560+static const struct ata_port_operations ixp4xx_port_ops = {
28561 .inherits = &ata_sff_port_ops,
28562 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28563 .cable_detect = ata_cable_40wire,
28564diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28565index 3a1474a..434b0ff 100644
28566--- a/drivers/ata/pata_jmicron.c
28567+++ b/drivers/ata/pata_jmicron.c
28568@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28569 ATA_BMDMA_SHT(DRV_NAME),
28570 };
28571
28572-static struct ata_port_operations jmicron_ops = {
28573+static const struct ata_port_operations jmicron_ops = {
28574 .inherits = &ata_bmdma_port_ops,
28575 .prereset = jmicron_pre_reset,
28576 };
28577diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28578index 6932e56..220e71d 100644
28579--- a/drivers/ata/pata_legacy.c
28580+++ b/drivers/ata/pata_legacy.c
28581@@ -106,7 +106,7 @@ struct legacy_probe {
28582
28583 struct legacy_controller {
28584 const char *name;
28585- struct ata_port_operations *ops;
28586+ const struct ata_port_operations *ops;
28587 unsigned int pio_mask;
28588 unsigned int flags;
28589 unsigned int pflags;
28590@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28591 * pio_mask as well.
28592 */
28593
28594-static struct ata_port_operations simple_port_ops = {
28595+static const struct ata_port_operations simple_port_ops = {
28596 .inherits = &legacy_base_port_ops,
28597 .sff_data_xfer = ata_sff_data_xfer_noirq,
28598 };
28599
28600-static struct ata_port_operations legacy_port_ops = {
28601+static const struct ata_port_operations legacy_port_ops = {
28602 .inherits = &legacy_base_port_ops,
28603 .sff_data_xfer = ata_sff_data_xfer_noirq,
28604 .set_mode = legacy_set_mode,
28605@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28606 return buflen;
28607 }
28608
28609-static struct ata_port_operations pdc20230_port_ops = {
28610+static const struct ata_port_operations pdc20230_port_ops = {
28611 .inherits = &legacy_base_port_ops,
28612 .set_piomode = pdc20230_set_piomode,
28613 .sff_data_xfer = pdc_data_xfer_vlb,
28614@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28615 ioread8(ap->ioaddr.status_addr);
28616 }
28617
28618-static struct ata_port_operations ht6560a_port_ops = {
28619+static const struct ata_port_operations ht6560a_port_ops = {
28620 .inherits = &legacy_base_port_ops,
28621 .set_piomode = ht6560a_set_piomode,
28622 };
28623@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28624 ioread8(ap->ioaddr.status_addr);
28625 }
28626
28627-static struct ata_port_operations ht6560b_port_ops = {
28628+static const struct ata_port_operations ht6560b_port_ops = {
28629 .inherits = &legacy_base_port_ops,
28630 .set_piomode = ht6560b_set_piomode,
28631 };
28632@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28633 }
28634
28635
28636-static struct ata_port_operations opti82c611a_port_ops = {
28637+static const struct ata_port_operations opti82c611a_port_ops = {
28638 .inherits = &legacy_base_port_ops,
28639 .set_piomode = opti82c611a_set_piomode,
28640 };
28641@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28642 return ata_sff_qc_issue(qc);
28643 }
28644
28645-static struct ata_port_operations opti82c46x_port_ops = {
28646+static const struct ata_port_operations opti82c46x_port_ops = {
28647 .inherits = &legacy_base_port_ops,
28648 .set_piomode = opti82c46x_set_piomode,
28649 .qc_issue = opti82c46x_qc_issue,
28650@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28651 return 0;
28652 }
28653
28654-static struct ata_port_operations qdi6500_port_ops = {
28655+static const struct ata_port_operations qdi6500_port_ops = {
28656 .inherits = &legacy_base_port_ops,
28657 .set_piomode = qdi6500_set_piomode,
28658 .qc_issue = qdi_qc_issue,
28659 .sff_data_xfer = vlb32_data_xfer,
28660 };
28661
28662-static struct ata_port_operations qdi6580_port_ops = {
28663+static const struct ata_port_operations qdi6580_port_ops = {
28664 .inherits = &legacy_base_port_ops,
28665 .set_piomode = qdi6580_set_piomode,
28666 .sff_data_xfer = vlb32_data_xfer,
28667 };
28668
28669-static struct ata_port_operations qdi6580dp_port_ops = {
28670+static const struct ata_port_operations qdi6580dp_port_ops = {
28671 .inherits = &legacy_base_port_ops,
28672 .set_piomode = qdi6580dp_set_piomode,
28673 .sff_data_xfer = vlb32_data_xfer,
28674@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28675 return 0;
28676 }
28677
28678-static struct ata_port_operations winbond_port_ops = {
28679+static const struct ata_port_operations winbond_port_ops = {
28680 .inherits = &legacy_base_port_ops,
28681 .set_piomode = winbond_set_piomode,
28682 .sff_data_xfer = vlb32_data_xfer,
28683@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28684 int pio_modes = controller->pio_mask;
28685 unsigned long io = probe->port;
28686 u32 mask = (1 << probe->slot);
28687- struct ata_port_operations *ops = controller->ops;
28688+ const struct ata_port_operations *ops = controller->ops;
28689 struct legacy_data *ld = &legacy_data[probe->slot];
28690 struct ata_host *host = NULL;
28691 struct ata_port *ap;
28692diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28693index 2096fb7..4d090fc 100644
28694--- a/drivers/ata/pata_marvell.c
28695+++ b/drivers/ata/pata_marvell.c
28696@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28697 ATA_BMDMA_SHT(DRV_NAME),
28698 };
28699
28700-static struct ata_port_operations marvell_ops = {
28701+static const struct ata_port_operations marvell_ops = {
28702 .inherits = &ata_bmdma_port_ops,
28703 .cable_detect = marvell_cable_detect,
28704 .prereset = marvell_pre_reset,
28705diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28706index 99d41be..7d56aa8 100644
28707--- a/drivers/ata/pata_mpc52xx.c
28708+++ b/drivers/ata/pata_mpc52xx.c
28709@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28710 ATA_PIO_SHT(DRV_NAME),
28711 };
28712
28713-static struct ata_port_operations mpc52xx_ata_port_ops = {
28714+static const struct ata_port_operations mpc52xx_ata_port_ops = {
28715 .inherits = &ata_bmdma_port_ops,
28716 .sff_dev_select = mpc52xx_ata_dev_select,
28717 .set_piomode = mpc52xx_ata_set_piomode,
28718diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28719index b21f002..0a27e7f 100644
28720--- a/drivers/ata/pata_mpiix.c
28721+++ b/drivers/ata/pata_mpiix.c
28722@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28723 ATA_PIO_SHT(DRV_NAME),
28724 };
28725
28726-static struct ata_port_operations mpiix_port_ops = {
28727+static const struct ata_port_operations mpiix_port_ops = {
28728 .inherits = &ata_sff_port_ops,
28729 .qc_issue = mpiix_qc_issue,
28730 .cable_detect = ata_cable_40wire,
28731diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28732index f0d52f7..89c3be3 100644
28733--- a/drivers/ata/pata_netcell.c
28734+++ b/drivers/ata/pata_netcell.c
28735@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28736 ATA_BMDMA_SHT(DRV_NAME),
28737 };
28738
28739-static struct ata_port_operations netcell_ops = {
28740+static const struct ata_port_operations netcell_ops = {
28741 .inherits = &ata_bmdma_port_ops,
28742 .cable_detect = ata_cable_80wire,
28743 .read_id = netcell_read_id,
28744diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28745index dd53a66..a3f4317 100644
28746--- a/drivers/ata/pata_ninja32.c
28747+++ b/drivers/ata/pata_ninja32.c
28748@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28749 ATA_BMDMA_SHT(DRV_NAME),
28750 };
28751
28752-static struct ata_port_operations ninja32_port_ops = {
28753+static const struct ata_port_operations ninja32_port_ops = {
28754 .inherits = &ata_bmdma_port_ops,
28755 .sff_dev_select = ninja32_dev_select,
28756 .cable_detect = ata_cable_40wire,
28757diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28758index ca53fac..9aa93ef 100644
28759--- a/drivers/ata/pata_ns87410.c
28760+++ b/drivers/ata/pata_ns87410.c
28761@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28762 ATA_PIO_SHT(DRV_NAME),
28763 };
28764
28765-static struct ata_port_operations ns87410_port_ops = {
28766+static const struct ata_port_operations ns87410_port_ops = {
28767 .inherits = &ata_sff_port_ops,
28768 .qc_issue = ns87410_qc_issue,
28769 .cable_detect = ata_cable_40wire,
28770diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28771index 773b159..55f454e 100644
28772--- a/drivers/ata/pata_ns87415.c
28773+++ b/drivers/ata/pata_ns87415.c
28774@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28775 }
28776 #endif /* 87560 SuperIO Support */
28777
28778-static struct ata_port_operations ns87415_pata_ops = {
28779+static const struct ata_port_operations ns87415_pata_ops = {
28780 .inherits = &ata_bmdma_port_ops,
28781
28782 .check_atapi_dma = ns87415_check_atapi_dma,
28783@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28784 };
28785
28786 #if defined(CONFIG_SUPERIO)
28787-static struct ata_port_operations ns87560_pata_ops = {
28788+static const struct ata_port_operations ns87560_pata_ops = {
28789 .inherits = &ns87415_pata_ops,
28790 .sff_tf_read = ns87560_tf_read,
28791 .sff_check_status = ns87560_check_status,
28792diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28793index d6f6956..639295b 100644
28794--- a/drivers/ata/pata_octeon_cf.c
28795+++ b/drivers/ata/pata_octeon_cf.c
28796@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28797 return 0;
28798 }
28799
28800+/* cannot be const */
28801 static struct ata_port_operations octeon_cf_ops = {
28802 .inherits = &ata_sff_port_ops,
28803 .check_atapi_dma = octeon_cf_check_atapi_dma,
28804diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28805index 84ac503..adee1cd 100644
28806--- a/drivers/ata/pata_oldpiix.c
28807+++ b/drivers/ata/pata_oldpiix.c
28808@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28809 ATA_BMDMA_SHT(DRV_NAME),
28810 };
28811
28812-static struct ata_port_operations oldpiix_pata_ops = {
28813+static const struct ata_port_operations oldpiix_pata_ops = {
28814 .inherits = &ata_bmdma_port_ops,
28815 .qc_issue = oldpiix_qc_issue,
28816 .cable_detect = ata_cable_40wire,
28817diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28818index 99eddda..3a4c0aa 100644
28819--- a/drivers/ata/pata_opti.c
28820+++ b/drivers/ata/pata_opti.c
28821@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28822 ATA_PIO_SHT(DRV_NAME),
28823 };
28824
28825-static struct ata_port_operations opti_port_ops = {
28826+static const struct ata_port_operations opti_port_ops = {
28827 .inherits = &ata_sff_port_ops,
28828 .cable_detect = ata_cable_40wire,
28829 .set_piomode = opti_set_piomode,
28830diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28831index 86885a4..8e9968d 100644
28832--- a/drivers/ata/pata_optidma.c
28833+++ b/drivers/ata/pata_optidma.c
28834@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28835 ATA_BMDMA_SHT(DRV_NAME),
28836 };
28837
28838-static struct ata_port_operations optidma_port_ops = {
28839+static const struct ata_port_operations optidma_port_ops = {
28840 .inherits = &ata_bmdma_port_ops,
28841 .cable_detect = ata_cable_40wire,
28842 .set_piomode = optidma_set_pio_mode,
28843@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28844 .prereset = optidma_pre_reset,
28845 };
28846
28847-static struct ata_port_operations optiplus_port_ops = {
28848+static const struct ata_port_operations optiplus_port_ops = {
28849 .inherits = &optidma_port_ops,
28850 .set_piomode = optiplus_set_pio_mode,
28851 .set_dmamode = optiplus_set_dma_mode,
28852diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28853index 11fb4cc..1a14022 100644
28854--- a/drivers/ata/pata_palmld.c
28855+++ b/drivers/ata/pata_palmld.c
28856@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28857 ATA_PIO_SHT(DRV_NAME),
28858 };
28859
28860-static struct ata_port_operations palmld_port_ops = {
28861+static const struct ata_port_operations palmld_port_ops = {
28862 .inherits = &ata_sff_port_ops,
28863 .sff_data_xfer = ata_sff_data_xfer_noirq,
28864 .cable_detect = ata_cable_40wire,
28865diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28866index dc99e26..7f4b1e4 100644
28867--- a/drivers/ata/pata_pcmcia.c
28868+++ b/drivers/ata/pata_pcmcia.c
28869@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28870 ATA_PIO_SHT(DRV_NAME),
28871 };
28872
28873-static struct ata_port_operations pcmcia_port_ops = {
28874+static const struct ata_port_operations pcmcia_port_ops = {
28875 .inherits = &ata_sff_port_ops,
28876 .sff_data_xfer = ata_sff_data_xfer_noirq,
28877 .cable_detect = ata_cable_40wire,
28878 .set_mode = pcmcia_set_mode,
28879 };
28880
28881-static struct ata_port_operations pcmcia_8bit_port_ops = {
28882+static const struct ata_port_operations pcmcia_8bit_port_ops = {
28883 .inherits = &ata_sff_port_ops,
28884 .sff_data_xfer = ata_data_xfer_8bit,
28885 .cable_detect = ata_cable_40wire,
28886@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28887 unsigned long io_base, ctl_base;
28888 void __iomem *io_addr, *ctl_addr;
28889 int n_ports = 1;
28890- struct ata_port_operations *ops = &pcmcia_port_ops;
28891+ const struct ata_port_operations *ops = &pcmcia_port_ops;
28892
28893 info = kzalloc(sizeof(*info), GFP_KERNEL);
28894 if (info == NULL)
28895diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28896index ca5cad0..3a1f125 100644
28897--- a/drivers/ata/pata_pdc2027x.c
28898+++ b/drivers/ata/pata_pdc2027x.c
28899@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28900 ATA_BMDMA_SHT(DRV_NAME),
28901 };
28902
28903-static struct ata_port_operations pdc2027x_pata100_ops = {
28904+static const struct ata_port_operations pdc2027x_pata100_ops = {
28905 .inherits = &ata_bmdma_port_ops,
28906 .check_atapi_dma = pdc2027x_check_atapi_dma,
28907 .cable_detect = pdc2027x_cable_detect,
28908 .prereset = pdc2027x_prereset,
28909 };
28910
28911-static struct ata_port_operations pdc2027x_pata133_ops = {
28912+static const struct ata_port_operations pdc2027x_pata133_ops = {
28913 .inherits = &pdc2027x_pata100_ops,
28914 .mode_filter = pdc2027x_mode_filter,
28915 .set_piomode = pdc2027x_set_piomode,
28916diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28917index 2911120..4bf62aa 100644
28918--- a/drivers/ata/pata_pdc202xx_old.c
28919+++ b/drivers/ata/pata_pdc202xx_old.c
28920@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28921 ATA_BMDMA_SHT(DRV_NAME),
28922 };
28923
28924-static struct ata_port_operations pdc2024x_port_ops = {
28925+static const struct ata_port_operations pdc2024x_port_ops = {
28926 .inherits = &ata_bmdma_port_ops,
28927
28928 .cable_detect = ata_cable_40wire,
28929@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28930 .sff_exec_command = pdc202xx_exec_command,
28931 };
28932
28933-static struct ata_port_operations pdc2026x_port_ops = {
28934+static const struct ata_port_operations pdc2026x_port_ops = {
28935 .inherits = &pdc2024x_port_ops,
28936
28937 .check_atapi_dma = pdc2026x_check_atapi_dma,
28938diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28939index 3f6ebc6..a18c358 100644
28940--- a/drivers/ata/pata_platform.c
28941+++ b/drivers/ata/pata_platform.c
28942@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28943 ATA_PIO_SHT(DRV_NAME),
28944 };
28945
28946-static struct ata_port_operations pata_platform_port_ops = {
28947+static const struct ata_port_operations pata_platform_port_ops = {
28948 .inherits = &ata_sff_port_ops,
28949 .sff_data_xfer = ata_sff_data_xfer_noirq,
28950 .cable_detect = ata_cable_unknown,
28951diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28952index 45879dc..165a9f9 100644
28953--- a/drivers/ata/pata_qdi.c
28954+++ b/drivers/ata/pata_qdi.c
28955@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28956 ATA_PIO_SHT(DRV_NAME),
28957 };
28958
28959-static struct ata_port_operations qdi6500_port_ops = {
28960+static const struct ata_port_operations qdi6500_port_ops = {
28961 .inherits = &ata_sff_port_ops,
28962 .qc_issue = qdi_qc_issue,
28963 .sff_data_xfer = qdi_data_xfer,
28964@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28965 .set_piomode = qdi6500_set_piomode,
28966 };
28967
28968-static struct ata_port_operations qdi6580_port_ops = {
28969+static const struct ata_port_operations qdi6580_port_ops = {
28970 .inherits = &qdi6500_port_ops,
28971 .set_piomode = qdi6580_set_piomode,
28972 };
28973diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28974index 4401b33..716c5cc 100644
28975--- a/drivers/ata/pata_radisys.c
28976+++ b/drivers/ata/pata_radisys.c
28977@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28978 ATA_BMDMA_SHT(DRV_NAME),
28979 };
28980
28981-static struct ata_port_operations radisys_pata_ops = {
28982+static const struct ata_port_operations radisys_pata_ops = {
28983 .inherits = &ata_bmdma_port_ops,
28984 .qc_issue = radisys_qc_issue,
28985 .cable_detect = ata_cable_unknown,
28986diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28987index 45f1e10..fab6bca 100644
28988--- a/drivers/ata/pata_rb532_cf.c
28989+++ b/drivers/ata/pata_rb532_cf.c
28990@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28991 return IRQ_HANDLED;
28992 }
28993
28994-static struct ata_port_operations rb532_pata_port_ops = {
28995+static const struct ata_port_operations rb532_pata_port_ops = {
28996 .inherits = &ata_sff_port_ops,
28997 .sff_data_xfer = ata_sff_data_xfer32,
28998 };
28999diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
29000index c843a1e..b5853c3 100644
29001--- a/drivers/ata/pata_rdc.c
29002+++ b/drivers/ata/pata_rdc.c
29003@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
29004 pci_write_config_byte(dev, 0x48, udma_enable);
29005 }
29006
29007-static struct ata_port_operations rdc_pata_ops = {
29008+static const struct ata_port_operations rdc_pata_ops = {
29009 .inherits = &ata_bmdma32_port_ops,
29010 .cable_detect = rdc_pata_cable_detect,
29011 .set_piomode = rdc_set_piomode,
29012diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
29013index a5e4dfe..080c8c9 100644
29014--- a/drivers/ata/pata_rz1000.c
29015+++ b/drivers/ata/pata_rz1000.c
29016@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
29017 ATA_PIO_SHT(DRV_NAME),
29018 };
29019
29020-static struct ata_port_operations rz1000_port_ops = {
29021+static const struct ata_port_operations rz1000_port_ops = {
29022 .inherits = &ata_sff_port_ops,
29023 .cable_detect = ata_cable_40wire,
29024 .set_mode = rz1000_set_mode,
29025diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
29026index 3bbed83..e309daf 100644
29027--- a/drivers/ata/pata_sc1200.c
29028+++ b/drivers/ata/pata_sc1200.c
29029@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
29030 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29031 };
29032
29033-static struct ata_port_operations sc1200_port_ops = {
29034+static const struct ata_port_operations sc1200_port_ops = {
29035 .inherits = &ata_bmdma_port_ops,
29036 .qc_prep = ata_sff_dumb_qc_prep,
29037 .qc_issue = sc1200_qc_issue,
29038diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
29039index 4257d6b..4c1d9d5 100644
29040--- a/drivers/ata/pata_scc.c
29041+++ b/drivers/ata/pata_scc.c
29042@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
29043 ATA_BMDMA_SHT(DRV_NAME),
29044 };
29045
29046-static struct ata_port_operations scc_pata_ops = {
29047+static const struct ata_port_operations scc_pata_ops = {
29048 .inherits = &ata_bmdma_port_ops,
29049
29050 .set_piomode = scc_set_piomode,
29051diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
29052index 99cceb4..e2e0a87 100644
29053--- a/drivers/ata/pata_sch.c
29054+++ b/drivers/ata/pata_sch.c
29055@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
29056 ATA_BMDMA_SHT(DRV_NAME),
29057 };
29058
29059-static struct ata_port_operations sch_pata_ops = {
29060+static const struct ata_port_operations sch_pata_ops = {
29061 .inherits = &ata_bmdma_port_ops,
29062 .cable_detect = ata_cable_unknown,
29063 .set_piomode = sch_set_piomode,
29064diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
29065index beaed12..39969f1 100644
29066--- a/drivers/ata/pata_serverworks.c
29067+++ b/drivers/ata/pata_serverworks.c
29068@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
29069 ATA_BMDMA_SHT(DRV_NAME),
29070 };
29071
29072-static struct ata_port_operations serverworks_osb4_port_ops = {
29073+static const struct ata_port_operations serverworks_osb4_port_ops = {
29074 .inherits = &ata_bmdma_port_ops,
29075 .cable_detect = serverworks_cable_detect,
29076 .mode_filter = serverworks_osb4_filter,
29077@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
29078 .set_dmamode = serverworks_set_dmamode,
29079 };
29080
29081-static struct ata_port_operations serverworks_csb_port_ops = {
29082+static const struct ata_port_operations serverworks_csb_port_ops = {
29083 .inherits = &serverworks_osb4_port_ops,
29084 .mode_filter = serverworks_csb_filter,
29085 };
29086diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
29087index a2ace48..0463b44 100644
29088--- a/drivers/ata/pata_sil680.c
29089+++ b/drivers/ata/pata_sil680.c
29090@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
29091 ATA_BMDMA_SHT(DRV_NAME),
29092 };
29093
29094-static struct ata_port_operations sil680_port_ops = {
29095+static const struct ata_port_operations sil680_port_ops = {
29096 .inherits = &ata_bmdma32_port_ops,
29097 .cable_detect = sil680_cable_detect,
29098 .set_piomode = sil680_set_piomode,
29099diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
29100index 488e77b..b3724d5 100644
29101--- a/drivers/ata/pata_sis.c
29102+++ b/drivers/ata/pata_sis.c
29103@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
29104 ATA_BMDMA_SHT(DRV_NAME),
29105 };
29106
29107-static struct ata_port_operations sis_133_for_sata_ops = {
29108+static const struct ata_port_operations sis_133_for_sata_ops = {
29109 .inherits = &ata_bmdma_port_ops,
29110 .set_piomode = sis_133_set_piomode,
29111 .set_dmamode = sis_133_set_dmamode,
29112 .cable_detect = sis_133_cable_detect,
29113 };
29114
29115-static struct ata_port_operations sis_base_ops = {
29116+static const struct ata_port_operations sis_base_ops = {
29117 .inherits = &ata_bmdma_port_ops,
29118 .prereset = sis_pre_reset,
29119 };
29120
29121-static struct ata_port_operations sis_133_ops = {
29122+static const struct ata_port_operations sis_133_ops = {
29123 .inherits = &sis_base_ops,
29124 .set_piomode = sis_133_set_piomode,
29125 .set_dmamode = sis_133_set_dmamode,
29126 .cable_detect = sis_133_cable_detect,
29127 };
29128
29129-static struct ata_port_operations sis_133_early_ops = {
29130+static const struct ata_port_operations sis_133_early_ops = {
29131 .inherits = &sis_base_ops,
29132 .set_piomode = sis_100_set_piomode,
29133 .set_dmamode = sis_133_early_set_dmamode,
29134 .cable_detect = sis_66_cable_detect,
29135 };
29136
29137-static struct ata_port_operations sis_100_ops = {
29138+static const struct ata_port_operations sis_100_ops = {
29139 .inherits = &sis_base_ops,
29140 .set_piomode = sis_100_set_piomode,
29141 .set_dmamode = sis_100_set_dmamode,
29142 .cable_detect = sis_66_cable_detect,
29143 };
29144
29145-static struct ata_port_operations sis_66_ops = {
29146+static const struct ata_port_operations sis_66_ops = {
29147 .inherits = &sis_base_ops,
29148 .set_piomode = sis_old_set_piomode,
29149 .set_dmamode = sis_66_set_dmamode,
29150 .cable_detect = sis_66_cable_detect,
29151 };
29152
29153-static struct ata_port_operations sis_old_ops = {
29154+static const struct ata_port_operations sis_old_ops = {
29155 .inherits = &sis_base_ops,
29156 .set_piomode = sis_old_set_piomode,
29157 .set_dmamode = sis_old_set_dmamode,
29158diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
29159index 29f733c..43e9ca0 100644
29160--- a/drivers/ata/pata_sl82c105.c
29161+++ b/drivers/ata/pata_sl82c105.c
29162@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
29163 ATA_BMDMA_SHT(DRV_NAME),
29164 };
29165
29166-static struct ata_port_operations sl82c105_port_ops = {
29167+static const struct ata_port_operations sl82c105_port_ops = {
29168 .inherits = &ata_bmdma_port_ops,
29169 .qc_defer = sl82c105_qc_defer,
29170 .bmdma_start = sl82c105_bmdma_start,
29171diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
29172index f1f13ff..df39e99 100644
29173--- a/drivers/ata/pata_triflex.c
29174+++ b/drivers/ata/pata_triflex.c
29175@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
29176 ATA_BMDMA_SHT(DRV_NAME),
29177 };
29178
29179-static struct ata_port_operations triflex_port_ops = {
29180+static const struct ata_port_operations triflex_port_ops = {
29181 .inherits = &ata_bmdma_port_ops,
29182 .bmdma_start = triflex_bmdma_start,
29183 .bmdma_stop = triflex_bmdma_stop,
29184diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
29185index 1d73b8d..98a4b29 100644
29186--- a/drivers/ata/pata_via.c
29187+++ b/drivers/ata/pata_via.c
29188@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
29189 ATA_BMDMA_SHT(DRV_NAME),
29190 };
29191
29192-static struct ata_port_operations via_port_ops = {
29193+static const struct ata_port_operations via_port_ops = {
29194 .inherits = &ata_bmdma_port_ops,
29195 .cable_detect = via_cable_detect,
29196 .set_piomode = via_set_piomode,
29197@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
29198 .port_start = via_port_start,
29199 };
29200
29201-static struct ata_port_operations via_port_ops_noirq = {
29202+static const struct ata_port_operations via_port_ops_noirq = {
29203 .inherits = &via_port_ops,
29204 .sff_data_xfer = ata_sff_data_xfer_noirq,
29205 };
29206diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
29207index 6d8619b..ad511c4 100644
29208--- a/drivers/ata/pata_winbond.c
29209+++ b/drivers/ata/pata_winbond.c
29210@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
29211 ATA_PIO_SHT(DRV_NAME),
29212 };
29213
29214-static struct ata_port_operations winbond_port_ops = {
29215+static const struct ata_port_operations winbond_port_ops = {
29216 .inherits = &ata_sff_port_ops,
29217 .sff_data_xfer = winbond_data_xfer,
29218 .cable_detect = ata_cable_40wire,
29219diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
29220index 6c65b07..f996ec7 100644
29221--- a/drivers/ata/pdc_adma.c
29222+++ b/drivers/ata/pdc_adma.c
29223@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29224 .dma_boundary = ADMA_DMA_BOUNDARY,
29225 };
29226
29227-static struct ata_port_operations adma_ata_ops = {
29228+static const struct ata_port_operations adma_ata_ops = {
29229 .inherits = &ata_sff_port_ops,
29230
29231 .lost_interrupt = ATA_OP_NULL,
29232diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29233index 172b57e..c49bc1e 100644
29234--- a/drivers/ata/sata_fsl.c
29235+++ b/drivers/ata/sata_fsl.c
29236@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29237 .dma_boundary = ATA_DMA_BOUNDARY,
29238 };
29239
29240-static struct ata_port_operations sata_fsl_ops = {
29241+static const struct ata_port_operations sata_fsl_ops = {
29242 .inherits = &sata_pmp_port_ops,
29243
29244 .qc_defer = ata_std_qc_defer,
29245diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29246index 4406902..60603ef 100644
29247--- a/drivers/ata/sata_inic162x.c
29248+++ b/drivers/ata/sata_inic162x.c
29249@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29250 return 0;
29251 }
29252
29253-static struct ata_port_operations inic_port_ops = {
29254+static const struct ata_port_operations inic_port_ops = {
29255 .inherits = &sata_port_ops,
29256
29257 .check_atapi_dma = inic_check_atapi_dma,
29258diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29259index cf41126..8107be6 100644
29260--- a/drivers/ata/sata_mv.c
29261+++ b/drivers/ata/sata_mv.c
29262@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29263 .dma_boundary = MV_DMA_BOUNDARY,
29264 };
29265
29266-static struct ata_port_operations mv5_ops = {
29267+static const struct ata_port_operations mv5_ops = {
29268 .inherits = &ata_sff_port_ops,
29269
29270 .lost_interrupt = ATA_OP_NULL,
29271@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29272 .port_stop = mv_port_stop,
29273 };
29274
29275-static struct ata_port_operations mv6_ops = {
29276+static const struct ata_port_operations mv6_ops = {
29277 .inherits = &mv5_ops,
29278 .dev_config = mv6_dev_config,
29279 .scr_read = mv_scr_read,
29280@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29281 .bmdma_status = mv_bmdma_status,
29282 };
29283
29284-static struct ata_port_operations mv_iie_ops = {
29285+static const struct ata_port_operations mv_iie_ops = {
29286 .inherits = &mv6_ops,
29287 .dev_config = ATA_OP_NULL,
29288 .qc_prep = mv_qc_prep_iie,
29289diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29290index ae2297c..d5c9c33 100644
29291--- a/drivers/ata/sata_nv.c
29292+++ b/drivers/ata/sata_nv.c
29293@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29294 * cases. Define nv_hardreset() which only kicks in for post-boot
29295 * probing and use it for all variants.
29296 */
29297-static struct ata_port_operations nv_generic_ops = {
29298+static const struct ata_port_operations nv_generic_ops = {
29299 .inherits = &ata_bmdma_port_ops,
29300 .lost_interrupt = ATA_OP_NULL,
29301 .scr_read = nv_scr_read,
29302@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29303 .hardreset = nv_hardreset,
29304 };
29305
29306-static struct ata_port_operations nv_nf2_ops = {
29307+static const struct ata_port_operations nv_nf2_ops = {
29308 .inherits = &nv_generic_ops,
29309 .freeze = nv_nf2_freeze,
29310 .thaw = nv_nf2_thaw,
29311 };
29312
29313-static struct ata_port_operations nv_ck804_ops = {
29314+static const struct ata_port_operations nv_ck804_ops = {
29315 .inherits = &nv_generic_ops,
29316 .freeze = nv_ck804_freeze,
29317 .thaw = nv_ck804_thaw,
29318 .host_stop = nv_ck804_host_stop,
29319 };
29320
29321-static struct ata_port_operations nv_adma_ops = {
29322+static const struct ata_port_operations nv_adma_ops = {
29323 .inherits = &nv_ck804_ops,
29324
29325 .check_atapi_dma = nv_adma_check_atapi_dma,
29326@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29327 .host_stop = nv_adma_host_stop,
29328 };
29329
29330-static struct ata_port_operations nv_swncq_ops = {
29331+static const struct ata_port_operations nv_swncq_ops = {
29332 .inherits = &nv_generic_ops,
29333
29334 .qc_defer = ata_std_qc_defer,
29335diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29336index 07d8d00..6cc70bb 100644
29337--- a/drivers/ata/sata_promise.c
29338+++ b/drivers/ata/sata_promise.c
29339@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29340 .error_handler = pdc_error_handler,
29341 };
29342
29343-static struct ata_port_operations pdc_sata_ops = {
29344+static const struct ata_port_operations pdc_sata_ops = {
29345 .inherits = &pdc_common_ops,
29346 .cable_detect = pdc_sata_cable_detect,
29347 .freeze = pdc_sata_freeze,
29348@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29349
29350 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29351 and ->freeze/thaw that ignore the hotplug controls. */
29352-static struct ata_port_operations pdc_old_sata_ops = {
29353+static const struct ata_port_operations pdc_old_sata_ops = {
29354 .inherits = &pdc_sata_ops,
29355 .freeze = pdc_freeze,
29356 .thaw = pdc_thaw,
29357 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29358 };
29359
29360-static struct ata_port_operations pdc_pata_ops = {
29361+static const struct ata_port_operations pdc_pata_ops = {
29362 .inherits = &pdc_common_ops,
29363 .cable_detect = pdc_pata_cable_detect,
29364 .freeze = pdc_freeze,
29365diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29366index 326c0cf..36ecebe 100644
29367--- a/drivers/ata/sata_qstor.c
29368+++ b/drivers/ata/sata_qstor.c
29369@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29370 .dma_boundary = QS_DMA_BOUNDARY,
29371 };
29372
29373-static struct ata_port_operations qs_ata_ops = {
29374+static const struct ata_port_operations qs_ata_ops = {
29375 .inherits = &ata_sff_port_ops,
29376
29377 .check_atapi_dma = qs_check_atapi_dma,
29378diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29379index 3cb69d5..0871d3c 100644
29380--- a/drivers/ata/sata_sil.c
29381+++ b/drivers/ata/sata_sil.c
29382@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29383 .sg_tablesize = ATA_MAX_PRD
29384 };
29385
29386-static struct ata_port_operations sil_ops = {
29387+static const struct ata_port_operations sil_ops = {
29388 .inherits = &ata_bmdma32_port_ops,
29389 .dev_config = sil_dev_config,
29390 .set_mode = sil_set_mode,
29391diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29392index e6946fc..eddb794 100644
29393--- a/drivers/ata/sata_sil24.c
29394+++ b/drivers/ata/sata_sil24.c
29395@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29396 .dma_boundary = ATA_DMA_BOUNDARY,
29397 };
29398
29399-static struct ata_port_operations sil24_ops = {
29400+static const struct ata_port_operations sil24_ops = {
29401 .inherits = &sata_pmp_port_ops,
29402
29403 .qc_defer = sil24_qc_defer,
29404diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29405index f8a91bf..9cb06b6 100644
29406--- a/drivers/ata/sata_sis.c
29407+++ b/drivers/ata/sata_sis.c
29408@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29409 ATA_BMDMA_SHT(DRV_NAME),
29410 };
29411
29412-static struct ata_port_operations sis_ops = {
29413+static const struct ata_port_operations sis_ops = {
29414 .inherits = &ata_bmdma_port_ops,
29415 .scr_read = sis_scr_read,
29416 .scr_write = sis_scr_write,
29417diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29418index 7257f2d..d04c6f5 100644
29419--- a/drivers/ata/sata_svw.c
29420+++ b/drivers/ata/sata_svw.c
29421@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29422 };
29423
29424
29425-static struct ata_port_operations k2_sata_ops = {
29426+static const struct ata_port_operations k2_sata_ops = {
29427 .inherits = &ata_bmdma_port_ops,
29428 .sff_tf_load = k2_sata_tf_load,
29429 .sff_tf_read = k2_sata_tf_read,
29430diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29431index bbcf970..cd0df0d 100644
29432--- a/drivers/ata/sata_sx4.c
29433+++ b/drivers/ata/sata_sx4.c
29434@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29435 };
29436
29437 /* TODO: inherit from base port_ops after converting to new EH */
29438-static struct ata_port_operations pdc_20621_ops = {
29439+static const struct ata_port_operations pdc_20621_ops = {
29440 .inherits = &ata_sff_port_ops,
29441
29442 .check_atapi_dma = pdc_check_atapi_dma,
29443diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29444index e5bff47..089d859 100644
29445--- a/drivers/ata/sata_uli.c
29446+++ b/drivers/ata/sata_uli.c
29447@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29448 ATA_BMDMA_SHT(DRV_NAME),
29449 };
29450
29451-static struct ata_port_operations uli_ops = {
29452+static const struct ata_port_operations uli_ops = {
29453 .inherits = &ata_bmdma_port_ops,
29454 .scr_read = uli_scr_read,
29455 .scr_write = uli_scr_write,
29456diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29457index f5dcca7..77b94eb 100644
29458--- a/drivers/ata/sata_via.c
29459+++ b/drivers/ata/sata_via.c
29460@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29461 ATA_BMDMA_SHT(DRV_NAME),
29462 };
29463
29464-static struct ata_port_operations svia_base_ops = {
29465+static const struct ata_port_operations svia_base_ops = {
29466 .inherits = &ata_bmdma_port_ops,
29467 .sff_tf_load = svia_tf_load,
29468 };
29469
29470-static struct ata_port_operations vt6420_sata_ops = {
29471+static const struct ata_port_operations vt6420_sata_ops = {
29472 .inherits = &svia_base_ops,
29473 .freeze = svia_noop_freeze,
29474 .prereset = vt6420_prereset,
29475 .bmdma_start = vt6420_bmdma_start,
29476 };
29477
29478-static struct ata_port_operations vt6421_pata_ops = {
29479+static const struct ata_port_operations vt6421_pata_ops = {
29480 .inherits = &svia_base_ops,
29481 .cable_detect = vt6421_pata_cable_detect,
29482 .set_piomode = vt6421_set_pio_mode,
29483 .set_dmamode = vt6421_set_dma_mode,
29484 };
29485
29486-static struct ata_port_operations vt6421_sata_ops = {
29487+static const struct ata_port_operations vt6421_sata_ops = {
29488 .inherits = &svia_base_ops,
29489 .scr_read = svia_scr_read,
29490 .scr_write = svia_scr_write,
29491 };
29492
29493-static struct ata_port_operations vt8251_ops = {
29494+static const struct ata_port_operations vt8251_ops = {
29495 .inherits = &svia_base_ops,
29496 .hardreset = sata_std_hardreset,
29497 .scr_read = vt8251_scr_read,
29498diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29499index 8b2a278..51e65d3 100644
29500--- a/drivers/ata/sata_vsc.c
29501+++ b/drivers/ata/sata_vsc.c
29502@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29503 };
29504
29505
29506-static struct ata_port_operations vsc_sata_ops = {
29507+static const struct ata_port_operations vsc_sata_ops = {
29508 .inherits = &ata_bmdma_port_ops,
29509 /* The IRQ handling is not quite standard SFF behaviour so we
29510 cannot use the default lost interrupt handler */
29511diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29512index 5effec6..7e4019a 100644
29513--- a/drivers/atm/adummy.c
29514+++ b/drivers/atm/adummy.c
29515@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29516 vcc->pop(vcc, skb);
29517 else
29518 dev_kfree_skb_any(skb);
29519- atomic_inc(&vcc->stats->tx);
29520+ atomic_inc_unchecked(&vcc->stats->tx);
29521
29522 return 0;
29523 }
29524diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29525index 66e1813..26a27c6 100644
29526--- a/drivers/atm/ambassador.c
29527+++ b/drivers/atm/ambassador.c
29528@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29529 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29530
29531 // VC layer stats
29532- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29533+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29534
29535 // free the descriptor
29536 kfree (tx_descr);
29537@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29538 dump_skb ("<<<", vc, skb);
29539
29540 // VC layer stats
29541- atomic_inc(&atm_vcc->stats->rx);
29542+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29543 __net_timestamp(skb);
29544 // end of our responsability
29545 atm_vcc->push (atm_vcc, skb);
29546@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29547 } else {
29548 PRINTK (KERN_INFO, "dropped over-size frame");
29549 // should we count this?
29550- atomic_inc(&atm_vcc->stats->rx_drop);
29551+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29552 }
29553
29554 } else {
29555@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29556 }
29557
29558 if (check_area (skb->data, skb->len)) {
29559- atomic_inc(&atm_vcc->stats->tx_err);
29560+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29561 return -ENOMEM; // ?
29562 }
29563
29564diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29565index 02ad83d..6daffeb 100644
29566--- a/drivers/atm/atmtcp.c
29567+++ b/drivers/atm/atmtcp.c
29568@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29569 if (vcc->pop) vcc->pop(vcc,skb);
29570 else dev_kfree_skb(skb);
29571 if (dev_data) return 0;
29572- atomic_inc(&vcc->stats->tx_err);
29573+ atomic_inc_unchecked(&vcc->stats->tx_err);
29574 return -ENOLINK;
29575 }
29576 size = skb->len+sizeof(struct atmtcp_hdr);
29577@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29578 if (!new_skb) {
29579 if (vcc->pop) vcc->pop(vcc,skb);
29580 else dev_kfree_skb(skb);
29581- atomic_inc(&vcc->stats->tx_err);
29582+ atomic_inc_unchecked(&vcc->stats->tx_err);
29583 return -ENOBUFS;
29584 }
29585 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29586@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29587 if (vcc->pop) vcc->pop(vcc,skb);
29588 else dev_kfree_skb(skb);
29589 out_vcc->push(out_vcc,new_skb);
29590- atomic_inc(&vcc->stats->tx);
29591- atomic_inc(&out_vcc->stats->rx);
29592+ atomic_inc_unchecked(&vcc->stats->tx);
29593+ atomic_inc_unchecked(&out_vcc->stats->rx);
29594 return 0;
29595 }
29596
29597@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29598 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29599 read_unlock(&vcc_sklist_lock);
29600 if (!out_vcc) {
29601- atomic_inc(&vcc->stats->tx_err);
29602+ atomic_inc_unchecked(&vcc->stats->tx_err);
29603 goto done;
29604 }
29605 skb_pull(skb,sizeof(struct atmtcp_hdr));
29606@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29607 __net_timestamp(new_skb);
29608 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29609 out_vcc->push(out_vcc,new_skb);
29610- atomic_inc(&vcc->stats->tx);
29611- atomic_inc(&out_vcc->stats->rx);
29612+ atomic_inc_unchecked(&vcc->stats->tx);
29613+ atomic_inc_unchecked(&out_vcc->stats->rx);
29614 done:
29615 if (vcc->pop) vcc->pop(vcc,skb);
29616 else dev_kfree_skb(skb);
29617diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29618index 0c30261..3da356e 100644
29619--- a/drivers/atm/eni.c
29620+++ b/drivers/atm/eni.c
29621@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29622 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29623 vcc->dev->number);
29624 length = 0;
29625- atomic_inc(&vcc->stats->rx_err);
29626+ atomic_inc_unchecked(&vcc->stats->rx_err);
29627 }
29628 else {
29629 length = ATM_CELL_SIZE-1; /* no HEC */
29630@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29631 size);
29632 }
29633 eff = length = 0;
29634- atomic_inc(&vcc->stats->rx_err);
29635+ atomic_inc_unchecked(&vcc->stats->rx_err);
29636 }
29637 else {
29638 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29639@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29640 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29641 vcc->dev->number,vcc->vci,length,size << 2,descr);
29642 length = eff = 0;
29643- atomic_inc(&vcc->stats->rx_err);
29644+ atomic_inc_unchecked(&vcc->stats->rx_err);
29645 }
29646 }
29647 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29648@@ -770,7 +770,7 @@ rx_dequeued++;
29649 vcc->push(vcc,skb);
29650 pushed++;
29651 }
29652- atomic_inc(&vcc->stats->rx);
29653+ atomic_inc_unchecked(&vcc->stats->rx);
29654 }
29655 wake_up(&eni_dev->rx_wait);
29656 }
29657@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29658 PCI_DMA_TODEVICE);
29659 if (vcc->pop) vcc->pop(vcc,skb);
29660 else dev_kfree_skb_irq(skb);
29661- atomic_inc(&vcc->stats->tx);
29662+ atomic_inc_unchecked(&vcc->stats->tx);
29663 wake_up(&eni_dev->tx_wait);
29664 dma_complete++;
29665 }
29666@@ -1570,7 +1570,7 @@ tx_complete++;
29667 /*--------------------------------- entries ---------------------------------*/
29668
29669
29670-static const char *media_name[] __devinitdata = {
29671+static const char *media_name[] __devinitconst = {
29672 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29673 "UTP", "05?", "06?", "07?", /* 4- 7 */
29674 "TAXI","09?", "10?", "11?", /* 8-11 */
29675diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29676index cd5049a..a51209f 100644
29677--- a/drivers/atm/firestream.c
29678+++ b/drivers/atm/firestream.c
29679@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29680 }
29681 }
29682
29683- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29684+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29685
29686 fs_dprintk (FS_DEBUG_TXMEM, "i");
29687 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29688@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29689 #endif
29690 skb_put (skb, qe->p1 & 0xffff);
29691 ATM_SKB(skb)->vcc = atm_vcc;
29692- atomic_inc(&atm_vcc->stats->rx);
29693+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29694 __net_timestamp(skb);
29695 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29696 atm_vcc->push (atm_vcc, skb);
29697@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29698 kfree (pe);
29699 }
29700 if (atm_vcc)
29701- atomic_inc(&atm_vcc->stats->rx_drop);
29702+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29703 break;
29704 case 0x1f: /* Reassembly abort: no buffers. */
29705 /* Silently increment error counter. */
29706 if (atm_vcc)
29707- atomic_inc(&atm_vcc->stats->rx_drop);
29708+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29709 break;
29710 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29711 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29712diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29713index f766cc4..a34002e 100644
29714--- a/drivers/atm/fore200e.c
29715+++ b/drivers/atm/fore200e.c
29716@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29717 #endif
29718 /* check error condition */
29719 if (*entry->status & STATUS_ERROR)
29720- atomic_inc(&vcc->stats->tx_err);
29721+ atomic_inc_unchecked(&vcc->stats->tx_err);
29722 else
29723- atomic_inc(&vcc->stats->tx);
29724+ atomic_inc_unchecked(&vcc->stats->tx);
29725 }
29726 }
29727
29728@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29729 if (skb == NULL) {
29730 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29731
29732- atomic_inc(&vcc->stats->rx_drop);
29733+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29734 return -ENOMEM;
29735 }
29736
29737@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29738
29739 dev_kfree_skb_any(skb);
29740
29741- atomic_inc(&vcc->stats->rx_drop);
29742+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29743 return -ENOMEM;
29744 }
29745
29746 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29747
29748 vcc->push(vcc, skb);
29749- atomic_inc(&vcc->stats->rx);
29750+ atomic_inc_unchecked(&vcc->stats->rx);
29751
29752 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29753
29754@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29755 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29756 fore200e->atm_dev->number,
29757 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29758- atomic_inc(&vcc->stats->rx_err);
29759+ atomic_inc_unchecked(&vcc->stats->rx_err);
29760 }
29761 }
29762
29763@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29764 goto retry_here;
29765 }
29766
29767- atomic_inc(&vcc->stats->tx_err);
29768+ atomic_inc_unchecked(&vcc->stats->tx_err);
29769
29770 fore200e->tx_sat++;
29771 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29772diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29773index 7066703..2b130de 100644
29774--- a/drivers/atm/he.c
29775+++ b/drivers/atm/he.c
29776@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29777
29778 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29779 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29780- atomic_inc(&vcc->stats->rx_drop);
29781+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29782 goto return_host_buffers;
29783 }
29784
29785@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29786 RBRQ_LEN_ERR(he_dev->rbrq_head)
29787 ? "LEN_ERR" : "",
29788 vcc->vpi, vcc->vci);
29789- atomic_inc(&vcc->stats->rx_err);
29790+ atomic_inc_unchecked(&vcc->stats->rx_err);
29791 goto return_host_buffers;
29792 }
29793
29794@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29795 vcc->push(vcc, skb);
29796 spin_lock(&he_dev->global_lock);
29797
29798- atomic_inc(&vcc->stats->rx);
29799+ atomic_inc_unchecked(&vcc->stats->rx);
29800
29801 return_host_buffers:
29802 ++pdus_assembled;
29803@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29804 tpd->vcc->pop(tpd->vcc, tpd->skb);
29805 else
29806 dev_kfree_skb_any(tpd->skb);
29807- atomic_inc(&tpd->vcc->stats->tx_err);
29808+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29809 }
29810 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29811 return;
29812@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29813 vcc->pop(vcc, skb);
29814 else
29815 dev_kfree_skb_any(skb);
29816- atomic_inc(&vcc->stats->tx_err);
29817+ atomic_inc_unchecked(&vcc->stats->tx_err);
29818 return -EINVAL;
29819 }
29820
29821@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29822 vcc->pop(vcc, skb);
29823 else
29824 dev_kfree_skb_any(skb);
29825- atomic_inc(&vcc->stats->tx_err);
29826+ atomic_inc_unchecked(&vcc->stats->tx_err);
29827 return -EINVAL;
29828 }
29829 #endif
29830@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29831 vcc->pop(vcc, skb);
29832 else
29833 dev_kfree_skb_any(skb);
29834- atomic_inc(&vcc->stats->tx_err);
29835+ atomic_inc_unchecked(&vcc->stats->tx_err);
29836 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29837 return -ENOMEM;
29838 }
29839@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29840 vcc->pop(vcc, skb);
29841 else
29842 dev_kfree_skb_any(skb);
29843- atomic_inc(&vcc->stats->tx_err);
29844+ atomic_inc_unchecked(&vcc->stats->tx_err);
29845 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29846 return -ENOMEM;
29847 }
29848@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29849 __enqueue_tpd(he_dev, tpd, cid);
29850 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29851
29852- atomic_inc(&vcc->stats->tx);
29853+ atomic_inc_unchecked(&vcc->stats->tx);
29854
29855 return 0;
29856 }
29857diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29858index 4e49021..01b1512 100644
29859--- a/drivers/atm/horizon.c
29860+++ b/drivers/atm/horizon.c
29861@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29862 {
29863 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29864 // VC layer stats
29865- atomic_inc(&vcc->stats->rx);
29866+ atomic_inc_unchecked(&vcc->stats->rx);
29867 __net_timestamp(skb);
29868 // end of our responsability
29869 vcc->push (vcc, skb);
29870@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29871 dev->tx_iovec = NULL;
29872
29873 // VC layer stats
29874- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29875+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29876
29877 // free the skb
29878 hrz_kfree_skb (skb);
29879diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29880index e33ae00..9deb4ab 100644
29881--- a/drivers/atm/idt77252.c
29882+++ b/drivers/atm/idt77252.c
29883@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29884 else
29885 dev_kfree_skb(skb);
29886
29887- atomic_inc(&vcc->stats->tx);
29888+ atomic_inc_unchecked(&vcc->stats->tx);
29889 }
29890
29891 atomic_dec(&scq->used);
29892@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29893 if ((sb = dev_alloc_skb(64)) == NULL) {
29894 printk("%s: Can't allocate buffers for aal0.\n",
29895 card->name);
29896- atomic_add(i, &vcc->stats->rx_drop);
29897+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
29898 break;
29899 }
29900 if (!atm_charge(vcc, sb->truesize)) {
29901 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29902 card->name);
29903- atomic_add(i - 1, &vcc->stats->rx_drop);
29904+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29905 dev_kfree_skb(sb);
29906 break;
29907 }
29908@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29909 ATM_SKB(sb)->vcc = vcc;
29910 __net_timestamp(sb);
29911 vcc->push(vcc, sb);
29912- atomic_inc(&vcc->stats->rx);
29913+ atomic_inc_unchecked(&vcc->stats->rx);
29914
29915 cell += ATM_CELL_PAYLOAD;
29916 }
29917@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29918 "(CDC: %08x)\n",
29919 card->name, len, rpp->len, readl(SAR_REG_CDC));
29920 recycle_rx_pool_skb(card, rpp);
29921- atomic_inc(&vcc->stats->rx_err);
29922+ atomic_inc_unchecked(&vcc->stats->rx_err);
29923 return;
29924 }
29925 if (stat & SAR_RSQE_CRC) {
29926 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29927 recycle_rx_pool_skb(card, rpp);
29928- atomic_inc(&vcc->stats->rx_err);
29929+ atomic_inc_unchecked(&vcc->stats->rx_err);
29930 return;
29931 }
29932 if (skb_queue_len(&rpp->queue) > 1) {
29933@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29934 RXPRINTK("%s: Can't alloc RX skb.\n",
29935 card->name);
29936 recycle_rx_pool_skb(card, rpp);
29937- atomic_inc(&vcc->stats->rx_err);
29938+ atomic_inc_unchecked(&vcc->stats->rx_err);
29939 return;
29940 }
29941 if (!atm_charge(vcc, skb->truesize)) {
29942@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29943 __net_timestamp(skb);
29944
29945 vcc->push(vcc, skb);
29946- atomic_inc(&vcc->stats->rx);
29947+ atomic_inc_unchecked(&vcc->stats->rx);
29948
29949 return;
29950 }
29951@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29952 __net_timestamp(skb);
29953
29954 vcc->push(vcc, skb);
29955- atomic_inc(&vcc->stats->rx);
29956+ atomic_inc_unchecked(&vcc->stats->rx);
29957
29958 if (skb->truesize > SAR_FB_SIZE_3)
29959 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29960@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29961 if (vcc->qos.aal != ATM_AAL0) {
29962 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29963 card->name, vpi, vci);
29964- atomic_inc(&vcc->stats->rx_drop);
29965+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29966 goto drop;
29967 }
29968
29969 if ((sb = dev_alloc_skb(64)) == NULL) {
29970 printk("%s: Can't allocate buffers for AAL0.\n",
29971 card->name);
29972- atomic_inc(&vcc->stats->rx_err);
29973+ atomic_inc_unchecked(&vcc->stats->rx_err);
29974 goto drop;
29975 }
29976
29977@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29978 ATM_SKB(sb)->vcc = vcc;
29979 __net_timestamp(sb);
29980 vcc->push(vcc, sb);
29981- atomic_inc(&vcc->stats->rx);
29982+ atomic_inc_unchecked(&vcc->stats->rx);
29983
29984 drop:
29985 skb_pull(queue, 64);
29986@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29987
29988 if (vc == NULL) {
29989 printk("%s: NULL connection in send().\n", card->name);
29990- atomic_inc(&vcc->stats->tx_err);
29991+ atomic_inc_unchecked(&vcc->stats->tx_err);
29992 dev_kfree_skb(skb);
29993 return -EINVAL;
29994 }
29995 if (!test_bit(VCF_TX, &vc->flags)) {
29996 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29997- atomic_inc(&vcc->stats->tx_err);
29998+ atomic_inc_unchecked(&vcc->stats->tx_err);
29999 dev_kfree_skb(skb);
30000 return -EINVAL;
30001 }
30002@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30003 break;
30004 default:
30005 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30006- atomic_inc(&vcc->stats->tx_err);
30007+ atomic_inc_unchecked(&vcc->stats->tx_err);
30008 dev_kfree_skb(skb);
30009 return -EINVAL;
30010 }
30011
30012 if (skb_shinfo(skb)->nr_frags != 0) {
30013 printk("%s: No scatter-gather yet.\n", card->name);
30014- atomic_inc(&vcc->stats->tx_err);
30015+ atomic_inc_unchecked(&vcc->stats->tx_err);
30016 dev_kfree_skb(skb);
30017 return -EINVAL;
30018 }
30019@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30020
30021 err = queue_skb(card, vc, skb, oam);
30022 if (err) {
30023- atomic_inc(&vcc->stats->tx_err);
30024+ atomic_inc_unchecked(&vcc->stats->tx_err);
30025 dev_kfree_skb(skb);
30026 return err;
30027 }
30028@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
30029 skb = dev_alloc_skb(64);
30030 if (!skb) {
30031 printk("%s: Out of memory in send_oam().\n", card->name);
30032- atomic_inc(&vcc->stats->tx_err);
30033+ atomic_inc_unchecked(&vcc->stats->tx_err);
30034 return -ENOMEM;
30035 }
30036 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
30037diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
30038index b2c1b37..faa672b 100644
30039--- a/drivers/atm/iphase.c
30040+++ b/drivers/atm/iphase.c
30041@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
30042 status = (u_short) (buf_desc_ptr->desc_mode);
30043 if (status & (RX_CER | RX_PTE | RX_OFL))
30044 {
30045- atomic_inc(&vcc->stats->rx_err);
30046+ atomic_inc_unchecked(&vcc->stats->rx_err);
30047 IF_ERR(printk("IA: bad packet, dropping it");)
30048 if (status & RX_CER) {
30049 IF_ERR(printk(" cause: packet CRC error\n");)
30050@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
30051 len = dma_addr - buf_addr;
30052 if (len > iadev->rx_buf_sz) {
30053 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
30054- atomic_inc(&vcc->stats->rx_err);
30055+ atomic_inc_unchecked(&vcc->stats->rx_err);
30056 goto out_free_desc;
30057 }
30058
30059@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30060 ia_vcc = INPH_IA_VCC(vcc);
30061 if (ia_vcc == NULL)
30062 {
30063- atomic_inc(&vcc->stats->rx_err);
30064+ atomic_inc_unchecked(&vcc->stats->rx_err);
30065 dev_kfree_skb_any(skb);
30066 atm_return(vcc, atm_guess_pdu2truesize(len));
30067 goto INCR_DLE;
30068@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30069 if ((length > iadev->rx_buf_sz) || (length >
30070 (skb->len - sizeof(struct cpcs_trailer))))
30071 {
30072- atomic_inc(&vcc->stats->rx_err);
30073+ atomic_inc_unchecked(&vcc->stats->rx_err);
30074 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
30075 length, skb->len);)
30076 dev_kfree_skb_any(skb);
30077@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
30078
30079 IF_RX(printk("rx_dle_intr: skb push");)
30080 vcc->push(vcc,skb);
30081- atomic_inc(&vcc->stats->rx);
30082+ atomic_inc_unchecked(&vcc->stats->rx);
30083 iadev->rx_pkt_cnt++;
30084 }
30085 INCR_DLE:
30086@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
30087 {
30088 struct k_sonet_stats *stats;
30089 stats = &PRIV(_ia_dev[board])->sonet_stats;
30090- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
30091- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
30092- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
30093- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
30094- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
30095- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
30096- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
30097- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
30098- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
30099+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
30100+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
30101+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
30102+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
30103+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
30104+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
30105+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
30106+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
30107+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
30108 }
30109 ia_cmds.status = 0;
30110 break;
30111@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30112 if ((desc == 0) || (desc > iadev->num_tx_desc))
30113 {
30114 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
30115- atomic_inc(&vcc->stats->tx);
30116+ atomic_inc_unchecked(&vcc->stats->tx);
30117 if (vcc->pop)
30118 vcc->pop(vcc, skb);
30119 else
30120@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
30121 ATM_DESC(skb) = vcc->vci;
30122 skb_queue_tail(&iadev->tx_dma_q, skb);
30123
30124- atomic_inc(&vcc->stats->tx);
30125+ atomic_inc_unchecked(&vcc->stats->tx);
30126 iadev->tx_pkt_cnt++;
30127 /* Increment transaction counter */
30128 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
30129
30130 #if 0
30131 /* add flow control logic */
30132- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
30133+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
30134 if (iavcc->vc_desc_cnt > 10) {
30135 vcc->tx_quota = vcc->tx_quota * 3 / 4;
30136 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
30137diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
30138index cf97c34..8d30655 100644
30139--- a/drivers/atm/lanai.c
30140+++ b/drivers/atm/lanai.c
30141@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
30142 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
30143 lanai_endtx(lanai, lvcc);
30144 lanai_free_skb(lvcc->tx.atmvcc, skb);
30145- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
30146+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
30147 }
30148
30149 /* Try to fill the buffer - don't call unless there is backlog */
30150@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
30151 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
30152 __net_timestamp(skb);
30153 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
30154- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
30155+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
30156 out:
30157 lvcc->rx.buf.ptr = end;
30158 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
30159@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30160 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
30161 "vcc %d\n", lanai->number, (unsigned int) s, vci);
30162 lanai->stats.service_rxnotaal5++;
30163- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30164+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30165 return 0;
30166 }
30167 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
30168@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30169 int bytes;
30170 read_unlock(&vcc_sklist_lock);
30171 DPRINTK("got trashed rx pdu on vci %d\n", vci);
30172- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30173+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30174 lvcc->stats.x.aal5.service_trash++;
30175 bytes = (SERVICE_GET_END(s) * 16) -
30176 (((unsigned long) lvcc->rx.buf.ptr) -
30177@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30178 }
30179 if (s & SERVICE_STREAM) {
30180 read_unlock(&vcc_sklist_lock);
30181- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30182+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30183 lvcc->stats.x.aal5.service_stream++;
30184 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
30185 "PDU on VCI %d!\n", lanai->number, vci);
30186@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
30187 return 0;
30188 }
30189 DPRINTK("got rx crc error on vci %d\n", vci);
30190- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
30191+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
30192 lvcc->stats.x.aal5.service_rxcrc++;
30193 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
30194 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
30195diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
30196index 3da804b..d3b0eed 100644
30197--- a/drivers/atm/nicstar.c
30198+++ b/drivers/atm/nicstar.c
30199@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30200 if ((vc = (vc_map *) vcc->dev_data) == NULL)
30201 {
30202 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
30203- atomic_inc(&vcc->stats->tx_err);
30204+ atomic_inc_unchecked(&vcc->stats->tx_err);
30205 dev_kfree_skb_any(skb);
30206 return -EINVAL;
30207 }
30208@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30209 if (!vc->tx)
30210 {
30211 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
30212- atomic_inc(&vcc->stats->tx_err);
30213+ atomic_inc_unchecked(&vcc->stats->tx_err);
30214 dev_kfree_skb_any(skb);
30215 return -EINVAL;
30216 }
30217@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30218 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
30219 {
30220 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30221- atomic_inc(&vcc->stats->tx_err);
30222+ atomic_inc_unchecked(&vcc->stats->tx_err);
30223 dev_kfree_skb_any(skb);
30224 return -EINVAL;
30225 }
30226@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30227 if (skb_shinfo(skb)->nr_frags != 0)
30228 {
30229 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30230- atomic_inc(&vcc->stats->tx_err);
30231+ atomic_inc_unchecked(&vcc->stats->tx_err);
30232 dev_kfree_skb_any(skb);
30233 return -EINVAL;
30234 }
30235@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30236
30237 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30238 {
30239- atomic_inc(&vcc->stats->tx_err);
30240+ atomic_inc_unchecked(&vcc->stats->tx_err);
30241 dev_kfree_skb_any(skb);
30242 return -EIO;
30243 }
30244- atomic_inc(&vcc->stats->tx);
30245+ atomic_inc_unchecked(&vcc->stats->tx);
30246
30247 return 0;
30248 }
30249@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30250 {
30251 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30252 card->index);
30253- atomic_add(i,&vcc->stats->rx_drop);
30254+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
30255 break;
30256 }
30257 if (!atm_charge(vcc, sb->truesize))
30258 {
30259 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30260 card->index);
30261- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30262+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30263 dev_kfree_skb_any(sb);
30264 break;
30265 }
30266@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30267 ATM_SKB(sb)->vcc = vcc;
30268 __net_timestamp(sb);
30269 vcc->push(vcc, sb);
30270- atomic_inc(&vcc->stats->rx);
30271+ atomic_inc_unchecked(&vcc->stats->rx);
30272 cell += ATM_CELL_PAYLOAD;
30273 }
30274
30275@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30276 if (iovb == NULL)
30277 {
30278 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30279- atomic_inc(&vcc->stats->rx_drop);
30280+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30281 recycle_rx_buf(card, skb);
30282 return;
30283 }
30284@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30285 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30286 {
30287 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30288- atomic_inc(&vcc->stats->rx_err);
30289+ atomic_inc_unchecked(&vcc->stats->rx_err);
30290 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30291 NS_SKB(iovb)->iovcnt = 0;
30292 iovb->len = 0;
30293@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30294 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30295 card->index);
30296 which_list(card, skb);
30297- atomic_inc(&vcc->stats->rx_err);
30298+ atomic_inc_unchecked(&vcc->stats->rx_err);
30299 recycle_rx_buf(card, skb);
30300 vc->rx_iov = NULL;
30301 recycle_iov_buf(card, iovb);
30302@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30303 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30304 card->index);
30305 which_list(card, skb);
30306- atomic_inc(&vcc->stats->rx_err);
30307+ atomic_inc_unchecked(&vcc->stats->rx_err);
30308 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30309 NS_SKB(iovb)->iovcnt);
30310 vc->rx_iov = NULL;
30311@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30312 printk(" - PDU size mismatch.\n");
30313 else
30314 printk(".\n");
30315- atomic_inc(&vcc->stats->rx_err);
30316+ atomic_inc_unchecked(&vcc->stats->rx_err);
30317 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30318 NS_SKB(iovb)->iovcnt);
30319 vc->rx_iov = NULL;
30320@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30321 if (!atm_charge(vcc, skb->truesize))
30322 {
30323 push_rxbufs(card, skb);
30324- atomic_inc(&vcc->stats->rx_drop);
30325+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30326 }
30327 else
30328 {
30329@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30330 ATM_SKB(skb)->vcc = vcc;
30331 __net_timestamp(skb);
30332 vcc->push(vcc, skb);
30333- atomic_inc(&vcc->stats->rx);
30334+ atomic_inc_unchecked(&vcc->stats->rx);
30335 }
30336 }
30337 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30338@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30339 if (!atm_charge(vcc, sb->truesize))
30340 {
30341 push_rxbufs(card, sb);
30342- atomic_inc(&vcc->stats->rx_drop);
30343+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30344 }
30345 else
30346 {
30347@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30348 ATM_SKB(sb)->vcc = vcc;
30349 __net_timestamp(sb);
30350 vcc->push(vcc, sb);
30351- atomic_inc(&vcc->stats->rx);
30352+ atomic_inc_unchecked(&vcc->stats->rx);
30353 }
30354
30355 push_rxbufs(card, skb);
30356@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30357 if (!atm_charge(vcc, skb->truesize))
30358 {
30359 push_rxbufs(card, skb);
30360- atomic_inc(&vcc->stats->rx_drop);
30361+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30362 }
30363 else
30364 {
30365@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30366 ATM_SKB(skb)->vcc = vcc;
30367 __net_timestamp(skb);
30368 vcc->push(vcc, skb);
30369- atomic_inc(&vcc->stats->rx);
30370+ atomic_inc_unchecked(&vcc->stats->rx);
30371 }
30372
30373 push_rxbufs(card, sb);
30374@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30375 if (hb == NULL)
30376 {
30377 printk("nicstar%d: Out of huge buffers.\n", card->index);
30378- atomic_inc(&vcc->stats->rx_drop);
30379+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30380 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30381 NS_SKB(iovb)->iovcnt);
30382 vc->rx_iov = NULL;
30383@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30384 }
30385 else
30386 dev_kfree_skb_any(hb);
30387- atomic_inc(&vcc->stats->rx_drop);
30388+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30389 }
30390 else
30391 {
30392@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30393 #endif /* NS_USE_DESTRUCTORS */
30394 __net_timestamp(hb);
30395 vcc->push(vcc, hb);
30396- atomic_inc(&vcc->stats->rx);
30397+ atomic_inc_unchecked(&vcc->stats->rx);
30398 }
30399 }
30400
30401diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30402index 84c93ff..e6ed269 100644
30403--- a/drivers/atm/solos-pci.c
30404+++ b/drivers/atm/solos-pci.c
30405@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30406 }
30407 atm_charge(vcc, skb->truesize);
30408 vcc->push(vcc, skb);
30409- atomic_inc(&vcc->stats->rx);
30410+ atomic_inc_unchecked(&vcc->stats->rx);
30411 break;
30412
30413 case PKT_STATUS:
30414@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30415 char msg[500];
30416 char item[10];
30417
30418+ pax_track_stack();
30419+
30420 len = buf->len;
30421 for (i = 0; i < len; i++){
30422 if(i % 8 == 0)
30423@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30424 vcc = SKB_CB(oldskb)->vcc;
30425
30426 if (vcc) {
30427- atomic_inc(&vcc->stats->tx);
30428+ atomic_inc_unchecked(&vcc->stats->tx);
30429 solos_pop(vcc, oldskb);
30430 } else
30431 dev_kfree_skb_irq(oldskb);
30432diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30433index 6dd3f59..ee377f3 100644
30434--- a/drivers/atm/suni.c
30435+++ b/drivers/atm/suni.c
30436@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30437
30438
30439 #define ADD_LIMITED(s,v) \
30440- atomic_add((v),&stats->s); \
30441- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30442+ atomic_add_unchecked((v),&stats->s); \
30443+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30444
30445
30446 static void suni_hz(unsigned long from_timer)
30447diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30448index fc8cb07..4a80e53 100644
30449--- a/drivers/atm/uPD98402.c
30450+++ b/drivers/atm/uPD98402.c
30451@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30452 struct sonet_stats tmp;
30453 int error = 0;
30454
30455- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30456+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30457 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30458 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30459 if (zero && !error) {
30460@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30461
30462
30463 #define ADD_LIMITED(s,v) \
30464- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30465- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30466- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30467+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30468+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30469+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30470
30471
30472 static void stat_event(struct atm_dev *dev)
30473@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30474 if (reason & uPD98402_INT_PFM) stat_event(dev);
30475 if (reason & uPD98402_INT_PCO) {
30476 (void) GET(PCOCR); /* clear interrupt cause */
30477- atomic_add(GET(HECCT),
30478+ atomic_add_unchecked(GET(HECCT),
30479 &PRIV(dev)->sonet_stats.uncorr_hcs);
30480 }
30481 if ((reason & uPD98402_INT_RFO) &&
30482@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30483 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30484 uPD98402_INT_LOS),PIMR); /* enable them */
30485 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30486- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30487- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30488- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30489+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30490+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30491+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30492 return 0;
30493 }
30494
30495diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30496index 2e9635b..32927b4 100644
30497--- a/drivers/atm/zatm.c
30498+++ b/drivers/atm/zatm.c
30499@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30500 }
30501 if (!size) {
30502 dev_kfree_skb_irq(skb);
30503- if (vcc) atomic_inc(&vcc->stats->rx_err);
30504+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30505 continue;
30506 }
30507 if (!atm_charge(vcc,skb->truesize)) {
30508@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30509 skb->len = size;
30510 ATM_SKB(skb)->vcc = vcc;
30511 vcc->push(vcc,skb);
30512- atomic_inc(&vcc->stats->rx);
30513+ atomic_inc_unchecked(&vcc->stats->rx);
30514 }
30515 zout(pos & 0xffff,MTA(mbx));
30516 #if 0 /* probably a stupid idea */
30517@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30518 skb_queue_head(&zatm_vcc->backlog,skb);
30519 break;
30520 }
30521- atomic_inc(&vcc->stats->tx);
30522+ atomic_inc_unchecked(&vcc->stats->tx);
30523 wake_up(&zatm_vcc->tx_wait);
30524 }
30525
30526diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30527index 63c143e..fece183 100644
30528--- a/drivers/base/bus.c
30529+++ b/drivers/base/bus.c
30530@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30531 return ret;
30532 }
30533
30534-static struct sysfs_ops driver_sysfs_ops = {
30535+static const struct sysfs_ops driver_sysfs_ops = {
30536 .show = drv_attr_show,
30537 .store = drv_attr_store,
30538 };
30539@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30540 return ret;
30541 }
30542
30543-static struct sysfs_ops bus_sysfs_ops = {
30544+static const struct sysfs_ops bus_sysfs_ops = {
30545 .show = bus_attr_show,
30546 .store = bus_attr_store,
30547 };
30548@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30549 return 0;
30550 }
30551
30552-static struct kset_uevent_ops bus_uevent_ops = {
30553+static const struct kset_uevent_ops bus_uevent_ops = {
30554 .filter = bus_uevent_filter,
30555 };
30556
30557diff --git a/drivers/base/class.c b/drivers/base/class.c
30558index 6e2c3b0..cb61871 100644
30559--- a/drivers/base/class.c
30560+++ b/drivers/base/class.c
30561@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30562 kfree(cp);
30563 }
30564
30565-static struct sysfs_ops class_sysfs_ops = {
30566+static const struct sysfs_ops class_sysfs_ops = {
30567 .show = class_attr_show,
30568 .store = class_attr_store,
30569 };
30570diff --git a/drivers/base/core.c b/drivers/base/core.c
30571index f33d768..a9358d0 100644
30572--- a/drivers/base/core.c
30573+++ b/drivers/base/core.c
30574@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30575 return ret;
30576 }
30577
30578-static struct sysfs_ops dev_sysfs_ops = {
30579+static const struct sysfs_ops dev_sysfs_ops = {
30580 .show = dev_attr_show,
30581 .store = dev_attr_store,
30582 };
30583@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30584 return retval;
30585 }
30586
30587-static struct kset_uevent_ops device_uevent_ops = {
30588+static const struct kset_uevent_ops device_uevent_ops = {
30589 .filter = dev_uevent_filter,
30590 .name = dev_uevent_name,
30591 .uevent = dev_uevent,
30592diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30593index 989429c..2272b00 100644
30594--- a/drivers/base/memory.c
30595+++ b/drivers/base/memory.c
30596@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30597 return retval;
30598 }
30599
30600-static struct kset_uevent_ops memory_uevent_ops = {
30601+static const struct kset_uevent_ops memory_uevent_ops = {
30602 .name = memory_uevent_name,
30603 .uevent = memory_uevent,
30604 };
30605diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30606index 3f202f7..61c4a6f 100644
30607--- a/drivers/base/sys.c
30608+++ b/drivers/base/sys.c
30609@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30610 return -EIO;
30611 }
30612
30613-static struct sysfs_ops sysfs_ops = {
30614+static const struct sysfs_ops sysfs_ops = {
30615 .show = sysdev_show,
30616 .store = sysdev_store,
30617 };
30618@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30619 return -EIO;
30620 }
30621
30622-static struct sysfs_ops sysfs_class_ops = {
30623+static const struct sysfs_ops sysfs_class_ops = {
30624 .show = sysdev_class_show,
30625 .store = sysdev_class_store,
30626 };
30627diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30628index eb4fa19..1954777 100644
30629--- a/drivers/block/DAC960.c
30630+++ b/drivers/block/DAC960.c
30631@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30632 unsigned long flags;
30633 int Channel, TargetID;
30634
30635+ pax_track_stack();
30636+
30637 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30638 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30639 sizeof(DAC960_SCSI_Inquiry_T) +
30640diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30641index 68b90d9..7e2e3f3 100644
30642--- a/drivers/block/cciss.c
30643+++ b/drivers/block/cciss.c
30644@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30645 int err;
30646 u32 cp;
30647
30648+ memset(&arg64, 0, sizeof(arg64));
30649+
30650 err = 0;
30651 err |=
30652 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30653@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30654 /* Wait (up to 20 seconds) for a command to complete */
30655
30656 for (i = 20 * HZ; i > 0; i--) {
30657- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30658+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30659 if (done == FIFO_EMPTY)
30660 schedule_timeout_uninterruptible(1);
30661 else
30662@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30663 resend_cmd1:
30664
30665 /* Disable interrupt on the board. */
30666- h->access.set_intr_mask(h, CCISS_INTR_OFF);
30667+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
30668
30669 /* Make sure there is room in the command FIFO */
30670 /* Actually it should be completely empty at this time */
30671@@ -2884,13 +2886,13 @@ resend_cmd1:
30672 /* tape side of the driver. */
30673 for (i = 200000; i > 0; i--) {
30674 /* if fifo isn't full go */
30675- if (!(h->access.fifo_full(h)))
30676+ if (!(h->access->fifo_full(h)))
30677 break;
30678 udelay(10);
30679 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30680 " waiting!\n", h->ctlr);
30681 }
30682- h->access.submit_command(h, c); /* Send the cmd */
30683+ h->access->submit_command(h, c); /* Send the cmd */
30684 do {
30685 complete = pollcomplete(h->ctlr);
30686
30687@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30688 while (!hlist_empty(&h->reqQ)) {
30689 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30690 /* can't do anything if fifo is full */
30691- if ((h->access.fifo_full(h))) {
30692+ if ((h->access->fifo_full(h))) {
30693 printk(KERN_WARNING "cciss: fifo full\n");
30694 break;
30695 }
30696@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30697 h->Qdepth--;
30698
30699 /* Tell the controller execute command */
30700- h->access.submit_command(h, c);
30701+ h->access->submit_command(h, c);
30702
30703 /* Put job onto the completed Q */
30704 addQ(&h->cmpQ, c);
30705@@ -3393,17 +3395,17 @@ startio:
30706
30707 static inline unsigned long get_next_completion(ctlr_info_t *h)
30708 {
30709- return h->access.command_completed(h);
30710+ return h->access->command_completed(h);
30711 }
30712
30713 static inline int interrupt_pending(ctlr_info_t *h)
30714 {
30715- return h->access.intr_pending(h);
30716+ return h->access->intr_pending(h);
30717 }
30718
30719 static inline long interrupt_not_for_us(ctlr_info_t *h)
30720 {
30721- return (((h->access.intr_pending(h) == 0) ||
30722+ return (((h->access->intr_pending(h) == 0) ||
30723 (h->interrupts_enabled == 0)));
30724 }
30725
30726@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30727 */
30728 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30729 c->product_name = products[prod_index].product_name;
30730- c->access = *(products[prod_index].access);
30731+ c->access = products[prod_index].access;
30732 c->nr_cmds = c->max_commands - 4;
30733 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30734 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30735@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30736 }
30737
30738 /* make sure the board interrupts are off */
30739- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30740+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30741 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30742 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30743 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30744@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30745 cciss_scsi_setup(i);
30746
30747 /* Turn the interrupts on so we can service requests */
30748- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30749+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30750
30751 /* Get the firmware version */
30752 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30753diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30754index 04d6bf8..36e712d 100644
30755--- a/drivers/block/cciss.h
30756+++ b/drivers/block/cciss.h
30757@@ -90,7 +90,7 @@ struct ctlr_info
30758 // information about each logical volume
30759 drive_info_struct *drv[CISS_MAX_LUN];
30760
30761- struct access_method access;
30762+ struct access_method *access;
30763
30764 /* queue and queue Info */
30765 struct hlist_head reqQ;
30766diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30767index 6422651..bb1bdef 100644
30768--- a/drivers/block/cpqarray.c
30769+++ b/drivers/block/cpqarray.c
30770@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30771 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30772 goto Enomem4;
30773 }
30774- hba[i]->access.set_intr_mask(hba[i], 0);
30775+ hba[i]->access->set_intr_mask(hba[i], 0);
30776 if (request_irq(hba[i]->intr, do_ida_intr,
30777 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30778 {
30779@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30780 add_timer(&hba[i]->timer);
30781
30782 /* Enable IRQ now that spinlock and rate limit timer are set up */
30783- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30784+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30785
30786 for(j=0; j<NWD; j++) {
30787 struct gendisk *disk = ida_gendisk[i][j];
30788@@ -695,7 +695,7 @@ DBGINFO(
30789 for(i=0; i<NR_PRODUCTS; i++) {
30790 if (board_id == products[i].board_id) {
30791 c->product_name = products[i].product_name;
30792- c->access = *(products[i].access);
30793+ c->access = products[i].access;
30794 break;
30795 }
30796 }
30797@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30798 hba[ctlr]->intr = intr;
30799 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30800 hba[ctlr]->product_name = products[j].product_name;
30801- hba[ctlr]->access = *(products[j].access);
30802+ hba[ctlr]->access = products[j].access;
30803 hba[ctlr]->ctlr = ctlr;
30804 hba[ctlr]->board_id = board_id;
30805 hba[ctlr]->pci_dev = NULL; /* not PCI */
30806@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30807 struct scatterlist tmp_sg[SG_MAX];
30808 int i, dir, seg;
30809
30810+ pax_track_stack();
30811+
30812 if (blk_queue_plugged(q))
30813 goto startio;
30814
30815@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30816
30817 while((c = h->reqQ) != NULL) {
30818 /* Can't do anything if we're busy */
30819- if (h->access.fifo_full(h) == 0)
30820+ if (h->access->fifo_full(h) == 0)
30821 return;
30822
30823 /* Get the first entry from the request Q */
30824@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30825 h->Qdepth--;
30826
30827 /* Tell the controller to do our bidding */
30828- h->access.submit_command(h, c);
30829+ h->access->submit_command(h, c);
30830
30831 /* Get onto the completion Q */
30832 addQ(&h->cmpQ, c);
30833@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30834 unsigned long flags;
30835 __u32 a,a1;
30836
30837- istat = h->access.intr_pending(h);
30838+ istat = h->access->intr_pending(h);
30839 /* Is this interrupt for us? */
30840 if (istat == 0)
30841 return IRQ_NONE;
30842@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30843 */
30844 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30845 if (istat & FIFO_NOT_EMPTY) {
30846- while((a = h->access.command_completed(h))) {
30847+ while((a = h->access->command_completed(h))) {
30848 a1 = a; a &= ~3;
30849 if ((c = h->cmpQ) == NULL)
30850 {
30851@@ -1434,11 +1436,11 @@ static int sendcmd(
30852 /*
30853 * Disable interrupt
30854 */
30855- info_p->access.set_intr_mask(info_p, 0);
30856+ info_p->access->set_intr_mask(info_p, 0);
30857 /* Make sure there is room in the command FIFO */
30858 /* Actually it should be completely empty at this time. */
30859 for (i = 200000; i > 0; i--) {
30860- temp = info_p->access.fifo_full(info_p);
30861+ temp = info_p->access->fifo_full(info_p);
30862 if (temp != 0) {
30863 break;
30864 }
30865@@ -1451,7 +1453,7 @@ DBG(
30866 /*
30867 * Send the cmd
30868 */
30869- info_p->access.submit_command(info_p, c);
30870+ info_p->access->submit_command(info_p, c);
30871 complete = pollcomplete(ctlr);
30872
30873 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30874@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30875 * we check the new geometry. Then turn interrupts back on when
30876 * we're done.
30877 */
30878- host->access.set_intr_mask(host, 0);
30879+ host->access->set_intr_mask(host, 0);
30880 getgeometry(ctlr);
30881- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30882+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30883
30884 for(i=0; i<NWD; i++) {
30885 struct gendisk *disk = ida_gendisk[ctlr][i];
30886@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30887 /* Wait (up to 2 seconds) for a command to complete */
30888
30889 for (i = 200000; i > 0; i--) {
30890- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30891+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30892 if (done == 0) {
30893 udelay(10); /* a short fixed delay */
30894 } else
30895diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30896index be73e9d..7fbf140 100644
30897--- a/drivers/block/cpqarray.h
30898+++ b/drivers/block/cpqarray.h
30899@@ -99,7 +99,7 @@ struct ctlr_info {
30900 drv_info_t drv[NWD];
30901 struct proc_dir_entry *proc;
30902
30903- struct access_method access;
30904+ struct access_method *access;
30905
30906 cmdlist_t *reqQ;
30907 cmdlist_t *cmpQ;
30908diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30909index 8ec2d70..2804b30 100644
30910--- a/drivers/block/loop.c
30911+++ b/drivers/block/loop.c
30912@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30913 mm_segment_t old_fs = get_fs();
30914
30915 set_fs(get_ds());
30916- bw = file->f_op->write(file, buf, len, &pos);
30917+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30918 set_fs(old_fs);
30919 if (likely(bw == len))
30920 return 0;
30921diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30922index 26ada47..083c480 100644
30923--- a/drivers/block/nbd.c
30924+++ b/drivers/block/nbd.c
30925@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30926 struct kvec iov;
30927 sigset_t blocked, oldset;
30928
30929+ pax_track_stack();
30930+
30931 if (unlikely(!sock)) {
30932 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30933 lo->disk->disk_name, (send ? "send" : "recv"));
30934@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30935 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30936 unsigned int cmd, unsigned long arg)
30937 {
30938+ pax_track_stack();
30939+
30940 switch (cmd) {
30941 case NBD_DISCONNECT: {
30942 struct request sreq;
30943diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30944index a5d585d..d087be3 100644
30945--- a/drivers/block/pktcdvd.c
30946+++ b/drivers/block/pktcdvd.c
30947@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30948 return len;
30949 }
30950
30951-static struct sysfs_ops kobj_pkt_ops = {
30952+static const struct sysfs_ops kobj_pkt_ops = {
30953 .show = kobj_pkt_show,
30954 .store = kobj_pkt_store
30955 };
30956diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
30957index 59cccc9..a4592ec 100644
30958--- a/drivers/cdrom/cdrom.c
30959+++ b/drivers/cdrom/cdrom.c
30960@@ -2057,11 +2057,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
30961 if (!nr)
30962 return -ENOMEM;
30963
30964- if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
30965- ret = -EFAULT;
30966- goto out;
30967- }
30968-
30969 cgc.data_direction = CGC_DATA_READ;
30970 while (nframes > 0) {
30971 if (nr > nframes)
30972@@ -2070,7 +2065,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
30973 ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
30974 if (ret)
30975 break;
30976- if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
30977+ if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
30978 ret = -EFAULT;
30979 break;
30980 }
30981@@ -2078,7 +2073,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
30982 nframes -= nr;
30983 lba += nr;
30984 }
30985-out:
30986 kfree(cgc.buffer);
30987 return ret;
30988 }
30989diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30990index 6aad99e..89cd142 100644
30991--- a/drivers/char/Kconfig
30992+++ b/drivers/char/Kconfig
30993@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30994
30995 config DEVKMEM
30996 bool "/dev/kmem virtual device support"
30997- default y
30998+ default n
30999+ depends on !GRKERNSEC_KMEM
31000 help
31001 Say Y here if you want to support the /dev/kmem device. The
31002 /dev/kmem device is rarely used, but can be used for certain
31003@@ -1114,6 +1115,7 @@ config DEVPORT
31004 bool
31005 depends on !M68K
31006 depends on ISA || PCI
31007+ depends on !GRKERNSEC_KMEM
31008 default y
31009
31010 source "drivers/s390/char/Kconfig"
31011diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31012index a96f319..a778a5b 100644
31013--- a/drivers/char/agp/frontend.c
31014+++ b/drivers/char/agp/frontend.c
31015@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31016 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31017 return -EFAULT;
31018
31019- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31020+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31021 return -EFAULT;
31022
31023 client = agp_find_client_by_pid(reserve.pid);
31024diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
31025index d8cff90..9628e70 100644
31026--- a/drivers/char/briq_panel.c
31027+++ b/drivers/char/briq_panel.c
31028@@ -10,6 +10,7 @@
31029 #include <linux/types.h>
31030 #include <linux/errno.h>
31031 #include <linux/tty.h>
31032+#include <linux/mutex.h>
31033 #include <linux/timer.h>
31034 #include <linux/kernel.h>
31035 #include <linux/wait.h>
31036@@ -36,6 +37,7 @@ static int vfd_is_open;
31037 static unsigned char vfd[40];
31038 static int vfd_cursor;
31039 static unsigned char ledpb, led;
31040+static DEFINE_MUTEX(vfd_mutex);
31041
31042 static void update_vfd(void)
31043 {
31044@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31045 if (!vfd_is_open)
31046 return -EBUSY;
31047
31048+ mutex_lock(&vfd_mutex);
31049 for (;;) {
31050 char c;
31051 if (!indx)
31052 break;
31053- if (get_user(c, buf))
31054+ if (get_user(c, buf)) {
31055+ mutex_unlock(&vfd_mutex);
31056 return -EFAULT;
31057+ }
31058 if (esc) {
31059 set_led(c);
31060 esc = 0;
31061@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31062 buf++;
31063 }
31064 update_vfd();
31065+ mutex_unlock(&vfd_mutex);
31066
31067 return len;
31068 }
31069diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
31070index 31e7c91..161afc0 100644
31071--- a/drivers/char/genrtc.c
31072+++ b/drivers/char/genrtc.c
31073@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
31074 switch (cmd) {
31075
31076 case RTC_PLL_GET:
31077+ memset(&pll, 0, sizeof(pll));
31078 if (get_rtc_pll(&pll))
31079 return -EINVAL;
31080 else
31081diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
31082index 006466d..a2bb21c 100644
31083--- a/drivers/char/hpet.c
31084+++ b/drivers/char/hpet.c
31085@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
31086 return 0;
31087 }
31088
31089-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
31090+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
31091
31092 static int
31093 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
31094@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
31095 }
31096
31097 static int
31098-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31099+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
31100 {
31101 struct hpet_timer __iomem *timer;
31102 struct hpet __iomem *hpet;
31103@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
31104 {
31105 struct hpet_info info;
31106
31107+ memset(&info, 0, sizeof(info));
31108+
31109 if (devp->hd_ireqfreq)
31110 info.hi_ireqfreq =
31111 hpet_time_div(hpetp, devp->hd_ireqfreq);
31112- else
31113- info.hi_ireqfreq = 0;
31114 info.hi_flags =
31115 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
31116 info.hi_hpet = hpetp->hp_which;
31117diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
31118index 0afc8b8..6913fc3 100644
31119--- a/drivers/char/hvc_beat.c
31120+++ b/drivers/char/hvc_beat.c
31121@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
31122 return cnt;
31123 }
31124
31125-static struct hv_ops hvc_beat_get_put_ops = {
31126+static const struct hv_ops hvc_beat_get_put_ops = {
31127 .get_chars = hvc_beat_get_chars,
31128 .put_chars = hvc_beat_put_chars,
31129 };
31130diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
31131index 98097f2..407dddc 100644
31132--- a/drivers/char/hvc_console.c
31133+++ b/drivers/char/hvc_console.c
31134@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
31135 * console interfaces but can still be used as a tty device. This has to be
31136 * static because kmalloc will not work during early console init.
31137 */
31138-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31139+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
31140 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
31141 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
31142
31143@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
31144 * vty adapters do NOT get an hvc_instantiate() callback since they
31145 * appear after early console init.
31146 */
31147-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
31148+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
31149 {
31150 struct hvc_struct *hp;
31151
31152@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
31153 };
31154
31155 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
31156- struct hv_ops *ops, int outbuf_size)
31157+ const struct hv_ops *ops, int outbuf_size)
31158 {
31159 struct hvc_struct *hp;
31160 int i;
31161diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
31162index 10950ca..ed176c3 100644
31163--- a/drivers/char/hvc_console.h
31164+++ b/drivers/char/hvc_console.h
31165@@ -55,7 +55,7 @@ struct hvc_struct {
31166 int outbuf_size;
31167 int n_outbuf;
31168 uint32_t vtermno;
31169- struct hv_ops *ops;
31170+ const struct hv_ops *ops;
31171 int irq_requested;
31172 int data;
31173 struct winsize ws;
31174@@ -76,11 +76,11 @@ struct hv_ops {
31175 };
31176
31177 /* Register a vterm and a slot index for use as a console (console_init) */
31178-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
31179+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
31180
31181 /* register a vterm for hvc tty operation (module_init or hotplug add) */
31182 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
31183- struct hv_ops *ops, int outbuf_size);
31184+ const struct hv_ops *ops, int outbuf_size);
31185 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
31186 extern int hvc_remove(struct hvc_struct *hp);
31187
31188diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
31189index 936d05b..fd02426 100644
31190--- a/drivers/char/hvc_iseries.c
31191+++ b/drivers/char/hvc_iseries.c
31192@@ -197,7 +197,7 @@ done:
31193 return sent;
31194 }
31195
31196-static struct hv_ops hvc_get_put_ops = {
31197+static const struct hv_ops hvc_get_put_ops = {
31198 .get_chars = get_chars,
31199 .put_chars = put_chars,
31200 .notifier_add = notifier_add_irq,
31201diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
31202index b0e168f..69cda2a 100644
31203--- a/drivers/char/hvc_iucv.c
31204+++ b/drivers/char/hvc_iucv.c
31205@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
31206
31207
31208 /* HVC operations */
31209-static struct hv_ops hvc_iucv_ops = {
31210+static const struct hv_ops hvc_iucv_ops = {
31211 .get_chars = hvc_iucv_get_chars,
31212 .put_chars = hvc_iucv_put_chars,
31213 .notifier_add = hvc_iucv_notifier_add,
31214diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
31215index 88590d0..61c4a61 100644
31216--- a/drivers/char/hvc_rtas.c
31217+++ b/drivers/char/hvc_rtas.c
31218@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
31219 return i;
31220 }
31221
31222-static struct hv_ops hvc_rtas_get_put_ops = {
31223+static const struct hv_ops hvc_rtas_get_put_ops = {
31224 .get_chars = hvc_rtas_read_console,
31225 .put_chars = hvc_rtas_write_console,
31226 };
31227diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
31228index bd63ba8..b0957e6 100644
31229--- a/drivers/char/hvc_udbg.c
31230+++ b/drivers/char/hvc_udbg.c
31231@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
31232 return i;
31233 }
31234
31235-static struct hv_ops hvc_udbg_ops = {
31236+static const struct hv_ops hvc_udbg_ops = {
31237 .get_chars = hvc_udbg_get,
31238 .put_chars = hvc_udbg_put,
31239 };
31240diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
31241index 10be343..27370e9 100644
31242--- a/drivers/char/hvc_vio.c
31243+++ b/drivers/char/hvc_vio.c
31244@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
31245 return got;
31246 }
31247
31248-static struct hv_ops hvc_get_put_ops = {
31249+static const struct hv_ops hvc_get_put_ops = {
31250 .get_chars = filtered_get_chars,
31251 .put_chars = hvc_put_chars,
31252 .notifier_add = notifier_add_irq,
31253diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31254index a6ee32b..94f8c26 100644
31255--- a/drivers/char/hvc_xen.c
31256+++ b/drivers/char/hvc_xen.c
31257@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31258 return recv;
31259 }
31260
31261-static struct hv_ops hvc_ops = {
31262+static const struct hv_ops hvc_ops = {
31263 .get_chars = read_console,
31264 .put_chars = write_console,
31265 .notifier_add = notifier_add_irq,
31266diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31267index 266b858..f3ee0bb 100644
31268--- a/drivers/char/hvcs.c
31269+++ b/drivers/char/hvcs.c
31270@@ -82,6 +82,7 @@
31271 #include <asm/hvcserver.h>
31272 #include <asm/uaccess.h>
31273 #include <asm/vio.h>
31274+#include <asm/local.h>
31275
31276 /*
31277 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31278@@ -269,7 +270,7 @@ struct hvcs_struct {
31279 unsigned int index;
31280
31281 struct tty_struct *tty;
31282- int open_count;
31283+ local_t open_count;
31284
31285 /*
31286 * Used to tell the driver kernel_thread what operations need to take
31287@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31288
31289 spin_lock_irqsave(&hvcsd->lock, flags);
31290
31291- if (hvcsd->open_count > 0) {
31292+ if (local_read(&hvcsd->open_count) > 0) {
31293 spin_unlock_irqrestore(&hvcsd->lock, flags);
31294 printk(KERN_INFO "HVCS: vterm state unchanged. "
31295 "The hvcs device node is still in use.\n");
31296@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31297 if ((retval = hvcs_partner_connect(hvcsd)))
31298 goto error_release;
31299
31300- hvcsd->open_count = 1;
31301+ local_set(&hvcsd->open_count, 1);
31302 hvcsd->tty = tty;
31303 tty->driver_data = hvcsd;
31304
31305@@ -1169,7 +1170,7 @@ fast_open:
31306
31307 spin_lock_irqsave(&hvcsd->lock, flags);
31308 kref_get(&hvcsd->kref);
31309- hvcsd->open_count++;
31310+ local_inc(&hvcsd->open_count);
31311 hvcsd->todo_mask |= HVCS_SCHED_READ;
31312 spin_unlock_irqrestore(&hvcsd->lock, flags);
31313
31314@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31315 hvcsd = tty->driver_data;
31316
31317 spin_lock_irqsave(&hvcsd->lock, flags);
31318- if (--hvcsd->open_count == 0) {
31319+ if (local_dec_and_test(&hvcsd->open_count)) {
31320
31321 vio_disable_interrupts(hvcsd->vdev);
31322
31323@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31324 free_irq(irq, hvcsd);
31325 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31326 return;
31327- } else if (hvcsd->open_count < 0) {
31328+ } else if (local_read(&hvcsd->open_count) < 0) {
31329 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31330 " is missmanaged.\n",
31331- hvcsd->vdev->unit_address, hvcsd->open_count);
31332+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31333 }
31334
31335 spin_unlock_irqrestore(&hvcsd->lock, flags);
31336@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31337
31338 spin_lock_irqsave(&hvcsd->lock, flags);
31339 /* Preserve this so that we know how many kref refs to put */
31340- temp_open_count = hvcsd->open_count;
31341+ temp_open_count = local_read(&hvcsd->open_count);
31342
31343 /*
31344 * Don't kref put inside the spinlock because the destruction
31345@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31346 hvcsd->tty->driver_data = NULL;
31347 hvcsd->tty = NULL;
31348
31349- hvcsd->open_count = 0;
31350+ local_set(&hvcsd->open_count, 0);
31351
31352 /* This will drop any buffered data on the floor which is OK in a hangup
31353 * scenario. */
31354@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31355 * the middle of a write operation? This is a crummy place to do this
31356 * but we want to keep it all in the spinlock.
31357 */
31358- if (hvcsd->open_count <= 0) {
31359+ if (local_read(&hvcsd->open_count) <= 0) {
31360 spin_unlock_irqrestore(&hvcsd->lock, flags);
31361 return -ENODEV;
31362 }
31363@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31364 {
31365 struct hvcs_struct *hvcsd = tty->driver_data;
31366
31367- if (!hvcsd || hvcsd->open_count <= 0)
31368+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31369 return 0;
31370
31371 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31372diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31373index ec5e3f8..02455ba 100644
31374--- a/drivers/char/ipmi/ipmi_msghandler.c
31375+++ b/drivers/char/ipmi/ipmi_msghandler.c
31376@@ -414,7 +414,7 @@ struct ipmi_smi {
31377 struct proc_dir_entry *proc_dir;
31378 char proc_dir_name[10];
31379
31380- atomic_t stats[IPMI_NUM_STATS];
31381+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31382
31383 /*
31384 * run_to_completion duplicate of smb_info, smi_info
31385@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31386
31387
31388 #define ipmi_inc_stat(intf, stat) \
31389- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31390+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31391 #define ipmi_get_stat(intf, stat) \
31392- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31393+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31394
31395 static int is_lan_addr(struct ipmi_addr *addr)
31396 {
31397@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31398 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31399 init_waitqueue_head(&intf->waitq);
31400 for (i = 0; i < IPMI_NUM_STATS; i++)
31401- atomic_set(&intf->stats[i], 0);
31402+ atomic_set_unchecked(&intf->stats[i], 0);
31403
31404 intf->proc_dir = NULL;
31405
31406@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31407 struct ipmi_smi_msg smi_msg;
31408 struct ipmi_recv_msg recv_msg;
31409
31410+ pax_track_stack();
31411+
31412 si = (struct ipmi_system_interface_addr *) &addr;
31413 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31414 si->channel = IPMI_BMC_CHANNEL;
31415diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31416index abae8c9..8021979 100644
31417--- a/drivers/char/ipmi/ipmi_si_intf.c
31418+++ b/drivers/char/ipmi/ipmi_si_intf.c
31419@@ -277,7 +277,7 @@ struct smi_info {
31420 unsigned char slave_addr;
31421
31422 /* Counters and things for the proc filesystem. */
31423- atomic_t stats[SI_NUM_STATS];
31424+ atomic_unchecked_t stats[SI_NUM_STATS];
31425
31426 struct task_struct *thread;
31427
31428@@ -285,9 +285,9 @@ struct smi_info {
31429 };
31430
31431 #define smi_inc_stat(smi, stat) \
31432- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31433+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31434 #define smi_get_stat(smi, stat) \
31435- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31436+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31437
31438 #define SI_MAX_PARMS 4
31439
31440@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31441 atomic_set(&new_smi->req_events, 0);
31442 new_smi->run_to_completion = 0;
31443 for (i = 0; i < SI_NUM_STATS; i++)
31444- atomic_set(&new_smi->stats[i], 0);
31445+ atomic_set_unchecked(&new_smi->stats[i], 0);
31446
31447 new_smi->interrupt_disabled = 0;
31448 atomic_set(&new_smi->stop_operation, 0);
31449diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31450index 402838f..55e2200 100644
31451--- a/drivers/char/istallion.c
31452+++ b/drivers/char/istallion.c
31453@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31454 * re-used for each stats call.
31455 */
31456 static comstats_t stli_comstats;
31457-static combrd_t stli_brdstats;
31458 static struct asystats stli_cdkstats;
31459
31460 /*****************************************************************************/
31461@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31462 {
31463 struct stlibrd *brdp;
31464 unsigned int i;
31465+ combrd_t stli_brdstats;
31466
31467 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31468 return -EFAULT;
31469@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31470 struct stliport stli_dummyport;
31471 struct stliport *portp;
31472
31473+ pax_track_stack();
31474+
31475 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31476 return -EFAULT;
31477 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31478@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31479 struct stlibrd stli_dummybrd;
31480 struct stlibrd *brdp;
31481
31482+ pax_track_stack();
31483+
31484 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31485 return -EFAULT;
31486 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31487diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31488index 950837c..e55a288 100644
31489--- a/drivers/char/keyboard.c
31490+++ b/drivers/char/keyboard.c
31491@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31492 kbd->kbdmode == VC_MEDIUMRAW) &&
31493 value != KVAL(K_SAK))
31494 return; /* SAK is allowed even in raw mode */
31495+
31496+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31497+ {
31498+ void *func = fn_handler[value];
31499+ if (func == fn_show_state || func == fn_show_ptregs ||
31500+ func == fn_show_mem)
31501+ return;
31502+ }
31503+#endif
31504+
31505 fn_handler[value](vc);
31506 }
31507
31508@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31509 .evbit = { BIT_MASK(EV_SND) },
31510 },
31511
31512- { }, /* Terminating entry */
31513+ { 0 }, /* Terminating entry */
31514 };
31515
31516 MODULE_DEVICE_TABLE(input, kbd_ids);
31517diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31518index 87c67b4..230527a 100644
31519--- a/drivers/char/mbcs.c
31520+++ b/drivers/char/mbcs.c
31521@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31522 return 0;
31523 }
31524
31525-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31526+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31527 {
31528 .part_num = MBCS_PART_NUM,
31529 .mfg_num = MBCS_MFG_NUM,
31530diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31531index 1270f64..8495f49 100644
31532--- a/drivers/char/mem.c
31533+++ b/drivers/char/mem.c
31534@@ -18,6 +18,7 @@
31535 #include <linux/raw.h>
31536 #include <linux/tty.h>
31537 #include <linux/capability.h>
31538+#include <linux/security.h>
31539 #include <linux/ptrace.h>
31540 #include <linux/device.h>
31541 #include <linux/highmem.h>
31542@@ -35,6 +36,10 @@
31543 # include <linux/efi.h>
31544 #endif
31545
31546+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31547+extern struct file_operations grsec_fops;
31548+#endif
31549+
31550 static inline unsigned long size_inside_page(unsigned long start,
31551 unsigned long size)
31552 {
31553@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31554
31555 while (cursor < to) {
31556 if (!devmem_is_allowed(pfn)) {
31557+#ifdef CONFIG_GRKERNSEC_KMEM
31558+ gr_handle_mem_readwrite(from, to);
31559+#else
31560 printk(KERN_INFO
31561 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31562 current->comm, from, to);
31563+#endif
31564 return 0;
31565 }
31566 cursor += PAGE_SIZE;
31567@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31568 }
31569 return 1;
31570 }
31571+#elif defined(CONFIG_GRKERNSEC_KMEM)
31572+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31573+{
31574+ return 0;
31575+}
31576 #else
31577 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31578 {
31579@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31580 #endif
31581
31582 while (count > 0) {
31583+ char *temp;
31584+
31585 /*
31586 * Handle first page in case it's not aligned
31587 */
31588@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31589 if (!ptr)
31590 return -EFAULT;
31591
31592- if (copy_to_user(buf, ptr, sz)) {
31593+#ifdef CONFIG_PAX_USERCOPY
31594+ temp = kmalloc(sz, GFP_KERNEL);
31595+ if (!temp) {
31596+ unxlate_dev_mem_ptr(p, ptr);
31597+ return -ENOMEM;
31598+ }
31599+ memcpy(temp, ptr, sz);
31600+#else
31601+ temp = ptr;
31602+#endif
31603+
31604+ if (copy_to_user(buf, temp, sz)) {
31605+
31606+#ifdef CONFIG_PAX_USERCOPY
31607+ kfree(temp);
31608+#endif
31609+
31610 unxlate_dev_mem_ptr(p, ptr);
31611 return -EFAULT;
31612 }
31613
31614+#ifdef CONFIG_PAX_USERCOPY
31615+ kfree(temp);
31616+#endif
31617+
31618 unxlate_dev_mem_ptr(p, ptr);
31619
31620 buf += sz;
31621@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31622 size_t count, loff_t *ppos)
31623 {
31624 unsigned long p = *ppos;
31625- ssize_t low_count, read, sz;
31626+ ssize_t low_count, read, sz, err = 0;
31627 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31628- int err = 0;
31629
31630 read = 0;
31631 if (p < (unsigned long) high_memory) {
31632@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31633 }
31634 #endif
31635 while (low_count > 0) {
31636+ char *temp;
31637+
31638 sz = size_inside_page(p, low_count);
31639
31640 /*
31641@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31642 */
31643 kbuf = xlate_dev_kmem_ptr((char *)p);
31644
31645- if (copy_to_user(buf, kbuf, sz))
31646+#ifdef CONFIG_PAX_USERCOPY
31647+ temp = kmalloc(sz, GFP_KERNEL);
31648+ if (!temp)
31649+ return -ENOMEM;
31650+ memcpy(temp, kbuf, sz);
31651+#else
31652+ temp = kbuf;
31653+#endif
31654+
31655+ err = copy_to_user(buf, temp, sz);
31656+
31657+#ifdef CONFIG_PAX_USERCOPY
31658+ kfree(temp);
31659+#endif
31660+
31661+ if (err)
31662 return -EFAULT;
31663 buf += sz;
31664 p += sz;
31665@@ -889,6 +941,9 @@ static const struct memdev {
31666 #ifdef CONFIG_CRASH_DUMP
31667 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31668 #endif
31669+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31670+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31671+#endif
31672 };
31673
31674 static int memory_open(struct inode *inode, struct file *filp)
31675diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
31676index 918711a..4ffaf5e 100644
31677--- a/drivers/char/mmtimer.c
31678+++ b/drivers/char/mmtimer.c
31679@@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
31680 return err;
31681 }
31682
31683-static struct k_clock sgi_clock = {
31684+static k_clock_no_const sgi_clock = {
31685 .res = 0,
31686 .clock_set = sgi_clock_set,
31687 .clock_get = sgi_clock_get,
31688diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31689index 674b3ab..a8d1970 100644
31690--- a/drivers/char/pcmcia/ipwireless/tty.c
31691+++ b/drivers/char/pcmcia/ipwireless/tty.c
31692@@ -29,6 +29,7 @@
31693 #include <linux/tty_driver.h>
31694 #include <linux/tty_flip.h>
31695 #include <linux/uaccess.h>
31696+#include <asm/local.h>
31697
31698 #include "tty.h"
31699 #include "network.h"
31700@@ -51,7 +52,7 @@ struct ipw_tty {
31701 int tty_type;
31702 struct ipw_network *network;
31703 struct tty_struct *linux_tty;
31704- int open_count;
31705+ local_t open_count;
31706 unsigned int control_lines;
31707 struct mutex ipw_tty_mutex;
31708 int tx_bytes_queued;
31709@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31710 mutex_unlock(&tty->ipw_tty_mutex);
31711 return -ENODEV;
31712 }
31713- if (tty->open_count == 0)
31714+ if (local_read(&tty->open_count) == 0)
31715 tty->tx_bytes_queued = 0;
31716
31717- tty->open_count++;
31718+ local_inc(&tty->open_count);
31719
31720 tty->linux_tty = linux_tty;
31721 linux_tty->driver_data = tty;
31722@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31723
31724 static void do_ipw_close(struct ipw_tty *tty)
31725 {
31726- tty->open_count--;
31727-
31728- if (tty->open_count == 0) {
31729+ if (local_dec_return(&tty->open_count) == 0) {
31730 struct tty_struct *linux_tty = tty->linux_tty;
31731
31732 if (linux_tty != NULL) {
31733@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31734 return;
31735
31736 mutex_lock(&tty->ipw_tty_mutex);
31737- if (tty->open_count == 0) {
31738+ if (local_read(&tty->open_count) == 0) {
31739 mutex_unlock(&tty->ipw_tty_mutex);
31740 return;
31741 }
31742@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31743 return;
31744 }
31745
31746- if (!tty->open_count) {
31747+ if (!local_read(&tty->open_count)) {
31748 mutex_unlock(&tty->ipw_tty_mutex);
31749 return;
31750 }
31751@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31752 return -ENODEV;
31753
31754 mutex_lock(&tty->ipw_tty_mutex);
31755- if (!tty->open_count) {
31756+ if (!local_read(&tty->open_count)) {
31757 mutex_unlock(&tty->ipw_tty_mutex);
31758 return -EINVAL;
31759 }
31760@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31761 if (!tty)
31762 return -ENODEV;
31763
31764- if (!tty->open_count)
31765+ if (!local_read(&tty->open_count))
31766 return -EINVAL;
31767
31768 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31769@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31770 if (!tty)
31771 return 0;
31772
31773- if (!tty->open_count)
31774+ if (!local_read(&tty->open_count))
31775 return 0;
31776
31777 return tty->tx_bytes_queued;
31778@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31779 if (!tty)
31780 return -ENODEV;
31781
31782- if (!tty->open_count)
31783+ if (!local_read(&tty->open_count))
31784 return -EINVAL;
31785
31786 return get_control_lines(tty);
31787@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31788 if (!tty)
31789 return -ENODEV;
31790
31791- if (!tty->open_count)
31792+ if (!local_read(&tty->open_count))
31793 return -EINVAL;
31794
31795 return set_control_lines(tty, set, clear);
31796@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31797 if (!tty)
31798 return -ENODEV;
31799
31800- if (!tty->open_count)
31801+ if (!local_read(&tty->open_count))
31802 return -EINVAL;
31803
31804 /* FIXME: Exactly how is the tty object locked here .. */
31805@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31806 against a parallel ioctl etc */
31807 mutex_lock(&ttyj->ipw_tty_mutex);
31808 }
31809- while (ttyj->open_count)
31810+ while (local_read(&ttyj->open_count))
31811 do_ipw_close(ttyj);
31812 ipwireless_disassociate_network_ttys(network,
31813 ttyj->channel_idx);
31814diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31815index 62f282e..e45c45c 100644
31816--- a/drivers/char/pty.c
31817+++ b/drivers/char/pty.c
31818@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31819 register_sysctl_table(pty_root_table);
31820
31821 /* Now create the /dev/ptmx special device */
31822+ pax_open_kernel();
31823 tty_default_fops(&ptmx_fops);
31824- ptmx_fops.open = ptmx_open;
31825+ *(void **)&ptmx_fops.open = ptmx_open;
31826+ pax_close_kernel();
31827
31828 cdev_init(&ptmx_cdev, &ptmx_fops);
31829 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31830diff --git a/drivers/char/random.c b/drivers/char/random.c
31831index 3a19e2d..6ed09d3 100644
31832--- a/drivers/char/random.c
31833+++ b/drivers/char/random.c
31834@@ -254,8 +254,13 @@
31835 /*
31836 * Configuration information
31837 */
31838+#ifdef CONFIG_GRKERNSEC_RANDNET
31839+#define INPUT_POOL_WORDS 512
31840+#define OUTPUT_POOL_WORDS 128
31841+#else
31842 #define INPUT_POOL_WORDS 128
31843 #define OUTPUT_POOL_WORDS 32
31844+#endif
31845 #define SEC_XFER_SIZE 512
31846
31847 /*
31848@@ -292,10 +297,17 @@ static struct poolinfo {
31849 int poolwords;
31850 int tap1, tap2, tap3, tap4, tap5;
31851 } poolinfo_table[] = {
31852+#ifdef CONFIG_GRKERNSEC_RANDNET
31853+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31854+ { 512, 411, 308, 208, 104, 1 },
31855+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31856+ { 128, 103, 76, 51, 25, 1 },
31857+#else
31858 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31859 { 128, 103, 76, 51, 25, 1 },
31860 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31861 { 32, 26, 20, 14, 7, 1 },
31862+#endif
31863 #if 0
31864 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31865 { 2048, 1638, 1231, 819, 411, 1 },
31866@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31867 #include <linux/sysctl.h>
31868
31869 static int min_read_thresh = 8, min_write_thresh;
31870-static int max_read_thresh = INPUT_POOL_WORDS * 32;
31871+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31872 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31873 static char sysctl_bootid[16];
31874
31875diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31876index 0e29a23..0efc2c2 100644
31877--- a/drivers/char/rocket.c
31878+++ b/drivers/char/rocket.c
31879@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31880 struct rocket_ports tmp;
31881 int board;
31882
31883+ pax_track_stack();
31884+
31885 if (!retports)
31886 return -EFAULT;
31887 memset(&tmp, 0, sizeof (tmp));
31888diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31889index 8c262aa..4d3b058 100644
31890--- a/drivers/char/sonypi.c
31891+++ b/drivers/char/sonypi.c
31892@@ -55,6 +55,7 @@
31893 #include <asm/uaccess.h>
31894 #include <asm/io.h>
31895 #include <asm/system.h>
31896+#include <asm/local.h>
31897
31898 #include <linux/sonypi.h>
31899
31900@@ -491,7 +492,7 @@ static struct sonypi_device {
31901 spinlock_t fifo_lock;
31902 wait_queue_head_t fifo_proc_list;
31903 struct fasync_struct *fifo_async;
31904- int open_count;
31905+ local_t open_count;
31906 int model;
31907 struct input_dev *input_jog_dev;
31908 struct input_dev *input_key_dev;
31909@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31910 static int sonypi_misc_release(struct inode *inode, struct file *file)
31911 {
31912 mutex_lock(&sonypi_device.lock);
31913- sonypi_device.open_count--;
31914+ local_dec(&sonypi_device.open_count);
31915 mutex_unlock(&sonypi_device.lock);
31916 return 0;
31917 }
31918@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31919 lock_kernel();
31920 mutex_lock(&sonypi_device.lock);
31921 /* Flush input queue on first open */
31922- if (!sonypi_device.open_count)
31923+ if (!local_read(&sonypi_device.open_count))
31924 kfifo_reset(sonypi_device.fifo);
31925- sonypi_device.open_count++;
31926+ local_inc(&sonypi_device.open_count);
31927 mutex_unlock(&sonypi_device.lock);
31928 unlock_kernel();
31929 return 0;
31930diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31931index db6dcfa..13834cb 100644
31932--- a/drivers/char/stallion.c
31933+++ b/drivers/char/stallion.c
31934@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31935 struct stlport stl_dummyport;
31936 struct stlport *portp;
31937
31938+ pax_track_stack();
31939+
31940 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31941 return -EFAULT;
31942 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31943diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31944index a0789f6..cea3902 100644
31945--- a/drivers/char/tpm/tpm.c
31946+++ b/drivers/char/tpm/tpm.c
31947@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31948 chip->vendor.req_complete_val)
31949 goto out_recv;
31950
31951- if ((status == chip->vendor.req_canceled)) {
31952+ if (status == chip->vendor.req_canceled) {
31953 dev_err(chip->dev, "Operation Canceled\n");
31954 rc = -ECANCELED;
31955 goto out;
31956@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31957
31958 struct tpm_chip *chip = dev_get_drvdata(dev);
31959
31960+ pax_track_stack();
31961+
31962 tpm_cmd.header.in = tpm_readpubek_header;
31963 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31964 "attempting to read the PUBEK");
31965diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31966index bf2170f..ce8cab9 100644
31967--- a/drivers/char/tpm/tpm_bios.c
31968+++ b/drivers/char/tpm/tpm_bios.c
31969@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31970 event = addr;
31971
31972 if ((event->event_type == 0 && event->event_size == 0) ||
31973- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31974+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31975 return NULL;
31976
31977 return addr;
31978@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31979 return NULL;
31980
31981 if ((event->event_type == 0 && event->event_size == 0) ||
31982- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31983+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31984 return NULL;
31985
31986 (*pos)++;
31987@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31988 int i;
31989
31990 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31991- seq_putc(m, data[i]);
31992+ if (!seq_putc(m, data[i]))
31993+ return -EFAULT;
31994
31995 return 0;
31996 }
31997@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31998 log->bios_event_log_end = log->bios_event_log + len;
31999
32000 virt = acpi_os_map_memory(start, len);
32001+ if (!virt) {
32002+ kfree(log->bios_event_log);
32003+ log->bios_event_log = NULL;
32004+ return -EFAULT;
32005+ }
32006
32007- memcpy(log->bios_event_log, virt, len);
32008+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
32009
32010 acpi_os_unmap_memory(virt, len);
32011 return 0;
32012diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
32013index 123cedf..6664cb4 100644
32014--- a/drivers/char/tty_io.c
32015+++ b/drivers/char/tty_io.c
32016@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
32017 static int tty_release(struct inode *, struct file *);
32018 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
32019 #ifdef CONFIG_COMPAT
32020-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32021+long tty_compat_ioctl(struct file *file, unsigned int cmd,
32022 unsigned long arg);
32023 #else
32024 #define tty_compat_ioctl NULL
32025@@ -1774,6 +1774,7 @@ got_driver:
32026
32027 if (IS_ERR(tty)) {
32028 mutex_unlock(&tty_mutex);
32029+ tty_driver_kref_put(driver);
32030 return PTR_ERR(tty);
32031 }
32032 }
32033@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32034 return retval;
32035 }
32036
32037+EXPORT_SYMBOL(tty_ioctl);
32038+
32039 #ifdef CONFIG_COMPAT
32040-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32041+long tty_compat_ioctl(struct file *file, unsigned int cmd,
32042 unsigned long arg)
32043 {
32044 struct inode *inode = file->f_dentry->d_inode;
32045@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32046
32047 return retval;
32048 }
32049+
32050+EXPORT_SYMBOL(tty_compat_ioctl);
32051 #endif
32052
32053 /*
32054@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32055
32056 void tty_default_fops(struct file_operations *fops)
32057 {
32058- *fops = tty_fops;
32059+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32060 }
32061
32062 /*
32063diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
32064index d814a3d..b55b9c9 100644
32065--- a/drivers/char/tty_ldisc.c
32066+++ b/drivers/char/tty_ldisc.c
32067@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
32068 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
32069 struct tty_ldisc_ops *ldo = ld->ops;
32070
32071- ldo->refcount--;
32072+ atomic_dec(&ldo->refcount);
32073 module_put(ldo->owner);
32074 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32075
32076@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
32077 spin_lock_irqsave(&tty_ldisc_lock, flags);
32078 tty_ldiscs[disc] = new_ldisc;
32079 new_ldisc->num = disc;
32080- new_ldisc->refcount = 0;
32081+ atomic_set(&new_ldisc->refcount, 0);
32082 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32083
32084 return ret;
32085@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
32086 return -EINVAL;
32087
32088 spin_lock_irqsave(&tty_ldisc_lock, flags);
32089- if (tty_ldiscs[disc]->refcount)
32090+ if (atomic_read(&tty_ldiscs[disc]->refcount))
32091 ret = -EBUSY;
32092 else
32093 tty_ldiscs[disc] = NULL;
32094@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
32095 if (ldops) {
32096 ret = ERR_PTR(-EAGAIN);
32097 if (try_module_get(ldops->owner)) {
32098- ldops->refcount++;
32099+ atomic_inc(&ldops->refcount);
32100 ret = ldops;
32101 }
32102 }
32103@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
32104 unsigned long flags;
32105
32106 spin_lock_irqsave(&tty_ldisc_lock, flags);
32107- ldops->refcount--;
32108+ atomic_dec(&ldops->refcount);
32109 module_put(ldops->owner);
32110 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
32111 }
32112diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
32113index a035ae3..c27fe2c 100644
32114--- a/drivers/char/virtio_console.c
32115+++ b/drivers/char/virtio_console.c
32116@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
32117 * virtqueue, so we let the drivers do some boutique early-output thing. */
32118 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
32119 {
32120- virtio_cons.put_chars = put_chars;
32121+ pax_open_kernel();
32122+ *(void **)&virtio_cons.put_chars = put_chars;
32123+ pax_close_kernel();
32124 return hvc_instantiate(0, 0, &virtio_cons);
32125 }
32126
32127@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
32128 out_vq = vqs[1];
32129
32130 /* Start using the new console output. */
32131- virtio_cons.get_chars = get_chars;
32132- virtio_cons.put_chars = put_chars;
32133- virtio_cons.notifier_add = notifier_add_vio;
32134- virtio_cons.notifier_del = notifier_del_vio;
32135- virtio_cons.notifier_hangup = notifier_del_vio;
32136+ pax_open_kernel();
32137+ *(void **)&virtio_cons.get_chars = get_chars;
32138+ *(void **)&virtio_cons.put_chars = put_chars;
32139+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
32140+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
32141+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
32142+ pax_close_kernel();
32143
32144 /* The first argument of hvc_alloc() is the virtual console number, so
32145 * we use zero. The second argument is the parameter for the
32146diff --git a/drivers/char/vt.c b/drivers/char/vt.c
32147index 0c80c68..53d59c1 100644
32148--- a/drivers/char/vt.c
32149+++ b/drivers/char/vt.c
32150@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
32151
32152 static void notify_write(struct vc_data *vc, unsigned int unicode)
32153 {
32154- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
32155+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
32156 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
32157 }
32158
32159diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
32160index 6351a26..999af95 100644
32161--- a/drivers/char/vt_ioctl.c
32162+++ b/drivers/char/vt_ioctl.c
32163@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32164 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
32165 return -EFAULT;
32166
32167- if (!capable(CAP_SYS_TTY_CONFIG))
32168- perm = 0;
32169-
32170 switch (cmd) {
32171 case KDGKBENT:
32172 key_map = key_maps[s];
32173@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
32174 val = (i ? K_HOLE : K_NOSUCHMAP);
32175 return put_user(val, &user_kbe->kb_value);
32176 case KDSKBENT:
32177+ if (!capable(CAP_SYS_TTY_CONFIG))
32178+ perm = 0;
32179+
32180 if (!perm)
32181 return -EPERM;
32182+
32183 if (!i && v == K_NOSUCHMAP) {
32184 /* deallocate map */
32185 key_map = key_maps[s];
32186@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32187 int i, j, k;
32188 int ret;
32189
32190- if (!capable(CAP_SYS_TTY_CONFIG))
32191- perm = 0;
32192-
32193 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
32194 if (!kbs) {
32195 ret = -ENOMEM;
32196@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
32197 kfree(kbs);
32198 return ((p && *p) ? -EOVERFLOW : 0);
32199 case KDSKBSENT:
32200+ if (!capable(CAP_SYS_TTY_CONFIG))
32201+ perm = 0;
32202+
32203 if (!perm) {
32204 ret = -EPERM;
32205 goto reterr;
32206diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
32207index c7ae026..1769c1d 100644
32208--- a/drivers/cpufreq/cpufreq.c
32209+++ b/drivers/cpufreq/cpufreq.c
32210@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
32211 complete(&policy->kobj_unregister);
32212 }
32213
32214-static struct sysfs_ops sysfs_ops = {
32215+static const struct sysfs_ops sysfs_ops = {
32216 .show = show,
32217 .store = store,
32218 };
32219diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
32220index 97b0038..2056670 100644
32221--- a/drivers/cpuidle/sysfs.c
32222+++ b/drivers/cpuidle/sysfs.c
32223@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
32224 return ret;
32225 }
32226
32227-static struct sysfs_ops cpuidle_sysfs_ops = {
32228+static const struct sysfs_ops cpuidle_sysfs_ops = {
32229 .show = cpuidle_show,
32230 .store = cpuidle_store,
32231 };
32232@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
32233 return ret;
32234 }
32235
32236-static struct sysfs_ops cpuidle_state_sysfs_ops = {
32237+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
32238 .show = cpuidle_state_show,
32239 };
32240
32241@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
32242 .release = cpuidle_state_sysfs_release,
32243 };
32244
32245-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32246+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
32247 {
32248 kobject_put(&device->kobjs[i]->kobj);
32249 wait_for_completion(&device->kobjs[i]->kobj_unregister);
32250diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
32251index 5f753fc..0377ae9 100644
32252--- a/drivers/crypto/hifn_795x.c
32253+++ b/drivers/crypto/hifn_795x.c
32254@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
32255 0xCA, 0x34, 0x2B, 0x2E};
32256 struct scatterlist sg;
32257
32258+ pax_track_stack();
32259+
32260 memset(src, 0, sizeof(src));
32261 memset(ctx.key, 0, sizeof(ctx.key));
32262
32263diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
32264index 71e6482..de8d96c 100644
32265--- a/drivers/crypto/padlock-aes.c
32266+++ b/drivers/crypto/padlock-aes.c
32267@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32268 struct crypto_aes_ctx gen_aes;
32269 int cpu;
32270
32271+ pax_track_stack();
32272+
32273 if (key_len % 8) {
32274 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32275 return -EINVAL;
32276diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32277index dcc4ab7..cc834bb 100644
32278--- a/drivers/dma/ioat/dma.c
32279+++ b/drivers/dma/ioat/dma.c
32280@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32281 return entry->show(&chan->common, page);
32282 }
32283
32284-struct sysfs_ops ioat_sysfs_ops = {
32285+const struct sysfs_ops ioat_sysfs_ops = {
32286 .show = ioat_attr_show,
32287 };
32288
32289diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32290index bbc3e78..f2db62c 100644
32291--- a/drivers/dma/ioat/dma.h
32292+++ b/drivers/dma/ioat/dma.h
32293@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32294 unsigned long *phys_complete);
32295 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32296 void ioat_kobject_del(struct ioatdma_device *device);
32297-extern struct sysfs_ops ioat_sysfs_ops;
32298+extern const struct sysfs_ops ioat_sysfs_ops;
32299 extern struct ioat_sysfs_entry ioat_version_attr;
32300 extern struct ioat_sysfs_entry ioat_cap_attr;
32301 #endif /* IOATDMA_H */
32302diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32303index 9908c9e..3ceb0e5 100644
32304--- a/drivers/dma/ioat/dma_v3.c
32305+++ b/drivers/dma/ioat/dma_v3.c
32306@@ -71,10 +71,10 @@
32307 /* provide a lookup table for setting the source address in the base or
32308 * extended descriptor of an xor or pq descriptor
32309 */
32310-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32311-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32312-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32313-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32314+static const u8 xor_idx_to_desc = 0xd0;
32315+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32316+static const u8 pq_idx_to_desc = 0xf8;
32317+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32318
32319 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32320 {
32321diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32322index 85c464a..afd1e73 100644
32323--- a/drivers/edac/amd64_edac.c
32324+++ b/drivers/edac/amd64_edac.c
32325@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32326 * PCI core identifies what devices are on a system during boot, and then
32327 * inquiry this table to see if this driver is for a given device found.
32328 */
32329-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32330+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32331 {
32332 .vendor = PCI_VENDOR_ID_AMD,
32333 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32334diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32335index 2b95f1a..4f52793 100644
32336--- a/drivers/edac/amd76x_edac.c
32337+++ b/drivers/edac/amd76x_edac.c
32338@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32339 edac_mc_free(mci);
32340 }
32341
32342-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32343+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32344 {
32345 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32346 AMD762},
32347diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32348index d205d49..74c9672 100644
32349--- a/drivers/edac/e752x_edac.c
32350+++ b/drivers/edac/e752x_edac.c
32351@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32352 edac_mc_free(mci);
32353 }
32354
32355-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32356+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32357 {
32358 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32359 E7520},
32360diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32361index c7d11cc..c59c1ca 100644
32362--- a/drivers/edac/e7xxx_edac.c
32363+++ b/drivers/edac/e7xxx_edac.c
32364@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32365 edac_mc_free(mci);
32366 }
32367
32368-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32369+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32370 {
32371 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32372 E7205},
32373diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32374index 5376457..5fdedbc 100644
32375--- a/drivers/edac/edac_device_sysfs.c
32376+++ b/drivers/edac/edac_device_sysfs.c
32377@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32378 }
32379
32380 /* edac_dev file operations for an 'ctl_info' */
32381-static struct sysfs_ops device_ctl_info_ops = {
32382+static const struct sysfs_ops device_ctl_info_ops = {
32383 .show = edac_dev_ctl_info_show,
32384 .store = edac_dev_ctl_info_store
32385 };
32386@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32387 }
32388
32389 /* edac_dev file operations for an 'instance' */
32390-static struct sysfs_ops device_instance_ops = {
32391+static const struct sysfs_ops device_instance_ops = {
32392 .show = edac_dev_instance_show,
32393 .store = edac_dev_instance_store
32394 };
32395@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32396 }
32397
32398 /* edac_dev file operations for a 'block' */
32399-static struct sysfs_ops device_block_ops = {
32400+static const struct sysfs_ops device_block_ops = {
32401 .show = edac_dev_block_show,
32402 .store = edac_dev_block_store
32403 };
32404diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32405index e1d4ce0..88840e9 100644
32406--- a/drivers/edac/edac_mc_sysfs.c
32407+++ b/drivers/edac/edac_mc_sysfs.c
32408@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32409 return -EIO;
32410 }
32411
32412-static struct sysfs_ops csrowfs_ops = {
32413+static const struct sysfs_ops csrowfs_ops = {
32414 .show = csrowdev_show,
32415 .store = csrowdev_store
32416 };
32417@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32418 }
32419
32420 /* Intermediate show/store table */
32421-static struct sysfs_ops mci_ops = {
32422+static const struct sysfs_ops mci_ops = {
32423 .show = mcidev_show,
32424 .store = mcidev_store
32425 };
32426diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32427index 422728c..d8d9c88 100644
32428--- a/drivers/edac/edac_pci_sysfs.c
32429+++ b/drivers/edac/edac_pci_sysfs.c
32430@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32431 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32432 static int edac_pci_poll_msec = 1000; /* one second workq period */
32433
32434-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32435-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32436+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32437+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32438
32439 static struct kobject *edac_pci_top_main_kobj;
32440 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32441@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32442 }
32443
32444 /* fs_ops table */
32445-static struct sysfs_ops pci_instance_ops = {
32446+static const struct sysfs_ops pci_instance_ops = {
32447 .show = edac_pci_instance_show,
32448 .store = edac_pci_instance_store
32449 };
32450@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32451 return -EIO;
32452 }
32453
32454-static struct sysfs_ops edac_pci_sysfs_ops = {
32455+static const struct sysfs_ops edac_pci_sysfs_ops = {
32456 .show = edac_pci_dev_show,
32457 .store = edac_pci_dev_store
32458 };
32459@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32460 edac_printk(KERN_CRIT, EDAC_PCI,
32461 "Signaled System Error on %s\n",
32462 pci_name(dev));
32463- atomic_inc(&pci_nonparity_count);
32464+ atomic_inc_unchecked(&pci_nonparity_count);
32465 }
32466
32467 if (status & (PCI_STATUS_PARITY)) {
32468@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32469 "Master Data Parity Error on %s\n",
32470 pci_name(dev));
32471
32472- atomic_inc(&pci_parity_count);
32473+ atomic_inc_unchecked(&pci_parity_count);
32474 }
32475
32476 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32477@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32478 "Detected Parity Error on %s\n",
32479 pci_name(dev));
32480
32481- atomic_inc(&pci_parity_count);
32482+ atomic_inc_unchecked(&pci_parity_count);
32483 }
32484 }
32485
32486@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32487 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32488 "Signaled System Error on %s\n",
32489 pci_name(dev));
32490- atomic_inc(&pci_nonparity_count);
32491+ atomic_inc_unchecked(&pci_nonparity_count);
32492 }
32493
32494 if (status & (PCI_STATUS_PARITY)) {
32495@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32496 "Master Data Parity Error on "
32497 "%s\n", pci_name(dev));
32498
32499- atomic_inc(&pci_parity_count);
32500+ atomic_inc_unchecked(&pci_parity_count);
32501 }
32502
32503 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32504@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32505 "Detected Parity Error on %s\n",
32506 pci_name(dev));
32507
32508- atomic_inc(&pci_parity_count);
32509+ atomic_inc_unchecked(&pci_parity_count);
32510 }
32511 }
32512 }
32513@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32514 if (!check_pci_errors)
32515 return;
32516
32517- before_count = atomic_read(&pci_parity_count);
32518+ before_count = atomic_read_unchecked(&pci_parity_count);
32519
32520 /* scan all PCI devices looking for a Parity Error on devices and
32521 * bridges.
32522@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32523 /* Only if operator has selected panic on PCI Error */
32524 if (edac_pci_get_panic_on_pe()) {
32525 /* If the count is different 'after' from 'before' */
32526- if (before_count != atomic_read(&pci_parity_count))
32527+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32528 panic("EDAC: PCI Parity Error");
32529 }
32530 }
32531diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32532index 6c9a0f2..9c1cf7e 100644
32533--- a/drivers/edac/i3000_edac.c
32534+++ b/drivers/edac/i3000_edac.c
32535@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32536 edac_mc_free(mci);
32537 }
32538
32539-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32540+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32541 {
32542 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32543 I3000},
32544diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32545index fde4db9..fe108f9 100644
32546--- a/drivers/edac/i3200_edac.c
32547+++ b/drivers/edac/i3200_edac.c
32548@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32549 edac_mc_free(mci);
32550 }
32551
32552-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32553+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32554 {
32555 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32556 I3200},
32557diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32558index adc10a2..57d4ccf 100644
32559--- a/drivers/edac/i5000_edac.c
32560+++ b/drivers/edac/i5000_edac.c
32561@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32562 *
32563 * The "E500P" device is the first device supported.
32564 */
32565-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32566+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32567 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32568 .driver_data = I5000P},
32569
32570diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32571index 22db05a..b2b5503 100644
32572--- a/drivers/edac/i5100_edac.c
32573+++ b/drivers/edac/i5100_edac.c
32574@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32575 edac_mc_free(mci);
32576 }
32577
32578-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32579+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32580 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32581 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32582 { 0, }
32583diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32584index f99d106..f050710 100644
32585--- a/drivers/edac/i5400_edac.c
32586+++ b/drivers/edac/i5400_edac.c
32587@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32588 *
32589 * The "E500P" device is the first device supported.
32590 */
32591-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32592+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32593 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32594 {0,} /* 0 terminated list. */
32595 };
32596diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32597index 577760a..9ce16ce 100644
32598--- a/drivers/edac/i82443bxgx_edac.c
32599+++ b/drivers/edac/i82443bxgx_edac.c
32600@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32601
32602 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32603
32604-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32605+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32606 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32607 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32608 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32609diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32610index c0088ba..64a7b98 100644
32611--- a/drivers/edac/i82860_edac.c
32612+++ b/drivers/edac/i82860_edac.c
32613@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32614 edac_mc_free(mci);
32615 }
32616
32617-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32618+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32619 {
32620 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32621 I82860},
32622diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32623index b2d83b9..a34357b 100644
32624--- a/drivers/edac/i82875p_edac.c
32625+++ b/drivers/edac/i82875p_edac.c
32626@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32627 edac_mc_free(mci);
32628 }
32629
32630-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32631+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32632 {
32633 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32634 I82875P},
32635diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32636index 2eed3ea..87bbbd1 100644
32637--- a/drivers/edac/i82975x_edac.c
32638+++ b/drivers/edac/i82975x_edac.c
32639@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32640 edac_mc_free(mci);
32641 }
32642
32643-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32644+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32645 {
32646 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32647 I82975X
32648diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32649index 9900675..78ac2b6 100644
32650--- a/drivers/edac/r82600_edac.c
32651+++ b/drivers/edac/r82600_edac.c
32652@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32653 edac_mc_free(mci);
32654 }
32655
32656-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32657+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32658 {
32659 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32660 },
32661diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32662index d4ec605..4cfec4e 100644
32663--- a/drivers/edac/x38_edac.c
32664+++ b/drivers/edac/x38_edac.c
32665@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32666 edac_mc_free(mci);
32667 }
32668
32669-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32670+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32671 {
32672 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32673 X38},
32674diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32675index 3fc2ceb..daf098f 100644
32676--- a/drivers/firewire/core-card.c
32677+++ b/drivers/firewire/core-card.c
32678@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32679
32680 void fw_core_remove_card(struct fw_card *card)
32681 {
32682- struct fw_card_driver dummy_driver = dummy_driver_template;
32683+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32684
32685 card->driver->update_phy_reg(card, 4,
32686 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32687diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32688index 4560d8f..36db24a 100644
32689--- a/drivers/firewire/core-cdev.c
32690+++ b/drivers/firewire/core-cdev.c
32691@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32692 int ret;
32693
32694 if ((request->channels == 0 && request->bandwidth == 0) ||
32695- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32696- request->bandwidth < 0)
32697+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32698 return -EINVAL;
32699
32700 r = kmalloc(sizeof(*r), GFP_KERNEL);
32701diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32702index da628c7..cf54a2c 100644
32703--- a/drivers/firewire/core-transaction.c
32704+++ b/drivers/firewire/core-transaction.c
32705@@ -36,6 +36,7 @@
32706 #include <linux/string.h>
32707 #include <linux/timer.h>
32708 #include <linux/types.h>
32709+#include <linux/sched.h>
32710
32711 #include <asm/byteorder.h>
32712
32713@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32714 struct transaction_callback_data d;
32715 struct fw_transaction t;
32716
32717+ pax_track_stack();
32718+
32719 init_completion(&d.done);
32720 d.payload = payload;
32721 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32722diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32723index 7ff6e75..a2965d9 100644
32724--- a/drivers/firewire/core.h
32725+++ b/drivers/firewire/core.h
32726@@ -86,6 +86,7 @@ struct fw_card_driver {
32727
32728 int (*stop_iso)(struct fw_iso_context *ctx);
32729 };
32730+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32731
32732 void fw_card_initialize(struct fw_card *card,
32733 const struct fw_card_driver *driver, struct device *device);
32734diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32735index 3a2ccb0..82fd7c4 100644
32736--- a/drivers/firmware/dmi_scan.c
32737+++ b/drivers/firmware/dmi_scan.c
32738@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32739 }
32740 }
32741 else {
32742- /*
32743- * no iounmap() for that ioremap(); it would be a no-op, but
32744- * it's so early in setup that sucker gets confused into doing
32745- * what it shouldn't if we actually call it.
32746- */
32747 p = dmi_ioremap(0xF0000, 0x10000);
32748 if (p == NULL)
32749 goto error;
32750@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32751 if (buf == NULL)
32752 return -1;
32753
32754- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32755+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32756
32757 iounmap(buf);
32758 return 0;
32759diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32760index 9e4f59d..110e24e 100644
32761--- a/drivers/firmware/edd.c
32762+++ b/drivers/firmware/edd.c
32763@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32764 return ret;
32765 }
32766
32767-static struct sysfs_ops edd_attr_ops = {
32768+static const struct sysfs_ops edd_attr_ops = {
32769 .show = edd_attr_show,
32770 };
32771
32772diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32773index f4f709d..082f06e 100644
32774--- a/drivers/firmware/efivars.c
32775+++ b/drivers/firmware/efivars.c
32776@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32777 return ret;
32778 }
32779
32780-static struct sysfs_ops efivar_attr_ops = {
32781+static const struct sysfs_ops efivar_attr_ops = {
32782 .show = efivar_attr_show,
32783 .store = efivar_attr_store,
32784 };
32785diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32786index 051d1eb..0a5d4e7 100644
32787--- a/drivers/firmware/iscsi_ibft.c
32788+++ b/drivers/firmware/iscsi_ibft.c
32789@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32790 return ret;
32791 }
32792
32793-static struct sysfs_ops ibft_attr_ops = {
32794+static const struct sysfs_ops ibft_attr_ops = {
32795 .show = ibft_show_attribute,
32796 };
32797
32798diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32799index 56f9234..8c58c7b 100644
32800--- a/drivers/firmware/memmap.c
32801+++ b/drivers/firmware/memmap.c
32802@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32803 NULL
32804 };
32805
32806-static struct sysfs_ops memmap_attr_ops = {
32807+static const struct sysfs_ops memmap_attr_ops = {
32808 .show = memmap_attr_show,
32809 };
32810
32811diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32812index b16c9a8..2af7d3f 100644
32813--- a/drivers/gpio/vr41xx_giu.c
32814+++ b/drivers/gpio/vr41xx_giu.c
32815@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32816 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32817 maskl, pendl, maskh, pendh);
32818
32819- atomic_inc(&irq_err_count);
32820+ atomic_inc_unchecked(&irq_err_count);
32821
32822 return -EINVAL;
32823 }
32824diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32825index bea6efc..3dc0f42 100644
32826--- a/drivers/gpu/drm/drm_crtc.c
32827+++ b/drivers/gpu/drm/drm_crtc.c
32828@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32829 */
32830 if ((out_resp->count_modes >= mode_count) && mode_count) {
32831 copied = 0;
32832- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32833+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32834 list_for_each_entry(mode, &connector->modes, head) {
32835 drm_crtc_convert_to_umode(&u_mode, mode);
32836 if (copy_to_user(mode_ptr + copied,
32837@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32838
32839 if ((out_resp->count_props >= props_count) && props_count) {
32840 copied = 0;
32841- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32842- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32843+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32844+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32845 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32846 if (connector->property_ids[i] != 0) {
32847 if (put_user(connector->property_ids[i],
32848@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32849
32850 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32851 copied = 0;
32852- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32853+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32854 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32855 if (connector->encoder_ids[i] != 0) {
32856 if (put_user(connector->encoder_ids[i],
32857@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32858 }
32859
32860 for (i = 0; i < crtc_req->count_connectors; i++) {
32861- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32862+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32863 if (get_user(out_id, &set_connectors_ptr[i])) {
32864 ret = -EFAULT;
32865 goto out;
32866@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32867 out_resp->flags = property->flags;
32868
32869 if ((out_resp->count_values >= value_count) && value_count) {
32870- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32871+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32872 for (i = 0; i < value_count; i++) {
32873 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32874 ret = -EFAULT;
32875@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32876 if (property->flags & DRM_MODE_PROP_ENUM) {
32877 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32878 copied = 0;
32879- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32880+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32881 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32882
32883 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32884@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32885 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32886 copied = 0;
32887 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32888- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32889+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32890
32891 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32892 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32893@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32894 blob = obj_to_blob(obj);
32895
32896 if (out_resp->length == blob->length) {
32897- blob_ptr = (void *)(unsigned long)out_resp->data;
32898+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
32899 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32900 ret = -EFAULT;
32901 goto done;
32902diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32903index 1b8745d..92fdbf6 100644
32904--- a/drivers/gpu/drm/drm_crtc_helper.c
32905+++ b/drivers/gpu/drm/drm_crtc_helper.c
32906@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32907 struct drm_crtc *tmp;
32908 int crtc_mask = 1;
32909
32910- WARN(!crtc, "checking null crtc?");
32911+ BUG_ON(!crtc);
32912
32913 dev = crtc->dev;
32914
32915@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32916
32917 adjusted_mode = drm_mode_duplicate(dev, mode);
32918
32919+ pax_track_stack();
32920+
32921 crtc->enabled = drm_helper_crtc_in_use(crtc);
32922
32923 if (!crtc->enabled)
32924diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32925index 0e27d98..dec8768 100644
32926--- a/drivers/gpu/drm/drm_drv.c
32927+++ b/drivers/gpu/drm/drm_drv.c
32928@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32929 char *kdata = NULL;
32930
32931 atomic_inc(&dev->ioctl_count);
32932- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32933+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32934 ++file_priv->ioctl_count;
32935
32936 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32937diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32938index 519161e..98c840c 100644
32939--- a/drivers/gpu/drm/drm_fops.c
32940+++ b/drivers/gpu/drm/drm_fops.c
32941@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32942 }
32943
32944 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32945- atomic_set(&dev->counts[i], 0);
32946+ atomic_set_unchecked(&dev->counts[i], 0);
32947
32948 dev->sigdata.lock = NULL;
32949
32950@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32951
32952 retcode = drm_open_helper(inode, filp, dev);
32953 if (!retcode) {
32954- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32955+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32956 spin_lock(&dev->count_lock);
32957- if (!dev->open_count++) {
32958+ if (local_inc_return(&dev->open_count) == 1) {
32959 spin_unlock(&dev->count_lock);
32960 retcode = drm_setup(dev);
32961 goto out;
32962@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32963
32964 lock_kernel();
32965
32966- DRM_DEBUG("open_count = %d\n", dev->open_count);
32967+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32968
32969 if (dev->driver->preclose)
32970 dev->driver->preclose(dev, file_priv);
32971@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32972 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32973 task_pid_nr(current),
32974 (long)old_encode_dev(file_priv->minor->device),
32975- dev->open_count);
32976+ local_read(&dev->open_count));
32977
32978 /* Release any auth tokens that might point to this file_priv,
32979 (do that under the drm_global_mutex) */
32980@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
32981 * End inline drm_release
32982 */
32983
32984- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32985+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32986 spin_lock(&dev->count_lock);
32987- if (!--dev->open_count) {
32988+ if (local_dec_and_test(&dev->open_count)) {
32989 if (atomic_read(&dev->ioctl_count)) {
32990 DRM_ERROR("Device busy: %d\n",
32991 atomic_read(&dev->ioctl_count));
32992diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32993index 8bf3770..79422805 100644
32994--- a/drivers/gpu/drm/drm_gem.c
32995+++ b/drivers/gpu/drm/drm_gem.c
32996@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32997 spin_lock_init(&dev->object_name_lock);
32998 idr_init(&dev->object_name_idr);
32999 atomic_set(&dev->object_count, 0);
33000- atomic_set(&dev->object_memory, 0);
33001+ atomic_set_unchecked(&dev->object_memory, 0);
33002 atomic_set(&dev->pin_count, 0);
33003- atomic_set(&dev->pin_memory, 0);
33004+ atomic_set_unchecked(&dev->pin_memory, 0);
33005 atomic_set(&dev->gtt_count, 0);
33006- atomic_set(&dev->gtt_memory, 0);
33007+ atomic_set_unchecked(&dev->gtt_memory, 0);
33008
33009 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
33010 if (!mm) {
33011@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
33012 goto fput;
33013 }
33014 atomic_inc(&dev->object_count);
33015- atomic_add(obj->size, &dev->object_memory);
33016+ atomic_add_unchecked(obj->size, &dev->object_memory);
33017 return obj;
33018 fput:
33019 fput(obj->filp);
33020@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
33021
33022 fput(obj->filp);
33023 atomic_dec(&dev->object_count);
33024- atomic_sub(obj->size, &dev->object_memory);
33025+ atomic_sub_unchecked(obj->size, &dev->object_memory);
33026 kfree(obj);
33027 }
33028 EXPORT_SYMBOL(drm_gem_object_free);
33029diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
33030index f0f6c6b..34af322 100644
33031--- a/drivers/gpu/drm/drm_info.c
33032+++ b/drivers/gpu/drm/drm_info.c
33033@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
33034 struct drm_local_map *map;
33035 struct drm_map_list *r_list;
33036
33037- /* Hardcoded from _DRM_FRAME_BUFFER,
33038- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
33039- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
33040- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
33041+ static const char * const types[] = {
33042+ [_DRM_FRAME_BUFFER] = "FB",
33043+ [_DRM_REGISTERS] = "REG",
33044+ [_DRM_SHM] = "SHM",
33045+ [_DRM_AGP] = "AGP",
33046+ [_DRM_SCATTER_GATHER] = "SG",
33047+ [_DRM_CONSISTENT] = "PCI",
33048+ [_DRM_GEM] = "GEM" };
33049 const char *type;
33050 int i;
33051
33052@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33053 map = r_list->map;
33054 if (!map)
33055 continue;
33056- if (map->type < 0 || map->type > 5)
33057+ if (map->type >= ARRAY_SIZE(types))
33058 type = "??";
33059 else
33060 type = types[map->type];
33061@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
33062 struct drm_device *dev = node->minor->dev;
33063
33064 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
33065- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
33066+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
33067 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
33068- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
33069- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
33070+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
33071+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
33072 seq_printf(m, "%d gtt total\n", dev->gtt_total);
33073 return 0;
33074 }
33075@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
33076 mutex_lock(&dev->struct_mutex);
33077 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
33078 atomic_read(&dev->vma_count),
33079+#ifdef CONFIG_GRKERNSEC_HIDESYM
33080+ NULL, 0);
33081+#else
33082 high_memory, (u64)virt_to_phys(high_memory));
33083+#endif
33084
33085 list_for_each_entry(pt, &dev->vmalist, head) {
33086 vma = pt->vma;
33087@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
33088 continue;
33089 seq_printf(m,
33090 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
33091- pt->pid, vma->vm_start, vma->vm_end,
33092+ pt->pid,
33093+#ifdef CONFIG_GRKERNSEC_HIDESYM
33094+ 0, 0,
33095+#else
33096+ vma->vm_start, vma->vm_end,
33097+#endif
33098 vma->vm_flags & VM_READ ? 'r' : '-',
33099 vma->vm_flags & VM_WRITE ? 'w' : '-',
33100 vma->vm_flags & VM_EXEC ? 'x' : '-',
33101 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
33102 vma->vm_flags & VM_LOCKED ? 'l' : '-',
33103 vma->vm_flags & VM_IO ? 'i' : '-',
33104+#ifdef CONFIG_GRKERNSEC_HIDESYM
33105+ 0);
33106+#else
33107 vma->vm_pgoff);
33108+#endif
33109
33110 #if defined(__i386__)
33111 pgprot = pgprot_val(vma->vm_page_prot);
33112diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
33113index 282d9fd..71e5f11 100644
33114--- a/drivers/gpu/drm/drm_ioc32.c
33115+++ b/drivers/gpu/drm/drm_ioc32.c
33116@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
33117 request = compat_alloc_user_space(nbytes);
33118 if (!access_ok(VERIFY_WRITE, request, nbytes))
33119 return -EFAULT;
33120- list = (struct drm_buf_desc *) (request + 1);
33121+ list = (struct drm_buf_desc __user *) (request + 1);
33122
33123 if (__put_user(count, &request->count)
33124 || __put_user(list, &request->list))
33125@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
33126 request = compat_alloc_user_space(nbytes);
33127 if (!access_ok(VERIFY_WRITE, request, nbytes))
33128 return -EFAULT;
33129- list = (struct drm_buf_pub *) (request + 1);
33130+ list = (struct drm_buf_pub __user *) (request + 1);
33131
33132 if (__put_user(count, &request->count)
33133 || __put_user(list, &request->list))
33134diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
33135index 9b9ff46..4ea724c 100644
33136--- a/drivers/gpu/drm/drm_ioctl.c
33137+++ b/drivers/gpu/drm/drm_ioctl.c
33138@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
33139 stats->data[i].value =
33140 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
33141 else
33142- stats->data[i].value = atomic_read(&dev->counts[i]);
33143+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
33144 stats->data[i].type = dev->types[i];
33145 }
33146
33147diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
33148index e2f70a5..c703e86 100644
33149--- a/drivers/gpu/drm/drm_lock.c
33150+++ b/drivers/gpu/drm/drm_lock.c
33151@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33152 if (drm_lock_take(&master->lock, lock->context)) {
33153 master->lock.file_priv = file_priv;
33154 master->lock.lock_time = jiffies;
33155- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
33156+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
33157 break; /* Got lock */
33158 }
33159
33160@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
33161 return -EINVAL;
33162 }
33163
33164- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
33165+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
33166
33167 /* kernel_context_switch isn't used by any of the x86 drm
33168 * modules but is required by the Sparc driver.
33169diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
33170index 7d1d88c..b9131b2 100644
33171--- a/drivers/gpu/drm/i810/i810_dma.c
33172+++ b/drivers/gpu/drm/i810/i810_dma.c
33173@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
33174 dma->buflist[vertex->idx],
33175 vertex->discard, vertex->used);
33176
33177- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33178- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33179+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
33180+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33181 sarea_priv->last_enqueue = dev_priv->counter - 1;
33182 sarea_priv->last_dispatch = (int)hw_status[5];
33183
33184@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
33185 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
33186 mc->last_render);
33187
33188- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33189- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
33190+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
33191+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
33192 sarea_priv->last_enqueue = dev_priv->counter - 1;
33193 sarea_priv->last_dispatch = (int)hw_status[5];
33194
33195diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
33196index 21e2691..7321edd 100644
33197--- a/drivers/gpu/drm/i810/i810_drv.h
33198+++ b/drivers/gpu/drm/i810/i810_drv.h
33199@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
33200 int page_flipping;
33201
33202 wait_queue_head_t irq_queue;
33203- atomic_t irq_received;
33204- atomic_t irq_emitted;
33205+ atomic_unchecked_t irq_received;
33206+ atomic_unchecked_t irq_emitted;
33207
33208 int front_offset;
33209 } drm_i810_private_t;
33210diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
33211index da82afe..48a45de 100644
33212--- a/drivers/gpu/drm/i830/i830_drv.h
33213+++ b/drivers/gpu/drm/i830/i830_drv.h
33214@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
33215 int page_flipping;
33216
33217 wait_queue_head_t irq_queue;
33218- atomic_t irq_received;
33219- atomic_t irq_emitted;
33220+ atomic_unchecked_t irq_received;
33221+ atomic_unchecked_t irq_emitted;
33222
33223 int use_mi_batchbuffer_start;
33224
33225diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
33226index 91ec2bb..6f21fab 100644
33227--- a/drivers/gpu/drm/i830/i830_irq.c
33228+++ b/drivers/gpu/drm/i830/i830_irq.c
33229@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
33230
33231 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
33232
33233- atomic_inc(&dev_priv->irq_received);
33234+ atomic_inc_unchecked(&dev_priv->irq_received);
33235 wake_up_interruptible(&dev_priv->irq_queue);
33236
33237 return IRQ_HANDLED;
33238@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
33239
33240 DRM_DEBUG("%s\n", __func__);
33241
33242- atomic_inc(&dev_priv->irq_emitted);
33243+ atomic_inc_unchecked(&dev_priv->irq_emitted);
33244
33245 BEGIN_LP_RING(2);
33246 OUT_RING(0);
33247 OUT_RING(GFX_OP_USER_INTERRUPT);
33248 ADVANCE_LP_RING();
33249
33250- return atomic_read(&dev_priv->irq_emitted);
33251+ return atomic_read_unchecked(&dev_priv->irq_emitted);
33252 }
33253
33254 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33255@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33256
33257 DRM_DEBUG("%s\n", __func__);
33258
33259- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33260+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33261 return 0;
33262
33263 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
33264@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
33265
33266 for (;;) {
33267 __set_current_state(TASK_INTERRUPTIBLE);
33268- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33269+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33270 break;
33271 if ((signed)(end - jiffies) <= 0) {
33272 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33273@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33274 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33275 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33276 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33277- atomic_set(&dev_priv->irq_received, 0);
33278- atomic_set(&dev_priv->irq_emitted, 0);
33279+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33280+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33281 init_waitqueue_head(&dev_priv->irq_queue);
33282 }
33283
33284diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33285index 288fc50..c6092055 100644
33286--- a/drivers/gpu/drm/i915/dvo.h
33287+++ b/drivers/gpu/drm/i915/dvo.h
33288@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33289 *
33290 * \return singly-linked list of modes or NULL if no modes found.
33291 */
33292- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33293+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33294
33295 /**
33296 * Clean up driver-specific bits of the output
33297 */
33298- void (*destroy) (struct intel_dvo_device *dvo);
33299+ void (* const destroy) (struct intel_dvo_device *dvo);
33300
33301 /**
33302 * Debugging hook to dump device registers to log file
33303 */
33304- void (*dump_regs)(struct intel_dvo_device *dvo);
33305+ void (* const dump_regs)(struct intel_dvo_device *dvo);
33306 };
33307
33308-extern struct intel_dvo_dev_ops sil164_ops;
33309-extern struct intel_dvo_dev_ops ch7xxx_ops;
33310-extern struct intel_dvo_dev_ops ivch_ops;
33311-extern struct intel_dvo_dev_ops tfp410_ops;
33312-extern struct intel_dvo_dev_ops ch7017_ops;
33313+extern const struct intel_dvo_dev_ops sil164_ops;
33314+extern const struct intel_dvo_dev_ops ch7xxx_ops;
33315+extern const struct intel_dvo_dev_ops ivch_ops;
33316+extern const struct intel_dvo_dev_ops tfp410_ops;
33317+extern const struct intel_dvo_dev_ops ch7017_ops;
33318
33319 #endif /* _INTEL_DVO_H */
33320diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33321index 621815b..499d82e 100644
33322--- a/drivers/gpu/drm/i915/dvo_ch7017.c
33323+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33324@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33325 }
33326 }
33327
33328-struct intel_dvo_dev_ops ch7017_ops = {
33329+const struct intel_dvo_dev_ops ch7017_ops = {
33330 .init = ch7017_init,
33331 .detect = ch7017_detect,
33332 .mode_valid = ch7017_mode_valid,
33333diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33334index a9b8962..ac769ba 100644
33335--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33336+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33337@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33338 }
33339 }
33340
33341-struct intel_dvo_dev_ops ch7xxx_ops = {
33342+const struct intel_dvo_dev_ops ch7xxx_ops = {
33343 .init = ch7xxx_init,
33344 .detect = ch7xxx_detect,
33345 .mode_valid = ch7xxx_mode_valid,
33346diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33347index aa176f9..ed2930c 100644
33348--- a/drivers/gpu/drm/i915/dvo_ivch.c
33349+++ b/drivers/gpu/drm/i915/dvo_ivch.c
33350@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33351 }
33352 }
33353
33354-struct intel_dvo_dev_ops ivch_ops= {
33355+const struct intel_dvo_dev_ops ivch_ops= {
33356 .init = ivch_init,
33357 .dpms = ivch_dpms,
33358 .save = ivch_save,
33359diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33360index e1c1f73..7dbebcf 100644
33361--- a/drivers/gpu/drm/i915/dvo_sil164.c
33362+++ b/drivers/gpu/drm/i915/dvo_sil164.c
33363@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33364 }
33365 }
33366
33367-struct intel_dvo_dev_ops sil164_ops = {
33368+const struct intel_dvo_dev_ops sil164_ops = {
33369 .init = sil164_init,
33370 .detect = sil164_detect,
33371 .mode_valid = sil164_mode_valid,
33372diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33373index 16dce84..7e1b6f8 100644
33374--- a/drivers/gpu/drm/i915/dvo_tfp410.c
33375+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33376@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33377 }
33378 }
33379
33380-struct intel_dvo_dev_ops tfp410_ops = {
33381+const struct intel_dvo_dev_ops tfp410_ops = {
33382 .init = tfp410_init,
33383 .detect = tfp410_detect,
33384 .mode_valid = tfp410_mode_valid,
33385diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33386index 7e859d6..7d1cf2b 100644
33387--- a/drivers/gpu/drm/i915/i915_debugfs.c
33388+++ b/drivers/gpu/drm/i915/i915_debugfs.c
33389@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33390 I915_READ(GTIMR));
33391 }
33392 seq_printf(m, "Interrupts received: %d\n",
33393- atomic_read(&dev_priv->irq_received));
33394+ atomic_read_unchecked(&dev_priv->irq_received));
33395 if (dev_priv->hw_status_page != NULL) {
33396 seq_printf(m, "Current sequence: %d\n",
33397 i915_get_gem_seqno(dev));
33398diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33399index 5449239..7e4f68d 100644
33400--- a/drivers/gpu/drm/i915/i915_drv.c
33401+++ b/drivers/gpu/drm/i915/i915_drv.c
33402@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33403 return i915_resume(dev);
33404 }
33405
33406-static struct vm_operations_struct i915_gem_vm_ops = {
33407+static const struct vm_operations_struct i915_gem_vm_ops = {
33408 .fault = i915_gem_fault,
33409 .open = drm_gem_vm_open,
33410 .close = drm_gem_vm_close,
33411diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33412index 97163f7..c24c7c7 100644
33413--- a/drivers/gpu/drm/i915/i915_drv.h
33414+++ b/drivers/gpu/drm/i915/i915_drv.h
33415@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33416 /* display clock increase/decrease */
33417 /* pll clock increase/decrease */
33418 /* clock gating init */
33419-};
33420+} __no_const;
33421
33422 typedef struct drm_i915_private {
33423 struct drm_device *dev;
33424@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33425 int page_flipping;
33426
33427 wait_queue_head_t irq_queue;
33428- atomic_t irq_received;
33429+ atomic_unchecked_t irq_received;
33430 /** Protects user_irq_refcount and irq_mask_reg */
33431 spinlock_t user_irq_lock;
33432 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33433diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33434index 27a3074..eb3f959 100644
33435--- a/drivers/gpu/drm/i915/i915_gem.c
33436+++ b/drivers/gpu/drm/i915/i915_gem.c
33437@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33438
33439 args->aper_size = dev->gtt_total;
33440 args->aper_available_size = (args->aper_size -
33441- atomic_read(&dev->pin_memory));
33442+ atomic_read_unchecked(&dev->pin_memory));
33443
33444 return 0;
33445 }
33446@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33447
33448 if (obj_priv->gtt_space) {
33449 atomic_dec(&dev->gtt_count);
33450- atomic_sub(obj->size, &dev->gtt_memory);
33451+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33452
33453 drm_mm_put_block(obj_priv->gtt_space);
33454 obj_priv->gtt_space = NULL;
33455@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33456 goto search_free;
33457 }
33458 atomic_inc(&dev->gtt_count);
33459- atomic_add(obj->size, &dev->gtt_memory);
33460+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
33461
33462 /* Assert that the object is not currently in any GPU domain. As it
33463 * wasn't in the GTT, there shouldn't be any way it could have been in
33464@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33465 "%d/%d gtt bytes\n",
33466 atomic_read(&dev->object_count),
33467 atomic_read(&dev->pin_count),
33468- atomic_read(&dev->object_memory),
33469- atomic_read(&dev->pin_memory),
33470- atomic_read(&dev->gtt_memory),
33471+ atomic_read_unchecked(&dev->object_memory),
33472+ atomic_read_unchecked(&dev->pin_memory),
33473+ atomic_read_unchecked(&dev->gtt_memory),
33474 dev->gtt_total);
33475 }
33476 goto err;
33477@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33478 */
33479 if (obj_priv->pin_count == 1) {
33480 atomic_inc(&dev->pin_count);
33481- atomic_add(obj->size, &dev->pin_memory);
33482+ atomic_add_unchecked(obj->size, &dev->pin_memory);
33483 if (!obj_priv->active &&
33484 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33485 !list_empty(&obj_priv->list))
33486@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33487 list_move_tail(&obj_priv->list,
33488 &dev_priv->mm.inactive_list);
33489 atomic_dec(&dev->pin_count);
33490- atomic_sub(obj->size, &dev->pin_memory);
33491+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
33492 }
33493 i915_verify_inactive(dev, __FILE__, __LINE__);
33494 }
33495diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33496index 63f28ad..f5469da 100644
33497--- a/drivers/gpu/drm/i915/i915_irq.c
33498+++ b/drivers/gpu/drm/i915/i915_irq.c
33499@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33500 int irq_received;
33501 int ret = IRQ_NONE;
33502
33503- atomic_inc(&dev_priv->irq_received);
33504+ atomic_inc_unchecked(&dev_priv->irq_received);
33505
33506 if (IS_IGDNG(dev))
33507 return igdng_irq_handler(dev);
33508@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33509 {
33510 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33511
33512- atomic_set(&dev_priv->irq_received, 0);
33513+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33514
33515 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33516 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33517diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33518index 5d9c6a7..d1b0e29 100644
33519--- a/drivers/gpu/drm/i915/intel_sdvo.c
33520+++ b/drivers/gpu/drm/i915/intel_sdvo.c
33521@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33522 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33523
33524 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33525- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33526+ pax_open_kernel();
33527+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33528+ pax_close_kernel();
33529
33530 /* Read the regs to test if we can talk to the device */
33531 for (i = 0; i < 0x40; i++) {
33532diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33533index be6c6b9..8615d9c 100644
33534--- a/drivers/gpu/drm/mga/mga_drv.h
33535+++ b/drivers/gpu/drm/mga/mga_drv.h
33536@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33537 u32 clear_cmd;
33538 u32 maccess;
33539
33540- atomic_t vbl_received; /**< Number of vblanks received. */
33541+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33542 wait_queue_head_t fence_queue;
33543- atomic_t last_fence_retired;
33544+ atomic_unchecked_t last_fence_retired;
33545 u32 next_fence_to_post;
33546
33547 unsigned int fb_cpp;
33548diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33549index daa6041..a28a5da 100644
33550--- a/drivers/gpu/drm/mga/mga_irq.c
33551+++ b/drivers/gpu/drm/mga/mga_irq.c
33552@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33553 if (crtc != 0)
33554 return 0;
33555
33556- return atomic_read(&dev_priv->vbl_received);
33557+ return atomic_read_unchecked(&dev_priv->vbl_received);
33558 }
33559
33560
33561@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33562 /* VBLANK interrupt */
33563 if (status & MGA_VLINEPEN) {
33564 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33565- atomic_inc(&dev_priv->vbl_received);
33566+ atomic_inc_unchecked(&dev_priv->vbl_received);
33567 drm_handle_vblank(dev, 0);
33568 handled = 1;
33569 }
33570@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33571 MGA_WRITE(MGA_PRIMEND, prim_end);
33572 }
33573
33574- atomic_inc(&dev_priv->last_fence_retired);
33575+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33576 DRM_WAKEUP(&dev_priv->fence_queue);
33577 handled = 1;
33578 }
33579@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33580 * using fences.
33581 */
33582 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33583- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33584+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33585 - *sequence) <= (1 << 23)));
33586
33587 *sequence = cur_fence;
33588diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33589index 4c39a40..b22a9ea 100644
33590--- a/drivers/gpu/drm/r128/r128_cce.c
33591+++ b/drivers/gpu/drm/r128/r128_cce.c
33592@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33593
33594 /* GH: Simple idle check.
33595 */
33596- atomic_set(&dev_priv->idle_count, 0);
33597+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33598
33599 /* We don't support anything other than bus-mastering ring mode,
33600 * but the ring can be in either AGP or PCI space for the ring
33601diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33602index 3c60829..4faf484 100644
33603--- a/drivers/gpu/drm/r128/r128_drv.h
33604+++ b/drivers/gpu/drm/r128/r128_drv.h
33605@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33606 int is_pci;
33607 unsigned long cce_buffers_offset;
33608
33609- atomic_t idle_count;
33610+ atomic_unchecked_t idle_count;
33611
33612 int page_flipping;
33613 int current_page;
33614 u32 crtc_offset;
33615 u32 crtc_offset_cntl;
33616
33617- atomic_t vbl_received;
33618+ atomic_unchecked_t vbl_received;
33619
33620 u32 color_fmt;
33621 unsigned int front_offset;
33622diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33623index 69810fb..97bf17a 100644
33624--- a/drivers/gpu/drm/r128/r128_irq.c
33625+++ b/drivers/gpu/drm/r128/r128_irq.c
33626@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33627 if (crtc != 0)
33628 return 0;
33629
33630- return atomic_read(&dev_priv->vbl_received);
33631+ return atomic_read_unchecked(&dev_priv->vbl_received);
33632 }
33633
33634 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33635@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33636 /* VBLANK interrupt */
33637 if (status & R128_CRTC_VBLANK_INT) {
33638 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33639- atomic_inc(&dev_priv->vbl_received);
33640+ atomic_inc_unchecked(&dev_priv->vbl_received);
33641 drm_handle_vblank(dev, 0);
33642 return IRQ_HANDLED;
33643 }
33644diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33645index af2665c..51922d2 100644
33646--- a/drivers/gpu/drm/r128/r128_state.c
33647+++ b/drivers/gpu/drm/r128/r128_state.c
33648@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33649
33650 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33651 {
33652- if (atomic_read(&dev_priv->idle_count) == 0) {
33653+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33654 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33655 } else {
33656- atomic_set(&dev_priv->idle_count, 0);
33657+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33658 }
33659 }
33660
33661diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33662index dd72b91..8644b3c 100644
33663--- a/drivers/gpu/drm/radeon/atom.c
33664+++ b/drivers/gpu/drm/radeon/atom.c
33665@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33666 char name[512];
33667 int i;
33668
33669+ pax_track_stack();
33670+
33671 ctx->card = card;
33672 ctx->bios = bios;
33673
33674diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33675index 0d79577..efaa7a5 100644
33676--- a/drivers/gpu/drm/radeon/mkregtable.c
33677+++ b/drivers/gpu/drm/radeon/mkregtable.c
33678@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33679 regex_t mask_rex;
33680 regmatch_t match[4];
33681 char buf[1024];
33682- size_t end;
33683+ long end;
33684 int len;
33685 int done = 0;
33686 int r;
33687 unsigned o;
33688 struct offset *offset;
33689 char last_reg_s[10];
33690- int last_reg;
33691+ unsigned long last_reg;
33692
33693 if (regcomp
33694 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33695diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33696index 6735213..38c2c67 100644
33697--- a/drivers/gpu/drm/radeon/radeon.h
33698+++ b/drivers/gpu/drm/radeon/radeon.h
33699@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33700 */
33701 struct radeon_fence_driver {
33702 uint32_t scratch_reg;
33703- atomic_t seq;
33704+ atomic_unchecked_t seq;
33705 uint32_t last_seq;
33706 unsigned long count_timeout;
33707 wait_queue_head_t queue;
33708@@ -640,7 +640,7 @@ struct radeon_asic {
33709 uint32_t offset, uint32_t obj_size);
33710 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33711 void (*bandwidth_update)(struct radeon_device *rdev);
33712-};
33713+} __no_const;
33714
33715 /*
33716 * Asic structures
33717diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33718index 4e928b9..d8b6008 100644
33719--- a/drivers/gpu/drm/radeon/radeon_atombios.c
33720+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33721@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33722 bool linkb;
33723 struct radeon_i2c_bus_rec ddc_bus;
33724
33725+ pax_track_stack();
33726+
33727 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33728
33729 if (data_offset == 0)
33730@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33731 }
33732 }
33733
33734-struct bios_connector {
33735+static struct bios_connector {
33736 bool valid;
33737 uint16_t line_mux;
33738 uint16_t devices;
33739 int connector_type;
33740 struct radeon_i2c_bus_rec ddc_bus;
33741-};
33742+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33743
33744 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33745 drm_device
33746@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33747 uint8_t dac;
33748 union atom_supported_devices *supported_devices;
33749 int i, j;
33750- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33751
33752 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33753
33754diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33755index 083a181..ccccae0 100644
33756--- a/drivers/gpu/drm/radeon/radeon_display.c
33757+++ b/drivers/gpu/drm/radeon/radeon_display.c
33758@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33759
33760 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33761 error = freq - current_freq;
33762- error = error < 0 ? 0xffffffff : error;
33763+ error = (int32_t)error < 0 ? 0xffffffff : error;
33764 } else
33765 error = abs(current_freq - freq);
33766 vco_diff = abs(vco - best_vco);
33767diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33768index 76e4070..193fa7f 100644
33769--- a/drivers/gpu/drm/radeon/radeon_drv.h
33770+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33771@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33772
33773 /* SW interrupt */
33774 wait_queue_head_t swi_queue;
33775- atomic_t swi_emitted;
33776+ atomic_unchecked_t swi_emitted;
33777 int vblank_crtc;
33778 uint32_t irq_enable_reg;
33779 uint32_t r500_disp_irq_reg;
33780diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33781index 3beb26d..6ce9c4a 100644
33782--- a/drivers/gpu/drm/radeon/radeon_fence.c
33783+++ b/drivers/gpu/drm/radeon/radeon_fence.c
33784@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33785 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33786 return 0;
33787 }
33788- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33789+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33790 if (!rdev->cp.ready) {
33791 /* FIXME: cp is not running assume everythings is done right
33792 * away
33793@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33794 return r;
33795 }
33796 WREG32(rdev->fence_drv.scratch_reg, 0);
33797- atomic_set(&rdev->fence_drv.seq, 0);
33798+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33799 INIT_LIST_HEAD(&rdev->fence_drv.created);
33800 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33801 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33802diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33803index a1bf11d..4a123c0 100644
33804--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33805+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33806@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33807 request = compat_alloc_user_space(sizeof(*request));
33808 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33809 || __put_user(req32.param, &request->param)
33810- || __put_user((void __user *)(unsigned long)req32.value,
33811+ || __put_user((unsigned long)req32.value,
33812 &request->value))
33813 return -EFAULT;
33814
33815diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33816index b79ecc4..8dab92d 100644
33817--- a/drivers/gpu/drm/radeon/radeon_irq.c
33818+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33819@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33820 unsigned int ret;
33821 RING_LOCALS;
33822
33823- atomic_inc(&dev_priv->swi_emitted);
33824- ret = atomic_read(&dev_priv->swi_emitted);
33825+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33826+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33827
33828 BEGIN_RING(4);
33829 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33830@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33831 drm_radeon_private_t *dev_priv =
33832 (drm_radeon_private_t *) dev->dev_private;
33833
33834- atomic_set(&dev_priv->swi_emitted, 0);
33835+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33836 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33837
33838 dev->max_vblank_count = 0x001fffff;
33839diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33840index 4747910..48ca4b3 100644
33841--- a/drivers/gpu/drm/radeon/radeon_state.c
33842+++ b/drivers/gpu/drm/radeon/radeon_state.c
33843@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33844 {
33845 drm_radeon_private_t *dev_priv = dev->dev_private;
33846 drm_radeon_getparam_t *param = data;
33847- int value;
33848+ int value = 0;
33849
33850 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33851
33852diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33853index 1381e06..0e53b17 100644
33854--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33855+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33856@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33857 DRM_INFO("radeon: ttm finalized\n");
33858 }
33859
33860-static struct vm_operations_struct radeon_ttm_vm_ops;
33861-static const struct vm_operations_struct *ttm_vm_ops = NULL;
33862-
33863-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33864-{
33865- struct ttm_buffer_object *bo;
33866- int r;
33867-
33868- bo = (struct ttm_buffer_object *)vma->vm_private_data;
33869- if (bo == NULL) {
33870- return VM_FAULT_NOPAGE;
33871- }
33872- r = ttm_vm_ops->fault(vma, vmf);
33873- return r;
33874-}
33875-
33876 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33877 {
33878 struct drm_file *file_priv;
33879 struct radeon_device *rdev;
33880- int r;
33881
33882 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33883 return drm_mmap(filp, vma);
33884@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33885
33886 file_priv = (struct drm_file *)filp->private_data;
33887 rdev = file_priv->minor->dev->dev_private;
33888- if (rdev == NULL) {
33889+ if (!rdev)
33890 return -EINVAL;
33891- }
33892- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33893- if (unlikely(r != 0)) {
33894- return r;
33895- }
33896- if (unlikely(ttm_vm_ops == NULL)) {
33897- ttm_vm_ops = vma->vm_ops;
33898- radeon_ttm_vm_ops = *ttm_vm_ops;
33899- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33900- }
33901- vma->vm_ops = &radeon_ttm_vm_ops;
33902- return 0;
33903+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33904 }
33905
33906
33907diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33908index b12ff76..0bd0c6e 100644
33909--- a/drivers/gpu/drm/radeon/rs690.c
33910+++ b/drivers/gpu/drm/radeon/rs690.c
33911@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33912 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33913 rdev->pm.sideport_bandwidth.full)
33914 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33915- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33916+ read_delay_latency.full = rfixed_const(800 * 1000);
33917 read_delay_latency.full = rfixed_div(read_delay_latency,
33918 rdev->pm.igp_sideport_mclk);
33919+ a.full = rfixed_const(370);
33920+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33921 } else {
33922 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33923 rdev->pm.k8_bandwidth.full)
33924diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33925index 0ed436e..e6e7ce3 100644
33926--- a/drivers/gpu/drm/ttm/ttm_bo.c
33927+++ b/drivers/gpu/drm/ttm/ttm_bo.c
33928@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33929 NULL
33930 };
33931
33932-static struct sysfs_ops ttm_bo_global_ops = {
33933+static const struct sysfs_ops ttm_bo_global_ops = {
33934 .show = &ttm_bo_global_show
33935 };
33936
33937diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33938index 1c040d0..f9e4af8 100644
33939--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33940+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33941@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33942 {
33943 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33944 vma->vm_private_data;
33945- struct ttm_bo_device *bdev = bo->bdev;
33946+ struct ttm_bo_device *bdev;
33947 unsigned long bus_base;
33948 unsigned long bus_offset;
33949 unsigned long bus_size;
33950@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33951 unsigned long address = (unsigned long)vmf->virtual_address;
33952 int retval = VM_FAULT_NOPAGE;
33953
33954+ if (!bo)
33955+ return VM_FAULT_NOPAGE;
33956+ bdev = bo->bdev;
33957+
33958 /*
33959 * Work around locking order reversal in fault / nopfn
33960 * between mmap_sem and bo_reserve: Perform a trylock operation
33961diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33962index b170071..28ae90e 100644
33963--- a/drivers/gpu/drm/ttm/ttm_global.c
33964+++ b/drivers/gpu/drm/ttm/ttm_global.c
33965@@ -36,7 +36,7 @@
33966 struct ttm_global_item {
33967 struct mutex mutex;
33968 void *object;
33969- int refcount;
33970+ atomic_t refcount;
33971 };
33972
33973 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33974@@ -49,7 +49,7 @@ void ttm_global_init(void)
33975 struct ttm_global_item *item = &glob[i];
33976 mutex_init(&item->mutex);
33977 item->object = NULL;
33978- item->refcount = 0;
33979+ atomic_set(&item->refcount, 0);
33980 }
33981 }
33982
33983@@ -59,7 +59,7 @@ void ttm_global_release(void)
33984 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33985 struct ttm_global_item *item = &glob[i];
33986 BUG_ON(item->object != NULL);
33987- BUG_ON(item->refcount != 0);
33988+ BUG_ON(atomic_read(&item->refcount) != 0);
33989 }
33990 }
33991
33992@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33993 void *object;
33994
33995 mutex_lock(&item->mutex);
33996- if (item->refcount == 0) {
33997+ if (atomic_read(&item->refcount) == 0) {
33998 item->object = kzalloc(ref->size, GFP_KERNEL);
33999 if (unlikely(item->object == NULL)) {
34000 ret = -ENOMEM;
34001@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34002 goto out_err;
34003
34004 }
34005- ++item->refcount;
34006+ atomic_inc(&item->refcount);
34007 ref->object = item->object;
34008 object = item->object;
34009 mutex_unlock(&item->mutex);
34010@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
34011 struct ttm_global_item *item = &glob[ref->global_type];
34012
34013 mutex_lock(&item->mutex);
34014- BUG_ON(item->refcount == 0);
34015+ BUG_ON(atomic_read(&item->refcount) == 0);
34016 BUG_ON(ref->object != item->object);
34017- if (--item->refcount == 0) {
34018+ if (atomic_dec_and_test(&item->refcount)) {
34019 ref->release(ref);
34020 item->object = NULL;
34021 }
34022diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
34023index 072c281..d8ef483 100644
34024--- a/drivers/gpu/drm/ttm/ttm_memory.c
34025+++ b/drivers/gpu/drm/ttm/ttm_memory.c
34026@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
34027 NULL
34028 };
34029
34030-static struct sysfs_ops ttm_mem_zone_ops = {
34031+static const struct sysfs_ops ttm_mem_zone_ops = {
34032 .show = &ttm_mem_zone_show,
34033 .store = &ttm_mem_zone_store
34034 };
34035diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
34036index cafcb84..b8e66cc 100644
34037--- a/drivers/gpu/drm/via/via_drv.h
34038+++ b/drivers/gpu/drm/via/via_drv.h
34039@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
34040 typedef uint32_t maskarray_t[5];
34041
34042 typedef struct drm_via_irq {
34043- atomic_t irq_received;
34044+ atomic_unchecked_t irq_received;
34045 uint32_t pending_mask;
34046 uint32_t enable_mask;
34047 wait_queue_head_t irq_queue;
34048@@ -75,7 +75,7 @@ typedef struct drm_via_private {
34049 struct timeval last_vblank;
34050 int last_vblank_valid;
34051 unsigned usec_per_vblank;
34052- atomic_t vbl_received;
34053+ atomic_unchecked_t vbl_received;
34054 drm_via_state_t hc_state;
34055 char pci_buf[VIA_PCI_BUF_SIZE];
34056 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34057diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34058index 5935b88..127a8a6 100644
34059--- a/drivers/gpu/drm/via/via_irq.c
34060+++ b/drivers/gpu/drm/via/via_irq.c
34061@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
34062 if (crtc != 0)
34063 return 0;
34064
34065- return atomic_read(&dev_priv->vbl_received);
34066+ return atomic_read_unchecked(&dev_priv->vbl_received);
34067 }
34068
34069 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34070@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34071
34072 status = VIA_READ(VIA_REG_INTERRUPT);
34073 if (status & VIA_IRQ_VBLANK_PENDING) {
34074- atomic_inc(&dev_priv->vbl_received);
34075- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
34076+ atomic_inc_unchecked(&dev_priv->vbl_received);
34077+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
34078 do_gettimeofday(&cur_vblank);
34079 if (dev_priv->last_vblank_valid) {
34080 dev_priv->usec_per_vblank =
34081@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34082 dev_priv->last_vblank = cur_vblank;
34083 dev_priv->last_vblank_valid = 1;
34084 }
34085- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
34086+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
34087 DRM_DEBUG("US per vblank is: %u\n",
34088 dev_priv->usec_per_vblank);
34089 }
34090@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
34091
34092 for (i = 0; i < dev_priv->num_irqs; ++i) {
34093 if (status & cur_irq->pending_mask) {
34094- atomic_inc(&cur_irq->irq_received);
34095+ atomic_inc_unchecked(&cur_irq->irq_received);
34096 DRM_WAKEUP(&cur_irq->irq_queue);
34097 handled = 1;
34098 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
34099@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
34100 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34101 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
34102 masks[irq][4]));
34103- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
34104+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
34105 } else {
34106 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
34107 (((cur_irq_sequence =
34108- atomic_read(&cur_irq->irq_received)) -
34109+ atomic_read_unchecked(&cur_irq->irq_received)) -
34110 *sequence) <= (1 << 23)));
34111 }
34112 *sequence = cur_irq_sequence;
34113@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
34114 }
34115
34116 for (i = 0; i < dev_priv->num_irqs; ++i) {
34117- atomic_set(&cur_irq->irq_received, 0);
34118+ atomic_set_unchecked(&cur_irq->irq_received, 0);
34119 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
34120 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
34121 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
34122@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
34123 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
34124 case VIA_IRQ_RELATIVE:
34125 irqwait->request.sequence +=
34126- atomic_read(&cur_irq->irq_received);
34127+ atomic_read_unchecked(&cur_irq->irq_received);
34128 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
34129 case VIA_IRQ_ABSOLUTE:
34130 break;
34131diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
34132index aa8688d..6a0140c 100644
34133--- a/drivers/gpu/vga/vgaarb.c
34134+++ b/drivers/gpu/vga/vgaarb.c
34135@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
34136 uc = &priv->cards[i];
34137 }
34138
34139- if (!uc)
34140- return -EINVAL;
34141+ if (!uc) {
34142+ ret_val = -EINVAL;
34143+ goto done;
34144+ }
34145
34146- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
34147- return -EINVAL;
34148+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
34149+ ret_val = -EINVAL;
34150+ goto done;
34151+ }
34152
34153- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
34154- return -EINVAL;
34155+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
34156+ ret_val = -EINVAL;
34157+ goto done;
34158+ }
34159
34160 vga_put(pdev, io_state);
34161
34162diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
34163index 11f8069..4783396 100644
34164--- a/drivers/hid/hid-core.c
34165+++ b/drivers/hid/hid-core.c
34166@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
34167
34168 int hid_add_device(struct hid_device *hdev)
34169 {
34170- static atomic_t id = ATOMIC_INIT(0);
34171+ static atomic_unchecked_t id = ATOMIC_INIT(0);
34172 int ret;
34173
34174 if (WARN_ON(hdev->status & HID_STAT_ADDED))
34175@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
34176 /* XXX hack, any other cleaner solution after the driver core
34177 * is converted to allow more than 20 bytes as the device name? */
34178 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
34179- hdev->vendor, hdev->product, atomic_inc_return(&id));
34180+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
34181
34182 ret = device_add(&hdev->dev);
34183 if (!ret)
34184diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
34185index 8b6ee24..70f657d 100644
34186--- a/drivers/hid/usbhid/hiddev.c
34187+++ b/drivers/hid/usbhid/hiddev.c
34188@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
34189 return put_user(HID_VERSION, (int __user *)arg);
34190
34191 case HIDIOCAPPLICATION:
34192- if (arg < 0 || arg >= hid->maxapplication)
34193+ if (arg >= hid->maxapplication)
34194 return -EINVAL;
34195
34196 for (i = 0; i < hid->maxcollection; i++)
34197diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
34198index 5d5ed69..f40533e 100644
34199--- a/drivers/hwmon/lis3lv02d.c
34200+++ b/drivers/hwmon/lis3lv02d.c
34201@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
34202 * the lid is closed. This leads to interrupts as soon as a little move
34203 * is done.
34204 */
34205- atomic_inc(&lis3_dev.count);
34206+ atomic_inc_unchecked(&lis3_dev.count);
34207
34208 wake_up_interruptible(&lis3_dev.misc_wait);
34209 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
34210@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34211 if (test_and_set_bit(0, &lis3_dev.misc_opened))
34212 return -EBUSY; /* already open */
34213
34214- atomic_set(&lis3_dev.count, 0);
34215+ atomic_set_unchecked(&lis3_dev.count, 0);
34216
34217 /*
34218 * The sensor can generate interrupts for free-fall and direction
34219@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34220 add_wait_queue(&lis3_dev.misc_wait, &wait);
34221 while (true) {
34222 set_current_state(TASK_INTERRUPTIBLE);
34223- data = atomic_xchg(&lis3_dev.count, 0);
34224+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
34225 if (data)
34226 break;
34227
34228@@ -244,7 +244,7 @@ out:
34229 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34230 {
34231 poll_wait(file, &lis3_dev.misc_wait, wait);
34232- if (atomic_read(&lis3_dev.count))
34233+ if (atomic_read_unchecked(&lis3_dev.count))
34234 return POLLIN | POLLRDNORM;
34235 return 0;
34236 }
34237diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
34238index 7cdd76f..fe0efdf 100644
34239--- a/drivers/hwmon/lis3lv02d.h
34240+++ b/drivers/hwmon/lis3lv02d.h
34241@@ -201,7 +201,7 @@ struct lis3lv02d {
34242
34243 struct input_polled_dev *idev; /* input device */
34244 struct platform_device *pdev; /* platform device */
34245- atomic_t count; /* interrupt count after last read */
34246+ atomic_unchecked_t count; /* interrupt count after last read */
34247 int xcalib; /* calibrated null value for x */
34248 int ycalib; /* calibrated null value for y */
34249 int zcalib; /* calibrated null value for z */
34250diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
34251index 740785e..5a5c6c6 100644
34252--- a/drivers/hwmon/sht15.c
34253+++ b/drivers/hwmon/sht15.c
34254@@ -112,7 +112,7 @@ struct sht15_data {
34255 int supply_uV;
34256 int supply_uV_valid;
34257 struct work_struct update_supply_work;
34258- atomic_t interrupt_handled;
34259+ atomic_unchecked_t interrupt_handled;
34260 };
34261
34262 /**
34263@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
34264 return ret;
34265
34266 gpio_direction_input(data->pdata->gpio_data);
34267- atomic_set(&data->interrupt_handled, 0);
34268+ atomic_set_unchecked(&data->interrupt_handled, 0);
34269
34270 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34271 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34272 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34273 /* Only relevant if the interrupt hasn't occured. */
34274- if (!atomic_read(&data->interrupt_handled))
34275+ if (!atomic_read_unchecked(&data->interrupt_handled))
34276 schedule_work(&data->read_work);
34277 }
34278 ret = wait_event_timeout(data->wait_queue,
34279@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34280 struct sht15_data *data = d;
34281 /* First disable the interrupt */
34282 disable_irq_nosync(irq);
34283- atomic_inc(&data->interrupt_handled);
34284+ atomic_inc_unchecked(&data->interrupt_handled);
34285 /* Then schedule a reading work struct */
34286 if (data->flag != SHT15_READING_NOTHING)
34287 schedule_work(&data->read_work);
34288@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34289 here as could have gone low in meantime so verify
34290 it hasn't!
34291 */
34292- atomic_set(&data->interrupt_handled, 0);
34293+ atomic_set_unchecked(&data->interrupt_handled, 0);
34294 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34295 /* If still not occured or another handler has been scheduled */
34296 if (gpio_get_value(data->pdata->gpio_data)
34297- || atomic_read(&data->interrupt_handled))
34298+ || atomic_read_unchecked(&data->interrupt_handled))
34299 return;
34300 }
34301 /* Read the data back from the device */
34302diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34303index 97851c5..cb40626 100644
34304--- a/drivers/hwmon/w83791d.c
34305+++ b/drivers/hwmon/w83791d.c
34306@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34307 struct i2c_board_info *info);
34308 static int w83791d_remove(struct i2c_client *client);
34309
34310-static int w83791d_read(struct i2c_client *client, u8 register);
34311-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34312+static int w83791d_read(struct i2c_client *client, u8 reg);
34313+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34314 static struct w83791d_data *w83791d_update_device(struct device *dev);
34315
34316 #ifdef DEBUG
34317diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34318index 378fcb5..5e91fa8 100644
34319--- a/drivers/i2c/busses/i2c-amd756-s4882.c
34320+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34321@@ -43,7 +43,7 @@
34322 extern struct i2c_adapter amd756_smbus;
34323
34324 static struct i2c_adapter *s4882_adapter;
34325-static struct i2c_algorithm *s4882_algo;
34326+static i2c_algorithm_no_const *s4882_algo;
34327
34328 /* Wrapper access functions for multiplexed SMBus */
34329 static DEFINE_MUTEX(amd756_lock);
34330diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34331index 29015eb..af2d8e9 100644
34332--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34333+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34334@@ -41,7 +41,7 @@
34335 extern struct i2c_adapter *nforce2_smbus;
34336
34337 static struct i2c_adapter *s4985_adapter;
34338-static struct i2c_algorithm *s4985_algo;
34339+static i2c_algorithm_no_const *s4985_algo;
34340
34341 /* Wrapper access functions for multiplexed SMBus */
34342 static DEFINE_MUTEX(nforce2_lock);
34343diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34344index 878f8ec..12376fc 100644
34345--- a/drivers/ide/aec62xx.c
34346+++ b/drivers/ide/aec62xx.c
34347@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34348 .cable_detect = atp86x_cable_detect,
34349 };
34350
34351-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34352+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34353 { /* 0: AEC6210 */
34354 .name = DRV_NAME,
34355 .init_chipset = init_chipset_aec62xx,
34356diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34357index e59b6de..4b4fc65 100644
34358--- a/drivers/ide/alim15x3.c
34359+++ b/drivers/ide/alim15x3.c
34360@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34361 .dma_sff_read_status = ide_dma_sff_read_status,
34362 };
34363
34364-static const struct ide_port_info ali15x3_chipset __devinitdata = {
34365+static const struct ide_port_info ali15x3_chipset __devinitconst = {
34366 .name = DRV_NAME,
34367 .init_chipset = init_chipset_ali15x3,
34368 .init_hwif = init_hwif_ali15x3,
34369diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34370index 628cd2e..087a414 100644
34371--- a/drivers/ide/amd74xx.c
34372+++ b/drivers/ide/amd74xx.c
34373@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34374 .udma_mask = udma, \
34375 }
34376
34377-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34378+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34379 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34380 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34381 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34382diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34383index 837322b..837fd71 100644
34384--- a/drivers/ide/atiixp.c
34385+++ b/drivers/ide/atiixp.c
34386@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34387 .cable_detect = atiixp_cable_detect,
34388 };
34389
34390-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34391+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34392 { /* 0: IXP200/300/400/700 */
34393 .name = DRV_NAME,
34394 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34395diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34396index ca0c46f..d55318a 100644
34397--- a/drivers/ide/cmd64x.c
34398+++ b/drivers/ide/cmd64x.c
34399@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34400 .dma_sff_read_status = ide_dma_sff_read_status,
34401 };
34402
34403-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34404+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34405 { /* 0: CMD643 */
34406 .name = DRV_NAME,
34407 .init_chipset = init_chipset_cmd64x,
34408diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34409index 09f98ed..cebc5bc 100644
34410--- a/drivers/ide/cs5520.c
34411+++ b/drivers/ide/cs5520.c
34412@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34413 .set_dma_mode = cs5520_set_dma_mode,
34414 };
34415
34416-static const struct ide_port_info cyrix_chipset __devinitdata = {
34417+static const struct ide_port_info cyrix_chipset __devinitconst = {
34418 .name = DRV_NAME,
34419 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34420 .port_ops = &cs5520_port_ops,
34421diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34422index 40bf05e..7d58ca0 100644
34423--- a/drivers/ide/cs5530.c
34424+++ b/drivers/ide/cs5530.c
34425@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34426 .udma_filter = cs5530_udma_filter,
34427 };
34428
34429-static const struct ide_port_info cs5530_chipset __devinitdata = {
34430+static const struct ide_port_info cs5530_chipset __devinitconst = {
34431 .name = DRV_NAME,
34432 .init_chipset = init_chipset_cs5530,
34433 .init_hwif = init_hwif_cs5530,
34434diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34435index 983d957..53e6172 100644
34436--- a/drivers/ide/cs5535.c
34437+++ b/drivers/ide/cs5535.c
34438@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34439 .cable_detect = cs5535_cable_detect,
34440 };
34441
34442-static const struct ide_port_info cs5535_chipset __devinitdata = {
34443+static const struct ide_port_info cs5535_chipset __devinitconst = {
34444 .name = DRV_NAME,
34445 .port_ops = &cs5535_port_ops,
34446 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34447diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34448index 74fc540..8e933d8 100644
34449--- a/drivers/ide/cy82c693.c
34450+++ b/drivers/ide/cy82c693.c
34451@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34452 .set_dma_mode = cy82c693_set_dma_mode,
34453 };
34454
34455-static const struct ide_port_info cy82c693_chipset __devinitdata = {
34456+static const struct ide_port_info cy82c693_chipset __devinitconst = {
34457 .name = DRV_NAME,
34458 .init_iops = init_iops_cy82c693,
34459 .port_ops = &cy82c693_port_ops,
34460diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34461index 7ce68ef..e78197d 100644
34462--- a/drivers/ide/hpt366.c
34463+++ b/drivers/ide/hpt366.c
34464@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34465 }
34466 };
34467
34468-static const struct hpt_info hpt36x __devinitdata = {
34469+static const struct hpt_info hpt36x __devinitconst = {
34470 .chip_name = "HPT36x",
34471 .chip_type = HPT36x,
34472 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34473@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34474 .timings = &hpt36x_timings
34475 };
34476
34477-static const struct hpt_info hpt370 __devinitdata = {
34478+static const struct hpt_info hpt370 __devinitconst = {
34479 .chip_name = "HPT370",
34480 .chip_type = HPT370,
34481 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34482@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34483 .timings = &hpt37x_timings
34484 };
34485
34486-static const struct hpt_info hpt370a __devinitdata = {
34487+static const struct hpt_info hpt370a __devinitconst = {
34488 .chip_name = "HPT370A",
34489 .chip_type = HPT370A,
34490 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34491@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34492 .timings = &hpt37x_timings
34493 };
34494
34495-static const struct hpt_info hpt374 __devinitdata = {
34496+static const struct hpt_info hpt374 __devinitconst = {
34497 .chip_name = "HPT374",
34498 .chip_type = HPT374,
34499 .udma_mask = ATA_UDMA5,
34500@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34501 .timings = &hpt37x_timings
34502 };
34503
34504-static const struct hpt_info hpt372 __devinitdata = {
34505+static const struct hpt_info hpt372 __devinitconst = {
34506 .chip_name = "HPT372",
34507 .chip_type = HPT372,
34508 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34509@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34510 .timings = &hpt37x_timings
34511 };
34512
34513-static const struct hpt_info hpt372a __devinitdata = {
34514+static const struct hpt_info hpt372a __devinitconst = {
34515 .chip_name = "HPT372A",
34516 .chip_type = HPT372A,
34517 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34518@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34519 .timings = &hpt37x_timings
34520 };
34521
34522-static const struct hpt_info hpt302 __devinitdata = {
34523+static const struct hpt_info hpt302 __devinitconst = {
34524 .chip_name = "HPT302",
34525 .chip_type = HPT302,
34526 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34527@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34528 .timings = &hpt37x_timings
34529 };
34530
34531-static const struct hpt_info hpt371 __devinitdata = {
34532+static const struct hpt_info hpt371 __devinitconst = {
34533 .chip_name = "HPT371",
34534 .chip_type = HPT371,
34535 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34536@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34537 .timings = &hpt37x_timings
34538 };
34539
34540-static const struct hpt_info hpt372n __devinitdata = {
34541+static const struct hpt_info hpt372n __devinitconst = {
34542 .chip_name = "HPT372N",
34543 .chip_type = HPT372N,
34544 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34545@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34546 .timings = &hpt37x_timings
34547 };
34548
34549-static const struct hpt_info hpt302n __devinitdata = {
34550+static const struct hpt_info hpt302n __devinitconst = {
34551 .chip_name = "HPT302N",
34552 .chip_type = HPT302N,
34553 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34554@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34555 .timings = &hpt37x_timings
34556 };
34557
34558-static const struct hpt_info hpt371n __devinitdata = {
34559+static const struct hpt_info hpt371n __devinitconst = {
34560 .chip_name = "HPT371N",
34561 .chip_type = HPT371N,
34562 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34563@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34564 .dma_sff_read_status = ide_dma_sff_read_status,
34565 };
34566
34567-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34568+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34569 { /* 0: HPT36x */
34570 .name = DRV_NAME,
34571 .init_chipset = init_chipset_hpt366,
34572diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34573index 2de76cc..74186a1 100644
34574--- a/drivers/ide/ide-cd.c
34575+++ b/drivers/ide/ide-cd.c
34576@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34577 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34578 if ((unsigned long)buf & alignment
34579 || blk_rq_bytes(rq) & q->dma_pad_mask
34580- || object_is_on_stack(buf))
34581+ || object_starts_on_stack(buf))
34582 drive->dma = 0;
34583 }
34584 }
34585diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34586index fefbdfc..62ff465 100644
34587--- a/drivers/ide/ide-floppy.c
34588+++ b/drivers/ide/ide-floppy.c
34589@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34590 u8 pc_buf[256], header_len, desc_cnt;
34591 int i, rc = 1, blocks, length;
34592
34593+ pax_track_stack();
34594+
34595 ide_debug_log(IDE_DBG_FUNC, "enter");
34596
34597 drive->bios_cyl = 0;
34598diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34599index 39d4e01..11538ce 100644
34600--- a/drivers/ide/ide-pci-generic.c
34601+++ b/drivers/ide/ide-pci-generic.c
34602@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34603 .udma_mask = ATA_UDMA6, \
34604 }
34605
34606-static const struct ide_port_info generic_chipsets[] __devinitdata = {
34607+static const struct ide_port_info generic_chipsets[] __devinitconst = {
34608 /* 0: Unknown */
34609 DECLARE_GENERIC_PCI_DEV(0),
34610
34611diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34612index 0d266a5..aaca790 100644
34613--- a/drivers/ide/it8172.c
34614+++ b/drivers/ide/it8172.c
34615@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34616 .set_dma_mode = it8172_set_dma_mode,
34617 };
34618
34619-static const struct ide_port_info it8172_port_info __devinitdata = {
34620+static const struct ide_port_info it8172_port_info __devinitconst = {
34621 .name = DRV_NAME,
34622 .port_ops = &it8172_port_ops,
34623 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34624diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34625index 4797616..4be488a 100644
34626--- a/drivers/ide/it8213.c
34627+++ b/drivers/ide/it8213.c
34628@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34629 .cable_detect = it8213_cable_detect,
34630 };
34631
34632-static const struct ide_port_info it8213_chipset __devinitdata = {
34633+static const struct ide_port_info it8213_chipset __devinitconst = {
34634 .name = DRV_NAME,
34635 .enablebits = { {0x41, 0x80, 0x80} },
34636 .port_ops = &it8213_port_ops,
34637diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34638index 51aa745..146ee60 100644
34639--- a/drivers/ide/it821x.c
34640+++ b/drivers/ide/it821x.c
34641@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34642 .cable_detect = it821x_cable_detect,
34643 };
34644
34645-static const struct ide_port_info it821x_chipset __devinitdata = {
34646+static const struct ide_port_info it821x_chipset __devinitconst = {
34647 .name = DRV_NAME,
34648 .init_chipset = init_chipset_it821x,
34649 .init_hwif = init_hwif_it821x,
34650diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34651index bf2be64..9270098 100644
34652--- a/drivers/ide/jmicron.c
34653+++ b/drivers/ide/jmicron.c
34654@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34655 .cable_detect = jmicron_cable_detect,
34656 };
34657
34658-static const struct ide_port_info jmicron_chipset __devinitdata = {
34659+static const struct ide_port_info jmicron_chipset __devinitconst = {
34660 .name = DRV_NAME,
34661 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34662 .port_ops = &jmicron_port_ops,
34663diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34664index 95327a2..73f78d8 100644
34665--- a/drivers/ide/ns87415.c
34666+++ b/drivers/ide/ns87415.c
34667@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34668 .dma_sff_read_status = superio_dma_sff_read_status,
34669 };
34670
34671-static const struct ide_port_info ns87415_chipset __devinitdata = {
34672+static const struct ide_port_info ns87415_chipset __devinitconst = {
34673 .name = DRV_NAME,
34674 .init_hwif = init_hwif_ns87415,
34675 .tp_ops = &ns87415_tp_ops,
34676diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34677index f1d70d6..e1de05b 100644
34678--- a/drivers/ide/opti621.c
34679+++ b/drivers/ide/opti621.c
34680@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34681 .set_pio_mode = opti621_set_pio_mode,
34682 };
34683
34684-static const struct ide_port_info opti621_chipset __devinitdata = {
34685+static const struct ide_port_info opti621_chipset __devinitconst = {
34686 .name = DRV_NAME,
34687 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34688 .port_ops = &opti621_port_ops,
34689diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34690index 65ba823..7311f4d 100644
34691--- a/drivers/ide/pdc202xx_new.c
34692+++ b/drivers/ide/pdc202xx_new.c
34693@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34694 .udma_mask = udma, \
34695 }
34696
34697-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34698+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34699 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34700 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34701 };
34702diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34703index cb812f3..af816ef 100644
34704--- a/drivers/ide/pdc202xx_old.c
34705+++ b/drivers/ide/pdc202xx_old.c
34706@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34707 .max_sectors = sectors, \
34708 }
34709
34710-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34711+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34712 { /* 0: PDC20246 */
34713 .name = DRV_NAME,
34714 .init_chipset = init_chipset_pdc202xx,
34715diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34716index bf14f39..15c4b98 100644
34717--- a/drivers/ide/piix.c
34718+++ b/drivers/ide/piix.c
34719@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34720 .udma_mask = udma, \
34721 }
34722
34723-static const struct ide_port_info piix_pci_info[] __devinitdata = {
34724+static const struct ide_port_info piix_pci_info[] __devinitconst = {
34725 /* 0: MPIIX */
34726 { /*
34727 * MPIIX actually has only a single IDE channel mapped to
34728diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34729index a6414a8..c04173e 100644
34730--- a/drivers/ide/rz1000.c
34731+++ b/drivers/ide/rz1000.c
34732@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34733 }
34734 }
34735
34736-static const struct ide_port_info rz1000_chipset __devinitdata = {
34737+static const struct ide_port_info rz1000_chipset __devinitconst = {
34738 .name = DRV_NAME,
34739 .host_flags = IDE_HFLAG_NO_DMA,
34740 };
34741diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34742index d467478..9203942 100644
34743--- a/drivers/ide/sc1200.c
34744+++ b/drivers/ide/sc1200.c
34745@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34746 .dma_sff_read_status = ide_dma_sff_read_status,
34747 };
34748
34749-static const struct ide_port_info sc1200_chipset __devinitdata = {
34750+static const struct ide_port_info sc1200_chipset __devinitconst = {
34751 .name = DRV_NAME,
34752 .port_ops = &sc1200_port_ops,
34753 .dma_ops = &sc1200_dma_ops,
34754diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34755index 1104bb3..59c5194 100644
34756--- a/drivers/ide/scc_pata.c
34757+++ b/drivers/ide/scc_pata.c
34758@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34759 .dma_sff_read_status = scc_dma_sff_read_status,
34760 };
34761
34762-static const struct ide_port_info scc_chipset __devinitdata = {
34763+static const struct ide_port_info scc_chipset __devinitconst = {
34764 .name = "sccIDE",
34765 .init_iops = init_iops_scc,
34766 .init_dma = scc_init_dma,
34767diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34768index b6554ef..6cc2cc3 100644
34769--- a/drivers/ide/serverworks.c
34770+++ b/drivers/ide/serverworks.c
34771@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34772 .cable_detect = svwks_cable_detect,
34773 };
34774
34775-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34776+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34777 { /* 0: OSB4 */
34778 .name = DRV_NAME,
34779 .init_chipset = init_chipset_svwks,
34780diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34781index ab3db61..afed580 100644
34782--- a/drivers/ide/setup-pci.c
34783+++ b/drivers/ide/setup-pci.c
34784@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34785 int ret, i, n_ports = dev2 ? 4 : 2;
34786 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34787
34788+ pax_track_stack();
34789+
34790 for (i = 0; i < n_ports / 2; i++) {
34791 ret = ide_setup_pci_controller(pdev[i], d, !i);
34792 if (ret < 0)
34793diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34794index d95df52..0b03a39 100644
34795--- a/drivers/ide/siimage.c
34796+++ b/drivers/ide/siimage.c
34797@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34798 .udma_mask = ATA_UDMA6, \
34799 }
34800
34801-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34802+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34803 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34804 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34805 };
34806diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34807index 3b88eba..ca8699d 100644
34808--- a/drivers/ide/sis5513.c
34809+++ b/drivers/ide/sis5513.c
34810@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34811 .cable_detect = sis_cable_detect,
34812 };
34813
34814-static const struct ide_port_info sis5513_chipset __devinitdata = {
34815+static const struct ide_port_info sis5513_chipset __devinitconst = {
34816 .name = DRV_NAME,
34817 .init_chipset = init_chipset_sis5513,
34818 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34819diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34820index d698da4..fca42a4 100644
34821--- a/drivers/ide/sl82c105.c
34822+++ b/drivers/ide/sl82c105.c
34823@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34824 .dma_sff_read_status = ide_dma_sff_read_status,
34825 };
34826
34827-static const struct ide_port_info sl82c105_chipset __devinitdata = {
34828+static const struct ide_port_info sl82c105_chipset __devinitconst = {
34829 .name = DRV_NAME,
34830 .init_chipset = init_chipset_sl82c105,
34831 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34832diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34833index 1ccfb40..83d5779 100644
34834--- a/drivers/ide/slc90e66.c
34835+++ b/drivers/ide/slc90e66.c
34836@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34837 .cable_detect = slc90e66_cable_detect,
34838 };
34839
34840-static const struct ide_port_info slc90e66_chipset __devinitdata = {
34841+static const struct ide_port_info slc90e66_chipset __devinitconst = {
34842 .name = DRV_NAME,
34843 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34844 .port_ops = &slc90e66_port_ops,
34845diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34846index 05a93d6..5f9e325 100644
34847--- a/drivers/ide/tc86c001.c
34848+++ b/drivers/ide/tc86c001.c
34849@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34850 .dma_sff_read_status = ide_dma_sff_read_status,
34851 };
34852
34853-static const struct ide_port_info tc86c001_chipset __devinitdata = {
34854+static const struct ide_port_info tc86c001_chipset __devinitconst = {
34855 .name = DRV_NAME,
34856 .init_hwif = init_hwif_tc86c001,
34857 .port_ops = &tc86c001_port_ops,
34858diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34859index 8773c3b..7907d6c 100644
34860--- a/drivers/ide/triflex.c
34861+++ b/drivers/ide/triflex.c
34862@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34863 .set_dma_mode = triflex_set_mode,
34864 };
34865
34866-static const struct ide_port_info triflex_device __devinitdata = {
34867+static const struct ide_port_info triflex_device __devinitconst = {
34868 .name = DRV_NAME,
34869 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34870 .port_ops = &triflex_port_ops,
34871diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34872index 4b42ca0..e494a98 100644
34873--- a/drivers/ide/trm290.c
34874+++ b/drivers/ide/trm290.c
34875@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34876 .dma_check = trm290_dma_check,
34877 };
34878
34879-static const struct ide_port_info trm290_chipset __devinitdata = {
34880+static const struct ide_port_info trm290_chipset __devinitconst = {
34881 .name = DRV_NAME,
34882 .init_hwif = init_hwif_trm290,
34883 .tp_ops = &trm290_tp_ops,
34884diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34885index 028de26..520d5d5 100644
34886--- a/drivers/ide/via82cxxx.c
34887+++ b/drivers/ide/via82cxxx.c
34888@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34889 .cable_detect = via82cxxx_cable_detect,
34890 };
34891
34892-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34893+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34894 .name = DRV_NAME,
34895 .init_chipset = init_chipset_via82cxxx,
34896 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34897diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34898index 2cd00b5..14de699 100644
34899--- a/drivers/ieee1394/dv1394.c
34900+++ b/drivers/ieee1394/dv1394.c
34901@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34902 based upon DIF section and sequence
34903 */
34904
34905-static void inline
34906+static inline void
34907 frame_put_packet (struct frame *f, struct packet *p)
34908 {
34909 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34910diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34911index e947d8f..6a966b9 100644
34912--- a/drivers/ieee1394/hosts.c
34913+++ b/drivers/ieee1394/hosts.c
34914@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34915 }
34916
34917 static struct hpsb_host_driver dummy_driver = {
34918+ .name = "dummy",
34919 .transmit_packet = dummy_transmit_packet,
34920 .devctl = dummy_devctl,
34921 .isoctl = dummy_isoctl
34922diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34923index ddaab6e..8d37435 100644
34924--- a/drivers/ieee1394/init_ohci1394_dma.c
34925+++ b/drivers/ieee1394/init_ohci1394_dma.c
34926@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34927 for (func = 0; func < 8; func++) {
34928 u32 class = read_pci_config(num,slot,func,
34929 PCI_CLASS_REVISION);
34930- if ((class == 0xffffffff))
34931+ if (class == 0xffffffff)
34932 continue; /* No device at this func */
34933
34934 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34935diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34936index 65c1429..5d8c11f 100644
34937--- a/drivers/ieee1394/ohci1394.c
34938+++ b/drivers/ieee1394/ohci1394.c
34939@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34940 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34941
34942 /* Module Parameters */
34943-static int phys_dma = 1;
34944+static int phys_dma;
34945 module_param(phys_dma, int, 0444);
34946-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34947+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34948
34949 static void dma_trm_tasklet(unsigned long data);
34950 static void dma_trm_reset(struct dma_trm_ctx *d);
34951diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34952index f199896..78c9fc8 100644
34953--- a/drivers/ieee1394/sbp2.c
34954+++ b/drivers/ieee1394/sbp2.c
34955@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34956 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34957 MODULE_LICENSE("GPL");
34958
34959-static int sbp2_module_init(void)
34960+static int __init sbp2_module_init(void)
34961 {
34962 int ret;
34963
34964diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34965index a5dea6b..0cefe8f 100644
34966--- a/drivers/infiniband/core/cm.c
34967+++ b/drivers/infiniband/core/cm.c
34968@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34969
34970 struct cm_counter_group {
34971 struct kobject obj;
34972- atomic_long_t counter[CM_ATTR_COUNT];
34973+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34974 };
34975
34976 struct cm_counter_attribute {
34977@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34978 struct ib_mad_send_buf *msg = NULL;
34979 int ret;
34980
34981- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34982+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34983 counter[CM_REQ_COUNTER]);
34984
34985 /* Quick state check to discard duplicate REQs. */
34986@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34987 if (!cm_id_priv)
34988 return;
34989
34990- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34991+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34992 counter[CM_REP_COUNTER]);
34993 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34994 if (ret)
34995@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34996 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34997 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34998 spin_unlock_irq(&cm_id_priv->lock);
34999- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35000+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35001 counter[CM_RTU_COUNTER]);
35002 goto out;
35003 }
35004@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
35005 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
35006 dreq_msg->local_comm_id);
35007 if (!cm_id_priv) {
35008- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35009+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35010 counter[CM_DREQ_COUNTER]);
35011 cm_issue_drep(work->port, work->mad_recv_wc);
35012 return -EINVAL;
35013@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
35014 case IB_CM_MRA_REP_RCVD:
35015 break;
35016 case IB_CM_TIMEWAIT:
35017- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35018+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35019 counter[CM_DREQ_COUNTER]);
35020 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35021 goto unlock;
35022@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
35023 cm_free_msg(msg);
35024 goto deref;
35025 case IB_CM_DREQ_RCVD:
35026- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35027+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35028 counter[CM_DREQ_COUNTER]);
35029 goto unlock;
35030 default:
35031@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
35032 ib_modify_mad(cm_id_priv->av.port->mad_agent,
35033 cm_id_priv->msg, timeout)) {
35034 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
35035- atomic_long_inc(&work->port->
35036+ atomic_long_inc_unchecked(&work->port->
35037 counter_group[CM_RECV_DUPLICATES].
35038 counter[CM_MRA_COUNTER]);
35039 goto out;
35040@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
35041 break;
35042 case IB_CM_MRA_REQ_RCVD:
35043 case IB_CM_MRA_REP_RCVD:
35044- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35045+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35046 counter[CM_MRA_COUNTER]);
35047 /* fall through */
35048 default:
35049@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
35050 case IB_CM_LAP_IDLE:
35051 break;
35052 case IB_CM_MRA_LAP_SENT:
35053- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35054+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35055 counter[CM_LAP_COUNTER]);
35056 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35057 goto unlock;
35058@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
35059 cm_free_msg(msg);
35060 goto deref;
35061 case IB_CM_LAP_RCVD:
35062- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35063+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35064 counter[CM_LAP_COUNTER]);
35065 goto unlock;
35066 default:
35067@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
35068 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
35069 if (cur_cm_id_priv) {
35070 spin_unlock_irq(&cm.lock);
35071- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35072+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35073 counter[CM_SIDR_REQ_COUNTER]);
35074 goto out; /* Duplicate message. */
35075 }
35076@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
35077 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
35078 msg->retries = 1;
35079
35080- atomic_long_add(1 + msg->retries,
35081+ atomic_long_add_unchecked(1 + msg->retries,
35082 &port->counter_group[CM_XMIT].counter[attr_index]);
35083 if (msg->retries)
35084- atomic_long_add(msg->retries,
35085+ atomic_long_add_unchecked(msg->retries,
35086 &port->counter_group[CM_XMIT_RETRIES].
35087 counter[attr_index]);
35088
35089@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
35090 }
35091
35092 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
35093- atomic_long_inc(&port->counter_group[CM_RECV].
35094+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
35095 counter[attr_id - CM_ATTR_ID_OFFSET]);
35096
35097 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
35098@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
35099 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
35100
35101 return sprintf(buf, "%ld\n",
35102- atomic_long_read(&group->counter[cm_attr->index]));
35103+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
35104 }
35105
35106-static struct sysfs_ops cm_counter_ops = {
35107+static const struct sysfs_ops cm_counter_ops = {
35108 .show = cm_show_counter
35109 };
35110
35111diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
35112index 8fd3a6f..61d8075 100644
35113--- a/drivers/infiniband/core/cma.c
35114+++ b/drivers/infiniband/core/cma.c
35115@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
35116
35117 req.private_data_len = sizeof(struct cma_hdr) +
35118 conn_param->private_data_len;
35119+ if (req.private_data_len < conn_param->private_data_len)
35120+ return -EINVAL;
35121+
35122 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35123 if (!req.private_data)
35124 return -ENOMEM;
35125@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
35126 memset(&req, 0, sizeof req);
35127 offset = cma_user_data_offset(id_priv->id.ps);
35128 req.private_data_len = offset + conn_param->private_data_len;
35129+ if (req.private_data_len < conn_param->private_data_len)
35130+ return -EINVAL;
35131+
35132 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
35133 if (!private_data)
35134 return -ENOMEM;
35135diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
35136index 4507043..14ad522 100644
35137--- a/drivers/infiniband/core/fmr_pool.c
35138+++ b/drivers/infiniband/core/fmr_pool.c
35139@@ -97,8 +97,8 @@ struct ib_fmr_pool {
35140
35141 struct task_struct *thread;
35142
35143- atomic_t req_ser;
35144- atomic_t flush_ser;
35145+ atomic_unchecked_t req_ser;
35146+ atomic_unchecked_t flush_ser;
35147
35148 wait_queue_head_t force_wait;
35149 };
35150@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35151 struct ib_fmr_pool *pool = pool_ptr;
35152
35153 do {
35154- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
35155+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
35156 ib_fmr_batch_release(pool);
35157
35158- atomic_inc(&pool->flush_ser);
35159+ atomic_inc_unchecked(&pool->flush_ser);
35160 wake_up_interruptible(&pool->force_wait);
35161
35162 if (pool->flush_function)
35163@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
35164 }
35165
35166 set_current_state(TASK_INTERRUPTIBLE);
35167- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
35168+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
35169 !kthread_should_stop())
35170 schedule();
35171 __set_current_state(TASK_RUNNING);
35172@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
35173 pool->dirty_watermark = params->dirty_watermark;
35174 pool->dirty_len = 0;
35175 spin_lock_init(&pool->pool_lock);
35176- atomic_set(&pool->req_ser, 0);
35177- atomic_set(&pool->flush_ser, 0);
35178+ atomic_set_unchecked(&pool->req_ser, 0);
35179+ atomic_set_unchecked(&pool->flush_ser, 0);
35180 init_waitqueue_head(&pool->force_wait);
35181
35182 pool->thread = kthread_run(ib_fmr_cleanup_thread,
35183@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
35184 }
35185 spin_unlock_irq(&pool->pool_lock);
35186
35187- serial = atomic_inc_return(&pool->req_ser);
35188+ serial = atomic_inc_return_unchecked(&pool->req_ser);
35189 wake_up_process(pool->thread);
35190
35191 if (wait_event_interruptible(pool->force_wait,
35192- atomic_read(&pool->flush_ser) - serial >= 0))
35193+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
35194 return -EINTR;
35195
35196 return 0;
35197@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
35198 } else {
35199 list_add_tail(&fmr->list, &pool->dirty_list);
35200 if (++pool->dirty_len >= pool->dirty_watermark) {
35201- atomic_inc(&pool->req_ser);
35202+ atomic_inc_unchecked(&pool->req_ser);
35203 wake_up_process(pool->thread);
35204 }
35205 }
35206diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
35207index 158a214..1558bb7 100644
35208--- a/drivers/infiniband/core/sysfs.c
35209+++ b/drivers/infiniband/core/sysfs.c
35210@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
35211 return port_attr->show(p, port_attr, buf);
35212 }
35213
35214-static struct sysfs_ops port_sysfs_ops = {
35215+static const struct sysfs_ops port_sysfs_ops = {
35216 .show = port_attr_show
35217 };
35218
35219diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
35220index 5440da0..1194ecb 100644
35221--- a/drivers/infiniband/core/uverbs_marshall.c
35222+++ b/drivers/infiniband/core/uverbs_marshall.c
35223@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
35224 dst->grh.sgid_index = src->grh.sgid_index;
35225 dst->grh.hop_limit = src->grh.hop_limit;
35226 dst->grh.traffic_class = src->grh.traffic_class;
35227+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
35228 dst->dlid = src->dlid;
35229 dst->sl = src->sl;
35230 dst->src_path_bits = src->src_path_bits;
35231 dst->static_rate = src->static_rate;
35232 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
35233 dst->port_num = src->port_num;
35234+ dst->reserved = 0;
35235 }
35236 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
35237
35238 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35239 struct ib_qp_attr *src)
35240 {
35241+ dst->qp_state = src->qp_state;
35242 dst->cur_qp_state = src->cur_qp_state;
35243 dst->path_mtu = src->path_mtu;
35244 dst->path_mig_state = src->path_mig_state;
35245@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
35246 dst->rnr_retry = src->rnr_retry;
35247 dst->alt_port_num = src->alt_port_num;
35248 dst->alt_timeout = src->alt_timeout;
35249+ memset(dst->reserved, 0, sizeof(dst->reserved));
35250 }
35251 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
35252
35253diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
35254index 100da85..62e6b88 100644
35255--- a/drivers/infiniband/hw/ipath/ipath_fs.c
35256+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
35257@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
35258 struct infinipath_counters counters;
35259 struct ipath_devdata *dd;
35260
35261+ pax_track_stack();
35262+
35263 dd = file->f_path.dentry->d_inode->i_private;
35264 dd->ipath_f_read_counters(dd, &counters);
35265
35266diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35267index cbde0cf..afaf55c 100644
35268--- a/drivers/infiniband/hw/nes/nes.c
35269+++ b/drivers/infiniband/hw/nes/nes.c
35270@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35271 LIST_HEAD(nes_adapter_list);
35272 static LIST_HEAD(nes_dev_list);
35273
35274-atomic_t qps_destroyed;
35275+atomic_unchecked_t qps_destroyed;
35276
35277 static unsigned int ee_flsh_adapter;
35278 static unsigned int sysfs_nonidx_addr;
35279@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35280 struct nes_adapter *nesadapter = nesdev->nesadapter;
35281 u32 qp_id;
35282
35283- atomic_inc(&qps_destroyed);
35284+ atomic_inc_unchecked(&qps_destroyed);
35285
35286 /* Free the control structures */
35287
35288diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35289index bcc6abc..9c76b2f 100644
35290--- a/drivers/infiniband/hw/nes/nes.h
35291+++ b/drivers/infiniband/hw/nes/nes.h
35292@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35293 extern unsigned int wqm_quanta;
35294 extern struct list_head nes_adapter_list;
35295
35296-extern atomic_t cm_connects;
35297-extern atomic_t cm_accepts;
35298-extern atomic_t cm_disconnects;
35299-extern atomic_t cm_closes;
35300-extern atomic_t cm_connecteds;
35301-extern atomic_t cm_connect_reqs;
35302-extern atomic_t cm_rejects;
35303-extern atomic_t mod_qp_timouts;
35304-extern atomic_t qps_created;
35305-extern atomic_t qps_destroyed;
35306-extern atomic_t sw_qps_destroyed;
35307+extern atomic_unchecked_t cm_connects;
35308+extern atomic_unchecked_t cm_accepts;
35309+extern atomic_unchecked_t cm_disconnects;
35310+extern atomic_unchecked_t cm_closes;
35311+extern atomic_unchecked_t cm_connecteds;
35312+extern atomic_unchecked_t cm_connect_reqs;
35313+extern atomic_unchecked_t cm_rejects;
35314+extern atomic_unchecked_t mod_qp_timouts;
35315+extern atomic_unchecked_t qps_created;
35316+extern atomic_unchecked_t qps_destroyed;
35317+extern atomic_unchecked_t sw_qps_destroyed;
35318 extern u32 mh_detected;
35319 extern u32 mh_pauses_sent;
35320 extern u32 cm_packets_sent;
35321@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35322 extern u32 cm_listens_created;
35323 extern u32 cm_listens_destroyed;
35324 extern u32 cm_backlog_drops;
35325-extern atomic_t cm_loopbacks;
35326-extern atomic_t cm_nodes_created;
35327-extern atomic_t cm_nodes_destroyed;
35328-extern atomic_t cm_accel_dropped_pkts;
35329-extern atomic_t cm_resets_recvd;
35330+extern atomic_unchecked_t cm_loopbacks;
35331+extern atomic_unchecked_t cm_nodes_created;
35332+extern atomic_unchecked_t cm_nodes_destroyed;
35333+extern atomic_unchecked_t cm_accel_dropped_pkts;
35334+extern atomic_unchecked_t cm_resets_recvd;
35335
35336 extern u32 int_mod_timer_init;
35337 extern u32 int_mod_cq_depth_256;
35338diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35339index 73473db..5ed06e8 100644
35340--- a/drivers/infiniband/hw/nes/nes_cm.c
35341+++ b/drivers/infiniband/hw/nes/nes_cm.c
35342@@ -69,11 +69,11 @@ u32 cm_packets_received;
35343 u32 cm_listens_created;
35344 u32 cm_listens_destroyed;
35345 u32 cm_backlog_drops;
35346-atomic_t cm_loopbacks;
35347-atomic_t cm_nodes_created;
35348-atomic_t cm_nodes_destroyed;
35349-atomic_t cm_accel_dropped_pkts;
35350-atomic_t cm_resets_recvd;
35351+atomic_unchecked_t cm_loopbacks;
35352+atomic_unchecked_t cm_nodes_created;
35353+atomic_unchecked_t cm_nodes_destroyed;
35354+atomic_unchecked_t cm_accel_dropped_pkts;
35355+atomic_unchecked_t cm_resets_recvd;
35356
35357 static inline int mini_cm_accelerated(struct nes_cm_core *,
35358 struct nes_cm_node *);
35359@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35360
35361 static struct nes_cm_core *g_cm_core;
35362
35363-atomic_t cm_connects;
35364-atomic_t cm_accepts;
35365-atomic_t cm_disconnects;
35366-atomic_t cm_closes;
35367-atomic_t cm_connecteds;
35368-atomic_t cm_connect_reqs;
35369-atomic_t cm_rejects;
35370+atomic_unchecked_t cm_connects;
35371+atomic_unchecked_t cm_accepts;
35372+atomic_unchecked_t cm_disconnects;
35373+atomic_unchecked_t cm_closes;
35374+atomic_unchecked_t cm_connecteds;
35375+atomic_unchecked_t cm_connect_reqs;
35376+atomic_unchecked_t cm_rejects;
35377
35378
35379 /**
35380@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35381 cm_node->rem_mac);
35382
35383 add_hte_node(cm_core, cm_node);
35384- atomic_inc(&cm_nodes_created);
35385+ atomic_inc_unchecked(&cm_nodes_created);
35386
35387 return cm_node;
35388 }
35389@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35390 }
35391
35392 atomic_dec(&cm_core->node_cnt);
35393- atomic_inc(&cm_nodes_destroyed);
35394+ atomic_inc_unchecked(&cm_nodes_destroyed);
35395 nesqp = cm_node->nesqp;
35396 if (nesqp) {
35397 nesqp->cm_node = NULL;
35398@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35399
35400 static void drop_packet(struct sk_buff *skb)
35401 {
35402- atomic_inc(&cm_accel_dropped_pkts);
35403+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35404 dev_kfree_skb_any(skb);
35405 }
35406
35407@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35408
35409 int reset = 0; /* whether to send reset in case of err.. */
35410 int passive_state;
35411- atomic_inc(&cm_resets_recvd);
35412+ atomic_inc_unchecked(&cm_resets_recvd);
35413 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35414 " refcnt=%d\n", cm_node, cm_node->state,
35415 atomic_read(&cm_node->ref_count));
35416@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35417 rem_ref_cm_node(cm_node->cm_core, cm_node);
35418 return NULL;
35419 }
35420- atomic_inc(&cm_loopbacks);
35421+ atomic_inc_unchecked(&cm_loopbacks);
35422 loopbackremotenode->loopbackpartner = cm_node;
35423 loopbackremotenode->tcp_cntxt.rcv_wscale =
35424 NES_CM_DEFAULT_RCV_WND_SCALE;
35425@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35426 add_ref_cm_node(cm_node);
35427 } else if (cm_node->state == NES_CM_STATE_TSA) {
35428 rem_ref_cm_node(cm_core, cm_node);
35429- atomic_inc(&cm_accel_dropped_pkts);
35430+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35431 dev_kfree_skb_any(skb);
35432 break;
35433 }
35434@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35435
35436 if ((cm_id) && (cm_id->event_handler)) {
35437 if (issue_disconn) {
35438- atomic_inc(&cm_disconnects);
35439+ atomic_inc_unchecked(&cm_disconnects);
35440 cm_event.event = IW_CM_EVENT_DISCONNECT;
35441 cm_event.status = disconn_status;
35442 cm_event.local_addr = cm_id->local_addr;
35443@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35444 }
35445
35446 if (issue_close) {
35447- atomic_inc(&cm_closes);
35448+ atomic_inc_unchecked(&cm_closes);
35449 nes_disconnect(nesqp, 1);
35450
35451 cm_id->provider_data = nesqp;
35452@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35453
35454 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35455 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35456- atomic_inc(&cm_accepts);
35457+ atomic_inc_unchecked(&cm_accepts);
35458
35459 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35460 atomic_read(&nesvnic->netdev->refcnt));
35461@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35462
35463 struct nes_cm_core *cm_core;
35464
35465- atomic_inc(&cm_rejects);
35466+ atomic_inc_unchecked(&cm_rejects);
35467 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35468 loopback = cm_node->loopbackpartner;
35469 cm_core = cm_node->cm_core;
35470@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35471 ntohl(cm_id->local_addr.sin_addr.s_addr),
35472 ntohs(cm_id->local_addr.sin_port));
35473
35474- atomic_inc(&cm_connects);
35475+ atomic_inc_unchecked(&cm_connects);
35476 nesqp->active_conn = 1;
35477
35478 /* cache the cm_id in the qp */
35479@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35480 if (nesqp->destroyed) {
35481 return;
35482 }
35483- atomic_inc(&cm_connecteds);
35484+ atomic_inc_unchecked(&cm_connecteds);
35485 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35486 " local port 0x%04X. jiffies = %lu.\n",
35487 nesqp->hwqp.qp_id,
35488@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35489
35490 ret = cm_id->event_handler(cm_id, &cm_event);
35491 cm_id->add_ref(cm_id);
35492- atomic_inc(&cm_closes);
35493+ atomic_inc_unchecked(&cm_closes);
35494 cm_event.event = IW_CM_EVENT_CLOSE;
35495 cm_event.status = IW_CM_EVENT_STATUS_OK;
35496 cm_event.provider_data = cm_id->provider_data;
35497@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35498 return;
35499 cm_id = cm_node->cm_id;
35500
35501- atomic_inc(&cm_connect_reqs);
35502+ atomic_inc_unchecked(&cm_connect_reqs);
35503 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35504 cm_node, cm_id, jiffies);
35505
35506@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35507 return;
35508 cm_id = cm_node->cm_id;
35509
35510- atomic_inc(&cm_connect_reqs);
35511+ atomic_inc_unchecked(&cm_connect_reqs);
35512 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35513 cm_node, cm_id, jiffies);
35514
35515diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35516index e593af3..870694a 100644
35517--- a/drivers/infiniband/hw/nes/nes_nic.c
35518+++ b/drivers/infiniband/hw/nes/nes_nic.c
35519@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35520 target_stat_values[++index] = mh_detected;
35521 target_stat_values[++index] = mh_pauses_sent;
35522 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35523- target_stat_values[++index] = atomic_read(&cm_connects);
35524- target_stat_values[++index] = atomic_read(&cm_accepts);
35525- target_stat_values[++index] = atomic_read(&cm_disconnects);
35526- target_stat_values[++index] = atomic_read(&cm_connecteds);
35527- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35528- target_stat_values[++index] = atomic_read(&cm_rejects);
35529- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35530- target_stat_values[++index] = atomic_read(&qps_created);
35531- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35532- target_stat_values[++index] = atomic_read(&qps_destroyed);
35533- target_stat_values[++index] = atomic_read(&cm_closes);
35534+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35535+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35536+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35537+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35538+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35539+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35540+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35541+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35542+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35543+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35544+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35545 target_stat_values[++index] = cm_packets_sent;
35546 target_stat_values[++index] = cm_packets_bounced;
35547 target_stat_values[++index] = cm_packets_created;
35548@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35549 target_stat_values[++index] = cm_listens_created;
35550 target_stat_values[++index] = cm_listens_destroyed;
35551 target_stat_values[++index] = cm_backlog_drops;
35552- target_stat_values[++index] = atomic_read(&cm_loopbacks);
35553- target_stat_values[++index] = atomic_read(&cm_nodes_created);
35554- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35555- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35556- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35557+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35558+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35559+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35560+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35561+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35562 target_stat_values[++index] = int_mod_timer_init;
35563 target_stat_values[++index] = int_mod_cq_depth_1;
35564 target_stat_values[++index] = int_mod_cq_depth_4;
35565diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35566index a680c42..f914deb 100644
35567--- a/drivers/infiniband/hw/nes/nes_verbs.c
35568+++ b/drivers/infiniband/hw/nes/nes_verbs.c
35569@@ -45,9 +45,9 @@
35570
35571 #include <rdma/ib_umem.h>
35572
35573-atomic_t mod_qp_timouts;
35574-atomic_t qps_created;
35575-atomic_t sw_qps_destroyed;
35576+atomic_unchecked_t mod_qp_timouts;
35577+atomic_unchecked_t qps_created;
35578+atomic_unchecked_t sw_qps_destroyed;
35579
35580 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35581
35582@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35583 if (init_attr->create_flags)
35584 return ERR_PTR(-EINVAL);
35585
35586- atomic_inc(&qps_created);
35587+ atomic_inc_unchecked(&qps_created);
35588 switch (init_attr->qp_type) {
35589 case IB_QPT_RC:
35590 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35591@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35592 struct iw_cm_event cm_event;
35593 int ret;
35594
35595- atomic_inc(&sw_qps_destroyed);
35596+ atomic_inc_unchecked(&sw_qps_destroyed);
35597 nesqp->destroyed = 1;
35598
35599 /* Blow away the connection if it exists. */
35600diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35601index ac11be0..3883c04 100644
35602--- a/drivers/input/gameport/gameport.c
35603+++ b/drivers/input/gameport/gameport.c
35604@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35605 */
35606 static void gameport_init_port(struct gameport *gameport)
35607 {
35608- static atomic_t gameport_no = ATOMIC_INIT(0);
35609+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35610
35611 __module_get(THIS_MODULE);
35612
35613 mutex_init(&gameport->drv_mutex);
35614 device_initialize(&gameport->dev);
35615- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35616+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35617 gameport->dev.bus = &gameport_bus;
35618 gameport->dev.release = gameport_release_port;
35619 if (gameport->parent)
35620diff --git a/drivers/input/input.c b/drivers/input/input.c
35621index c82ae82..8cfb9cb 100644
35622--- a/drivers/input/input.c
35623+++ b/drivers/input/input.c
35624@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35625 */
35626 int input_register_device(struct input_dev *dev)
35627 {
35628- static atomic_t input_no = ATOMIC_INIT(0);
35629+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35630 struct input_handler *handler;
35631 const char *path;
35632 int error;
35633@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35634 dev->setkeycode = input_default_setkeycode;
35635
35636 dev_set_name(&dev->dev, "input%ld",
35637- (unsigned long) atomic_inc_return(&input_no) - 1);
35638+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35639
35640 error = device_add(&dev->dev);
35641 if (error)
35642diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35643index ca13a6b..b032b0c 100644
35644--- a/drivers/input/joystick/sidewinder.c
35645+++ b/drivers/input/joystick/sidewinder.c
35646@@ -30,6 +30,7 @@
35647 #include <linux/kernel.h>
35648 #include <linux/module.h>
35649 #include <linux/slab.h>
35650+#include <linux/sched.h>
35651 #include <linux/init.h>
35652 #include <linux/input.h>
35653 #include <linux/gameport.h>
35654@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35655 unsigned char buf[SW_LENGTH];
35656 int i;
35657
35658+ pax_track_stack();
35659+
35660 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35661
35662 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35663diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35664index 79e3edc..01412b9 100644
35665--- a/drivers/input/joystick/xpad.c
35666+++ b/drivers/input/joystick/xpad.c
35667@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35668
35669 static int xpad_led_probe(struct usb_xpad *xpad)
35670 {
35671- static atomic_t led_seq = ATOMIC_INIT(0);
35672+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35673 long led_no;
35674 struct xpad_led *led;
35675 struct led_classdev *led_cdev;
35676@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35677 if (!led)
35678 return -ENOMEM;
35679
35680- led_no = (long)atomic_inc_return(&led_seq) - 1;
35681+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35682
35683 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35684 led->xpad = xpad;
35685diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35686index 0236f0d..c7327f1 100644
35687--- a/drivers/input/serio/serio.c
35688+++ b/drivers/input/serio/serio.c
35689@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35690 */
35691 static void serio_init_port(struct serio *serio)
35692 {
35693- static atomic_t serio_no = ATOMIC_INIT(0);
35694+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35695
35696 __module_get(THIS_MODULE);
35697
35698@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35699 mutex_init(&serio->drv_mutex);
35700 device_initialize(&serio->dev);
35701 dev_set_name(&serio->dev, "serio%ld",
35702- (long)atomic_inc_return(&serio_no) - 1);
35703+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
35704 serio->dev.bus = &serio_bus;
35705 serio->dev.release = serio_release_port;
35706 if (serio->parent) {
35707diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35708index 33dcd8d..2783d25 100644
35709--- a/drivers/isdn/gigaset/common.c
35710+++ b/drivers/isdn/gigaset/common.c
35711@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35712 cs->commands_pending = 0;
35713 cs->cur_at_seq = 0;
35714 cs->gotfwver = -1;
35715- cs->open_count = 0;
35716+ local_set(&cs->open_count, 0);
35717 cs->dev = NULL;
35718 cs->tty = NULL;
35719 cs->tty_dev = NULL;
35720diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35721index a2f6125..6a70677 100644
35722--- a/drivers/isdn/gigaset/gigaset.h
35723+++ b/drivers/isdn/gigaset/gigaset.h
35724@@ -34,6 +34,7 @@
35725 #include <linux/tty_driver.h>
35726 #include <linux/list.h>
35727 #include <asm/atomic.h>
35728+#include <asm/local.h>
35729
35730 #define GIG_VERSION {0,5,0,0}
35731 #define GIG_COMPAT {0,4,0,0}
35732@@ -446,7 +447,7 @@ struct cardstate {
35733 spinlock_t cmdlock;
35734 unsigned curlen, cmdbytes;
35735
35736- unsigned open_count;
35737+ local_t open_count;
35738 struct tty_struct *tty;
35739 struct tasklet_struct if_wake_tasklet;
35740 unsigned control_state;
35741diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35742index b3065b8..c7e8cc9 100644
35743--- a/drivers/isdn/gigaset/interface.c
35744+++ b/drivers/isdn/gigaset/interface.c
35745@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35746 return -ERESTARTSYS; // FIXME -EINTR?
35747 tty->driver_data = cs;
35748
35749- ++cs->open_count;
35750-
35751- if (cs->open_count == 1) {
35752+ if (local_inc_return(&cs->open_count) == 1) {
35753 spin_lock_irqsave(&cs->lock, flags);
35754 cs->tty = tty;
35755 spin_unlock_irqrestore(&cs->lock, flags);
35756@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35757
35758 if (!cs->connected)
35759 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35760- else if (!cs->open_count)
35761+ else if (!local_read(&cs->open_count))
35762 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35763 else {
35764- if (!--cs->open_count) {
35765+ if (!local_dec_return(&cs->open_count)) {
35766 spin_lock_irqsave(&cs->lock, flags);
35767 cs->tty = NULL;
35768 spin_unlock_irqrestore(&cs->lock, flags);
35769@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35770 if (!cs->connected) {
35771 gig_dbg(DEBUG_IF, "not connected");
35772 retval = -ENODEV;
35773- } else if (!cs->open_count)
35774+ } else if (!local_read(&cs->open_count))
35775 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35776 else {
35777 retval = 0;
35778@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35779 if (!cs->connected) {
35780 gig_dbg(DEBUG_IF, "not connected");
35781 retval = -ENODEV;
35782- } else if (!cs->open_count)
35783+ } else if (!local_read(&cs->open_count))
35784 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35785 else if (cs->mstate != MS_LOCKED) {
35786 dev_warn(cs->dev, "can't write to unlocked device\n");
35787@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35788 if (!cs->connected) {
35789 gig_dbg(DEBUG_IF, "not connected");
35790 retval = -ENODEV;
35791- } else if (!cs->open_count)
35792+ } else if (!local_read(&cs->open_count))
35793 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35794 else if (cs->mstate != MS_LOCKED) {
35795 dev_warn(cs->dev, "can't write to unlocked device\n");
35796@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35797
35798 if (!cs->connected)
35799 gig_dbg(DEBUG_IF, "not connected");
35800- else if (!cs->open_count)
35801+ else if (!local_read(&cs->open_count))
35802 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35803 else if (cs->mstate != MS_LOCKED)
35804 dev_warn(cs->dev, "can't write to unlocked device\n");
35805@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35806
35807 if (!cs->connected)
35808 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35809- else if (!cs->open_count)
35810+ else if (!local_read(&cs->open_count))
35811 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35812 else {
35813 //FIXME
35814@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35815
35816 if (!cs->connected)
35817 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35818- else if (!cs->open_count)
35819+ else if (!local_read(&cs->open_count))
35820 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35821 else {
35822 //FIXME
35823@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35824 goto out;
35825 }
35826
35827- if (!cs->open_count) {
35828+ if (!local_read(&cs->open_count)) {
35829 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35830 goto out;
35831 }
35832diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35833index a7c0083..62a7cb6 100644
35834--- a/drivers/isdn/hardware/avm/b1.c
35835+++ b/drivers/isdn/hardware/avm/b1.c
35836@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35837 }
35838 if (left) {
35839 if (t4file->user) {
35840- if (copy_from_user(buf, dp, left))
35841+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35842 return -EFAULT;
35843 } else {
35844 memcpy(buf, dp, left);
35845@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35846 }
35847 if (left) {
35848 if (config->user) {
35849- if (copy_from_user(buf, dp, left))
35850+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35851 return -EFAULT;
35852 } else {
35853 memcpy(buf, dp, left);
35854diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35855index f130724..c373c68 100644
35856--- a/drivers/isdn/hardware/eicon/capidtmf.c
35857+++ b/drivers/isdn/hardware/eicon/capidtmf.c
35858@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35859 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35860 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35861
35862+ pax_track_stack();
35863
35864 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35865 {
35866diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35867index 4d425c6..a9be6c4 100644
35868--- a/drivers/isdn/hardware/eicon/capifunc.c
35869+++ b/drivers/isdn/hardware/eicon/capifunc.c
35870@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35871 IDI_SYNC_REQ req;
35872 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35873
35874+ pax_track_stack();
35875+
35876 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35877
35878 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35879diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35880index 3029234..ef0d9e2 100644
35881--- a/drivers/isdn/hardware/eicon/diddfunc.c
35882+++ b/drivers/isdn/hardware/eicon/diddfunc.c
35883@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35884 IDI_SYNC_REQ req;
35885 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35886
35887+ pax_track_stack();
35888+
35889 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35890
35891 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35892diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35893index d36a4c0..11e7d1a 100644
35894--- a/drivers/isdn/hardware/eicon/divasfunc.c
35895+++ b/drivers/isdn/hardware/eicon/divasfunc.c
35896@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35897 IDI_SYNC_REQ req;
35898 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35899
35900+ pax_track_stack();
35901+
35902 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35903
35904 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35905diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35906index 85784a7..a19ca98 100644
35907--- a/drivers/isdn/hardware/eicon/divasync.h
35908+++ b/drivers/isdn/hardware/eicon/divasync.h
35909@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35910 } diva_didd_add_adapter_t;
35911 typedef struct _diva_didd_remove_adapter {
35912 IDI_CALL p_request;
35913-} diva_didd_remove_adapter_t;
35914+} __no_const diva_didd_remove_adapter_t;
35915 typedef struct _diva_didd_read_adapter_array {
35916 void * buffer;
35917 dword length;
35918diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35919index db87d51..7d09acf 100644
35920--- a/drivers/isdn/hardware/eicon/idifunc.c
35921+++ b/drivers/isdn/hardware/eicon/idifunc.c
35922@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35923 IDI_SYNC_REQ req;
35924 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35925
35926+ pax_track_stack();
35927+
35928 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35929
35930 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35931diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35932index ae89fb8..0fab299 100644
35933--- a/drivers/isdn/hardware/eicon/message.c
35934+++ b/drivers/isdn/hardware/eicon/message.c
35935@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35936 dword d;
35937 word w;
35938
35939+ pax_track_stack();
35940+
35941 a = plci->adapter;
35942 Id = ((word)plci->Id<<8)|a->Id;
35943 PUT_WORD(&SS_Ind[4],0x0000);
35944@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35945 word j, n, w;
35946 dword d;
35947
35948+ pax_track_stack();
35949+
35950
35951 for(i=0;i<8;i++) bp_parms[i].length = 0;
35952 for(i=0;i<2;i++) global_config[i].length = 0;
35953@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35954 const byte llc3[] = {4,3,2,2,6,6,0};
35955 const byte header[] = {0,2,3,3,0,0,0};
35956
35957+ pax_track_stack();
35958+
35959 for(i=0;i<8;i++) bp_parms[i].length = 0;
35960 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35961 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35962@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35963 word appl_number_group_type[MAX_APPL];
35964 PLCI *auxplci;
35965
35966+ pax_track_stack();
35967+
35968 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35969
35970 if(!a->group_optimization_enabled)
35971diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35972index a564b75..f3cf8b5 100644
35973--- a/drivers/isdn/hardware/eicon/mntfunc.c
35974+++ b/drivers/isdn/hardware/eicon/mntfunc.c
35975@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35976 IDI_SYNC_REQ req;
35977 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35978
35979+ pax_track_stack();
35980+
35981 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35982
35983 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35984diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35985index a3bd163..8956575 100644
35986--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35987+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35988@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35989 typedef struct _diva_os_idi_adapter_interface {
35990 diva_init_card_proc_t cleanup_adapter_proc;
35991 diva_cmd_card_proc_t cmd_proc;
35992-} diva_os_idi_adapter_interface_t;
35993+} __no_const diva_os_idi_adapter_interface_t;
35994
35995 typedef struct _diva_os_xdi_adapter {
35996 struct list_head link;
35997diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35998index adb1e8c..21b590b 100644
35999--- a/drivers/isdn/i4l/isdn_common.c
36000+++ b/drivers/isdn/i4l/isdn_common.c
36001@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
36002 } iocpar;
36003 void __user *argp = (void __user *)arg;
36004
36005+ pax_track_stack();
36006+
36007 #define name iocpar.name
36008 #define bname iocpar.bname
36009 #define iocts iocpar.iocts
36010diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
36011index 90b56ed..5ed3305 100644
36012--- a/drivers/isdn/i4l/isdn_net.c
36013+++ b/drivers/isdn/i4l/isdn_net.c
36014@@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
36015 {
36016 isdn_net_local *lp = netdev_priv(dev);
36017 unsigned char *p;
36018- ushort len = 0;
36019+ int len = 0;
36020
36021 switch (lp->p_encap) {
36022 case ISDN_NET_ENCAP_ETHER:
36023diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
36024index bf7997a..cf091db 100644
36025--- a/drivers/isdn/icn/icn.c
36026+++ b/drivers/isdn/icn/icn.c
36027@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
36028 if (count > len)
36029 count = len;
36030 if (user) {
36031- if (copy_from_user(msg, buf, count))
36032+ if (count > sizeof msg || copy_from_user(msg, buf, count))
36033 return -EFAULT;
36034 } else
36035 memcpy(msg, buf, count);
36036diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
36037index feb0fa4..f76f830 100644
36038--- a/drivers/isdn/mISDN/socket.c
36039+++ b/drivers/isdn/mISDN/socket.c
36040@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36041 if (dev) {
36042 struct mISDN_devinfo di;
36043
36044+ memset(&di, 0, sizeof(di));
36045 di.id = dev->id;
36046 di.Dprotocols = dev->Dprotocols;
36047 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36048@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36049 if (dev) {
36050 struct mISDN_devinfo di;
36051
36052+ memset(&di, 0, sizeof(di));
36053 di.id = dev->id;
36054 di.Dprotocols = dev->Dprotocols;
36055 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36056diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
36057index 485be8b..f0225bc 100644
36058--- a/drivers/isdn/sc/interrupt.c
36059+++ b/drivers/isdn/sc/interrupt.c
36060@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36061 }
36062 else if(callid>=0x0000 && callid<=0x7FFF)
36063 {
36064+ int len;
36065+
36066 pr_debug("%s: Got Incoming Call\n",
36067 sc_adapter[card]->devicename);
36068- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
36069- strcpy(setup.eazmsn,
36070- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
36071+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
36072+ sizeof(setup.phone));
36073+ if (len >= sizeof(setup.phone))
36074+ continue;
36075+ len = strlcpy(setup.eazmsn,
36076+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36077+ sizeof(setup.eazmsn));
36078+ if (len >= sizeof(setup.eazmsn))
36079+ continue;
36080 setup.si1 = 7;
36081 setup.si2 = 0;
36082 setup.plan = 0;
36083@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
36084 * Handle a GetMyNumber Rsp
36085 */
36086 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
36087- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
36088+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
36089+ rcvmsg.msg_data.byte_array,
36090+ sizeof(rcvmsg.msg_data.byte_array));
36091 continue;
36092 }
36093
36094diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
36095index 8744d24..d1f9a9a 100644
36096--- a/drivers/lguest/core.c
36097+++ b/drivers/lguest/core.c
36098@@ -91,9 +91,17 @@ static __init int map_switcher(void)
36099 * it's worked so far. The end address needs +1 because __get_vm_area
36100 * allocates an extra guard page, so we need space for that.
36101 */
36102+
36103+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
36104+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36105+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
36106+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36107+#else
36108 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
36109 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
36110 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
36111+#endif
36112+
36113 if (!switcher_vma) {
36114 err = -ENOMEM;
36115 printk("lguest: could not map switcher pages high\n");
36116@@ -118,7 +126,7 @@ static __init int map_switcher(void)
36117 * Now the Switcher is mapped at the right address, we can't fail!
36118 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
36119 */
36120- memcpy(switcher_vma->addr, start_switcher_text,
36121+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
36122 end_switcher_text - start_switcher_text);
36123
36124 printk(KERN_INFO "lguest: mapped switcher at %p\n",
36125diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
36126index 6ae3888..8b38145 100644
36127--- a/drivers/lguest/x86/core.c
36128+++ b/drivers/lguest/x86/core.c
36129@@ -59,7 +59,7 @@ static struct {
36130 /* Offset from where switcher.S was compiled to where we've copied it */
36131 static unsigned long switcher_offset(void)
36132 {
36133- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
36134+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
36135 }
36136
36137 /* This cpu's struct lguest_pages. */
36138@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
36139 * These copies are pretty cheap, so we do them unconditionally: */
36140 /* Save the current Host top-level page directory.
36141 */
36142+
36143+#ifdef CONFIG_PAX_PER_CPU_PGD
36144+ pages->state.host_cr3 = read_cr3();
36145+#else
36146 pages->state.host_cr3 = __pa(current->mm->pgd);
36147+#endif
36148+
36149 /*
36150 * Set up the Guest's page tables to see this CPU's pages (and no
36151 * other CPU's pages).
36152@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
36153 * compiled-in switcher code and the high-mapped copy we just made.
36154 */
36155 for (i = 0; i < IDT_ENTRIES; i++)
36156- default_idt_entries[i] += switcher_offset();
36157+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
36158
36159 /*
36160 * Set up the Switcher's per-cpu areas.
36161@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
36162 * it will be undisturbed when we switch. To change %cs and jump we
36163 * need this structure to feed to Intel's "lcall" instruction.
36164 */
36165- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
36166+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
36167 lguest_entry.segment = LGUEST_CS;
36168
36169 /*
36170diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
36171index 40634b0..4f5855e 100644
36172--- a/drivers/lguest/x86/switcher_32.S
36173+++ b/drivers/lguest/x86/switcher_32.S
36174@@ -87,6 +87,7 @@
36175 #include <asm/page.h>
36176 #include <asm/segment.h>
36177 #include <asm/lguest.h>
36178+#include <asm/processor-flags.h>
36179
36180 // We mark the start of the code to copy
36181 // It's placed in .text tho it's never run here
36182@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
36183 // Changes type when we load it: damn Intel!
36184 // For after we switch over our page tables
36185 // That entry will be read-only: we'd crash.
36186+
36187+#ifdef CONFIG_PAX_KERNEXEC
36188+ mov %cr0, %edx
36189+ xor $X86_CR0_WP, %edx
36190+ mov %edx, %cr0
36191+#endif
36192+
36193 movl $(GDT_ENTRY_TSS*8), %edx
36194 ltr %dx
36195
36196@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
36197 // Let's clear it again for our return.
36198 // The GDT descriptor of the Host
36199 // Points to the table after two "size" bytes
36200- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
36201+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
36202 // Clear "used" from type field (byte 5, bit 2)
36203- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
36204+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
36205+
36206+#ifdef CONFIG_PAX_KERNEXEC
36207+ mov %cr0, %eax
36208+ xor $X86_CR0_WP, %eax
36209+ mov %eax, %cr0
36210+#endif
36211
36212 // Once our page table's switched, the Guest is live!
36213 // The Host fades as we run this final step.
36214@@ -295,13 +309,12 @@ deliver_to_host:
36215 // I consulted gcc, and it gave
36216 // These instructions, which I gladly credit:
36217 leal (%edx,%ebx,8), %eax
36218- movzwl (%eax),%edx
36219- movl 4(%eax), %eax
36220- xorw %ax, %ax
36221- orl %eax, %edx
36222+ movl 4(%eax), %edx
36223+ movw (%eax), %dx
36224 // Now the address of the handler's in %edx
36225 // We call it now: its "iret" drops us home.
36226- jmp *%edx
36227+ ljmp $__KERNEL_CS, $1f
36228+1: jmp *%edx
36229
36230 // Every interrupt can come to us here
36231 // But we must truly tell each apart.
36232diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
36233index 588a5b0..b71db89 100644
36234--- a/drivers/macintosh/macio_asic.c
36235+++ b/drivers/macintosh/macio_asic.c
36236@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
36237 * MacIO is matched against any Apple ID, it's probe() function
36238 * will then decide wether it applies or not
36239 */
36240-static const struct pci_device_id __devinitdata pci_ids [] = { {
36241+static const struct pci_device_id __devinitconst pci_ids [] = { {
36242 .vendor = PCI_VENDOR_ID_APPLE,
36243 .device = PCI_ANY_ID,
36244 .subvendor = PCI_ANY_ID,
36245diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
36246index a348bb0..ecd9b3f 100644
36247--- a/drivers/macintosh/via-pmu-backlight.c
36248+++ b/drivers/macintosh/via-pmu-backlight.c
36249@@ -15,7 +15,7 @@
36250
36251 #define MAX_PMU_LEVEL 0xFF
36252
36253-static struct backlight_ops pmu_backlight_data;
36254+static const struct backlight_ops pmu_backlight_data;
36255 static DEFINE_SPINLOCK(pmu_backlight_lock);
36256 static int sleeping, uses_pmu_bl;
36257 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
36258@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
36259 return bd->props.brightness;
36260 }
36261
36262-static struct backlight_ops pmu_backlight_data = {
36263+static const struct backlight_ops pmu_backlight_data = {
36264 .get_brightness = pmu_backlight_get_brightness,
36265 .update_status = pmu_backlight_update_status,
36266
36267diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36268index 6f308a4..b5f7ff7 100644
36269--- a/drivers/macintosh/via-pmu.c
36270+++ b/drivers/macintosh/via-pmu.c
36271@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36272 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36273 }
36274
36275-static struct platform_suspend_ops pmu_pm_ops = {
36276+static const struct platform_suspend_ops pmu_pm_ops = {
36277 .enter = powerbook_sleep,
36278 .valid = pmu_sleep_valid,
36279 };
36280diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36281index 818b617..4656e38 100644
36282--- a/drivers/md/dm-ioctl.c
36283+++ b/drivers/md/dm-ioctl.c
36284@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36285 cmd == DM_LIST_VERSIONS_CMD)
36286 return 0;
36287
36288- if ((cmd == DM_DEV_CREATE_CMD)) {
36289+ if (cmd == DM_DEV_CREATE_CMD) {
36290 if (!*param->name) {
36291 DMWARN("name not supplied when creating device");
36292 return -EINVAL;
36293diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36294index 6021d0a..a878643 100644
36295--- a/drivers/md/dm-raid1.c
36296+++ b/drivers/md/dm-raid1.c
36297@@ -41,7 +41,7 @@ enum dm_raid1_error {
36298
36299 struct mirror {
36300 struct mirror_set *ms;
36301- atomic_t error_count;
36302+ atomic_unchecked_t error_count;
36303 unsigned long error_type;
36304 struct dm_dev *dev;
36305 sector_t offset;
36306@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36307 * simple way to tell if a device has encountered
36308 * errors.
36309 */
36310- atomic_inc(&m->error_count);
36311+ atomic_inc_unchecked(&m->error_count);
36312
36313 if (test_and_set_bit(error_type, &m->error_type))
36314 return;
36315@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36316 }
36317
36318 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36319- if (!atomic_read(&new->error_count)) {
36320+ if (!atomic_read_unchecked(&new->error_count)) {
36321 set_default_mirror(new);
36322 break;
36323 }
36324@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36325 struct mirror *m = get_default_mirror(ms);
36326
36327 do {
36328- if (likely(!atomic_read(&m->error_count)))
36329+ if (likely(!atomic_read_unchecked(&m->error_count)))
36330 return m;
36331
36332 if (m-- == ms->mirror)
36333@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36334 {
36335 struct mirror *default_mirror = get_default_mirror(m->ms);
36336
36337- return !atomic_read(&default_mirror->error_count);
36338+ return !atomic_read_unchecked(&default_mirror->error_count);
36339 }
36340
36341 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36342@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36343 */
36344 if (likely(region_in_sync(ms, region, 1)))
36345 m = choose_mirror(ms, bio->bi_sector);
36346- else if (m && atomic_read(&m->error_count))
36347+ else if (m && atomic_read_unchecked(&m->error_count))
36348 m = NULL;
36349
36350 if (likely(m))
36351@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36352 }
36353
36354 ms->mirror[mirror].ms = ms;
36355- atomic_set(&(ms->mirror[mirror].error_count), 0);
36356+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36357 ms->mirror[mirror].error_type = 0;
36358 ms->mirror[mirror].offset = offset;
36359
36360@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36361 */
36362 static char device_status_char(struct mirror *m)
36363 {
36364- if (!atomic_read(&(m->error_count)))
36365+ if (!atomic_read_unchecked(&(m->error_count)))
36366 return 'A';
36367
36368 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36369diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36370index bd58703..9f26571 100644
36371--- a/drivers/md/dm-stripe.c
36372+++ b/drivers/md/dm-stripe.c
36373@@ -20,7 +20,7 @@ struct stripe {
36374 struct dm_dev *dev;
36375 sector_t physical_start;
36376
36377- atomic_t error_count;
36378+ atomic_unchecked_t error_count;
36379 };
36380
36381 struct stripe_c {
36382@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36383 kfree(sc);
36384 return r;
36385 }
36386- atomic_set(&(sc->stripe[i].error_count), 0);
36387+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36388 }
36389
36390 ti->private = sc;
36391@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36392 DMEMIT("%d ", sc->stripes);
36393 for (i = 0; i < sc->stripes; i++) {
36394 DMEMIT("%s ", sc->stripe[i].dev->name);
36395- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36396+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36397 'D' : 'A';
36398 }
36399 buffer[i] = '\0';
36400@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36401 */
36402 for (i = 0; i < sc->stripes; i++)
36403 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36404- atomic_inc(&(sc->stripe[i].error_count));
36405- if (atomic_read(&(sc->stripe[i].error_count)) <
36406+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
36407+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36408 DM_IO_ERROR_THRESHOLD)
36409 queue_work(kstriped, &sc->kstriped_ws);
36410 }
36411diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36412index 4b04590..13a77b2 100644
36413--- a/drivers/md/dm-sysfs.c
36414+++ b/drivers/md/dm-sysfs.c
36415@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36416 NULL,
36417 };
36418
36419-static struct sysfs_ops dm_sysfs_ops = {
36420+static const struct sysfs_ops dm_sysfs_ops = {
36421 .show = dm_attr_show,
36422 };
36423
36424diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36425index 03345bb..332250d 100644
36426--- a/drivers/md/dm-table.c
36427+++ b/drivers/md/dm-table.c
36428@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36429 if (!dev_size)
36430 return 0;
36431
36432- if ((start >= dev_size) || (start + len > dev_size)) {
36433+ if ((start >= dev_size) || (len > dev_size - start)) {
36434 DMWARN("%s: %s too small for target: "
36435 "start=%llu, len=%llu, dev_size=%llu",
36436 dm_device_name(ti->table->md), bdevname(bdev, b),
36437diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36438index c988ac2..c418141 100644
36439--- a/drivers/md/dm.c
36440+++ b/drivers/md/dm.c
36441@@ -165,9 +165,9 @@ struct mapped_device {
36442 /*
36443 * Event handling.
36444 */
36445- atomic_t event_nr;
36446+ atomic_unchecked_t event_nr;
36447 wait_queue_head_t eventq;
36448- atomic_t uevent_seq;
36449+ atomic_unchecked_t uevent_seq;
36450 struct list_head uevent_list;
36451 spinlock_t uevent_lock; /* Protect access to uevent_list */
36452
36453@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36454 rwlock_init(&md->map_lock);
36455 atomic_set(&md->holders, 1);
36456 atomic_set(&md->open_count, 0);
36457- atomic_set(&md->event_nr, 0);
36458- atomic_set(&md->uevent_seq, 0);
36459+ atomic_set_unchecked(&md->event_nr, 0);
36460+ atomic_set_unchecked(&md->uevent_seq, 0);
36461 INIT_LIST_HEAD(&md->uevent_list);
36462 spin_lock_init(&md->uevent_lock);
36463
36464@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36465
36466 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36467
36468- atomic_inc(&md->event_nr);
36469+ atomic_inc_unchecked(&md->event_nr);
36470 wake_up(&md->eventq);
36471 }
36472
36473@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36474
36475 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36476 {
36477- return atomic_add_return(1, &md->uevent_seq);
36478+ return atomic_add_return_unchecked(1, &md->uevent_seq);
36479 }
36480
36481 uint32_t dm_get_event_nr(struct mapped_device *md)
36482 {
36483- return atomic_read(&md->event_nr);
36484+ return atomic_read_unchecked(&md->event_nr);
36485 }
36486
36487 int dm_wait_event(struct mapped_device *md, int event_nr)
36488 {
36489 return wait_event_interruptible(md->eventq,
36490- (event_nr != atomic_read(&md->event_nr)));
36491+ (event_nr != atomic_read_unchecked(&md->event_nr)));
36492 }
36493
36494 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36495diff --git a/drivers/md/md.c b/drivers/md/md.c
36496index 4ce6e2f..7a9530a 100644
36497--- a/drivers/md/md.c
36498+++ b/drivers/md/md.c
36499@@ -153,10 +153,10 @@ static int start_readonly;
36500 * start build, activate spare
36501 */
36502 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36503-static atomic_t md_event_count;
36504+static atomic_unchecked_t md_event_count;
36505 void md_new_event(mddev_t *mddev)
36506 {
36507- atomic_inc(&md_event_count);
36508+ atomic_inc_unchecked(&md_event_count);
36509 wake_up(&md_event_waiters);
36510 }
36511 EXPORT_SYMBOL_GPL(md_new_event);
36512@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36513 */
36514 static void md_new_event_inintr(mddev_t *mddev)
36515 {
36516- atomic_inc(&md_event_count);
36517+ atomic_inc_unchecked(&md_event_count);
36518 wake_up(&md_event_waiters);
36519 }
36520
36521@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36522
36523 rdev->preferred_minor = 0xffff;
36524 rdev->data_offset = le64_to_cpu(sb->data_offset);
36525- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36526+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36527
36528 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36529 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36530@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36531 else
36532 sb->resync_offset = cpu_to_le64(0);
36533
36534- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36535+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36536
36537 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36538 sb->size = cpu_to_le64(mddev->dev_sectors);
36539@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36540 static ssize_t
36541 errors_show(mdk_rdev_t *rdev, char *page)
36542 {
36543- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36544+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36545 }
36546
36547 static ssize_t
36548@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36549 char *e;
36550 unsigned long n = simple_strtoul(buf, &e, 10);
36551 if (*buf && (*e == 0 || *e == '\n')) {
36552- atomic_set(&rdev->corrected_errors, n);
36553+ atomic_set_unchecked(&rdev->corrected_errors, n);
36554 return len;
36555 }
36556 return -EINVAL;
36557@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36558 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36559 kfree(rdev);
36560 }
36561-static struct sysfs_ops rdev_sysfs_ops = {
36562+static const struct sysfs_ops rdev_sysfs_ops = {
36563 .show = rdev_attr_show,
36564 .store = rdev_attr_store,
36565 };
36566@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36567 rdev->data_offset = 0;
36568 rdev->sb_events = 0;
36569 atomic_set(&rdev->nr_pending, 0);
36570- atomic_set(&rdev->read_errors, 0);
36571- atomic_set(&rdev->corrected_errors, 0);
36572+ atomic_set_unchecked(&rdev->read_errors, 0);
36573+ atomic_set_unchecked(&rdev->corrected_errors, 0);
36574
36575 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36576 if (!size) {
36577@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36578 kfree(mddev);
36579 }
36580
36581-static struct sysfs_ops md_sysfs_ops = {
36582+static const struct sysfs_ops md_sysfs_ops = {
36583 .show = md_attr_show,
36584 .store = md_attr_store,
36585 };
36586@@ -4482,7 +4482,8 @@ out:
36587 err = 0;
36588 blk_integrity_unregister(disk);
36589 md_new_event(mddev);
36590- sysfs_notify_dirent(mddev->sysfs_state);
36591+ if (mddev->sysfs_state)
36592+ sysfs_notify_dirent(mddev->sysfs_state);
36593 return err;
36594 }
36595
36596@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36597
36598 spin_unlock(&pers_lock);
36599 seq_printf(seq, "\n");
36600- mi->event = atomic_read(&md_event_count);
36601+ mi->event = atomic_read_unchecked(&md_event_count);
36602 return 0;
36603 }
36604 if (v == (void*)2) {
36605@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36606 chunk_kb ? "KB" : "B");
36607 if (bitmap->file) {
36608 seq_printf(seq, ", file: ");
36609- seq_path(seq, &bitmap->file->f_path, " \t\n");
36610+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36611 }
36612
36613 seq_printf(seq, "\n");
36614@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36615 else {
36616 struct seq_file *p = file->private_data;
36617 p->private = mi;
36618- mi->event = atomic_read(&md_event_count);
36619+ mi->event = atomic_read_unchecked(&md_event_count);
36620 }
36621 return error;
36622 }
36623@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36624 /* always allow read */
36625 mask = POLLIN | POLLRDNORM;
36626
36627- if (mi->event != atomic_read(&md_event_count))
36628+ if (mi->event != atomic_read_unchecked(&md_event_count))
36629 mask |= POLLERR | POLLPRI;
36630 return mask;
36631 }
36632@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36633 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36634 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36635 (int)part_stat_read(&disk->part0, sectors[1]) -
36636- atomic_read(&disk->sync_io);
36637+ atomic_read_unchecked(&disk->sync_io);
36638 /* sync IO will cause sync_io to increase before the disk_stats
36639 * as sync_io is counted when a request starts, and
36640 * disk_stats is counted when it completes.
36641diff --git a/drivers/md/md.h b/drivers/md/md.h
36642index 87430fe..0024a4c 100644
36643--- a/drivers/md/md.h
36644+++ b/drivers/md/md.h
36645@@ -94,10 +94,10 @@ struct mdk_rdev_s
36646 * only maintained for arrays that
36647 * support hot removal
36648 */
36649- atomic_t read_errors; /* number of consecutive read errors that
36650+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
36651 * we have tried to ignore.
36652 */
36653- atomic_t corrected_errors; /* number of corrected read errors,
36654+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36655 * for reporting to userspace and storing
36656 * in superblock.
36657 */
36658@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36659
36660 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36661 {
36662- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36663+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36664 }
36665
36666 struct mdk_personality
36667diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36668index 968cb14..f0ad2e4 100644
36669--- a/drivers/md/raid1.c
36670+++ b/drivers/md/raid1.c
36671@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36672 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36673 continue;
36674 rdev = conf->mirrors[d].rdev;
36675- atomic_add(s, &rdev->corrected_errors);
36676+ atomic_add_unchecked(s, &rdev->corrected_errors);
36677 if (sync_page_io(rdev->bdev,
36678 sect + rdev->data_offset,
36679 s<<9,
36680@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36681 /* Well, this device is dead */
36682 md_error(mddev, rdev);
36683 else {
36684- atomic_add(s, &rdev->corrected_errors);
36685+ atomic_add_unchecked(s, &rdev->corrected_errors);
36686 printk(KERN_INFO
36687 "raid1:%s: read error corrected "
36688 "(%d sectors at %llu on %s)\n",
36689diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36690index 1b4e232..cf0f534 100644
36691--- a/drivers/md/raid10.c
36692+++ b/drivers/md/raid10.c
36693@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36694 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36695 set_bit(R10BIO_Uptodate, &r10_bio->state);
36696 else {
36697- atomic_add(r10_bio->sectors,
36698+ atomic_add_unchecked(r10_bio->sectors,
36699 &conf->mirrors[d].rdev->corrected_errors);
36700 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36701 md_error(r10_bio->mddev,
36702@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36703 test_bit(In_sync, &rdev->flags)) {
36704 atomic_inc(&rdev->nr_pending);
36705 rcu_read_unlock();
36706- atomic_add(s, &rdev->corrected_errors);
36707+ atomic_add_unchecked(s, &rdev->corrected_errors);
36708 if (sync_page_io(rdev->bdev,
36709 r10_bio->devs[sl].addr +
36710 sect + rdev->data_offset,
36711diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36712index 883215d..675bf47 100644
36713--- a/drivers/md/raid5.c
36714+++ b/drivers/md/raid5.c
36715@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36716 bi->bi_next = NULL;
36717 if ((rw & WRITE) &&
36718 test_bit(R5_ReWrite, &sh->dev[i].flags))
36719- atomic_add(STRIPE_SECTORS,
36720+ atomic_add_unchecked(STRIPE_SECTORS,
36721 &rdev->corrected_errors);
36722 generic_make_request(bi);
36723 } else {
36724@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36725 clear_bit(R5_ReadError, &sh->dev[i].flags);
36726 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36727 }
36728- if (atomic_read(&conf->disks[i].rdev->read_errors))
36729- atomic_set(&conf->disks[i].rdev->read_errors, 0);
36730+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36731+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36732 } else {
36733 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36734 int retry = 0;
36735 rdev = conf->disks[i].rdev;
36736
36737 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36738- atomic_inc(&rdev->read_errors);
36739+ atomic_inc_unchecked(&rdev->read_errors);
36740 if (conf->mddev->degraded >= conf->max_degraded)
36741 printk_rl(KERN_WARNING
36742 "raid5:%s: read error not correctable "
36743@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36744 (unsigned long long)(sh->sector
36745 + rdev->data_offset),
36746 bdn);
36747- else if (atomic_read(&rdev->read_errors)
36748+ else if (atomic_read_unchecked(&rdev->read_errors)
36749 > conf->max_nr_stripes)
36750 printk(KERN_WARNING
36751 "raid5:%s: Too many read errors, failing device %s.\n",
36752@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36753 sector_t r_sector;
36754 struct stripe_head sh2;
36755
36756+ pax_track_stack();
36757
36758 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36759 stripe = new_sector;
36760diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36761index 05bde9c..2f31d40 100644
36762--- a/drivers/media/common/saa7146_hlp.c
36763+++ b/drivers/media/common/saa7146_hlp.c
36764@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36765
36766 int x[32], y[32], w[32], h[32];
36767
36768+ pax_track_stack();
36769+
36770 /* clear out memory */
36771 memset(&line_list[0], 0x00, sizeof(u32)*32);
36772 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36773diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36774index cb22da5..82b686e 100644
36775--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36776+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36777@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36778 u8 buf[HOST_LINK_BUF_SIZE];
36779 int i;
36780
36781+ pax_track_stack();
36782+
36783 dprintk("%s\n", __func__);
36784
36785 /* check if we have space for a link buf in the rx_buffer */
36786@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36787 unsigned long timeout;
36788 int written;
36789
36790+ pax_track_stack();
36791+
36792 dprintk("%s\n", __func__);
36793
36794 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36795diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36796index 2fe05d0..a3289c4 100644
36797--- a/drivers/media/dvb/dvb-core/dvb_demux.h
36798+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36799@@ -71,7 +71,7 @@ struct dvb_demux_feed {
36800 union {
36801 dmx_ts_cb ts;
36802 dmx_section_cb sec;
36803- } cb;
36804+ } __no_const cb;
36805
36806 struct dvb_demux *demux;
36807 void *priv;
36808diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36809index 94159b9..376bd8e 100644
36810--- a/drivers/media/dvb/dvb-core/dvbdev.c
36811+++ b/drivers/media/dvb/dvb-core/dvbdev.c
36812@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36813 const struct dvb_device *template, void *priv, int type)
36814 {
36815 struct dvb_device *dvbdev;
36816- struct file_operations *dvbdevfops;
36817+ file_operations_no_const *dvbdevfops;
36818 struct device *clsdev;
36819 int minor;
36820 int id;
36821diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36822index 2a53dd0..db8c07a 100644
36823--- a/drivers/media/dvb/dvb-usb/cxusb.c
36824+++ b/drivers/media/dvb/dvb-usb/cxusb.c
36825@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36826 struct dib0700_adapter_state {
36827 int (*set_param_save) (struct dvb_frontend *,
36828 struct dvb_frontend_parameters *);
36829-};
36830+} __no_const;
36831
36832 static int dib7070_set_param_override(struct dvb_frontend *fe,
36833 struct dvb_frontend_parameters *fep)
36834diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36835index db7f7f7..f55e96f 100644
36836--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36837+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36838@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36839
36840 u8 buf[260];
36841
36842+ pax_track_stack();
36843+
36844 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36845 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36846
36847diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36848index 524acf5..5ffc403 100644
36849--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36850+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36851@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36852
36853 struct dib0700_adapter_state {
36854 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36855-};
36856+} __no_const;
36857
36858 /* Hauppauge Nova-T 500 (aka Bristol)
36859 * has a LNA on GPIO0 which is enabled by setting 1 */
36860diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36861index ba91735..4261d84 100644
36862--- a/drivers/media/dvb/frontends/dib3000.h
36863+++ b/drivers/media/dvb/frontends/dib3000.h
36864@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36865 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36866 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36867 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36868-};
36869+} __no_const;
36870
36871 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36872 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36873diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36874index c709ce6..b3fe620 100644
36875--- a/drivers/media/dvb/frontends/or51211.c
36876+++ b/drivers/media/dvb/frontends/or51211.c
36877@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36878 u8 tudata[585];
36879 int i;
36880
36881+ pax_track_stack();
36882+
36883 dprintk("Firmware is %zd bytes\n",fw->size);
36884
36885 /* Get eprom data */
36886diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36887index 482d0f3..ee1e202 100644
36888--- a/drivers/media/radio/radio-cadet.c
36889+++ b/drivers/media/radio/radio-cadet.c
36890@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36891 while (i < count && dev->rdsin != dev->rdsout)
36892 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36893
36894- if (copy_to_user(data, readbuf, i))
36895+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36896 return -EFAULT;
36897 return i;
36898 }
36899diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36900index 6dd51e2..0359b92 100644
36901--- a/drivers/media/video/cx18/cx18-driver.c
36902+++ b/drivers/media/video/cx18/cx18-driver.c
36903@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36904
36905 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36906
36907-static atomic_t cx18_instance = ATOMIC_INIT(0);
36908+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36909
36910 /* Parameter declarations */
36911 static int cardtype[CX18_MAX_CARDS];
36912@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36913 struct i2c_client c;
36914 u8 eedata[256];
36915
36916+ pax_track_stack();
36917+
36918 memset(&c, 0, sizeof(c));
36919 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36920 c.adapter = &cx->i2c_adap[0];
36921@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36922 struct cx18 *cx;
36923
36924 /* FIXME - module parameter arrays constrain max instances */
36925- i = atomic_inc_return(&cx18_instance) - 1;
36926+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36927 if (i >= CX18_MAX_CARDS) {
36928 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36929 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36930diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36931index 463ec34..2f4625a 100644
36932--- a/drivers/media/video/ivtv/ivtv-driver.c
36933+++ b/drivers/media/video/ivtv/ivtv-driver.c
36934@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36935 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36936
36937 /* ivtv instance counter */
36938-static atomic_t ivtv_instance = ATOMIC_INIT(0);
36939+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36940
36941 /* Parameter declarations */
36942 static int cardtype[IVTV_MAX_CARDS];
36943diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36944index 5fc4ac0..652a54a 100644
36945--- a/drivers/media/video/omap24xxcam.c
36946+++ b/drivers/media/video/omap24xxcam.c
36947@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36948 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36949
36950 do_gettimeofday(&vb->ts);
36951- vb->field_count = atomic_add_return(2, &fh->field_count);
36952+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36953 if (csr & csr_error) {
36954 vb->state = VIDEOBUF_ERROR;
36955 if (!atomic_read(&fh->cam->in_reset)) {
36956diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36957index 2ce67f5..cf26a5b 100644
36958--- a/drivers/media/video/omap24xxcam.h
36959+++ b/drivers/media/video/omap24xxcam.h
36960@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36961 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36962 struct videobuf_queue vbq;
36963 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36964- atomic_t field_count; /* field counter for videobuf_buffer */
36965+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36966 /* accessing cam here doesn't need serialisation: it's constant */
36967 struct omap24xxcam_device *cam;
36968 };
36969diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36970index 299afa4..eb47459 100644
36971--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36972+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36973@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36974 u8 *eeprom;
36975 struct tveeprom tvdata;
36976
36977+ pax_track_stack();
36978+
36979 memset(&tvdata,0,sizeof(tvdata));
36980
36981 eeprom = pvr2_eeprom_fetch(hdw);
36982diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36983index 5b152ff..3320638 100644
36984--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36985+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36986@@ -195,7 +195,7 @@ struct pvr2_hdw {
36987
36988 /* I2C stuff */
36989 struct i2c_adapter i2c_adap;
36990- struct i2c_algorithm i2c_algo;
36991+ i2c_algorithm_no_const i2c_algo;
36992 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36993 int i2c_cx25840_hack_state;
36994 int i2c_linked;
36995diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36996index 1eabff6..8e2313a 100644
36997--- a/drivers/media/video/saa7134/saa6752hs.c
36998+++ b/drivers/media/video/saa7134/saa6752hs.c
36999@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
37000 unsigned char localPAT[256];
37001 unsigned char localPMT[256];
37002
37003+ pax_track_stack();
37004+
37005 /* Set video format - must be done first as it resets other settings */
37006 set_reg8(client, 0x41, h->video_format);
37007
37008diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
37009index 9c1d3ac..b1b49e9 100644
37010--- a/drivers/media/video/saa7164/saa7164-cmd.c
37011+++ b/drivers/media/video/saa7164/saa7164-cmd.c
37012@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
37013 wait_queue_head_t *q = 0;
37014 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37015
37016+ pax_track_stack();
37017+
37018 /* While any outstand message on the bus exists... */
37019 do {
37020
37021@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
37022 u8 tmp[512];
37023 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37024
37025+ pax_track_stack();
37026+
37027 while (loop) {
37028
37029 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
37030diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
37031index b085496..cde0270 100644
37032--- a/drivers/media/video/usbvideo/ibmcam.c
37033+++ b/drivers/media/video/usbvideo/ibmcam.c
37034@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
37035 static int __init ibmcam_init(void)
37036 {
37037 struct usbvideo_cb cbTbl;
37038- memset(&cbTbl, 0, sizeof(cbTbl));
37039- cbTbl.probe = ibmcam_probe;
37040- cbTbl.setupOnOpen = ibmcam_setup_on_open;
37041- cbTbl.videoStart = ibmcam_video_start;
37042- cbTbl.videoStop = ibmcam_video_stop;
37043- cbTbl.processData = ibmcam_ProcessIsocData;
37044- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37045- cbTbl.adjustPicture = ibmcam_adjust_picture;
37046- cbTbl.getFPS = ibmcam_calculate_fps;
37047+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
37048+ *(void **)&cbTbl.probe = ibmcam_probe;
37049+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
37050+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
37051+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
37052+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
37053+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37054+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
37055+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
37056 return usbvideo_register(
37057 &cams,
37058 MAX_IBMCAM,
37059diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
37060index 31d57f2..600b735 100644
37061--- a/drivers/media/video/usbvideo/konicawc.c
37062+++ b/drivers/media/video/usbvideo/konicawc.c
37063@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
37064 int error;
37065
37066 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37067- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37068+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37069
37070 cam->input = input_dev = input_allocate_device();
37071 if (!input_dev) {
37072@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
37073 struct usbvideo_cb cbTbl;
37074 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
37075 DRIVER_DESC "\n");
37076- memset(&cbTbl, 0, sizeof(cbTbl));
37077- cbTbl.probe = konicawc_probe;
37078- cbTbl.setupOnOpen = konicawc_setup_on_open;
37079- cbTbl.processData = konicawc_process_isoc;
37080- cbTbl.getFPS = konicawc_calculate_fps;
37081- cbTbl.setVideoMode = konicawc_set_video_mode;
37082- cbTbl.startDataPump = konicawc_start_data;
37083- cbTbl.stopDataPump = konicawc_stop_data;
37084- cbTbl.adjustPicture = konicawc_adjust_picture;
37085- cbTbl.userFree = konicawc_free_uvd;
37086+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
37087+ *(void **)&cbTbl.probe = konicawc_probe;
37088+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
37089+ *(void **)&cbTbl.processData = konicawc_process_isoc;
37090+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
37091+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
37092+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
37093+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
37094+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
37095+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
37096 return usbvideo_register(
37097 &cams,
37098 MAX_CAMERAS,
37099diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
37100index 803d3e4..c4d1b96 100644
37101--- a/drivers/media/video/usbvideo/quickcam_messenger.c
37102+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
37103@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
37104 int error;
37105
37106 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
37107- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37108+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
37109
37110 cam->input = input_dev = input_allocate_device();
37111 if (!input_dev) {
37112diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
37113index fbd1b63..292f9f0 100644
37114--- a/drivers/media/video/usbvideo/ultracam.c
37115+++ b/drivers/media/video/usbvideo/ultracam.c
37116@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
37117 {
37118 struct usbvideo_cb cbTbl;
37119 memset(&cbTbl, 0, sizeof(cbTbl));
37120- cbTbl.probe = ultracam_probe;
37121- cbTbl.setupOnOpen = ultracam_setup_on_open;
37122- cbTbl.videoStart = ultracam_video_start;
37123- cbTbl.videoStop = ultracam_video_stop;
37124- cbTbl.processData = ultracam_ProcessIsocData;
37125- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37126- cbTbl.adjustPicture = ultracam_adjust_picture;
37127- cbTbl.getFPS = ultracam_calculate_fps;
37128+ *(void **)&cbTbl.probe = ultracam_probe;
37129+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
37130+ *(void **)&cbTbl.videoStart = ultracam_video_start;
37131+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
37132+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
37133+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37134+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
37135+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
37136 return usbvideo_register(
37137 &cams,
37138 MAX_CAMERAS,
37139diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
37140index dea8b32..34f6878 100644
37141--- a/drivers/media/video/usbvideo/usbvideo.c
37142+++ b/drivers/media/video/usbvideo/usbvideo.c
37143@@ -697,15 +697,15 @@ int usbvideo_register(
37144 __func__, cams, base_size, num_cams);
37145
37146 /* Copy callbacks, apply defaults for those that are not set */
37147- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
37148+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
37149 if (cams->cb.getFrame == NULL)
37150- cams->cb.getFrame = usbvideo_GetFrame;
37151+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
37152 if (cams->cb.disconnect == NULL)
37153- cams->cb.disconnect = usbvideo_Disconnect;
37154+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
37155 if (cams->cb.startDataPump == NULL)
37156- cams->cb.startDataPump = usbvideo_StartDataPump;
37157+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
37158 if (cams->cb.stopDataPump == NULL)
37159- cams->cb.stopDataPump = usbvideo_StopDataPump;
37160+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
37161
37162 cams->num_cameras = num_cams;
37163 cams->cam = (struct uvd *) &cams[1];
37164diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
37165index c66985b..7fa143a 100644
37166--- a/drivers/media/video/usbvideo/usbvideo.h
37167+++ b/drivers/media/video/usbvideo/usbvideo.h
37168@@ -268,7 +268,7 @@ struct usbvideo_cb {
37169 int (*startDataPump)(struct uvd *uvd);
37170 void (*stopDataPump)(struct uvd *uvd);
37171 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
37172-};
37173+} __no_const;
37174
37175 struct usbvideo {
37176 int num_cameras; /* As allocated */
37177diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
37178index e0f91e4..37554ea 100644
37179--- a/drivers/media/video/usbvision/usbvision-core.c
37180+++ b/drivers/media/video/usbvision/usbvision-core.c
37181@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
37182 unsigned char rv, gv, bv;
37183 static unsigned char *Y, *U, *V;
37184
37185+ pax_track_stack();
37186+
37187 frame = usbvision->curFrame;
37188 imageSize = frame->frmwidth * frame->frmheight;
37189 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
37190diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
37191index 0d06e7c..3d17d24 100644
37192--- a/drivers/media/video/v4l2-device.c
37193+++ b/drivers/media/video/v4l2-device.c
37194@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
37195 EXPORT_SYMBOL_GPL(v4l2_device_register);
37196
37197 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
37198- atomic_t *instance)
37199+ atomic_unchecked_t *instance)
37200 {
37201- int num = atomic_inc_return(instance) - 1;
37202+ int num = atomic_inc_return_unchecked(instance) - 1;
37203 int len = strlen(basename);
37204
37205 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
37206diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
37207index 032ebae..6a3532c 100644
37208--- a/drivers/media/video/videobuf-dma-sg.c
37209+++ b/drivers/media/video/videobuf-dma-sg.c
37210@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
37211 {
37212 struct videobuf_queue q;
37213
37214+ pax_track_stack();
37215+
37216 /* Required to make generic handler to call __videobuf_alloc */
37217 q.int_ops = &sg_ops;
37218
37219diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
37220index b6992b7..9fa7547 100644
37221--- a/drivers/message/fusion/mptbase.c
37222+++ b/drivers/message/fusion/mptbase.c
37223@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
37224 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
37225 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
37226
37227+#ifdef CONFIG_GRKERNSEC_HIDESYM
37228+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37229+ NULL, NULL);
37230+#else
37231 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
37232 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
37233+#endif
37234+
37235 /*
37236 * Rounding UP to nearest 4-kB boundary here...
37237 */
37238diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
37239index 83873e3..e360e9a 100644
37240--- a/drivers/message/fusion/mptsas.c
37241+++ b/drivers/message/fusion/mptsas.c
37242@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
37243 return 0;
37244 }
37245
37246+static inline void
37247+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37248+{
37249+ if (phy_info->port_details) {
37250+ phy_info->port_details->rphy = rphy;
37251+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37252+ ioc->name, rphy));
37253+ }
37254+
37255+ if (rphy) {
37256+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37257+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37258+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37259+ ioc->name, rphy, rphy->dev.release));
37260+ }
37261+}
37262+
37263 /* no mutex */
37264 static void
37265 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
37266@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37267 return NULL;
37268 }
37269
37270-static inline void
37271-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37272-{
37273- if (phy_info->port_details) {
37274- phy_info->port_details->rphy = rphy;
37275- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37276- ioc->name, rphy));
37277- }
37278-
37279- if (rphy) {
37280- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37281- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37282- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37283- ioc->name, rphy, rphy->dev.release));
37284- }
37285-}
37286-
37287 static inline struct sas_port *
37288 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37289 {
37290diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37291index bd096ca..332cf76 100644
37292--- a/drivers/message/fusion/mptscsih.c
37293+++ b/drivers/message/fusion/mptscsih.c
37294@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37295
37296 h = shost_priv(SChost);
37297
37298- if (h) {
37299- if (h->info_kbuf == NULL)
37300- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37301- return h->info_kbuf;
37302- h->info_kbuf[0] = '\0';
37303+ if (!h)
37304+ return NULL;
37305
37306- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37307- h->info_kbuf[size-1] = '\0';
37308- }
37309+ if (h->info_kbuf == NULL)
37310+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37311+ return h->info_kbuf;
37312+ h->info_kbuf[0] = '\0';
37313+
37314+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37315+ h->info_kbuf[size-1] = '\0';
37316
37317 return h->info_kbuf;
37318 }
37319diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37320index efba702..59b2c0f 100644
37321--- a/drivers/message/i2o/i2o_config.c
37322+++ b/drivers/message/i2o/i2o_config.c
37323@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37324 struct i2o_message *msg;
37325 unsigned int iop;
37326
37327+ pax_track_stack();
37328+
37329 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37330 return -EFAULT;
37331
37332diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37333index 7045c45..c07b170 100644
37334--- a/drivers/message/i2o/i2o_proc.c
37335+++ b/drivers/message/i2o/i2o_proc.c
37336@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37337 "Array Controller Device"
37338 };
37339
37340-static char *chtostr(u8 * chars, int n)
37341-{
37342- char tmp[256];
37343- tmp[0] = 0;
37344- return strncat(tmp, (char *)chars, n);
37345-}
37346-
37347 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37348 char *group)
37349 {
37350@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37351
37352 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37353 seq_printf(seq, "%-#8x", ddm_table.module_id);
37354- seq_printf(seq, "%-29s",
37355- chtostr(ddm_table.module_name_version, 28));
37356+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37357 seq_printf(seq, "%9d ", ddm_table.data_size);
37358 seq_printf(seq, "%8d", ddm_table.code_size);
37359
37360@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37361
37362 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37363 seq_printf(seq, "%-#8x", dst->module_id);
37364- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37365- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37366+ seq_printf(seq, "%-.28s", dst->module_name_version);
37367+ seq_printf(seq, "%-.8s", dst->date);
37368 seq_printf(seq, "%8d ", dst->module_size);
37369 seq_printf(seq, "%8d ", dst->mpb_size);
37370 seq_printf(seq, "0x%04x", dst->module_flags);
37371@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37372 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37373 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37374 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37375- seq_printf(seq, "Vendor info : %s\n",
37376- chtostr((u8 *) (work32 + 2), 16));
37377- seq_printf(seq, "Product info : %s\n",
37378- chtostr((u8 *) (work32 + 6), 16));
37379- seq_printf(seq, "Description : %s\n",
37380- chtostr((u8 *) (work32 + 10), 16));
37381- seq_printf(seq, "Product rev. : %s\n",
37382- chtostr((u8 *) (work32 + 14), 8));
37383+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37384+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37385+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37386+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37387
37388 seq_printf(seq, "Serial number : ");
37389 print_serial_number(seq, (u8 *) (work32 + 16),
37390@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37391 }
37392
37393 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37394- seq_printf(seq, "Module name : %s\n",
37395- chtostr(result.module_name, 24));
37396- seq_printf(seq, "Module revision : %s\n",
37397- chtostr(result.module_rev, 8));
37398+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
37399+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37400
37401 seq_printf(seq, "Serial number : ");
37402 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37403@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37404 return 0;
37405 }
37406
37407- seq_printf(seq, "Device name : %s\n",
37408- chtostr(result.device_name, 64));
37409- seq_printf(seq, "Service name : %s\n",
37410- chtostr(result.service_name, 64));
37411- seq_printf(seq, "Physical name : %s\n",
37412- chtostr(result.physical_location, 64));
37413- seq_printf(seq, "Instance number : %s\n",
37414- chtostr(result.instance_number, 4));
37415+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
37416+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
37417+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37418+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37419
37420 return 0;
37421 }
37422diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37423index 27cf4af..b1205b8 100644
37424--- a/drivers/message/i2o/iop.c
37425+++ b/drivers/message/i2o/iop.c
37426@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37427
37428 spin_lock_irqsave(&c->context_list_lock, flags);
37429
37430- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37431- atomic_inc(&c->context_list_counter);
37432+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37433+ atomic_inc_unchecked(&c->context_list_counter);
37434
37435- entry->context = atomic_read(&c->context_list_counter);
37436+ entry->context = atomic_read_unchecked(&c->context_list_counter);
37437
37438 list_add(&entry->list, &c->context_list);
37439
37440@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37441
37442 #if BITS_PER_LONG == 64
37443 spin_lock_init(&c->context_list_lock);
37444- atomic_set(&c->context_list_counter, 0);
37445+ atomic_set_unchecked(&c->context_list_counter, 0);
37446 INIT_LIST_HEAD(&c->context_list);
37447 #endif
37448
37449diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37450index 78e3e85..66c9a0d 100644
37451--- a/drivers/mfd/ab3100-core.c
37452+++ b/drivers/mfd/ab3100-core.c
37453@@ -777,7 +777,7 @@ struct ab_family_id {
37454 char *name;
37455 };
37456
37457-static const struct ab_family_id ids[] __initdata = {
37458+static const struct ab_family_id ids[] __initconst = {
37459 /* AB3100 */
37460 {
37461 .id = 0xc0,
37462diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37463index 8d8c932..8104515 100644
37464--- a/drivers/mfd/wm8350-i2c.c
37465+++ b/drivers/mfd/wm8350-i2c.c
37466@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37467 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37468 int ret;
37469
37470+ pax_track_stack();
37471+
37472 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37473 return -EINVAL;
37474
37475diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37476index e4ff50b..4cc3f04 100644
37477--- a/drivers/misc/kgdbts.c
37478+++ b/drivers/misc/kgdbts.c
37479@@ -118,7 +118,7 @@
37480 } while (0)
37481 #define MAX_CONFIG_LEN 40
37482
37483-static struct kgdb_io kgdbts_io_ops;
37484+static const struct kgdb_io kgdbts_io_ops;
37485 static char get_buf[BUFMAX];
37486 static int get_buf_cnt;
37487 static char put_buf[BUFMAX];
37488@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37489 module_put(THIS_MODULE);
37490 }
37491
37492-static struct kgdb_io kgdbts_io_ops = {
37493+static const struct kgdb_io kgdbts_io_ops = {
37494 .name = "kgdbts",
37495 .read_char = kgdbts_get_char,
37496 .write_char = kgdbts_put_char,
37497diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37498index 37e7cfc..67cfb76 100644
37499--- a/drivers/misc/sgi-gru/gruhandles.c
37500+++ b/drivers/misc/sgi-gru/gruhandles.c
37501@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37502
37503 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37504 {
37505- atomic_long_inc(&mcs_op_statistics[op].count);
37506- atomic_long_add(clks, &mcs_op_statistics[op].total);
37507+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37508+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37509 if (mcs_op_statistics[op].max < clks)
37510 mcs_op_statistics[op].max = clks;
37511 }
37512diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37513index 3f2375c..467c6e6 100644
37514--- a/drivers/misc/sgi-gru/gruprocfs.c
37515+++ b/drivers/misc/sgi-gru/gruprocfs.c
37516@@ -32,9 +32,9 @@
37517
37518 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37519
37520-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37521+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37522 {
37523- unsigned long val = atomic_long_read(v);
37524+ unsigned long val = atomic_long_read_unchecked(v);
37525
37526 if (val)
37527 seq_printf(s, "%16lu %s\n", val, id);
37528@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37529 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37530
37531 for (op = 0; op < mcsop_last; op++) {
37532- count = atomic_long_read(&mcs_op_statistics[op].count);
37533- total = atomic_long_read(&mcs_op_statistics[op].total);
37534+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37535+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37536 max = mcs_op_statistics[op].max;
37537 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37538 count ? total / count : 0, max);
37539diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37540index 46990bc..4a251b5 100644
37541--- a/drivers/misc/sgi-gru/grutables.h
37542+++ b/drivers/misc/sgi-gru/grutables.h
37543@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37544 * GRU statistics.
37545 */
37546 struct gru_stats_s {
37547- atomic_long_t vdata_alloc;
37548- atomic_long_t vdata_free;
37549- atomic_long_t gts_alloc;
37550- atomic_long_t gts_free;
37551- atomic_long_t vdata_double_alloc;
37552- atomic_long_t gts_double_allocate;
37553- atomic_long_t assign_context;
37554- atomic_long_t assign_context_failed;
37555- atomic_long_t free_context;
37556- atomic_long_t load_user_context;
37557- atomic_long_t load_kernel_context;
37558- atomic_long_t lock_kernel_context;
37559- atomic_long_t unlock_kernel_context;
37560- atomic_long_t steal_user_context;
37561- atomic_long_t steal_kernel_context;
37562- atomic_long_t steal_context_failed;
37563- atomic_long_t nopfn;
37564- atomic_long_t break_cow;
37565- atomic_long_t asid_new;
37566- atomic_long_t asid_next;
37567- atomic_long_t asid_wrap;
37568- atomic_long_t asid_reuse;
37569- atomic_long_t intr;
37570- atomic_long_t intr_mm_lock_failed;
37571- atomic_long_t call_os;
37572- atomic_long_t call_os_offnode_reference;
37573- atomic_long_t call_os_check_for_bug;
37574- atomic_long_t call_os_wait_queue;
37575- atomic_long_t user_flush_tlb;
37576- atomic_long_t user_unload_context;
37577- atomic_long_t user_exception;
37578- atomic_long_t set_context_option;
37579- atomic_long_t migrate_check;
37580- atomic_long_t migrated_retarget;
37581- atomic_long_t migrated_unload;
37582- atomic_long_t migrated_unload_delay;
37583- atomic_long_t migrated_nopfn_retarget;
37584- atomic_long_t migrated_nopfn_unload;
37585- atomic_long_t tlb_dropin;
37586- atomic_long_t tlb_dropin_fail_no_asid;
37587- atomic_long_t tlb_dropin_fail_upm;
37588- atomic_long_t tlb_dropin_fail_invalid;
37589- atomic_long_t tlb_dropin_fail_range_active;
37590- atomic_long_t tlb_dropin_fail_idle;
37591- atomic_long_t tlb_dropin_fail_fmm;
37592- atomic_long_t tlb_dropin_fail_no_exception;
37593- atomic_long_t tlb_dropin_fail_no_exception_war;
37594- atomic_long_t tfh_stale_on_fault;
37595- atomic_long_t mmu_invalidate_range;
37596- atomic_long_t mmu_invalidate_page;
37597- atomic_long_t mmu_clear_flush_young;
37598- atomic_long_t flush_tlb;
37599- atomic_long_t flush_tlb_gru;
37600- atomic_long_t flush_tlb_gru_tgh;
37601- atomic_long_t flush_tlb_gru_zero_asid;
37602+ atomic_long_unchecked_t vdata_alloc;
37603+ atomic_long_unchecked_t vdata_free;
37604+ atomic_long_unchecked_t gts_alloc;
37605+ atomic_long_unchecked_t gts_free;
37606+ atomic_long_unchecked_t vdata_double_alloc;
37607+ atomic_long_unchecked_t gts_double_allocate;
37608+ atomic_long_unchecked_t assign_context;
37609+ atomic_long_unchecked_t assign_context_failed;
37610+ atomic_long_unchecked_t free_context;
37611+ atomic_long_unchecked_t load_user_context;
37612+ atomic_long_unchecked_t load_kernel_context;
37613+ atomic_long_unchecked_t lock_kernel_context;
37614+ atomic_long_unchecked_t unlock_kernel_context;
37615+ atomic_long_unchecked_t steal_user_context;
37616+ atomic_long_unchecked_t steal_kernel_context;
37617+ atomic_long_unchecked_t steal_context_failed;
37618+ atomic_long_unchecked_t nopfn;
37619+ atomic_long_unchecked_t break_cow;
37620+ atomic_long_unchecked_t asid_new;
37621+ atomic_long_unchecked_t asid_next;
37622+ atomic_long_unchecked_t asid_wrap;
37623+ atomic_long_unchecked_t asid_reuse;
37624+ atomic_long_unchecked_t intr;
37625+ atomic_long_unchecked_t intr_mm_lock_failed;
37626+ atomic_long_unchecked_t call_os;
37627+ atomic_long_unchecked_t call_os_offnode_reference;
37628+ atomic_long_unchecked_t call_os_check_for_bug;
37629+ atomic_long_unchecked_t call_os_wait_queue;
37630+ atomic_long_unchecked_t user_flush_tlb;
37631+ atomic_long_unchecked_t user_unload_context;
37632+ atomic_long_unchecked_t user_exception;
37633+ atomic_long_unchecked_t set_context_option;
37634+ atomic_long_unchecked_t migrate_check;
37635+ atomic_long_unchecked_t migrated_retarget;
37636+ atomic_long_unchecked_t migrated_unload;
37637+ atomic_long_unchecked_t migrated_unload_delay;
37638+ atomic_long_unchecked_t migrated_nopfn_retarget;
37639+ atomic_long_unchecked_t migrated_nopfn_unload;
37640+ atomic_long_unchecked_t tlb_dropin;
37641+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37642+ atomic_long_unchecked_t tlb_dropin_fail_upm;
37643+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
37644+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
37645+ atomic_long_unchecked_t tlb_dropin_fail_idle;
37646+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
37647+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37648+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37649+ atomic_long_unchecked_t tfh_stale_on_fault;
37650+ atomic_long_unchecked_t mmu_invalidate_range;
37651+ atomic_long_unchecked_t mmu_invalidate_page;
37652+ atomic_long_unchecked_t mmu_clear_flush_young;
37653+ atomic_long_unchecked_t flush_tlb;
37654+ atomic_long_unchecked_t flush_tlb_gru;
37655+ atomic_long_unchecked_t flush_tlb_gru_tgh;
37656+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37657
37658- atomic_long_t copy_gpa;
37659+ atomic_long_unchecked_t copy_gpa;
37660
37661- atomic_long_t mesq_receive;
37662- atomic_long_t mesq_receive_none;
37663- atomic_long_t mesq_send;
37664- atomic_long_t mesq_send_failed;
37665- atomic_long_t mesq_noop;
37666- atomic_long_t mesq_send_unexpected_error;
37667- atomic_long_t mesq_send_lb_overflow;
37668- atomic_long_t mesq_send_qlimit_reached;
37669- atomic_long_t mesq_send_amo_nacked;
37670- atomic_long_t mesq_send_put_nacked;
37671- atomic_long_t mesq_qf_not_full;
37672- atomic_long_t mesq_qf_locked;
37673- atomic_long_t mesq_qf_noop_not_full;
37674- atomic_long_t mesq_qf_switch_head_failed;
37675- atomic_long_t mesq_qf_unexpected_error;
37676- atomic_long_t mesq_noop_unexpected_error;
37677- atomic_long_t mesq_noop_lb_overflow;
37678- atomic_long_t mesq_noop_qlimit_reached;
37679- atomic_long_t mesq_noop_amo_nacked;
37680- atomic_long_t mesq_noop_put_nacked;
37681+ atomic_long_unchecked_t mesq_receive;
37682+ atomic_long_unchecked_t mesq_receive_none;
37683+ atomic_long_unchecked_t mesq_send;
37684+ atomic_long_unchecked_t mesq_send_failed;
37685+ atomic_long_unchecked_t mesq_noop;
37686+ atomic_long_unchecked_t mesq_send_unexpected_error;
37687+ atomic_long_unchecked_t mesq_send_lb_overflow;
37688+ atomic_long_unchecked_t mesq_send_qlimit_reached;
37689+ atomic_long_unchecked_t mesq_send_amo_nacked;
37690+ atomic_long_unchecked_t mesq_send_put_nacked;
37691+ atomic_long_unchecked_t mesq_qf_not_full;
37692+ atomic_long_unchecked_t mesq_qf_locked;
37693+ atomic_long_unchecked_t mesq_qf_noop_not_full;
37694+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
37695+ atomic_long_unchecked_t mesq_qf_unexpected_error;
37696+ atomic_long_unchecked_t mesq_noop_unexpected_error;
37697+ atomic_long_unchecked_t mesq_noop_lb_overflow;
37698+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
37699+ atomic_long_unchecked_t mesq_noop_amo_nacked;
37700+ atomic_long_unchecked_t mesq_noop_put_nacked;
37701
37702 };
37703
37704@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37705 cchop_deallocate, tghop_invalidate, mcsop_last};
37706
37707 struct mcs_op_statistic {
37708- atomic_long_t count;
37709- atomic_long_t total;
37710+ atomic_long_unchecked_t count;
37711+ atomic_long_unchecked_t total;
37712 unsigned long max;
37713 };
37714
37715@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37716
37717 #define STAT(id) do { \
37718 if (gru_options & OPT_STATS) \
37719- atomic_long_inc(&gru_stats.id); \
37720+ atomic_long_inc_unchecked(&gru_stats.id); \
37721 } while (0)
37722
37723 #ifdef CONFIG_SGI_GRU_DEBUG
37724diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37725index 2275126..12a9dbfb 100644
37726--- a/drivers/misc/sgi-xp/xp.h
37727+++ b/drivers/misc/sgi-xp/xp.h
37728@@ -289,7 +289,7 @@ struct xpc_interface {
37729 xpc_notify_func, void *);
37730 void (*received) (short, int, void *);
37731 enum xp_retval (*partid_to_nasids) (short, void *);
37732-};
37733+} __no_const;
37734
37735 extern struct xpc_interface xpc_interface;
37736
37737diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37738index b94d5f7..7f494c5 100644
37739--- a/drivers/misc/sgi-xp/xpc.h
37740+++ b/drivers/misc/sgi-xp/xpc.h
37741@@ -835,6 +835,7 @@ struct xpc_arch_operations {
37742 void (*received_payload) (struct xpc_channel *, void *);
37743 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37744 };
37745+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37746
37747 /* struct xpc_partition act_state values (for XPC HB) */
37748
37749@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37750 /* found in xpc_main.c */
37751 extern struct device *xpc_part;
37752 extern struct device *xpc_chan;
37753-extern struct xpc_arch_operations xpc_arch_ops;
37754+extern xpc_arch_operations_no_const xpc_arch_ops;
37755 extern int xpc_disengage_timelimit;
37756 extern int xpc_disengage_timedout;
37757 extern int xpc_activate_IRQ_rcvd;
37758diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37759index fd3688a..7e211a4 100644
37760--- a/drivers/misc/sgi-xp/xpc_main.c
37761+++ b/drivers/misc/sgi-xp/xpc_main.c
37762@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37763 .notifier_call = xpc_system_die,
37764 };
37765
37766-struct xpc_arch_operations xpc_arch_ops;
37767+xpc_arch_operations_no_const xpc_arch_ops;
37768
37769 /*
37770 * Timer function to enforce the timelimit on the partition disengage.
37771diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37772index 8b70e03..700bda6 100644
37773--- a/drivers/misc/sgi-xp/xpc_sn2.c
37774+++ b/drivers/misc/sgi-xp/xpc_sn2.c
37775@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37776 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37777 }
37778
37779-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37780+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37781 .setup_partitions = xpc_setup_partitions_sn2,
37782 .teardown_partitions = xpc_teardown_partitions_sn2,
37783 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37784@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37785 int ret;
37786 size_t buf_size;
37787
37788- xpc_arch_ops = xpc_arch_ops_sn2;
37789+ pax_open_kernel();
37790+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37791+ pax_close_kernel();
37792
37793 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37794 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37795diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37796index 8e08d71..7cb8c9b 100644
37797--- a/drivers/misc/sgi-xp/xpc_uv.c
37798+++ b/drivers/misc/sgi-xp/xpc_uv.c
37799@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37800 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37801 }
37802
37803-static struct xpc_arch_operations xpc_arch_ops_uv = {
37804+static const struct xpc_arch_operations xpc_arch_ops_uv = {
37805 .setup_partitions = xpc_setup_partitions_uv,
37806 .teardown_partitions = xpc_teardown_partitions_uv,
37807 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37808@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37809 int
37810 xpc_init_uv(void)
37811 {
37812- xpc_arch_ops = xpc_arch_ops_uv;
37813+ pax_open_kernel();
37814+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37815+ pax_close_kernel();
37816
37817 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37818 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37819diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37820index 6fd20b42..650efe3 100644
37821--- a/drivers/mmc/host/sdhci-pci.c
37822+++ b/drivers/mmc/host/sdhci-pci.c
37823@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37824 .probe = via_probe,
37825 };
37826
37827-static const struct pci_device_id pci_ids[] __devinitdata = {
37828+static const struct pci_device_id pci_ids[] __devinitconst = {
37829 {
37830 .vendor = PCI_VENDOR_ID_RICOH,
37831 .device = PCI_DEVICE_ID_RICOH_R5C822,
37832diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37833index e7563a9..5f90ce5 100644
37834--- a/drivers/mtd/chips/cfi_cmdset_0001.c
37835+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37836@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37837 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37838 unsigned long timeo = jiffies + HZ;
37839
37840+ pax_track_stack();
37841+
37842 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37843 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37844 goto sleep;
37845@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37846 unsigned long initial_adr;
37847 int initial_len = len;
37848
37849+ pax_track_stack();
37850+
37851 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37852 adr += chip->start;
37853 initial_adr = adr;
37854@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37855 int retries = 3;
37856 int ret;
37857
37858+ pax_track_stack();
37859+
37860 adr += chip->start;
37861
37862 retry:
37863diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37864index 0667a67..3ab97ed 100644
37865--- a/drivers/mtd/chips/cfi_cmdset_0020.c
37866+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37867@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37868 unsigned long cmd_addr;
37869 struct cfi_private *cfi = map->fldrv_priv;
37870
37871+ pax_track_stack();
37872+
37873 adr += chip->start;
37874
37875 /* Ensure cmd read/writes are aligned. */
37876@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37877 DECLARE_WAITQUEUE(wait, current);
37878 int wbufsize, z;
37879
37880+ pax_track_stack();
37881+
37882 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37883 if (adr & (map_bankwidth(map)-1))
37884 return -EINVAL;
37885@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37886 DECLARE_WAITQUEUE(wait, current);
37887 int ret = 0;
37888
37889+ pax_track_stack();
37890+
37891 adr += chip->start;
37892
37893 /* Let's determine this according to the interleave only once */
37894@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37895 unsigned long timeo = jiffies + HZ;
37896 DECLARE_WAITQUEUE(wait, current);
37897
37898+ pax_track_stack();
37899+
37900 adr += chip->start;
37901
37902 /* Let's determine this according to the interleave only once */
37903@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37904 unsigned long timeo = jiffies + HZ;
37905 DECLARE_WAITQUEUE(wait, current);
37906
37907+ pax_track_stack();
37908+
37909 adr += chip->start;
37910
37911 /* Let's determine this according to the interleave only once */
37912diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37913index 5bf5f46..c5de373 100644
37914--- a/drivers/mtd/devices/doc2000.c
37915+++ b/drivers/mtd/devices/doc2000.c
37916@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37917
37918 /* The ECC will not be calculated correctly if less than 512 is written */
37919 /* DBB-
37920- if (len != 0x200 && eccbuf)
37921+ if (len != 0x200)
37922 printk(KERN_WARNING
37923 "ECC needs a full sector write (adr: %lx size %lx)\n",
37924 (long) to, (long) len);
37925diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37926index 0990f78..bb4e8a4 100644
37927--- a/drivers/mtd/devices/doc2001.c
37928+++ b/drivers/mtd/devices/doc2001.c
37929@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37930 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37931
37932 /* Don't allow read past end of device */
37933- if (from >= this->totlen)
37934+ if (from >= this->totlen || !len)
37935 return -EINVAL;
37936
37937 /* Don't allow a single read to cross a 512-byte block boundary */
37938diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37939index e56d6b4..f07e6cf 100644
37940--- a/drivers/mtd/ftl.c
37941+++ b/drivers/mtd/ftl.c
37942@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37943 loff_t offset;
37944 uint16_t srcunitswap = cpu_to_le16(srcunit);
37945
37946+ pax_track_stack();
37947+
37948 eun = &part->EUNInfo[srcunit];
37949 xfer = &part->XferInfo[xferunit];
37950 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37951diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37952index 8aca552..146446e 100755
37953--- a/drivers/mtd/inftlcore.c
37954+++ b/drivers/mtd/inftlcore.c
37955@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37956 struct inftl_oob oob;
37957 size_t retlen;
37958
37959+ pax_track_stack();
37960+
37961 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37962 "pending=%d)\n", inftl, thisVUC, pendingblock);
37963
37964diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37965index 32e82ae..ed50953 100644
37966--- a/drivers/mtd/inftlmount.c
37967+++ b/drivers/mtd/inftlmount.c
37968@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37969 struct INFTLPartition *ip;
37970 size_t retlen;
37971
37972+ pax_track_stack();
37973+
37974 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37975
37976 /*
37977diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37978index 79bf40f..fe5f8fd 100644
37979--- a/drivers/mtd/lpddr/qinfo_probe.c
37980+++ b/drivers/mtd/lpddr/qinfo_probe.c
37981@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37982 {
37983 map_word pfow_val[4];
37984
37985+ pax_track_stack();
37986+
37987 /* Check identification string */
37988 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37989 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37990diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37991index 726a1b8..f46b460 100644
37992--- a/drivers/mtd/mtdchar.c
37993+++ b/drivers/mtd/mtdchar.c
37994@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37995 u_long size;
37996 struct mtd_info_user info;
37997
37998+ pax_track_stack();
37999+
38000 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
38001
38002 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
38003diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
38004index 1002e18..26d82d5 100644
38005--- a/drivers/mtd/nftlcore.c
38006+++ b/drivers/mtd/nftlcore.c
38007@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
38008 int inplace = 1;
38009 size_t retlen;
38010
38011+ pax_track_stack();
38012+
38013 memset(BlockMap, 0xff, sizeof(BlockMap));
38014 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
38015
38016diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
38017index 8b22b18..6fada85 100644
38018--- a/drivers/mtd/nftlmount.c
38019+++ b/drivers/mtd/nftlmount.c
38020@@ -23,6 +23,7 @@
38021 #include <asm/errno.h>
38022 #include <linux/delay.h>
38023 #include <linux/slab.h>
38024+#include <linux/sched.h>
38025 #include <linux/mtd/mtd.h>
38026 #include <linux/mtd/nand.h>
38027 #include <linux/mtd/nftl.h>
38028@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
38029 struct mtd_info *mtd = nftl->mbd.mtd;
38030 unsigned int i;
38031
38032+ pax_track_stack();
38033+
38034 /* Assume logical EraseSize == physical erasesize for starting the scan.
38035 We'll sort it out later if we find a MediaHeader which says otherwise */
38036 /* Actually, we won't. The new DiskOnChip driver has already scanned
38037diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
38038index 14cec04..d775b87 100644
38039--- a/drivers/mtd/ubi/build.c
38040+++ b/drivers/mtd/ubi/build.c
38041@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
38042 static int __init bytes_str_to_int(const char *str)
38043 {
38044 char *endp;
38045- unsigned long result;
38046+ unsigned long result, scale = 1;
38047
38048 result = simple_strtoul(str, &endp, 0);
38049 if (str == endp || result >= INT_MAX) {
38050@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
38051
38052 switch (*endp) {
38053 case 'G':
38054- result *= 1024;
38055+ scale *= 1024;
38056 case 'M':
38057- result *= 1024;
38058+ scale *= 1024;
38059 case 'K':
38060- result *= 1024;
38061+ scale *= 1024;
38062 if (endp[1] == 'i' && endp[2] == 'B')
38063 endp += 2;
38064 case '\0':
38065@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
38066 return -EINVAL;
38067 }
38068
38069- return result;
38070+ if ((intoverflow_t)result*scale >= INT_MAX) {
38071+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
38072+ str);
38073+ return -EINVAL;
38074+ }
38075+
38076+ return result*scale;
38077 }
38078
38079 /**
38080diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
38081index ab68886..ca405e8 100644
38082--- a/drivers/net/atlx/atl2.c
38083+++ b/drivers/net/atlx/atl2.c
38084@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
38085 */
38086
38087 #define ATL2_PARAM(X, desc) \
38088- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38089+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
38090 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
38091 MODULE_PARM_DESC(X, desc);
38092 #else
38093diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
38094index 4874b2b..67f8526 100644
38095--- a/drivers/net/bnx2.c
38096+++ b/drivers/net/bnx2.c
38097@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
38098 int rc = 0;
38099 u32 magic, csum;
38100
38101+ pax_track_stack();
38102+
38103 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
38104 goto test_nvram_done;
38105
38106diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
38107index fd3eb07..8a6978d 100644
38108--- a/drivers/net/cxgb3/l2t.h
38109+++ b/drivers/net/cxgb3/l2t.h
38110@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38111 */
38112 struct l2t_skb_cb {
38113 arp_failure_handler_func arp_failure_handler;
38114-};
38115+} __no_const;
38116
38117 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38118
38119diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
38120index 032cfe0..411af379 100644
38121--- a/drivers/net/cxgb3/t3_hw.c
38122+++ b/drivers/net/cxgb3/t3_hw.c
38123@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
38124 int i, addr, ret;
38125 struct t3_vpd vpd;
38126
38127+ pax_track_stack();
38128+
38129 /*
38130 * Card information is normally at VPD_BASE but some early cards had
38131 * it at 0.
38132diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
38133index d1e0563..b9e129c 100644
38134--- a/drivers/net/e1000e/82571.c
38135+++ b/drivers/net/e1000e/82571.c
38136@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
38137 {
38138 struct e1000_hw *hw = &adapter->hw;
38139 struct e1000_mac_info *mac = &hw->mac;
38140- struct e1000_mac_operations *func = &mac->ops;
38141+ e1000_mac_operations_no_const *func = &mac->ops;
38142 u32 swsm = 0;
38143 u32 swsm2 = 0;
38144 bool force_clear_smbi = false;
38145@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
38146 temp = er32(ICRXDMTC);
38147 }
38148
38149-static struct e1000_mac_operations e82571_mac_ops = {
38150+static const struct e1000_mac_operations e82571_mac_ops = {
38151 /* .check_mng_mode: mac type dependent */
38152 /* .check_for_link: media type dependent */
38153 .id_led_init = e1000e_id_led_init,
38154@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
38155 .setup_led = e1000e_setup_led_generic,
38156 };
38157
38158-static struct e1000_phy_operations e82_phy_ops_igp = {
38159+static const struct e1000_phy_operations e82_phy_ops_igp = {
38160 .acquire_phy = e1000_get_hw_semaphore_82571,
38161 .check_reset_block = e1000e_check_reset_block_generic,
38162 .commit_phy = NULL,
38163@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
38164 .cfg_on_link_up = NULL,
38165 };
38166
38167-static struct e1000_phy_operations e82_phy_ops_m88 = {
38168+static const struct e1000_phy_operations e82_phy_ops_m88 = {
38169 .acquire_phy = e1000_get_hw_semaphore_82571,
38170 .check_reset_block = e1000e_check_reset_block_generic,
38171 .commit_phy = e1000e_phy_sw_reset,
38172@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
38173 .cfg_on_link_up = NULL,
38174 };
38175
38176-static struct e1000_phy_operations e82_phy_ops_bm = {
38177+static const struct e1000_phy_operations e82_phy_ops_bm = {
38178 .acquire_phy = e1000_get_hw_semaphore_82571,
38179 .check_reset_block = e1000e_check_reset_block_generic,
38180 .commit_phy = e1000e_phy_sw_reset,
38181@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
38182 .cfg_on_link_up = NULL,
38183 };
38184
38185-static struct e1000_nvm_operations e82571_nvm_ops = {
38186+static const struct e1000_nvm_operations e82571_nvm_ops = {
38187 .acquire_nvm = e1000_acquire_nvm_82571,
38188 .read_nvm = e1000e_read_nvm_eerd,
38189 .release_nvm = e1000_release_nvm_82571,
38190diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
38191index 47db9bd..fa58ccd 100644
38192--- a/drivers/net/e1000e/e1000.h
38193+++ b/drivers/net/e1000e/e1000.h
38194@@ -375,9 +375,9 @@ struct e1000_info {
38195 u32 pba;
38196 u32 max_hw_frame_size;
38197 s32 (*get_variants)(struct e1000_adapter *);
38198- struct e1000_mac_operations *mac_ops;
38199- struct e1000_phy_operations *phy_ops;
38200- struct e1000_nvm_operations *nvm_ops;
38201+ const struct e1000_mac_operations *mac_ops;
38202+ const struct e1000_phy_operations *phy_ops;
38203+ const struct e1000_nvm_operations *nvm_ops;
38204 };
38205
38206 /* hardware capability, feature, and workaround flags */
38207diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
38208index ae5d736..e9a93a1 100644
38209--- a/drivers/net/e1000e/es2lan.c
38210+++ b/drivers/net/e1000e/es2lan.c
38211@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
38212 {
38213 struct e1000_hw *hw = &adapter->hw;
38214 struct e1000_mac_info *mac = &hw->mac;
38215- struct e1000_mac_operations *func = &mac->ops;
38216+ e1000_mac_operations_no_const *func = &mac->ops;
38217
38218 /* Set media type */
38219 switch (adapter->pdev->device) {
38220@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
38221 temp = er32(ICRXDMTC);
38222 }
38223
38224-static struct e1000_mac_operations es2_mac_ops = {
38225+static const struct e1000_mac_operations es2_mac_ops = {
38226 .id_led_init = e1000e_id_led_init,
38227 .check_mng_mode = e1000e_check_mng_mode_generic,
38228 /* check_for_link dependent on media type */
38229@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
38230 .setup_led = e1000e_setup_led_generic,
38231 };
38232
38233-static struct e1000_phy_operations es2_phy_ops = {
38234+static const struct e1000_phy_operations es2_phy_ops = {
38235 .acquire_phy = e1000_acquire_phy_80003es2lan,
38236 .check_reset_block = e1000e_check_reset_block_generic,
38237 .commit_phy = e1000e_phy_sw_reset,
38238@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
38239 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
38240 };
38241
38242-static struct e1000_nvm_operations es2_nvm_ops = {
38243+static const struct e1000_nvm_operations es2_nvm_ops = {
38244 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
38245 .read_nvm = e1000e_read_nvm_eerd,
38246 .release_nvm = e1000_release_nvm_80003es2lan,
38247diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
38248index 11f3b7c..6381887 100644
38249--- a/drivers/net/e1000e/hw.h
38250+++ b/drivers/net/e1000e/hw.h
38251@@ -753,6 +753,7 @@ struct e1000_mac_operations {
38252 s32 (*setup_physical_interface)(struct e1000_hw *);
38253 s32 (*setup_led)(struct e1000_hw *);
38254 };
38255+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38256
38257 /* Function pointers for the PHY. */
38258 struct e1000_phy_operations {
38259@@ -774,6 +775,7 @@ struct e1000_phy_operations {
38260 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
38261 s32 (*cfg_on_link_up)(struct e1000_hw *);
38262 };
38263+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38264
38265 /* Function pointers for the NVM. */
38266 struct e1000_nvm_operations {
38267@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38268 s32 (*validate_nvm)(struct e1000_hw *);
38269 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38270 };
38271+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38272
38273 struct e1000_mac_info {
38274- struct e1000_mac_operations ops;
38275+ e1000_mac_operations_no_const ops;
38276
38277 u8 addr[6];
38278 u8 perm_addr[6];
38279@@ -823,7 +826,7 @@ struct e1000_mac_info {
38280 };
38281
38282 struct e1000_phy_info {
38283- struct e1000_phy_operations ops;
38284+ e1000_phy_operations_no_const ops;
38285
38286 enum e1000_phy_type type;
38287
38288@@ -857,7 +860,7 @@ struct e1000_phy_info {
38289 };
38290
38291 struct e1000_nvm_info {
38292- struct e1000_nvm_operations ops;
38293+ e1000_nvm_operations_no_const ops;
38294
38295 enum e1000_nvm_type type;
38296 enum e1000_nvm_override override;
38297diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38298index de39f9a..e28d3e0 100644
38299--- a/drivers/net/e1000e/ich8lan.c
38300+++ b/drivers/net/e1000e/ich8lan.c
38301@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38302 }
38303 }
38304
38305-static struct e1000_mac_operations ich8_mac_ops = {
38306+static const struct e1000_mac_operations ich8_mac_ops = {
38307 .id_led_init = e1000e_id_led_init,
38308 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38309 .check_for_link = e1000_check_for_copper_link_ich8lan,
38310@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38311 /* id_led_init dependent on mac type */
38312 };
38313
38314-static struct e1000_phy_operations ich8_phy_ops = {
38315+static const struct e1000_phy_operations ich8_phy_ops = {
38316 .acquire_phy = e1000_acquire_swflag_ich8lan,
38317 .check_reset_block = e1000_check_reset_block_ich8lan,
38318 .commit_phy = NULL,
38319@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38320 .write_phy_reg = e1000e_write_phy_reg_igp,
38321 };
38322
38323-static struct e1000_nvm_operations ich8_nvm_ops = {
38324+static const struct e1000_nvm_operations ich8_nvm_ops = {
38325 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38326 .read_nvm = e1000_read_nvm_ich8lan,
38327 .release_nvm = e1000_release_nvm_ich8lan,
38328diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38329index 18d5fbb..542d96d 100644
38330--- a/drivers/net/fealnx.c
38331+++ b/drivers/net/fealnx.c
38332@@ -151,7 +151,7 @@ struct chip_info {
38333 int flags;
38334 };
38335
38336-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38337+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38338 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38339 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38340 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38341diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38342index 0e5b54b..b503f82 100644
38343--- a/drivers/net/hamradio/6pack.c
38344+++ b/drivers/net/hamradio/6pack.c
38345@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38346 unsigned char buf[512];
38347 int count1;
38348
38349+ pax_track_stack();
38350+
38351 if (!count)
38352 return;
38353
38354diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38355index 5862282..7cce8cb 100644
38356--- a/drivers/net/ibmveth.c
38357+++ b/drivers/net/ibmveth.c
38358@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38359 NULL,
38360 };
38361
38362-static struct sysfs_ops veth_pool_ops = {
38363+static const struct sysfs_ops veth_pool_ops = {
38364 .show = veth_pool_show,
38365 .store = veth_pool_store,
38366 };
38367diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38368index d617f2d..57b5309 100644
38369--- a/drivers/net/igb/e1000_82575.c
38370+++ b/drivers/net/igb/e1000_82575.c
38371@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38372 wr32(E1000_VT_CTL, vt_ctl);
38373 }
38374
38375-static struct e1000_mac_operations e1000_mac_ops_82575 = {
38376+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38377 .reset_hw = igb_reset_hw_82575,
38378 .init_hw = igb_init_hw_82575,
38379 .check_for_link = igb_check_for_link_82575,
38380@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38381 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38382 };
38383
38384-static struct e1000_phy_operations e1000_phy_ops_82575 = {
38385+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38386 .acquire = igb_acquire_phy_82575,
38387 .get_cfg_done = igb_get_cfg_done_82575,
38388 .release = igb_release_phy_82575,
38389 };
38390
38391-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38392+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38393 .acquire = igb_acquire_nvm_82575,
38394 .read = igb_read_nvm_eerd,
38395 .release = igb_release_nvm_82575,
38396diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38397index 72081df..d855cf5 100644
38398--- a/drivers/net/igb/e1000_hw.h
38399+++ b/drivers/net/igb/e1000_hw.h
38400@@ -288,6 +288,7 @@ struct e1000_mac_operations {
38401 s32 (*read_mac_addr)(struct e1000_hw *);
38402 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38403 };
38404+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38405
38406 struct e1000_phy_operations {
38407 s32 (*acquire)(struct e1000_hw *);
38408@@ -303,6 +304,7 @@ struct e1000_phy_operations {
38409 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38410 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38411 };
38412+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38413
38414 struct e1000_nvm_operations {
38415 s32 (*acquire)(struct e1000_hw *);
38416@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38417 void (*release)(struct e1000_hw *);
38418 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38419 };
38420+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38421
38422 struct e1000_info {
38423 s32 (*get_invariants)(struct e1000_hw *);
38424@@ -321,7 +324,7 @@ struct e1000_info {
38425 extern const struct e1000_info e1000_82575_info;
38426
38427 struct e1000_mac_info {
38428- struct e1000_mac_operations ops;
38429+ e1000_mac_operations_no_const ops;
38430
38431 u8 addr[6];
38432 u8 perm_addr[6];
38433@@ -365,7 +368,7 @@ struct e1000_mac_info {
38434 };
38435
38436 struct e1000_phy_info {
38437- struct e1000_phy_operations ops;
38438+ e1000_phy_operations_no_const ops;
38439
38440 enum e1000_phy_type type;
38441
38442@@ -400,7 +403,7 @@ struct e1000_phy_info {
38443 };
38444
38445 struct e1000_nvm_info {
38446- struct e1000_nvm_operations ops;
38447+ e1000_nvm_operations_no_const ops;
38448
38449 enum e1000_nvm_type type;
38450 enum e1000_nvm_override override;
38451@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38452 s32 (*check_for_ack)(struct e1000_hw *, u16);
38453 s32 (*check_for_rst)(struct e1000_hw *, u16);
38454 };
38455+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38456
38457 struct e1000_mbx_stats {
38458 u32 msgs_tx;
38459@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38460 };
38461
38462 struct e1000_mbx_info {
38463- struct e1000_mbx_operations ops;
38464+ e1000_mbx_operations_no_const ops;
38465 struct e1000_mbx_stats stats;
38466 u32 timeout;
38467 u32 usec_delay;
38468diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38469index 1e8ce37..549c453 100644
38470--- a/drivers/net/igbvf/vf.h
38471+++ b/drivers/net/igbvf/vf.h
38472@@ -187,9 +187,10 @@ struct e1000_mac_operations {
38473 s32 (*read_mac_addr)(struct e1000_hw *);
38474 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38475 };
38476+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38477
38478 struct e1000_mac_info {
38479- struct e1000_mac_operations ops;
38480+ e1000_mac_operations_no_const ops;
38481 u8 addr[6];
38482 u8 perm_addr[6];
38483
38484@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38485 s32 (*check_for_ack)(struct e1000_hw *);
38486 s32 (*check_for_rst)(struct e1000_hw *);
38487 };
38488+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38489
38490 struct e1000_mbx_stats {
38491 u32 msgs_tx;
38492@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38493 };
38494
38495 struct e1000_mbx_info {
38496- struct e1000_mbx_operations ops;
38497+ e1000_mbx_operations_no_const ops;
38498 struct e1000_mbx_stats stats;
38499 u32 timeout;
38500 u32 usec_delay;
38501diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38502index aa7286b..a61394f 100644
38503--- a/drivers/net/iseries_veth.c
38504+++ b/drivers/net/iseries_veth.c
38505@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38506 NULL
38507 };
38508
38509-static struct sysfs_ops veth_cnx_sysfs_ops = {
38510+static const struct sysfs_ops veth_cnx_sysfs_ops = {
38511 .show = veth_cnx_attribute_show
38512 };
38513
38514@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38515 NULL
38516 };
38517
38518-static struct sysfs_ops veth_port_sysfs_ops = {
38519+static const struct sysfs_ops veth_port_sysfs_ops = {
38520 .show = veth_port_attribute_show
38521 };
38522
38523diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38524index 8aa44dc..fa1e797 100644
38525--- a/drivers/net/ixgb/ixgb_main.c
38526+++ b/drivers/net/ixgb/ixgb_main.c
38527@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38528 u32 rctl;
38529 int i;
38530
38531+ pax_track_stack();
38532+
38533 /* Check for Promiscuous and All Multicast modes */
38534
38535 rctl = IXGB_READ_REG(hw, RCTL);
38536diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38537index af35e1d..8781785 100644
38538--- a/drivers/net/ixgb/ixgb_param.c
38539+++ b/drivers/net/ixgb/ixgb_param.c
38540@@ -260,6 +260,9 @@ void __devinit
38541 ixgb_check_options(struct ixgb_adapter *adapter)
38542 {
38543 int bd = adapter->bd_number;
38544+
38545+ pax_track_stack();
38546+
38547 if (bd >= IXGB_MAX_NIC) {
38548 printk(KERN_NOTICE
38549 "Warning: no configuration for board #%i\n", bd);
38550diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38551index b17aa73..ed74540 100644
38552--- a/drivers/net/ixgbe/ixgbe_type.h
38553+++ b/drivers/net/ixgbe/ixgbe_type.h
38554@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38555 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38556 s32 (*update_checksum)(struct ixgbe_hw *);
38557 };
38558+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38559
38560 struct ixgbe_mac_operations {
38561 s32 (*init_hw)(struct ixgbe_hw *);
38562@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38563 /* Flow Control */
38564 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38565 };
38566+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38567
38568 struct ixgbe_phy_operations {
38569 s32 (*identify)(struct ixgbe_hw *);
38570@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38571 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38572 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38573 };
38574+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38575
38576 struct ixgbe_eeprom_info {
38577- struct ixgbe_eeprom_operations ops;
38578+ ixgbe_eeprom_operations_no_const ops;
38579 enum ixgbe_eeprom_type type;
38580 u32 semaphore_delay;
38581 u16 word_size;
38582@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38583 };
38584
38585 struct ixgbe_mac_info {
38586- struct ixgbe_mac_operations ops;
38587+ ixgbe_mac_operations_no_const ops;
38588 enum ixgbe_mac_type type;
38589 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38590 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38591@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38592 };
38593
38594 struct ixgbe_phy_info {
38595- struct ixgbe_phy_operations ops;
38596+ ixgbe_phy_operations_no_const ops;
38597 struct mdio_if_info mdio;
38598 enum ixgbe_phy_type type;
38599 u32 id;
38600diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38601index 291a505..2543756 100644
38602--- a/drivers/net/mlx4/main.c
38603+++ b/drivers/net/mlx4/main.c
38604@@ -38,6 +38,7 @@
38605 #include <linux/errno.h>
38606 #include <linux/pci.h>
38607 #include <linux/dma-mapping.h>
38608+#include <linux/sched.h>
38609
38610 #include <linux/mlx4/device.h>
38611 #include <linux/mlx4/doorbell.h>
38612@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38613 u64 icm_size;
38614 int err;
38615
38616+ pax_track_stack();
38617+
38618 err = mlx4_QUERY_FW(dev);
38619 if (err) {
38620 if (err == -EACCES)
38621diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38622index 2dce134..fa5ce75 100644
38623--- a/drivers/net/niu.c
38624+++ b/drivers/net/niu.c
38625@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38626 int i, num_irqs, err;
38627 u8 first_ldg;
38628
38629+ pax_track_stack();
38630+
38631 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38632 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38633 ldg_num_map[i] = first_ldg + i;
38634diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38635index c1b3f09..97cd8c4 100644
38636--- a/drivers/net/pcnet32.c
38637+++ b/drivers/net/pcnet32.c
38638@@ -79,7 +79,7 @@ static int cards_found;
38639 /*
38640 * VLB I/O addresses
38641 */
38642-static unsigned int pcnet32_portlist[] __initdata =
38643+static unsigned int pcnet32_portlist[] __devinitdata =
38644 { 0x300, 0x320, 0x340, 0x360, 0 };
38645
38646 static int pcnet32_debug = 0;
38647@@ -267,7 +267,7 @@ struct pcnet32_private {
38648 struct sk_buff **rx_skbuff;
38649 dma_addr_t *tx_dma_addr;
38650 dma_addr_t *rx_dma_addr;
38651- struct pcnet32_access a;
38652+ struct pcnet32_access *a;
38653 spinlock_t lock; /* Guard lock */
38654 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38655 unsigned int rx_ring_size; /* current rx ring size */
38656@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38657 u16 val;
38658
38659 netif_wake_queue(dev);
38660- val = lp->a.read_csr(ioaddr, CSR3);
38661+ val = lp->a->read_csr(ioaddr, CSR3);
38662 val &= 0x00ff;
38663- lp->a.write_csr(ioaddr, CSR3, val);
38664+ lp->a->write_csr(ioaddr, CSR3, val);
38665 napi_enable(&lp->napi);
38666 }
38667
38668@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38669 r = mii_link_ok(&lp->mii_if);
38670 } else if (lp->chip_version >= PCNET32_79C970A) {
38671 ulong ioaddr = dev->base_addr; /* card base I/O address */
38672- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38673+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38674 } else { /* can not detect link on really old chips */
38675 r = 1;
38676 }
38677@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38678 pcnet32_netif_stop(dev);
38679
38680 spin_lock_irqsave(&lp->lock, flags);
38681- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38682+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38683
38684 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38685
38686@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38687 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38688 {
38689 struct pcnet32_private *lp = netdev_priv(dev);
38690- struct pcnet32_access *a = &lp->a; /* access to registers */
38691+ struct pcnet32_access *a = lp->a; /* access to registers */
38692 ulong ioaddr = dev->base_addr; /* card base I/O address */
38693 struct sk_buff *skb; /* sk buff */
38694 int x, i; /* counters */
38695@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38696 pcnet32_netif_stop(dev);
38697
38698 spin_lock_irqsave(&lp->lock, flags);
38699- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38700+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38701
38702 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38703
38704 /* Reset the PCNET32 */
38705- lp->a.reset(ioaddr);
38706- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38707+ lp->a->reset(ioaddr);
38708+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38709
38710 /* switch pcnet32 to 32bit mode */
38711- lp->a.write_bcr(ioaddr, 20, 2);
38712+ lp->a->write_bcr(ioaddr, 20, 2);
38713
38714 /* purge & init rings but don't actually restart */
38715 pcnet32_restart(dev, 0x0000);
38716
38717- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38718+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38719
38720 /* Initialize Transmit buffers. */
38721 size = data_len + 15;
38722@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38723
38724 /* set int loopback in CSR15 */
38725 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38726- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38727+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38728
38729 teststatus = cpu_to_le16(0x8000);
38730- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38731+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38732
38733 /* Check status of descriptors */
38734 for (x = 0; x < numbuffs; x++) {
38735@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38736 }
38737 }
38738
38739- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38740+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38741 wmb();
38742 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38743 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38744@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38745 pcnet32_restart(dev, CSR0_NORMAL);
38746 } else {
38747 pcnet32_purge_rx_ring(dev);
38748- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38749+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38750 }
38751 spin_unlock_irqrestore(&lp->lock, flags);
38752
38753@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38754 static void pcnet32_led_blink_callback(struct net_device *dev)
38755 {
38756 struct pcnet32_private *lp = netdev_priv(dev);
38757- struct pcnet32_access *a = &lp->a;
38758+ struct pcnet32_access *a = lp->a;
38759 ulong ioaddr = dev->base_addr;
38760 unsigned long flags;
38761 int i;
38762@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38763 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38764 {
38765 struct pcnet32_private *lp = netdev_priv(dev);
38766- struct pcnet32_access *a = &lp->a;
38767+ struct pcnet32_access *a = lp->a;
38768 ulong ioaddr = dev->base_addr;
38769 unsigned long flags;
38770 int i, regs[4];
38771@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38772 {
38773 int csr5;
38774 struct pcnet32_private *lp = netdev_priv(dev);
38775- struct pcnet32_access *a = &lp->a;
38776+ struct pcnet32_access *a = lp->a;
38777 ulong ioaddr = dev->base_addr;
38778 int ticks;
38779
38780@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38781 spin_lock_irqsave(&lp->lock, flags);
38782 if (pcnet32_tx(dev)) {
38783 /* reset the chip to clear the error condition, then restart */
38784- lp->a.reset(ioaddr);
38785- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38786+ lp->a->reset(ioaddr);
38787+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38788 pcnet32_restart(dev, CSR0_START);
38789 netif_wake_queue(dev);
38790 }
38791@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38792 __napi_complete(napi);
38793
38794 /* clear interrupt masks */
38795- val = lp->a.read_csr(ioaddr, CSR3);
38796+ val = lp->a->read_csr(ioaddr, CSR3);
38797 val &= 0x00ff;
38798- lp->a.write_csr(ioaddr, CSR3, val);
38799+ lp->a->write_csr(ioaddr, CSR3, val);
38800
38801 /* Set interrupt enable. */
38802- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38803+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38804
38805 spin_unlock_irqrestore(&lp->lock, flags);
38806 }
38807@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38808 int i, csr0;
38809 u16 *buff = ptr;
38810 struct pcnet32_private *lp = netdev_priv(dev);
38811- struct pcnet32_access *a = &lp->a;
38812+ struct pcnet32_access *a = lp->a;
38813 ulong ioaddr = dev->base_addr;
38814 unsigned long flags;
38815
38816@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38817 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38818 if (lp->phymask & (1 << j)) {
38819 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38820- lp->a.write_bcr(ioaddr, 33,
38821+ lp->a->write_bcr(ioaddr, 33,
38822 (j << 5) | i);
38823- *buff++ = lp->a.read_bcr(ioaddr, 34);
38824+ *buff++ = lp->a->read_bcr(ioaddr, 34);
38825 }
38826 }
38827 }
38828@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38829 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38830 lp->options |= PCNET32_PORT_FD;
38831
38832- lp->a = *a;
38833+ lp->a = a;
38834
38835 /* prior to register_netdev, dev->name is not yet correct */
38836 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38837@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38838 if (lp->mii) {
38839 /* lp->phycount and lp->phymask are set to 0 by memset above */
38840
38841- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38842+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38843 /* scan for PHYs */
38844 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38845 unsigned short id1, id2;
38846@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38847 "Found PHY %04x:%04x at address %d.\n",
38848 id1, id2, i);
38849 }
38850- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38851+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38852 if (lp->phycount > 1) {
38853 lp->options |= PCNET32_PORT_MII;
38854 }
38855@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38856 }
38857
38858 /* Reset the PCNET32 */
38859- lp->a.reset(ioaddr);
38860+ lp->a->reset(ioaddr);
38861
38862 /* switch pcnet32 to 32bit mode */
38863- lp->a.write_bcr(ioaddr, 20, 2);
38864+ lp->a->write_bcr(ioaddr, 20, 2);
38865
38866 if (netif_msg_ifup(lp))
38867 printk(KERN_DEBUG
38868@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38869 (u32) (lp->init_dma_addr));
38870
38871 /* set/reset autoselect bit */
38872- val = lp->a.read_bcr(ioaddr, 2) & ~2;
38873+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
38874 if (lp->options & PCNET32_PORT_ASEL)
38875 val |= 2;
38876- lp->a.write_bcr(ioaddr, 2, val);
38877+ lp->a->write_bcr(ioaddr, 2, val);
38878
38879 /* handle full duplex setting */
38880 if (lp->mii_if.full_duplex) {
38881- val = lp->a.read_bcr(ioaddr, 9) & ~3;
38882+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
38883 if (lp->options & PCNET32_PORT_FD) {
38884 val |= 1;
38885 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38886@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38887 if (lp->chip_version == 0x2627)
38888 val |= 3;
38889 }
38890- lp->a.write_bcr(ioaddr, 9, val);
38891+ lp->a->write_bcr(ioaddr, 9, val);
38892 }
38893
38894 /* set/reset GPSI bit in test register */
38895- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38896+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38897 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38898 val |= 0x10;
38899- lp->a.write_csr(ioaddr, 124, val);
38900+ lp->a->write_csr(ioaddr, 124, val);
38901
38902 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38903 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38904@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38905 * duplex, and/or enable auto negotiation, and clear DANAS
38906 */
38907 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38908- lp->a.write_bcr(ioaddr, 32,
38909- lp->a.read_bcr(ioaddr, 32) | 0x0080);
38910+ lp->a->write_bcr(ioaddr, 32,
38911+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
38912 /* disable Auto Negotiation, set 10Mpbs, HD */
38913- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38914+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38915 if (lp->options & PCNET32_PORT_FD)
38916 val |= 0x10;
38917 if (lp->options & PCNET32_PORT_100)
38918 val |= 0x08;
38919- lp->a.write_bcr(ioaddr, 32, val);
38920+ lp->a->write_bcr(ioaddr, 32, val);
38921 } else {
38922 if (lp->options & PCNET32_PORT_ASEL) {
38923- lp->a.write_bcr(ioaddr, 32,
38924- lp->a.read_bcr(ioaddr,
38925+ lp->a->write_bcr(ioaddr, 32,
38926+ lp->a->read_bcr(ioaddr,
38927 32) | 0x0080);
38928 /* enable auto negotiate, setup, disable fd */
38929- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38930+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38931 val |= 0x20;
38932- lp->a.write_bcr(ioaddr, 32, val);
38933+ lp->a->write_bcr(ioaddr, 32, val);
38934 }
38935 }
38936 } else {
38937@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38938 * There is really no good other way to handle multiple PHYs
38939 * other than turning off all automatics
38940 */
38941- val = lp->a.read_bcr(ioaddr, 2);
38942- lp->a.write_bcr(ioaddr, 2, val & ~2);
38943- val = lp->a.read_bcr(ioaddr, 32);
38944- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38945+ val = lp->a->read_bcr(ioaddr, 2);
38946+ lp->a->write_bcr(ioaddr, 2, val & ~2);
38947+ val = lp->a->read_bcr(ioaddr, 32);
38948+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38949
38950 if (!(lp->options & PCNET32_PORT_ASEL)) {
38951 /* setup ecmd */
38952@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38953 ecmd.speed =
38954 lp->
38955 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38956- bcr9 = lp->a.read_bcr(ioaddr, 9);
38957+ bcr9 = lp->a->read_bcr(ioaddr, 9);
38958
38959 if (lp->options & PCNET32_PORT_FD) {
38960 ecmd.duplex = DUPLEX_FULL;
38961@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38962 ecmd.duplex = DUPLEX_HALF;
38963 bcr9 |= ~(1 << 0);
38964 }
38965- lp->a.write_bcr(ioaddr, 9, bcr9);
38966+ lp->a->write_bcr(ioaddr, 9, bcr9);
38967 }
38968
38969 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38970@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38971
38972 #ifdef DO_DXSUFLO
38973 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38974- val = lp->a.read_csr(ioaddr, CSR3);
38975+ val = lp->a->read_csr(ioaddr, CSR3);
38976 val |= 0x40;
38977- lp->a.write_csr(ioaddr, CSR3, val);
38978+ lp->a->write_csr(ioaddr, CSR3, val);
38979 }
38980 #endif
38981
38982@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38983 napi_enable(&lp->napi);
38984
38985 /* Re-initialize the PCNET32, and start it when done. */
38986- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38987- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38988+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38989+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38990
38991- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38992- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38993+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38994+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38995
38996 netif_start_queue(dev);
38997
38998@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38999
39000 i = 0;
39001 while (i++ < 100)
39002- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39003+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39004 break;
39005 /*
39006 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
39007 * reports that doing so triggers a bug in the '974.
39008 */
39009- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
39010+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
39011
39012 if (netif_msg_ifup(lp))
39013 printk(KERN_DEBUG
39014 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
39015 dev->name, i,
39016 (u32) (lp->init_dma_addr),
39017- lp->a.read_csr(ioaddr, CSR0));
39018+ lp->a->read_csr(ioaddr, CSR0));
39019
39020 spin_unlock_irqrestore(&lp->lock, flags);
39021
39022@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
39023 * Switch back to 16bit mode to avoid problems with dumb
39024 * DOS packet driver after a warm reboot
39025 */
39026- lp->a.write_bcr(ioaddr, 20, 4);
39027+ lp->a->write_bcr(ioaddr, 20, 4);
39028
39029 err_free_irq:
39030 spin_unlock_irqrestore(&lp->lock, flags);
39031@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39032
39033 /* wait for stop */
39034 for (i = 0; i < 100; i++)
39035- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
39036+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
39037 break;
39038
39039 if (i >= 100 && netif_msg_drv(lp))
39040@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
39041 return;
39042
39043 /* ReInit Ring */
39044- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
39045+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
39046 i = 0;
39047 while (i++ < 1000)
39048- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
39049+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
39050 break;
39051
39052- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
39053+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
39054 }
39055
39056 static void pcnet32_tx_timeout(struct net_device *dev)
39057@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
39058 if (pcnet32_debug & NETIF_MSG_DRV)
39059 printk(KERN_ERR
39060 "%s: transmit timed out, status %4.4x, resetting.\n",
39061- dev->name, lp->a.read_csr(ioaddr, CSR0));
39062- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39063+ dev->name, lp->a->read_csr(ioaddr, CSR0));
39064+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39065 dev->stats.tx_errors++;
39066 if (netif_msg_tx_err(lp)) {
39067 int i;
39068@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39069 if (netif_msg_tx_queued(lp)) {
39070 printk(KERN_DEBUG
39071 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
39072- dev->name, lp->a.read_csr(ioaddr, CSR0));
39073+ dev->name, lp->a->read_csr(ioaddr, CSR0));
39074 }
39075
39076 /* Default status -- will not enable Successful-TxDone
39077@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
39078 dev->stats.tx_bytes += skb->len;
39079
39080 /* Trigger an immediate send poll. */
39081- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39082+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
39083
39084 dev->trans_start = jiffies;
39085
39086@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
39087
39088 spin_lock(&lp->lock);
39089
39090- csr0 = lp->a.read_csr(ioaddr, CSR0);
39091+ csr0 = lp->a->read_csr(ioaddr, CSR0);
39092 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
39093 if (csr0 == 0xffff) {
39094 break; /* PCMCIA remove happened */
39095 }
39096 /* Acknowledge all of the current interrupt sources ASAP. */
39097- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39098+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
39099
39100 if (netif_msg_intr(lp))
39101 printk(KERN_DEBUG
39102 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
39103- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
39104+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
39105
39106 /* Log misc errors. */
39107 if (csr0 & 0x4000)
39108@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
39109 if (napi_schedule_prep(&lp->napi)) {
39110 u16 val;
39111 /* set interrupt masks */
39112- val = lp->a.read_csr(ioaddr, CSR3);
39113+ val = lp->a->read_csr(ioaddr, CSR3);
39114 val |= 0x5f00;
39115- lp->a.write_csr(ioaddr, CSR3, val);
39116+ lp->a->write_csr(ioaddr, CSR3, val);
39117
39118 __napi_schedule(&lp->napi);
39119 break;
39120 }
39121- csr0 = lp->a.read_csr(ioaddr, CSR0);
39122+ csr0 = lp->a->read_csr(ioaddr, CSR0);
39123 }
39124
39125 if (netif_msg_intr(lp))
39126 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
39127- dev->name, lp->a.read_csr(ioaddr, CSR0));
39128+ dev->name, lp->a->read_csr(ioaddr, CSR0));
39129
39130 spin_unlock(&lp->lock);
39131
39132@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
39133
39134 spin_lock_irqsave(&lp->lock, flags);
39135
39136- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39137+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39138
39139 if (netif_msg_ifdown(lp))
39140 printk(KERN_DEBUG
39141 "%s: Shutting down ethercard, status was %2.2x.\n",
39142- dev->name, lp->a.read_csr(ioaddr, CSR0));
39143+ dev->name, lp->a->read_csr(ioaddr, CSR0));
39144
39145 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
39146- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39147+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39148
39149 /*
39150 * Switch back to 16bit mode to avoid problems with dumb
39151 * DOS packet driver after a warm reboot
39152 */
39153- lp->a.write_bcr(ioaddr, 20, 4);
39154+ lp->a->write_bcr(ioaddr, 20, 4);
39155
39156 spin_unlock_irqrestore(&lp->lock, flags);
39157
39158@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
39159 unsigned long flags;
39160
39161 spin_lock_irqsave(&lp->lock, flags);
39162- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
39163+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
39164 spin_unlock_irqrestore(&lp->lock, flags);
39165
39166 return &dev->stats;
39167@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
39168 if (dev->flags & IFF_ALLMULTI) {
39169 ib->filter[0] = cpu_to_le32(~0U);
39170 ib->filter[1] = cpu_to_le32(~0U);
39171- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39172- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39173- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39174- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39175+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
39176+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
39177+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
39178+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
39179 return;
39180 }
39181 /* clear the multicast filter */
39182@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
39183 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
39184 }
39185 for (i = 0; i < 4; i++)
39186- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
39187+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
39188 le16_to_cpu(mcast_table[i]));
39189 return;
39190 }
39191@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39192
39193 spin_lock_irqsave(&lp->lock, flags);
39194 suspended = pcnet32_suspend(dev, &flags, 0);
39195- csr15 = lp->a.read_csr(ioaddr, CSR15);
39196+ csr15 = lp->a->read_csr(ioaddr, CSR15);
39197 if (dev->flags & IFF_PROMISC) {
39198 /* Log any net taps. */
39199 if (netif_msg_hw(lp))
39200@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
39201 lp->init_block->mode =
39202 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
39203 7);
39204- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
39205+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
39206 } else {
39207 lp->init_block->mode =
39208 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
39209- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39210+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
39211 pcnet32_load_multicast(dev);
39212 }
39213
39214 if (suspended) {
39215 int csr5;
39216 /* clear SUSPEND (SPND) - CSR5 bit 0 */
39217- csr5 = lp->a.read_csr(ioaddr, CSR5);
39218- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39219+ csr5 = lp->a->read_csr(ioaddr, CSR5);
39220+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
39221 } else {
39222- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
39223+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
39224 pcnet32_restart(dev, CSR0_NORMAL);
39225 netif_wake_queue(dev);
39226 }
39227@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
39228 if (!lp->mii)
39229 return 0;
39230
39231- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39232- val_out = lp->a.read_bcr(ioaddr, 34);
39233+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39234+ val_out = lp->a->read_bcr(ioaddr, 34);
39235
39236 return val_out;
39237 }
39238@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
39239 if (!lp->mii)
39240 return;
39241
39242- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39243- lp->a.write_bcr(ioaddr, 34, val);
39244+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
39245+ lp->a->write_bcr(ioaddr, 34, val);
39246 }
39247
39248 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39249@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39250 curr_link = mii_link_ok(&lp->mii_if);
39251 } else {
39252 ulong ioaddr = dev->base_addr; /* card base I/O address */
39253- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
39254+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
39255 }
39256 if (!curr_link) {
39257 if (prev_link || verbose) {
39258@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
39259 (ecmd.duplex ==
39260 DUPLEX_FULL) ? "full" : "half");
39261 }
39262- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
39263+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
39264 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
39265 if (lp->mii_if.full_duplex)
39266 bcr9 |= (1 << 0);
39267 else
39268 bcr9 &= ~(1 << 0);
39269- lp->a.write_bcr(dev->base_addr, 9, bcr9);
39270+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
39271 }
39272 } else {
39273 if (netif_msg_link(lp))
39274diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39275index 7cc9898..6eb50d3 100644
39276--- a/drivers/net/sis190.c
39277+++ b/drivers/net/sis190.c
39278@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39279 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39280 struct net_device *dev)
39281 {
39282- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39283+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39284 struct sis190_private *tp = netdev_priv(dev);
39285 struct pci_dev *isa_bridge;
39286 u8 reg, tmp8;
39287diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39288index e13685a..60c948c 100644
39289--- a/drivers/net/sundance.c
39290+++ b/drivers/net/sundance.c
39291@@ -225,7 +225,7 @@ enum {
39292 struct pci_id_info {
39293 const char *name;
39294 };
39295-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39296+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39297 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39298 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39299 {"D-Link DFE-580TX 4 port Server Adapter"},
39300diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39301index 529f55a..cccaa18 100644
39302--- a/drivers/net/tg3.h
39303+++ b/drivers/net/tg3.h
39304@@ -95,6 +95,7 @@
39305 #define CHIPREV_ID_5750_A0 0x4000
39306 #define CHIPREV_ID_5750_A1 0x4001
39307 #define CHIPREV_ID_5750_A3 0x4003
39308+#define CHIPREV_ID_5750_C1 0x4201
39309 #define CHIPREV_ID_5750_C2 0x4202
39310 #define CHIPREV_ID_5752_A0_HW 0x5000
39311 #define CHIPREV_ID_5752_A0 0x6000
39312diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39313index b9db1b5..720f9ce 100644
39314--- a/drivers/net/tokenring/abyss.c
39315+++ b/drivers/net/tokenring/abyss.c
39316@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39317
39318 static int __init abyss_init (void)
39319 {
39320- abyss_netdev_ops = tms380tr_netdev_ops;
39321+ pax_open_kernel();
39322+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39323
39324- abyss_netdev_ops.ndo_open = abyss_open;
39325- abyss_netdev_ops.ndo_stop = abyss_close;
39326+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39327+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39328+ pax_close_kernel();
39329
39330 return pci_register_driver(&abyss_driver);
39331 }
39332diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39333index 456f8bf..373e56d 100644
39334--- a/drivers/net/tokenring/madgemc.c
39335+++ b/drivers/net/tokenring/madgemc.c
39336@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39337
39338 static int __init madgemc_init (void)
39339 {
39340- madgemc_netdev_ops = tms380tr_netdev_ops;
39341- madgemc_netdev_ops.ndo_open = madgemc_open;
39342- madgemc_netdev_ops.ndo_stop = madgemc_close;
39343+ pax_open_kernel();
39344+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39345+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39346+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39347+ pax_close_kernel();
39348
39349 return mca_register_driver (&madgemc_driver);
39350 }
39351diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39352index 16e8783..925bd49 100644
39353--- a/drivers/net/tokenring/proteon.c
39354+++ b/drivers/net/tokenring/proteon.c
39355@@ -353,9 +353,11 @@ static int __init proteon_init(void)
39356 struct platform_device *pdev;
39357 int i, num = 0, err = 0;
39358
39359- proteon_netdev_ops = tms380tr_netdev_ops;
39360- proteon_netdev_ops.ndo_open = proteon_open;
39361- proteon_netdev_ops.ndo_stop = tms380tr_close;
39362+ pax_open_kernel();
39363+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39364+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39365+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39366+ pax_close_kernel();
39367
39368 err = platform_driver_register(&proteon_driver);
39369 if (err)
39370diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39371index 46db5c5..37c1536 100644
39372--- a/drivers/net/tokenring/skisa.c
39373+++ b/drivers/net/tokenring/skisa.c
39374@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39375 struct platform_device *pdev;
39376 int i, num = 0, err = 0;
39377
39378- sk_isa_netdev_ops = tms380tr_netdev_ops;
39379- sk_isa_netdev_ops.ndo_open = sk_isa_open;
39380- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39381+ pax_open_kernel();
39382+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39383+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39384+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39385+ pax_close_kernel();
39386
39387 err = platform_driver_register(&sk_isa_driver);
39388 if (err)
39389diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39390index 74e5ba4..5cf6bc9 100644
39391--- a/drivers/net/tulip/de2104x.c
39392+++ b/drivers/net/tulip/de2104x.c
39393@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39394 struct de_srom_info_leaf *il;
39395 void *bufp;
39396
39397+ pax_track_stack();
39398+
39399 /* download entire eeprom */
39400 for (i = 0; i < DE_EEPROM_WORDS; i++)
39401 ((__le16 *)ee_data)[i] =
39402diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39403index a8349b7..90f9dfe 100644
39404--- a/drivers/net/tulip/de4x5.c
39405+++ b/drivers/net/tulip/de4x5.c
39406@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39407 for (i=0; i<ETH_ALEN; i++) {
39408 tmp.addr[i] = dev->dev_addr[i];
39409 }
39410- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39411+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39412 break;
39413
39414 case DE4X5_SET_HWADDR: /* Set the hardware address */
39415@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39416 spin_lock_irqsave(&lp->lock, flags);
39417 memcpy(&statbuf, &lp->pktStats, ioc->len);
39418 spin_unlock_irqrestore(&lp->lock, flags);
39419- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39420+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39421 return -EFAULT;
39422 break;
39423 }
39424diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39425index 391acd3..56d11cd 100644
39426--- a/drivers/net/tulip/eeprom.c
39427+++ b/drivers/net/tulip/eeprom.c
39428@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39429 {NULL}};
39430
39431
39432-static const char *block_name[] __devinitdata = {
39433+static const char *block_name[] __devinitconst = {
39434 "21140 non-MII",
39435 "21140 MII PHY",
39436 "21142 Serial PHY",
39437diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39438index b38d3b7..b1cff23 100644
39439--- a/drivers/net/tulip/winbond-840.c
39440+++ b/drivers/net/tulip/winbond-840.c
39441@@ -235,7 +235,7 @@ struct pci_id_info {
39442 int drv_flags; /* Driver use, intended as capability flags. */
39443 };
39444
39445-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39446+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39447 { /* Sometime a Level-One switch card. */
39448 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39449 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39450diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39451index f450bc9..2b747c8 100644
39452--- a/drivers/net/usb/hso.c
39453+++ b/drivers/net/usb/hso.c
39454@@ -71,7 +71,7 @@
39455 #include <asm/byteorder.h>
39456 #include <linux/serial_core.h>
39457 #include <linux/serial.h>
39458-
39459+#include <asm/local.h>
39460
39461 #define DRIVER_VERSION "1.2"
39462 #define MOD_AUTHOR "Option Wireless"
39463@@ -258,7 +258,7 @@ struct hso_serial {
39464
39465 /* from usb_serial_port */
39466 struct tty_struct *tty;
39467- int open_count;
39468+ local_t open_count;
39469 spinlock_t serial_lock;
39470
39471 int (*write_data) (struct hso_serial *serial);
39472@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39473 struct urb *urb;
39474
39475 urb = serial->rx_urb[0];
39476- if (serial->open_count > 0) {
39477+ if (local_read(&serial->open_count) > 0) {
39478 count = put_rxbuf_data(urb, serial);
39479 if (count == -1)
39480 return;
39481@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39482 DUMP1(urb->transfer_buffer, urb->actual_length);
39483
39484 /* Anyone listening? */
39485- if (serial->open_count == 0)
39486+ if (local_read(&serial->open_count) == 0)
39487 return;
39488
39489 if (status == 0) {
39490@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39491 spin_unlock_irq(&serial->serial_lock);
39492
39493 /* check for port already opened, if not set the termios */
39494- serial->open_count++;
39495- if (serial->open_count == 1) {
39496+ if (local_inc_return(&serial->open_count) == 1) {
39497 tty->low_latency = 1;
39498 serial->rx_state = RX_IDLE;
39499 /* Force default termio settings */
39500@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39501 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39502 if (result) {
39503 hso_stop_serial_device(serial->parent);
39504- serial->open_count--;
39505+ local_dec(&serial->open_count);
39506 kref_put(&serial->parent->ref, hso_serial_ref_free);
39507 }
39508 } else {
39509@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39510
39511 /* reset the rts and dtr */
39512 /* do the actual close */
39513- serial->open_count--;
39514+ local_dec(&serial->open_count);
39515
39516- if (serial->open_count <= 0) {
39517- serial->open_count = 0;
39518+ if (local_read(&serial->open_count) <= 0) {
39519+ local_set(&serial->open_count, 0);
39520 spin_lock_irq(&serial->serial_lock);
39521 if (serial->tty == tty) {
39522 serial->tty->driver_data = NULL;
39523@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39524
39525 /* the actual setup */
39526 spin_lock_irqsave(&serial->serial_lock, flags);
39527- if (serial->open_count)
39528+ if (local_read(&serial->open_count))
39529 _hso_serial_set_termios(tty, old);
39530 else
39531 tty->termios = old;
39532@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39533 /* Start all serial ports */
39534 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39535 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39536- if (dev2ser(serial_table[i])->open_count) {
39537+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
39538 result =
39539 hso_start_serial_device(serial_table[i], GFP_NOIO);
39540 hso_kick_transmit(dev2ser(serial_table[i]));
39541diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39542index 3e94f0c..ffdd926 100644
39543--- a/drivers/net/vxge/vxge-config.h
39544+++ b/drivers/net/vxge/vxge-config.h
39545@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39546 void (*link_down)(struct __vxge_hw_device *devh);
39547 void (*crit_err)(struct __vxge_hw_device *devh,
39548 enum vxge_hw_event type, u64 ext_data);
39549-};
39550+} __no_const;
39551
39552 /*
39553 * struct __vxge_hw_blockpool_entry - Block private data structure
39554diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39555index 068d7a9..35293de 100644
39556--- a/drivers/net/vxge/vxge-main.c
39557+++ b/drivers/net/vxge/vxge-main.c
39558@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39559 struct sk_buff *completed[NR_SKB_COMPLETED];
39560 int more;
39561
39562+ pax_track_stack();
39563+
39564 do {
39565 more = 0;
39566 skb_ptr = completed;
39567@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39568 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39569 int index;
39570
39571+ pax_track_stack();
39572+
39573 /*
39574 * Filling
39575 * - itable with bucket numbers
39576diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39577index 461742b..81be42e 100644
39578--- a/drivers/net/vxge/vxge-traffic.h
39579+++ b/drivers/net/vxge/vxge-traffic.h
39580@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39581 struct vxge_hw_mempool_dma *dma_object,
39582 u32 index,
39583 u32 is_last);
39584-};
39585+} __no_const;
39586
39587 void
39588 __vxge_hw_mempool_destroy(
39589diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39590index cd8cb95..4153b79 100644
39591--- a/drivers/net/wan/cycx_x25.c
39592+++ b/drivers/net/wan/cycx_x25.c
39593@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39594 unsigned char hex[1024],
39595 * phex = hex;
39596
39597+ pax_track_stack();
39598+
39599 if (len >= (sizeof(hex) / 2))
39600 len = (sizeof(hex) / 2) - 1;
39601
39602diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39603index aa9248f..a4e3c3b 100644
39604--- a/drivers/net/wan/hdlc_x25.c
39605+++ b/drivers/net/wan/hdlc_x25.c
39606@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39607
39608 static int x25_open(struct net_device *dev)
39609 {
39610- struct lapb_register_struct cb;
39611+ static struct lapb_register_struct cb = {
39612+ .connect_confirmation = x25_connected,
39613+ .connect_indication = x25_connected,
39614+ .disconnect_confirmation = x25_disconnected,
39615+ .disconnect_indication = x25_disconnected,
39616+ .data_indication = x25_data_indication,
39617+ .data_transmit = x25_data_transmit
39618+ };
39619 int result;
39620
39621- cb.connect_confirmation = x25_connected;
39622- cb.connect_indication = x25_connected;
39623- cb.disconnect_confirmation = x25_disconnected;
39624- cb.disconnect_indication = x25_disconnected;
39625- cb.data_indication = x25_data_indication;
39626- cb.data_transmit = x25_data_transmit;
39627-
39628 result = lapb_register(dev, &cb);
39629 if (result != LAPB_OK)
39630 return result;
39631diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39632index 5ad287c..783b020 100644
39633--- a/drivers/net/wimax/i2400m/usb-fw.c
39634+++ b/drivers/net/wimax/i2400m/usb-fw.c
39635@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39636 int do_autopm = 1;
39637 DECLARE_COMPLETION_ONSTACK(notif_completion);
39638
39639+ pax_track_stack();
39640+
39641 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39642 i2400m, ack, ack_size);
39643 BUG_ON(_ack == i2400m->bm_ack_buf);
39644diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39645index 6c26840..62c97c3 100644
39646--- a/drivers/net/wireless/airo.c
39647+++ b/drivers/net/wireless/airo.c
39648@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39649 BSSListElement * loop_net;
39650 BSSListElement * tmp_net;
39651
39652+ pax_track_stack();
39653+
39654 /* Blow away current list of scan results */
39655 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39656 list_move_tail (&loop_net->list, &ai->network_free_list);
39657@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39658 WepKeyRid wkr;
39659 int rc;
39660
39661+ pax_track_stack();
39662+
39663 memset( &mySsid, 0, sizeof( mySsid ) );
39664 kfree (ai->flash);
39665 ai->flash = NULL;
39666@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39667 __le32 *vals = stats.vals;
39668 int len;
39669
39670+ pax_track_stack();
39671+
39672 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39673 return -ENOMEM;
39674 data = (struct proc_data *)file->private_data;
39675@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39676 /* If doLoseSync is not 1, we won't do a Lose Sync */
39677 int doLoseSync = -1;
39678
39679+ pax_track_stack();
39680+
39681 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39682 return -ENOMEM;
39683 data = (struct proc_data *)file->private_data;
39684@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39685 int i;
39686 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39687
39688+ pax_track_stack();
39689+
39690 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39691 if (!qual)
39692 return -ENOMEM;
39693@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39694 CapabilityRid cap_rid;
39695 __le32 *vals = stats_rid.vals;
39696
39697+ pax_track_stack();
39698+
39699 /* Get stats out of the card */
39700 clear_bit(JOB_WSTATS, &local->jobs);
39701 if (local->power.event) {
39702diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39703index 747508c..82e965d 100644
39704--- a/drivers/net/wireless/ath/ath5k/debug.c
39705+++ b/drivers/net/wireless/ath/ath5k/debug.c
39706@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39707 unsigned int v;
39708 u64 tsf;
39709
39710+ pax_track_stack();
39711+
39712 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39713 len += snprintf(buf+len, sizeof(buf)-len,
39714 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39715@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39716 unsigned int len = 0;
39717 unsigned int i;
39718
39719+ pax_track_stack();
39720+
39721 len += snprintf(buf+len, sizeof(buf)-len,
39722 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39723
39724diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39725index 2be4c22..593b1eb 100644
39726--- a/drivers/net/wireless/ath/ath9k/debug.c
39727+++ b/drivers/net/wireless/ath/ath9k/debug.c
39728@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39729 char buf[512];
39730 unsigned int len = 0;
39731
39732+ pax_track_stack();
39733+
39734 len += snprintf(buf + len, sizeof(buf) - len,
39735 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39736 len += snprintf(buf + len, sizeof(buf) - len,
39737@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39738 int i;
39739 u8 addr[ETH_ALEN];
39740
39741+ pax_track_stack();
39742+
39743 len += snprintf(buf + len, sizeof(buf) - len,
39744 "primary: %s (%s chan=%d ht=%d)\n",
39745 wiphy_name(sc->pri_wiphy->hw->wiphy),
39746diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39747index 80b19a4..dab3a45 100644
39748--- a/drivers/net/wireless/b43/debugfs.c
39749+++ b/drivers/net/wireless/b43/debugfs.c
39750@@ -43,7 +43,7 @@ static struct dentry *rootdir;
39751 struct b43_debugfs_fops {
39752 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39753 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39754- struct file_operations fops;
39755+ const struct file_operations fops;
39756 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39757 size_t file_struct_offset;
39758 };
39759diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39760index 1f85ac5..c99b4b4 100644
39761--- a/drivers/net/wireless/b43legacy/debugfs.c
39762+++ b/drivers/net/wireless/b43legacy/debugfs.c
39763@@ -44,7 +44,7 @@ static struct dentry *rootdir;
39764 struct b43legacy_debugfs_fops {
39765 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39766 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39767- struct file_operations fops;
39768+ const struct file_operations fops;
39769 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39770 size_t file_struct_offset;
39771 /* Take wl->irq_lock before calling read/write? */
39772diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39773index 43102bf..3b569c3 100644
39774--- a/drivers/net/wireless/ipw2x00/ipw2100.c
39775+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39776@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39777 int err;
39778 DECLARE_SSID_BUF(ssid);
39779
39780+ pax_track_stack();
39781+
39782 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39783
39784 if (ssid_len)
39785@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39786 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39787 int err;
39788
39789+ pax_track_stack();
39790+
39791 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39792 idx, keylen, len);
39793
39794diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39795index 282b1f7..169f0cf 100644
39796--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39797+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39798@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39799 unsigned long flags;
39800 DECLARE_SSID_BUF(ssid);
39801
39802+ pax_track_stack();
39803+
39804 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39805 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39806 print_ssid(ssid, info_element->data, info_element->len),
39807diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39808index 950267a..80d5fd2 100644
39809--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39810+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39811@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39812 },
39813 };
39814
39815-static struct iwl_ops iwl1000_ops = {
39816+static const struct iwl_ops iwl1000_ops = {
39817 .ucode = &iwl5000_ucode,
39818 .lib = &iwl1000_lib,
39819 .hcmd = &iwl5000_hcmd,
39820diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39821index 56bfcc3..b348020 100644
39822--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39823+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39824@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39825 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39826 };
39827
39828-static struct iwl_ops iwl3945_ops = {
39829+static const struct iwl_ops iwl3945_ops = {
39830 .ucode = &iwl3945_ucode,
39831 .lib = &iwl3945_lib,
39832 .hcmd = &iwl3945_hcmd,
39833diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39834index 585b8d4..e142963 100644
39835--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39836+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39837@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39838 },
39839 };
39840
39841-static struct iwl_ops iwl4965_ops = {
39842+static const struct iwl_ops iwl4965_ops = {
39843 .ucode = &iwl4965_ucode,
39844 .lib = &iwl4965_lib,
39845 .hcmd = &iwl4965_hcmd,
39846diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39847index 1f423f2..e37c192 100644
39848--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39849+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39850@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39851 },
39852 };
39853
39854-struct iwl_ops iwl5000_ops = {
39855+const struct iwl_ops iwl5000_ops = {
39856 .ucode = &iwl5000_ucode,
39857 .lib = &iwl5000_lib,
39858 .hcmd = &iwl5000_hcmd,
39859 .utils = &iwl5000_hcmd_utils,
39860 };
39861
39862-static struct iwl_ops iwl5150_ops = {
39863+static const struct iwl_ops iwl5150_ops = {
39864 .ucode = &iwl5000_ucode,
39865 .lib = &iwl5150_lib,
39866 .hcmd = &iwl5000_hcmd,
39867diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39868index 1473452..f07d5e1 100644
39869--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39870+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39871@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39872 .calc_rssi = iwl5000_calc_rssi,
39873 };
39874
39875-static struct iwl_ops iwl6000_ops = {
39876+static const struct iwl_ops iwl6000_ops = {
39877 .ucode = &iwl5000_ucode,
39878 .lib = &iwl6000_lib,
39879 .hcmd = &iwl5000_hcmd,
39880diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39881index 1a3dfa2..b3e0a61 100644
39882--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39883+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39884@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39885 u8 active_index = 0;
39886 s32 tpt = 0;
39887
39888+ pax_track_stack();
39889+
39890 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39891
39892 if (!ieee80211_is_data(hdr->frame_control) ||
39893@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39894 u8 valid_tx_ant = 0;
39895 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39896
39897+ pax_track_stack();
39898+
39899 /* Override starting rate (index 0) if needed for debug purposes */
39900 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39901
39902diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39903index 0e56d78..6a3c107 100644
39904--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39905+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39906@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39907 if (iwl_debug_level & IWL_DL_INFO)
39908 dev_printk(KERN_DEBUG, &(pdev->dev),
39909 "Disabling hw_scan\n");
39910- iwl_hw_ops.hw_scan = NULL;
39911+ pax_open_kernel();
39912+ *(void **)&iwl_hw_ops.hw_scan = NULL;
39913+ pax_close_kernel();
39914 }
39915
39916 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39917diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39918index cbc6290..eb323d7 100644
39919--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39920+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39921@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39922 #endif
39923
39924 #else
39925-#define IWL_DEBUG(__priv, level, fmt, args...)
39926-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39927+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39928+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39929 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39930 void *p, u32 len)
39931 {}
39932diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39933index a198bcf..8e68233 100644
39934--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39935+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39936@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39937 int pos = 0;
39938 const size_t bufsz = sizeof(buf);
39939
39940+ pax_track_stack();
39941+
39942 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39943 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39944 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39945@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39946 const size_t bufsz = sizeof(buf);
39947 ssize_t ret;
39948
39949+ pax_track_stack();
39950+
39951 for (i = 0; i < AC_NUM; i++) {
39952 pos += scnprintf(buf + pos, bufsz - pos,
39953 "\tcw_min\tcw_max\taifsn\ttxop\n");
39954diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39955index 3539ea4..b174bfa 100644
39956--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39957+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39958@@ -68,7 +68,7 @@ struct iwl_tx_queue;
39959
39960 /* shared structures from iwl-5000.c */
39961 extern struct iwl_mod_params iwl50_mod_params;
39962-extern struct iwl_ops iwl5000_ops;
39963+extern const struct iwl_ops iwl5000_ops;
39964 extern struct iwl_ucode_ops iwl5000_ucode;
39965 extern struct iwl_lib_ops iwl5000_lib;
39966 extern struct iwl_hcmd_ops iwl5000_hcmd;
39967diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39968index 619590d..69235ee 100644
39969--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39970+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39971@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39972 */
39973 if (iwl3945_mod_params.disable_hw_scan) {
39974 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39975- iwl3945_hw_ops.hw_scan = NULL;
39976+ pax_open_kernel();
39977+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39978+ pax_close_kernel();
39979 }
39980
39981
39982diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39983index 1465379..fe4d78b 100644
39984--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39985+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39986@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39987 int buf_len = 512;
39988 size_t len = 0;
39989
39990+ pax_track_stack();
39991+
39992 if (*ppos != 0)
39993 return 0;
39994 if (count < sizeof(buf))
39995diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39996index 893a55c..7f66a50 100644
39997--- a/drivers/net/wireless/libertas/debugfs.c
39998+++ b/drivers/net/wireless/libertas/debugfs.c
39999@@ -708,7 +708,7 @@ out_unlock:
40000 struct lbs_debugfs_files {
40001 const char *name;
40002 int perm;
40003- struct file_operations fops;
40004+ const struct file_operations fops;
40005 };
40006
40007 static const struct lbs_debugfs_files debugfs_files[] = {
40008diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
40009index 2ecbedb..42704f0 100644
40010--- a/drivers/net/wireless/rndis_wlan.c
40011+++ b/drivers/net/wireless/rndis_wlan.c
40012@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
40013
40014 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
40015
40016- if (rts_threshold < 0 || rts_threshold > 2347)
40017+ if (rts_threshold > 2347)
40018 rts_threshold = 2347;
40019
40020 tmp = cpu_to_le32(rts_threshold);
40021diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
40022index 334ccd6..47f8944 100644
40023--- a/drivers/oprofile/buffer_sync.c
40024+++ b/drivers/oprofile/buffer_sync.c
40025@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
40026 if (cookie == NO_COOKIE)
40027 offset = pc;
40028 if (cookie == INVALID_COOKIE) {
40029- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40030+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40031 offset = pc;
40032 }
40033 if (cookie != last_cookie) {
40034@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
40035 /* add userspace sample */
40036
40037 if (!mm) {
40038- atomic_inc(&oprofile_stats.sample_lost_no_mm);
40039+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
40040 return 0;
40041 }
40042
40043 cookie = lookup_dcookie(mm, s->eip, &offset);
40044
40045 if (cookie == INVALID_COOKIE) {
40046- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
40047+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
40048 return 0;
40049 }
40050
40051@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
40052 /* ignore backtraces if failed to add a sample */
40053 if (state == sb_bt_start) {
40054 state = sb_bt_ignore;
40055- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
40056+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
40057 }
40058 }
40059 release_mm(mm);
40060diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
40061index 5df60a6..72f5c1c 100644
40062--- a/drivers/oprofile/event_buffer.c
40063+++ b/drivers/oprofile/event_buffer.c
40064@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
40065 }
40066
40067 if (buffer_pos == buffer_size) {
40068- atomic_inc(&oprofile_stats.event_lost_overflow);
40069+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
40070 return;
40071 }
40072
40073diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
40074index dc8a042..fe5f315 100644
40075--- a/drivers/oprofile/oprof.c
40076+++ b/drivers/oprofile/oprof.c
40077@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
40078 if (oprofile_ops.switch_events())
40079 return;
40080
40081- atomic_inc(&oprofile_stats.multiplex_counter);
40082+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
40083 start_switch_worker();
40084 }
40085
40086diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
40087index 61689e8..387f7f8 100644
40088--- a/drivers/oprofile/oprofile_stats.c
40089+++ b/drivers/oprofile/oprofile_stats.c
40090@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
40091 cpu_buf->sample_invalid_eip = 0;
40092 }
40093
40094- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
40095- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
40096- atomic_set(&oprofile_stats.event_lost_overflow, 0);
40097- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
40098- atomic_set(&oprofile_stats.multiplex_counter, 0);
40099+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
40100+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
40101+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
40102+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
40103+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
40104 }
40105
40106
40107diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
40108index 0b54e46..a37c527 100644
40109--- a/drivers/oprofile/oprofile_stats.h
40110+++ b/drivers/oprofile/oprofile_stats.h
40111@@ -13,11 +13,11 @@
40112 #include <asm/atomic.h>
40113
40114 struct oprofile_stat_struct {
40115- atomic_t sample_lost_no_mm;
40116- atomic_t sample_lost_no_mapping;
40117- atomic_t bt_lost_no_mapping;
40118- atomic_t event_lost_overflow;
40119- atomic_t multiplex_counter;
40120+ atomic_unchecked_t sample_lost_no_mm;
40121+ atomic_unchecked_t sample_lost_no_mapping;
40122+ atomic_unchecked_t bt_lost_no_mapping;
40123+ atomic_unchecked_t event_lost_overflow;
40124+ atomic_unchecked_t multiplex_counter;
40125 };
40126
40127 extern struct oprofile_stat_struct oprofile_stats;
40128diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40129index 2766a6d..80c77e2 100644
40130--- a/drivers/oprofile/oprofilefs.c
40131+++ b/drivers/oprofile/oprofilefs.c
40132@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
40133
40134
40135 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40136- char const *name, atomic_t *val)
40137+ char const *name, atomic_unchecked_t *val)
40138 {
40139 struct dentry *d = __oprofilefs_create_file(sb, root, name,
40140 &atomic_ro_fops, 0444);
40141diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
40142index 13a64bc..ad62835 100644
40143--- a/drivers/parisc/pdc_stable.c
40144+++ b/drivers/parisc/pdc_stable.c
40145@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
40146 return ret;
40147 }
40148
40149-static struct sysfs_ops pdcspath_attr_ops = {
40150+static const struct sysfs_ops pdcspath_attr_ops = {
40151 .show = pdcspath_attr_show,
40152 .store = pdcspath_attr_store,
40153 };
40154diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40155index 8eefe56..40751a7 100644
40156--- a/drivers/parport/procfs.c
40157+++ b/drivers/parport/procfs.c
40158@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40159
40160 *ppos += len;
40161
40162- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40163+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40164 }
40165
40166 #ifdef CONFIG_PARPORT_1284
40167@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40168
40169 *ppos += len;
40170
40171- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40172+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40173 }
40174 #endif /* IEEE1284.3 support. */
40175
40176diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
40177index 73e7d8e..c80f3d2 100644
40178--- a/drivers/pci/hotplug/acpiphp_glue.c
40179+++ b/drivers/pci/hotplug/acpiphp_glue.c
40180@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
40181 }
40182
40183
40184-static struct acpi_dock_ops acpiphp_dock_ops = {
40185+static const struct acpi_dock_ops acpiphp_dock_ops = {
40186 .handler = handle_hotplug_event_func,
40187 };
40188
40189diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
40190index 9fff878..ad0ad53 100644
40191--- a/drivers/pci/hotplug/cpci_hotplug.h
40192+++ b/drivers/pci/hotplug/cpci_hotplug.h
40193@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
40194 int (*hardware_test) (struct slot* slot, u32 value);
40195 u8 (*get_power) (struct slot* slot);
40196 int (*set_power) (struct slot* slot, int value);
40197-};
40198+} __no_const;
40199
40200 struct cpci_hp_controller {
40201 unsigned int irq;
40202diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40203index 76ba8a1..20ca857 100644
40204--- a/drivers/pci/hotplug/cpqphp_nvram.c
40205+++ b/drivers/pci/hotplug/cpqphp_nvram.c
40206@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40207
40208 void compaq_nvram_init (void __iomem *rom_start)
40209 {
40210+
40211+#ifndef CONFIG_PAX_KERNEXEC
40212 if (rom_start) {
40213 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40214 }
40215+#endif
40216+
40217 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40218
40219 /* initialize our int15 lock */
40220diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
40221index 6151389..0a894ef 100644
40222--- a/drivers/pci/hotplug/fakephp.c
40223+++ b/drivers/pci/hotplug/fakephp.c
40224@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
40225 }
40226
40227 static struct kobj_type legacy_ktype = {
40228- .sysfs_ops = &(struct sysfs_ops){
40229+ .sysfs_ops = &(const struct sysfs_ops){
40230 .store = legacy_store, .show = legacy_show
40231 },
40232 .release = &legacy_release,
40233diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
40234index 5b680df..fe05b7e 100644
40235--- a/drivers/pci/intel-iommu.c
40236+++ b/drivers/pci/intel-iommu.c
40237@@ -2643,7 +2643,7 @@ error:
40238 return 0;
40239 }
40240
40241-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
40242+dma_addr_t intel_map_page(struct device *dev, struct page *page,
40243 unsigned long offset, size_t size,
40244 enum dma_data_direction dir,
40245 struct dma_attrs *attrs)
40246@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
40247 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
40248 }
40249
40250-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40251+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40252 size_t size, enum dma_data_direction dir,
40253 struct dma_attrs *attrs)
40254 {
40255@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
40256 }
40257 }
40258
40259-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40260+void *intel_alloc_coherent(struct device *hwdev, size_t size,
40261 dma_addr_t *dma_handle, gfp_t flags)
40262 {
40263 void *vaddr;
40264@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
40265 return NULL;
40266 }
40267
40268-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40269+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40270 dma_addr_t dma_handle)
40271 {
40272 int order;
40273@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40274 free_pages((unsigned long)vaddr, order);
40275 }
40276
40277-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40278+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40279 int nelems, enum dma_data_direction dir,
40280 struct dma_attrs *attrs)
40281 {
40282@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40283 return nelems;
40284 }
40285
40286-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40287+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40288 enum dma_data_direction dir, struct dma_attrs *attrs)
40289 {
40290 int i;
40291@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40292 return nelems;
40293 }
40294
40295-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40296+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40297 {
40298 return !dma_addr;
40299 }
40300
40301-struct dma_map_ops intel_dma_ops = {
40302+const struct dma_map_ops intel_dma_ops = {
40303 .alloc_coherent = intel_alloc_coherent,
40304 .free_coherent = intel_free_coherent,
40305 .map_sg = intel_map_sg,
40306diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40307index 5b7056c..607bc94 100644
40308--- a/drivers/pci/pcie/aspm.c
40309+++ b/drivers/pci/pcie/aspm.c
40310@@ -27,9 +27,9 @@
40311 #define MODULE_PARAM_PREFIX "pcie_aspm."
40312
40313 /* Note: those are not register definitions */
40314-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40315-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40316-#define ASPM_STATE_L1 (4) /* L1 state */
40317+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40318+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40319+#define ASPM_STATE_L1 (4U) /* L1 state */
40320 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40321 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40322
40323diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40324index 8105e32..ca10419 100644
40325--- a/drivers/pci/probe.c
40326+++ b/drivers/pci/probe.c
40327@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40328 return ret;
40329 }
40330
40331-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40332+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40333 struct device_attribute *attr,
40334 char *buf)
40335 {
40336 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40337 }
40338
40339-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40340+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40341 struct device_attribute *attr,
40342 char *buf)
40343 {
40344diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40345index a03ad8c..024b0da 100644
40346--- a/drivers/pci/proc.c
40347+++ b/drivers/pci/proc.c
40348@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40349 static int __init pci_proc_init(void)
40350 {
40351 struct pci_dev *dev = NULL;
40352+
40353+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40354+#ifdef CONFIG_GRKERNSEC_PROC_USER
40355+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40356+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40357+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40358+#endif
40359+#else
40360 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40361+#endif
40362 proc_create("devices", 0, proc_bus_pci_dir,
40363 &proc_bus_pci_dev_operations);
40364 proc_initialized = 1;
40365diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40366index 8c02b6c..5584d8e 100644
40367--- a/drivers/pci/slot.c
40368+++ b/drivers/pci/slot.c
40369@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40370 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40371 }
40372
40373-static struct sysfs_ops pci_slot_sysfs_ops = {
40374+static const struct sysfs_ops pci_slot_sysfs_ops = {
40375 .show = pci_slot_attr_show,
40376 .store = pci_slot_attr_store,
40377 };
40378diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40379index 30cf71d2..50938f1 100644
40380--- a/drivers/pcmcia/pcmcia_ioctl.c
40381+++ b/drivers/pcmcia/pcmcia_ioctl.c
40382@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40383 return -EFAULT;
40384 }
40385 }
40386- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40387+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40388 if (!buf)
40389 return -ENOMEM;
40390
40391diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40392index 52183c4..b224c69 100644
40393--- a/drivers/platform/x86/acer-wmi.c
40394+++ b/drivers/platform/x86/acer-wmi.c
40395@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40396 return 0;
40397 }
40398
40399-static struct backlight_ops acer_bl_ops = {
40400+static const struct backlight_ops acer_bl_ops = {
40401 .get_brightness = read_brightness,
40402 .update_status = update_bl_status,
40403 };
40404diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40405index 767cb61..a87380b 100644
40406--- a/drivers/platform/x86/asus-laptop.c
40407+++ b/drivers/platform/x86/asus-laptop.c
40408@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40409 */
40410 static int read_brightness(struct backlight_device *bd);
40411 static int update_bl_status(struct backlight_device *bd);
40412-static struct backlight_ops asusbl_ops = {
40413+static const struct backlight_ops asusbl_ops = {
40414 .get_brightness = read_brightness,
40415 .update_status = update_bl_status,
40416 };
40417diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40418index d66c07a..a4abaac 100644
40419--- a/drivers/platform/x86/asus_acpi.c
40420+++ b/drivers/platform/x86/asus_acpi.c
40421@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40422 return 0;
40423 }
40424
40425-static struct backlight_ops asus_backlight_data = {
40426+static const struct backlight_ops asus_backlight_data = {
40427 .get_brightness = read_brightness,
40428 .update_status = set_brightness_status,
40429 };
40430diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40431index 11003bb..550ff1b 100644
40432--- a/drivers/platform/x86/compal-laptop.c
40433+++ b/drivers/platform/x86/compal-laptop.c
40434@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40435 return set_lcd_level(b->props.brightness);
40436 }
40437
40438-static struct backlight_ops compalbl_ops = {
40439+static const struct backlight_ops compalbl_ops = {
40440 .get_brightness = bl_get_brightness,
40441 .update_status = bl_update_status,
40442 };
40443diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40444index 07a74da..9dc99fa 100644
40445--- a/drivers/platform/x86/dell-laptop.c
40446+++ b/drivers/platform/x86/dell-laptop.c
40447@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40448 return buffer.output[1];
40449 }
40450
40451-static struct backlight_ops dell_ops = {
40452+static const struct backlight_ops dell_ops = {
40453 .get_brightness = dell_get_intensity,
40454 .update_status = dell_send_intensity,
40455 };
40456diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40457index c533b1c..5c81f22 100644
40458--- a/drivers/platform/x86/eeepc-laptop.c
40459+++ b/drivers/platform/x86/eeepc-laptop.c
40460@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40461 */
40462 static int read_brightness(struct backlight_device *bd);
40463 static int update_bl_status(struct backlight_device *bd);
40464-static struct backlight_ops eeepcbl_ops = {
40465+static const struct backlight_ops eeepcbl_ops = {
40466 .get_brightness = read_brightness,
40467 .update_status = update_bl_status,
40468 };
40469diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40470index bcd4ba8..a249b35 100644
40471--- a/drivers/platform/x86/fujitsu-laptop.c
40472+++ b/drivers/platform/x86/fujitsu-laptop.c
40473@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40474 return ret;
40475 }
40476
40477-static struct backlight_ops fujitsubl_ops = {
40478+static const struct backlight_ops fujitsubl_ops = {
40479 .get_brightness = bl_get_brightness,
40480 .update_status = bl_update_status,
40481 };
40482diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40483index 759763d..1093ba2 100644
40484--- a/drivers/platform/x86/msi-laptop.c
40485+++ b/drivers/platform/x86/msi-laptop.c
40486@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40487 return set_lcd_level(b->props.brightness);
40488 }
40489
40490-static struct backlight_ops msibl_ops = {
40491+static const struct backlight_ops msibl_ops = {
40492 .get_brightness = bl_get_brightness,
40493 .update_status = bl_update_status,
40494 };
40495diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40496index fe7cf01..9012d8d 100644
40497--- a/drivers/platform/x86/panasonic-laptop.c
40498+++ b/drivers/platform/x86/panasonic-laptop.c
40499@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40500 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40501 }
40502
40503-static struct backlight_ops pcc_backlight_ops = {
40504+static const struct backlight_ops pcc_backlight_ops = {
40505 .get_brightness = bl_get,
40506 .update_status = bl_set_status,
40507 };
40508diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40509index a2a742c..b37e25e 100644
40510--- a/drivers/platform/x86/sony-laptop.c
40511+++ b/drivers/platform/x86/sony-laptop.c
40512@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40513 }
40514
40515 static struct backlight_device *sony_backlight_device;
40516-static struct backlight_ops sony_backlight_ops = {
40517+static const struct backlight_ops sony_backlight_ops = {
40518 .update_status = sony_backlight_update_status,
40519 .get_brightness = sony_backlight_get_brightness,
40520 };
40521diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40522index 68271ae..5e8fb10 100644
40523--- a/drivers/platform/x86/thinkpad_acpi.c
40524+++ b/drivers/platform/x86/thinkpad_acpi.c
40525@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40526 return 0;
40527 }
40528
40529-void static hotkey_mask_warn_incomplete_mask(void)
40530+static void hotkey_mask_warn_incomplete_mask(void)
40531 {
40532 /* log only what the user can fix... */
40533 const u32 wantedmask = hotkey_driver_mask &
40534@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40535 BACKLIGHT_UPDATE_HOTKEY);
40536 }
40537
40538-static struct backlight_ops ibm_backlight_data = {
40539+static const struct backlight_ops ibm_backlight_data = {
40540 .get_brightness = brightness_get,
40541 .update_status = brightness_update_status,
40542 };
40543diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40544index 51c0a8b..0786629 100644
40545--- a/drivers/platform/x86/toshiba_acpi.c
40546+++ b/drivers/platform/x86/toshiba_acpi.c
40547@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40548 return AE_OK;
40549 }
40550
40551-static struct backlight_ops toshiba_backlight_data = {
40552+static const struct backlight_ops toshiba_backlight_data = {
40553 .get_brightness = get_lcd,
40554 .update_status = set_lcd_status,
40555 };
40556diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40557index fc83783c..cf370d7 100644
40558--- a/drivers/pnp/pnpbios/bioscalls.c
40559+++ b/drivers/pnp/pnpbios/bioscalls.c
40560@@ -60,7 +60,7 @@ do { \
40561 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40562 } while(0)
40563
40564-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40565+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40566 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40567
40568 /*
40569@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40570
40571 cpu = get_cpu();
40572 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40573+
40574+ pax_open_kernel();
40575 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40576+ pax_close_kernel();
40577
40578 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40579 spin_lock_irqsave(&pnp_bios_lock, flags);
40580@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40581 :"memory");
40582 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40583
40584+ pax_open_kernel();
40585 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40586+ pax_close_kernel();
40587+
40588 put_cpu();
40589
40590 /* If we get here and this is set then the PnP BIOS faulted on us. */
40591@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40592 return status;
40593 }
40594
40595-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40596+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40597 {
40598 int i;
40599
40600@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40601 pnp_bios_callpoint.offset = header->fields.pm16offset;
40602 pnp_bios_callpoint.segment = PNP_CS16;
40603
40604+ pax_open_kernel();
40605+
40606 for_each_possible_cpu(i) {
40607 struct desc_struct *gdt = get_cpu_gdt_table(i);
40608 if (!gdt)
40609@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40610 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40611 (unsigned long)__va(header->fields.pm16dseg));
40612 }
40613+
40614+ pax_close_kernel();
40615 }
40616diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40617index ba97654..66b99d4 100644
40618--- a/drivers/pnp/resource.c
40619+++ b/drivers/pnp/resource.c
40620@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40621 return 1;
40622
40623 /* check if the resource is valid */
40624- if (*irq < 0 || *irq > 15)
40625+ if (*irq > 15)
40626 return 0;
40627
40628 /* check if the resource is reserved */
40629@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40630 return 1;
40631
40632 /* check if the resource is valid */
40633- if (*dma < 0 || *dma == 4 || *dma > 7)
40634+ if (*dma == 4 || *dma > 7)
40635 return 0;
40636
40637 /* check if the resource is reserved */
40638diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40639index 62bb981..24a2dc9 100644
40640--- a/drivers/power/bq27x00_battery.c
40641+++ b/drivers/power/bq27x00_battery.c
40642@@ -44,7 +44,7 @@ struct bq27x00_device_info;
40643 struct bq27x00_access_methods {
40644 int (*read)(u8 reg, int *rt_value, int b_single,
40645 struct bq27x00_device_info *di);
40646-};
40647+} __no_const;
40648
40649 struct bq27x00_device_info {
40650 struct device *dev;
40651diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40652index 62227cd..b5b538b 100644
40653--- a/drivers/rtc/rtc-dev.c
40654+++ b/drivers/rtc/rtc-dev.c
40655@@ -14,6 +14,7 @@
40656 #include <linux/module.h>
40657 #include <linux/rtc.h>
40658 #include <linux/sched.h>
40659+#include <linux/grsecurity.h>
40660 #include "rtc-core.h"
40661
40662 static dev_t rtc_devt;
40663@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40664 if (copy_from_user(&tm, uarg, sizeof(tm)))
40665 return -EFAULT;
40666
40667+ gr_log_timechange();
40668+
40669 return rtc_set_time(rtc, &tm);
40670
40671 case RTC_PIE_ON:
40672diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40673index 968e3c7..fbc637a 100644
40674--- a/drivers/s390/cio/qdio_perf.c
40675+++ b/drivers/s390/cio/qdio_perf.c
40676@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40677 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40678 {
40679 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40680- (long)atomic_long_read(&perf_stats.qdio_int));
40681+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40682 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40683- (long)atomic_long_read(&perf_stats.pci_int));
40684+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40685 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40686- (long)atomic_long_read(&perf_stats.thin_int));
40687+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40688 seq_printf(m, "\n");
40689 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40690- (long)atomic_long_read(&perf_stats.tasklet_inbound));
40691+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40692 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40693- (long)atomic_long_read(&perf_stats.tasklet_outbound));
40694+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40695 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40696- (long)atomic_long_read(&perf_stats.tasklet_thinint),
40697- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40698+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40699+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40700 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40701- (long)atomic_long_read(&perf_stats.thinint_inbound),
40702- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40703+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40704+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40705 seq_printf(m, "\n");
40706 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40707- (long)atomic_long_read(&perf_stats.siga_in));
40708+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40709 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40710- (long)atomic_long_read(&perf_stats.siga_out));
40711+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40712 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40713- (long)atomic_long_read(&perf_stats.siga_sync));
40714+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40715 seq_printf(m, "\n");
40716 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40717- (long)atomic_long_read(&perf_stats.inbound_handler));
40718+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40719 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40720- (long)atomic_long_read(&perf_stats.outbound_handler));
40721+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40722 seq_printf(m, "\n");
40723 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40724- (long)atomic_long_read(&perf_stats.fast_requeue));
40725+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40726 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40727- (long)atomic_long_read(&perf_stats.outbound_target_full));
40728+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40729 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40730- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40731+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40732 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40733- (long)atomic_long_read(&perf_stats.debug_stop_polling));
40734+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40735 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40736- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40737+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40738 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40739- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40740- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40741+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40742+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40743 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40744- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40745- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40746+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40747+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40748 seq_printf(m, "\n");
40749 return 0;
40750 }
40751diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40752index ff4504c..b3604c3 100644
40753--- a/drivers/s390/cio/qdio_perf.h
40754+++ b/drivers/s390/cio/qdio_perf.h
40755@@ -13,46 +13,46 @@
40756
40757 struct qdio_perf_stats {
40758 /* interrupt handler calls */
40759- atomic_long_t qdio_int;
40760- atomic_long_t pci_int;
40761- atomic_long_t thin_int;
40762+ atomic_long_unchecked_t qdio_int;
40763+ atomic_long_unchecked_t pci_int;
40764+ atomic_long_unchecked_t thin_int;
40765
40766 /* tasklet runs */
40767- atomic_long_t tasklet_inbound;
40768- atomic_long_t tasklet_outbound;
40769- atomic_long_t tasklet_thinint;
40770- atomic_long_t tasklet_thinint_loop;
40771- atomic_long_t thinint_inbound;
40772- atomic_long_t thinint_inbound_loop;
40773- atomic_long_t thinint_inbound_loop2;
40774+ atomic_long_unchecked_t tasklet_inbound;
40775+ atomic_long_unchecked_t tasklet_outbound;
40776+ atomic_long_unchecked_t tasklet_thinint;
40777+ atomic_long_unchecked_t tasklet_thinint_loop;
40778+ atomic_long_unchecked_t thinint_inbound;
40779+ atomic_long_unchecked_t thinint_inbound_loop;
40780+ atomic_long_unchecked_t thinint_inbound_loop2;
40781
40782 /* signal adapter calls */
40783- atomic_long_t siga_out;
40784- atomic_long_t siga_in;
40785- atomic_long_t siga_sync;
40786+ atomic_long_unchecked_t siga_out;
40787+ atomic_long_unchecked_t siga_in;
40788+ atomic_long_unchecked_t siga_sync;
40789
40790 /* misc */
40791- atomic_long_t inbound_handler;
40792- atomic_long_t outbound_handler;
40793- atomic_long_t fast_requeue;
40794- atomic_long_t outbound_target_full;
40795+ atomic_long_unchecked_t inbound_handler;
40796+ atomic_long_unchecked_t outbound_handler;
40797+ atomic_long_unchecked_t fast_requeue;
40798+ atomic_long_unchecked_t outbound_target_full;
40799
40800 /* for debugging */
40801- atomic_long_t debug_tl_out_timer;
40802- atomic_long_t debug_stop_polling;
40803- atomic_long_t debug_eqbs_all;
40804- atomic_long_t debug_eqbs_incomplete;
40805- atomic_long_t debug_sqbs_all;
40806- atomic_long_t debug_sqbs_incomplete;
40807+ atomic_long_unchecked_t debug_tl_out_timer;
40808+ atomic_long_unchecked_t debug_stop_polling;
40809+ atomic_long_unchecked_t debug_eqbs_all;
40810+ atomic_long_unchecked_t debug_eqbs_incomplete;
40811+ atomic_long_unchecked_t debug_sqbs_all;
40812+ atomic_long_unchecked_t debug_sqbs_incomplete;
40813 };
40814
40815 extern struct qdio_perf_stats perf_stats;
40816 extern int qdio_performance_stats;
40817
40818-static inline void qdio_perf_stat_inc(atomic_long_t *count)
40819+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40820 {
40821 if (qdio_performance_stats)
40822- atomic_long_inc(count);
40823+ atomic_long_inc_unchecked(count);
40824 }
40825
40826 int qdio_setup_perf_stats(void);
40827diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40828index 1ddcf40..a85f062 100644
40829--- a/drivers/scsi/BusLogic.c
40830+++ b/drivers/scsi/BusLogic.c
40831@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40832 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40833 *PrototypeHostAdapter)
40834 {
40835+ pax_track_stack();
40836+
40837 /*
40838 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40839 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40840diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40841index cdbdec9..b7d560b 100644
40842--- a/drivers/scsi/aacraid/aacraid.h
40843+++ b/drivers/scsi/aacraid/aacraid.h
40844@@ -471,7 +471,7 @@ struct adapter_ops
40845 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40846 /* Administrative operations */
40847 int (*adapter_comm)(struct aac_dev * dev, int comm);
40848-};
40849+} __no_const;
40850
40851 /*
40852 * Define which interrupt handler needs to be installed
40853diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40854index a5b8e7b..a6a0e43 100644
40855--- a/drivers/scsi/aacraid/commctrl.c
40856+++ b/drivers/scsi/aacraid/commctrl.c
40857@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40858 u32 actual_fibsize64, actual_fibsize = 0;
40859 int i;
40860
40861+ pax_track_stack();
40862
40863 if (dev->in_reset) {
40864 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40865diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40866index 9b97c3e..f099725 100644
40867--- a/drivers/scsi/aacraid/linit.c
40868+++ b/drivers/scsi/aacraid/linit.c
40869@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40870 #elif defined(__devinitconst)
40871 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40872 #else
40873-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40874+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40875 #endif
40876 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40877 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40878diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40879index 996f722..9127845 100644
40880--- a/drivers/scsi/aic94xx/aic94xx_init.c
40881+++ b/drivers/scsi/aic94xx/aic94xx_init.c
40882@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40883 flash_error_table[i].reason);
40884 }
40885
40886-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40887+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40888 asd_show_update_bios, asd_store_update_bios);
40889
40890 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40891@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40892 .lldd_control_phy = asd_control_phy,
40893 };
40894
40895-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40896+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40897 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40898 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40899 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40900diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40901index 58efd4b..cb48dc7 100644
40902--- a/drivers/scsi/bfa/bfa_ioc.h
40903+++ b/drivers/scsi/bfa/bfa_ioc.h
40904@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40905 bfa_ioc_disable_cbfn_t disable_cbfn;
40906 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40907 bfa_ioc_reset_cbfn_t reset_cbfn;
40908-};
40909+} __no_const;
40910
40911 /**
40912 * Heartbeat failure notification queue element.
40913diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40914index 7ad177e..5503586 100644
40915--- a/drivers/scsi/bfa/bfa_iocfc.h
40916+++ b/drivers/scsi/bfa/bfa_iocfc.h
40917@@ -61,7 +61,7 @@ struct bfa_hwif_s {
40918 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40919 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40920 u32 *nvecs, u32 *maxvec);
40921-};
40922+} __no_const;
40923 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40924
40925 struct bfa_iocfc_s {
40926diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40927index 4967643..cbec06b 100644
40928--- a/drivers/scsi/dpt_i2o.c
40929+++ b/drivers/scsi/dpt_i2o.c
40930@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40931 dma_addr_t addr;
40932 ulong flags = 0;
40933
40934+ pax_track_stack();
40935+
40936 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40937 // get user msg size in u32s
40938 if(get_user(size, &user_msg[0])){
40939@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40940 s32 rcode;
40941 dma_addr_t addr;
40942
40943+ pax_track_stack();
40944+
40945 memset(msg, 0 , sizeof(msg));
40946 len = scsi_bufflen(cmd);
40947 direction = 0x00000000;
40948diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40949index c7076ce..e20c67c 100644
40950--- a/drivers/scsi/eata.c
40951+++ b/drivers/scsi/eata.c
40952@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40953 struct hostdata *ha;
40954 char name[16];
40955
40956+ pax_track_stack();
40957+
40958 sprintf(name, "%s%d", driver_name, j);
40959
40960 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40961diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40962index 11ae5c9..891daec 100644
40963--- a/drivers/scsi/fcoe/libfcoe.c
40964+++ b/drivers/scsi/fcoe/libfcoe.c
40965@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40966 size_t rlen;
40967 size_t dlen;
40968
40969+ pax_track_stack();
40970+
40971 fiph = (struct fip_header *)skb->data;
40972 sub = fiph->fip_subcode;
40973 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40974diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40975index 71c7bbe..e93088a 100644
40976--- a/drivers/scsi/fnic/fnic_main.c
40977+++ b/drivers/scsi/fnic/fnic_main.c
40978@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40979 /* Start local port initiatialization */
40980
40981 lp->link_up = 0;
40982- lp->tt = fnic_transport_template;
40983+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40984
40985 lp->max_retry_count = fnic->config.flogi_retries;
40986 lp->max_rport_retry_count = fnic->config.plogi_retries;
40987diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40988index bb96d74..9ec3ce4 100644
40989--- a/drivers/scsi/gdth.c
40990+++ b/drivers/scsi/gdth.c
40991@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40992 ulong flags;
40993 gdth_ha_str *ha;
40994
40995+ pax_track_stack();
40996+
40997 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40998 return -EFAULT;
40999 ha = gdth_find_ha(ldrv.ionode);
41000@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
41001 gdth_ha_str *ha;
41002 int rval;
41003
41004+ pax_track_stack();
41005+
41006 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
41007 res.number >= MAX_HDRIVES)
41008 return -EFAULT;
41009@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
41010 gdth_ha_str *ha;
41011 int rval;
41012
41013+ pax_track_stack();
41014+
41015 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
41016 return -EFAULT;
41017 ha = gdth_find_ha(gen.ionode);
41018@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
41019 int i;
41020 gdth_cmd_str gdtcmd;
41021 char cmnd[MAX_COMMAND_SIZE];
41022+
41023+ pax_track_stack();
41024+
41025 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
41026
41027 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
41028diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
41029index 1258da3..20d8ae6 100644
41030--- a/drivers/scsi/gdth_proc.c
41031+++ b/drivers/scsi/gdth_proc.c
41032@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
41033 ulong64 paddr;
41034
41035 char cmnd[MAX_COMMAND_SIZE];
41036+
41037+ pax_track_stack();
41038+
41039 memset(cmnd, 0xff, 12);
41040 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
41041
41042@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
41043 gdth_hget_str *phg;
41044 char cmnd[MAX_COMMAND_SIZE];
41045
41046+ pax_track_stack();
41047+
41048 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
41049 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
41050 if (!gdtcmd || !estr)
41051diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
41052index d03a926..f324286 100644
41053--- a/drivers/scsi/hosts.c
41054+++ b/drivers/scsi/hosts.c
41055@@ -40,7 +40,7 @@
41056 #include "scsi_logging.h"
41057
41058
41059-static atomic_t scsi_host_next_hn; /* host_no for next new host */
41060+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
41061
41062
41063 static void scsi_host_cls_release(struct device *dev)
41064@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
41065 * subtract one because we increment first then return, but we need to
41066 * know what the next host number was before increment
41067 */
41068- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
41069+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
41070 shost->dma_channel = 0xff;
41071
41072 /* These three are default values which can be overridden */
41073diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
41074index a601159..55e19d2 100644
41075--- a/drivers/scsi/ipr.c
41076+++ b/drivers/scsi/ipr.c
41077@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
41078 return true;
41079 }
41080
41081-static struct ata_port_operations ipr_sata_ops = {
41082+static const struct ata_port_operations ipr_sata_ops = {
41083 .phy_reset = ipr_ata_phy_reset,
41084 .hardreset = ipr_sata_reset,
41085 .post_internal_cmd = ipr_ata_post_internal,
41086diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
41087index 4e49fbc..97907ff 100644
41088--- a/drivers/scsi/ips.h
41089+++ b/drivers/scsi/ips.h
41090@@ -1027,7 +1027,7 @@ typedef struct {
41091 int (*intr)(struct ips_ha *);
41092 void (*enableint)(struct ips_ha *);
41093 uint32_t (*statupd)(struct ips_ha *);
41094-} ips_hw_func_t;
41095+} __no_const ips_hw_func_t;
41096
41097 typedef struct ips_ha {
41098 uint8_t ha_id[IPS_MAX_CHANNELS+1];
41099diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
41100index c1c1574..a9c9348 100644
41101--- a/drivers/scsi/libfc/fc_exch.c
41102+++ b/drivers/scsi/libfc/fc_exch.c
41103@@ -86,12 +86,12 @@ struct fc_exch_mgr {
41104 * all together if not used XXX
41105 */
41106 struct {
41107- atomic_t no_free_exch;
41108- atomic_t no_free_exch_xid;
41109- atomic_t xid_not_found;
41110- atomic_t xid_busy;
41111- atomic_t seq_not_found;
41112- atomic_t non_bls_resp;
41113+ atomic_unchecked_t no_free_exch;
41114+ atomic_unchecked_t no_free_exch_xid;
41115+ atomic_unchecked_t xid_not_found;
41116+ atomic_unchecked_t xid_busy;
41117+ atomic_unchecked_t seq_not_found;
41118+ atomic_unchecked_t non_bls_resp;
41119 } stats;
41120 };
41121 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
41122@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
41123 /* allocate memory for exchange */
41124 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
41125 if (!ep) {
41126- atomic_inc(&mp->stats.no_free_exch);
41127+ atomic_inc_unchecked(&mp->stats.no_free_exch);
41128 goto out;
41129 }
41130 memset(ep, 0, sizeof(*ep));
41131@@ -557,7 +557,7 @@ out:
41132 return ep;
41133 err:
41134 spin_unlock_bh(&pool->lock);
41135- atomic_inc(&mp->stats.no_free_exch_xid);
41136+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
41137 mempool_free(ep, mp->ep_pool);
41138 return NULL;
41139 }
41140@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41141 xid = ntohs(fh->fh_ox_id); /* we originated exch */
41142 ep = fc_exch_find(mp, xid);
41143 if (!ep) {
41144- atomic_inc(&mp->stats.xid_not_found);
41145+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41146 reject = FC_RJT_OX_ID;
41147 goto out;
41148 }
41149@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41150 ep = fc_exch_find(mp, xid);
41151 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
41152 if (ep) {
41153- atomic_inc(&mp->stats.xid_busy);
41154+ atomic_inc_unchecked(&mp->stats.xid_busy);
41155 reject = FC_RJT_RX_ID;
41156 goto rel;
41157 }
41158@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41159 }
41160 xid = ep->xid; /* get our XID */
41161 } else if (!ep) {
41162- atomic_inc(&mp->stats.xid_not_found);
41163+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41164 reject = FC_RJT_RX_ID; /* XID not found */
41165 goto out;
41166 }
41167@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41168 } else {
41169 sp = &ep->seq;
41170 if (sp->id != fh->fh_seq_id) {
41171- atomic_inc(&mp->stats.seq_not_found);
41172+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41173 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
41174 goto rel;
41175 }
41176@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41177
41178 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41179 if (!ep) {
41180- atomic_inc(&mp->stats.xid_not_found);
41181+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41182 goto out;
41183 }
41184 if (ep->esb_stat & ESB_ST_COMPLETE) {
41185- atomic_inc(&mp->stats.xid_not_found);
41186+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41187 goto out;
41188 }
41189 if (ep->rxid == FC_XID_UNKNOWN)
41190 ep->rxid = ntohs(fh->fh_rx_id);
41191 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41192- atomic_inc(&mp->stats.xid_not_found);
41193+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41194 goto rel;
41195 }
41196 if (ep->did != ntoh24(fh->fh_s_id) &&
41197 ep->did != FC_FID_FLOGI) {
41198- atomic_inc(&mp->stats.xid_not_found);
41199+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41200 goto rel;
41201 }
41202 sof = fr_sof(fp);
41203@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41204 } else {
41205 sp = &ep->seq;
41206 if (sp->id != fh->fh_seq_id) {
41207- atomic_inc(&mp->stats.seq_not_found);
41208+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41209 goto rel;
41210 }
41211 }
41212@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41213 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41214
41215 if (!sp)
41216- atomic_inc(&mp->stats.xid_not_found);
41217+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41218 else
41219- atomic_inc(&mp->stats.non_bls_resp);
41220+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
41221
41222 fc_frame_free(fp);
41223 }
41224diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41225index 0ee989f..a582241 100644
41226--- a/drivers/scsi/libsas/sas_ata.c
41227+++ b/drivers/scsi/libsas/sas_ata.c
41228@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
41229 }
41230 }
41231
41232-static struct ata_port_operations sas_sata_ops = {
41233+static const struct ata_port_operations sas_sata_ops = {
41234 .phy_reset = sas_ata_phy_reset,
41235 .post_internal_cmd = sas_ata_post_internal,
41236 .qc_defer = ata_std_qc_defer,
41237diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41238index aa10f79..5cc79e4 100644
41239--- a/drivers/scsi/lpfc/lpfc.h
41240+++ b/drivers/scsi/lpfc/lpfc.h
41241@@ -400,7 +400,7 @@ struct lpfc_vport {
41242 struct dentry *debug_nodelist;
41243 struct dentry *vport_debugfs_root;
41244 struct lpfc_debugfs_trc *disc_trc;
41245- atomic_t disc_trc_cnt;
41246+ atomic_unchecked_t disc_trc_cnt;
41247 #endif
41248 uint8_t stat_data_enabled;
41249 uint8_t stat_data_blocked;
41250@@ -725,8 +725,8 @@ struct lpfc_hba {
41251 struct timer_list fabric_block_timer;
41252 unsigned long bit_flags;
41253 #define FABRIC_COMANDS_BLOCKED 0
41254- atomic_t num_rsrc_err;
41255- atomic_t num_cmd_success;
41256+ atomic_unchecked_t num_rsrc_err;
41257+ atomic_unchecked_t num_cmd_success;
41258 unsigned long last_rsrc_error_time;
41259 unsigned long last_ramp_down_time;
41260 unsigned long last_ramp_up_time;
41261@@ -740,7 +740,7 @@ struct lpfc_hba {
41262 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
41263 struct dentry *debug_slow_ring_trc;
41264 struct lpfc_debugfs_trc *slow_ring_trc;
41265- atomic_t slow_ring_trc_cnt;
41266+ atomic_unchecked_t slow_ring_trc_cnt;
41267 #endif
41268
41269 /* Used for deferred freeing of ELS data buffers */
41270diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41271index 8d0f0de..7c77a62 100644
41272--- a/drivers/scsi/lpfc/lpfc_debugfs.c
41273+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41274@@ -124,7 +124,7 @@ struct lpfc_debug {
41275 int len;
41276 };
41277
41278-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41279+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41280 static unsigned long lpfc_debugfs_start_time = 0L;
41281
41282 /**
41283@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41284 lpfc_debugfs_enable = 0;
41285
41286 len = 0;
41287- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41288+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41289 (lpfc_debugfs_max_disc_trc - 1);
41290 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41291 dtp = vport->disc_trc + i;
41292@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41293 lpfc_debugfs_enable = 0;
41294
41295 len = 0;
41296- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41297+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41298 (lpfc_debugfs_max_slow_ring_trc - 1);
41299 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41300 dtp = phba->slow_ring_trc + i;
41301@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41302 uint32_t *ptr;
41303 char buffer[1024];
41304
41305+ pax_track_stack();
41306+
41307 off = 0;
41308 spin_lock_irq(&phba->hbalock);
41309
41310@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41311 !vport || !vport->disc_trc)
41312 return;
41313
41314- index = atomic_inc_return(&vport->disc_trc_cnt) &
41315+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41316 (lpfc_debugfs_max_disc_trc - 1);
41317 dtp = vport->disc_trc + index;
41318 dtp->fmt = fmt;
41319 dtp->data1 = data1;
41320 dtp->data2 = data2;
41321 dtp->data3 = data3;
41322- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41323+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41324 dtp->jif = jiffies;
41325 #endif
41326 return;
41327@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41328 !phba || !phba->slow_ring_trc)
41329 return;
41330
41331- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41332+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41333 (lpfc_debugfs_max_slow_ring_trc - 1);
41334 dtp = phba->slow_ring_trc + index;
41335 dtp->fmt = fmt;
41336 dtp->data1 = data1;
41337 dtp->data2 = data2;
41338 dtp->data3 = data3;
41339- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41340+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41341 dtp->jif = jiffies;
41342 #endif
41343 return;
41344@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41345 "slow_ring buffer\n");
41346 goto debug_failed;
41347 }
41348- atomic_set(&phba->slow_ring_trc_cnt, 0);
41349+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41350 memset(phba->slow_ring_trc, 0,
41351 (sizeof(struct lpfc_debugfs_trc) *
41352 lpfc_debugfs_max_slow_ring_trc));
41353@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41354 "buffer\n");
41355 goto debug_failed;
41356 }
41357- atomic_set(&vport->disc_trc_cnt, 0);
41358+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41359
41360 snprintf(name, sizeof(name), "discovery_trace");
41361 vport->debug_disc_trc =
41362diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41363index 549bc7d..8189dbb 100644
41364--- a/drivers/scsi/lpfc/lpfc_init.c
41365+++ b/drivers/scsi/lpfc/lpfc_init.c
41366@@ -8021,8 +8021,10 @@ lpfc_init(void)
41367 printk(LPFC_COPYRIGHT "\n");
41368
41369 if (lpfc_enable_npiv) {
41370- lpfc_transport_functions.vport_create = lpfc_vport_create;
41371- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41372+ pax_open_kernel();
41373+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41374+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41375+ pax_close_kernel();
41376 }
41377 lpfc_transport_template =
41378 fc_attach_transport(&lpfc_transport_functions);
41379diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41380index c88f59f..ff2a42f 100644
41381--- a/drivers/scsi/lpfc/lpfc_scsi.c
41382+++ b/drivers/scsi/lpfc/lpfc_scsi.c
41383@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41384 uint32_t evt_posted;
41385
41386 spin_lock_irqsave(&phba->hbalock, flags);
41387- atomic_inc(&phba->num_rsrc_err);
41388+ atomic_inc_unchecked(&phba->num_rsrc_err);
41389 phba->last_rsrc_error_time = jiffies;
41390
41391 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41392@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41393 unsigned long flags;
41394 struct lpfc_hba *phba = vport->phba;
41395 uint32_t evt_posted;
41396- atomic_inc(&phba->num_cmd_success);
41397+ atomic_inc_unchecked(&phba->num_cmd_success);
41398
41399 if (vport->cfg_lun_queue_depth <= queue_depth)
41400 return;
41401@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41402 int i;
41403 struct lpfc_rport_data *rdata;
41404
41405- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41406- num_cmd_success = atomic_read(&phba->num_cmd_success);
41407+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41408+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41409
41410 vports = lpfc_create_vport_work_array(phba);
41411 if (vports != NULL)
41412@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41413 }
41414 }
41415 lpfc_destroy_vport_work_array(phba, vports);
41416- atomic_set(&phba->num_rsrc_err, 0);
41417- atomic_set(&phba->num_cmd_success, 0);
41418+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41419+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41420 }
41421
41422 /**
41423@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41424 }
41425 }
41426 lpfc_destroy_vport_work_array(phba, vports);
41427- atomic_set(&phba->num_rsrc_err, 0);
41428- atomic_set(&phba->num_cmd_success, 0);
41429+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41430+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41431 }
41432
41433 /**
41434diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41435index 234f0b7..3020aea 100644
41436--- a/drivers/scsi/megaraid/megaraid_mbox.c
41437+++ b/drivers/scsi/megaraid/megaraid_mbox.c
41438@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41439 int rval;
41440 int i;
41441
41442+ pax_track_stack();
41443+
41444 // Allocate memory for the base list of scb for management module.
41445 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41446
41447diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41448index 7a117c1..ee01e9e 100644
41449--- a/drivers/scsi/osd/osd_initiator.c
41450+++ b/drivers/scsi/osd/osd_initiator.c
41451@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41452 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41453 int ret;
41454
41455+ pax_track_stack();
41456+
41457 or = osd_start_request(od, GFP_KERNEL);
41458 if (!or)
41459 return -ENOMEM;
41460diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41461index 9ab8c86..9425ad3 100644
41462--- a/drivers/scsi/pmcraid.c
41463+++ b/drivers/scsi/pmcraid.c
41464@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41465 res->scsi_dev = scsi_dev;
41466 scsi_dev->hostdata = res;
41467 res->change_detected = 0;
41468- atomic_set(&res->read_failures, 0);
41469- atomic_set(&res->write_failures, 0);
41470+ atomic_set_unchecked(&res->read_failures, 0);
41471+ atomic_set_unchecked(&res->write_failures, 0);
41472 rc = 0;
41473 }
41474 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41475@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41476
41477 /* If this was a SCSI read/write command keep count of errors */
41478 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41479- atomic_inc(&res->read_failures);
41480+ atomic_inc_unchecked(&res->read_failures);
41481 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41482- atomic_inc(&res->write_failures);
41483+ atomic_inc_unchecked(&res->write_failures);
41484
41485 if (!RES_IS_GSCSI(res->cfg_entry) &&
41486 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41487@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41488
41489 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41490 /* add resources only after host is added into system */
41491- if (!atomic_read(&pinstance->expose_resources))
41492+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41493 return;
41494
41495 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41496@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41497 init_waitqueue_head(&pinstance->reset_wait_q);
41498
41499 atomic_set(&pinstance->outstanding_cmds, 0);
41500- atomic_set(&pinstance->expose_resources, 0);
41501+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41502
41503 INIT_LIST_HEAD(&pinstance->free_res_q);
41504 INIT_LIST_HEAD(&pinstance->used_res_q);
41505@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41506 /* Schedule worker thread to handle CCN and take care of adding and
41507 * removing devices to OS
41508 */
41509- atomic_set(&pinstance->expose_resources, 1);
41510+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41511 schedule_work(&pinstance->worker_q);
41512 return rc;
41513
41514diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41515index 3441b3f..6cbe8f7 100644
41516--- a/drivers/scsi/pmcraid.h
41517+++ b/drivers/scsi/pmcraid.h
41518@@ -690,7 +690,7 @@ struct pmcraid_instance {
41519 atomic_t outstanding_cmds;
41520
41521 /* should add/delete resources to mid-layer now ?*/
41522- atomic_t expose_resources;
41523+ atomic_unchecked_t expose_resources;
41524
41525 /* Tasklet to handle deferred processing */
41526 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41527@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41528 struct list_head queue; /* link to "to be exposed" resources */
41529 struct pmcraid_config_table_entry cfg_entry;
41530 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41531- atomic_t read_failures; /* count of failed READ commands */
41532- atomic_t write_failures; /* count of failed WRITE commands */
41533+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41534+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41535
41536 /* To indicate add/delete/modify during CCN */
41537 u8 change_detected;
41538diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41539index 2150618..7034215 100644
41540--- a/drivers/scsi/qla2xxx/qla_def.h
41541+++ b/drivers/scsi/qla2xxx/qla_def.h
41542@@ -2089,7 +2089,7 @@ struct isp_operations {
41543
41544 int (*get_flash_version) (struct scsi_qla_host *, void *);
41545 int (*start_scsi) (srb_t *);
41546-};
41547+} __no_const;
41548
41549 /* MSI-X Support *************************************************************/
41550
41551diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41552index 81b5f29..2ae1fad 100644
41553--- a/drivers/scsi/qla4xxx/ql4_def.h
41554+++ b/drivers/scsi/qla4xxx/ql4_def.h
41555@@ -240,7 +240,7 @@ struct ddb_entry {
41556 atomic_t retry_relogin_timer; /* Min Time between relogins
41557 * (4000 only) */
41558 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41559- atomic_t relogin_retry_count; /* Num of times relogin has been
41560+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41561 * retried */
41562
41563 uint16_t port;
41564diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41565index af8c323..515dd51 100644
41566--- a/drivers/scsi/qla4xxx/ql4_init.c
41567+++ b/drivers/scsi/qla4xxx/ql4_init.c
41568@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41569 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41570 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41571 atomic_set(&ddb_entry->relogin_timer, 0);
41572- atomic_set(&ddb_entry->relogin_retry_count, 0);
41573+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41574 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41575 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41576 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41577@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41578 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41579 atomic_set(&ddb_entry->port_down_timer,
41580 ha->port_down_retry_count);
41581- atomic_set(&ddb_entry->relogin_retry_count, 0);
41582+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41583 atomic_set(&ddb_entry->relogin_timer, 0);
41584 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41585 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41586diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41587index 83c8b5e..a82b348 100644
41588--- a/drivers/scsi/qla4xxx/ql4_os.c
41589+++ b/drivers/scsi/qla4xxx/ql4_os.c
41590@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41591 ddb_entry->fw_ddb_device_state ==
41592 DDB_DS_SESSION_FAILED) {
41593 /* Reset retry relogin timer */
41594- atomic_inc(&ddb_entry->relogin_retry_count);
41595+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41596 DEBUG2(printk("scsi%ld: index[%d] relogin"
41597 " timed out-retrying"
41598 " relogin (%d)\n",
41599 ha->host_no,
41600 ddb_entry->fw_ddb_index,
41601- atomic_read(&ddb_entry->
41602+ atomic_read_unchecked(&ddb_entry->
41603 relogin_retry_count))
41604 );
41605 start_dpc++;
41606diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41607index dd098ca..686ce01 100644
41608--- a/drivers/scsi/scsi.c
41609+++ b/drivers/scsi/scsi.c
41610@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41611 unsigned long timeout;
41612 int rtn = 0;
41613
41614- atomic_inc(&cmd->device->iorequest_cnt);
41615+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41616
41617 /* check if the device is still usable */
41618 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41619diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41620index bc3e363..e1a8e50 100644
41621--- a/drivers/scsi/scsi_debug.c
41622+++ b/drivers/scsi/scsi_debug.c
41623@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41624 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41625 unsigned char *cmd = (unsigned char *)scp->cmnd;
41626
41627+ pax_track_stack();
41628+
41629 if ((errsts = check_readiness(scp, 1, devip)))
41630 return errsts;
41631 memset(arr, 0, sizeof(arr));
41632@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41633 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41634 unsigned char *cmd = (unsigned char *)scp->cmnd;
41635
41636+ pax_track_stack();
41637+
41638 if ((errsts = check_readiness(scp, 1, devip)))
41639 return errsts;
41640 memset(arr, 0, sizeof(arr));
41641diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41642index 8df12522..c4c1472 100644
41643--- a/drivers/scsi/scsi_lib.c
41644+++ b/drivers/scsi/scsi_lib.c
41645@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41646 shost = sdev->host;
41647 scsi_init_cmd_errh(cmd);
41648 cmd->result = DID_NO_CONNECT << 16;
41649- atomic_inc(&cmd->device->iorequest_cnt);
41650+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41651
41652 /*
41653 * SCSI request completion path will do scsi_device_unbusy(),
41654@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41655 */
41656 cmd->serial_number = 0;
41657
41658- atomic_inc(&cmd->device->iodone_cnt);
41659+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41660 if (cmd->result)
41661- atomic_inc(&cmd->device->ioerr_cnt);
41662+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41663
41664 disposition = scsi_decide_disposition(cmd);
41665 if (disposition != SUCCESS &&
41666diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41667index 91a93e0..eae0fe3 100644
41668--- a/drivers/scsi/scsi_sysfs.c
41669+++ b/drivers/scsi/scsi_sysfs.c
41670@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41671 char *buf) \
41672 { \
41673 struct scsi_device *sdev = to_scsi_device(dev); \
41674- unsigned long long count = atomic_read(&sdev->field); \
41675+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41676 return snprintf(buf, 20, "0x%llx\n", count); \
41677 } \
41678 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41679diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41680index 1030327..f91fd30 100644
41681--- a/drivers/scsi/scsi_tgt_lib.c
41682+++ b/drivers/scsi/scsi_tgt_lib.c
41683@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41684 int err;
41685
41686 dprintk("%lx %u\n", uaddr, len);
41687- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41688+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41689 if (err) {
41690 /*
41691 * TODO: need to fixup sg_tablesize, max_segment_size,
41692diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41693index db02e31..1b42ea9 100644
41694--- a/drivers/scsi/scsi_transport_fc.c
41695+++ b/drivers/scsi/scsi_transport_fc.c
41696@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41697 * Netlink Infrastructure
41698 */
41699
41700-static atomic_t fc_event_seq;
41701+static atomic_unchecked_t fc_event_seq;
41702
41703 /**
41704 * fc_get_event_number - Obtain the next sequential FC event number
41705@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41706 u32
41707 fc_get_event_number(void)
41708 {
41709- return atomic_add_return(1, &fc_event_seq);
41710+ return atomic_add_return_unchecked(1, &fc_event_seq);
41711 }
41712 EXPORT_SYMBOL(fc_get_event_number);
41713
41714@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41715 {
41716 int error;
41717
41718- atomic_set(&fc_event_seq, 0);
41719+ atomic_set_unchecked(&fc_event_seq, 0);
41720
41721 error = transport_class_register(&fc_host_class);
41722 if (error)
41723diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41724index de2f8c4..63c5278 100644
41725--- a/drivers/scsi/scsi_transport_iscsi.c
41726+++ b/drivers/scsi/scsi_transport_iscsi.c
41727@@ -81,7 +81,7 @@ struct iscsi_internal {
41728 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41729 };
41730
41731-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41732+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41733 static struct workqueue_struct *iscsi_eh_timer_workq;
41734
41735 /*
41736@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41737 int err;
41738
41739 ihost = shost->shost_data;
41740- session->sid = atomic_add_return(1, &iscsi_session_nr);
41741+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41742
41743 if (id == ISCSI_MAX_TARGET) {
41744 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41745@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41746 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41747 ISCSI_TRANSPORT_VERSION);
41748
41749- atomic_set(&iscsi_session_nr, 0);
41750+ atomic_set_unchecked(&iscsi_session_nr, 0);
41751
41752 err = class_register(&iscsi_transport_class);
41753 if (err)
41754diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41755index 21a045e..ec89e03 100644
41756--- a/drivers/scsi/scsi_transport_srp.c
41757+++ b/drivers/scsi/scsi_transport_srp.c
41758@@ -33,7 +33,7 @@
41759 #include "scsi_transport_srp_internal.h"
41760
41761 struct srp_host_attrs {
41762- atomic_t next_port_id;
41763+ atomic_unchecked_t next_port_id;
41764 };
41765 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41766
41767@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41768 struct Scsi_Host *shost = dev_to_shost(dev);
41769 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41770
41771- atomic_set(&srp_host->next_port_id, 0);
41772+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41773 return 0;
41774 }
41775
41776@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41777 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41778 rport->roles = ids->roles;
41779
41780- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41781+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41782 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41783
41784 transport_setup_device(&rport->dev);
41785diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41786index 040f751..98a5ed2 100644
41787--- a/drivers/scsi/sg.c
41788+++ b/drivers/scsi/sg.c
41789@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41790 sdp->disk->disk_name,
41791 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41792 NULL,
41793- (char *)arg);
41794+ (char __user *)arg);
41795 case BLKTRACESTART:
41796 return blk_trace_startstop(sdp->device->request_queue, 1);
41797 case BLKTRACESTOP:
41798@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41799 const struct file_operations * fops;
41800 };
41801
41802-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41803+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41804 {"allow_dio", &adio_fops},
41805 {"debug", &debug_fops},
41806 {"def_reserved_size", &dressz_fops},
41807@@ -2307,7 +2307,7 @@ sg_proc_init(void)
41808 {
41809 int k, mask;
41810 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41811- struct sg_proc_leaf * leaf;
41812+ const struct sg_proc_leaf * leaf;
41813
41814 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41815 if (!sg_proc_sgp)
41816diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41817index c19ca5e..3eb5959 100644
41818--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41819+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41820@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41821 int do_iounmap = 0;
41822 int do_disable_device = 1;
41823
41824+ pax_track_stack();
41825+
41826 memset(&sym_dev, 0, sizeof(sym_dev));
41827 memset(&nvram, 0, sizeof(nvram));
41828 sym_dev.pdev = pdev;
41829diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41830index eadc1ab..2d81457 100644
41831--- a/drivers/serial/kgdboc.c
41832+++ b/drivers/serial/kgdboc.c
41833@@ -18,7 +18,7 @@
41834
41835 #define MAX_CONFIG_LEN 40
41836
41837-static struct kgdb_io kgdboc_io_ops;
41838+static const struct kgdb_io kgdboc_io_ops;
41839
41840 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41841 static int configured = -1;
41842@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41843 module_put(THIS_MODULE);
41844 }
41845
41846-static struct kgdb_io kgdboc_io_ops = {
41847+static const struct kgdb_io kgdboc_io_ops = {
41848 .name = "kgdboc",
41849 .read_char = kgdboc_get_char,
41850 .write_char = kgdboc_put_char,
41851diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41852index b76f246..7f41af7 100644
41853--- a/drivers/spi/spi.c
41854+++ b/drivers/spi/spi.c
41855@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41856 EXPORT_SYMBOL_GPL(spi_sync);
41857
41858 /* portable code must never pass more than 32 bytes */
41859-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41860+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41861
41862 static u8 *buf;
41863
41864diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41865index b9b37ff..19dfa23 100644
41866--- a/drivers/staging/android/binder.c
41867+++ b/drivers/staging/android/binder.c
41868@@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41869 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41870 }
41871
41872-static struct vm_operations_struct binder_vm_ops = {
41873+static const struct vm_operations_struct binder_vm_ops = {
41874 .open = binder_vma_open,
41875 .close = binder_vma_close,
41876 };
41877diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41878index cda26bb..39fed3f 100644
41879--- a/drivers/staging/b3dfg/b3dfg.c
41880+++ b/drivers/staging/b3dfg/b3dfg.c
41881@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41882 return VM_FAULT_NOPAGE;
41883 }
41884
41885-static struct vm_operations_struct b3dfg_vm_ops = {
41886+static const struct vm_operations_struct b3dfg_vm_ops = {
41887 .fault = b3dfg_vma_fault,
41888 };
41889
41890@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41891 return r;
41892 }
41893
41894-static struct file_operations b3dfg_fops = {
41895+static const struct file_operations b3dfg_fops = {
41896 .owner = THIS_MODULE,
41897 .open = b3dfg_open,
41898 .release = b3dfg_release,
41899diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41900index 908f25a..c9a579b 100644
41901--- a/drivers/staging/comedi/comedi_fops.c
41902+++ b/drivers/staging/comedi/comedi_fops.c
41903@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41904 mutex_unlock(&dev->mutex);
41905 }
41906
41907-static struct vm_operations_struct comedi_vm_ops = {
41908+static const struct vm_operations_struct comedi_vm_ops = {
41909 .close = comedi_unmap,
41910 };
41911
41912diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41913index e55a0db..577b776 100644
41914--- a/drivers/staging/dream/qdsp5/adsp_driver.c
41915+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41916@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41917 static dev_t adsp_devno;
41918 static struct class *adsp_class;
41919
41920-static struct file_operations adsp_fops = {
41921+static const struct file_operations adsp_fops = {
41922 .owner = THIS_MODULE,
41923 .open = adsp_open,
41924 .unlocked_ioctl = adsp_ioctl,
41925diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41926index ad2390f..4116ee8 100644
41927--- a/drivers/staging/dream/qdsp5/audio_aac.c
41928+++ b/drivers/staging/dream/qdsp5/audio_aac.c
41929@@ -1022,7 +1022,7 @@ done:
41930 return rc;
41931 }
41932
41933-static struct file_operations audio_aac_fops = {
41934+static const struct file_operations audio_aac_fops = {
41935 .owner = THIS_MODULE,
41936 .open = audio_open,
41937 .release = audio_release,
41938diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41939index cd818a5..870b37b 100644
41940--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41941+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41942@@ -833,7 +833,7 @@ done:
41943 return rc;
41944 }
41945
41946-static struct file_operations audio_amrnb_fops = {
41947+static const struct file_operations audio_amrnb_fops = {
41948 .owner = THIS_MODULE,
41949 .open = audamrnb_open,
41950 .release = audamrnb_release,
41951diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41952index 4b43e18..cedafda 100644
41953--- a/drivers/staging/dream/qdsp5/audio_evrc.c
41954+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41955@@ -805,7 +805,7 @@ dma_fail:
41956 return rc;
41957 }
41958
41959-static struct file_operations audio_evrc_fops = {
41960+static const struct file_operations audio_evrc_fops = {
41961 .owner = THIS_MODULE,
41962 .open = audevrc_open,
41963 .release = audevrc_release,
41964diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41965index 3d950a2..9431118 100644
41966--- a/drivers/staging/dream/qdsp5/audio_in.c
41967+++ b/drivers/staging/dream/qdsp5/audio_in.c
41968@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41969 return 0;
41970 }
41971
41972-static struct file_operations audio_fops = {
41973+static const struct file_operations audio_fops = {
41974 .owner = THIS_MODULE,
41975 .open = audio_in_open,
41976 .release = audio_in_release,
41977@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41978 .unlocked_ioctl = audio_in_ioctl,
41979 };
41980
41981-static struct file_operations audpre_fops = {
41982+static const struct file_operations audpre_fops = {
41983 .owner = THIS_MODULE,
41984 .open = audpre_open,
41985 .unlocked_ioctl = audpre_ioctl,
41986diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41987index b95574f..286c2f4 100644
41988--- a/drivers/staging/dream/qdsp5/audio_mp3.c
41989+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41990@@ -941,7 +941,7 @@ done:
41991 return rc;
41992 }
41993
41994-static struct file_operations audio_mp3_fops = {
41995+static const struct file_operations audio_mp3_fops = {
41996 .owner = THIS_MODULE,
41997 .open = audio_open,
41998 .release = audio_release,
41999diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
42000index d1adcf6..f8f9833 100644
42001--- a/drivers/staging/dream/qdsp5/audio_out.c
42002+++ b/drivers/staging/dream/qdsp5/audio_out.c
42003@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
42004 return 0;
42005 }
42006
42007-static struct file_operations audio_fops = {
42008+static const struct file_operations audio_fops = {
42009 .owner = THIS_MODULE,
42010 .open = audio_open,
42011 .release = audio_release,
42012@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
42013 .unlocked_ioctl = audio_ioctl,
42014 };
42015
42016-static struct file_operations audpp_fops = {
42017+static const struct file_operations audpp_fops = {
42018 .owner = THIS_MODULE,
42019 .open = audpp_open,
42020 .unlocked_ioctl = audpp_ioctl,
42021diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
42022index f0f50e3..f6b9dbc 100644
42023--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
42024+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
42025@@ -816,7 +816,7 @@ err:
42026 return rc;
42027 }
42028
42029-static struct file_operations audio_qcelp_fops = {
42030+static const struct file_operations audio_qcelp_fops = {
42031 .owner = THIS_MODULE,
42032 .open = audqcelp_open,
42033 .release = audqcelp_release,
42034diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
42035index 037d7ff..5469ec3 100644
42036--- a/drivers/staging/dream/qdsp5/snd.c
42037+++ b/drivers/staging/dream/qdsp5/snd.c
42038@@ -242,7 +242,7 @@ err:
42039 return rc;
42040 }
42041
42042-static struct file_operations snd_fops = {
42043+static const struct file_operations snd_fops = {
42044 .owner = THIS_MODULE,
42045 .open = snd_open,
42046 .release = snd_release,
42047diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
42048index d4e7d88..0ea632a 100644
42049--- a/drivers/staging/dream/smd/smd_qmi.c
42050+++ b/drivers/staging/dream/smd/smd_qmi.c
42051@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
42052 return 0;
42053 }
42054
42055-static struct file_operations qmi_fops = {
42056+static const struct file_operations qmi_fops = {
42057 .owner = THIS_MODULE,
42058 .read = qmi_read,
42059 .write = qmi_write,
42060diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42061index cd3910b..ff053d3 100644
42062--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
42063+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
42064@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
42065 return rc;
42066 }
42067
42068-static struct file_operations rpcrouter_server_fops = {
42069+static const struct file_operations rpcrouter_server_fops = {
42070 .owner = THIS_MODULE,
42071 .open = rpcrouter_open,
42072 .release = rpcrouter_release,
42073@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
42074 .unlocked_ioctl = rpcrouter_ioctl,
42075 };
42076
42077-static struct file_operations rpcrouter_router_fops = {
42078+static const struct file_operations rpcrouter_router_fops = {
42079 .owner = THIS_MODULE,
42080 .open = rpcrouter_open,
42081 .release = rpcrouter_release,
42082diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
42083index c24e4e0..07665be 100644
42084--- a/drivers/staging/dst/dcore.c
42085+++ b/drivers/staging/dst/dcore.c
42086@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
42087 return 0;
42088 }
42089
42090-static struct block_device_operations dst_blk_ops = {
42091+static const struct block_device_operations dst_blk_ops = {
42092 .open = dst_bdev_open,
42093 .release = dst_bdev_release,
42094 .owner = THIS_MODULE,
42095@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
42096 n->size = ctl->size;
42097
42098 atomic_set(&n->refcnt, 1);
42099- atomic_long_set(&n->gen, 0);
42100+ atomic_long_set_unchecked(&n->gen, 0);
42101 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
42102
42103 err = dst_node_sysfs_init(n);
42104diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
42105index 557d372..8d84422 100644
42106--- a/drivers/staging/dst/trans.c
42107+++ b/drivers/staging/dst/trans.c
42108@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
42109 t->error = 0;
42110 t->retries = 0;
42111 atomic_set(&t->refcnt, 1);
42112- t->gen = atomic_long_inc_return(&n->gen);
42113+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
42114
42115 t->enc = bio_data_dir(bio);
42116 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
42117diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
42118index 94f7752..d051514 100644
42119--- a/drivers/staging/et131x/et1310_tx.c
42120+++ b/drivers/staging/et131x/et1310_tx.c
42121@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
42122 struct net_device_stats *stats = &etdev->net_stats;
42123
42124 if (pMpTcb->Flags & fMP_DEST_BROAD)
42125- atomic_inc(&etdev->Stats.brdcstxmt);
42126+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
42127 else if (pMpTcb->Flags & fMP_DEST_MULTI)
42128- atomic_inc(&etdev->Stats.multixmt);
42129+ atomic_inc_unchecked(&etdev->Stats.multixmt);
42130 else
42131- atomic_inc(&etdev->Stats.unixmt);
42132+ atomic_inc_unchecked(&etdev->Stats.unixmt);
42133
42134 if (pMpTcb->Packet) {
42135 stats->tx_bytes += pMpTcb->Packet->len;
42136diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
42137index 1dfe06f..f469b4d 100644
42138--- a/drivers/staging/et131x/et131x_adapter.h
42139+++ b/drivers/staging/et131x/et131x_adapter.h
42140@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
42141 * operations
42142 */
42143 u32 unircv; /* # multicast packets received */
42144- atomic_t unixmt; /* # multicast packets for Tx */
42145+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
42146 u32 multircv; /* # multicast packets received */
42147- atomic_t multixmt; /* # multicast packets for Tx */
42148+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
42149 u32 brdcstrcv; /* # broadcast packets received */
42150- atomic_t brdcstxmt; /* # broadcast packets for Tx */
42151+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
42152 u32 norcvbuf; /* # Rx packets discarded */
42153 u32 noxmtbuf; /* # Tx packets discarded */
42154
42155diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
42156index 4bd353a..e28f455 100644
42157--- a/drivers/staging/go7007/go7007-v4l2.c
42158+++ b/drivers/staging/go7007/go7007-v4l2.c
42159@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42160 return 0;
42161 }
42162
42163-static struct vm_operations_struct go7007_vm_ops = {
42164+static const struct vm_operations_struct go7007_vm_ops = {
42165 .open = go7007_vm_open,
42166 .close = go7007_vm_close,
42167 .fault = go7007_vm_fault,
42168diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
42169index 366dc95..b974d87 100644
42170--- a/drivers/staging/hv/Channel.c
42171+++ b/drivers/staging/hv/Channel.c
42172@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
42173
42174 DPRINT_ENTER(VMBUS);
42175
42176- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
42177- atomic_inc(&gVmbusConnection.NextGpadlHandle);
42178+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
42179+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
42180
42181 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
42182 ASSERT(msgInfo != NULL);
42183diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
42184index b12237f..01ae28a 100644
42185--- a/drivers/staging/hv/Hv.c
42186+++ b/drivers/staging/hv/Hv.c
42187@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
42188 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
42189 u32 outputAddressHi = outputAddress >> 32;
42190 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
42191- volatile void *hypercallPage = gHvContext.HypercallPage;
42192+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
42193
42194 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
42195 Control, Input, Output);
42196diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
42197index d089bb1..2ebc158 100644
42198--- a/drivers/staging/hv/VmbusApi.h
42199+++ b/drivers/staging/hv/VmbusApi.h
42200@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
42201 u32 *GpadlHandle);
42202 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
42203 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
42204-};
42205+} __no_const;
42206
42207 /* Base driver object */
42208 struct hv_driver {
42209diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
42210index 5a37cce..6ecc88c 100644
42211--- a/drivers/staging/hv/VmbusPrivate.h
42212+++ b/drivers/staging/hv/VmbusPrivate.h
42213@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
42214 struct VMBUS_CONNECTION {
42215 enum VMBUS_CONNECT_STATE ConnectState;
42216
42217- atomic_t NextGpadlHandle;
42218+ atomic_unchecked_t NextGpadlHandle;
42219
42220 /*
42221 * Represents channel interrupts. Each bit position represents a
42222diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
42223index 871a202..ca50ddf 100644
42224--- a/drivers/staging/hv/blkvsc_drv.c
42225+++ b/drivers/staging/hv/blkvsc_drv.c
42226@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
42227 /* The one and only one */
42228 static struct blkvsc_driver_context g_blkvsc_drv;
42229
42230-static struct block_device_operations block_ops = {
42231+static const struct block_device_operations block_ops = {
42232 .owner = THIS_MODULE,
42233 .open = blkvsc_open,
42234 .release = blkvsc_release,
42235diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
42236index 6acc49a..fbc8d46 100644
42237--- a/drivers/staging/hv/vmbus_drv.c
42238+++ b/drivers/staging/hv/vmbus_drv.c
42239@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42240 to_device_context(root_device_obj);
42241 struct device_context *child_device_ctx =
42242 to_device_context(child_device_obj);
42243- static atomic_t device_num = ATOMIC_INIT(0);
42244+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
42245
42246 DPRINT_ENTER(VMBUS_DRV);
42247
42248@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
42249
42250 /* Set the device name. Otherwise, device_register() will fail. */
42251 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
42252- atomic_inc_return(&device_num));
42253+ atomic_inc_return_unchecked(&device_num));
42254
42255 /* The new device belongs to this bus */
42256 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
42257diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
42258index d926189..17b19fd 100644
42259--- a/drivers/staging/iio/ring_generic.h
42260+++ b/drivers/staging/iio/ring_generic.h
42261@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
42262
42263 int (*is_enabled)(struct iio_ring_buffer *ring);
42264 int (*enable)(struct iio_ring_buffer *ring);
42265-};
42266+} __no_const;
42267
42268 /**
42269 * struct iio_ring_buffer - general ring buffer structure
42270diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42271index 1b237b7..88c624e 100644
42272--- a/drivers/staging/octeon/ethernet-rx.c
42273+++ b/drivers/staging/octeon/ethernet-rx.c
42274@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42275 /* Increment RX stats for virtual ports */
42276 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42277 #ifdef CONFIG_64BIT
42278- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42279- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42280+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42281+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42282 #else
42283- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42284- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42285+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42286+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42287 #endif
42288 }
42289 netif_receive_skb(skb);
42290@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42291 dev->name);
42292 */
42293 #ifdef CONFIG_64BIT
42294- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42295+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42296 #else
42297- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42298+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42299 #endif
42300 dev_kfree_skb_irq(skb);
42301 }
42302diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42303index 492c502..d9909f1 100644
42304--- a/drivers/staging/octeon/ethernet.c
42305+++ b/drivers/staging/octeon/ethernet.c
42306@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42307 * since the RX tasklet also increments it.
42308 */
42309 #ifdef CONFIG_64BIT
42310- atomic64_add(rx_status.dropped_packets,
42311- (atomic64_t *)&priv->stats.rx_dropped);
42312+ atomic64_add_unchecked(rx_status.dropped_packets,
42313+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42314 #else
42315- atomic_add(rx_status.dropped_packets,
42316- (atomic_t *)&priv->stats.rx_dropped);
42317+ atomic_add_unchecked(rx_status.dropped_packets,
42318+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42319 #endif
42320 }
42321
42322diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42323index a35bd5d..28fff45 100644
42324--- a/drivers/staging/otus/80211core/pub_zfi.h
42325+++ b/drivers/staging/otus/80211core/pub_zfi.h
42326@@ -531,7 +531,7 @@ struct zsCbFuncTbl
42327 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42328
42329 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42330-};
42331+} __no_const;
42332
42333 extern void zfZeroMemory(u8_t* va, u16_t length);
42334 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42335diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42336index c39a25f..696f5aa 100644
42337--- a/drivers/staging/panel/panel.c
42338+++ b/drivers/staging/panel/panel.c
42339@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42340 return 0;
42341 }
42342
42343-static struct file_operations lcd_fops = {
42344+static const struct file_operations lcd_fops = {
42345 .write = lcd_write,
42346 .open = lcd_open,
42347 .release = lcd_release,
42348@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42349 return 0;
42350 }
42351
42352-static struct file_operations keypad_fops = {
42353+static const struct file_operations keypad_fops = {
42354 .read = keypad_read, /* read */
42355 .open = keypad_open, /* open */
42356 .release = keypad_release, /* close */
42357diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42358index 270ebcb..37e46af 100644
42359--- a/drivers/staging/phison/phison.c
42360+++ b/drivers/staging/phison/phison.c
42361@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42362 ATA_BMDMA_SHT(DRV_NAME),
42363 };
42364
42365-static struct ata_port_operations phison_ops = {
42366+static const struct ata_port_operations phison_ops = {
42367 .inherits = &ata_bmdma_port_ops,
42368 .prereset = phison_pre_reset,
42369 };
42370diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42371index 2eb8e3d..57616a7 100644
42372--- a/drivers/staging/poch/poch.c
42373+++ b/drivers/staging/poch/poch.c
42374@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42375 return 0;
42376 }
42377
42378-static struct file_operations poch_fops = {
42379+static const struct file_operations poch_fops = {
42380 .owner = THIS_MODULE,
42381 .open = poch_open,
42382 .release = poch_release,
42383diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42384index c94de31..19402bc 100644
42385--- a/drivers/staging/pohmelfs/inode.c
42386+++ b/drivers/staging/pohmelfs/inode.c
42387@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42388 mutex_init(&psb->mcache_lock);
42389 psb->mcache_root = RB_ROOT;
42390 psb->mcache_timeout = msecs_to_jiffies(5000);
42391- atomic_long_set(&psb->mcache_gen, 0);
42392+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
42393
42394 psb->trans_max_pages = 100;
42395
42396@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42397 INIT_LIST_HEAD(&psb->crypto_ready_list);
42398 INIT_LIST_HEAD(&psb->crypto_active_list);
42399
42400- atomic_set(&psb->trans_gen, 1);
42401+ atomic_set_unchecked(&psb->trans_gen, 1);
42402 atomic_long_set(&psb->total_inodes, 0);
42403
42404 mutex_init(&psb->state_lock);
42405diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42406index e22665c..a2a9390 100644
42407--- a/drivers/staging/pohmelfs/mcache.c
42408+++ b/drivers/staging/pohmelfs/mcache.c
42409@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42410 m->data = data;
42411 m->start = start;
42412 m->size = size;
42413- m->gen = atomic_long_inc_return(&psb->mcache_gen);
42414+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42415
42416 mutex_lock(&psb->mcache_lock);
42417 err = pohmelfs_mcache_insert(psb, m);
42418diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42419index 623a07d..4035c19 100644
42420--- a/drivers/staging/pohmelfs/netfs.h
42421+++ b/drivers/staging/pohmelfs/netfs.h
42422@@ -570,14 +570,14 @@ struct pohmelfs_config;
42423 struct pohmelfs_sb {
42424 struct rb_root mcache_root;
42425 struct mutex mcache_lock;
42426- atomic_long_t mcache_gen;
42427+ atomic_long_unchecked_t mcache_gen;
42428 unsigned long mcache_timeout;
42429
42430 unsigned int idx;
42431
42432 unsigned int trans_retries;
42433
42434- atomic_t trans_gen;
42435+ atomic_unchecked_t trans_gen;
42436
42437 unsigned int crypto_attached_size;
42438 unsigned int crypto_align_size;
42439diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42440index 36a2535..0591bf4 100644
42441--- a/drivers/staging/pohmelfs/trans.c
42442+++ b/drivers/staging/pohmelfs/trans.c
42443@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42444 int err;
42445 struct netfs_cmd *cmd = t->iovec.iov_base;
42446
42447- t->gen = atomic_inc_return(&psb->trans_gen);
42448+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42449
42450 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42451 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42452diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42453index f890a16..509ece8 100644
42454--- a/drivers/staging/sep/sep_driver.c
42455+++ b/drivers/staging/sep/sep_driver.c
42456@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42457 static dev_t sep_devno;
42458
42459 /* the files operations structure of the driver */
42460-static struct file_operations sep_file_operations = {
42461+static const struct file_operations sep_file_operations = {
42462 .owner = THIS_MODULE,
42463 .ioctl = sep_ioctl,
42464 .poll = sep_poll,
42465diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42466index 5e16bc3..7655b10 100644
42467--- a/drivers/staging/usbip/usbip_common.h
42468+++ b/drivers/staging/usbip/usbip_common.h
42469@@ -374,7 +374,7 @@ struct usbip_device {
42470 void (*shutdown)(struct usbip_device *);
42471 void (*reset)(struct usbip_device *);
42472 void (*unusable)(struct usbip_device *);
42473- } eh_ops;
42474+ } __no_const eh_ops;
42475 };
42476
42477
42478diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42479index 57f7946..d9df23d 100644
42480--- a/drivers/staging/usbip/vhci.h
42481+++ b/drivers/staging/usbip/vhci.h
42482@@ -92,7 +92,7 @@ struct vhci_hcd {
42483 unsigned resuming:1;
42484 unsigned long re_timeout;
42485
42486- atomic_t seqnum;
42487+ atomic_unchecked_t seqnum;
42488
42489 /*
42490 * NOTE:
42491diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42492index 20cd7db..c2693ff 100644
42493--- a/drivers/staging/usbip/vhci_hcd.c
42494+++ b/drivers/staging/usbip/vhci_hcd.c
42495@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42496 return;
42497 }
42498
42499- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42500+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42501 if (priv->seqnum == 0xffff)
42502 usbip_uinfo("seqnum max\n");
42503
42504@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42505 return -ENOMEM;
42506 }
42507
42508- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42509+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42510 if (unlink->seqnum == 0xffff)
42511 usbip_uinfo("seqnum max\n");
42512
42513@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42514 vdev->rhport = rhport;
42515 }
42516
42517- atomic_set(&vhci->seqnum, 0);
42518+ atomic_set_unchecked(&vhci->seqnum, 0);
42519 spin_lock_init(&vhci->lock);
42520
42521
42522diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42523index 7fd76fe..673695a 100644
42524--- a/drivers/staging/usbip/vhci_rx.c
42525+++ b/drivers/staging/usbip/vhci_rx.c
42526@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42527 usbip_uerr("cannot find a urb of seqnum %u\n",
42528 pdu->base.seqnum);
42529 usbip_uinfo("max seqnum %d\n",
42530- atomic_read(&the_controller->seqnum));
42531+ atomic_read_unchecked(&the_controller->seqnum));
42532 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42533 return;
42534 }
42535diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42536index 7891288..8e31300 100644
42537--- a/drivers/staging/vme/devices/vme_user.c
42538+++ b/drivers/staging/vme/devices/vme_user.c
42539@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42540 static int __init vme_user_probe(struct device *, int, int);
42541 static int __exit vme_user_remove(struct device *, int, int);
42542
42543-static struct file_operations vme_user_fops = {
42544+static const struct file_operations vme_user_fops = {
42545 .open = vme_user_open,
42546 .release = vme_user_release,
42547 .read = vme_user_read,
42548diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42549index 58abf44..00c1fc8 100644
42550--- a/drivers/staging/vt6655/hostap.c
42551+++ b/drivers/staging/vt6655/hostap.c
42552@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42553 PSDevice apdev_priv;
42554 struct net_device *dev = pDevice->dev;
42555 int ret;
42556- const struct net_device_ops apdev_netdev_ops = {
42557+ net_device_ops_no_const apdev_netdev_ops = {
42558 .ndo_start_xmit = pDevice->tx_80211,
42559 };
42560
42561diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42562index 0c8267a..db1f363 100644
42563--- a/drivers/staging/vt6656/hostap.c
42564+++ b/drivers/staging/vt6656/hostap.c
42565@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42566 PSDevice apdev_priv;
42567 struct net_device *dev = pDevice->dev;
42568 int ret;
42569- const struct net_device_ops apdev_netdev_ops = {
42570+ net_device_ops_no_const apdev_netdev_ops = {
42571 .ndo_start_xmit = pDevice->tx_80211,
42572 };
42573
42574diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42575index 925678b..da7f5ed 100644
42576--- a/drivers/staging/wlan-ng/hfa384x_usb.c
42577+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42578@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42579
42580 struct usbctlx_completor {
42581 int (*complete) (struct usbctlx_completor *);
42582-};
42583+} __no_const;
42584 typedef struct usbctlx_completor usbctlx_completor_t;
42585
42586 static int
42587diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42588index 40de151..924f268 100644
42589--- a/drivers/telephony/ixj.c
42590+++ b/drivers/telephony/ixj.c
42591@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42592 bool mContinue;
42593 char *pIn, *pOut;
42594
42595+ pax_track_stack();
42596+
42597 if (!SCI_Prepare(j))
42598 return 0;
42599
42600diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42601index e941367..b631f5a 100644
42602--- a/drivers/uio/uio.c
42603+++ b/drivers/uio/uio.c
42604@@ -23,6 +23,7 @@
42605 #include <linux/string.h>
42606 #include <linux/kobject.h>
42607 #include <linux/uio_driver.h>
42608+#include <asm/local.h>
42609
42610 #define UIO_MAX_DEVICES 255
42611
42612@@ -30,10 +31,10 @@ struct uio_device {
42613 struct module *owner;
42614 struct device *dev;
42615 int minor;
42616- atomic_t event;
42617+ atomic_unchecked_t event;
42618 struct fasync_struct *async_queue;
42619 wait_queue_head_t wait;
42620- int vma_count;
42621+ local_t vma_count;
42622 struct uio_info *info;
42623 struct kobject *map_dir;
42624 struct kobject *portio_dir;
42625@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42626 return entry->show(mem, buf);
42627 }
42628
42629-static struct sysfs_ops map_sysfs_ops = {
42630+static const struct sysfs_ops map_sysfs_ops = {
42631 .show = map_type_show,
42632 };
42633
42634@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42635 return entry->show(port, buf);
42636 }
42637
42638-static struct sysfs_ops portio_sysfs_ops = {
42639+static const struct sysfs_ops portio_sysfs_ops = {
42640 .show = portio_type_show,
42641 };
42642
42643@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42644 struct uio_device *idev = dev_get_drvdata(dev);
42645 if (idev)
42646 return sprintf(buf, "%u\n",
42647- (unsigned int)atomic_read(&idev->event));
42648+ (unsigned int)atomic_read_unchecked(&idev->event));
42649 else
42650 return -ENODEV;
42651 }
42652@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42653 {
42654 struct uio_device *idev = info->uio_dev;
42655
42656- atomic_inc(&idev->event);
42657+ atomic_inc_unchecked(&idev->event);
42658 wake_up_interruptible(&idev->wait);
42659 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42660 }
42661@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42662 }
42663
42664 listener->dev = idev;
42665- listener->event_count = atomic_read(&idev->event);
42666+ listener->event_count = atomic_read_unchecked(&idev->event);
42667 filep->private_data = listener;
42668
42669 if (idev->info->open) {
42670@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42671 return -EIO;
42672
42673 poll_wait(filep, &idev->wait, wait);
42674- if (listener->event_count != atomic_read(&idev->event))
42675+ if (listener->event_count != atomic_read_unchecked(&idev->event))
42676 return POLLIN | POLLRDNORM;
42677 return 0;
42678 }
42679@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42680 do {
42681 set_current_state(TASK_INTERRUPTIBLE);
42682
42683- event_count = atomic_read(&idev->event);
42684+ event_count = atomic_read_unchecked(&idev->event);
42685 if (event_count != listener->event_count) {
42686 if (copy_to_user(buf, &event_count, count))
42687 retval = -EFAULT;
42688@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42689 static void uio_vma_open(struct vm_area_struct *vma)
42690 {
42691 struct uio_device *idev = vma->vm_private_data;
42692- idev->vma_count++;
42693+ local_inc(&idev->vma_count);
42694 }
42695
42696 static void uio_vma_close(struct vm_area_struct *vma)
42697 {
42698 struct uio_device *idev = vma->vm_private_data;
42699- idev->vma_count--;
42700+ local_dec(&idev->vma_count);
42701 }
42702
42703 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42704@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42705 idev->owner = owner;
42706 idev->info = info;
42707 init_waitqueue_head(&idev->wait);
42708- atomic_set(&idev->event, 0);
42709+ atomic_set_unchecked(&idev->event, 0);
42710
42711 ret = uio_get_minor(idev);
42712 if (ret)
42713diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42714index fbea856..06efea6 100644
42715--- a/drivers/usb/atm/usbatm.c
42716+++ b/drivers/usb/atm/usbatm.c
42717@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42718 if (printk_ratelimit())
42719 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42720 __func__, vpi, vci);
42721- atomic_inc(&vcc->stats->rx_err);
42722+ atomic_inc_unchecked(&vcc->stats->rx_err);
42723 return;
42724 }
42725
42726@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42727 if (length > ATM_MAX_AAL5_PDU) {
42728 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42729 __func__, length, vcc);
42730- atomic_inc(&vcc->stats->rx_err);
42731+ atomic_inc_unchecked(&vcc->stats->rx_err);
42732 goto out;
42733 }
42734
42735@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42736 if (sarb->len < pdu_length) {
42737 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42738 __func__, pdu_length, sarb->len, vcc);
42739- atomic_inc(&vcc->stats->rx_err);
42740+ atomic_inc_unchecked(&vcc->stats->rx_err);
42741 goto out;
42742 }
42743
42744 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42745 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42746 __func__, vcc);
42747- atomic_inc(&vcc->stats->rx_err);
42748+ atomic_inc_unchecked(&vcc->stats->rx_err);
42749 goto out;
42750 }
42751
42752@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42753 if (printk_ratelimit())
42754 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42755 __func__, length);
42756- atomic_inc(&vcc->stats->rx_drop);
42757+ atomic_inc_unchecked(&vcc->stats->rx_drop);
42758 goto out;
42759 }
42760
42761@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42762
42763 vcc->push(vcc, skb);
42764
42765- atomic_inc(&vcc->stats->rx);
42766+ atomic_inc_unchecked(&vcc->stats->rx);
42767 out:
42768 skb_trim(sarb, 0);
42769 }
42770@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42771 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42772
42773 usbatm_pop(vcc, skb);
42774- atomic_inc(&vcc->stats->tx);
42775+ atomic_inc_unchecked(&vcc->stats->tx);
42776
42777 skb = skb_dequeue(&instance->sndqueue);
42778 }
42779@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42780 if (!left--)
42781 return sprintf(page,
42782 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42783- atomic_read(&atm_dev->stats.aal5.tx),
42784- atomic_read(&atm_dev->stats.aal5.tx_err),
42785- atomic_read(&atm_dev->stats.aal5.rx),
42786- atomic_read(&atm_dev->stats.aal5.rx_err),
42787- atomic_read(&atm_dev->stats.aal5.rx_drop));
42788+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42789+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42790+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42791+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42792+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42793
42794 if (!left--) {
42795 if (instance->disconnected)
42796diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42797index 24e6205..fe5a5d4 100644
42798--- a/drivers/usb/core/hcd.c
42799+++ b/drivers/usb/core/hcd.c
42800@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42801
42802 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42803
42804-struct usb_mon_operations *mon_ops;
42805+const struct usb_mon_operations *mon_ops;
42806
42807 /*
42808 * The registration is unlocked.
42809@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42810 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42811 */
42812
42813-int usb_mon_register (struct usb_mon_operations *ops)
42814+int usb_mon_register (const struct usb_mon_operations *ops)
42815 {
42816
42817 if (mon_ops)
42818diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42819index bcbe104..9cfd1c6 100644
42820--- a/drivers/usb/core/hcd.h
42821+++ b/drivers/usb/core/hcd.h
42822@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42823 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42824
42825 struct usb_mon_operations {
42826- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42827- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42828- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42829+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42830+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42831+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42832 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42833 };
42834
42835-extern struct usb_mon_operations *mon_ops;
42836+extern const struct usb_mon_operations *mon_ops;
42837
42838 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42839 {
42840@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42841 (*mon_ops->urb_complete)(bus, urb, status);
42842 }
42843
42844-int usb_mon_register(struct usb_mon_operations *ops);
42845+int usb_mon_register(const struct usb_mon_operations *ops);
42846 void usb_mon_deregister(void);
42847
42848 #else
42849diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42850index 409cc94..a673bad 100644
42851--- a/drivers/usb/core/message.c
42852+++ b/drivers/usb/core/message.c
42853@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42854 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42855 if (buf) {
42856 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42857- if (len > 0) {
42858- smallbuf = kmalloc(++len, GFP_NOIO);
42859+ if (len++ > 0) {
42860+ smallbuf = kmalloc(len, GFP_NOIO);
42861 if (!smallbuf)
42862 return buf;
42863 memcpy(smallbuf, buf, len);
42864diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42865index 62ff5e7..530b74e 100644
42866--- a/drivers/usb/misc/appledisplay.c
42867+++ b/drivers/usb/misc/appledisplay.c
42868@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42869 return pdata->msgdata[1];
42870 }
42871
42872-static struct backlight_ops appledisplay_bl_data = {
42873+static const struct backlight_ops appledisplay_bl_data = {
42874 .get_brightness = appledisplay_bl_get_brightness,
42875 .update_status = appledisplay_bl_update_status,
42876 };
42877diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42878index e0c2db3..bd8cb66 100644
42879--- a/drivers/usb/mon/mon_main.c
42880+++ b/drivers/usb/mon/mon_main.c
42881@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42882 /*
42883 * Ops
42884 */
42885-static struct usb_mon_operations mon_ops_0 = {
42886+static const struct usb_mon_operations mon_ops_0 = {
42887 .urb_submit = mon_submit,
42888 .urb_submit_error = mon_submit_error,
42889 .urb_complete = mon_complete,
42890diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42891index d6bea3e..60b250e 100644
42892--- a/drivers/usb/wusbcore/wa-hc.h
42893+++ b/drivers/usb/wusbcore/wa-hc.h
42894@@ -192,7 +192,7 @@ struct wahc {
42895 struct list_head xfer_delayed_list;
42896 spinlock_t xfer_list_lock;
42897 struct work_struct xfer_work;
42898- atomic_t xfer_id_count;
42899+ atomic_unchecked_t xfer_id_count;
42900 };
42901
42902
42903@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42904 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42905 spin_lock_init(&wa->xfer_list_lock);
42906 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42907- atomic_set(&wa->xfer_id_count, 1);
42908+ atomic_set_unchecked(&wa->xfer_id_count, 1);
42909 }
42910
42911 /**
42912diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42913index 613a5fc..3174865 100644
42914--- a/drivers/usb/wusbcore/wa-xfer.c
42915+++ b/drivers/usb/wusbcore/wa-xfer.c
42916@@ -293,7 +293,7 @@ out:
42917 */
42918 static void wa_xfer_id_init(struct wa_xfer *xfer)
42919 {
42920- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42921+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42922 }
42923
42924 /*
42925diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42926index aa42fce..f8a828c 100644
42927--- a/drivers/uwb/wlp/messages.c
42928+++ b/drivers/uwb/wlp/messages.c
42929@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42930 size_t len = skb->len;
42931 size_t used;
42932 ssize_t result;
42933- struct wlp_nonce enonce, rnonce;
42934+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42935 enum wlp_assc_error assc_err;
42936 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42937 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42938diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42939index 0370399..6627c94 100644
42940--- a/drivers/uwb/wlp/sysfs.c
42941+++ b/drivers/uwb/wlp/sysfs.c
42942@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42943 return ret;
42944 }
42945
42946-static
42947-struct sysfs_ops wss_sysfs_ops = {
42948+static const struct sysfs_ops wss_sysfs_ops = {
42949 .show = wlp_wss_attr_show,
42950 .store = wlp_wss_attr_store,
42951 };
42952diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42953index 8c5e432..5ee90ea 100644
42954--- a/drivers/video/atmel_lcdfb.c
42955+++ b/drivers/video/atmel_lcdfb.c
42956@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42957 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42958 }
42959
42960-static struct backlight_ops atmel_lcdc_bl_ops = {
42961+static const struct backlight_ops atmel_lcdc_bl_ops = {
42962 .update_status = atmel_bl_update_status,
42963 .get_brightness = atmel_bl_get_brightness,
42964 };
42965diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42966index e4e4d43..66bcbcc 100644
42967--- a/drivers/video/aty/aty128fb.c
42968+++ b/drivers/video/aty/aty128fb.c
42969@@ -149,7 +149,7 @@ enum {
42970 };
42971
42972 /* Must match above enum */
42973-static const char *r128_family[] __devinitdata = {
42974+static const char *r128_family[] __devinitconst = {
42975 "AGP",
42976 "PCI",
42977 "PRO AGP",
42978@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42979 return bd->props.brightness;
42980 }
42981
42982-static struct backlight_ops aty128_bl_data = {
42983+static const struct backlight_ops aty128_bl_data = {
42984 .get_brightness = aty128_bl_get_brightness,
42985 .update_status = aty128_bl_update_status,
42986 };
42987diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42988index 913b4a4..9295a38 100644
42989--- a/drivers/video/aty/atyfb_base.c
42990+++ b/drivers/video/aty/atyfb_base.c
42991@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42992 return bd->props.brightness;
42993 }
42994
42995-static struct backlight_ops aty_bl_data = {
42996+static const struct backlight_ops aty_bl_data = {
42997 .get_brightness = aty_bl_get_brightness,
42998 .update_status = aty_bl_update_status,
42999 };
43000diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
43001index 1a056ad..221bd6a 100644
43002--- a/drivers/video/aty/radeon_backlight.c
43003+++ b/drivers/video/aty/radeon_backlight.c
43004@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
43005 return bd->props.brightness;
43006 }
43007
43008-static struct backlight_ops radeon_bl_data = {
43009+static const struct backlight_ops radeon_bl_data = {
43010 .get_brightness = radeon_bl_get_brightness,
43011 .update_status = radeon_bl_update_status,
43012 };
43013diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
43014index ad05da5..3cb2cb9 100644
43015--- a/drivers/video/backlight/adp5520_bl.c
43016+++ b/drivers/video/backlight/adp5520_bl.c
43017@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
43018 return error ? data->current_brightness : reg_val;
43019 }
43020
43021-static struct backlight_ops adp5520_bl_ops = {
43022+static const struct backlight_ops adp5520_bl_ops = {
43023 .update_status = adp5520_bl_update_status,
43024 .get_brightness = adp5520_bl_get_brightness,
43025 };
43026diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
43027index 2c3bdfc..d769b0b 100644
43028--- a/drivers/video/backlight/adx_bl.c
43029+++ b/drivers/video/backlight/adx_bl.c
43030@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
43031 return 1;
43032 }
43033
43034-static struct backlight_ops adx_backlight_ops = {
43035+static const struct backlight_ops adx_backlight_ops = {
43036 .options = 0,
43037 .update_status = adx_backlight_update_status,
43038 .get_brightness = adx_backlight_get_brightness,
43039diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
43040index 505c082..6b6b3cc 100644
43041--- a/drivers/video/backlight/atmel-pwm-bl.c
43042+++ b/drivers/video/backlight/atmel-pwm-bl.c
43043@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
43044 return pwm_channel_enable(&pwmbl->pwmc);
43045 }
43046
43047-static struct backlight_ops atmel_pwm_bl_ops = {
43048+static const struct backlight_ops atmel_pwm_bl_ops = {
43049 .get_brightness = atmel_pwm_bl_get_intensity,
43050 .update_status = atmel_pwm_bl_set_intensity,
43051 };
43052diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
43053index 5e20e6e..89025e6 100644
43054--- a/drivers/video/backlight/backlight.c
43055+++ b/drivers/video/backlight/backlight.c
43056@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
43057 * ERR_PTR() or a pointer to the newly allocated device.
43058 */
43059 struct backlight_device *backlight_device_register(const char *name,
43060- struct device *parent, void *devdata, struct backlight_ops *ops)
43061+ struct device *parent, void *devdata, const struct backlight_ops *ops)
43062 {
43063 struct backlight_device *new_bd;
43064 int rc;
43065diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
43066index 9677494..b4bcf80 100644
43067--- a/drivers/video/backlight/corgi_lcd.c
43068+++ b/drivers/video/backlight/corgi_lcd.c
43069@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
43070 }
43071 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
43072
43073-static struct backlight_ops corgi_bl_ops = {
43074+static const struct backlight_ops corgi_bl_ops = {
43075 .get_brightness = corgi_bl_get_intensity,
43076 .update_status = corgi_bl_update_status,
43077 };
43078diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
43079index b9fe62b..2914bf1 100644
43080--- a/drivers/video/backlight/cr_bllcd.c
43081+++ b/drivers/video/backlight/cr_bllcd.c
43082@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
43083 return intensity;
43084 }
43085
43086-static struct backlight_ops cr_backlight_ops = {
43087+static const struct backlight_ops cr_backlight_ops = {
43088 .get_brightness = cr_backlight_get_intensity,
43089 .update_status = cr_backlight_set_intensity,
43090 };
43091diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
43092index 701a108..feacfd5 100644
43093--- a/drivers/video/backlight/da903x_bl.c
43094+++ b/drivers/video/backlight/da903x_bl.c
43095@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
43096 return data->current_brightness;
43097 }
43098
43099-static struct backlight_ops da903x_backlight_ops = {
43100+static const struct backlight_ops da903x_backlight_ops = {
43101 .update_status = da903x_backlight_update_status,
43102 .get_brightness = da903x_backlight_get_brightness,
43103 };
43104diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
43105index 6d27f62..e6d348e 100644
43106--- a/drivers/video/backlight/generic_bl.c
43107+++ b/drivers/video/backlight/generic_bl.c
43108@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
43109 }
43110 EXPORT_SYMBOL(corgibl_limit_intensity);
43111
43112-static struct backlight_ops genericbl_ops = {
43113+static const struct backlight_ops genericbl_ops = {
43114 .options = BL_CORE_SUSPENDRESUME,
43115 .get_brightness = genericbl_get_intensity,
43116 .update_status = genericbl_send_intensity,
43117diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
43118index 7fb4eef..f7cc528 100644
43119--- a/drivers/video/backlight/hp680_bl.c
43120+++ b/drivers/video/backlight/hp680_bl.c
43121@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
43122 return current_intensity;
43123 }
43124
43125-static struct backlight_ops hp680bl_ops = {
43126+static const struct backlight_ops hp680bl_ops = {
43127 .get_brightness = hp680bl_get_intensity,
43128 .update_status = hp680bl_set_intensity,
43129 };
43130diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
43131index 7aed256..db9071f 100644
43132--- a/drivers/video/backlight/jornada720_bl.c
43133+++ b/drivers/video/backlight/jornada720_bl.c
43134@@ -93,7 +93,7 @@ out:
43135 return ret;
43136 }
43137
43138-static struct backlight_ops jornada_bl_ops = {
43139+static const struct backlight_ops jornada_bl_ops = {
43140 .get_brightness = jornada_bl_get_brightness,
43141 .update_status = jornada_bl_update_status,
43142 .options = BL_CORE_SUSPENDRESUME,
43143diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43144index a38fda1..939e7b8 100644
43145--- a/drivers/video/backlight/kb3886_bl.c
43146+++ b/drivers/video/backlight/kb3886_bl.c
43147@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
43148 return kb3886bl_intensity;
43149 }
43150
43151-static struct backlight_ops kb3886bl_ops = {
43152+static const struct backlight_ops kb3886bl_ops = {
43153 .get_brightness = kb3886bl_get_intensity,
43154 .update_status = kb3886bl_send_intensity,
43155 };
43156diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
43157index 6b488b8..00a9591 100644
43158--- a/drivers/video/backlight/locomolcd.c
43159+++ b/drivers/video/backlight/locomolcd.c
43160@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
43161 return current_intensity;
43162 }
43163
43164-static struct backlight_ops locomobl_data = {
43165+static const struct backlight_ops locomobl_data = {
43166 .get_brightness = locomolcd_get_intensity,
43167 .update_status = locomolcd_set_intensity,
43168 };
43169diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
43170index 99bdfa8..3dac448 100644
43171--- a/drivers/video/backlight/mbp_nvidia_bl.c
43172+++ b/drivers/video/backlight/mbp_nvidia_bl.c
43173@@ -33,7 +33,7 @@ struct dmi_match_data {
43174 unsigned long iostart;
43175 unsigned long iolen;
43176 /* Backlight operations structure. */
43177- struct backlight_ops backlight_ops;
43178+ const struct backlight_ops backlight_ops;
43179 };
43180
43181 /* Module parameters. */
43182diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
43183index cbad67e..3cf900e 100644
43184--- a/drivers/video/backlight/omap1_bl.c
43185+++ b/drivers/video/backlight/omap1_bl.c
43186@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
43187 return bl->current_intensity;
43188 }
43189
43190-static struct backlight_ops omapbl_ops = {
43191+static const struct backlight_ops omapbl_ops = {
43192 .get_brightness = omapbl_get_intensity,
43193 .update_status = omapbl_update_status,
43194 };
43195diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
43196index 9edaf24..075786e 100644
43197--- a/drivers/video/backlight/progear_bl.c
43198+++ b/drivers/video/backlight/progear_bl.c
43199@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
43200 return intensity - HW_LEVEL_MIN;
43201 }
43202
43203-static struct backlight_ops progearbl_ops = {
43204+static const struct backlight_ops progearbl_ops = {
43205 .get_brightness = progearbl_get_intensity,
43206 .update_status = progearbl_set_intensity,
43207 };
43208diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
43209index 8871662..df9e0b3 100644
43210--- a/drivers/video/backlight/pwm_bl.c
43211+++ b/drivers/video/backlight/pwm_bl.c
43212@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
43213 return bl->props.brightness;
43214 }
43215
43216-static struct backlight_ops pwm_backlight_ops = {
43217+static const struct backlight_ops pwm_backlight_ops = {
43218 .update_status = pwm_backlight_update_status,
43219 .get_brightness = pwm_backlight_get_brightness,
43220 };
43221diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
43222index 43edbad..e14ce4d 100644
43223--- a/drivers/video/backlight/tosa_bl.c
43224+++ b/drivers/video/backlight/tosa_bl.c
43225@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
43226 return props->brightness;
43227 }
43228
43229-static struct backlight_ops bl_ops = {
43230+static const struct backlight_ops bl_ops = {
43231 .get_brightness = tosa_bl_get_brightness,
43232 .update_status = tosa_bl_update_status,
43233 };
43234diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
43235index 467bdb7..e32add3 100644
43236--- a/drivers/video/backlight/wm831x_bl.c
43237+++ b/drivers/video/backlight/wm831x_bl.c
43238@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
43239 return data->current_brightness;
43240 }
43241
43242-static struct backlight_ops wm831x_backlight_ops = {
43243+static const struct backlight_ops wm831x_backlight_ops = {
43244 .options = BL_CORE_SUSPENDRESUME,
43245 .update_status = wm831x_backlight_update_status,
43246 .get_brightness = wm831x_backlight_get_brightness,
43247diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
43248index e49ae5e..db4e6f7 100644
43249--- a/drivers/video/bf54x-lq043fb.c
43250+++ b/drivers/video/bf54x-lq043fb.c
43251@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43252 return 0;
43253 }
43254
43255-static struct backlight_ops bfin_lq043fb_bl_ops = {
43256+static const struct backlight_ops bfin_lq043fb_bl_ops = {
43257 .get_brightness = bl_get_brightness,
43258 };
43259
43260diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
43261index 2c72a7c..d523e52 100644
43262--- a/drivers/video/bfin-t350mcqb-fb.c
43263+++ b/drivers/video/bfin-t350mcqb-fb.c
43264@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
43265 return 0;
43266 }
43267
43268-static struct backlight_ops bfin_lq043fb_bl_ops = {
43269+static const struct backlight_ops bfin_lq043fb_bl_ops = {
43270 .get_brightness = bl_get_brightness,
43271 };
43272
43273diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43274index f53b9f1..958bf4e 100644
43275--- a/drivers/video/fbcmap.c
43276+++ b/drivers/video/fbcmap.c
43277@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43278 rc = -ENODEV;
43279 goto out;
43280 }
43281- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43282- !info->fbops->fb_setcmap)) {
43283+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43284 rc = -EINVAL;
43285 goto out1;
43286 }
43287diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43288index 99bbd28..ad3829e 100644
43289--- a/drivers/video/fbmem.c
43290+++ b/drivers/video/fbmem.c
43291@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43292 image->dx += image->width + 8;
43293 }
43294 } else if (rotate == FB_ROTATE_UD) {
43295- for (x = 0; x < num && image->dx >= 0; x++) {
43296+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43297 info->fbops->fb_imageblit(info, image);
43298 image->dx -= image->width + 8;
43299 }
43300@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43301 image->dy += image->height + 8;
43302 }
43303 } else if (rotate == FB_ROTATE_CCW) {
43304- for (x = 0; x < num && image->dy >= 0; x++) {
43305+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43306 info->fbops->fb_imageblit(info, image);
43307 image->dy -= image->height + 8;
43308 }
43309@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43310 int flags = info->flags;
43311 int ret = 0;
43312
43313+ pax_track_stack();
43314+
43315 if (var->activate & FB_ACTIVATE_INV_MODE) {
43316 struct fb_videomode mode1, mode2;
43317
43318@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43319 void __user *argp = (void __user *)arg;
43320 long ret = 0;
43321
43322+ pax_track_stack();
43323+
43324 switch (cmd) {
43325 case FBIOGET_VSCREENINFO:
43326 if (!lock_fb_info(info))
43327@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43328 return -EFAULT;
43329 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43330 return -EINVAL;
43331- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43332+ if (con2fb.framebuffer >= FB_MAX)
43333 return -EINVAL;
43334 if (!registered_fb[con2fb.framebuffer])
43335 request_module("fb%d", con2fb.framebuffer);
43336diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43337index f20eff8..3e4f622 100644
43338--- a/drivers/video/geode/gx1fb_core.c
43339+++ b/drivers/video/geode/gx1fb_core.c
43340@@ -30,7 +30,7 @@ static int crt_option = 1;
43341 static char panel_option[32] = "";
43342
43343 /* Modes relevant to the GX1 (taken from modedb.c) */
43344-static const struct fb_videomode __initdata gx1_modedb[] = {
43345+static const struct fb_videomode __initconst gx1_modedb[] = {
43346 /* 640x480-60 VESA */
43347 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43348 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43349diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43350index 896e53d..4d87d0b 100644
43351--- a/drivers/video/gxt4500.c
43352+++ b/drivers/video/gxt4500.c
43353@@ -156,7 +156,7 @@ struct gxt4500_par {
43354 static char *mode_option;
43355
43356 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43357-static const struct fb_videomode defaultmode __devinitdata = {
43358+static const struct fb_videomode defaultmode __devinitconst = {
43359 .refresh = 60,
43360 .xres = 1280,
43361 .yres = 1024,
43362@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43363 return 0;
43364 }
43365
43366-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43367+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43368 .id = "IBM GXT4500P",
43369 .type = FB_TYPE_PACKED_PIXELS,
43370 .visual = FB_VISUAL_PSEUDOCOLOR,
43371diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43372index f5bedee..28c6028 100644
43373--- a/drivers/video/i810/i810_accel.c
43374+++ b/drivers/video/i810/i810_accel.c
43375@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43376 }
43377 }
43378 printk("ringbuffer lockup!!!\n");
43379+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43380 i810_report_error(mmio);
43381 par->dev_flags |= LOCKUP;
43382 info->pixmap.scan_align = 1;
43383diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43384index 5743ea2..457f82c 100644
43385--- a/drivers/video/i810/i810_main.c
43386+++ b/drivers/video/i810/i810_main.c
43387@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43388 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43389
43390 /* PCI */
43391-static const char *i810_pci_list[] __devinitdata = {
43392+static const char *i810_pci_list[] __devinitconst = {
43393 "Intel(R) 810 Framebuffer Device" ,
43394 "Intel(R) 810-DC100 Framebuffer Device" ,
43395 "Intel(R) 810E Framebuffer Device" ,
43396diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43397index 3c14e43..eafa544 100644
43398--- a/drivers/video/logo/logo_linux_clut224.ppm
43399+++ b/drivers/video/logo/logo_linux_clut224.ppm
43400@@ -1,1604 +1,1123 @@
43401 P3
43402-# Standard 224-color Linux logo
43403 80 80
43404 255
43405- 0 0 0 0 0 0 0 0 0 0 0 0
43406- 0 0 0 0 0 0 0 0 0 0 0 0
43407- 0 0 0 0 0 0 0 0 0 0 0 0
43408- 0 0 0 0 0 0 0 0 0 0 0 0
43409- 0 0 0 0 0 0 0 0 0 0 0 0
43410- 0 0 0 0 0 0 0 0 0 0 0 0
43411- 0 0 0 0 0 0 0 0 0 0 0 0
43412- 0 0 0 0 0 0 0 0 0 0 0 0
43413- 0 0 0 0 0 0 0 0 0 0 0 0
43414- 6 6 6 6 6 6 10 10 10 10 10 10
43415- 10 10 10 6 6 6 6 6 6 6 6 6
43416- 0 0 0 0 0 0 0 0 0 0 0 0
43417- 0 0 0 0 0 0 0 0 0 0 0 0
43418- 0 0 0 0 0 0 0 0 0 0 0 0
43419- 0 0 0 0 0 0 0 0 0 0 0 0
43420- 0 0 0 0 0 0 0 0 0 0 0 0
43421- 0 0 0 0 0 0 0 0 0 0 0 0
43422- 0 0 0 0 0 0 0 0 0 0 0 0
43423- 0 0 0 0 0 0 0 0 0 0 0 0
43424- 0 0 0 0 0 0 0 0 0 0 0 0
43425- 0 0 0 0 0 0 0 0 0 0 0 0
43426- 0 0 0 0 0 0 0 0 0 0 0 0
43427- 0 0 0 0 0 0 0 0 0 0 0 0
43428- 0 0 0 0 0 0 0 0 0 0 0 0
43429- 0 0 0 0 0 0 0 0 0 0 0 0
43430- 0 0 0 0 0 0 0 0 0 0 0 0
43431- 0 0 0 0 0 0 0 0 0 0 0 0
43432- 0 0 0 0 0 0 0 0 0 0 0 0
43433- 0 0 0 6 6 6 10 10 10 14 14 14
43434- 22 22 22 26 26 26 30 30 30 34 34 34
43435- 30 30 30 30 30 30 26 26 26 18 18 18
43436- 14 14 14 10 10 10 6 6 6 0 0 0
43437- 0 0 0 0 0 0 0 0 0 0 0 0
43438- 0 0 0 0 0 0 0 0 0 0 0 0
43439- 0 0 0 0 0 0 0 0 0 0 0 0
43440- 0 0 0 0 0 0 0 0 0 0 0 0
43441- 0 0 0 0 0 0 0 0 0 0 0 0
43442- 0 0 0 0 0 0 0 0 0 0 0 0
43443- 0 0 0 0 0 0 0 0 0 0 0 0
43444- 0 0 0 0 0 0 0 0 0 0 0 0
43445- 0 0 0 0 0 0 0 0 0 0 0 0
43446- 0 0 0 0 0 1 0 0 1 0 0 0
43447- 0 0 0 0 0 0 0 0 0 0 0 0
43448- 0 0 0 0 0 0 0 0 0 0 0 0
43449- 0 0 0 0 0 0 0 0 0 0 0 0
43450- 0 0 0 0 0 0 0 0 0 0 0 0
43451- 0 0 0 0 0 0 0 0 0 0 0 0
43452- 0 0 0 0 0 0 0 0 0 0 0 0
43453- 6 6 6 14 14 14 26 26 26 42 42 42
43454- 54 54 54 66 66 66 78 78 78 78 78 78
43455- 78 78 78 74 74 74 66 66 66 54 54 54
43456- 42 42 42 26 26 26 18 18 18 10 10 10
43457- 6 6 6 0 0 0 0 0 0 0 0 0
43458- 0 0 0 0 0 0 0 0 0 0 0 0
43459- 0 0 0 0 0 0 0 0 0 0 0 0
43460- 0 0 0 0 0 0 0 0 0 0 0 0
43461- 0 0 0 0 0 0 0 0 0 0 0 0
43462- 0 0 0 0 0 0 0 0 0 0 0 0
43463- 0 0 0 0 0 0 0 0 0 0 0 0
43464- 0 0 0 0 0 0 0 0 0 0 0 0
43465- 0 0 0 0 0 0 0 0 0 0 0 0
43466- 0 0 1 0 0 0 0 0 0 0 0 0
43467- 0 0 0 0 0 0 0 0 0 0 0 0
43468- 0 0 0 0 0 0 0 0 0 0 0 0
43469- 0 0 0 0 0 0 0 0 0 0 0 0
43470- 0 0 0 0 0 0 0 0 0 0 0 0
43471- 0 0 0 0 0 0 0 0 0 0 0 0
43472- 0 0 0 0 0 0 0 0 0 10 10 10
43473- 22 22 22 42 42 42 66 66 66 86 86 86
43474- 66 66 66 38 38 38 38 38 38 22 22 22
43475- 26 26 26 34 34 34 54 54 54 66 66 66
43476- 86 86 86 70 70 70 46 46 46 26 26 26
43477- 14 14 14 6 6 6 0 0 0 0 0 0
43478- 0 0 0 0 0 0 0 0 0 0 0 0
43479- 0 0 0 0 0 0 0 0 0 0 0 0
43480- 0 0 0 0 0 0 0 0 0 0 0 0
43481- 0 0 0 0 0 0 0 0 0 0 0 0
43482- 0 0 0 0 0 0 0 0 0 0 0 0
43483- 0 0 0 0 0 0 0 0 0 0 0 0
43484- 0 0 0 0 0 0 0 0 0 0 0 0
43485- 0 0 0 0 0 0 0 0 0 0 0 0
43486- 0 0 1 0 0 1 0 0 1 0 0 0
43487- 0 0 0 0 0 0 0 0 0 0 0 0
43488- 0 0 0 0 0 0 0 0 0 0 0 0
43489- 0 0 0 0 0 0 0 0 0 0 0 0
43490- 0 0 0 0 0 0 0 0 0 0 0 0
43491- 0 0 0 0 0 0 0 0 0 0 0 0
43492- 0 0 0 0 0 0 10 10 10 26 26 26
43493- 50 50 50 82 82 82 58 58 58 6 6 6
43494- 2 2 6 2 2 6 2 2 6 2 2 6
43495- 2 2 6 2 2 6 2 2 6 2 2 6
43496- 6 6 6 54 54 54 86 86 86 66 66 66
43497- 38 38 38 18 18 18 6 6 6 0 0 0
43498- 0 0 0 0 0 0 0 0 0 0 0 0
43499- 0 0 0 0 0 0 0 0 0 0 0 0
43500- 0 0 0 0 0 0 0 0 0 0 0 0
43501- 0 0 0 0 0 0 0 0 0 0 0 0
43502- 0 0 0 0 0 0 0 0 0 0 0 0
43503- 0 0 0 0 0 0 0 0 0 0 0 0
43504- 0 0 0 0 0 0 0 0 0 0 0 0
43505- 0 0 0 0 0 0 0 0 0 0 0 0
43506- 0 0 0 0 0 0 0 0 0 0 0 0
43507- 0 0 0 0 0 0 0 0 0 0 0 0
43508- 0 0 0 0 0 0 0 0 0 0 0 0
43509- 0 0 0 0 0 0 0 0 0 0 0 0
43510- 0 0 0 0 0 0 0 0 0 0 0 0
43511- 0 0 0 0 0 0 0 0 0 0 0 0
43512- 0 0 0 6 6 6 22 22 22 50 50 50
43513- 78 78 78 34 34 34 2 2 6 2 2 6
43514- 2 2 6 2 2 6 2 2 6 2 2 6
43515- 2 2 6 2 2 6 2 2 6 2 2 6
43516- 2 2 6 2 2 6 6 6 6 70 70 70
43517- 78 78 78 46 46 46 22 22 22 6 6 6
43518- 0 0 0 0 0 0 0 0 0 0 0 0
43519- 0 0 0 0 0 0 0 0 0 0 0 0
43520- 0 0 0 0 0 0 0 0 0 0 0 0
43521- 0 0 0 0 0 0 0 0 0 0 0 0
43522- 0 0 0 0 0 0 0 0 0 0 0 0
43523- 0 0 0 0 0 0 0 0 0 0 0 0
43524- 0 0 0 0 0 0 0 0 0 0 0 0
43525- 0 0 0 0 0 0 0 0 0 0 0 0
43526- 0 0 1 0 0 1 0 0 1 0 0 0
43527- 0 0 0 0 0 0 0 0 0 0 0 0
43528- 0 0 0 0 0 0 0 0 0 0 0 0
43529- 0 0 0 0 0 0 0 0 0 0 0 0
43530- 0 0 0 0 0 0 0 0 0 0 0 0
43531- 0 0 0 0 0 0 0 0 0 0 0 0
43532- 6 6 6 18 18 18 42 42 42 82 82 82
43533- 26 26 26 2 2 6 2 2 6 2 2 6
43534- 2 2 6 2 2 6 2 2 6 2 2 6
43535- 2 2 6 2 2 6 2 2 6 14 14 14
43536- 46 46 46 34 34 34 6 6 6 2 2 6
43537- 42 42 42 78 78 78 42 42 42 18 18 18
43538- 6 6 6 0 0 0 0 0 0 0 0 0
43539- 0 0 0 0 0 0 0 0 0 0 0 0
43540- 0 0 0 0 0 0 0 0 0 0 0 0
43541- 0 0 0 0 0 0 0 0 0 0 0 0
43542- 0 0 0 0 0 0 0 0 0 0 0 0
43543- 0 0 0 0 0 0 0 0 0 0 0 0
43544- 0 0 0 0 0 0 0 0 0 0 0 0
43545- 0 0 0 0 0 0 0 0 0 0 0 0
43546- 0 0 1 0 0 0 0 0 1 0 0 0
43547- 0 0 0 0 0 0 0 0 0 0 0 0
43548- 0 0 0 0 0 0 0 0 0 0 0 0
43549- 0 0 0 0 0 0 0 0 0 0 0 0
43550- 0 0 0 0 0 0 0 0 0 0 0 0
43551- 0 0 0 0 0 0 0 0 0 0 0 0
43552- 10 10 10 30 30 30 66 66 66 58 58 58
43553- 2 2 6 2 2 6 2 2 6 2 2 6
43554- 2 2 6 2 2 6 2 2 6 2 2 6
43555- 2 2 6 2 2 6 2 2 6 26 26 26
43556- 86 86 86 101 101 101 46 46 46 10 10 10
43557- 2 2 6 58 58 58 70 70 70 34 34 34
43558- 10 10 10 0 0 0 0 0 0 0 0 0
43559- 0 0 0 0 0 0 0 0 0 0 0 0
43560- 0 0 0 0 0 0 0 0 0 0 0 0
43561- 0 0 0 0 0 0 0 0 0 0 0 0
43562- 0 0 0 0 0 0 0 0 0 0 0 0
43563- 0 0 0 0 0 0 0 0 0 0 0 0
43564- 0 0 0 0 0 0 0 0 0 0 0 0
43565- 0 0 0 0 0 0 0 0 0 0 0 0
43566- 0 0 1 0 0 1 0 0 1 0 0 0
43567- 0 0 0 0 0 0 0 0 0 0 0 0
43568- 0 0 0 0 0 0 0 0 0 0 0 0
43569- 0 0 0 0 0 0 0 0 0 0 0 0
43570- 0 0 0 0 0 0 0 0 0 0 0 0
43571- 0 0 0 0 0 0 0 0 0 0 0 0
43572- 14 14 14 42 42 42 86 86 86 10 10 10
43573- 2 2 6 2 2 6 2 2 6 2 2 6
43574- 2 2 6 2 2 6 2 2 6 2 2 6
43575- 2 2 6 2 2 6 2 2 6 30 30 30
43576- 94 94 94 94 94 94 58 58 58 26 26 26
43577- 2 2 6 6 6 6 78 78 78 54 54 54
43578- 22 22 22 6 6 6 0 0 0 0 0 0
43579- 0 0 0 0 0 0 0 0 0 0 0 0
43580- 0 0 0 0 0 0 0 0 0 0 0 0
43581- 0 0 0 0 0 0 0 0 0 0 0 0
43582- 0 0 0 0 0 0 0 0 0 0 0 0
43583- 0 0 0 0 0 0 0 0 0 0 0 0
43584- 0 0 0 0 0 0 0 0 0 0 0 0
43585- 0 0 0 0 0 0 0 0 0 0 0 0
43586- 0 0 0 0 0 0 0 0 0 0 0 0
43587- 0 0 0 0 0 0 0 0 0 0 0 0
43588- 0 0 0 0 0 0 0 0 0 0 0 0
43589- 0 0 0 0 0 0 0 0 0 0 0 0
43590- 0 0 0 0 0 0 0 0 0 0 0 0
43591- 0 0 0 0 0 0 0 0 0 6 6 6
43592- 22 22 22 62 62 62 62 62 62 2 2 6
43593- 2 2 6 2 2 6 2 2 6 2 2 6
43594- 2 2 6 2 2 6 2 2 6 2 2 6
43595- 2 2 6 2 2 6 2 2 6 26 26 26
43596- 54 54 54 38 38 38 18 18 18 10 10 10
43597- 2 2 6 2 2 6 34 34 34 82 82 82
43598- 38 38 38 14 14 14 0 0 0 0 0 0
43599- 0 0 0 0 0 0 0 0 0 0 0 0
43600- 0 0 0 0 0 0 0 0 0 0 0 0
43601- 0 0 0 0 0 0 0 0 0 0 0 0
43602- 0 0 0 0 0 0 0 0 0 0 0 0
43603- 0 0 0 0 0 0 0 0 0 0 0 0
43604- 0 0 0 0 0 0 0 0 0 0 0 0
43605- 0 0 0 0 0 0 0 0 0 0 0 0
43606- 0 0 0 0 0 1 0 0 1 0 0 0
43607- 0 0 0 0 0 0 0 0 0 0 0 0
43608- 0 0 0 0 0 0 0 0 0 0 0 0
43609- 0 0 0 0 0 0 0 0 0 0 0 0
43610- 0 0 0 0 0 0 0 0 0 0 0 0
43611- 0 0 0 0 0 0 0 0 0 6 6 6
43612- 30 30 30 78 78 78 30 30 30 2 2 6
43613- 2 2 6 2 2 6 2 2 6 2 2 6
43614- 2 2 6 2 2 6 2 2 6 2 2 6
43615- 2 2 6 2 2 6 2 2 6 10 10 10
43616- 10 10 10 2 2 6 2 2 6 2 2 6
43617- 2 2 6 2 2 6 2 2 6 78 78 78
43618- 50 50 50 18 18 18 6 6 6 0 0 0
43619- 0 0 0 0 0 0 0 0 0 0 0 0
43620- 0 0 0 0 0 0 0 0 0 0 0 0
43621- 0 0 0 0 0 0 0 0 0 0 0 0
43622- 0 0 0 0 0 0 0 0 0 0 0 0
43623- 0 0 0 0 0 0 0 0 0 0 0 0
43624- 0 0 0 0 0 0 0 0 0 0 0 0
43625- 0 0 0 0 0 0 0 0 0 0 0 0
43626- 0 0 1 0 0 0 0 0 0 0 0 0
43627- 0 0 0 0 0 0 0 0 0 0 0 0
43628- 0 0 0 0 0 0 0 0 0 0 0 0
43629- 0 0 0 0 0 0 0 0 0 0 0 0
43630- 0 0 0 0 0 0 0 0 0 0 0 0
43631- 0 0 0 0 0 0 0 0 0 10 10 10
43632- 38 38 38 86 86 86 14 14 14 2 2 6
43633- 2 2 6 2 2 6 2 2 6 2 2 6
43634- 2 2 6 2 2 6 2 2 6 2 2 6
43635- 2 2 6 2 2 6 2 2 6 2 2 6
43636- 2 2 6 2 2 6 2 2 6 2 2 6
43637- 2 2 6 2 2 6 2 2 6 54 54 54
43638- 66 66 66 26 26 26 6 6 6 0 0 0
43639- 0 0 0 0 0 0 0 0 0 0 0 0
43640- 0 0 0 0 0 0 0 0 0 0 0 0
43641- 0 0 0 0 0 0 0 0 0 0 0 0
43642- 0 0 0 0 0 0 0 0 0 0 0 0
43643- 0 0 0 0 0 0 0 0 0 0 0 0
43644- 0 0 0 0 0 0 0 0 0 0 0 0
43645- 0 0 0 0 0 0 0 0 0 0 0 0
43646- 0 0 0 0 0 1 0 0 1 0 0 0
43647- 0 0 0 0 0 0 0 0 0 0 0 0
43648- 0 0 0 0 0 0 0 0 0 0 0 0
43649- 0 0 0 0 0 0 0 0 0 0 0 0
43650- 0 0 0 0 0 0 0 0 0 0 0 0
43651- 0 0 0 0 0 0 0 0 0 14 14 14
43652- 42 42 42 82 82 82 2 2 6 2 2 6
43653- 2 2 6 6 6 6 10 10 10 2 2 6
43654- 2 2 6 2 2 6 2 2 6 2 2 6
43655- 2 2 6 2 2 6 2 2 6 6 6 6
43656- 14 14 14 10 10 10 2 2 6 2 2 6
43657- 2 2 6 2 2 6 2 2 6 18 18 18
43658- 82 82 82 34 34 34 10 10 10 0 0 0
43659- 0 0 0 0 0 0 0 0 0 0 0 0
43660- 0 0 0 0 0 0 0 0 0 0 0 0
43661- 0 0 0 0 0 0 0 0 0 0 0 0
43662- 0 0 0 0 0 0 0 0 0 0 0 0
43663- 0 0 0 0 0 0 0 0 0 0 0 0
43664- 0 0 0 0 0 0 0 0 0 0 0 0
43665- 0 0 0 0 0 0 0 0 0 0 0 0
43666- 0 0 1 0 0 0 0 0 0 0 0 0
43667- 0 0 0 0 0 0 0 0 0 0 0 0
43668- 0 0 0 0 0 0 0 0 0 0 0 0
43669- 0 0 0 0 0 0 0 0 0 0 0 0
43670- 0 0 0 0 0 0 0 0 0 0 0 0
43671- 0 0 0 0 0 0 0 0 0 14 14 14
43672- 46 46 46 86 86 86 2 2 6 2 2 6
43673- 6 6 6 6 6 6 22 22 22 34 34 34
43674- 6 6 6 2 2 6 2 2 6 2 2 6
43675- 2 2 6 2 2 6 18 18 18 34 34 34
43676- 10 10 10 50 50 50 22 22 22 2 2 6
43677- 2 2 6 2 2 6 2 2 6 10 10 10
43678- 86 86 86 42 42 42 14 14 14 0 0 0
43679- 0 0 0 0 0 0 0 0 0 0 0 0
43680- 0 0 0 0 0 0 0 0 0 0 0 0
43681- 0 0 0 0 0 0 0 0 0 0 0 0
43682- 0 0 0 0 0 0 0 0 0 0 0 0
43683- 0 0 0 0 0 0 0 0 0 0 0 0
43684- 0 0 0 0 0 0 0 0 0 0 0 0
43685- 0 0 0 0 0 0 0 0 0 0 0 0
43686- 0 0 1 0 0 1 0 0 1 0 0 0
43687- 0 0 0 0 0 0 0 0 0 0 0 0
43688- 0 0 0 0 0 0 0 0 0 0 0 0
43689- 0 0 0 0 0 0 0 0 0 0 0 0
43690- 0 0 0 0 0 0 0 0 0 0 0 0
43691- 0 0 0 0 0 0 0 0 0 14 14 14
43692- 46 46 46 86 86 86 2 2 6 2 2 6
43693- 38 38 38 116 116 116 94 94 94 22 22 22
43694- 22 22 22 2 2 6 2 2 6 2 2 6
43695- 14 14 14 86 86 86 138 138 138 162 162 162
43696-154 154 154 38 38 38 26 26 26 6 6 6
43697- 2 2 6 2 2 6 2 2 6 2 2 6
43698- 86 86 86 46 46 46 14 14 14 0 0 0
43699- 0 0 0 0 0 0 0 0 0 0 0 0
43700- 0 0 0 0 0 0 0 0 0 0 0 0
43701- 0 0 0 0 0 0 0 0 0 0 0 0
43702- 0 0 0 0 0 0 0 0 0 0 0 0
43703- 0 0 0 0 0 0 0 0 0 0 0 0
43704- 0 0 0 0 0 0 0 0 0 0 0 0
43705- 0 0 0 0 0 0 0 0 0 0 0 0
43706- 0 0 0 0 0 0 0 0 0 0 0 0
43707- 0 0 0 0 0 0 0 0 0 0 0 0
43708- 0 0 0 0 0 0 0 0 0 0 0 0
43709- 0 0 0 0 0 0 0 0 0 0 0 0
43710- 0 0 0 0 0 0 0 0 0 0 0 0
43711- 0 0 0 0 0 0 0 0 0 14 14 14
43712- 46 46 46 86 86 86 2 2 6 14 14 14
43713-134 134 134 198 198 198 195 195 195 116 116 116
43714- 10 10 10 2 2 6 2 2 6 6 6 6
43715-101 98 89 187 187 187 210 210 210 218 218 218
43716-214 214 214 134 134 134 14 14 14 6 6 6
43717- 2 2 6 2 2 6 2 2 6 2 2 6
43718- 86 86 86 50 50 50 18 18 18 6 6 6
43719- 0 0 0 0 0 0 0 0 0 0 0 0
43720- 0 0 0 0 0 0 0 0 0 0 0 0
43721- 0 0 0 0 0 0 0 0 0 0 0 0
43722- 0 0 0 0 0 0 0 0 0 0 0 0
43723- 0 0 0 0 0 0 0 0 0 0 0 0
43724- 0 0 0 0 0 0 0 0 0 0 0 0
43725- 0 0 0 0 0 0 0 0 1 0 0 0
43726- 0 0 1 0 0 1 0 0 1 0 0 0
43727- 0 0 0 0 0 0 0 0 0 0 0 0
43728- 0 0 0 0 0 0 0 0 0 0 0 0
43729- 0 0 0 0 0 0 0 0 0 0 0 0
43730- 0 0 0 0 0 0 0 0 0 0 0 0
43731- 0 0 0 0 0 0 0 0 0 14 14 14
43732- 46 46 46 86 86 86 2 2 6 54 54 54
43733-218 218 218 195 195 195 226 226 226 246 246 246
43734- 58 58 58 2 2 6 2 2 6 30 30 30
43735-210 210 210 253 253 253 174 174 174 123 123 123
43736-221 221 221 234 234 234 74 74 74 2 2 6
43737- 2 2 6 2 2 6 2 2 6 2 2 6
43738- 70 70 70 58 58 58 22 22 22 6 6 6
43739- 0 0 0 0 0 0 0 0 0 0 0 0
43740- 0 0 0 0 0 0 0 0 0 0 0 0
43741- 0 0 0 0 0 0 0 0 0 0 0 0
43742- 0 0 0 0 0 0 0 0 0 0 0 0
43743- 0 0 0 0 0 0 0 0 0 0 0 0
43744- 0 0 0 0 0 0 0 0 0 0 0 0
43745- 0 0 0 0 0 0 0 0 0 0 0 0
43746- 0 0 0 0 0 0 0 0 0 0 0 0
43747- 0 0 0 0 0 0 0 0 0 0 0 0
43748- 0 0 0 0 0 0 0 0 0 0 0 0
43749- 0 0 0 0 0 0 0 0 0 0 0 0
43750- 0 0 0 0 0 0 0 0 0 0 0 0
43751- 0 0 0 0 0 0 0 0 0 14 14 14
43752- 46 46 46 82 82 82 2 2 6 106 106 106
43753-170 170 170 26 26 26 86 86 86 226 226 226
43754-123 123 123 10 10 10 14 14 14 46 46 46
43755-231 231 231 190 190 190 6 6 6 70 70 70
43756- 90 90 90 238 238 238 158 158 158 2 2 6
43757- 2 2 6 2 2 6 2 2 6 2 2 6
43758- 70 70 70 58 58 58 22 22 22 6 6 6
43759- 0 0 0 0 0 0 0 0 0 0 0 0
43760- 0 0 0 0 0 0 0 0 0 0 0 0
43761- 0 0 0 0 0 0 0 0 0 0 0 0
43762- 0 0 0 0 0 0 0 0 0 0 0 0
43763- 0 0 0 0 0 0 0 0 0 0 0 0
43764- 0 0 0 0 0 0 0 0 0 0 0 0
43765- 0 0 0 0 0 0 0 0 1 0 0 0
43766- 0 0 1 0 0 1 0 0 1 0 0 0
43767- 0 0 0 0 0 0 0 0 0 0 0 0
43768- 0 0 0 0 0 0 0 0 0 0 0 0
43769- 0 0 0 0 0 0 0 0 0 0 0 0
43770- 0 0 0 0 0 0 0 0 0 0 0 0
43771- 0 0 0 0 0 0 0 0 0 14 14 14
43772- 42 42 42 86 86 86 6 6 6 116 116 116
43773-106 106 106 6 6 6 70 70 70 149 149 149
43774-128 128 128 18 18 18 38 38 38 54 54 54
43775-221 221 221 106 106 106 2 2 6 14 14 14
43776- 46 46 46 190 190 190 198 198 198 2 2 6
43777- 2 2 6 2 2 6 2 2 6 2 2 6
43778- 74 74 74 62 62 62 22 22 22 6 6 6
43779- 0 0 0 0 0 0 0 0 0 0 0 0
43780- 0 0 0 0 0 0 0 0 0 0 0 0
43781- 0 0 0 0 0 0 0 0 0 0 0 0
43782- 0 0 0 0 0 0 0 0 0 0 0 0
43783- 0 0 0 0 0 0 0 0 0 0 0 0
43784- 0 0 0 0 0 0 0 0 0 0 0 0
43785- 0 0 0 0 0 0 0 0 1 0 0 0
43786- 0 0 1 0 0 0 0 0 1 0 0 0
43787- 0 0 0 0 0 0 0 0 0 0 0 0
43788- 0 0 0 0 0 0 0 0 0 0 0 0
43789- 0 0 0 0 0 0 0 0 0 0 0 0
43790- 0 0 0 0 0 0 0 0 0 0 0 0
43791- 0 0 0 0 0 0 0 0 0 14 14 14
43792- 42 42 42 94 94 94 14 14 14 101 101 101
43793-128 128 128 2 2 6 18 18 18 116 116 116
43794-118 98 46 121 92 8 121 92 8 98 78 10
43795-162 162 162 106 106 106 2 2 6 2 2 6
43796- 2 2 6 195 195 195 195 195 195 6 6 6
43797- 2 2 6 2 2 6 2 2 6 2 2 6
43798- 74 74 74 62 62 62 22 22 22 6 6 6
43799- 0 0 0 0 0 0 0 0 0 0 0 0
43800- 0 0 0 0 0 0 0 0 0 0 0 0
43801- 0 0 0 0 0 0 0 0 0 0 0 0
43802- 0 0 0 0 0 0 0 0 0 0 0 0
43803- 0 0 0 0 0 0 0 0 0 0 0 0
43804- 0 0 0 0 0 0 0 0 0 0 0 0
43805- 0 0 0 0 0 0 0 0 1 0 0 1
43806- 0 0 1 0 0 0 0 0 1 0 0 0
43807- 0 0 0 0 0 0 0 0 0 0 0 0
43808- 0 0 0 0 0 0 0 0 0 0 0 0
43809- 0 0 0 0 0 0 0 0 0 0 0 0
43810- 0 0 0 0 0 0 0 0 0 0 0 0
43811- 0 0 0 0 0 0 0 0 0 10 10 10
43812- 38 38 38 90 90 90 14 14 14 58 58 58
43813-210 210 210 26 26 26 54 38 6 154 114 10
43814-226 170 11 236 186 11 225 175 15 184 144 12
43815-215 174 15 175 146 61 37 26 9 2 2 6
43816- 70 70 70 246 246 246 138 138 138 2 2 6
43817- 2 2 6 2 2 6 2 2 6 2 2 6
43818- 70 70 70 66 66 66 26 26 26 6 6 6
43819- 0 0 0 0 0 0 0 0 0 0 0 0
43820- 0 0 0 0 0 0 0 0 0 0 0 0
43821- 0 0 0 0 0 0 0 0 0 0 0 0
43822- 0 0 0 0 0 0 0 0 0 0 0 0
43823- 0 0 0 0 0 0 0 0 0 0 0 0
43824- 0 0 0 0 0 0 0 0 0 0 0 0
43825- 0 0 0 0 0 0 0 0 0 0 0 0
43826- 0 0 0 0 0 0 0 0 0 0 0 0
43827- 0 0 0 0 0 0 0 0 0 0 0 0
43828- 0 0 0 0 0 0 0 0 0 0 0 0
43829- 0 0 0 0 0 0 0 0 0 0 0 0
43830- 0 0 0 0 0 0 0 0 0 0 0 0
43831- 0 0 0 0 0 0 0 0 0 10 10 10
43832- 38 38 38 86 86 86 14 14 14 10 10 10
43833-195 195 195 188 164 115 192 133 9 225 175 15
43834-239 182 13 234 190 10 232 195 16 232 200 30
43835-245 207 45 241 208 19 232 195 16 184 144 12
43836-218 194 134 211 206 186 42 42 42 2 2 6
43837- 2 2 6 2 2 6 2 2 6 2 2 6
43838- 50 50 50 74 74 74 30 30 30 6 6 6
43839- 0 0 0 0 0 0 0 0 0 0 0 0
43840- 0 0 0 0 0 0 0 0 0 0 0 0
43841- 0 0 0 0 0 0 0 0 0 0 0 0
43842- 0 0 0 0 0 0 0 0 0 0 0 0
43843- 0 0 0 0 0 0 0 0 0 0 0 0
43844- 0 0 0 0 0 0 0 0 0 0 0 0
43845- 0 0 0 0 0 0 0 0 0 0 0 0
43846- 0 0 0 0 0 0 0 0 0 0 0 0
43847- 0 0 0 0 0 0 0 0 0 0 0 0
43848- 0 0 0 0 0 0 0 0 0 0 0 0
43849- 0 0 0 0 0 0 0 0 0 0 0 0
43850- 0 0 0 0 0 0 0 0 0 0 0 0
43851- 0 0 0 0 0 0 0 0 0 10 10 10
43852- 34 34 34 86 86 86 14 14 14 2 2 6
43853-121 87 25 192 133 9 219 162 10 239 182 13
43854-236 186 11 232 195 16 241 208 19 244 214 54
43855-246 218 60 246 218 38 246 215 20 241 208 19
43856-241 208 19 226 184 13 121 87 25 2 2 6
43857- 2 2 6 2 2 6 2 2 6 2 2 6
43858- 50 50 50 82 82 82 34 34 34 10 10 10
43859- 0 0 0 0 0 0 0 0 0 0 0 0
43860- 0 0 0 0 0 0 0 0 0 0 0 0
43861- 0 0 0 0 0 0 0 0 0 0 0 0
43862- 0 0 0 0 0 0 0 0 0 0 0 0
43863- 0 0 0 0 0 0 0 0 0 0 0 0
43864- 0 0 0 0 0 0 0 0 0 0 0 0
43865- 0 0 0 0 0 0 0 0 0 0 0 0
43866- 0 0 0 0 0 0 0 0 0 0 0 0
43867- 0 0 0 0 0 0 0 0 0 0 0 0
43868- 0 0 0 0 0 0 0 0 0 0 0 0
43869- 0 0 0 0 0 0 0 0 0 0 0 0
43870- 0 0 0 0 0 0 0 0 0 0 0 0
43871- 0 0 0 0 0 0 0 0 0 10 10 10
43872- 34 34 34 82 82 82 30 30 30 61 42 6
43873-180 123 7 206 145 10 230 174 11 239 182 13
43874-234 190 10 238 202 15 241 208 19 246 218 74
43875-246 218 38 246 215 20 246 215 20 246 215 20
43876-226 184 13 215 174 15 184 144 12 6 6 6
43877- 2 2 6 2 2 6 2 2 6 2 2 6
43878- 26 26 26 94 94 94 42 42 42 14 14 14
43879- 0 0 0 0 0 0 0 0 0 0 0 0
43880- 0 0 0 0 0 0 0 0 0 0 0 0
43881- 0 0 0 0 0 0 0 0 0 0 0 0
43882- 0 0 0 0 0 0 0 0 0 0 0 0
43883- 0 0 0 0 0 0 0 0 0 0 0 0
43884- 0 0 0 0 0 0 0 0 0 0 0 0
43885- 0 0 0 0 0 0 0 0 0 0 0 0
43886- 0 0 0 0 0 0 0 0 0 0 0 0
43887- 0 0 0 0 0 0 0 0 0 0 0 0
43888- 0 0 0 0 0 0 0 0 0 0 0 0
43889- 0 0 0 0 0 0 0 0 0 0 0 0
43890- 0 0 0 0 0 0 0 0 0 0 0 0
43891- 0 0 0 0 0 0 0 0 0 10 10 10
43892- 30 30 30 78 78 78 50 50 50 104 69 6
43893-192 133 9 216 158 10 236 178 12 236 186 11
43894-232 195 16 241 208 19 244 214 54 245 215 43
43895-246 215 20 246 215 20 241 208 19 198 155 10
43896-200 144 11 216 158 10 156 118 10 2 2 6
43897- 2 2 6 2 2 6 2 2 6 2 2 6
43898- 6 6 6 90 90 90 54 54 54 18 18 18
43899- 6 6 6 0 0 0 0 0 0 0 0 0
43900- 0 0 0 0 0 0 0 0 0 0 0 0
43901- 0 0 0 0 0 0 0 0 0 0 0 0
43902- 0 0 0 0 0 0 0 0 0 0 0 0
43903- 0 0 0 0 0 0 0 0 0 0 0 0
43904- 0 0 0 0 0 0 0 0 0 0 0 0
43905- 0 0 0 0 0 0 0 0 0 0 0 0
43906- 0 0 0 0 0 0 0 0 0 0 0 0
43907- 0 0 0 0 0 0 0 0 0 0 0 0
43908- 0 0 0 0 0 0 0 0 0 0 0 0
43909- 0 0 0 0 0 0 0 0 0 0 0 0
43910- 0 0 0 0 0 0 0 0 0 0 0 0
43911- 0 0 0 0 0 0 0 0 0 10 10 10
43912- 30 30 30 78 78 78 46 46 46 22 22 22
43913-137 92 6 210 162 10 239 182 13 238 190 10
43914-238 202 15 241 208 19 246 215 20 246 215 20
43915-241 208 19 203 166 17 185 133 11 210 150 10
43916-216 158 10 210 150 10 102 78 10 2 2 6
43917- 6 6 6 54 54 54 14 14 14 2 2 6
43918- 2 2 6 62 62 62 74 74 74 30 30 30
43919- 10 10 10 0 0 0 0 0 0 0 0 0
43920- 0 0 0 0 0 0 0 0 0 0 0 0
43921- 0 0 0 0 0 0 0 0 0 0 0 0
43922- 0 0 0 0 0 0 0 0 0 0 0 0
43923- 0 0 0 0 0 0 0 0 0 0 0 0
43924- 0 0 0 0 0 0 0 0 0 0 0 0
43925- 0 0 0 0 0 0 0 0 0 0 0 0
43926- 0 0 0 0 0 0 0 0 0 0 0 0
43927- 0 0 0 0 0 0 0 0 0 0 0 0
43928- 0 0 0 0 0 0 0 0 0 0 0 0
43929- 0 0 0 0 0 0 0 0 0 0 0 0
43930- 0 0 0 0 0 0 0 0 0 0 0 0
43931- 0 0 0 0 0 0 0 0 0 10 10 10
43932- 34 34 34 78 78 78 50 50 50 6 6 6
43933- 94 70 30 139 102 15 190 146 13 226 184 13
43934-232 200 30 232 195 16 215 174 15 190 146 13
43935-168 122 10 192 133 9 210 150 10 213 154 11
43936-202 150 34 182 157 106 101 98 89 2 2 6
43937- 2 2 6 78 78 78 116 116 116 58 58 58
43938- 2 2 6 22 22 22 90 90 90 46 46 46
43939- 18 18 18 6 6 6 0 0 0 0 0 0
43940- 0 0 0 0 0 0 0 0 0 0 0 0
43941- 0 0 0 0 0 0 0 0 0 0 0 0
43942- 0 0 0 0 0 0 0 0 0 0 0 0
43943- 0 0 0 0 0 0 0 0 0 0 0 0
43944- 0 0 0 0 0 0 0 0 0 0 0 0
43945- 0 0 0 0 0 0 0 0 0 0 0 0
43946- 0 0 0 0 0 0 0 0 0 0 0 0
43947- 0 0 0 0 0 0 0 0 0 0 0 0
43948- 0 0 0 0 0 0 0 0 0 0 0 0
43949- 0 0 0 0 0 0 0 0 0 0 0 0
43950- 0 0 0 0 0 0 0 0 0 0 0 0
43951- 0 0 0 0 0 0 0 0 0 10 10 10
43952- 38 38 38 86 86 86 50 50 50 6 6 6
43953-128 128 128 174 154 114 156 107 11 168 122 10
43954-198 155 10 184 144 12 197 138 11 200 144 11
43955-206 145 10 206 145 10 197 138 11 188 164 115
43956-195 195 195 198 198 198 174 174 174 14 14 14
43957- 2 2 6 22 22 22 116 116 116 116 116 116
43958- 22 22 22 2 2 6 74 74 74 70 70 70
43959- 30 30 30 10 10 10 0 0 0 0 0 0
43960- 0 0 0 0 0 0 0 0 0 0 0 0
43961- 0 0 0 0 0 0 0 0 0 0 0 0
43962- 0 0 0 0 0 0 0 0 0 0 0 0
43963- 0 0 0 0 0 0 0 0 0 0 0 0
43964- 0 0 0 0 0 0 0 0 0 0 0 0
43965- 0 0 0 0 0 0 0 0 0 0 0 0
43966- 0 0 0 0 0 0 0 0 0 0 0 0
43967- 0 0 0 0 0 0 0 0 0 0 0 0
43968- 0 0 0 0 0 0 0 0 0 0 0 0
43969- 0 0 0 0 0 0 0 0 0 0 0 0
43970- 0 0 0 0 0 0 0 0 0 0 0 0
43971- 0 0 0 0 0 0 6 6 6 18 18 18
43972- 50 50 50 101 101 101 26 26 26 10 10 10
43973-138 138 138 190 190 190 174 154 114 156 107 11
43974-197 138 11 200 144 11 197 138 11 192 133 9
43975-180 123 7 190 142 34 190 178 144 187 187 187
43976-202 202 202 221 221 221 214 214 214 66 66 66
43977- 2 2 6 2 2 6 50 50 50 62 62 62
43978- 6 6 6 2 2 6 10 10 10 90 90 90
43979- 50 50 50 18 18 18 6 6 6 0 0 0
43980- 0 0 0 0 0 0 0 0 0 0 0 0
43981- 0 0 0 0 0 0 0 0 0 0 0 0
43982- 0 0 0 0 0 0 0 0 0 0 0 0
43983- 0 0 0 0 0 0 0 0 0 0 0 0
43984- 0 0 0 0 0 0 0 0 0 0 0 0
43985- 0 0 0 0 0 0 0 0 0 0 0 0
43986- 0 0 0 0 0 0 0 0 0 0 0 0
43987- 0 0 0 0 0 0 0 0 0 0 0 0
43988- 0 0 0 0 0 0 0 0 0 0 0 0
43989- 0 0 0 0 0 0 0 0 0 0 0 0
43990- 0 0 0 0 0 0 0 0 0 0 0 0
43991- 0 0 0 0 0 0 10 10 10 34 34 34
43992- 74 74 74 74 74 74 2 2 6 6 6 6
43993-144 144 144 198 198 198 190 190 190 178 166 146
43994-154 121 60 156 107 11 156 107 11 168 124 44
43995-174 154 114 187 187 187 190 190 190 210 210 210
43996-246 246 246 253 253 253 253 253 253 182 182 182
43997- 6 6 6 2 2 6 2 2 6 2 2 6
43998- 2 2 6 2 2 6 2 2 6 62 62 62
43999- 74 74 74 34 34 34 14 14 14 0 0 0
44000- 0 0 0 0 0 0 0 0 0 0 0 0
44001- 0 0 0 0 0 0 0 0 0 0 0 0
44002- 0 0 0 0 0 0 0 0 0 0 0 0
44003- 0 0 0 0 0 0 0 0 0 0 0 0
44004- 0 0 0 0 0 0 0 0 0 0 0 0
44005- 0 0 0 0 0 0 0 0 0 0 0 0
44006- 0 0 0 0 0 0 0 0 0 0 0 0
44007- 0 0 0 0 0 0 0 0 0 0 0 0
44008- 0 0 0 0 0 0 0 0 0 0 0 0
44009- 0 0 0 0 0 0 0 0 0 0 0 0
44010- 0 0 0 0 0 0 0 0 0 0 0 0
44011- 0 0 0 10 10 10 22 22 22 54 54 54
44012- 94 94 94 18 18 18 2 2 6 46 46 46
44013-234 234 234 221 221 221 190 190 190 190 190 190
44014-190 190 190 187 187 187 187 187 187 190 190 190
44015-190 190 190 195 195 195 214 214 214 242 242 242
44016-253 253 253 253 253 253 253 253 253 253 253 253
44017- 82 82 82 2 2 6 2 2 6 2 2 6
44018- 2 2 6 2 2 6 2 2 6 14 14 14
44019- 86 86 86 54 54 54 22 22 22 6 6 6
44020- 0 0 0 0 0 0 0 0 0 0 0 0
44021- 0 0 0 0 0 0 0 0 0 0 0 0
44022- 0 0 0 0 0 0 0 0 0 0 0 0
44023- 0 0 0 0 0 0 0 0 0 0 0 0
44024- 0 0 0 0 0 0 0 0 0 0 0 0
44025- 0 0 0 0 0 0 0 0 0 0 0 0
44026- 0 0 0 0 0 0 0 0 0 0 0 0
44027- 0 0 0 0 0 0 0 0 0 0 0 0
44028- 0 0 0 0 0 0 0 0 0 0 0 0
44029- 0 0 0 0 0 0 0 0 0 0 0 0
44030- 0 0 0 0 0 0 0 0 0 0 0 0
44031- 6 6 6 18 18 18 46 46 46 90 90 90
44032- 46 46 46 18 18 18 6 6 6 182 182 182
44033-253 253 253 246 246 246 206 206 206 190 190 190
44034-190 190 190 190 190 190 190 190 190 190 190 190
44035-206 206 206 231 231 231 250 250 250 253 253 253
44036-253 253 253 253 253 253 253 253 253 253 253 253
44037-202 202 202 14 14 14 2 2 6 2 2 6
44038- 2 2 6 2 2 6 2 2 6 2 2 6
44039- 42 42 42 86 86 86 42 42 42 18 18 18
44040- 6 6 6 0 0 0 0 0 0 0 0 0
44041- 0 0 0 0 0 0 0 0 0 0 0 0
44042- 0 0 0 0 0 0 0 0 0 0 0 0
44043- 0 0 0 0 0 0 0 0 0 0 0 0
44044- 0 0 0 0 0 0 0 0 0 0 0 0
44045- 0 0 0 0 0 0 0 0 0 0 0 0
44046- 0 0 0 0 0 0 0 0 0 0 0 0
44047- 0 0 0 0 0 0 0 0 0 0 0 0
44048- 0 0 0 0 0 0 0 0 0 0 0 0
44049- 0 0 0 0 0 0 0 0 0 0 0 0
44050- 0 0 0 0 0 0 0 0 0 6 6 6
44051- 14 14 14 38 38 38 74 74 74 66 66 66
44052- 2 2 6 6 6 6 90 90 90 250 250 250
44053-253 253 253 253 253 253 238 238 238 198 198 198
44054-190 190 190 190 190 190 195 195 195 221 221 221
44055-246 246 246 253 253 253 253 253 253 253 253 253
44056-253 253 253 253 253 253 253 253 253 253 253 253
44057-253 253 253 82 82 82 2 2 6 2 2 6
44058- 2 2 6 2 2 6 2 2 6 2 2 6
44059- 2 2 6 78 78 78 70 70 70 34 34 34
44060- 14 14 14 6 6 6 0 0 0 0 0 0
44061- 0 0 0 0 0 0 0 0 0 0 0 0
44062- 0 0 0 0 0 0 0 0 0 0 0 0
44063- 0 0 0 0 0 0 0 0 0 0 0 0
44064- 0 0 0 0 0 0 0 0 0 0 0 0
44065- 0 0 0 0 0 0 0 0 0 0 0 0
44066- 0 0 0 0 0 0 0 0 0 0 0 0
44067- 0 0 0 0 0 0 0 0 0 0 0 0
44068- 0 0 0 0 0 0 0 0 0 0 0 0
44069- 0 0 0 0 0 0 0 0 0 0 0 0
44070- 0 0 0 0 0 0 0 0 0 14 14 14
44071- 34 34 34 66 66 66 78 78 78 6 6 6
44072- 2 2 6 18 18 18 218 218 218 253 253 253
44073-253 253 253 253 253 253 253 253 253 246 246 246
44074-226 226 226 231 231 231 246 246 246 253 253 253
44075-253 253 253 253 253 253 253 253 253 253 253 253
44076-253 253 253 253 253 253 253 253 253 253 253 253
44077-253 253 253 178 178 178 2 2 6 2 2 6
44078- 2 2 6 2 2 6 2 2 6 2 2 6
44079- 2 2 6 18 18 18 90 90 90 62 62 62
44080- 30 30 30 10 10 10 0 0 0 0 0 0
44081- 0 0 0 0 0 0 0 0 0 0 0 0
44082- 0 0 0 0 0 0 0 0 0 0 0 0
44083- 0 0 0 0 0 0 0 0 0 0 0 0
44084- 0 0 0 0 0 0 0 0 0 0 0 0
44085- 0 0 0 0 0 0 0 0 0 0 0 0
44086- 0 0 0 0 0 0 0 0 0 0 0 0
44087- 0 0 0 0 0 0 0 0 0 0 0 0
44088- 0 0 0 0 0 0 0 0 0 0 0 0
44089- 0 0 0 0 0 0 0 0 0 0 0 0
44090- 0 0 0 0 0 0 10 10 10 26 26 26
44091- 58 58 58 90 90 90 18 18 18 2 2 6
44092- 2 2 6 110 110 110 253 253 253 253 253 253
44093-253 253 253 253 253 253 253 253 253 253 253 253
44094-250 250 250 253 253 253 253 253 253 253 253 253
44095-253 253 253 253 253 253 253 253 253 253 253 253
44096-253 253 253 253 253 253 253 253 253 253 253 253
44097-253 253 253 231 231 231 18 18 18 2 2 6
44098- 2 2 6 2 2 6 2 2 6 2 2 6
44099- 2 2 6 2 2 6 18 18 18 94 94 94
44100- 54 54 54 26 26 26 10 10 10 0 0 0
44101- 0 0 0 0 0 0 0 0 0 0 0 0
44102- 0 0 0 0 0 0 0 0 0 0 0 0
44103- 0 0 0 0 0 0 0 0 0 0 0 0
44104- 0 0 0 0 0 0 0 0 0 0 0 0
44105- 0 0 0 0 0 0 0 0 0 0 0 0
44106- 0 0 0 0 0 0 0 0 0 0 0 0
44107- 0 0 0 0 0 0 0 0 0 0 0 0
44108- 0 0 0 0 0 0 0 0 0 0 0 0
44109- 0 0 0 0 0 0 0 0 0 0 0 0
44110- 0 0 0 6 6 6 22 22 22 50 50 50
44111- 90 90 90 26 26 26 2 2 6 2 2 6
44112- 14 14 14 195 195 195 250 250 250 253 253 253
44113-253 253 253 253 253 253 253 253 253 253 253 253
44114-253 253 253 253 253 253 253 253 253 253 253 253
44115-253 253 253 253 253 253 253 253 253 253 253 253
44116-253 253 253 253 253 253 253 253 253 253 253 253
44117-250 250 250 242 242 242 54 54 54 2 2 6
44118- 2 2 6 2 2 6 2 2 6 2 2 6
44119- 2 2 6 2 2 6 2 2 6 38 38 38
44120- 86 86 86 50 50 50 22 22 22 6 6 6
44121- 0 0 0 0 0 0 0 0 0 0 0 0
44122- 0 0 0 0 0 0 0 0 0 0 0 0
44123- 0 0 0 0 0 0 0 0 0 0 0 0
44124- 0 0 0 0 0 0 0 0 0 0 0 0
44125- 0 0 0 0 0 0 0 0 0 0 0 0
44126- 0 0 0 0 0 0 0 0 0 0 0 0
44127- 0 0 0 0 0 0 0 0 0 0 0 0
44128- 0 0 0 0 0 0 0 0 0 0 0 0
44129- 0 0 0 0 0 0 0 0 0 0 0 0
44130- 6 6 6 14 14 14 38 38 38 82 82 82
44131- 34 34 34 2 2 6 2 2 6 2 2 6
44132- 42 42 42 195 195 195 246 246 246 253 253 253
44133-253 253 253 253 253 253 253 253 253 250 250 250
44134-242 242 242 242 242 242 250 250 250 253 253 253
44135-253 253 253 253 253 253 253 253 253 253 253 253
44136-253 253 253 250 250 250 246 246 246 238 238 238
44137-226 226 226 231 231 231 101 101 101 6 6 6
44138- 2 2 6 2 2 6 2 2 6 2 2 6
44139- 2 2 6 2 2 6 2 2 6 2 2 6
44140- 38 38 38 82 82 82 42 42 42 14 14 14
44141- 6 6 6 0 0 0 0 0 0 0 0 0
44142- 0 0 0 0 0 0 0 0 0 0 0 0
44143- 0 0 0 0 0 0 0 0 0 0 0 0
44144- 0 0 0 0 0 0 0 0 0 0 0 0
44145- 0 0 0 0 0 0 0 0 0 0 0 0
44146- 0 0 0 0 0 0 0 0 0 0 0 0
44147- 0 0 0 0 0 0 0 0 0 0 0 0
44148- 0 0 0 0 0 0 0 0 0 0 0 0
44149- 0 0 0 0 0 0 0 0 0 0 0 0
44150- 10 10 10 26 26 26 62 62 62 66 66 66
44151- 2 2 6 2 2 6 2 2 6 6 6 6
44152- 70 70 70 170 170 170 206 206 206 234 234 234
44153-246 246 246 250 250 250 250 250 250 238 238 238
44154-226 226 226 231 231 231 238 238 238 250 250 250
44155-250 250 250 250 250 250 246 246 246 231 231 231
44156-214 214 214 206 206 206 202 202 202 202 202 202
44157-198 198 198 202 202 202 182 182 182 18 18 18
44158- 2 2 6 2 2 6 2 2 6 2 2 6
44159- 2 2 6 2 2 6 2 2 6 2 2 6
44160- 2 2 6 62 62 62 66 66 66 30 30 30
44161- 10 10 10 0 0 0 0 0 0 0 0 0
44162- 0 0 0 0 0 0 0 0 0 0 0 0
44163- 0 0 0 0 0 0 0 0 0 0 0 0
44164- 0 0 0 0 0 0 0 0 0 0 0 0
44165- 0 0 0 0 0 0 0 0 0 0 0 0
44166- 0 0 0 0 0 0 0 0 0 0 0 0
44167- 0 0 0 0 0 0 0 0 0 0 0 0
44168- 0 0 0 0 0 0 0 0 0 0 0 0
44169- 0 0 0 0 0 0 0 0 0 0 0 0
44170- 14 14 14 42 42 42 82 82 82 18 18 18
44171- 2 2 6 2 2 6 2 2 6 10 10 10
44172- 94 94 94 182 182 182 218 218 218 242 242 242
44173-250 250 250 253 253 253 253 253 253 250 250 250
44174-234 234 234 253 253 253 253 253 253 253 253 253
44175-253 253 253 253 253 253 253 253 253 246 246 246
44176-238 238 238 226 226 226 210 210 210 202 202 202
44177-195 195 195 195 195 195 210 210 210 158 158 158
44178- 6 6 6 14 14 14 50 50 50 14 14 14
44179- 2 2 6 2 2 6 2 2 6 2 2 6
44180- 2 2 6 6 6 6 86 86 86 46 46 46
44181- 18 18 18 6 6 6 0 0 0 0 0 0
44182- 0 0 0 0 0 0 0 0 0 0 0 0
44183- 0 0 0 0 0 0 0 0 0 0 0 0
44184- 0 0 0 0 0 0 0 0 0 0 0 0
44185- 0 0 0 0 0 0 0 0 0 0 0 0
44186- 0 0 0 0 0 0 0 0 0 0 0 0
44187- 0 0 0 0 0 0 0 0 0 0 0 0
44188- 0 0 0 0 0 0 0 0 0 0 0 0
44189- 0 0 0 0 0 0 0 0 0 6 6 6
44190- 22 22 22 54 54 54 70 70 70 2 2 6
44191- 2 2 6 10 10 10 2 2 6 22 22 22
44192-166 166 166 231 231 231 250 250 250 253 253 253
44193-253 253 253 253 253 253 253 253 253 250 250 250
44194-242 242 242 253 253 253 253 253 253 253 253 253
44195-253 253 253 253 253 253 253 253 253 253 253 253
44196-253 253 253 253 253 253 253 253 253 246 246 246
44197-231 231 231 206 206 206 198 198 198 226 226 226
44198- 94 94 94 2 2 6 6 6 6 38 38 38
44199- 30 30 30 2 2 6 2 2 6 2 2 6
44200- 2 2 6 2 2 6 62 62 62 66 66 66
44201- 26 26 26 10 10 10 0 0 0 0 0 0
44202- 0 0 0 0 0 0 0 0 0 0 0 0
44203- 0 0 0 0 0 0 0 0 0 0 0 0
44204- 0 0 0 0 0 0 0 0 0 0 0 0
44205- 0 0 0 0 0 0 0 0 0 0 0 0
44206- 0 0 0 0 0 0 0 0 0 0 0 0
44207- 0 0 0 0 0 0 0 0 0 0 0 0
44208- 0 0 0 0 0 0 0 0 0 0 0 0
44209- 0 0 0 0 0 0 0 0 0 10 10 10
44210- 30 30 30 74 74 74 50 50 50 2 2 6
44211- 26 26 26 26 26 26 2 2 6 106 106 106
44212-238 238 238 253 253 253 253 253 253 253 253 253
44213-253 253 253 253 253 253 253 253 253 253 253 253
44214-253 253 253 253 253 253 253 253 253 253 253 253
44215-253 253 253 253 253 253 253 253 253 253 253 253
44216-253 253 253 253 253 253 253 253 253 253 253 253
44217-253 253 253 246 246 246 218 218 218 202 202 202
44218-210 210 210 14 14 14 2 2 6 2 2 6
44219- 30 30 30 22 22 22 2 2 6 2 2 6
44220- 2 2 6 2 2 6 18 18 18 86 86 86
44221- 42 42 42 14 14 14 0 0 0 0 0 0
44222- 0 0 0 0 0 0 0 0 0 0 0 0
44223- 0 0 0 0 0 0 0 0 0 0 0 0
44224- 0 0 0 0 0 0 0 0 0 0 0 0
44225- 0 0 0 0 0 0 0 0 0 0 0 0
44226- 0 0 0 0 0 0 0 0 0 0 0 0
44227- 0 0 0 0 0 0 0 0 0 0 0 0
44228- 0 0 0 0 0 0 0 0 0 0 0 0
44229- 0 0 0 0 0 0 0 0 0 14 14 14
44230- 42 42 42 90 90 90 22 22 22 2 2 6
44231- 42 42 42 2 2 6 18 18 18 218 218 218
44232-253 253 253 253 253 253 253 253 253 253 253 253
44233-253 253 253 253 253 253 253 253 253 253 253 253
44234-253 253 253 253 253 253 253 253 253 253 253 253
44235-253 253 253 253 253 253 253 253 253 253 253 253
44236-253 253 253 253 253 253 253 253 253 253 253 253
44237-253 253 253 253 253 253 250 250 250 221 221 221
44238-218 218 218 101 101 101 2 2 6 14 14 14
44239- 18 18 18 38 38 38 10 10 10 2 2 6
44240- 2 2 6 2 2 6 2 2 6 78 78 78
44241- 58 58 58 22 22 22 6 6 6 0 0 0
44242- 0 0 0 0 0 0 0 0 0 0 0 0
44243- 0 0 0 0 0 0 0 0 0 0 0 0
44244- 0 0 0 0 0 0 0 0 0 0 0 0
44245- 0 0 0 0 0 0 0 0 0 0 0 0
44246- 0 0 0 0 0 0 0 0 0 0 0 0
44247- 0 0 0 0 0 0 0 0 0 0 0 0
44248- 0 0 0 0 0 0 0 0 0 0 0 0
44249- 0 0 0 0 0 0 6 6 6 18 18 18
44250- 54 54 54 82 82 82 2 2 6 26 26 26
44251- 22 22 22 2 2 6 123 123 123 253 253 253
44252-253 253 253 253 253 253 253 253 253 253 253 253
44253-253 253 253 253 253 253 253 253 253 253 253 253
44254-253 253 253 253 253 253 253 253 253 253 253 253
44255-253 253 253 253 253 253 253 253 253 253 253 253
44256-253 253 253 253 253 253 253 253 253 253 253 253
44257-253 253 253 253 253 253 253 253 253 250 250 250
44258-238 238 238 198 198 198 6 6 6 38 38 38
44259- 58 58 58 26 26 26 38 38 38 2 2 6
44260- 2 2 6 2 2 6 2 2 6 46 46 46
44261- 78 78 78 30 30 30 10 10 10 0 0 0
44262- 0 0 0 0 0 0 0 0 0 0 0 0
44263- 0 0 0 0 0 0 0 0 0 0 0 0
44264- 0 0 0 0 0 0 0 0 0 0 0 0
44265- 0 0 0 0 0 0 0 0 0 0 0 0
44266- 0 0 0 0 0 0 0 0 0 0 0 0
44267- 0 0 0 0 0 0 0 0 0 0 0 0
44268- 0 0 0 0 0 0 0 0 0 0 0 0
44269- 0 0 0 0 0 0 10 10 10 30 30 30
44270- 74 74 74 58 58 58 2 2 6 42 42 42
44271- 2 2 6 22 22 22 231 231 231 253 253 253
44272-253 253 253 253 253 253 253 253 253 253 253 253
44273-253 253 253 253 253 253 253 253 253 250 250 250
44274-253 253 253 253 253 253 253 253 253 253 253 253
44275-253 253 253 253 253 253 253 253 253 253 253 253
44276-253 253 253 253 253 253 253 253 253 253 253 253
44277-253 253 253 253 253 253 253 253 253 253 253 253
44278-253 253 253 246 246 246 46 46 46 38 38 38
44279- 42 42 42 14 14 14 38 38 38 14 14 14
44280- 2 2 6 2 2 6 2 2 6 6 6 6
44281- 86 86 86 46 46 46 14 14 14 0 0 0
44282- 0 0 0 0 0 0 0 0 0 0 0 0
44283- 0 0 0 0 0 0 0 0 0 0 0 0
44284- 0 0 0 0 0 0 0 0 0 0 0 0
44285- 0 0 0 0 0 0 0 0 0 0 0 0
44286- 0 0 0 0 0 0 0 0 0 0 0 0
44287- 0 0 0 0 0 0 0 0 0 0 0 0
44288- 0 0 0 0 0 0 0 0 0 0 0 0
44289- 0 0 0 6 6 6 14 14 14 42 42 42
44290- 90 90 90 18 18 18 18 18 18 26 26 26
44291- 2 2 6 116 116 116 253 253 253 253 253 253
44292-253 253 253 253 253 253 253 253 253 253 253 253
44293-253 253 253 253 253 253 250 250 250 238 238 238
44294-253 253 253 253 253 253 253 253 253 253 253 253
44295-253 253 253 253 253 253 253 253 253 253 253 253
44296-253 253 253 253 253 253 253 253 253 253 253 253
44297-253 253 253 253 253 253 253 253 253 253 253 253
44298-253 253 253 253 253 253 94 94 94 6 6 6
44299- 2 2 6 2 2 6 10 10 10 34 34 34
44300- 2 2 6 2 2 6 2 2 6 2 2 6
44301- 74 74 74 58 58 58 22 22 22 6 6 6
44302- 0 0 0 0 0 0 0 0 0 0 0 0
44303- 0 0 0 0 0 0 0 0 0 0 0 0
44304- 0 0 0 0 0 0 0 0 0 0 0 0
44305- 0 0 0 0 0 0 0 0 0 0 0 0
44306- 0 0 0 0 0 0 0 0 0 0 0 0
44307- 0 0 0 0 0 0 0 0 0 0 0 0
44308- 0 0 0 0 0 0 0 0 0 0 0 0
44309- 0 0 0 10 10 10 26 26 26 66 66 66
44310- 82 82 82 2 2 6 38 38 38 6 6 6
44311- 14 14 14 210 210 210 253 253 253 253 253 253
44312-253 253 253 253 253 253 253 253 253 253 253 253
44313-253 253 253 253 253 253 246 246 246 242 242 242
44314-253 253 253 253 253 253 253 253 253 253 253 253
44315-253 253 253 253 253 253 253 253 253 253 253 253
44316-253 253 253 253 253 253 253 253 253 253 253 253
44317-253 253 253 253 253 253 253 253 253 253 253 253
44318-253 253 253 253 253 253 144 144 144 2 2 6
44319- 2 2 6 2 2 6 2 2 6 46 46 46
44320- 2 2 6 2 2 6 2 2 6 2 2 6
44321- 42 42 42 74 74 74 30 30 30 10 10 10
44322- 0 0 0 0 0 0 0 0 0 0 0 0
44323- 0 0 0 0 0 0 0 0 0 0 0 0
44324- 0 0 0 0 0 0 0 0 0 0 0 0
44325- 0 0 0 0 0 0 0 0 0 0 0 0
44326- 0 0 0 0 0 0 0 0 0 0 0 0
44327- 0 0 0 0 0 0 0 0 0 0 0 0
44328- 0 0 0 0 0 0 0 0 0 0 0 0
44329- 6 6 6 14 14 14 42 42 42 90 90 90
44330- 26 26 26 6 6 6 42 42 42 2 2 6
44331- 74 74 74 250 250 250 253 253 253 253 253 253
44332-253 253 253 253 253 253 253 253 253 253 253 253
44333-253 253 253 253 253 253 242 242 242 242 242 242
44334-253 253 253 253 253 253 253 253 253 253 253 253
44335-253 253 253 253 253 253 253 253 253 253 253 253
44336-253 253 253 253 253 253 253 253 253 253 253 253
44337-253 253 253 253 253 253 253 253 253 253 253 253
44338-253 253 253 253 253 253 182 182 182 2 2 6
44339- 2 2 6 2 2 6 2 2 6 46 46 46
44340- 2 2 6 2 2 6 2 2 6 2 2 6
44341- 10 10 10 86 86 86 38 38 38 10 10 10
44342- 0 0 0 0 0 0 0 0 0 0 0 0
44343- 0 0 0 0 0 0 0 0 0 0 0 0
44344- 0 0 0 0 0 0 0 0 0 0 0 0
44345- 0 0 0 0 0 0 0 0 0 0 0 0
44346- 0 0 0 0 0 0 0 0 0 0 0 0
44347- 0 0 0 0 0 0 0 0 0 0 0 0
44348- 0 0 0 0 0 0 0 0 0 0 0 0
44349- 10 10 10 26 26 26 66 66 66 82 82 82
44350- 2 2 6 22 22 22 18 18 18 2 2 6
44351-149 149 149 253 253 253 253 253 253 253 253 253
44352-253 253 253 253 253 253 253 253 253 253 253 253
44353-253 253 253 253 253 253 234 234 234 242 242 242
44354-253 253 253 253 253 253 253 253 253 253 253 253
44355-253 253 253 253 253 253 253 253 253 253 253 253
44356-253 253 253 253 253 253 253 253 253 253 253 253
44357-253 253 253 253 253 253 253 253 253 253 253 253
44358-253 253 253 253 253 253 206 206 206 2 2 6
44359- 2 2 6 2 2 6 2 2 6 38 38 38
44360- 2 2 6 2 2 6 2 2 6 2 2 6
44361- 6 6 6 86 86 86 46 46 46 14 14 14
44362- 0 0 0 0 0 0 0 0 0 0 0 0
44363- 0 0 0 0 0 0 0 0 0 0 0 0
44364- 0 0 0 0 0 0 0 0 0 0 0 0
44365- 0 0 0 0 0 0 0 0 0 0 0 0
44366- 0 0 0 0 0 0 0 0 0 0 0 0
44367- 0 0 0 0 0 0 0 0 0 0 0 0
44368- 0 0 0 0 0 0 0 0 0 6 6 6
44369- 18 18 18 46 46 46 86 86 86 18 18 18
44370- 2 2 6 34 34 34 10 10 10 6 6 6
44371-210 210 210 253 253 253 253 253 253 253 253 253
44372-253 253 253 253 253 253 253 253 253 253 253 253
44373-253 253 253 253 253 253 234 234 234 242 242 242
44374-253 253 253 253 253 253 253 253 253 253 253 253
44375-253 253 253 253 253 253 253 253 253 253 253 253
44376-253 253 253 253 253 253 253 253 253 253 253 253
44377-253 253 253 253 253 253 253 253 253 253 253 253
44378-253 253 253 253 253 253 221 221 221 6 6 6
44379- 2 2 6 2 2 6 6 6 6 30 30 30
44380- 2 2 6 2 2 6 2 2 6 2 2 6
44381- 2 2 6 82 82 82 54 54 54 18 18 18
44382- 6 6 6 0 0 0 0 0 0 0 0 0
44383- 0 0 0 0 0 0 0 0 0 0 0 0
44384- 0 0 0 0 0 0 0 0 0 0 0 0
44385- 0 0 0 0 0 0 0 0 0 0 0 0
44386- 0 0 0 0 0 0 0 0 0 0 0 0
44387- 0 0 0 0 0 0 0 0 0 0 0 0
44388- 0 0 0 0 0 0 0 0 0 10 10 10
44389- 26 26 26 66 66 66 62 62 62 2 2 6
44390- 2 2 6 38 38 38 10 10 10 26 26 26
44391-238 238 238 253 253 253 253 253 253 253 253 253
44392-253 253 253 253 253 253 253 253 253 253 253 253
44393-253 253 253 253 253 253 231 231 231 238 238 238
44394-253 253 253 253 253 253 253 253 253 253 253 253
44395-253 253 253 253 253 253 253 253 253 253 253 253
44396-253 253 253 253 253 253 253 253 253 253 253 253
44397-253 253 253 253 253 253 253 253 253 253 253 253
44398-253 253 253 253 253 253 231 231 231 6 6 6
44399- 2 2 6 2 2 6 10 10 10 30 30 30
44400- 2 2 6 2 2 6 2 2 6 2 2 6
44401- 2 2 6 66 66 66 58 58 58 22 22 22
44402- 6 6 6 0 0 0 0 0 0 0 0 0
44403- 0 0 0 0 0 0 0 0 0 0 0 0
44404- 0 0 0 0 0 0 0 0 0 0 0 0
44405- 0 0 0 0 0 0 0 0 0 0 0 0
44406- 0 0 0 0 0 0 0 0 0 0 0 0
44407- 0 0 0 0 0 0 0 0 0 0 0 0
44408- 0 0 0 0 0 0 0 0 0 10 10 10
44409- 38 38 38 78 78 78 6 6 6 2 2 6
44410- 2 2 6 46 46 46 14 14 14 42 42 42
44411-246 246 246 253 253 253 253 253 253 253 253 253
44412-253 253 253 253 253 253 253 253 253 253 253 253
44413-253 253 253 253 253 253 231 231 231 242 242 242
44414-253 253 253 253 253 253 253 253 253 253 253 253
44415-253 253 253 253 253 253 253 253 253 253 253 253
44416-253 253 253 253 253 253 253 253 253 253 253 253
44417-253 253 253 253 253 253 253 253 253 253 253 253
44418-253 253 253 253 253 253 234 234 234 10 10 10
44419- 2 2 6 2 2 6 22 22 22 14 14 14
44420- 2 2 6 2 2 6 2 2 6 2 2 6
44421- 2 2 6 66 66 66 62 62 62 22 22 22
44422- 6 6 6 0 0 0 0 0 0 0 0 0
44423- 0 0 0 0 0 0 0 0 0 0 0 0
44424- 0 0 0 0 0 0 0 0 0 0 0 0
44425- 0 0 0 0 0 0 0 0 0 0 0 0
44426- 0 0 0 0 0 0 0 0 0 0 0 0
44427- 0 0 0 0 0 0 0 0 0 0 0 0
44428- 0 0 0 0 0 0 6 6 6 18 18 18
44429- 50 50 50 74 74 74 2 2 6 2 2 6
44430- 14 14 14 70 70 70 34 34 34 62 62 62
44431-250 250 250 253 253 253 253 253 253 253 253 253
44432-253 253 253 253 253 253 253 253 253 253 253 253
44433-253 253 253 253 253 253 231 231 231 246 246 246
44434-253 253 253 253 253 253 253 253 253 253 253 253
44435-253 253 253 253 253 253 253 253 253 253 253 253
44436-253 253 253 253 253 253 253 253 253 253 253 253
44437-253 253 253 253 253 253 253 253 253 253 253 253
44438-253 253 253 253 253 253 234 234 234 14 14 14
44439- 2 2 6 2 2 6 30 30 30 2 2 6
44440- 2 2 6 2 2 6 2 2 6 2 2 6
44441- 2 2 6 66 66 66 62 62 62 22 22 22
44442- 6 6 6 0 0 0 0 0 0 0 0 0
44443- 0 0 0 0 0 0 0 0 0 0 0 0
44444- 0 0 0 0 0 0 0 0 0 0 0 0
44445- 0 0 0 0 0 0 0 0 0 0 0 0
44446- 0 0 0 0 0 0 0 0 0 0 0 0
44447- 0 0 0 0 0 0 0 0 0 0 0 0
44448- 0 0 0 0 0 0 6 6 6 18 18 18
44449- 54 54 54 62 62 62 2 2 6 2 2 6
44450- 2 2 6 30 30 30 46 46 46 70 70 70
44451-250 250 250 253 253 253 253 253 253 253 253 253
44452-253 253 253 253 253 253 253 253 253 253 253 253
44453-253 253 253 253 253 253 231 231 231 246 246 246
44454-253 253 253 253 253 253 253 253 253 253 253 253
44455-253 253 253 253 253 253 253 253 253 253 253 253
44456-253 253 253 253 253 253 253 253 253 253 253 253
44457-253 253 253 253 253 253 253 253 253 253 253 253
44458-253 253 253 253 253 253 226 226 226 10 10 10
44459- 2 2 6 6 6 6 30 30 30 2 2 6
44460- 2 2 6 2 2 6 2 2 6 2 2 6
44461- 2 2 6 66 66 66 58 58 58 22 22 22
44462- 6 6 6 0 0 0 0 0 0 0 0 0
44463- 0 0 0 0 0 0 0 0 0 0 0 0
44464- 0 0 0 0 0 0 0 0 0 0 0 0
44465- 0 0 0 0 0 0 0 0 0 0 0 0
44466- 0 0 0 0 0 0 0 0 0 0 0 0
44467- 0 0 0 0 0 0 0 0 0 0 0 0
44468- 0 0 0 0 0 0 6 6 6 22 22 22
44469- 58 58 58 62 62 62 2 2 6 2 2 6
44470- 2 2 6 2 2 6 30 30 30 78 78 78
44471-250 250 250 253 253 253 253 253 253 253 253 253
44472-253 253 253 253 253 253 253 253 253 253 253 253
44473-253 253 253 253 253 253 231 231 231 246 246 246
44474-253 253 253 253 253 253 253 253 253 253 253 253
44475-253 253 253 253 253 253 253 253 253 253 253 253
44476-253 253 253 253 253 253 253 253 253 253 253 253
44477-253 253 253 253 253 253 253 253 253 253 253 253
44478-253 253 253 253 253 253 206 206 206 2 2 6
44479- 22 22 22 34 34 34 18 14 6 22 22 22
44480- 26 26 26 18 18 18 6 6 6 2 2 6
44481- 2 2 6 82 82 82 54 54 54 18 18 18
44482- 6 6 6 0 0 0 0 0 0 0 0 0
44483- 0 0 0 0 0 0 0 0 0 0 0 0
44484- 0 0 0 0 0 0 0 0 0 0 0 0
44485- 0 0 0 0 0 0 0 0 0 0 0 0
44486- 0 0 0 0 0 0 0 0 0 0 0 0
44487- 0 0 0 0 0 0 0 0 0 0 0 0
44488- 0 0 0 0 0 0 6 6 6 26 26 26
44489- 62 62 62 106 106 106 74 54 14 185 133 11
44490-210 162 10 121 92 8 6 6 6 62 62 62
44491-238 238 238 253 253 253 253 253 253 253 253 253
44492-253 253 253 253 253 253 253 253 253 253 253 253
44493-253 253 253 253 253 253 231 231 231 246 246 246
44494-253 253 253 253 253 253 253 253 253 253 253 253
44495-253 253 253 253 253 253 253 253 253 253 253 253
44496-253 253 253 253 253 253 253 253 253 253 253 253
44497-253 253 253 253 253 253 253 253 253 253 253 253
44498-253 253 253 253 253 253 158 158 158 18 18 18
44499- 14 14 14 2 2 6 2 2 6 2 2 6
44500- 6 6 6 18 18 18 66 66 66 38 38 38
44501- 6 6 6 94 94 94 50 50 50 18 18 18
44502- 6 6 6 0 0 0 0 0 0 0 0 0
44503- 0 0 0 0 0 0 0 0 0 0 0 0
44504- 0 0 0 0 0 0 0 0 0 0 0 0
44505- 0 0 0 0 0 0 0 0 0 0 0 0
44506- 0 0 0 0 0 0 0 0 0 0 0 0
44507- 0 0 0 0 0 0 0 0 0 6 6 6
44508- 10 10 10 10 10 10 18 18 18 38 38 38
44509- 78 78 78 142 134 106 216 158 10 242 186 14
44510-246 190 14 246 190 14 156 118 10 10 10 10
44511- 90 90 90 238 238 238 253 253 253 253 253 253
44512-253 253 253 253 253 253 253 253 253 253 253 253
44513-253 253 253 253 253 253 231 231 231 250 250 250
44514-253 253 253 253 253 253 253 253 253 253 253 253
44515-253 253 253 253 253 253 253 253 253 253 253 253
44516-253 253 253 253 253 253 253 253 253 253 253 253
44517-253 253 253 253 253 253 253 253 253 246 230 190
44518-238 204 91 238 204 91 181 142 44 37 26 9
44519- 2 2 6 2 2 6 2 2 6 2 2 6
44520- 2 2 6 2 2 6 38 38 38 46 46 46
44521- 26 26 26 106 106 106 54 54 54 18 18 18
44522- 6 6 6 0 0 0 0 0 0 0 0 0
44523- 0 0 0 0 0 0 0 0 0 0 0 0
44524- 0 0 0 0 0 0 0 0 0 0 0 0
44525- 0 0 0 0 0 0 0 0 0 0 0 0
44526- 0 0 0 0 0 0 0 0 0 0 0 0
44527- 0 0 0 6 6 6 14 14 14 22 22 22
44528- 30 30 30 38 38 38 50 50 50 70 70 70
44529-106 106 106 190 142 34 226 170 11 242 186 14
44530-246 190 14 246 190 14 246 190 14 154 114 10
44531- 6 6 6 74 74 74 226 226 226 253 253 253
44532-253 253 253 253 253 253 253 253 253 253 253 253
44533-253 253 253 253 253 253 231 231 231 250 250 250
44534-253 253 253 253 253 253 253 253 253 253 253 253
44535-253 253 253 253 253 253 253 253 253 253 253 253
44536-253 253 253 253 253 253 253 253 253 253 253 253
44537-253 253 253 253 253 253 253 253 253 228 184 62
44538-241 196 14 241 208 19 232 195 16 38 30 10
44539- 2 2 6 2 2 6 2 2 6 2 2 6
44540- 2 2 6 6 6 6 30 30 30 26 26 26
44541-203 166 17 154 142 90 66 66 66 26 26 26
44542- 6 6 6 0 0 0 0 0 0 0 0 0
44543- 0 0 0 0 0 0 0 0 0 0 0 0
44544- 0 0 0 0 0 0 0 0 0 0 0 0
44545- 0 0 0 0 0 0 0 0 0 0 0 0
44546- 0 0 0 0 0 0 0 0 0 0 0 0
44547- 6 6 6 18 18 18 38 38 38 58 58 58
44548- 78 78 78 86 86 86 101 101 101 123 123 123
44549-175 146 61 210 150 10 234 174 13 246 186 14
44550-246 190 14 246 190 14 246 190 14 238 190 10
44551-102 78 10 2 2 6 46 46 46 198 198 198
44552-253 253 253 253 253 253 253 253 253 253 253 253
44553-253 253 253 253 253 253 234 234 234 242 242 242
44554-253 253 253 253 253 253 253 253 253 253 253 253
44555-253 253 253 253 253 253 253 253 253 253 253 253
44556-253 253 253 253 253 253 253 253 253 253 253 253
44557-253 253 253 253 253 253 253 253 253 224 178 62
44558-242 186 14 241 196 14 210 166 10 22 18 6
44559- 2 2 6 2 2 6 2 2 6 2 2 6
44560- 2 2 6 2 2 6 6 6 6 121 92 8
44561-238 202 15 232 195 16 82 82 82 34 34 34
44562- 10 10 10 0 0 0 0 0 0 0 0 0
44563- 0 0 0 0 0 0 0 0 0 0 0 0
44564- 0 0 0 0 0 0 0 0 0 0 0 0
44565- 0 0 0 0 0 0 0 0 0 0 0 0
44566- 0 0 0 0 0 0 0 0 0 0 0 0
44567- 14 14 14 38 38 38 70 70 70 154 122 46
44568-190 142 34 200 144 11 197 138 11 197 138 11
44569-213 154 11 226 170 11 242 186 14 246 190 14
44570-246 190 14 246 190 14 246 190 14 246 190 14
44571-225 175 15 46 32 6 2 2 6 22 22 22
44572-158 158 158 250 250 250 253 253 253 253 253 253
44573-253 253 253 253 253 253 253 253 253 253 253 253
44574-253 253 253 253 253 253 253 253 253 253 253 253
44575-253 253 253 253 253 253 253 253 253 253 253 253
44576-253 253 253 253 253 253 253 253 253 253 253 253
44577-253 253 253 250 250 250 242 242 242 224 178 62
44578-239 182 13 236 186 11 213 154 11 46 32 6
44579- 2 2 6 2 2 6 2 2 6 2 2 6
44580- 2 2 6 2 2 6 61 42 6 225 175 15
44581-238 190 10 236 186 11 112 100 78 42 42 42
44582- 14 14 14 0 0 0 0 0 0 0 0 0
44583- 0 0 0 0 0 0 0 0 0 0 0 0
44584- 0 0 0 0 0 0 0 0 0 0 0 0
44585- 0 0 0 0 0 0 0 0 0 0 0 0
44586- 0 0 0 0 0 0 0 0 0 6 6 6
44587- 22 22 22 54 54 54 154 122 46 213 154 11
44588-226 170 11 230 174 11 226 170 11 226 170 11
44589-236 178 12 242 186 14 246 190 14 246 190 14
44590-246 190 14 246 190 14 246 190 14 246 190 14
44591-241 196 14 184 144 12 10 10 10 2 2 6
44592- 6 6 6 116 116 116 242 242 242 253 253 253
44593-253 253 253 253 253 253 253 253 253 253 253 253
44594-253 253 253 253 253 253 253 253 253 253 253 253
44595-253 253 253 253 253 253 253 253 253 253 253 253
44596-253 253 253 253 253 253 253 253 253 253 253 253
44597-253 253 253 231 231 231 198 198 198 214 170 54
44598-236 178 12 236 178 12 210 150 10 137 92 6
44599- 18 14 6 2 2 6 2 2 6 2 2 6
44600- 6 6 6 70 47 6 200 144 11 236 178 12
44601-239 182 13 239 182 13 124 112 88 58 58 58
44602- 22 22 22 6 6 6 0 0 0 0 0 0
44603- 0 0 0 0 0 0 0 0 0 0 0 0
44604- 0 0 0 0 0 0 0 0 0 0 0 0
44605- 0 0 0 0 0 0 0 0 0 0 0 0
44606- 0 0 0 0 0 0 0 0 0 10 10 10
44607- 30 30 30 70 70 70 180 133 36 226 170 11
44608-239 182 13 242 186 14 242 186 14 246 186 14
44609-246 190 14 246 190 14 246 190 14 246 190 14
44610-246 190 14 246 190 14 246 190 14 246 190 14
44611-246 190 14 232 195 16 98 70 6 2 2 6
44612- 2 2 6 2 2 6 66 66 66 221 221 221
44613-253 253 253 253 253 253 253 253 253 253 253 253
44614-253 253 253 253 253 253 253 253 253 253 253 253
44615-253 253 253 253 253 253 253 253 253 253 253 253
44616-253 253 253 253 253 253 253 253 253 253 253 253
44617-253 253 253 206 206 206 198 198 198 214 166 58
44618-230 174 11 230 174 11 216 158 10 192 133 9
44619-163 110 8 116 81 8 102 78 10 116 81 8
44620-167 114 7 197 138 11 226 170 11 239 182 13
44621-242 186 14 242 186 14 162 146 94 78 78 78
44622- 34 34 34 14 14 14 6 6 6 0 0 0
44623- 0 0 0 0 0 0 0 0 0 0 0 0
44624- 0 0 0 0 0 0 0 0 0 0 0 0
44625- 0 0 0 0 0 0 0 0 0 0 0 0
44626- 0 0 0 0 0 0 0 0 0 6 6 6
44627- 30 30 30 78 78 78 190 142 34 226 170 11
44628-239 182 13 246 190 14 246 190 14 246 190 14
44629-246 190 14 246 190 14 246 190 14 246 190 14
44630-246 190 14 246 190 14 246 190 14 246 190 14
44631-246 190 14 241 196 14 203 166 17 22 18 6
44632- 2 2 6 2 2 6 2 2 6 38 38 38
44633-218 218 218 253 253 253 253 253 253 253 253 253
44634-253 253 253 253 253 253 253 253 253 253 253 253
44635-253 253 253 253 253 253 253 253 253 253 253 253
44636-253 253 253 253 253 253 253 253 253 253 253 253
44637-250 250 250 206 206 206 198 198 198 202 162 69
44638-226 170 11 236 178 12 224 166 10 210 150 10
44639-200 144 11 197 138 11 192 133 9 197 138 11
44640-210 150 10 226 170 11 242 186 14 246 190 14
44641-246 190 14 246 186 14 225 175 15 124 112 88
44642- 62 62 62 30 30 30 14 14 14 6 6 6
44643- 0 0 0 0 0 0 0 0 0 0 0 0
44644- 0 0 0 0 0 0 0 0 0 0 0 0
44645- 0 0 0 0 0 0 0 0 0 0 0 0
44646- 0 0 0 0 0 0 0 0 0 10 10 10
44647- 30 30 30 78 78 78 174 135 50 224 166 10
44648-239 182 13 246 190 14 246 190 14 246 190 14
44649-246 190 14 246 190 14 246 190 14 246 190 14
44650-246 190 14 246 190 14 246 190 14 246 190 14
44651-246 190 14 246 190 14 241 196 14 139 102 15
44652- 2 2 6 2 2 6 2 2 6 2 2 6
44653- 78 78 78 250 250 250 253 253 253 253 253 253
44654-253 253 253 253 253 253 253 253 253 253 253 253
44655-253 253 253 253 253 253 253 253 253 253 253 253
44656-253 253 253 253 253 253 253 253 253 253 253 253
44657-250 250 250 214 214 214 198 198 198 190 150 46
44658-219 162 10 236 178 12 234 174 13 224 166 10
44659-216 158 10 213 154 11 213 154 11 216 158 10
44660-226 170 11 239 182 13 246 190 14 246 190 14
44661-246 190 14 246 190 14 242 186 14 206 162 42
44662-101 101 101 58 58 58 30 30 30 14 14 14
44663- 6 6 6 0 0 0 0 0 0 0 0 0
44664- 0 0 0 0 0 0 0 0 0 0 0 0
44665- 0 0 0 0 0 0 0 0 0 0 0 0
44666- 0 0 0 0 0 0 0 0 0 10 10 10
44667- 30 30 30 74 74 74 174 135 50 216 158 10
44668-236 178 12 246 190 14 246 190 14 246 190 14
44669-246 190 14 246 190 14 246 190 14 246 190 14
44670-246 190 14 246 190 14 246 190 14 246 190 14
44671-246 190 14 246 190 14 241 196 14 226 184 13
44672- 61 42 6 2 2 6 2 2 6 2 2 6
44673- 22 22 22 238 238 238 253 253 253 253 253 253
44674-253 253 253 253 253 253 253 253 253 253 253 253
44675-253 253 253 253 253 253 253 253 253 253 253 253
44676-253 253 253 253 253 253 253 253 253 253 253 253
44677-253 253 253 226 226 226 187 187 187 180 133 36
44678-216 158 10 236 178 12 239 182 13 236 178 12
44679-230 174 11 226 170 11 226 170 11 230 174 11
44680-236 178 12 242 186 14 246 190 14 246 190 14
44681-246 190 14 246 190 14 246 186 14 239 182 13
44682-206 162 42 106 106 106 66 66 66 34 34 34
44683- 14 14 14 6 6 6 0 0 0 0 0 0
44684- 0 0 0 0 0 0 0 0 0 0 0 0
44685- 0 0 0 0 0 0 0 0 0 0 0 0
44686- 0 0 0 0 0 0 0 0 0 6 6 6
44687- 26 26 26 70 70 70 163 133 67 213 154 11
44688-236 178 12 246 190 14 246 190 14 246 190 14
44689-246 190 14 246 190 14 246 190 14 246 190 14
44690-246 190 14 246 190 14 246 190 14 246 190 14
44691-246 190 14 246 190 14 246 190 14 241 196 14
44692-190 146 13 18 14 6 2 2 6 2 2 6
44693- 46 46 46 246 246 246 253 253 253 253 253 253
44694-253 253 253 253 253 253 253 253 253 253 253 253
44695-253 253 253 253 253 253 253 253 253 253 253 253
44696-253 253 253 253 253 253 253 253 253 253 253 253
44697-253 253 253 221 221 221 86 86 86 156 107 11
44698-216 158 10 236 178 12 242 186 14 246 186 14
44699-242 186 14 239 182 13 239 182 13 242 186 14
44700-242 186 14 246 186 14 246 190 14 246 190 14
44701-246 190 14 246 190 14 246 190 14 246 190 14
44702-242 186 14 225 175 15 142 122 72 66 66 66
44703- 30 30 30 10 10 10 0 0 0 0 0 0
44704- 0 0 0 0 0 0 0 0 0 0 0 0
44705- 0 0 0 0 0 0 0 0 0 0 0 0
44706- 0 0 0 0 0 0 0 0 0 6 6 6
44707- 26 26 26 70 70 70 163 133 67 210 150 10
44708-236 178 12 246 190 14 246 190 14 246 190 14
44709-246 190 14 246 190 14 246 190 14 246 190 14
44710-246 190 14 246 190 14 246 190 14 246 190 14
44711-246 190 14 246 190 14 246 190 14 246 190 14
44712-232 195 16 121 92 8 34 34 34 106 106 106
44713-221 221 221 253 253 253 253 253 253 253 253 253
44714-253 253 253 253 253 253 253 253 253 253 253 253
44715-253 253 253 253 253 253 253 253 253 253 253 253
44716-253 253 253 253 253 253 253 253 253 253 253 253
44717-242 242 242 82 82 82 18 14 6 163 110 8
44718-216 158 10 236 178 12 242 186 14 246 190 14
44719-246 190 14 246 190 14 246 190 14 246 190 14
44720-246 190 14 246 190 14 246 190 14 246 190 14
44721-246 190 14 246 190 14 246 190 14 246 190 14
44722-246 190 14 246 190 14 242 186 14 163 133 67
44723- 46 46 46 18 18 18 6 6 6 0 0 0
44724- 0 0 0 0 0 0 0 0 0 0 0 0
44725- 0 0 0 0 0 0 0 0 0 0 0 0
44726- 0 0 0 0 0 0 0 0 0 10 10 10
44727- 30 30 30 78 78 78 163 133 67 210 150 10
44728-236 178 12 246 186 14 246 190 14 246 190 14
44729-246 190 14 246 190 14 246 190 14 246 190 14
44730-246 190 14 246 190 14 246 190 14 246 190 14
44731-246 190 14 246 190 14 246 190 14 246 190 14
44732-241 196 14 215 174 15 190 178 144 253 253 253
44733-253 253 253 253 253 253 253 253 253 253 253 253
44734-253 253 253 253 253 253 253 253 253 253 253 253
44735-253 253 253 253 253 253 253 253 253 253 253 253
44736-253 253 253 253 253 253 253 253 253 218 218 218
44737- 58 58 58 2 2 6 22 18 6 167 114 7
44738-216 158 10 236 178 12 246 186 14 246 190 14
44739-246 190 14 246 190 14 246 190 14 246 190 14
44740-246 190 14 246 190 14 246 190 14 246 190 14
44741-246 190 14 246 190 14 246 190 14 246 190 14
44742-246 190 14 246 186 14 242 186 14 190 150 46
44743- 54 54 54 22 22 22 6 6 6 0 0 0
44744- 0 0 0 0 0 0 0 0 0 0 0 0
44745- 0 0 0 0 0 0 0 0 0 0 0 0
44746- 0 0 0 0 0 0 0 0 0 14 14 14
44747- 38 38 38 86 86 86 180 133 36 213 154 11
44748-236 178 12 246 186 14 246 190 14 246 190 14
44749-246 190 14 246 190 14 246 190 14 246 190 14
44750-246 190 14 246 190 14 246 190 14 246 190 14
44751-246 190 14 246 190 14 246 190 14 246 190 14
44752-246 190 14 232 195 16 190 146 13 214 214 214
44753-253 253 253 253 253 253 253 253 253 253 253 253
44754-253 253 253 253 253 253 253 253 253 253 253 253
44755-253 253 253 253 253 253 253 253 253 253 253 253
44756-253 253 253 250 250 250 170 170 170 26 26 26
44757- 2 2 6 2 2 6 37 26 9 163 110 8
44758-219 162 10 239 182 13 246 186 14 246 190 14
44759-246 190 14 246 190 14 246 190 14 246 190 14
44760-246 190 14 246 190 14 246 190 14 246 190 14
44761-246 190 14 246 190 14 246 190 14 246 190 14
44762-246 186 14 236 178 12 224 166 10 142 122 72
44763- 46 46 46 18 18 18 6 6 6 0 0 0
44764- 0 0 0 0 0 0 0 0 0 0 0 0
44765- 0 0 0 0 0 0 0 0 0 0 0 0
44766- 0 0 0 0 0 0 6 6 6 18 18 18
44767- 50 50 50 109 106 95 192 133 9 224 166 10
44768-242 186 14 246 190 14 246 190 14 246 190 14
44769-246 190 14 246 190 14 246 190 14 246 190 14
44770-246 190 14 246 190 14 246 190 14 246 190 14
44771-246 190 14 246 190 14 246 190 14 246 190 14
44772-242 186 14 226 184 13 210 162 10 142 110 46
44773-226 226 226 253 253 253 253 253 253 253 253 253
44774-253 253 253 253 253 253 253 253 253 253 253 253
44775-253 253 253 253 253 253 253 253 253 253 253 253
44776-198 198 198 66 66 66 2 2 6 2 2 6
44777- 2 2 6 2 2 6 50 34 6 156 107 11
44778-219 162 10 239 182 13 246 186 14 246 190 14
44779-246 190 14 246 190 14 246 190 14 246 190 14
44780-246 190 14 246 190 14 246 190 14 246 190 14
44781-246 190 14 246 190 14 246 190 14 242 186 14
44782-234 174 13 213 154 11 154 122 46 66 66 66
44783- 30 30 30 10 10 10 0 0 0 0 0 0
44784- 0 0 0 0 0 0 0 0 0 0 0 0
44785- 0 0 0 0 0 0 0 0 0 0 0 0
44786- 0 0 0 0 0 0 6 6 6 22 22 22
44787- 58 58 58 154 121 60 206 145 10 234 174 13
44788-242 186 14 246 186 14 246 190 14 246 190 14
44789-246 190 14 246 190 14 246 190 14 246 190 14
44790-246 190 14 246 190 14 246 190 14 246 190 14
44791-246 190 14 246 190 14 246 190 14 246 190 14
44792-246 186 14 236 178 12 210 162 10 163 110 8
44793- 61 42 6 138 138 138 218 218 218 250 250 250
44794-253 253 253 253 253 253 253 253 253 250 250 250
44795-242 242 242 210 210 210 144 144 144 66 66 66
44796- 6 6 6 2 2 6 2 2 6 2 2 6
44797- 2 2 6 2 2 6 61 42 6 163 110 8
44798-216 158 10 236 178 12 246 190 14 246 190 14
44799-246 190 14 246 190 14 246 190 14 246 190 14
44800-246 190 14 246 190 14 246 190 14 246 190 14
44801-246 190 14 239 182 13 230 174 11 216 158 10
44802-190 142 34 124 112 88 70 70 70 38 38 38
44803- 18 18 18 6 6 6 0 0 0 0 0 0
44804- 0 0 0 0 0 0 0 0 0 0 0 0
44805- 0 0 0 0 0 0 0 0 0 0 0 0
44806- 0 0 0 0 0 0 6 6 6 22 22 22
44807- 62 62 62 168 124 44 206 145 10 224 166 10
44808-236 178 12 239 182 13 242 186 14 242 186 14
44809-246 186 14 246 190 14 246 190 14 246 190 14
44810-246 190 14 246 190 14 246 190 14 246 190 14
44811-246 190 14 246 190 14 246 190 14 246 190 14
44812-246 190 14 236 178 12 216 158 10 175 118 6
44813- 80 54 7 2 2 6 6 6 6 30 30 30
44814- 54 54 54 62 62 62 50 50 50 38 38 38
44815- 14 14 14 2 2 6 2 2 6 2 2 6
44816- 2 2 6 2 2 6 2 2 6 2 2 6
44817- 2 2 6 6 6 6 80 54 7 167 114 7
44818-213 154 11 236 178 12 246 190 14 246 190 14
44819-246 190 14 246 190 14 246 190 14 246 190 14
44820-246 190 14 242 186 14 239 182 13 239 182 13
44821-230 174 11 210 150 10 174 135 50 124 112 88
44822- 82 82 82 54 54 54 34 34 34 18 18 18
44823- 6 6 6 0 0 0 0 0 0 0 0 0
44824- 0 0 0 0 0 0 0 0 0 0 0 0
44825- 0 0 0 0 0 0 0 0 0 0 0 0
44826- 0 0 0 0 0 0 6 6 6 18 18 18
44827- 50 50 50 158 118 36 192 133 9 200 144 11
44828-216 158 10 219 162 10 224 166 10 226 170 11
44829-230 174 11 236 178 12 239 182 13 239 182 13
44830-242 186 14 246 186 14 246 190 14 246 190 14
44831-246 190 14 246 190 14 246 190 14 246 190 14
44832-246 186 14 230 174 11 210 150 10 163 110 8
44833-104 69 6 10 10 10 2 2 6 2 2 6
44834- 2 2 6 2 2 6 2 2 6 2 2 6
44835- 2 2 6 2 2 6 2 2 6 2 2 6
44836- 2 2 6 2 2 6 2 2 6 2 2 6
44837- 2 2 6 6 6 6 91 60 6 167 114 7
44838-206 145 10 230 174 11 242 186 14 246 190 14
44839-246 190 14 246 190 14 246 186 14 242 186 14
44840-239 182 13 230 174 11 224 166 10 213 154 11
44841-180 133 36 124 112 88 86 86 86 58 58 58
44842- 38 38 38 22 22 22 10 10 10 6 6 6
44843- 0 0 0 0 0 0 0 0 0 0 0 0
44844- 0 0 0 0 0 0 0 0 0 0 0 0
44845- 0 0 0 0 0 0 0 0 0 0 0 0
44846- 0 0 0 0 0 0 0 0 0 14 14 14
44847- 34 34 34 70 70 70 138 110 50 158 118 36
44848-167 114 7 180 123 7 192 133 9 197 138 11
44849-200 144 11 206 145 10 213 154 11 219 162 10
44850-224 166 10 230 174 11 239 182 13 242 186 14
44851-246 186 14 246 186 14 246 186 14 246 186 14
44852-239 182 13 216 158 10 185 133 11 152 99 6
44853-104 69 6 18 14 6 2 2 6 2 2 6
44854- 2 2 6 2 2 6 2 2 6 2 2 6
44855- 2 2 6 2 2 6 2 2 6 2 2 6
44856- 2 2 6 2 2 6 2 2 6 2 2 6
44857- 2 2 6 6 6 6 80 54 7 152 99 6
44858-192 133 9 219 162 10 236 178 12 239 182 13
44859-246 186 14 242 186 14 239 182 13 236 178 12
44860-224 166 10 206 145 10 192 133 9 154 121 60
44861- 94 94 94 62 62 62 42 42 42 22 22 22
44862- 14 14 14 6 6 6 0 0 0 0 0 0
44863- 0 0 0 0 0 0 0 0 0 0 0 0
44864- 0 0 0 0 0 0 0 0 0 0 0 0
44865- 0 0 0 0 0 0 0 0 0 0 0 0
44866- 0 0 0 0 0 0 0 0 0 6 6 6
44867- 18 18 18 34 34 34 58 58 58 78 78 78
44868-101 98 89 124 112 88 142 110 46 156 107 11
44869-163 110 8 167 114 7 175 118 6 180 123 7
44870-185 133 11 197 138 11 210 150 10 219 162 10
44871-226 170 11 236 178 12 236 178 12 234 174 13
44872-219 162 10 197 138 11 163 110 8 130 83 6
44873- 91 60 6 10 10 10 2 2 6 2 2 6
44874- 18 18 18 38 38 38 38 38 38 38 38 38
44875- 38 38 38 38 38 38 38 38 38 38 38 38
44876- 38 38 38 38 38 38 26 26 26 2 2 6
44877- 2 2 6 6 6 6 70 47 6 137 92 6
44878-175 118 6 200 144 11 219 162 10 230 174 11
44879-234 174 13 230 174 11 219 162 10 210 150 10
44880-192 133 9 163 110 8 124 112 88 82 82 82
44881- 50 50 50 30 30 30 14 14 14 6 6 6
44882- 0 0 0 0 0 0 0 0 0 0 0 0
44883- 0 0 0 0 0 0 0 0 0 0 0 0
44884- 0 0 0 0 0 0 0 0 0 0 0 0
44885- 0 0 0 0 0 0 0 0 0 0 0 0
44886- 0 0 0 0 0 0 0 0 0 0 0 0
44887- 6 6 6 14 14 14 22 22 22 34 34 34
44888- 42 42 42 58 58 58 74 74 74 86 86 86
44889-101 98 89 122 102 70 130 98 46 121 87 25
44890-137 92 6 152 99 6 163 110 8 180 123 7
44891-185 133 11 197 138 11 206 145 10 200 144 11
44892-180 123 7 156 107 11 130 83 6 104 69 6
44893- 50 34 6 54 54 54 110 110 110 101 98 89
44894- 86 86 86 82 82 82 78 78 78 78 78 78
44895- 78 78 78 78 78 78 78 78 78 78 78 78
44896- 78 78 78 82 82 82 86 86 86 94 94 94
44897-106 106 106 101 101 101 86 66 34 124 80 6
44898-156 107 11 180 123 7 192 133 9 200 144 11
44899-206 145 10 200 144 11 192 133 9 175 118 6
44900-139 102 15 109 106 95 70 70 70 42 42 42
44901- 22 22 22 10 10 10 0 0 0 0 0 0
44902- 0 0 0 0 0 0 0 0 0 0 0 0
44903- 0 0 0 0 0 0 0 0 0 0 0 0
44904- 0 0 0 0 0 0 0 0 0 0 0 0
44905- 0 0 0 0 0 0 0 0 0 0 0 0
44906- 0 0 0 0 0 0 0 0 0 0 0 0
44907- 0 0 0 0 0 0 6 6 6 10 10 10
44908- 14 14 14 22 22 22 30 30 30 38 38 38
44909- 50 50 50 62 62 62 74 74 74 90 90 90
44910-101 98 89 112 100 78 121 87 25 124 80 6
44911-137 92 6 152 99 6 152 99 6 152 99 6
44912-138 86 6 124 80 6 98 70 6 86 66 30
44913-101 98 89 82 82 82 58 58 58 46 46 46
44914- 38 38 38 34 34 34 34 34 34 34 34 34
44915- 34 34 34 34 34 34 34 34 34 34 34 34
44916- 34 34 34 34 34 34 38 38 38 42 42 42
44917- 54 54 54 82 82 82 94 86 76 91 60 6
44918-134 86 6 156 107 11 167 114 7 175 118 6
44919-175 118 6 167 114 7 152 99 6 121 87 25
44920-101 98 89 62 62 62 34 34 34 18 18 18
44921- 6 6 6 0 0 0 0 0 0 0 0 0
44922- 0 0 0 0 0 0 0 0 0 0 0 0
44923- 0 0 0 0 0 0 0 0 0 0 0 0
44924- 0 0 0 0 0 0 0 0 0 0 0 0
44925- 0 0 0 0 0 0 0 0 0 0 0 0
44926- 0 0 0 0 0 0 0 0 0 0 0 0
44927- 0 0 0 0 0 0 0 0 0 0 0 0
44928- 0 0 0 6 6 6 6 6 6 10 10 10
44929- 18 18 18 22 22 22 30 30 30 42 42 42
44930- 50 50 50 66 66 66 86 86 86 101 98 89
44931-106 86 58 98 70 6 104 69 6 104 69 6
44932-104 69 6 91 60 6 82 62 34 90 90 90
44933- 62 62 62 38 38 38 22 22 22 14 14 14
44934- 10 10 10 10 10 10 10 10 10 10 10 10
44935- 10 10 10 10 10 10 6 6 6 10 10 10
44936- 10 10 10 10 10 10 10 10 10 14 14 14
44937- 22 22 22 42 42 42 70 70 70 89 81 66
44938- 80 54 7 104 69 6 124 80 6 137 92 6
44939-134 86 6 116 81 8 100 82 52 86 86 86
44940- 58 58 58 30 30 30 14 14 14 6 6 6
44941- 0 0 0 0 0 0 0 0 0 0 0 0
44942- 0 0 0 0 0 0 0 0 0 0 0 0
44943- 0 0 0 0 0 0 0 0 0 0 0 0
44944- 0 0 0 0 0 0 0 0 0 0 0 0
44945- 0 0 0 0 0 0 0 0 0 0 0 0
44946- 0 0 0 0 0 0 0 0 0 0 0 0
44947- 0 0 0 0 0 0 0 0 0 0 0 0
44948- 0 0 0 0 0 0 0 0 0 0 0 0
44949- 0 0 0 6 6 6 10 10 10 14 14 14
44950- 18 18 18 26 26 26 38 38 38 54 54 54
44951- 70 70 70 86 86 86 94 86 76 89 81 66
44952- 89 81 66 86 86 86 74 74 74 50 50 50
44953- 30 30 30 14 14 14 6 6 6 0 0 0
44954- 0 0 0 0 0 0 0 0 0 0 0 0
44955- 0 0 0 0 0 0 0 0 0 0 0 0
44956- 0 0 0 0 0 0 0 0 0 0 0 0
44957- 6 6 6 18 18 18 34 34 34 58 58 58
44958- 82 82 82 89 81 66 89 81 66 89 81 66
44959- 94 86 66 94 86 76 74 74 74 50 50 50
44960- 26 26 26 14 14 14 6 6 6 0 0 0
44961- 0 0 0 0 0 0 0 0 0 0 0 0
44962- 0 0 0 0 0 0 0 0 0 0 0 0
44963- 0 0 0 0 0 0 0 0 0 0 0 0
44964- 0 0 0 0 0 0 0 0 0 0 0 0
44965- 0 0 0 0 0 0 0 0 0 0 0 0
44966- 0 0 0 0 0 0 0 0 0 0 0 0
44967- 0 0 0 0 0 0 0 0 0 0 0 0
44968- 0 0 0 0 0 0 0 0 0 0 0 0
44969- 0 0 0 0 0 0 0 0 0 0 0 0
44970- 6 6 6 6 6 6 14 14 14 18 18 18
44971- 30 30 30 38 38 38 46 46 46 54 54 54
44972- 50 50 50 42 42 42 30 30 30 18 18 18
44973- 10 10 10 0 0 0 0 0 0 0 0 0
44974- 0 0 0 0 0 0 0 0 0 0 0 0
44975- 0 0 0 0 0 0 0 0 0 0 0 0
44976- 0 0 0 0 0 0 0 0 0 0 0 0
44977- 0 0 0 6 6 6 14 14 14 26 26 26
44978- 38 38 38 50 50 50 58 58 58 58 58 58
44979- 54 54 54 42 42 42 30 30 30 18 18 18
44980- 10 10 10 0 0 0 0 0 0 0 0 0
44981- 0 0 0 0 0 0 0 0 0 0 0 0
44982- 0 0 0 0 0 0 0 0 0 0 0 0
44983- 0 0 0 0 0 0 0 0 0 0 0 0
44984- 0 0 0 0 0 0 0 0 0 0 0 0
44985- 0 0 0 0 0 0 0 0 0 0 0 0
44986- 0 0 0 0 0 0 0 0 0 0 0 0
44987- 0 0 0 0 0 0 0 0 0 0 0 0
44988- 0 0 0 0 0 0 0 0 0 0 0 0
44989- 0 0 0 0 0 0 0 0 0 0 0 0
44990- 0 0 0 0 0 0 0 0 0 6 6 6
44991- 6 6 6 10 10 10 14 14 14 18 18 18
44992- 18 18 18 14 14 14 10 10 10 6 6 6
44993- 0 0 0 0 0 0 0 0 0 0 0 0
44994- 0 0 0 0 0 0 0 0 0 0 0 0
44995- 0 0 0 0 0 0 0 0 0 0 0 0
44996- 0 0 0 0 0 0 0 0 0 0 0 0
44997- 0 0 0 0 0 0 0 0 0 6 6 6
44998- 14 14 14 18 18 18 22 22 22 22 22 22
44999- 18 18 18 14 14 14 10 10 10 6 6 6
45000- 0 0 0 0 0 0 0 0 0 0 0 0
45001- 0 0 0 0 0 0 0 0 0 0 0 0
45002- 0 0 0 0 0 0 0 0 0 0 0 0
45003- 0 0 0 0 0 0 0 0 0 0 0 0
45004- 0 0 0 0 0 0 0 0 0 0 0 0
45005+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45016+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45017+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45018+4 4 4 4 4 4
45019+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45026+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45030+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45031+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45032+4 4 4 4 4 4
45033+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45040+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45044+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45045+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45046+4 4 4 4 4 4
45047+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45054+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45059+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45060+4 4 4 4 4 4
45061+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45068+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45073+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45074+4 4 4 4 4 4
45075+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45082+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45087+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45088+4 4 4 4 4 4
45089+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45093+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45094+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45096+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45098+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45099+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45100+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45101+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45102+4 4 4 4 4 4
45103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45107+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45108+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45109+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45110+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45112+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45113+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45114+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45115+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45116+4 4 4 4 4 4
45117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45121+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45122+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45123+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45124+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45125+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45126+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45127+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45128+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45129+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45130+4 4 4 4 4 4
45131+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45132+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45133+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45134+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45135+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45136+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45137+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45138+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45139+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45140+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45141+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45142+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45143+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45144+4 4 4 4 4 4
45145+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45146+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45147+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45148+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45149+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45150+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45151+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45152+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45153+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45154+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45155+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45156+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45157+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45158+4 4 4 4 4 4
45159+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45160+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45161+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45162+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45163+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45164+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45165+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45166+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45167+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45168+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45169+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45170+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45171+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45172+4 4 4 4 4 4
45173+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45174+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45175+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45176+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45177+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45178+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45179+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45180+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45181+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45182+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45183+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45184+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45185+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45186+4 4 4 4 4 4
45187+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45188+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45189+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45190+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45191+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45192+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45193+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45194+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45195+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45196+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45197+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45198+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45199+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45200+4 4 4 4 4 4
45201+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45202+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45203+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45204+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45205+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45206+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45207+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45208+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45209+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45210+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45211+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45212+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45213+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45214+4 4 4 4 4 4
45215+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45216+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45217+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45218+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45219+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45220+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45221+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45222+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45223+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45224+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45225+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45226+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45227+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45228+4 4 4 4 4 4
45229+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45230+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45231+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45232+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45233+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45234+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45235+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45236+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45237+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45238+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45239+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45240+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45241+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45242+4 4 4 4 4 4
45243+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45244+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45245+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45246+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45247+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45248+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45249+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45250+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45251+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45252+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45253+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45254+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45255+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45256+0 0 0 4 4 4
45257+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45258+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45259+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45260+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45261+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45262+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45263+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45264+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45265+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45266+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45267+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45268+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45269+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45270+2 0 0 0 0 0
45271+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45272+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45273+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45274+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45275+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45276+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45277+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45278+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45279+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45280+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45281+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45282+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45283+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45284+37 38 37 0 0 0
45285+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45286+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45287+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45288+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45289+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45290+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45291+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45292+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45293+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45294+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45295+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45296+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45297+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45298+85 115 134 4 0 0
45299+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45300+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45301+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45302+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45303+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45304+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45305+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45306+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45307+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45308+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45309+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45310+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45311+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45312+60 73 81 4 0 0
45313+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45314+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45315+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45316+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45317+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45318+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45319+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45320+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45321+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45322+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45323+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45324+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45325+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45326+16 19 21 4 0 0
45327+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45328+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45329+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45330+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45331+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45332+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45333+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45334+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45335+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45336+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45337+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45338+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45339+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45340+4 0 0 4 3 3
45341+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45342+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45343+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45345+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45346+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45347+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45348+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45349+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45350+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45351+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45352+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45353+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45354+3 2 2 4 4 4
45355+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45356+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45357+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45358+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45359+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45360+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45361+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45362+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45363+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45364+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45365+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45366+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45367+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45368+4 4 4 4 4 4
45369+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45370+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45371+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45372+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45373+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45374+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45375+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45376+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45377+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45378+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45379+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45380+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45381+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45382+4 4 4 4 4 4
45383+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45384+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45385+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45386+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45387+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45388+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45389+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45390+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45391+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45392+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45393+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45394+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45395+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45396+5 5 5 5 5 5
45397+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45398+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45399+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45400+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45401+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45402+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45403+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45404+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45405+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45406+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45407+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45408+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45409+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45410+5 5 5 4 4 4
45411+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45412+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45413+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45414+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45415+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45416+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45417+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45418+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45419+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45420+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45421+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45422+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45423+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45424+4 4 4 4 4 4
45425+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45426+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45427+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45428+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45429+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45430+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45431+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45432+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45433+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45434+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45435+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45436+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45437+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45438+4 4 4 4 4 4
45439+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45440+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45441+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45442+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45443+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45444+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45445+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45446+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45447+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45448+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45449+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45451+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45452+4 4 4 4 4 4
45453+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45454+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45455+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45456+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45457+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45458+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45459+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45460+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45461+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45462+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45463+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45465+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45466+4 4 4 4 4 4
45467+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45468+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45469+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45470+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45471+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45472+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45473+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45474+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45475+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45476+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45477+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45479+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45480+4 4 4 4 4 4
45481+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45482+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45483+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45484+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45485+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45486+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45487+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45488+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45489+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45490+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45491+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45493+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45494+4 4 4 4 4 4
45495+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45496+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45497+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45498+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45499+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45500+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45501+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45502+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45503+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45504+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45505+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45507+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45508+4 4 4 4 4 4
45509+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45510+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45511+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45512+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45513+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45514+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45515+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45516+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45517+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45518+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45519+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45521+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45522+4 4 4 4 4 4
45523+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45524+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45525+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45526+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45527+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45528+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45529+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45530+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45531+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45532+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45533+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45535+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45536+4 4 4 4 4 4
45537+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45538+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45539+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45540+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45541+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45542+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45543+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45544+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45545+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45546+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45547+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45549+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45550+4 4 4 4 4 4
45551+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45552+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45553+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45554+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45555+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45556+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45557+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45558+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45559+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45560+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45561+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564+4 4 4 4 4 4
45565+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45566+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45567+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45568+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45569+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45570+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45571+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45572+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45573+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45574+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45575+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578+4 4 4 4 4 4
45579+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45580+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45581+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45582+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45583+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45584+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45585+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45586+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45587+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45588+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45589+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592+4 4 4 4 4 4
45593+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45594+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45595+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45596+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45597+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45598+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45599+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45600+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45601+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45602+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45603+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606+4 4 4 4 4 4
45607+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45608+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45609+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45610+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45611+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45612+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45613+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45614+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45615+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45616+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45617+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620+4 4 4 4 4 4
45621+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45622+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45623+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45624+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45625+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45626+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45627+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45628+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45629+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45630+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45631+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634+4 4 4 4 4 4
45635+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45636+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45637+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45638+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45639+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45640+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45641+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45642+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45643+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45644+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45645+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648+4 4 4 4 4 4
45649+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45650+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45651+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45652+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45653+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45654+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45655+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45656+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45657+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45658+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45659+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662+4 4 4 4 4 4
45663+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45664+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45665+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45666+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45667+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45668+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45669+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45670+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45671+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45672+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45673+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676+4 4 4 4 4 4
45677+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45678+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45679+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45680+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45681+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45682+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45683+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45684+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45685+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45686+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45687+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690+4 4 4 4 4 4
45691+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45692+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45693+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45694+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45695+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45696+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45697+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45698+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45699+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45700+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45701+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704+4 4 4 4 4 4
45705+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45706+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45707+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45708+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45709+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45710+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45711+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45712+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45713+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45714+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45715+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45718+4 4 4 4 4 4
45719+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45720+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45721+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45722+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45723+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45724+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45725+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45726+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45727+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45728+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45729+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45732+4 4 4 4 4 4
45733+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45734+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45735+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45736+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45737+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45738+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45739+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45740+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45741+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45742+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45743+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45746+4 4 4 4 4 4
45747+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45748+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45749+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45750+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45751+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45752+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45753+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45754+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45755+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45756+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45757+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45760+4 4 4 4 4 4
45761+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45762+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45763+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45764+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45765+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45766+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45767+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45768+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45769+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45770+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45771+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45774+4 4 4 4 4 4
45775+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45776+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45777+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45778+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45779+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45780+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45781+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45782+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45783+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45784+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45785+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45788+4 4 4 4 4 4
45789+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45790+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45791+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45792+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45793+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45794+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45795+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45796+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45797+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45798+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45799+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45802+4 4 4 4 4 4
45803+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45804+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45805+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45806+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45807+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45808+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45809+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45810+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45811+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45812+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45813+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45816+4 4 4 4 4 4
45817+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45818+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45819+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45820+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45821+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45822+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45823+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45824+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45825+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45826+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45830+4 4 4 4 4 4
45831+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45832+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45833+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45834+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45835+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45836+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45837+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45838+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45839+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45840+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45844+4 4 4 4 4 4
45845+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45846+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45847+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45848+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45849+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45850+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45851+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45852+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45853+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45854+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45858+4 4 4 4 4 4
45859+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45860+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45861+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45862+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45863+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45864+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45865+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45866+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45867+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45868+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45872+4 4 4 4 4 4
45873+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45874+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45875+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45876+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45877+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45878+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45879+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45880+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45881+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45886+4 4 4 4 4 4
45887+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45888+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45889+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45890+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45891+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45892+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45893+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45894+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45895+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45900+4 4 4 4 4 4
45901+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45902+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45903+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45904+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45905+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45906+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45907+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45908+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45909+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45910+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45911+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45912+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45914+4 4 4 4 4 4
45915+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45916+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45917+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45918+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45919+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45920+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45921+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45922+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45923+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45924+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45925+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45926+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45928+4 4 4 4 4 4
45929+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45930+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45931+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45932+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45933+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45934+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45935+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45936+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45937+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45938+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45939+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45940+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45942+4 4 4 4 4 4
45943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45944+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45945+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45946+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45947+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45948+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45949+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45950+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45951+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45952+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45953+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45954+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45956+4 4 4 4 4 4
45957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45958+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45959+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45960+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45961+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45962+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45963+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45964+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45965+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45966+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45967+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45968+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45970+4 4 4 4 4 4
45971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45972+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45973+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45974+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45975+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45976+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45977+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45978+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45979+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45980+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45984+4 4 4 4 4 4
45985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45986+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45987+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45988+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45989+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45990+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45991+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45992+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45993+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45994+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45997+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45998+4 4 4 4 4 4
45999+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46000+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46002+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46003+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
46004+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
46005+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
46006+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46007+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46011+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46012+4 4 4 4 4 4
46013+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46014+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46016+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46017+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46018+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
46019+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
46020+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46021+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46025+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46026+4 4 4 4 4 4
46027+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46030+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46031+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46032+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46033+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
46034+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46035+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46039+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46040+4 4 4 4 4 4
46041+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46044+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46045+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46046+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46047+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46048+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46049+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46053+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46054+4 4 4 4 4 4
46055+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46058+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46059+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46060+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46061+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46062+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46063+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46067+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46068+4 4 4 4 4 4
46069+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46072+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46073+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
46074+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
46075+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46076+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46081+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46082+4 4 4 4 4 4
46083+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46086+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46087+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
46088+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
46089+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46090+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46091+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46094+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46095+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46096+4 4 4 4 4 4
46097+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46100+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46101+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
46102+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
46103+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46104+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46105+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46108+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46109+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46110+4 4 4 4 4 4
46111+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46114+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46115+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46116+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46117+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46118+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46119+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46122+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46123+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46124+4 4 4 4 4 4
46125diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
46126index 443e3c8..c443d6a 100644
46127--- a/drivers/video/nvidia/nv_backlight.c
46128+++ b/drivers/video/nvidia/nv_backlight.c
46129@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
46130 return bd->props.brightness;
46131 }
46132
46133-static struct backlight_ops nvidia_bl_ops = {
46134+static const struct backlight_ops nvidia_bl_ops = {
46135 .get_brightness = nvidia_bl_get_brightness,
46136 .update_status = nvidia_bl_update_status,
46137 };
46138diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
46139index d94c57f..912984c 100644
46140--- a/drivers/video/riva/fbdev.c
46141+++ b/drivers/video/riva/fbdev.c
46142@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
46143 return bd->props.brightness;
46144 }
46145
46146-static struct backlight_ops riva_bl_ops = {
46147+static const struct backlight_ops riva_bl_ops = {
46148 .get_brightness = riva_bl_get_brightness,
46149 .update_status = riva_bl_update_status,
46150 };
46151diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46152index 54fbb29..2c108fc 100644
46153--- a/drivers/video/uvesafb.c
46154+++ b/drivers/video/uvesafb.c
46155@@ -18,6 +18,7 @@
46156 #include <linux/fb.h>
46157 #include <linux/io.h>
46158 #include <linux/mutex.h>
46159+#include <linux/moduleloader.h>
46160 #include <video/edid.h>
46161 #include <video/uvesafb.h>
46162 #ifdef CONFIG_X86
46163@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
46164 NULL,
46165 };
46166
46167- return call_usermodehelper(v86d_path, argv, envp, 1);
46168+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
46169 }
46170
46171 /*
46172@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46173 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46174 par->pmi_setpal = par->ypan = 0;
46175 } else {
46176+
46177+#ifdef CONFIG_PAX_KERNEXEC
46178+#ifdef CONFIG_MODULES
46179+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46180+#endif
46181+ if (!par->pmi_code) {
46182+ par->pmi_setpal = par->ypan = 0;
46183+ return 0;
46184+ }
46185+#endif
46186+
46187 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46188 + task->t.regs.edi);
46189+
46190+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46191+ pax_open_kernel();
46192+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46193+ pax_close_kernel();
46194+
46195+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46196+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46197+#else
46198 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46199 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46200+#endif
46201+
46202 printk(KERN_INFO "uvesafb: protected mode interface info at "
46203 "%04x:%04x\n",
46204 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46205@@ -1799,6 +1822,11 @@ out:
46206 if (par->vbe_modes)
46207 kfree(par->vbe_modes);
46208
46209+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46210+ if (par->pmi_code)
46211+ module_free_exec(NULL, par->pmi_code);
46212+#endif
46213+
46214 framebuffer_release(info);
46215 return err;
46216 }
46217@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
46218 kfree(par->vbe_state_orig);
46219 if (par->vbe_state_saved)
46220 kfree(par->vbe_state_saved);
46221+
46222+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46223+ if (par->pmi_code)
46224+ module_free_exec(NULL, par->pmi_code);
46225+#endif
46226+
46227 }
46228
46229 framebuffer_release(info);
46230diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
46231index bd37ee1..cb827e8 100644
46232--- a/drivers/video/vesafb.c
46233+++ b/drivers/video/vesafb.c
46234@@ -9,6 +9,7 @@
46235 */
46236
46237 #include <linux/module.h>
46238+#include <linux/moduleloader.h>
46239 #include <linux/kernel.h>
46240 #include <linux/errno.h>
46241 #include <linux/string.h>
46242@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
46243 static int vram_total __initdata; /* Set total amount of memory */
46244 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
46245 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
46246-static void (*pmi_start)(void) __read_mostly;
46247-static void (*pmi_pal) (void) __read_mostly;
46248+static void (*pmi_start)(void) __read_only;
46249+static void (*pmi_pal) (void) __read_only;
46250 static int depth __read_mostly;
46251 static int vga_compat __read_mostly;
46252 /* --------------------------------------------------------------------- */
46253@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
46254 unsigned int size_vmode;
46255 unsigned int size_remap;
46256 unsigned int size_total;
46257+ void *pmi_code = NULL;
46258
46259 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
46260 return -ENODEV;
46261@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
46262 size_remap = size_total;
46263 vesafb_fix.smem_len = size_remap;
46264
46265-#ifndef __i386__
46266- screen_info.vesapm_seg = 0;
46267-#endif
46268-
46269 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46270 printk(KERN_WARNING
46271 "vesafb: cannot reserve video memory at 0x%lx\n",
46272@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46273 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46274 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46275
46276+#ifdef __i386__
46277+
46278+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46279+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
46280+ if (!pmi_code)
46281+#elif !defined(CONFIG_PAX_KERNEXEC)
46282+ if (0)
46283+#endif
46284+
46285+#endif
46286+ screen_info.vesapm_seg = 0;
46287+
46288 if (screen_info.vesapm_seg) {
46289- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46290- screen_info.vesapm_seg,screen_info.vesapm_off);
46291+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46292+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46293 }
46294
46295 if (screen_info.vesapm_seg < 0xc000)
46296@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46297
46298 if (ypan || pmi_setpal) {
46299 unsigned short *pmi_base;
46300+
46301 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46302- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46303- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46304+
46305+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46306+ pax_open_kernel();
46307+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46308+#else
46309+ pmi_code = pmi_base;
46310+#endif
46311+
46312+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46313+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46314+
46315+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46316+ pmi_start = ktva_ktla(pmi_start);
46317+ pmi_pal = ktva_ktla(pmi_pal);
46318+ pax_close_kernel();
46319+#endif
46320+
46321 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46322 if (pmi_base[3]) {
46323 printk(KERN_INFO "vesafb: pmi: ports = ");
46324@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46325 info->node, info->fix.id);
46326 return 0;
46327 err:
46328+
46329+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46330+ module_free_exec(NULL, pmi_code);
46331+#endif
46332+
46333 if (info->screen_base)
46334 iounmap(info->screen_base);
46335 framebuffer_release(info);
46336diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46337index 88a60e0..6783cc2 100644
46338--- a/drivers/xen/sys-hypervisor.c
46339+++ b/drivers/xen/sys-hypervisor.c
46340@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46341 return 0;
46342 }
46343
46344-static struct sysfs_ops hyp_sysfs_ops = {
46345+static const struct sysfs_ops hyp_sysfs_ops = {
46346 .show = hyp_sysfs_show,
46347 .store = hyp_sysfs_store,
46348 };
46349diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46350index 18f74ec..3227009 100644
46351--- a/fs/9p/vfs_inode.c
46352+++ b/fs/9p/vfs_inode.c
46353@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46354 static void
46355 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46356 {
46357- char *s = nd_get_link(nd);
46358+ const char *s = nd_get_link(nd);
46359
46360 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46361 IS_ERR(s) ? "<error>" : s);
46362diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46363index bb4cc5b..df5eaa0 100644
46364--- a/fs/Kconfig.binfmt
46365+++ b/fs/Kconfig.binfmt
46366@@ -86,7 +86,7 @@ config HAVE_AOUT
46367
46368 config BINFMT_AOUT
46369 tristate "Kernel support for a.out and ECOFF binaries"
46370- depends on HAVE_AOUT
46371+ depends on HAVE_AOUT && BROKEN
46372 ---help---
46373 A.out (Assembler.OUTput) is a set of formats for libraries and
46374 executables used in the earliest versions of UNIX. Linux used
46375diff --git a/fs/aio.c b/fs/aio.c
46376index 22a19ad..d484e5b 100644
46377--- a/fs/aio.c
46378+++ b/fs/aio.c
46379@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46380 size += sizeof(struct io_event) * nr_events;
46381 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46382
46383- if (nr_pages < 0)
46384+ if (nr_pages <= 0)
46385 return -EINVAL;
46386
46387 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46388@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46389 struct aio_timeout to;
46390 int retry = 0;
46391
46392+ pax_track_stack();
46393+
46394 /* needed to zero any padding within an entry (there shouldn't be
46395 * any, but C is fun!
46396 */
46397@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46398 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46399 {
46400 ssize_t ret;
46401+ struct iovec iovstack;
46402
46403 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46404 kiocb->ki_nbytes, 1,
46405- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46406+ &iovstack, &kiocb->ki_iovec);
46407 if (ret < 0)
46408 goto out;
46409
46410+ if (kiocb->ki_iovec == &iovstack) {
46411+ kiocb->ki_inline_vec = iovstack;
46412+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
46413+ }
46414 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46415 kiocb->ki_cur_seg = 0;
46416 /* ki_nbytes/left now reflect bytes instead of segs */
46417diff --git a/fs/attr.c b/fs/attr.c
46418index 96d394b..33cf5b4 100644
46419--- a/fs/attr.c
46420+++ b/fs/attr.c
46421@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46422 unsigned long limit;
46423
46424 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46425+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46426 if (limit != RLIM_INFINITY && offset > limit)
46427 goto out_sig;
46428 if (offset > inode->i_sb->s_maxbytes)
46429diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46430index 4a1401c..05eb5ca 100644
46431--- a/fs/autofs/root.c
46432+++ b/fs/autofs/root.c
46433@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46434 set_bit(n,sbi->symlink_bitmap);
46435 sl = &sbi->symlink[n];
46436 sl->len = strlen(symname);
46437- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46438+ slsize = sl->len+1;
46439+ sl->data = kmalloc(slsize, GFP_KERNEL);
46440 if (!sl->data) {
46441 clear_bit(n,sbi->symlink_bitmap);
46442 unlock_kernel();
46443diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46444index b4ea829..e63ef18 100644
46445--- a/fs/autofs4/symlink.c
46446+++ b/fs/autofs4/symlink.c
46447@@ -15,7 +15,7 @@
46448 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46449 {
46450 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46451- nd_set_link(nd, (char *)ino->u.symlink);
46452+ nd_set_link(nd, ino->u.symlink);
46453 return NULL;
46454 }
46455
46456diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46457index 2341375..df9d1c2 100644
46458--- a/fs/autofs4/waitq.c
46459+++ b/fs/autofs4/waitq.c
46460@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46461 {
46462 unsigned long sigpipe, flags;
46463 mm_segment_t fs;
46464- const char *data = (const char *)addr;
46465+ const char __user *data = (const char __force_user *)addr;
46466 ssize_t wr = 0;
46467
46468 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46469diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46470index 9158c07..3f06659 100644
46471--- a/fs/befs/linuxvfs.c
46472+++ b/fs/befs/linuxvfs.c
46473@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46474 {
46475 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46476 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46477- char *link = nd_get_link(nd);
46478+ const char *link = nd_get_link(nd);
46479 if (!IS_ERR(link))
46480 kfree(link);
46481 }
46482diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46483index 0133b5a..b3baa9f 100644
46484--- a/fs/binfmt_aout.c
46485+++ b/fs/binfmt_aout.c
46486@@ -16,6 +16,7 @@
46487 #include <linux/string.h>
46488 #include <linux/fs.h>
46489 #include <linux/file.h>
46490+#include <linux/security.h>
46491 #include <linux/stat.h>
46492 #include <linux/fcntl.h>
46493 #include <linux/ptrace.h>
46494@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46495 #endif
46496 # define START_STACK(u) (u.start_stack)
46497
46498+ memset(&dump, 0, sizeof(dump));
46499+
46500 fs = get_fs();
46501 set_fs(KERNEL_DS);
46502 has_dumped = 1;
46503@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46504
46505 /* If the size of the dump file exceeds the rlimit, then see what would happen
46506 if we wrote the stack, but not the data area. */
46507+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46508 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46509 dump.u_dsize = 0;
46510
46511 /* Make sure we have enough room to write the stack and data areas. */
46512+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46513 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46514 dump.u_ssize = 0;
46515
46516@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46517 dump_size = dump.u_ssize << PAGE_SHIFT;
46518 DUMP_WRITE(dump_start,dump_size);
46519 }
46520-/* Finally dump the task struct. Not be used by gdb, but could be useful */
46521- set_fs(KERNEL_DS);
46522- DUMP_WRITE(current,sizeof(*current));
46523+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46524 end_coredump:
46525 set_fs(fs);
46526 return has_dumped;
46527@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46528 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46529 if (rlim >= RLIM_INFINITY)
46530 rlim = ~0;
46531+
46532+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46533 if (ex.a_data + ex.a_bss > rlim)
46534 return -ENOMEM;
46535
46536@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46537 install_exec_creds(bprm);
46538 current->flags &= ~PF_FORKNOEXEC;
46539
46540+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46541+ current->mm->pax_flags = 0UL;
46542+#endif
46543+
46544+#ifdef CONFIG_PAX_PAGEEXEC
46545+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46546+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46547+
46548+#ifdef CONFIG_PAX_EMUTRAMP
46549+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46550+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46551+#endif
46552+
46553+#ifdef CONFIG_PAX_MPROTECT
46554+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46555+ current->mm->pax_flags |= MF_PAX_MPROTECT;
46556+#endif
46557+
46558+ }
46559+#endif
46560+
46561 if (N_MAGIC(ex) == OMAGIC) {
46562 unsigned long text_addr, map_size;
46563 loff_t pos;
46564@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46565
46566 down_write(&current->mm->mmap_sem);
46567 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46568- PROT_READ | PROT_WRITE | PROT_EXEC,
46569+ PROT_READ | PROT_WRITE,
46570 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46571 fd_offset + ex.a_text);
46572 up_write(&current->mm->mmap_sem);
46573diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46574index 1ed37ba..308a022 100644
46575--- a/fs/binfmt_elf.c
46576+++ b/fs/binfmt_elf.c
46577@@ -31,6 +31,7 @@
46578 #include <linux/random.h>
46579 #include <linux/elf.h>
46580 #include <linux/utsname.h>
46581+#include <linux/xattr.h>
46582 #include <asm/uaccess.h>
46583 #include <asm/param.h>
46584 #include <asm/page.h>
46585@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46586 #define elf_core_dump NULL
46587 #endif
46588
46589+#ifdef CONFIG_PAX_MPROTECT
46590+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46591+#endif
46592+
46593 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46594 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46595 #else
46596@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46597 .load_binary = load_elf_binary,
46598 .load_shlib = load_elf_library,
46599 .core_dump = elf_core_dump,
46600+
46601+#ifdef CONFIG_PAX_MPROTECT
46602+ .handle_mprotect= elf_handle_mprotect,
46603+#endif
46604+
46605 .min_coredump = ELF_EXEC_PAGESIZE,
46606 .hasvdso = 1
46607 };
46608@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46609
46610 static int set_brk(unsigned long start, unsigned long end)
46611 {
46612+ unsigned long e = end;
46613+
46614 start = ELF_PAGEALIGN(start);
46615 end = ELF_PAGEALIGN(end);
46616 if (end > start) {
46617@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46618 if (BAD_ADDR(addr))
46619 return addr;
46620 }
46621- current->mm->start_brk = current->mm->brk = end;
46622+ current->mm->start_brk = current->mm->brk = e;
46623 return 0;
46624 }
46625
46626@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46627 elf_addr_t __user *u_rand_bytes;
46628 const char *k_platform = ELF_PLATFORM;
46629 const char *k_base_platform = ELF_BASE_PLATFORM;
46630- unsigned char k_rand_bytes[16];
46631+ u32 k_rand_bytes[4];
46632 int items;
46633 elf_addr_t *elf_info;
46634 int ei_index = 0;
46635 const struct cred *cred = current_cred();
46636 struct vm_area_struct *vma;
46637+ unsigned long saved_auxv[AT_VECTOR_SIZE];
46638+
46639+ pax_track_stack();
46640
46641 /*
46642 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46643@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46644 * Generate 16 random bytes for userspace PRNG seeding.
46645 */
46646 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46647- u_rand_bytes = (elf_addr_t __user *)
46648- STACK_ALLOC(p, sizeof(k_rand_bytes));
46649+ srandom32(k_rand_bytes[0] ^ random32());
46650+ srandom32(k_rand_bytes[1] ^ random32());
46651+ srandom32(k_rand_bytes[2] ^ random32());
46652+ srandom32(k_rand_bytes[3] ^ random32());
46653+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
46654+ u_rand_bytes = (elf_addr_t __user *) p;
46655 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46656 return -EFAULT;
46657
46658@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46659 return -EFAULT;
46660 current->mm->env_end = p;
46661
46662+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46663+
46664 /* Put the elf_info on the stack in the right place. */
46665 sp = (elf_addr_t __user *)envp + 1;
46666- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46667+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46668 return -EFAULT;
46669 return 0;
46670 }
46671@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46672 {
46673 struct elf_phdr *elf_phdata;
46674 struct elf_phdr *eppnt;
46675- unsigned long load_addr = 0;
46676+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46677 int load_addr_set = 0;
46678 unsigned long last_bss = 0, elf_bss = 0;
46679- unsigned long error = ~0UL;
46680+ unsigned long error = -EINVAL;
46681 unsigned long total_size;
46682 int retval, i, size;
46683
46684@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46685 goto out_close;
46686 }
46687
46688+#ifdef CONFIG_PAX_SEGMEXEC
46689+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46690+ pax_task_size = SEGMEXEC_TASK_SIZE;
46691+#endif
46692+
46693 eppnt = elf_phdata;
46694 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46695 if (eppnt->p_type == PT_LOAD) {
46696@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46697 k = load_addr + eppnt->p_vaddr;
46698 if (BAD_ADDR(k) ||
46699 eppnt->p_filesz > eppnt->p_memsz ||
46700- eppnt->p_memsz > TASK_SIZE ||
46701- TASK_SIZE - eppnt->p_memsz < k) {
46702+ eppnt->p_memsz > pax_task_size ||
46703+ pax_task_size - eppnt->p_memsz < k) {
46704 error = -ENOMEM;
46705 goto out_close;
46706 }
46707@@ -532,6 +558,351 @@ out:
46708 return error;
46709 }
46710
46711+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46712+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46713+{
46714+ unsigned long pax_flags = 0UL;
46715+
46716+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46717+
46718+#ifdef CONFIG_PAX_PAGEEXEC
46719+ if (elf_phdata->p_flags & PF_PAGEEXEC)
46720+ pax_flags |= MF_PAX_PAGEEXEC;
46721+#endif
46722+
46723+#ifdef CONFIG_PAX_SEGMEXEC
46724+ if (elf_phdata->p_flags & PF_SEGMEXEC)
46725+ pax_flags |= MF_PAX_SEGMEXEC;
46726+#endif
46727+
46728+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46729+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46730+ if (nx_enabled)
46731+ pax_flags &= ~MF_PAX_SEGMEXEC;
46732+ else
46733+ pax_flags &= ~MF_PAX_PAGEEXEC;
46734+ }
46735+#endif
46736+
46737+#ifdef CONFIG_PAX_EMUTRAMP
46738+ if (elf_phdata->p_flags & PF_EMUTRAMP)
46739+ pax_flags |= MF_PAX_EMUTRAMP;
46740+#endif
46741+
46742+#ifdef CONFIG_PAX_MPROTECT
46743+ if (elf_phdata->p_flags & PF_MPROTECT)
46744+ pax_flags |= MF_PAX_MPROTECT;
46745+#endif
46746+
46747+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46748+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46749+ pax_flags |= MF_PAX_RANDMMAP;
46750+#endif
46751+
46752+#endif
46753+
46754+ return pax_flags;
46755+}
46756+
46757+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46758+{
46759+ unsigned long pax_flags = 0UL;
46760+
46761+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46762+
46763+#ifdef CONFIG_PAX_PAGEEXEC
46764+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46765+ pax_flags |= MF_PAX_PAGEEXEC;
46766+#endif
46767+
46768+#ifdef CONFIG_PAX_SEGMEXEC
46769+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46770+ pax_flags |= MF_PAX_SEGMEXEC;
46771+#endif
46772+
46773+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46774+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46775+ if (nx_enabled)
46776+ pax_flags &= ~MF_PAX_SEGMEXEC;
46777+ else
46778+ pax_flags &= ~MF_PAX_PAGEEXEC;
46779+ }
46780+#endif
46781+
46782+#ifdef CONFIG_PAX_EMUTRAMP
46783+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46784+ pax_flags |= MF_PAX_EMUTRAMP;
46785+#endif
46786+
46787+#ifdef CONFIG_PAX_MPROTECT
46788+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46789+ pax_flags |= MF_PAX_MPROTECT;
46790+#endif
46791+
46792+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46793+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46794+ pax_flags |= MF_PAX_RANDMMAP;
46795+#endif
46796+
46797+#endif
46798+
46799+ return pax_flags;
46800+}
46801+
46802+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46803+{
46804+ unsigned long pax_flags = 0UL;
46805+
46806+#ifdef CONFIG_PAX_EI_PAX
46807+
46808+#ifdef CONFIG_PAX_PAGEEXEC
46809+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46810+ pax_flags |= MF_PAX_PAGEEXEC;
46811+#endif
46812+
46813+#ifdef CONFIG_PAX_SEGMEXEC
46814+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46815+ pax_flags |= MF_PAX_SEGMEXEC;
46816+#endif
46817+
46818+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46819+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46820+ if (nx_enabled)
46821+ pax_flags &= ~MF_PAX_SEGMEXEC;
46822+ else
46823+ pax_flags &= ~MF_PAX_PAGEEXEC;
46824+ }
46825+#endif
46826+
46827+#ifdef CONFIG_PAX_EMUTRAMP
46828+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46829+ pax_flags |= MF_PAX_EMUTRAMP;
46830+#endif
46831+
46832+#ifdef CONFIG_PAX_MPROTECT
46833+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46834+ pax_flags |= MF_PAX_MPROTECT;
46835+#endif
46836+
46837+#ifdef CONFIG_PAX_ASLR
46838+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46839+ pax_flags |= MF_PAX_RANDMMAP;
46840+#endif
46841+
46842+#else
46843+
46844+#ifdef CONFIG_PAX_PAGEEXEC
46845+ pax_flags |= MF_PAX_PAGEEXEC;
46846+#endif
46847+
46848+#ifdef CONFIG_PAX_MPROTECT
46849+ pax_flags |= MF_PAX_MPROTECT;
46850+#endif
46851+
46852+#ifdef CONFIG_PAX_RANDMMAP
46853+ pax_flags |= MF_PAX_RANDMMAP;
46854+#endif
46855+
46856+#ifdef CONFIG_PAX_SEGMEXEC
46857+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
46858+ pax_flags &= ~MF_PAX_PAGEEXEC;
46859+ pax_flags |= MF_PAX_SEGMEXEC;
46860+ }
46861+#endif
46862+
46863+#endif
46864+
46865+ return pax_flags;
46866+}
46867+
46868+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46869+{
46870+
46871+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46872+ unsigned long i;
46873+
46874+ for (i = 0UL; i < elf_ex->e_phnum; i++)
46875+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46876+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46877+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46878+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46879+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46880+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46881+ return ~0UL;
46882+
46883+#ifdef CONFIG_PAX_SOFTMODE
46884+ if (pax_softmode)
46885+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46886+ else
46887+#endif
46888+
46889+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46890+ break;
46891+ }
46892+#endif
46893+
46894+ return ~0UL;
46895+}
46896+
46897+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46898+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46899+{
46900+ unsigned long pax_flags = 0UL;
46901+
46902+#ifdef CONFIG_PAX_PAGEEXEC
46903+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46904+ pax_flags |= MF_PAX_PAGEEXEC;
46905+#endif
46906+
46907+#ifdef CONFIG_PAX_SEGMEXEC
46908+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46909+ pax_flags |= MF_PAX_SEGMEXEC;
46910+#endif
46911+
46912+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46913+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46914+ if ((__supported_pte_mask & _PAGE_NX))
46915+ pax_flags &= ~MF_PAX_SEGMEXEC;
46916+ else
46917+ pax_flags &= ~MF_PAX_PAGEEXEC;
46918+ }
46919+#endif
46920+
46921+#ifdef CONFIG_PAX_EMUTRAMP
46922+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46923+ pax_flags |= MF_PAX_EMUTRAMP;
46924+#endif
46925+
46926+#ifdef CONFIG_PAX_MPROTECT
46927+ if (pax_flags_softmode & MF_PAX_MPROTECT)
46928+ pax_flags |= MF_PAX_MPROTECT;
46929+#endif
46930+
46931+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46932+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46933+ pax_flags |= MF_PAX_RANDMMAP;
46934+#endif
46935+
46936+ return pax_flags;
46937+}
46938+
46939+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46940+{
46941+ unsigned long pax_flags = 0UL;
46942+
46943+#ifdef CONFIG_PAX_PAGEEXEC
46944+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46945+ pax_flags |= MF_PAX_PAGEEXEC;
46946+#endif
46947+
46948+#ifdef CONFIG_PAX_SEGMEXEC
46949+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46950+ pax_flags |= MF_PAX_SEGMEXEC;
46951+#endif
46952+
46953+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46954+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46955+ if ((__supported_pte_mask & _PAGE_NX))
46956+ pax_flags &= ~MF_PAX_SEGMEXEC;
46957+ else
46958+ pax_flags &= ~MF_PAX_PAGEEXEC;
46959+ }
46960+#endif
46961+
46962+#ifdef CONFIG_PAX_EMUTRAMP
46963+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46964+ pax_flags |= MF_PAX_EMUTRAMP;
46965+#endif
46966+
46967+#ifdef CONFIG_PAX_MPROTECT
46968+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46969+ pax_flags |= MF_PAX_MPROTECT;
46970+#endif
46971+
46972+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46973+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46974+ pax_flags |= MF_PAX_RANDMMAP;
46975+#endif
46976+
46977+ return pax_flags;
46978+}
46979+#endif
46980+
46981+static unsigned long pax_parse_xattr_pax(struct file * const file)
46982+{
46983+
46984+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46985+ ssize_t xattr_size, i;
46986+ unsigned char xattr_value[5];
46987+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46988+
46989+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46990+ if (xattr_size <= 0)
46991+ return ~0UL;
46992+
46993+ for (i = 0; i < xattr_size; i++)
46994+ switch (xattr_value[i]) {
46995+ default:
46996+ return ~0UL;
46997+
46998+#define parse_flag(option1, option2, flag) \
46999+ case option1: \
47000+ pax_flags_hardmode |= MF_PAX_##flag; \
47001+ break; \
47002+ case option2: \
47003+ pax_flags_softmode |= MF_PAX_##flag; \
47004+ break;
47005+
47006+ parse_flag('p', 'P', PAGEEXEC);
47007+ parse_flag('e', 'E', EMUTRAMP);
47008+ parse_flag('m', 'M', MPROTECT);
47009+ parse_flag('r', 'R', RANDMMAP);
47010+ parse_flag('s', 'S', SEGMEXEC);
47011+
47012+#undef parse_flag
47013+ }
47014+
47015+ if (pax_flags_hardmode & pax_flags_softmode)
47016+ return ~0UL;
47017+
47018+#ifdef CONFIG_PAX_SOFTMODE
47019+ if (pax_softmode)
47020+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
47021+ else
47022+#endif
47023+
47024+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
47025+#else
47026+ return ~0UL;
47027+#endif
47028+
47029+}
47030+
47031+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
47032+{
47033+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
47034+
47035+ pax_flags = pax_parse_ei_pax(elf_ex);
47036+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
47037+ xattr_pax_flags = pax_parse_xattr_pax(file);
47038+
47039+ if (pt_pax_flags == ~0UL)
47040+ pt_pax_flags = xattr_pax_flags;
47041+ else if (xattr_pax_flags == ~0UL)
47042+ xattr_pax_flags = pt_pax_flags;
47043+ if (pt_pax_flags != xattr_pax_flags)
47044+ return -EINVAL;
47045+ if (pt_pax_flags != ~0UL)
47046+ pax_flags = pt_pax_flags;
47047+
47048+ if (0 > pax_check_flags(&pax_flags))
47049+ return -EINVAL;
47050+
47051+ current->mm->pax_flags = pax_flags;
47052+ return 0;
47053+}
47054+#endif
47055+
47056 /*
47057 * These are the functions used to load ELF style executables and shared
47058 * libraries. There is no binary dependent code anywhere else.
47059@@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47060 {
47061 unsigned int random_variable = 0;
47062
47063+#ifdef CONFIG_PAX_RANDUSTACK
47064+ if (randomize_va_space)
47065+ return stack_top - current->mm->delta_stack;
47066+#endif
47067+
47068 if ((current->flags & PF_RANDOMIZE) &&
47069 !(current->personality & ADDR_NO_RANDOMIZE)) {
47070 random_variable = get_random_int() & STACK_RND_MASK;
47071@@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47072 unsigned long load_addr = 0, load_bias = 0;
47073 int load_addr_set = 0;
47074 char * elf_interpreter = NULL;
47075- unsigned long error;
47076+ unsigned long error = 0;
47077 struct elf_phdr *elf_ppnt, *elf_phdata;
47078 unsigned long elf_bss, elf_brk;
47079 int retval, i;
47080@@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47081 unsigned long start_code, end_code, start_data, end_data;
47082 unsigned long reloc_func_desc = 0;
47083 int executable_stack = EXSTACK_DEFAULT;
47084- unsigned long def_flags = 0;
47085 struct {
47086 struct elfhdr elf_ex;
47087 struct elfhdr interp_elf_ex;
47088 } *loc;
47089+ unsigned long pax_task_size = TASK_SIZE;
47090
47091 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47092 if (!loc) {
47093@@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47094
47095 /* OK, This is the point of no return */
47096 current->flags &= ~PF_FORKNOEXEC;
47097- current->mm->def_flags = def_flags;
47098+
47099+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47100+ current->mm->pax_flags = 0UL;
47101+#endif
47102+
47103+#ifdef CONFIG_PAX_DLRESOLVE
47104+ current->mm->call_dl_resolve = 0UL;
47105+#endif
47106+
47107+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47108+ current->mm->call_syscall = 0UL;
47109+#endif
47110+
47111+#ifdef CONFIG_PAX_ASLR
47112+ current->mm->delta_mmap = 0UL;
47113+ current->mm->delta_stack = 0UL;
47114+#endif
47115+
47116+ current->mm->def_flags = 0;
47117+
47118+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
47119+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47120+ send_sig(SIGKILL, current, 0);
47121+ goto out_free_dentry;
47122+ }
47123+#endif
47124+
47125+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47126+ pax_set_initial_flags(bprm);
47127+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47128+ if (pax_set_initial_flags_func)
47129+ (pax_set_initial_flags_func)(bprm);
47130+#endif
47131+
47132+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47133+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
47134+ current->mm->context.user_cs_limit = PAGE_SIZE;
47135+ current->mm->def_flags |= VM_PAGEEXEC;
47136+ }
47137+#endif
47138+
47139+#ifdef CONFIG_PAX_SEGMEXEC
47140+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47141+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47142+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47143+ pax_task_size = SEGMEXEC_TASK_SIZE;
47144+ }
47145+#endif
47146+
47147+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47148+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47149+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47150+ put_cpu();
47151+ }
47152+#endif
47153
47154 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47155 may depend on the personality. */
47156 SET_PERSONALITY(loc->elf_ex);
47157+
47158+#ifdef CONFIG_PAX_ASLR
47159+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47160+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47161+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47162+ }
47163+#endif
47164+
47165+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47166+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47167+ executable_stack = EXSTACK_DISABLE_X;
47168+ current->personality &= ~READ_IMPLIES_EXEC;
47169+ } else
47170+#endif
47171+
47172 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47173 current->personality |= READ_IMPLIES_EXEC;
47174
47175@@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47176 * might try to exec. This is because the brk will
47177 * follow the loader, and is not movable. */
47178 #ifdef CONFIG_X86
47179- load_bias = 0;
47180+ if (current->flags & PF_RANDOMIZE)
47181+ load_bias = 0;
47182+ else
47183+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47184 #else
47185 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47186 #endif
47187+
47188+#ifdef CONFIG_PAX_RANDMMAP
47189+ /* PaX: randomize base address at the default exe base if requested */
47190+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47191+#ifdef CONFIG_SPARC64
47192+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47193+#else
47194+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47195+#endif
47196+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47197+ elf_flags |= MAP_FIXED;
47198+ }
47199+#endif
47200+
47201 }
47202
47203 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47204@@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47205 * allowed task size. Note that p_filesz must always be
47206 * <= p_memsz so it is only necessary to check p_memsz.
47207 */
47208- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47209- elf_ppnt->p_memsz > TASK_SIZE ||
47210- TASK_SIZE - elf_ppnt->p_memsz < k) {
47211+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47212+ elf_ppnt->p_memsz > pax_task_size ||
47213+ pax_task_size - elf_ppnt->p_memsz < k) {
47214 /* set_brk can never work. Avoid overflows. */
47215 send_sig(SIGKILL, current, 0);
47216 retval = -EINVAL;
47217@@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47218 start_data += load_bias;
47219 end_data += load_bias;
47220
47221+#ifdef CONFIG_PAX_RANDMMAP
47222+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47223+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
47224+#endif
47225+
47226 /* Calling set_brk effectively mmaps the pages that we need
47227 * for the bss and break sections. We must do this before
47228 * mapping in the interpreter, to make sure it doesn't wind
47229@@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
47230 goto out_free_dentry;
47231 }
47232 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47233- send_sig(SIGSEGV, current, 0);
47234- retval = -EFAULT; /* Nobody gets to see this, but.. */
47235- goto out_free_dentry;
47236+ /*
47237+ * This bss-zeroing can fail if the ELF
47238+ * file specifies odd protections. So
47239+ * we don't check the return value
47240+ */
47241 }
47242
47243 if (elf_interpreter) {
47244@@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
47245 unsigned long n = off;
47246 if (n > PAGE_SIZE)
47247 n = PAGE_SIZE;
47248- if (!dump_write(file, buf, n))
47249+ if (!dump_write(file, buf, n)) {
47250+ free_page((unsigned long)buf);
47251 return 0;
47252+ }
47253 off -= n;
47254 }
47255 free_page((unsigned long)buf);
47256@@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
47257 * Decide what to dump of a segment, part, all or none.
47258 */
47259 static unsigned long vma_dump_size(struct vm_area_struct *vma,
47260- unsigned long mm_flags)
47261+ unsigned long mm_flags, long signr)
47262 {
47263 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
47264
47265@@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
47266 if (vma->vm_file == NULL)
47267 return 0;
47268
47269- if (FILTER(MAPPED_PRIVATE))
47270+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47271 goto whole;
47272
47273 /*
47274@@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47275 #undef DUMP_WRITE
47276
47277 #define DUMP_WRITE(addr, nr) \
47278+ do { \
47279+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47280 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47281- goto end_coredump;
47282+ goto end_coredump; \
47283+ } while (0);
47284
47285 static void fill_elf_header(struct elfhdr *elf, int segs,
47286 u16 machine, u32 flags, u8 osabi)
47287@@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47288 {
47289 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47290 int i = 0;
47291- do
47292+ do {
47293 i += 2;
47294- while (auxv[i - 2] != AT_NULL);
47295+ } while (auxv[i - 2] != AT_NULL);
47296 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47297 }
47298
47299@@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47300 phdr.p_offset = offset;
47301 phdr.p_vaddr = vma->vm_start;
47302 phdr.p_paddr = 0;
47303- phdr.p_filesz = vma_dump_size(vma, mm_flags);
47304+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47305 phdr.p_memsz = vma->vm_end - vma->vm_start;
47306 offset += phdr.p_filesz;
47307 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47308@@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47309 unsigned long addr;
47310 unsigned long end;
47311
47312- end = vma->vm_start + vma_dump_size(vma, mm_flags);
47313+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47314
47315 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47316 struct page *page;
47317@@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47318 page = get_dump_page(addr);
47319 if (page) {
47320 void *kaddr = kmap(page);
47321+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47322 stop = ((size += PAGE_SIZE) > limit) ||
47323 !dump_write(file, kaddr, PAGE_SIZE);
47324 kunmap(page);
47325@@ -2042,6 +2517,97 @@ out:
47326
47327 #endif /* USE_ELF_CORE_DUMP */
47328
47329+#ifdef CONFIG_PAX_MPROTECT
47330+/* PaX: non-PIC ELF libraries need relocations on their executable segments
47331+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47332+ * we'll remove VM_MAYWRITE for good on RELRO segments.
47333+ *
47334+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47335+ * basis because we want to allow the common case and not the special ones.
47336+ */
47337+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47338+{
47339+ struct elfhdr elf_h;
47340+ struct elf_phdr elf_p;
47341+ unsigned long i;
47342+ unsigned long oldflags;
47343+ bool is_textrel_rw, is_textrel_rx, is_relro;
47344+
47345+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47346+ return;
47347+
47348+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47349+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47350+
47351+#ifdef CONFIG_PAX_ELFRELOCS
47352+ /* possible TEXTREL */
47353+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47354+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47355+#else
47356+ is_textrel_rw = false;
47357+ is_textrel_rx = false;
47358+#endif
47359+
47360+ /* possible RELRO */
47361+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47362+
47363+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47364+ return;
47365+
47366+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47367+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47368+
47369+#ifdef CONFIG_PAX_ETEXECRELOCS
47370+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47371+#else
47372+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47373+#endif
47374+
47375+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47376+ !elf_check_arch(&elf_h) ||
47377+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47378+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47379+ return;
47380+
47381+ for (i = 0UL; i < elf_h.e_phnum; i++) {
47382+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47383+ return;
47384+ switch (elf_p.p_type) {
47385+ case PT_DYNAMIC:
47386+ if (!is_textrel_rw && !is_textrel_rx)
47387+ continue;
47388+ i = 0UL;
47389+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47390+ elf_dyn dyn;
47391+
47392+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47393+ return;
47394+ if (dyn.d_tag == DT_NULL)
47395+ return;
47396+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47397+ gr_log_textrel(vma);
47398+ if (is_textrel_rw)
47399+ vma->vm_flags |= VM_MAYWRITE;
47400+ else
47401+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47402+ vma->vm_flags &= ~VM_MAYWRITE;
47403+ return;
47404+ }
47405+ i++;
47406+ }
47407+ return;
47408+
47409+ case PT_GNU_RELRO:
47410+ if (!is_relro)
47411+ continue;
47412+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47413+ vma->vm_flags &= ~VM_MAYWRITE;
47414+ return;
47415+ }
47416+ }
47417+}
47418+#endif
47419+
47420 static int __init init_elf_binfmt(void)
47421 {
47422 return register_binfmt(&elf_format);
47423diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47424index ca88c46..f155a60 100644
47425--- a/fs/binfmt_flat.c
47426+++ b/fs/binfmt_flat.c
47427@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47428 realdatastart = (unsigned long) -ENOMEM;
47429 printk("Unable to allocate RAM for process data, errno %d\n",
47430 (int)-realdatastart);
47431+ down_write(&current->mm->mmap_sem);
47432 do_munmap(current->mm, textpos, text_len);
47433+ up_write(&current->mm->mmap_sem);
47434 ret = realdatastart;
47435 goto err;
47436 }
47437@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47438 }
47439 if (IS_ERR_VALUE(result)) {
47440 printk("Unable to read data+bss, errno %d\n", (int)-result);
47441+ down_write(&current->mm->mmap_sem);
47442 do_munmap(current->mm, textpos, text_len);
47443 do_munmap(current->mm, realdatastart, data_len + extra);
47444+ up_write(&current->mm->mmap_sem);
47445 ret = result;
47446 goto err;
47447 }
47448@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47449 }
47450 if (IS_ERR_VALUE(result)) {
47451 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47452+ down_write(&current->mm->mmap_sem);
47453 do_munmap(current->mm, textpos, text_len + data_len + extra +
47454 MAX_SHARED_LIBS * sizeof(unsigned long));
47455+ up_write(&current->mm->mmap_sem);
47456 ret = result;
47457 goto err;
47458 }
47459diff --git a/fs/bio.c b/fs/bio.c
47460index e696713..83de133 100644
47461--- a/fs/bio.c
47462+++ b/fs/bio.c
47463@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47464
47465 i = 0;
47466 while (i < bio_slab_nr) {
47467- struct bio_slab *bslab = &bio_slabs[i];
47468+ bslab = &bio_slabs[i];
47469
47470 if (!bslab->slab && entry == -1)
47471 entry = i;
47472@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47473 const int read = bio_data_dir(bio) == READ;
47474 struct bio_map_data *bmd = bio->bi_private;
47475 int i;
47476- char *p = bmd->sgvecs[0].iov_base;
47477+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47478
47479 __bio_for_each_segment(bvec, bio, i, 0) {
47480 char *addr = page_address(bvec->bv_page);
47481diff --git a/fs/block_dev.c b/fs/block_dev.c
47482index e65efa2..04fae57 100644
47483--- a/fs/block_dev.c
47484+++ b/fs/block_dev.c
47485@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47486 else if (bdev->bd_contains == bdev)
47487 res = 0; /* is a whole device which isn't held */
47488
47489- else if (bdev->bd_contains->bd_holder == bd_claim)
47490+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47491 res = 0; /* is a partition of a device that is being partitioned */
47492 else if (bdev->bd_contains->bd_holder != NULL)
47493 res = -EBUSY; /* is a partition of a held device */
47494diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47495index c4bc570..42acd8d 100644
47496--- a/fs/btrfs/ctree.c
47497+++ b/fs/btrfs/ctree.c
47498@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47499 free_extent_buffer(buf);
47500 add_root_to_dirty_list(root);
47501 } else {
47502- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47503- parent_start = parent->start;
47504- else
47505+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47506+ if (parent)
47507+ parent_start = parent->start;
47508+ else
47509+ parent_start = 0;
47510+ } else
47511 parent_start = 0;
47512
47513 WARN_ON(trans->transid != btrfs_header_generation(parent));
47514@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47515
47516 ret = 0;
47517 if (slot == 0) {
47518- struct btrfs_disk_key disk_key;
47519 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47520 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47521 }
47522diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47523index f447188..59c17c5 100644
47524--- a/fs/btrfs/disk-io.c
47525+++ b/fs/btrfs/disk-io.c
47526@@ -39,7 +39,7 @@
47527 #include "tree-log.h"
47528 #include "free-space-cache.h"
47529
47530-static struct extent_io_ops btree_extent_io_ops;
47531+static const struct extent_io_ops btree_extent_io_ops;
47532 static void end_workqueue_fn(struct btrfs_work *work);
47533 static void free_fs_root(struct btrfs_root *root);
47534
47535@@ -2607,7 +2607,7 @@ out:
47536 return 0;
47537 }
47538
47539-static struct extent_io_ops btree_extent_io_ops = {
47540+static const struct extent_io_ops btree_extent_io_ops = {
47541 .write_cache_pages_lock_hook = btree_lock_page_hook,
47542 .readpage_end_io_hook = btree_readpage_end_io_hook,
47543 .submit_bio_hook = btree_submit_bio_hook,
47544diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47545index 559f724..a026171 100644
47546--- a/fs/btrfs/extent-tree.c
47547+++ b/fs/btrfs/extent-tree.c
47548@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47549 u64 group_start = group->key.objectid;
47550 new_extents = kmalloc(sizeof(*new_extents),
47551 GFP_NOFS);
47552+ if (!new_extents) {
47553+ ret = -ENOMEM;
47554+ goto out;
47555+ }
47556 nr_extents = 1;
47557 ret = get_new_locations(reloc_inode,
47558 extent_key,
47559diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47560index 36de250..7ec75c7 100644
47561--- a/fs/btrfs/extent_io.h
47562+++ b/fs/btrfs/extent_io.h
47563@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47564 struct bio *bio, int mirror_num,
47565 unsigned long bio_flags);
47566 struct extent_io_ops {
47567- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47568+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47569 u64 start, u64 end, int *page_started,
47570 unsigned long *nr_written);
47571- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47572- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47573+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47574+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47575 extent_submit_bio_hook_t *submit_bio_hook;
47576- int (*merge_bio_hook)(struct page *page, unsigned long offset,
47577+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47578 size_t size, struct bio *bio,
47579 unsigned long bio_flags);
47580- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47581- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47582+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47583+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47584 u64 start, u64 end,
47585 struct extent_state *state);
47586- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47587+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47588 u64 start, u64 end,
47589 struct extent_state *state);
47590- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47591+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47592 struct extent_state *state);
47593- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47594+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47595 struct extent_state *state, int uptodate);
47596- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47597+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47598 unsigned long old, unsigned long bits);
47599- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47600+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47601 unsigned long bits);
47602- int (*merge_extent_hook)(struct inode *inode,
47603+ int (* const merge_extent_hook)(struct inode *inode,
47604 struct extent_state *new,
47605 struct extent_state *other);
47606- int (*split_extent_hook)(struct inode *inode,
47607+ int (* const split_extent_hook)(struct inode *inode,
47608 struct extent_state *orig, u64 split);
47609- int (*write_cache_pages_lock_hook)(struct page *page);
47610+ int (* const write_cache_pages_lock_hook)(struct page *page);
47611 };
47612
47613 struct extent_io_tree {
47614@@ -88,7 +88,7 @@ struct extent_io_tree {
47615 u64 dirty_bytes;
47616 spinlock_t lock;
47617 spinlock_t buffer_lock;
47618- struct extent_io_ops *ops;
47619+ const struct extent_io_ops *ops;
47620 };
47621
47622 struct extent_state {
47623diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47624index cb2849f..3718fb4 100644
47625--- a/fs/btrfs/free-space-cache.c
47626+++ b/fs/btrfs/free-space-cache.c
47627@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47628
47629 while(1) {
47630 if (entry->bytes < bytes || entry->offset < min_start) {
47631- struct rb_node *node;
47632-
47633 node = rb_next(&entry->offset_index);
47634 if (!node)
47635 break;
47636@@ -1226,7 +1224,7 @@ again:
47637 */
47638 while (entry->bitmap || found_bitmap ||
47639 (!entry->bitmap && entry->bytes < min_bytes)) {
47640- struct rb_node *node = rb_next(&entry->offset_index);
47641+ node = rb_next(&entry->offset_index);
47642
47643 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47644 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47645diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47646index e03a836..323837e 100644
47647--- a/fs/btrfs/inode.c
47648+++ b/fs/btrfs/inode.c
47649@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47650 static const struct address_space_operations btrfs_aops;
47651 static const struct address_space_operations btrfs_symlink_aops;
47652 static const struct file_operations btrfs_dir_file_operations;
47653-static struct extent_io_ops btrfs_extent_io_ops;
47654+static const struct extent_io_ops btrfs_extent_io_ops;
47655
47656 static struct kmem_cache *btrfs_inode_cachep;
47657 struct kmem_cache *btrfs_trans_handle_cachep;
47658@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47659 1, 0, NULL, GFP_NOFS);
47660 while (start < end) {
47661 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47662+ BUG_ON(!async_cow);
47663 async_cow->inode = inode;
47664 async_cow->root = root;
47665 async_cow->locked_page = locked_page;
47666@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47667 inline_size = btrfs_file_extent_inline_item_len(leaf,
47668 btrfs_item_nr(leaf, path->slots[0]));
47669 tmp = kmalloc(inline_size, GFP_NOFS);
47670+ if (!tmp)
47671+ return -ENOMEM;
47672 ptr = btrfs_file_extent_inline_start(item);
47673
47674 read_extent_buffer(leaf, tmp, ptr, inline_size);
47675@@ -5410,7 +5413,7 @@ fail:
47676 return -ENOMEM;
47677 }
47678
47679-static int btrfs_getattr(struct vfsmount *mnt,
47680+int btrfs_getattr(struct vfsmount *mnt,
47681 struct dentry *dentry, struct kstat *stat)
47682 {
47683 struct inode *inode = dentry->d_inode;
47684@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47685 return 0;
47686 }
47687
47688+EXPORT_SYMBOL(btrfs_getattr);
47689+
47690+dev_t get_btrfs_dev_from_inode(struct inode *inode)
47691+{
47692+ return BTRFS_I(inode)->root->anon_super.s_dev;
47693+}
47694+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47695+
47696 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47697 struct inode *new_dir, struct dentry *new_dentry)
47698 {
47699@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47700 .fsync = btrfs_sync_file,
47701 };
47702
47703-static struct extent_io_ops btrfs_extent_io_ops = {
47704+static const struct extent_io_ops btrfs_extent_io_ops = {
47705 .fill_delalloc = run_delalloc_range,
47706 .submit_bio_hook = btrfs_submit_bio_hook,
47707 .merge_bio_hook = btrfs_merge_bio_hook,
47708diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47709index ab7ab53..94e0781 100644
47710--- a/fs/btrfs/relocation.c
47711+++ b/fs/btrfs/relocation.c
47712@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47713 }
47714 spin_unlock(&rc->reloc_root_tree.lock);
47715
47716- BUG_ON((struct btrfs_root *)node->data != root);
47717+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
47718
47719 if (!del) {
47720 spin_lock(&rc->reloc_root_tree.lock);
47721diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47722index a240b6f..4ce16ef 100644
47723--- a/fs/btrfs/sysfs.c
47724+++ b/fs/btrfs/sysfs.c
47725@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47726 complete(&root->kobj_unregister);
47727 }
47728
47729-static struct sysfs_ops btrfs_super_attr_ops = {
47730+static const struct sysfs_ops btrfs_super_attr_ops = {
47731 .show = btrfs_super_attr_show,
47732 .store = btrfs_super_attr_store,
47733 };
47734
47735-static struct sysfs_ops btrfs_root_attr_ops = {
47736+static const struct sysfs_ops btrfs_root_attr_ops = {
47737 .show = btrfs_root_attr_show,
47738 .store = btrfs_root_attr_store,
47739 };
47740diff --git a/fs/buffer.c b/fs/buffer.c
47741index 6fa5302..395d9f6 100644
47742--- a/fs/buffer.c
47743+++ b/fs/buffer.c
47744@@ -25,6 +25,7 @@
47745 #include <linux/percpu.h>
47746 #include <linux/slab.h>
47747 #include <linux/capability.h>
47748+#include <linux/security.h>
47749 #include <linux/blkdev.h>
47750 #include <linux/file.h>
47751 #include <linux/quotaops.h>
47752diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47753index 3797e00..ce776f6 100644
47754--- a/fs/cachefiles/bind.c
47755+++ b/fs/cachefiles/bind.c
47756@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47757 args);
47758
47759 /* start by checking things over */
47760- ASSERT(cache->fstop_percent >= 0 &&
47761- cache->fstop_percent < cache->fcull_percent &&
47762+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
47763 cache->fcull_percent < cache->frun_percent &&
47764 cache->frun_percent < 100);
47765
47766- ASSERT(cache->bstop_percent >= 0 &&
47767- cache->bstop_percent < cache->bcull_percent &&
47768+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
47769 cache->bcull_percent < cache->brun_percent &&
47770 cache->brun_percent < 100);
47771
47772diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47773index 4618516..bb30d01 100644
47774--- a/fs/cachefiles/daemon.c
47775+++ b/fs/cachefiles/daemon.c
47776@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47777 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47778 return -EIO;
47779
47780- if (datalen < 0 || datalen > PAGE_SIZE - 1)
47781+ if (datalen > PAGE_SIZE - 1)
47782 return -EOPNOTSUPP;
47783
47784 /* drag the command string into the kernel so we can parse it */
47785@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47786 if (args[0] != '%' || args[1] != '\0')
47787 return -EINVAL;
47788
47789- if (fstop < 0 || fstop >= cache->fcull_percent)
47790+ if (fstop >= cache->fcull_percent)
47791 return cachefiles_daemon_range_error(cache, args);
47792
47793 cache->fstop_percent = fstop;
47794@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47795 if (args[0] != '%' || args[1] != '\0')
47796 return -EINVAL;
47797
47798- if (bstop < 0 || bstop >= cache->bcull_percent)
47799+ if (bstop >= cache->bcull_percent)
47800 return cachefiles_daemon_range_error(cache, args);
47801
47802 cache->bstop_percent = bstop;
47803diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47804index f7c255f..fcd61de 100644
47805--- a/fs/cachefiles/internal.h
47806+++ b/fs/cachefiles/internal.h
47807@@ -56,7 +56,7 @@ struct cachefiles_cache {
47808 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47809 struct rb_root active_nodes; /* active nodes (can't be culled) */
47810 rwlock_t active_lock; /* lock for active_nodes */
47811- atomic_t gravecounter; /* graveyard uniquifier */
47812+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47813 unsigned frun_percent; /* when to stop culling (% files) */
47814 unsigned fcull_percent; /* when to start culling (% files) */
47815 unsigned fstop_percent; /* when to stop allocating (% files) */
47816@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47817 * proc.c
47818 */
47819 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47820-extern atomic_t cachefiles_lookup_histogram[HZ];
47821-extern atomic_t cachefiles_mkdir_histogram[HZ];
47822-extern atomic_t cachefiles_create_histogram[HZ];
47823+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47824+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47825+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47826
47827 extern int __init cachefiles_proc_init(void);
47828 extern void cachefiles_proc_cleanup(void);
47829 static inline
47830-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47831+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47832 {
47833 unsigned long jif = jiffies - start_jif;
47834 if (jif >= HZ)
47835 jif = HZ - 1;
47836- atomic_inc(&histogram[jif]);
47837+ atomic_inc_unchecked(&histogram[jif]);
47838 }
47839
47840 #else
47841diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47842index 14ac480..a62766c 100644
47843--- a/fs/cachefiles/namei.c
47844+++ b/fs/cachefiles/namei.c
47845@@ -250,7 +250,7 @@ try_again:
47846 /* first step is to make up a grave dentry in the graveyard */
47847 sprintf(nbuffer, "%08x%08x",
47848 (uint32_t) get_seconds(),
47849- (uint32_t) atomic_inc_return(&cache->gravecounter));
47850+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47851
47852 /* do the multiway lock magic */
47853 trap = lock_rename(cache->graveyard, dir);
47854diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47855index eccd339..4c1d995 100644
47856--- a/fs/cachefiles/proc.c
47857+++ b/fs/cachefiles/proc.c
47858@@ -14,9 +14,9 @@
47859 #include <linux/seq_file.h>
47860 #include "internal.h"
47861
47862-atomic_t cachefiles_lookup_histogram[HZ];
47863-atomic_t cachefiles_mkdir_histogram[HZ];
47864-atomic_t cachefiles_create_histogram[HZ];
47865+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47866+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47867+atomic_unchecked_t cachefiles_create_histogram[HZ];
47868
47869 /*
47870 * display the latency histogram
47871@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47872 return 0;
47873 default:
47874 index = (unsigned long) v - 3;
47875- x = atomic_read(&cachefiles_lookup_histogram[index]);
47876- y = atomic_read(&cachefiles_mkdir_histogram[index]);
47877- z = atomic_read(&cachefiles_create_histogram[index]);
47878+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47879+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47880+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47881 if (x == 0 && y == 0 && z == 0)
47882 return 0;
47883
47884diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47885index a6c8c6f..5cf8517 100644
47886--- a/fs/cachefiles/rdwr.c
47887+++ b/fs/cachefiles/rdwr.c
47888@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47889 old_fs = get_fs();
47890 set_fs(KERNEL_DS);
47891 ret = file->f_op->write(
47892- file, (const void __user *) data, len, &pos);
47893+ file, (const void __force_user *) data, len, &pos);
47894 set_fs(old_fs);
47895 kunmap(page);
47896 if (ret != len)
47897diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47898index 42cec2a..2aba466 100644
47899--- a/fs/cifs/cifs_debug.c
47900+++ b/fs/cifs/cifs_debug.c
47901@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47902 tcon = list_entry(tmp3,
47903 struct cifsTconInfo,
47904 tcon_list);
47905- atomic_set(&tcon->num_smbs_sent, 0);
47906- atomic_set(&tcon->num_writes, 0);
47907- atomic_set(&tcon->num_reads, 0);
47908- atomic_set(&tcon->num_oplock_brks, 0);
47909- atomic_set(&tcon->num_opens, 0);
47910- atomic_set(&tcon->num_posixopens, 0);
47911- atomic_set(&tcon->num_posixmkdirs, 0);
47912- atomic_set(&tcon->num_closes, 0);
47913- atomic_set(&tcon->num_deletes, 0);
47914- atomic_set(&tcon->num_mkdirs, 0);
47915- atomic_set(&tcon->num_rmdirs, 0);
47916- atomic_set(&tcon->num_renames, 0);
47917- atomic_set(&tcon->num_t2renames, 0);
47918- atomic_set(&tcon->num_ffirst, 0);
47919- atomic_set(&tcon->num_fnext, 0);
47920- atomic_set(&tcon->num_fclose, 0);
47921- atomic_set(&tcon->num_hardlinks, 0);
47922- atomic_set(&tcon->num_symlinks, 0);
47923- atomic_set(&tcon->num_locks, 0);
47924+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47925+ atomic_set_unchecked(&tcon->num_writes, 0);
47926+ atomic_set_unchecked(&tcon->num_reads, 0);
47927+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47928+ atomic_set_unchecked(&tcon->num_opens, 0);
47929+ atomic_set_unchecked(&tcon->num_posixopens, 0);
47930+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47931+ atomic_set_unchecked(&tcon->num_closes, 0);
47932+ atomic_set_unchecked(&tcon->num_deletes, 0);
47933+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
47934+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
47935+ atomic_set_unchecked(&tcon->num_renames, 0);
47936+ atomic_set_unchecked(&tcon->num_t2renames, 0);
47937+ atomic_set_unchecked(&tcon->num_ffirst, 0);
47938+ atomic_set_unchecked(&tcon->num_fnext, 0);
47939+ atomic_set_unchecked(&tcon->num_fclose, 0);
47940+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
47941+ atomic_set_unchecked(&tcon->num_symlinks, 0);
47942+ atomic_set_unchecked(&tcon->num_locks, 0);
47943 }
47944 }
47945 }
47946@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47947 if (tcon->need_reconnect)
47948 seq_puts(m, "\tDISCONNECTED ");
47949 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47950- atomic_read(&tcon->num_smbs_sent),
47951- atomic_read(&tcon->num_oplock_brks));
47952+ atomic_read_unchecked(&tcon->num_smbs_sent),
47953+ atomic_read_unchecked(&tcon->num_oplock_brks));
47954 seq_printf(m, "\nReads: %d Bytes: %lld",
47955- atomic_read(&tcon->num_reads),
47956+ atomic_read_unchecked(&tcon->num_reads),
47957 (long long)(tcon->bytes_read));
47958 seq_printf(m, "\nWrites: %d Bytes: %lld",
47959- atomic_read(&tcon->num_writes),
47960+ atomic_read_unchecked(&tcon->num_writes),
47961 (long long)(tcon->bytes_written));
47962 seq_printf(m, "\nFlushes: %d",
47963- atomic_read(&tcon->num_flushes));
47964+ atomic_read_unchecked(&tcon->num_flushes));
47965 seq_printf(m, "\nLocks: %d HardLinks: %d "
47966 "Symlinks: %d",
47967- atomic_read(&tcon->num_locks),
47968- atomic_read(&tcon->num_hardlinks),
47969- atomic_read(&tcon->num_symlinks));
47970+ atomic_read_unchecked(&tcon->num_locks),
47971+ atomic_read_unchecked(&tcon->num_hardlinks),
47972+ atomic_read_unchecked(&tcon->num_symlinks));
47973 seq_printf(m, "\nOpens: %d Closes: %d "
47974 "Deletes: %d",
47975- atomic_read(&tcon->num_opens),
47976- atomic_read(&tcon->num_closes),
47977- atomic_read(&tcon->num_deletes));
47978+ atomic_read_unchecked(&tcon->num_opens),
47979+ atomic_read_unchecked(&tcon->num_closes),
47980+ atomic_read_unchecked(&tcon->num_deletes));
47981 seq_printf(m, "\nPosix Opens: %d "
47982 "Posix Mkdirs: %d",
47983- atomic_read(&tcon->num_posixopens),
47984- atomic_read(&tcon->num_posixmkdirs));
47985+ atomic_read_unchecked(&tcon->num_posixopens),
47986+ atomic_read_unchecked(&tcon->num_posixmkdirs));
47987 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47988- atomic_read(&tcon->num_mkdirs),
47989- atomic_read(&tcon->num_rmdirs));
47990+ atomic_read_unchecked(&tcon->num_mkdirs),
47991+ atomic_read_unchecked(&tcon->num_rmdirs));
47992 seq_printf(m, "\nRenames: %d T2 Renames %d",
47993- atomic_read(&tcon->num_renames),
47994- atomic_read(&tcon->num_t2renames));
47995+ atomic_read_unchecked(&tcon->num_renames),
47996+ atomic_read_unchecked(&tcon->num_t2renames));
47997 seq_printf(m, "\nFindFirst: %d FNext %d "
47998 "FClose %d",
47999- atomic_read(&tcon->num_ffirst),
48000- atomic_read(&tcon->num_fnext),
48001- atomic_read(&tcon->num_fclose));
48002+ atomic_read_unchecked(&tcon->num_ffirst),
48003+ atomic_read_unchecked(&tcon->num_fnext),
48004+ atomic_read_unchecked(&tcon->num_fclose));
48005 }
48006 }
48007 }
48008diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
48009index 1445407..68cb0dc 100644
48010--- a/fs/cifs/cifsfs.c
48011+++ b/fs/cifs/cifsfs.c
48012@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
48013 cifs_req_cachep = kmem_cache_create("cifs_request",
48014 CIFSMaxBufSize +
48015 MAX_CIFS_HDR_SIZE, 0,
48016- SLAB_HWCACHE_ALIGN, NULL);
48017+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
48018 if (cifs_req_cachep == NULL)
48019 return -ENOMEM;
48020
48021@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
48022 efficient to alloc 1 per page off the slab compared to 17K (5page)
48023 alloc of large cifs buffers even when page debugging is on */
48024 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
48025- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
48026+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
48027 NULL);
48028 if (cifs_sm_req_cachep == NULL) {
48029 mempool_destroy(cifs_req_poolp);
48030@@ -991,8 +991,8 @@ init_cifs(void)
48031 atomic_set(&bufAllocCount, 0);
48032 atomic_set(&smBufAllocCount, 0);
48033 #ifdef CONFIG_CIFS_STATS2
48034- atomic_set(&totBufAllocCount, 0);
48035- atomic_set(&totSmBufAllocCount, 0);
48036+ atomic_set_unchecked(&totBufAllocCount, 0);
48037+ atomic_set_unchecked(&totSmBufAllocCount, 0);
48038 #endif /* CONFIG_CIFS_STATS2 */
48039
48040 atomic_set(&midCount, 0);
48041diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48042index e29581e..1c22bab 100644
48043--- a/fs/cifs/cifsglob.h
48044+++ b/fs/cifs/cifsglob.h
48045@@ -252,28 +252,28 @@ struct cifsTconInfo {
48046 __u16 Flags; /* optional support bits */
48047 enum statusEnum tidStatus;
48048 #ifdef CONFIG_CIFS_STATS
48049- atomic_t num_smbs_sent;
48050- atomic_t num_writes;
48051- atomic_t num_reads;
48052- atomic_t num_flushes;
48053- atomic_t num_oplock_brks;
48054- atomic_t num_opens;
48055- atomic_t num_closes;
48056- atomic_t num_deletes;
48057- atomic_t num_mkdirs;
48058- atomic_t num_posixopens;
48059- atomic_t num_posixmkdirs;
48060- atomic_t num_rmdirs;
48061- atomic_t num_renames;
48062- atomic_t num_t2renames;
48063- atomic_t num_ffirst;
48064- atomic_t num_fnext;
48065- atomic_t num_fclose;
48066- atomic_t num_hardlinks;
48067- atomic_t num_symlinks;
48068- atomic_t num_locks;
48069- atomic_t num_acl_get;
48070- atomic_t num_acl_set;
48071+ atomic_unchecked_t num_smbs_sent;
48072+ atomic_unchecked_t num_writes;
48073+ atomic_unchecked_t num_reads;
48074+ atomic_unchecked_t num_flushes;
48075+ atomic_unchecked_t num_oplock_brks;
48076+ atomic_unchecked_t num_opens;
48077+ atomic_unchecked_t num_closes;
48078+ atomic_unchecked_t num_deletes;
48079+ atomic_unchecked_t num_mkdirs;
48080+ atomic_unchecked_t num_posixopens;
48081+ atomic_unchecked_t num_posixmkdirs;
48082+ atomic_unchecked_t num_rmdirs;
48083+ atomic_unchecked_t num_renames;
48084+ atomic_unchecked_t num_t2renames;
48085+ atomic_unchecked_t num_ffirst;
48086+ atomic_unchecked_t num_fnext;
48087+ atomic_unchecked_t num_fclose;
48088+ atomic_unchecked_t num_hardlinks;
48089+ atomic_unchecked_t num_symlinks;
48090+ atomic_unchecked_t num_locks;
48091+ atomic_unchecked_t num_acl_get;
48092+ atomic_unchecked_t num_acl_set;
48093 #ifdef CONFIG_CIFS_STATS2
48094 unsigned long long time_writes;
48095 unsigned long long time_reads;
48096@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
48097 }
48098
48099 #ifdef CONFIG_CIFS_STATS
48100-#define cifs_stats_inc atomic_inc
48101+#define cifs_stats_inc atomic_inc_unchecked
48102
48103 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
48104 unsigned int bytes)
48105@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48106 /* Various Debug counters */
48107 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48108 #ifdef CONFIG_CIFS_STATS2
48109-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48110-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48111+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48112+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48113 #endif
48114 GLOBAL_EXTERN atomic_t smBufAllocCount;
48115 GLOBAL_EXTERN atomic_t midCount;
48116diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48117index fc1e048..28b3441 100644
48118--- a/fs/cifs/link.c
48119+++ b/fs/cifs/link.c
48120@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
48121
48122 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48123 {
48124- char *p = nd_get_link(nd);
48125+ const char *p = nd_get_link(nd);
48126 if (!IS_ERR(p))
48127 kfree(p);
48128 }
48129diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48130index 95b82e8..12a538d 100644
48131--- a/fs/cifs/misc.c
48132+++ b/fs/cifs/misc.c
48133@@ -155,7 +155,7 @@ cifs_buf_get(void)
48134 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
48135 atomic_inc(&bufAllocCount);
48136 #ifdef CONFIG_CIFS_STATS2
48137- atomic_inc(&totBufAllocCount);
48138+ atomic_inc_unchecked(&totBufAllocCount);
48139 #endif /* CONFIG_CIFS_STATS2 */
48140 }
48141
48142@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
48143 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48144 atomic_inc(&smBufAllocCount);
48145 #ifdef CONFIG_CIFS_STATS2
48146- atomic_inc(&totSmBufAllocCount);
48147+ atomic_inc_unchecked(&totSmBufAllocCount);
48148 #endif /* CONFIG_CIFS_STATS2 */
48149
48150 }
48151diff --git a/fs/coda/cache.c b/fs/coda/cache.c
48152index a5bf577..6d19845 100644
48153--- a/fs/coda/cache.c
48154+++ b/fs/coda/cache.c
48155@@ -24,14 +24,14 @@
48156 #include <linux/coda_fs_i.h>
48157 #include <linux/coda_cache.h>
48158
48159-static atomic_t permission_epoch = ATOMIC_INIT(0);
48160+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
48161
48162 /* replace or extend an acl cache hit */
48163 void coda_cache_enter(struct inode *inode, int mask)
48164 {
48165 struct coda_inode_info *cii = ITOC(inode);
48166
48167- cii->c_cached_epoch = atomic_read(&permission_epoch);
48168+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
48169 if (cii->c_uid != current_fsuid()) {
48170 cii->c_uid = current_fsuid();
48171 cii->c_cached_perm = mask;
48172@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
48173 void coda_cache_clear_inode(struct inode *inode)
48174 {
48175 struct coda_inode_info *cii = ITOC(inode);
48176- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
48177+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
48178 }
48179
48180 /* remove all acl caches */
48181 void coda_cache_clear_all(struct super_block *sb)
48182 {
48183- atomic_inc(&permission_epoch);
48184+ atomic_inc_unchecked(&permission_epoch);
48185 }
48186
48187
48188@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
48189
48190 hit = (mask & cii->c_cached_perm) == mask &&
48191 cii->c_uid == current_fsuid() &&
48192- cii->c_cached_epoch == atomic_read(&permission_epoch);
48193+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
48194
48195 return hit;
48196 }
48197diff --git a/fs/compat.c b/fs/compat.c
48198index d1e2411..c2ef8ed 100644
48199--- a/fs/compat.c
48200+++ b/fs/compat.c
48201@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
48202 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
48203 {
48204 compat_ino_t ino = stat->ino;
48205- typeof(ubuf->st_uid) uid = 0;
48206- typeof(ubuf->st_gid) gid = 0;
48207+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
48208+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
48209 int err;
48210
48211 SET_UID(uid, stat->uid);
48212@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
48213
48214 set_fs(KERNEL_DS);
48215 /* The __user pointer cast is valid because of the set_fs() */
48216- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
48217+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
48218 set_fs(oldfs);
48219 /* truncating is ok because it's a user address */
48220 if (!ret)
48221@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
48222
48223 struct compat_readdir_callback {
48224 struct compat_old_linux_dirent __user *dirent;
48225+ struct file * file;
48226 int result;
48227 };
48228
48229@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
48230 buf->result = -EOVERFLOW;
48231 return -EOVERFLOW;
48232 }
48233+
48234+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48235+ return 0;
48236+
48237 buf->result++;
48238 dirent = buf->dirent;
48239 if (!access_ok(VERIFY_WRITE, dirent,
48240@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
48241
48242 buf.result = 0;
48243 buf.dirent = dirent;
48244+ buf.file = file;
48245
48246 error = vfs_readdir(file, compat_fillonedir, &buf);
48247 if (buf.result)
48248@@ -899,6 +905,7 @@ struct compat_linux_dirent {
48249 struct compat_getdents_callback {
48250 struct compat_linux_dirent __user *current_dir;
48251 struct compat_linux_dirent __user *previous;
48252+ struct file * file;
48253 int count;
48254 int error;
48255 };
48256@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
48257 buf->error = -EOVERFLOW;
48258 return -EOVERFLOW;
48259 }
48260+
48261+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48262+ return 0;
48263+
48264 dirent = buf->previous;
48265 if (dirent) {
48266 if (__put_user(offset, &dirent->d_off))
48267@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48268 buf.previous = NULL;
48269 buf.count = count;
48270 buf.error = 0;
48271+ buf.file = file;
48272
48273 error = vfs_readdir(file, compat_filldir, &buf);
48274 if (error >= 0)
48275@@ -987,6 +999,7 @@ out:
48276 struct compat_getdents_callback64 {
48277 struct linux_dirent64 __user *current_dir;
48278 struct linux_dirent64 __user *previous;
48279+ struct file * file;
48280 int count;
48281 int error;
48282 };
48283@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48284 buf->error = -EINVAL; /* only used if we fail.. */
48285 if (reclen > buf->count)
48286 return -EINVAL;
48287+
48288+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48289+ return 0;
48290+
48291 dirent = buf->previous;
48292
48293 if (dirent) {
48294@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48295 buf.previous = NULL;
48296 buf.count = count;
48297 buf.error = 0;
48298+ buf.file = file;
48299
48300 error = vfs_readdir(file, compat_filldir64, &buf);
48301 if (error >= 0)
48302 error = buf.error;
48303 lastdirent = buf.previous;
48304 if (lastdirent) {
48305- typeof(lastdirent->d_off) d_off = file->f_pos;
48306+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48307 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48308 error = -EFAULT;
48309 else
48310@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48311 * verify all the pointers
48312 */
48313 ret = -EINVAL;
48314- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48315+ if (nr_segs > UIO_MAXIOV)
48316 goto out;
48317 if (!file->f_op)
48318 goto out;
48319@@ -1454,6 +1472,10 @@ out:
48320 return ret;
48321 }
48322
48323+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48324+extern atomic64_unchecked_t global_exec_counter;
48325+#endif
48326+
48327 /*
48328 * compat_do_execve() is mostly a copy of do_execve(), with the exception
48329 * that it processes 32 bit argv and envp pointers.
48330@@ -1463,11 +1485,35 @@ int compat_do_execve(char * filename,
48331 compat_uptr_t __user *envp,
48332 struct pt_regs * regs)
48333 {
48334+#ifdef CONFIG_GRKERNSEC
48335+ struct file *old_exec_file;
48336+ struct acl_subject_label *old_acl;
48337+ struct rlimit old_rlim[RLIM_NLIMITS];
48338+#endif
48339 struct linux_binprm *bprm;
48340 struct file *file;
48341 struct files_struct *displaced;
48342 bool clear_in_exec;
48343 int retval;
48344+ const struct cred *cred = current_cred();
48345+
48346+ /*
48347+ * We move the actual failure in case of RLIMIT_NPROC excess from
48348+ * set*uid() to execve() because too many poorly written programs
48349+ * don't check setuid() return code. Here we additionally recheck
48350+ * whether NPROC limit is still exceeded.
48351+ */
48352+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48353+
48354+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48355+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48356+ retval = -EAGAIN;
48357+ goto out_ret;
48358+ }
48359+
48360+ /* We're below the limit (still or again), so we don't want to make
48361+ * further execve() calls fail. */
48362+ current->flags &= ~PF_NPROC_EXCEEDED;
48363
48364 retval = unshare_files(&displaced);
48365 if (retval)
48366@@ -1493,12 +1539,26 @@ int compat_do_execve(char * filename,
48367 if (IS_ERR(file))
48368 goto out_unmark;
48369
48370+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48371+ retval = -EPERM;
48372+ goto out_file;
48373+ }
48374+
48375 sched_exec();
48376
48377 bprm->file = file;
48378 bprm->filename = filename;
48379 bprm->interp = filename;
48380
48381+ if (gr_process_user_ban()) {
48382+ retval = -EPERM;
48383+ goto out_file;
48384+ }
48385+
48386+ retval = -EACCES;
48387+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48388+ goto out_file;
48389+
48390 retval = bprm_mm_init(bprm);
48391 if (retval)
48392 goto out_file;
48393@@ -1528,11 +1588,45 @@ int compat_do_execve(char * filename,
48394 if (retval < 0)
48395 goto out;
48396
48397+ if (!gr_tpe_allow(file)) {
48398+ retval = -EACCES;
48399+ goto out;
48400+ }
48401+
48402+ if (gr_check_crash_exec(file)) {
48403+ retval = -EACCES;
48404+ goto out;
48405+ }
48406+
48407+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48408+
48409+ gr_handle_exec_args_compat(bprm, argv);
48410+
48411+#ifdef CONFIG_GRKERNSEC
48412+ old_acl = current->acl;
48413+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48414+ old_exec_file = current->exec_file;
48415+ get_file(file);
48416+ current->exec_file = file;
48417+#endif
48418+
48419+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48420+ bprm->unsafe);
48421+ if (retval < 0)
48422+ goto out_fail;
48423+
48424 retval = search_binary_handler(bprm, regs);
48425 if (retval < 0)
48426- goto out;
48427+ goto out_fail;
48428+#ifdef CONFIG_GRKERNSEC
48429+ if (old_exec_file)
48430+ fput(old_exec_file);
48431+#endif
48432
48433 /* execve succeeded */
48434+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48435+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
48436+#endif
48437 current->fs->in_exec = 0;
48438 current->in_execve = 0;
48439 acct_update_integrals(current);
48440@@ -1541,6 +1635,14 @@ int compat_do_execve(char * filename,
48441 put_files_struct(displaced);
48442 return retval;
48443
48444+out_fail:
48445+#ifdef CONFIG_GRKERNSEC
48446+ current->acl = old_acl;
48447+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48448+ fput(current->exec_file);
48449+ current->exec_file = old_exec_file;
48450+#endif
48451+
48452 out:
48453 if (bprm->mm) {
48454 acct_arg_size(bprm, 0);
48455@@ -1711,6 +1813,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48456 struct fdtable *fdt;
48457 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48458
48459+ pax_track_stack();
48460+
48461 if (n < 0)
48462 goto out_nofds;
48463
48464@@ -2151,7 +2255,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48465 oldfs = get_fs();
48466 set_fs(KERNEL_DS);
48467 /* The __user pointer casts are valid because of the set_fs() */
48468- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48469+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48470 set_fs(oldfs);
48471
48472 if (err)
48473diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48474index 0adced2..bbb1b0d 100644
48475--- a/fs/compat_binfmt_elf.c
48476+++ b/fs/compat_binfmt_elf.c
48477@@ -29,10 +29,12 @@
48478 #undef elfhdr
48479 #undef elf_phdr
48480 #undef elf_note
48481+#undef elf_dyn
48482 #undef elf_addr_t
48483 #define elfhdr elf32_hdr
48484 #define elf_phdr elf32_phdr
48485 #define elf_note elf32_note
48486+#define elf_dyn Elf32_Dyn
48487 #define elf_addr_t Elf32_Addr
48488
48489 /*
48490diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48491index d84e705..d8c364c 100644
48492--- a/fs/compat_ioctl.c
48493+++ b/fs/compat_ioctl.c
48494@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48495 up = (struct compat_video_spu_palette __user *) arg;
48496 err = get_user(palp, &up->palette);
48497 err |= get_user(length, &up->length);
48498+ if (err)
48499+ return -EFAULT;
48500
48501 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48502 err = put_user(compat_ptr(palp), &up_native->palette);
48503@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48504 return -EFAULT;
48505 if (__get_user(udata, &ss32->iomem_base))
48506 return -EFAULT;
48507- ss.iomem_base = compat_ptr(udata);
48508+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48509 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48510 __get_user(ss.port_high, &ss32->port_high))
48511 return -EFAULT;
48512@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48513 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48514 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48515 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48516- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48517+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48518 return -EFAULT;
48519
48520 return ioctl_preallocate(file, p);
48521diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48522index 8e48b52..f01ed91 100644
48523--- a/fs/configfs/dir.c
48524+++ b/fs/configfs/dir.c
48525@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48526 }
48527 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48528 struct configfs_dirent *next;
48529- const char * name;
48530+ const unsigned char * name;
48531+ char d_name[sizeof(next->s_dentry->d_iname)];
48532 int len;
48533
48534 next = list_entry(p, struct configfs_dirent,
48535@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48536 continue;
48537
48538 name = configfs_get_name(next);
48539- len = strlen(name);
48540+ if (next->s_dentry && name == next->s_dentry->d_iname) {
48541+ len = next->s_dentry->d_name.len;
48542+ memcpy(d_name, name, len);
48543+ name = d_name;
48544+ } else
48545+ len = strlen(name);
48546 if (next->s_dentry)
48547 ino = next->s_dentry->d_inode->i_ino;
48548 else
48549diff --git a/fs/dcache.c b/fs/dcache.c
48550index 44c0aea..2529092 100644
48551--- a/fs/dcache.c
48552+++ b/fs/dcache.c
48553@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48554
48555 static struct kmem_cache *dentry_cache __read_mostly;
48556
48557-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48558-
48559 /*
48560 * This is the single most critical data structure when it comes
48561 * to the dcache: the hashtable for lookups. Somebody should try
48562@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48563 mempages -= reserve;
48564
48565 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48566- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48567+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48568
48569 dcache_init();
48570 inode_init();
48571diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48572index 39c6ee8..dcee0f1 100644
48573--- a/fs/debugfs/inode.c
48574+++ b/fs/debugfs/inode.c
48575@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48576 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48577 {
48578 return debugfs_create_file(name,
48579+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48580+ S_IFDIR | S_IRWXU,
48581+#else
48582 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48583+#endif
48584 parent, NULL, NULL);
48585 }
48586 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48587diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48588index c010ecf..a8d8c59 100644
48589--- a/fs/dlm/lockspace.c
48590+++ b/fs/dlm/lockspace.c
48591@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48592 kfree(ls);
48593 }
48594
48595-static struct sysfs_ops dlm_attr_ops = {
48596+static const struct sysfs_ops dlm_attr_ops = {
48597 .show = dlm_attr_show,
48598 .store = dlm_attr_store,
48599 };
48600diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48601index 7a5f1ac..205b034 100644
48602--- a/fs/ecryptfs/crypto.c
48603+++ b/fs/ecryptfs/crypto.c
48604@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48605 rc);
48606 goto out;
48607 }
48608- if (unlikely(ecryptfs_verbosity > 0)) {
48609- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48610- "with iv:\n");
48611- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48612- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48613- "encryption:\n");
48614- ecryptfs_dump_hex((char *)
48615- (page_address(page)
48616- + (extent_offset * crypt_stat->extent_size)),
48617- 8);
48618- }
48619 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48620 page, (extent_offset
48621 * crypt_stat->extent_size),
48622@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48623 goto out;
48624 }
48625 rc = 0;
48626- if (unlikely(ecryptfs_verbosity > 0)) {
48627- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48628- "rc = [%d]\n", (extent_base + extent_offset),
48629- rc);
48630- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48631- "encryption:\n");
48632- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48633- }
48634 out:
48635 return rc;
48636 }
48637@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48638 rc);
48639 goto out;
48640 }
48641- if (unlikely(ecryptfs_verbosity > 0)) {
48642- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48643- "with iv:\n");
48644- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48645- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48646- "decryption:\n");
48647- ecryptfs_dump_hex((char *)
48648- (page_address(enc_extent_page)
48649- + (extent_offset * crypt_stat->extent_size)),
48650- 8);
48651- }
48652 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48653 (extent_offset
48654 * crypt_stat->extent_size),
48655@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48656 goto out;
48657 }
48658 rc = 0;
48659- if (unlikely(ecryptfs_verbosity > 0)) {
48660- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48661- "rc = [%d]\n", (extent_base + extent_offset),
48662- rc);
48663- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48664- "decryption:\n");
48665- ecryptfs_dump_hex((char *)(page_address(page)
48666- + (extent_offset
48667- * crypt_stat->extent_size)), 8);
48668- }
48669 out:
48670 return rc;
48671 }
48672diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
48673index 4e25328..3015389 100644
48674--- a/fs/ecryptfs/file.c
48675+++ b/fs/ecryptfs/file.c
48676@@ -323,11 +323,11 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
48677
48678 const struct file_operations ecryptfs_dir_fops = {
48679 .readdir = ecryptfs_readdir,
48680+ .read = generic_read_dir,
48681 .unlocked_ioctl = ecryptfs_unlocked_ioctl,
48682 #ifdef CONFIG_COMPAT
48683 .compat_ioctl = ecryptfs_compat_ioctl,
48684 #endif
48685- .mmap = generic_file_mmap,
48686 .open = ecryptfs_open,
48687 .flush = ecryptfs_flush,
48688 .release = ecryptfs_release,
48689diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48690index 88ba4d4..55639ca 100644
48691--- a/fs/ecryptfs/inode.c
48692+++ b/fs/ecryptfs/inode.c
48693@@ -575,8 +575,8 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
48694 dget(lower_dentry);
48695 rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
48696 dput(lower_dentry);
48697- if (!rc)
48698- d_delete(lower_dentry);
48699+ if (!rc && dentry->d_inode)
48700+ clear_nlink(dentry->d_inode);
48701 fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
48702 dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
48703 unlock_dir(lower_dir_dentry);
48704@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48705 old_fs = get_fs();
48706 set_fs(get_ds());
48707 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48708- (char __user *)lower_buf,
48709+ (char __force_user *)lower_buf,
48710 lower_bufsiz);
48711 set_fs(old_fs);
48712 if (rc < 0)
48713@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48714 }
48715 old_fs = get_fs();
48716 set_fs(get_ds());
48717- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48718+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48719 set_fs(old_fs);
48720 if (rc < 0)
48721 goto out_free;
48722@@ -758,18 +758,23 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
48723 }
48724
48725 /**
48726- * ecryptfs_truncate
48727+ * truncate_upper
48728 * @dentry: The ecryptfs layer dentry
48729- * @new_length: The length to expand the file to
48730+ * @ia: Address of the ecryptfs inode's attributes
48731+ * @lower_ia: Address of the lower inode's attributes
48732 *
48733 * Function to handle truncations modifying the size of the file. Note
48734 * that the file sizes are interpolated. When expanding, we are simply
48735- * writing strings of 0's out. When truncating, we need to modify the
48736- * underlying file size according to the page index interpolations.
48737+ * writing strings of 0's out. When truncating, we truncate the upper
48738+ * inode and update the lower_ia according to the page index
48739+ * interpolations. If ATTR_SIZE is set in lower_ia->ia_valid upon return,
48740+ * the caller must use lower_ia in a call to notify_change() to perform
48741+ * the truncation of the lower inode.
48742 *
48743 * Returns zero on success; non-zero otherwise
48744 */
48745-int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
48746+static int truncate_upper(struct dentry *dentry, struct iattr *ia,
48747+ struct iattr *lower_ia)
48748 {
48749 int rc = 0;
48750 struct inode *inode = dentry->d_inode;
48751@@ -780,8 +785,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
48752 loff_t lower_size_before_truncate;
48753 loff_t lower_size_after_truncate;
48754
48755- if (unlikely((new_length == i_size)))
48756+ if (unlikely((ia->ia_size == i_size))) {
48757+ lower_ia->ia_valid &= ~ATTR_SIZE;
48758 goto out;
48759+ }
48760 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
48761 /* Set up a fake ecryptfs file, this is used to interface with
48762 * the file in the underlying filesystem so that the
48763@@ -801,28 +808,30 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
48764 &fake_ecryptfs_file,
48765 ecryptfs_inode_to_private(dentry->d_inode)->lower_file);
48766 /* Switch on growing or shrinking file */
48767- if (new_length > i_size) {
48768+ if (ia->ia_size > i_size) {
48769 char zero[] = { 0x00 };
48770
48771+ lower_ia->ia_valid &= ~ATTR_SIZE;
48772 /* Write a single 0 at the last position of the file;
48773 * this triggers code that will fill in 0's throughout
48774 * the intermediate portion of the previous end of the
48775 * file and the new and of the file */
48776 rc = ecryptfs_write(&fake_ecryptfs_file, zero,
48777- (new_length - 1), 1);
48778- } else { /* new_length < i_size_read(inode) */
48779- /* We're chopping off all the pages down do the page
48780- * in which new_length is located. Fill in the end of
48781- * that page from (new_length & ~PAGE_CACHE_MASK) to
48782+ (ia->ia_size - 1), 1);
48783+ } else { /* ia->ia_size < i_size_read(inode) */
48784+ /* We're chopping off all the pages down to the page
48785+ * in which ia->ia_size is located. Fill in the end of
48786+ * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
48787 * PAGE_CACHE_SIZE with zeros. */
48788 size_t num_zeros = (PAGE_CACHE_SIZE
48789- - (new_length & ~PAGE_CACHE_MASK));
48790+ - (ia->ia_size & ~PAGE_CACHE_MASK));
48791
48792 if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
48793- rc = vmtruncate(inode, new_length);
48794+ rc = vmtruncate(inode, ia->ia_size);
48795 if (rc)
48796 goto out_free;
48797- rc = vmtruncate(lower_dentry->d_inode, new_length);
48798+ lower_ia->ia_size = ia->ia_size;
48799+ lower_ia->ia_valid |= ATTR_SIZE;
48800 goto out_free;
48801 }
48802 if (num_zeros) {
48803@@ -834,7 +843,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
48804 goto out_free;
48805 }
48806 rc = ecryptfs_write(&fake_ecryptfs_file, zeros_virt,
48807- new_length, num_zeros);
48808+ ia->ia_size, num_zeros);
48809 kfree(zeros_virt);
48810 if (rc) {
48811 printk(KERN_ERR "Error attempting to zero out "
48812@@ -843,7 +852,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
48813 goto out_free;
48814 }
48815 }
48816- vmtruncate(inode, new_length);
48817+ vmtruncate(inode, ia->ia_size);
48818 rc = ecryptfs_write_inode_size_to_metadata(inode);
48819 if (rc) {
48820 printk(KERN_ERR "Problem with "
48821@@ -856,10 +865,12 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
48822 lower_size_before_truncate =
48823 upper_size_to_lower_size(crypt_stat, i_size);
48824 lower_size_after_truncate =
48825- upper_size_to_lower_size(crypt_stat, new_length);
48826- if (lower_size_after_truncate < lower_size_before_truncate)
48827- vmtruncate(lower_dentry->d_inode,
48828- lower_size_after_truncate);
48829+ upper_size_to_lower_size(crypt_stat, ia->ia_size);
48830+ if (lower_size_after_truncate < lower_size_before_truncate) {
48831+ lower_ia->ia_size = lower_size_after_truncate;
48832+ lower_ia->ia_valid |= ATTR_SIZE;
48833+ } else
48834+ lower_ia->ia_valid &= ~ATTR_SIZE;
48835 }
48836 out_free:
48837 if (ecryptfs_file_to_private(&fake_ecryptfs_file))
48838@@ -869,6 +880,33 @@ out:
48839 return rc;
48840 }
48841
48842+/**
48843+ * ecryptfs_truncate
48844+ * @dentry: The ecryptfs layer dentry
48845+ * @new_length: The length to expand the file to
48846+ *
48847+ * Simple function that handles the truncation of an eCryptfs inode and
48848+ * its corresponding lower inode.
48849+ *
48850+ * Returns zero on success; non-zero otherwise
48851+ */
48852+int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
48853+{
48854+ struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_length };
48855+ struct iattr lower_ia = { .ia_valid = 0 };
48856+ int rc;
48857+
48858+ rc = truncate_upper(dentry, &ia, &lower_ia);
48859+ if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
48860+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
48861+
48862+ mutex_lock(&lower_dentry->d_inode->i_mutex);
48863+ rc = notify_change(lower_dentry, &lower_ia);
48864+ mutex_unlock(&lower_dentry->d_inode->i_mutex);
48865+ }
48866+ return rc;
48867+}
48868+
48869 static int
48870 ecryptfs_permission(struct inode *inode, int mask)
48871 {
48872@@ -891,6 +929,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
48873 {
48874 int rc = 0;
48875 struct dentry *lower_dentry;
48876+ struct iattr lower_ia;
48877 struct inode *inode;
48878 struct inode *lower_inode;
48879 struct ecryptfs_crypt_stat *crypt_stat;
48880@@ -929,15 +968,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
48881 }
48882 }
48883 mutex_unlock(&crypt_stat->cs_mutex);
48884+ memcpy(&lower_ia, ia, sizeof(lower_ia));
48885+ if (ia->ia_valid & ATTR_FILE)
48886+ lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
48887 if (ia->ia_valid & ATTR_SIZE) {
48888- ecryptfs_printk(KERN_DEBUG,
48889- "ia->ia_valid = [0x%x] ATTR_SIZE" " = [0x%x]\n",
48890- ia->ia_valid, ATTR_SIZE);
48891- rc = ecryptfs_truncate(dentry, ia->ia_size);
48892- /* ecryptfs_truncate handles resizing of the lower file */
48893- ia->ia_valid &= ~ATTR_SIZE;
48894- ecryptfs_printk(KERN_DEBUG, "ia->ia_valid = [%x]\n",
48895- ia->ia_valid);
48896+ rc = truncate_upper(dentry, ia, &lower_ia);
48897 if (rc < 0)
48898 goto out;
48899 }
48900@@ -946,11 +981,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
48901 * mode change is for clearing setuid/setgid bits. Allow lower fs
48902 * to interpret this in its own way.
48903 */
48904- if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
48905- ia->ia_valid &= ~ATTR_MODE;
48906+ if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
48907+ lower_ia.ia_valid &= ~ATTR_MODE;
48908
48909 mutex_lock(&lower_dentry->d_inode->i_mutex);
48910- rc = notify_change(lower_dentry, ia);
48911+ rc = notify_change(lower_dentry, &lower_ia);
48912 mutex_unlock(&lower_dentry->d_inode->i_mutex);
48913 out:
48914 fsstack_copy_attr_all(inode, lower_inode, NULL);
48915diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
48916index c6ac85d..c52df75 100644
48917--- a/fs/ecryptfs/main.c
48918+++ b/fs/ecryptfs/main.c
48919@@ -487,6 +487,7 @@ out:
48920 }
48921
48922 struct kmem_cache *ecryptfs_sb_info_cache;
48923+static struct file_system_type ecryptfs_fs_type;
48924
48925 /**
48926 * ecryptfs_fill_super
48927@@ -561,6 +562,23 @@ static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
48928 ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
48929 goto out;
48930 }
48931+
48932+ if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
48933+ rc = -EINVAL;
48934+ printk(KERN_ERR "Mount on filesystem of type "
48935+ "eCryptfs explicitly disallowed due to "
48936+ "known incompatibilities\n");
48937+ goto out_free;
48938+ }
48939+
48940+ if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) {
48941+ rc = -EPERM;
48942+ printk(KERN_ERR "Mount of device (uid: %d) not owned by "
48943+ "requested user (uid: %d)\n",
48944+ path.dentry->d_inode->i_uid, current_uid());
48945+ goto out_free;
48946+ }
48947+
48948 ecryptfs_set_superblock_lower(sb, path.dentry->d_sb);
48949 sb->s_maxbytes = path.dentry->d_sb->s_maxbytes;
48950 sb->s_blocksize = path.dentry->d_sb->s_blocksize;
48951diff --git a/fs/exec.c b/fs/exec.c
48952index 86fafc6..6272c0e 100644
48953--- a/fs/exec.c
48954+++ b/fs/exec.c
48955@@ -56,12 +56,28 @@
48956 #include <linux/fsnotify.h>
48957 #include <linux/fs_struct.h>
48958 #include <linux/pipe_fs_i.h>
48959+#include <linux/random.h>
48960+#include <linux/seq_file.h>
48961+
48962+#ifdef CONFIG_PAX_REFCOUNT
48963+#include <linux/kallsyms.h>
48964+#include <linux/kdebug.h>
48965+#endif
48966
48967 #include <asm/uaccess.h>
48968 #include <asm/mmu_context.h>
48969 #include <asm/tlb.h>
48970 #include "internal.h"
48971
48972+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48973+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48974+#endif
48975+
48976+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48977+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48978+EXPORT_SYMBOL(pax_set_initial_flags_func);
48979+#endif
48980+
48981 int core_uses_pid;
48982 char core_pattern[CORENAME_MAX_SIZE] = "core";
48983 unsigned int core_pipe_limit;
48984@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48985 int write)
48986 {
48987 struct page *page;
48988- int ret;
48989
48990-#ifdef CONFIG_STACK_GROWSUP
48991- if (write) {
48992- ret = expand_stack_downwards(bprm->vma, pos);
48993- if (ret < 0)
48994- return NULL;
48995- }
48996-#endif
48997- ret = get_user_pages(current, bprm->mm, pos,
48998- 1, write, 1, &page, NULL);
48999- if (ret <= 0)
49000+ if (0 > expand_stack_downwards(bprm->vma, pos))
49001+ return NULL;
49002+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
49003 return NULL;
49004
49005 if (write) {
49006@@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49007 if (size <= ARG_MAX)
49008 return page;
49009
49010+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49011+ // only allow 1MB for argv+env on suid/sgid binaries
49012+ // to prevent easy ASLR exhaustion
49013+ if (((bprm->cred->euid != current_euid()) ||
49014+ (bprm->cred->egid != current_egid())) &&
49015+ (size > (1024 * 1024))) {
49016+ put_page(page);
49017+ return NULL;
49018+ }
49019+#endif
49020+
49021 /*
49022 * Limit to 1/4-th the stack size for the argv+env strings.
49023 * This ensures that:
49024@@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49025 vma->vm_end = STACK_TOP_MAX;
49026 vma->vm_start = vma->vm_end - PAGE_SIZE;
49027 vma->vm_flags = VM_STACK_FLAGS;
49028+
49029+#ifdef CONFIG_PAX_SEGMEXEC
49030+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
49031+#endif
49032+
49033 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
49034
49035 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
49036@@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49037 mm->stack_vm = mm->total_vm = 1;
49038 up_write(&mm->mmap_sem);
49039 bprm->p = vma->vm_end - sizeof(void *);
49040+
49041+#ifdef CONFIG_PAX_RANDUSTACK
49042+ if (randomize_va_space)
49043+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
49044+#endif
49045+
49046 return 0;
49047 err:
49048 up_write(&mm->mmap_sem);
49049@@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
49050 int r;
49051 mm_segment_t oldfs = get_fs();
49052 set_fs(KERNEL_DS);
49053- r = copy_strings(argc, (char __user * __user *)argv, bprm);
49054+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
49055 set_fs(oldfs);
49056 return r;
49057 }
49058@@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
49059 unsigned long new_end = old_end - shift;
49060 struct mmu_gather *tlb;
49061
49062- BUG_ON(new_start > new_end);
49063+ if (new_start >= new_end || new_start < mmap_min_addr)
49064+ return -ENOMEM;
49065
49066 /*
49067 * ensure there are no vmas between where we want to go
49068@@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
49069 if (vma != find_vma(mm, new_start))
49070 return -EFAULT;
49071
49072+#ifdef CONFIG_PAX_SEGMEXEC
49073+ BUG_ON(pax_find_mirror_vma(vma));
49074+#endif
49075+
49076 /*
49077 * cover the whole range: [new_start, old_end)
49078 */
49079@@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
49080 stack_top = arch_align_stack(stack_top);
49081 stack_top = PAGE_ALIGN(stack_top);
49082
49083- if (unlikely(stack_top < mmap_min_addr) ||
49084- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
49085- return -ENOMEM;
49086-
49087 stack_shift = vma->vm_end - stack_top;
49088
49089 bprm->p -= stack_shift;
49090@@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
49091 bprm->exec -= stack_shift;
49092
49093 down_write(&mm->mmap_sem);
49094+
49095+ /* Move stack pages down in memory. */
49096+ if (stack_shift) {
49097+ ret = shift_arg_pages(vma, stack_shift);
49098+ if (ret)
49099+ goto out_unlock;
49100+ }
49101+
49102 vm_flags = VM_STACK_FLAGS;
49103
49104 /*
49105@@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
49106 vm_flags &= ~VM_EXEC;
49107 vm_flags |= mm->def_flags;
49108
49109+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49110+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49111+ vm_flags &= ~VM_EXEC;
49112+
49113+#ifdef CONFIG_PAX_MPROTECT
49114+ if (mm->pax_flags & MF_PAX_MPROTECT)
49115+ vm_flags &= ~VM_MAYEXEC;
49116+#endif
49117+
49118+ }
49119+#endif
49120+
49121 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
49122 vm_flags);
49123 if (ret)
49124 goto out_unlock;
49125 BUG_ON(prev != vma);
49126
49127- /* Move stack pages down in memory. */
49128- if (stack_shift) {
49129- ret = shift_arg_pages(vma, stack_shift);
49130- if (ret)
49131- goto out_unlock;
49132- }
49133-
49134 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
49135 stack_size = vma->vm_end - vma->vm_start;
49136 /*
49137@@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
49138 old_fs = get_fs();
49139 set_fs(get_ds());
49140 /* The cast to a user pointer is valid due to the set_fs() */
49141- result = vfs_read(file, (void __user *)addr, count, &pos);
49142+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
49143 set_fs(old_fs);
49144 return result;
49145 }
49146@@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
49147 perf_event_comm(tsk);
49148 }
49149
49150+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
49151+{
49152+ int i, ch;
49153+
49154+ /* Copies the binary name from after last slash */
49155+ for (i = 0; (ch = *(fn++)) != '\0';) {
49156+ if (ch == '/')
49157+ i = 0; /* overwrite what we wrote */
49158+ else
49159+ if (i < len - 1)
49160+ tcomm[i++] = ch;
49161+ }
49162+ tcomm[i] = '\0';
49163+}
49164+
49165 int flush_old_exec(struct linux_binprm * bprm)
49166 {
49167 int retval;
49168@@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
49169
49170 set_mm_exe_file(bprm->mm, bprm->file);
49171
49172+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
49173 /*
49174 * Release all of the old mmap stuff
49175 */
49176@@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
49177
49178 void setup_new_exec(struct linux_binprm * bprm)
49179 {
49180- int i, ch;
49181- char * name;
49182- char tcomm[sizeof(current->comm)];
49183-
49184 arch_pick_mmap_layout(current->mm);
49185
49186 /* This is the point of no return */
49187@@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
49188 else
49189 set_dumpable(current->mm, suid_dumpable);
49190
49191- name = bprm->filename;
49192-
49193- /* Copies the binary name from after last slash */
49194- for (i=0; (ch = *(name++)) != '\0';) {
49195- if (ch == '/')
49196- i = 0; /* overwrite what we wrote */
49197- else
49198- if (i < (sizeof(tcomm) - 1))
49199- tcomm[i++] = ch;
49200- }
49201- tcomm[i] = '\0';
49202- set_task_comm(current, tcomm);
49203+ set_task_comm(current, bprm->tcomm);
49204
49205 /* Set the new mm task size. We have to do that late because it may
49206 * depend on TIF_32BIT which is only updated in flush_thread() on
49207@@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
49208 }
49209 rcu_read_unlock();
49210
49211- if (p->fs->users > n_fs) {
49212+ if (atomic_read(&p->fs->users) > n_fs) {
49213 bprm->unsafe |= LSM_UNSAFE_SHARE;
49214 } else {
49215 res = -EAGAIN;
49216@@ -1339,6 +1384,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
49217
49218 EXPORT_SYMBOL(search_binary_handler);
49219
49220+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49221+atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
49222+#endif
49223+
49224 /*
49225 * sys_execve() executes a new program.
49226 */
49227@@ -1347,11 +1396,35 @@ int do_execve(char * filename,
49228 char __user *__user *envp,
49229 struct pt_regs * regs)
49230 {
49231+#ifdef CONFIG_GRKERNSEC
49232+ struct file *old_exec_file;
49233+ struct acl_subject_label *old_acl;
49234+ struct rlimit old_rlim[RLIM_NLIMITS];
49235+#endif
49236 struct linux_binprm *bprm;
49237 struct file *file;
49238 struct files_struct *displaced;
49239 bool clear_in_exec;
49240 int retval;
49241+ const struct cred *cred = current_cred();
49242+
49243+ /*
49244+ * We move the actual failure in case of RLIMIT_NPROC excess from
49245+ * set*uid() to execve() because too many poorly written programs
49246+ * don't check setuid() return code. Here we additionally recheck
49247+ * whether NPROC limit is still exceeded.
49248+ */
49249+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
49250+
49251+ if ((current->flags & PF_NPROC_EXCEEDED) &&
49252+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
49253+ retval = -EAGAIN;
49254+ goto out_ret;
49255+ }
49256+
49257+ /* We're below the limit (still or again), so we don't want to make
49258+ * further execve() calls fail. */
49259+ current->flags &= ~PF_NPROC_EXCEEDED;
49260
49261 retval = unshare_files(&displaced);
49262 if (retval)
49263@@ -1377,12 +1450,27 @@ int do_execve(char * filename,
49264 if (IS_ERR(file))
49265 goto out_unmark;
49266
49267+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
49268+ retval = -EPERM;
49269+ goto out_file;
49270+ }
49271+
49272 sched_exec();
49273
49274 bprm->file = file;
49275 bprm->filename = filename;
49276 bprm->interp = filename;
49277
49278+ if (gr_process_user_ban()) {
49279+ retval = -EPERM;
49280+ goto out_file;
49281+ }
49282+
49283+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
49284+ retval = -EACCES;
49285+ goto out_file;
49286+ }
49287+
49288 retval = bprm_mm_init(bprm);
49289 if (retval)
49290 goto out_file;
49291@@ -1412,12 +1500,47 @@ int do_execve(char * filename,
49292 if (retval < 0)
49293 goto out;
49294
49295+ if (!gr_tpe_allow(file)) {
49296+ retval = -EACCES;
49297+ goto out;
49298+ }
49299+
49300+ if (gr_check_crash_exec(file)) {
49301+ retval = -EACCES;
49302+ goto out;
49303+ }
49304+
49305+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49306+
49307+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
49308+
49309+#ifdef CONFIG_GRKERNSEC
49310+ old_acl = current->acl;
49311+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49312+ old_exec_file = current->exec_file;
49313+ get_file(file);
49314+ current->exec_file = file;
49315+#endif
49316+
49317+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49318+ bprm->unsafe);
49319+ if (retval < 0)
49320+ goto out_fail;
49321+
49322 current->flags &= ~PF_KTHREAD;
49323 retval = search_binary_handler(bprm,regs);
49324 if (retval < 0)
49325- goto out;
49326+ goto out_fail;
49327+#ifdef CONFIG_GRKERNSEC
49328+ if (old_exec_file)
49329+ fput(old_exec_file);
49330+#endif
49331
49332 /* execve succeeded */
49333+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49334+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
49335+#endif
49336+
49337 current->fs->in_exec = 0;
49338 current->in_execve = 0;
49339 acct_update_integrals(current);
49340@@ -1426,6 +1549,14 @@ int do_execve(char * filename,
49341 put_files_struct(displaced);
49342 return retval;
49343
49344+out_fail:
49345+#ifdef CONFIG_GRKERNSEC
49346+ current->acl = old_acl;
49347+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49348+ fput(current->exec_file);
49349+ current->exec_file = old_exec_file;
49350+#endif
49351+
49352 out:
49353 if (bprm->mm) {
49354 acct_arg_size(bprm, 0);
49355@@ -1591,6 +1722,220 @@ out:
49356 return ispipe;
49357 }
49358
49359+int pax_check_flags(unsigned long *flags)
49360+{
49361+ int retval = 0;
49362+
49363+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
49364+ if (*flags & MF_PAX_SEGMEXEC)
49365+ {
49366+ *flags &= ~MF_PAX_SEGMEXEC;
49367+ retval = -EINVAL;
49368+ }
49369+#endif
49370+
49371+ if ((*flags & MF_PAX_PAGEEXEC)
49372+
49373+#ifdef CONFIG_PAX_PAGEEXEC
49374+ && (*flags & MF_PAX_SEGMEXEC)
49375+#endif
49376+
49377+ )
49378+ {
49379+ *flags &= ~MF_PAX_PAGEEXEC;
49380+ retval = -EINVAL;
49381+ }
49382+
49383+ if ((*flags & MF_PAX_MPROTECT)
49384+
49385+#ifdef CONFIG_PAX_MPROTECT
49386+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49387+#endif
49388+
49389+ )
49390+ {
49391+ *flags &= ~MF_PAX_MPROTECT;
49392+ retval = -EINVAL;
49393+ }
49394+
49395+ if ((*flags & MF_PAX_EMUTRAMP)
49396+
49397+#ifdef CONFIG_PAX_EMUTRAMP
49398+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
49399+#endif
49400+
49401+ )
49402+ {
49403+ *flags &= ~MF_PAX_EMUTRAMP;
49404+ retval = -EINVAL;
49405+ }
49406+
49407+ return retval;
49408+}
49409+
49410+EXPORT_SYMBOL(pax_check_flags);
49411+
49412+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49413+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
49414+{
49415+ struct task_struct *tsk = current;
49416+ struct mm_struct *mm = current->mm;
49417+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
49418+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
49419+ char *path_exec = NULL;
49420+ char *path_fault = NULL;
49421+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
49422+
49423+ if (buffer_exec && buffer_fault) {
49424+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
49425+
49426+ down_read(&mm->mmap_sem);
49427+ vma = mm->mmap;
49428+ while (vma && (!vma_exec || !vma_fault)) {
49429+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
49430+ vma_exec = vma;
49431+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
49432+ vma_fault = vma;
49433+ vma = vma->vm_next;
49434+ }
49435+ if (vma_exec) {
49436+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
49437+ if (IS_ERR(path_exec))
49438+ path_exec = "<path too long>";
49439+ else {
49440+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
49441+ if (path_exec) {
49442+ *path_exec = 0;
49443+ path_exec = buffer_exec;
49444+ } else
49445+ path_exec = "<path too long>";
49446+ }
49447+ }
49448+ if (vma_fault) {
49449+ start = vma_fault->vm_start;
49450+ end = vma_fault->vm_end;
49451+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
49452+ if (vma_fault->vm_file) {
49453+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
49454+ if (IS_ERR(path_fault))
49455+ path_fault = "<path too long>";
49456+ else {
49457+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
49458+ if (path_fault) {
49459+ *path_fault = 0;
49460+ path_fault = buffer_fault;
49461+ } else
49462+ path_fault = "<path too long>";
49463+ }
49464+ } else
49465+ path_fault = "<anonymous mapping>";
49466+ }
49467+ up_read(&mm->mmap_sem);
49468+ }
49469+ if (tsk->signal->curr_ip)
49470+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
49471+ else
49472+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
49473+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
49474+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
49475+ task_uid(tsk), task_euid(tsk), pc, sp);
49476+ free_page((unsigned long)buffer_exec);
49477+ free_page((unsigned long)buffer_fault);
49478+ pax_report_insns(regs, pc, sp);
49479+ do_coredump(SIGKILL, SIGKILL, regs);
49480+}
49481+#endif
49482+
49483+#ifdef CONFIG_PAX_REFCOUNT
49484+void pax_report_refcount_overflow(struct pt_regs *regs)
49485+{
49486+ if (current->signal->curr_ip)
49487+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49488+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
49489+ else
49490+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
49491+ current->comm, task_pid_nr(current), current_uid(), current_euid());
49492+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
49493+ show_regs(regs);
49494+ force_sig_specific(SIGKILL, current);
49495+}
49496+#endif
49497+
49498+#ifdef CONFIG_PAX_USERCOPY
49499+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
49500+int object_is_on_stack(const void *obj, unsigned long len)
49501+{
49502+ const void * const stack = task_stack_page(current);
49503+ const void * const stackend = stack + THREAD_SIZE;
49504+
49505+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49506+ const void *frame = NULL;
49507+ const void *oldframe;
49508+#endif
49509+
49510+ if (obj + len < obj)
49511+ return -1;
49512+
49513+ if (obj + len <= stack || stackend <= obj)
49514+ return 0;
49515+
49516+ if (obj < stack || stackend < obj + len)
49517+ return -1;
49518+
49519+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
49520+ oldframe = __builtin_frame_address(1);
49521+ if (oldframe)
49522+ frame = __builtin_frame_address(2);
49523+ /*
49524+ low ----------------------------------------------> high
49525+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
49526+ ^----------------^
49527+ allow copies only within here
49528+ */
49529+ while (stack <= frame && frame < stackend) {
49530+ /* if obj + len extends past the last frame, this
49531+ check won't pass and the next frame will be 0,
49532+ causing us to bail out and correctly report
49533+ the copy as invalid
49534+ */
49535+ if (obj + len <= frame)
49536+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
49537+ oldframe = frame;
49538+ frame = *(const void * const *)frame;
49539+ }
49540+ return -1;
49541+#else
49542+ return 1;
49543+#endif
49544+}
49545+
49546+
49547+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
49548+{
49549+ if (current->signal->curr_ip)
49550+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49551+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49552+ else
49553+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49554+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49555+
49556+ dump_stack();
49557+ gr_handle_kernel_exploit();
49558+ do_group_exit(SIGKILL);
49559+}
49560+#endif
49561+
49562+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
49563+void pax_track_stack(void)
49564+{
49565+ unsigned long sp = (unsigned long)&sp;
49566+ if (sp < current_thread_info()->lowest_stack &&
49567+ sp > (unsigned long)task_stack_page(current))
49568+ current_thread_info()->lowest_stack = sp;
49569+}
49570+EXPORT_SYMBOL(pax_track_stack);
49571+#endif
49572+
49573 static int zap_process(struct task_struct *start)
49574 {
49575 struct task_struct *t;
49576@@ -1793,17 +2138,17 @@ static void wait_for_dump_helpers(struct file *file)
49577 pipe = file->f_path.dentry->d_inode->i_pipe;
49578
49579 pipe_lock(pipe);
49580- pipe->readers++;
49581- pipe->writers--;
49582+ atomic_inc(&pipe->readers);
49583+ atomic_dec(&pipe->writers);
49584
49585- while ((pipe->readers > 1) && (!signal_pending(current))) {
49586+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49587 wake_up_interruptible_sync(&pipe->wait);
49588 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49589 pipe_wait(pipe);
49590 }
49591
49592- pipe->readers--;
49593- pipe->writers++;
49594+ atomic_dec(&pipe->readers);
49595+ atomic_inc(&pipe->writers);
49596 pipe_unlock(pipe);
49597
49598 }
49599@@ -1826,10 +2171,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49600 char **helper_argv = NULL;
49601 int helper_argc = 0;
49602 int dump_count = 0;
49603- static atomic_t core_dump_count = ATOMIC_INIT(0);
49604+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49605
49606 audit_core_dumps(signr);
49607
49608+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49609+ gr_handle_brute_attach(current, mm->flags);
49610+
49611 binfmt = mm->binfmt;
49612 if (!binfmt || !binfmt->core_dump)
49613 goto fail;
49614@@ -1874,6 +2222,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49615 */
49616 clear_thread_flag(TIF_SIGPENDING);
49617
49618+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49619+
49620 /*
49621 * lock_kernel() because format_corename() is controlled by sysctl, which
49622 * uses lock_kernel()
49623@@ -1908,7 +2258,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49624 goto fail_unlock;
49625 }
49626
49627- dump_count = atomic_inc_return(&core_dump_count);
49628+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
49629 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49630 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49631 task_tgid_vnr(current), current->comm);
49632@@ -1972,7 +2322,7 @@ close_fail:
49633 filp_close(file, NULL);
49634 fail_dropcount:
49635 if (dump_count)
49636- atomic_dec(&core_dump_count);
49637+ atomic_dec_unchecked(&core_dump_count);
49638 fail_unlock:
49639 if (helper_argv)
49640 argv_free(helper_argv);
49641diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49642index 7f8d2e5..a1abdbb 100644
49643--- a/fs/ext2/balloc.c
49644+++ b/fs/ext2/balloc.c
49645@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49646
49647 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49648 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49649- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49650+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49651 sbi->s_resuid != current_fsuid() &&
49652 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49653 return 0;
49654diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49655index 27967f9..9f2a5fb 100644
49656--- a/fs/ext3/balloc.c
49657+++ b/fs/ext3/balloc.c
49658@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49659
49660 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49661 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49662- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49663+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49664 sbi->s_resuid != current_fsuid() &&
49665 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49666 return 0;
49667diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49668index e85b63c..80398e6 100644
49669--- a/fs/ext4/balloc.c
49670+++ b/fs/ext4/balloc.c
49671@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49672 /* Hm, nope. Are (enough) root reserved blocks available? */
49673 if (sbi->s_resuid == current_fsuid() ||
49674 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49675- capable(CAP_SYS_RESOURCE)) {
49676+ capable_nolog(CAP_SYS_RESOURCE)) {
49677 if (free_blocks >= (nblocks + dirty_blocks))
49678 return 1;
49679 }
49680diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49681index 67c46ed..1f237e5 100644
49682--- a/fs/ext4/ext4.h
49683+++ b/fs/ext4/ext4.h
49684@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49685
49686 /* stats for buddy allocator */
49687 spinlock_t s_mb_pa_lock;
49688- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49689- atomic_t s_bal_success; /* we found long enough chunks */
49690- atomic_t s_bal_allocated; /* in blocks */
49691- atomic_t s_bal_ex_scanned; /* total extents scanned */
49692- atomic_t s_bal_goals; /* goal hits */
49693- atomic_t s_bal_breaks; /* too long searches */
49694- atomic_t s_bal_2orders; /* 2^order hits */
49695+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49696+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49697+ atomic_unchecked_t s_bal_allocated; /* in blocks */
49698+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49699+ atomic_unchecked_t s_bal_goals; /* goal hits */
49700+ atomic_unchecked_t s_bal_breaks; /* too long searches */
49701+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49702 spinlock_t s_bal_lock;
49703 unsigned long s_mb_buddies_generated;
49704 unsigned long long s_mb_generation_time;
49705- atomic_t s_mb_lost_chunks;
49706- atomic_t s_mb_preallocated;
49707- atomic_t s_mb_discarded;
49708+ atomic_unchecked_t s_mb_lost_chunks;
49709+ atomic_unchecked_t s_mb_preallocated;
49710+ atomic_unchecked_t s_mb_discarded;
49711 atomic_t s_lock_busy;
49712
49713 /* locality groups */
49714diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49715index 2a60541..7439d61 100644
49716--- a/fs/ext4/file.c
49717+++ b/fs/ext4/file.c
49718@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49719 cp = d_path(&path, buf, sizeof(buf));
49720 path_put(&path);
49721 if (!IS_ERR(cp)) {
49722- memcpy(sbi->s_es->s_last_mounted, cp,
49723- sizeof(sbi->s_es->s_last_mounted));
49724+ strlcpy(sbi->s_es->s_last_mounted, cp,
49725+ sizeof(sbi->s_es->s_last_mounted));
49726 sb->s_dirt = 1;
49727 }
49728 }
49729diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49730index 42bac1b..0aab9d8 100644
49731--- a/fs/ext4/mballoc.c
49732+++ b/fs/ext4/mballoc.c
49733@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49734 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49735
49736 if (EXT4_SB(sb)->s_mb_stats)
49737- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49738+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49739
49740 break;
49741 }
49742@@ -2131,7 +2131,7 @@ repeat:
49743 ac->ac_status = AC_STATUS_CONTINUE;
49744 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49745 cr = 3;
49746- atomic_inc(&sbi->s_mb_lost_chunks);
49747+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49748 goto repeat;
49749 }
49750 }
49751@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49752 ext4_grpblk_t counters[16];
49753 } sg;
49754
49755+ pax_track_stack();
49756+
49757 group--;
49758 if (group == 0)
49759 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49760@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49761 if (sbi->s_mb_stats) {
49762 printk(KERN_INFO
49763 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49764- atomic_read(&sbi->s_bal_allocated),
49765- atomic_read(&sbi->s_bal_reqs),
49766- atomic_read(&sbi->s_bal_success));
49767+ atomic_read_unchecked(&sbi->s_bal_allocated),
49768+ atomic_read_unchecked(&sbi->s_bal_reqs),
49769+ atomic_read_unchecked(&sbi->s_bal_success));
49770 printk(KERN_INFO
49771 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49772 "%u 2^N hits, %u breaks, %u lost\n",
49773- atomic_read(&sbi->s_bal_ex_scanned),
49774- atomic_read(&sbi->s_bal_goals),
49775- atomic_read(&sbi->s_bal_2orders),
49776- atomic_read(&sbi->s_bal_breaks),
49777- atomic_read(&sbi->s_mb_lost_chunks));
49778+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49779+ atomic_read_unchecked(&sbi->s_bal_goals),
49780+ atomic_read_unchecked(&sbi->s_bal_2orders),
49781+ atomic_read_unchecked(&sbi->s_bal_breaks),
49782+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49783 printk(KERN_INFO
49784 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49785 sbi->s_mb_buddies_generated++,
49786 sbi->s_mb_generation_time);
49787 printk(KERN_INFO
49788 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49789- atomic_read(&sbi->s_mb_preallocated),
49790- atomic_read(&sbi->s_mb_discarded));
49791+ atomic_read_unchecked(&sbi->s_mb_preallocated),
49792+ atomic_read_unchecked(&sbi->s_mb_discarded));
49793 }
49794
49795 free_percpu(sbi->s_locality_groups);
49796@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49797 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49798
49799 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49800- atomic_inc(&sbi->s_bal_reqs);
49801- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49802+ atomic_inc_unchecked(&sbi->s_bal_reqs);
49803+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49804 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49805- atomic_inc(&sbi->s_bal_success);
49806- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49807+ atomic_inc_unchecked(&sbi->s_bal_success);
49808+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49809 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49810 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49811- atomic_inc(&sbi->s_bal_goals);
49812+ atomic_inc_unchecked(&sbi->s_bal_goals);
49813 if (ac->ac_found > sbi->s_mb_max_to_scan)
49814- atomic_inc(&sbi->s_bal_breaks);
49815+ atomic_inc_unchecked(&sbi->s_bal_breaks);
49816 }
49817
49818 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49819@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49820 trace_ext4_mb_new_inode_pa(ac, pa);
49821
49822 ext4_mb_use_inode_pa(ac, pa);
49823- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49824+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49825
49826 ei = EXT4_I(ac->ac_inode);
49827 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49828@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49829 trace_ext4_mb_new_group_pa(ac, pa);
49830
49831 ext4_mb_use_group_pa(ac, pa);
49832- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49833+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49834
49835 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49836 lg = ac->ac_lg;
49837@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49838 * from the bitmap and continue.
49839 */
49840 }
49841- atomic_add(free, &sbi->s_mb_discarded);
49842+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
49843
49844 return err;
49845 }
49846@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49847 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49848 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49849 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49850- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49851+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49852
49853 if (ac) {
49854 ac->ac_sb = sb;
49855diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49856index f1e7077..edd86b2 100644
49857--- a/fs/ext4/super.c
49858+++ b/fs/ext4/super.c
49859@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49860 }
49861
49862
49863-static struct sysfs_ops ext4_attr_ops = {
49864+static const struct sysfs_ops ext4_attr_ops = {
49865 .show = ext4_attr_show,
49866 .store = ext4_attr_store,
49867 };
49868diff --git a/fs/fcntl.c b/fs/fcntl.c
49869index 97e01dc..e9aab2d 100644
49870--- a/fs/fcntl.c
49871+++ b/fs/fcntl.c
49872@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49873 if (err)
49874 return err;
49875
49876+ if (gr_handle_chroot_fowner(pid, type))
49877+ return -ENOENT;
49878+ if (gr_check_protected_task_fowner(pid, type))
49879+ return -EACCES;
49880+
49881 f_modown(filp, pid, type, force);
49882 return 0;
49883 }
49884@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49885
49886 static int f_setown_ex(struct file *filp, unsigned long arg)
49887 {
49888- struct f_owner_ex * __user owner_p = (void * __user)arg;
49889+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49890 struct f_owner_ex owner;
49891 struct pid *pid;
49892 int type;
49893@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49894
49895 static int f_getown_ex(struct file *filp, unsigned long arg)
49896 {
49897- struct f_owner_ex * __user owner_p = (void * __user)arg;
49898+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49899 struct f_owner_ex owner;
49900 int ret = 0;
49901
49902@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49903 switch (cmd) {
49904 case F_DUPFD:
49905 case F_DUPFD_CLOEXEC:
49906+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49907 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49908 break;
49909 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49910diff --git a/fs/fifo.c b/fs/fifo.c
49911index f8f97b8..b1f2259 100644
49912--- a/fs/fifo.c
49913+++ b/fs/fifo.c
49914@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49915 */
49916 filp->f_op = &read_pipefifo_fops;
49917 pipe->r_counter++;
49918- if (pipe->readers++ == 0)
49919+ if (atomic_inc_return(&pipe->readers) == 1)
49920 wake_up_partner(inode);
49921
49922- if (!pipe->writers) {
49923+ if (!atomic_read(&pipe->writers)) {
49924 if ((filp->f_flags & O_NONBLOCK)) {
49925 /* suppress POLLHUP until we have
49926 * seen a writer */
49927@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49928 * errno=ENXIO when there is no process reading the FIFO.
49929 */
49930 ret = -ENXIO;
49931- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49932+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49933 goto err;
49934
49935 filp->f_op = &write_pipefifo_fops;
49936 pipe->w_counter++;
49937- if (!pipe->writers++)
49938+ if (atomic_inc_return(&pipe->writers) == 1)
49939 wake_up_partner(inode);
49940
49941- if (!pipe->readers) {
49942+ if (!atomic_read(&pipe->readers)) {
49943 wait_for_partner(inode, &pipe->r_counter);
49944 if (signal_pending(current))
49945 goto err_wr;
49946@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49947 */
49948 filp->f_op = &rdwr_pipefifo_fops;
49949
49950- pipe->readers++;
49951- pipe->writers++;
49952+ atomic_inc(&pipe->readers);
49953+ atomic_inc(&pipe->writers);
49954 pipe->r_counter++;
49955 pipe->w_counter++;
49956- if (pipe->readers == 1 || pipe->writers == 1)
49957+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49958 wake_up_partner(inode);
49959 break;
49960
49961@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49962 return 0;
49963
49964 err_rd:
49965- if (!--pipe->readers)
49966+ if (atomic_dec_and_test(&pipe->readers))
49967 wake_up_interruptible(&pipe->wait);
49968 ret = -ERESTARTSYS;
49969 goto err;
49970
49971 err_wr:
49972- if (!--pipe->writers)
49973+ if (atomic_dec_and_test(&pipe->writers))
49974 wake_up_interruptible(&pipe->wait);
49975 ret = -ERESTARTSYS;
49976 goto err;
49977
49978 err:
49979- if (!pipe->readers && !pipe->writers)
49980+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49981 free_pipe_info(inode);
49982
49983 err_nocleanup:
49984diff --git a/fs/file.c b/fs/file.c
49985index 87e1290..a930cc4 100644
49986--- a/fs/file.c
49987+++ b/fs/file.c
49988@@ -14,6 +14,7 @@
49989 #include <linux/slab.h>
49990 #include <linux/vmalloc.h>
49991 #include <linux/file.h>
49992+#include <linux/security.h>
49993 #include <linux/fdtable.h>
49994 #include <linux/bitops.h>
49995 #include <linux/interrupt.h>
49996@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49997 * N.B. For clone tasks sharing a files structure, this test
49998 * will limit the total number of files that can be opened.
49999 */
50000+
50001+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
50002 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
50003 return -EMFILE;
50004
50005diff --git a/fs/filesystems.c b/fs/filesystems.c
50006index a24c58e..53f91ee 100644
50007--- a/fs/filesystems.c
50008+++ b/fs/filesystems.c
50009@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
50010 int len = dot ? dot - name : strlen(name);
50011
50012 fs = __get_fs_type(name, len);
50013+
50014+#ifdef CONFIG_GRKERNSEC_MODHARDEN
50015+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
50016+#else
50017 if (!fs && (request_module("%.*s", len, name) == 0))
50018+#endif
50019 fs = __get_fs_type(name, len);
50020
50021 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
50022diff --git a/fs/fs_struct.c b/fs/fs_struct.c
50023index eee0590..1181166 100644
50024--- a/fs/fs_struct.c
50025+++ b/fs/fs_struct.c
50026@@ -4,6 +4,7 @@
50027 #include <linux/path.h>
50028 #include <linux/slab.h>
50029 #include <linux/fs_struct.h>
50030+#include <linux/grsecurity.h>
50031
50032 /*
50033 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
50034@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
50035 old_root = fs->root;
50036 fs->root = *path;
50037 path_get(path);
50038+ gr_set_chroot_entries(current, path);
50039 write_unlock(&fs->lock);
50040 if (old_root.dentry)
50041 path_put(&old_root);
50042@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
50043 && fs->root.mnt == old_root->mnt) {
50044 path_get(new_root);
50045 fs->root = *new_root;
50046+ gr_set_chroot_entries(p, new_root);
50047 count++;
50048 }
50049 if (fs->pwd.dentry == old_root->dentry
50050@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
50051 task_lock(tsk);
50052 write_lock(&fs->lock);
50053 tsk->fs = NULL;
50054- kill = !--fs->users;
50055+ gr_clear_chroot_entries(tsk);
50056+ kill = !atomic_dec_return(&fs->users);
50057 write_unlock(&fs->lock);
50058 task_unlock(tsk);
50059 if (kill)
50060@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
50061 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
50062 /* We don't need to lock fs - think why ;-) */
50063 if (fs) {
50064- fs->users = 1;
50065+ atomic_set(&fs->users, 1);
50066 fs->in_exec = 0;
50067 rwlock_init(&fs->lock);
50068 fs->umask = old->umask;
50069@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
50070
50071 task_lock(current);
50072 write_lock(&fs->lock);
50073- kill = !--fs->users;
50074+ kill = !atomic_dec_return(&fs->users);
50075 current->fs = new_fs;
50076+ gr_set_chroot_entries(current, &new_fs->root);
50077 write_unlock(&fs->lock);
50078 task_unlock(current);
50079
50080@@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
50081
50082 int current_umask(void)
50083 {
50084- return current->fs->umask;
50085+ return current->fs->umask | gr_acl_umask();
50086 }
50087 EXPORT_SYMBOL(current_umask);
50088
50089 /* to be mentioned only in INIT_TASK */
50090 struct fs_struct init_fs = {
50091- .users = 1,
50092+ .users = ATOMIC_INIT(1),
50093 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
50094 .umask = 0022,
50095 };
50096@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
50097 task_lock(current);
50098
50099 write_lock(&init_fs.lock);
50100- init_fs.users++;
50101+ atomic_inc(&init_fs.users);
50102 write_unlock(&init_fs.lock);
50103
50104 write_lock(&fs->lock);
50105 current->fs = &init_fs;
50106- kill = !--fs->users;
50107+ gr_set_chroot_entries(current, &current->fs->root);
50108+ kill = !atomic_dec_return(&fs->users);
50109 write_unlock(&fs->lock);
50110
50111 task_unlock(current);
50112diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
50113index 9905350..02eaec4 100644
50114--- a/fs/fscache/cookie.c
50115+++ b/fs/fscache/cookie.c
50116@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
50117 parent ? (char *) parent->def->name : "<no-parent>",
50118 def->name, netfs_data);
50119
50120- fscache_stat(&fscache_n_acquires);
50121+ fscache_stat_unchecked(&fscache_n_acquires);
50122
50123 /* if there's no parent cookie, then we don't create one here either */
50124 if (!parent) {
50125- fscache_stat(&fscache_n_acquires_null);
50126+ fscache_stat_unchecked(&fscache_n_acquires_null);
50127 _leave(" [no parent]");
50128 return NULL;
50129 }
50130@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
50131 /* allocate and initialise a cookie */
50132 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
50133 if (!cookie) {
50134- fscache_stat(&fscache_n_acquires_oom);
50135+ fscache_stat_unchecked(&fscache_n_acquires_oom);
50136 _leave(" [ENOMEM]");
50137 return NULL;
50138 }
50139@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50140
50141 switch (cookie->def->type) {
50142 case FSCACHE_COOKIE_TYPE_INDEX:
50143- fscache_stat(&fscache_n_cookie_index);
50144+ fscache_stat_unchecked(&fscache_n_cookie_index);
50145 break;
50146 case FSCACHE_COOKIE_TYPE_DATAFILE:
50147- fscache_stat(&fscache_n_cookie_data);
50148+ fscache_stat_unchecked(&fscache_n_cookie_data);
50149 break;
50150 default:
50151- fscache_stat(&fscache_n_cookie_special);
50152+ fscache_stat_unchecked(&fscache_n_cookie_special);
50153 break;
50154 }
50155
50156@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50157 if (fscache_acquire_non_index_cookie(cookie) < 0) {
50158 atomic_dec(&parent->n_children);
50159 __fscache_cookie_put(cookie);
50160- fscache_stat(&fscache_n_acquires_nobufs);
50161+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
50162 _leave(" = NULL");
50163 return NULL;
50164 }
50165 }
50166
50167- fscache_stat(&fscache_n_acquires_ok);
50168+ fscache_stat_unchecked(&fscache_n_acquires_ok);
50169 _leave(" = %p", cookie);
50170 return cookie;
50171 }
50172@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
50173 cache = fscache_select_cache_for_object(cookie->parent);
50174 if (!cache) {
50175 up_read(&fscache_addremove_sem);
50176- fscache_stat(&fscache_n_acquires_no_cache);
50177+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
50178 _leave(" = -ENOMEDIUM [no cache]");
50179 return -ENOMEDIUM;
50180 }
50181@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
50182 object = cache->ops->alloc_object(cache, cookie);
50183 fscache_stat_d(&fscache_n_cop_alloc_object);
50184 if (IS_ERR(object)) {
50185- fscache_stat(&fscache_n_object_no_alloc);
50186+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
50187 ret = PTR_ERR(object);
50188 goto error;
50189 }
50190
50191- fscache_stat(&fscache_n_object_alloc);
50192+ fscache_stat_unchecked(&fscache_n_object_alloc);
50193
50194 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
50195
50196@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
50197 struct fscache_object *object;
50198 struct hlist_node *_p;
50199
50200- fscache_stat(&fscache_n_updates);
50201+ fscache_stat_unchecked(&fscache_n_updates);
50202
50203 if (!cookie) {
50204- fscache_stat(&fscache_n_updates_null);
50205+ fscache_stat_unchecked(&fscache_n_updates_null);
50206 _leave(" [no cookie]");
50207 return;
50208 }
50209@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50210 struct fscache_object *object;
50211 unsigned long event;
50212
50213- fscache_stat(&fscache_n_relinquishes);
50214+ fscache_stat_unchecked(&fscache_n_relinquishes);
50215 if (retire)
50216- fscache_stat(&fscache_n_relinquishes_retire);
50217+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
50218
50219 if (!cookie) {
50220- fscache_stat(&fscache_n_relinquishes_null);
50221+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
50222 _leave(" [no cookie]");
50223 return;
50224 }
50225@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50226
50227 /* wait for the cookie to finish being instantiated (or to fail) */
50228 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
50229- fscache_stat(&fscache_n_relinquishes_waitcrt);
50230+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
50231 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
50232 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
50233 }
50234diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
50235index edd7434..0725e66 100644
50236--- a/fs/fscache/internal.h
50237+++ b/fs/fscache/internal.h
50238@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
50239 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
50240 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
50241
50242-extern atomic_t fscache_n_op_pend;
50243-extern atomic_t fscache_n_op_run;
50244-extern atomic_t fscache_n_op_enqueue;
50245-extern atomic_t fscache_n_op_deferred_release;
50246-extern atomic_t fscache_n_op_release;
50247-extern atomic_t fscache_n_op_gc;
50248-extern atomic_t fscache_n_op_cancelled;
50249-extern atomic_t fscache_n_op_rejected;
50250+extern atomic_unchecked_t fscache_n_op_pend;
50251+extern atomic_unchecked_t fscache_n_op_run;
50252+extern atomic_unchecked_t fscache_n_op_enqueue;
50253+extern atomic_unchecked_t fscache_n_op_deferred_release;
50254+extern atomic_unchecked_t fscache_n_op_release;
50255+extern atomic_unchecked_t fscache_n_op_gc;
50256+extern atomic_unchecked_t fscache_n_op_cancelled;
50257+extern atomic_unchecked_t fscache_n_op_rejected;
50258
50259-extern atomic_t fscache_n_attr_changed;
50260-extern atomic_t fscache_n_attr_changed_ok;
50261-extern atomic_t fscache_n_attr_changed_nobufs;
50262-extern atomic_t fscache_n_attr_changed_nomem;
50263-extern atomic_t fscache_n_attr_changed_calls;
50264+extern atomic_unchecked_t fscache_n_attr_changed;
50265+extern atomic_unchecked_t fscache_n_attr_changed_ok;
50266+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
50267+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
50268+extern atomic_unchecked_t fscache_n_attr_changed_calls;
50269
50270-extern atomic_t fscache_n_allocs;
50271-extern atomic_t fscache_n_allocs_ok;
50272-extern atomic_t fscache_n_allocs_wait;
50273-extern atomic_t fscache_n_allocs_nobufs;
50274-extern atomic_t fscache_n_allocs_intr;
50275-extern atomic_t fscache_n_allocs_object_dead;
50276-extern atomic_t fscache_n_alloc_ops;
50277-extern atomic_t fscache_n_alloc_op_waits;
50278+extern atomic_unchecked_t fscache_n_allocs;
50279+extern atomic_unchecked_t fscache_n_allocs_ok;
50280+extern atomic_unchecked_t fscache_n_allocs_wait;
50281+extern atomic_unchecked_t fscache_n_allocs_nobufs;
50282+extern atomic_unchecked_t fscache_n_allocs_intr;
50283+extern atomic_unchecked_t fscache_n_allocs_object_dead;
50284+extern atomic_unchecked_t fscache_n_alloc_ops;
50285+extern atomic_unchecked_t fscache_n_alloc_op_waits;
50286
50287-extern atomic_t fscache_n_retrievals;
50288-extern atomic_t fscache_n_retrievals_ok;
50289-extern atomic_t fscache_n_retrievals_wait;
50290-extern atomic_t fscache_n_retrievals_nodata;
50291-extern atomic_t fscache_n_retrievals_nobufs;
50292-extern atomic_t fscache_n_retrievals_intr;
50293-extern atomic_t fscache_n_retrievals_nomem;
50294-extern atomic_t fscache_n_retrievals_object_dead;
50295-extern atomic_t fscache_n_retrieval_ops;
50296-extern atomic_t fscache_n_retrieval_op_waits;
50297+extern atomic_unchecked_t fscache_n_retrievals;
50298+extern atomic_unchecked_t fscache_n_retrievals_ok;
50299+extern atomic_unchecked_t fscache_n_retrievals_wait;
50300+extern atomic_unchecked_t fscache_n_retrievals_nodata;
50301+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50302+extern atomic_unchecked_t fscache_n_retrievals_intr;
50303+extern atomic_unchecked_t fscache_n_retrievals_nomem;
50304+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50305+extern atomic_unchecked_t fscache_n_retrieval_ops;
50306+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50307
50308-extern atomic_t fscache_n_stores;
50309-extern atomic_t fscache_n_stores_ok;
50310-extern atomic_t fscache_n_stores_again;
50311-extern atomic_t fscache_n_stores_nobufs;
50312-extern atomic_t fscache_n_stores_oom;
50313-extern atomic_t fscache_n_store_ops;
50314-extern atomic_t fscache_n_store_calls;
50315-extern atomic_t fscache_n_store_pages;
50316-extern atomic_t fscache_n_store_radix_deletes;
50317-extern atomic_t fscache_n_store_pages_over_limit;
50318+extern atomic_unchecked_t fscache_n_stores;
50319+extern atomic_unchecked_t fscache_n_stores_ok;
50320+extern atomic_unchecked_t fscache_n_stores_again;
50321+extern atomic_unchecked_t fscache_n_stores_nobufs;
50322+extern atomic_unchecked_t fscache_n_stores_oom;
50323+extern atomic_unchecked_t fscache_n_store_ops;
50324+extern atomic_unchecked_t fscache_n_store_calls;
50325+extern atomic_unchecked_t fscache_n_store_pages;
50326+extern atomic_unchecked_t fscache_n_store_radix_deletes;
50327+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50328
50329-extern atomic_t fscache_n_store_vmscan_not_storing;
50330-extern atomic_t fscache_n_store_vmscan_gone;
50331-extern atomic_t fscache_n_store_vmscan_busy;
50332-extern atomic_t fscache_n_store_vmscan_cancelled;
50333+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50334+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50335+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50336+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50337
50338-extern atomic_t fscache_n_marks;
50339-extern atomic_t fscache_n_uncaches;
50340+extern atomic_unchecked_t fscache_n_marks;
50341+extern atomic_unchecked_t fscache_n_uncaches;
50342
50343-extern atomic_t fscache_n_acquires;
50344-extern atomic_t fscache_n_acquires_null;
50345-extern atomic_t fscache_n_acquires_no_cache;
50346-extern atomic_t fscache_n_acquires_ok;
50347-extern atomic_t fscache_n_acquires_nobufs;
50348-extern atomic_t fscache_n_acquires_oom;
50349+extern atomic_unchecked_t fscache_n_acquires;
50350+extern atomic_unchecked_t fscache_n_acquires_null;
50351+extern atomic_unchecked_t fscache_n_acquires_no_cache;
50352+extern atomic_unchecked_t fscache_n_acquires_ok;
50353+extern atomic_unchecked_t fscache_n_acquires_nobufs;
50354+extern atomic_unchecked_t fscache_n_acquires_oom;
50355
50356-extern atomic_t fscache_n_updates;
50357-extern atomic_t fscache_n_updates_null;
50358-extern atomic_t fscache_n_updates_run;
50359+extern atomic_unchecked_t fscache_n_updates;
50360+extern atomic_unchecked_t fscache_n_updates_null;
50361+extern atomic_unchecked_t fscache_n_updates_run;
50362
50363-extern atomic_t fscache_n_relinquishes;
50364-extern atomic_t fscache_n_relinquishes_null;
50365-extern atomic_t fscache_n_relinquishes_waitcrt;
50366-extern atomic_t fscache_n_relinquishes_retire;
50367+extern atomic_unchecked_t fscache_n_relinquishes;
50368+extern atomic_unchecked_t fscache_n_relinquishes_null;
50369+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50370+extern atomic_unchecked_t fscache_n_relinquishes_retire;
50371
50372-extern atomic_t fscache_n_cookie_index;
50373-extern atomic_t fscache_n_cookie_data;
50374-extern atomic_t fscache_n_cookie_special;
50375+extern atomic_unchecked_t fscache_n_cookie_index;
50376+extern atomic_unchecked_t fscache_n_cookie_data;
50377+extern atomic_unchecked_t fscache_n_cookie_special;
50378
50379-extern atomic_t fscache_n_object_alloc;
50380-extern atomic_t fscache_n_object_no_alloc;
50381-extern atomic_t fscache_n_object_lookups;
50382-extern atomic_t fscache_n_object_lookups_negative;
50383-extern atomic_t fscache_n_object_lookups_positive;
50384-extern atomic_t fscache_n_object_lookups_timed_out;
50385-extern atomic_t fscache_n_object_created;
50386-extern atomic_t fscache_n_object_avail;
50387-extern atomic_t fscache_n_object_dead;
50388+extern atomic_unchecked_t fscache_n_object_alloc;
50389+extern atomic_unchecked_t fscache_n_object_no_alloc;
50390+extern atomic_unchecked_t fscache_n_object_lookups;
50391+extern atomic_unchecked_t fscache_n_object_lookups_negative;
50392+extern atomic_unchecked_t fscache_n_object_lookups_positive;
50393+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
50394+extern atomic_unchecked_t fscache_n_object_created;
50395+extern atomic_unchecked_t fscache_n_object_avail;
50396+extern atomic_unchecked_t fscache_n_object_dead;
50397
50398-extern atomic_t fscache_n_checkaux_none;
50399-extern atomic_t fscache_n_checkaux_okay;
50400-extern atomic_t fscache_n_checkaux_update;
50401-extern atomic_t fscache_n_checkaux_obsolete;
50402+extern atomic_unchecked_t fscache_n_checkaux_none;
50403+extern atomic_unchecked_t fscache_n_checkaux_okay;
50404+extern atomic_unchecked_t fscache_n_checkaux_update;
50405+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
50406
50407 extern atomic_t fscache_n_cop_alloc_object;
50408 extern atomic_t fscache_n_cop_lookup_object;
50409@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
50410 atomic_inc(stat);
50411 }
50412
50413+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
50414+{
50415+ atomic_inc_unchecked(stat);
50416+}
50417+
50418 static inline void fscache_stat_d(atomic_t *stat)
50419 {
50420 atomic_dec(stat);
50421@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
50422
50423 #define __fscache_stat(stat) (NULL)
50424 #define fscache_stat(stat) do {} while (0)
50425+#define fscache_stat_unchecked(stat) do {} while (0)
50426 #define fscache_stat_d(stat) do {} while (0)
50427 #endif
50428
50429diff --git a/fs/fscache/object.c b/fs/fscache/object.c
50430index e513ac5..e888d34 100644
50431--- a/fs/fscache/object.c
50432+++ b/fs/fscache/object.c
50433@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50434 /* update the object metadata on disk */
50435 case FSCACHE_OBJECT_UPDATING:
50436 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
50437- fscache_stat(&fscache_n_updates_run);
50438+ fscache_stat_unchecked(&fscache_n_updates_run);
50439 fscache_stat(&fscache_n_cop_update_object);
50440 object->cache->ops->update_object(object);
50441 fscache_stat_d(&fscache_n_cop_update_object);
50442@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50443 spin_lock(&object->lock);
50444 object->state = FSCACHE_OBJECT_DEAD;
50445 spin_unlock(&object->lock);
50446- fscache_stat(&fscache_n_object_dead);
50447+ fscache_stat_unchecked(&fscache_n_object_dead);
50448 goto terminal_transit;
50449
50450 /* handle the parent cache of this object being withdrawn from
50451@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
50452 spin_lock(&object->lock);
50453 object->state = FSCACHE_OBJECT_DEAD;
50454 spin_unlock(&object->lock);
50455- fscache_stat(&fscache_n_object_dead);
50456+ fscache_stat_unchecked(&fscache_n_object_dead);
50457 goto terminal_transit;
50458
50459 /* complain about the object being woken up once it is
50460@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50461 parent->cookie->def->name, cookie->def->name,
50462 object->cache->tag->name);
50463
50464- fscache_stat(&fscache_n_object_lookups);
50465+ fscache_stat_unchecked(&fscache_n_object_lookups);
50466 fscache_stat(&fscache_n_cop_lookup_object);
50467 ret = object->cache->ops->lookup_object(object);
50468 fscache_stat_d(&fscache_n_cop_lookup_object);
50469@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
50470 if (ret == -ETIMEDOUT) {
50471 /* probably stuck behind another object, so move this one to
50472 * the back of the queue */
50473- fscache_stat(&fscache_n_object_lookups_timed_out);
50474+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
50475 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50476 }
50477
50478@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
50479
50480 spin_lock(&object->lock);
50481 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50482- fscache_stat(&fscache_n_object_lookups_negative);
50483+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
50484
50485 /* transit here to allow write requests to begin stacking up
50486 * and read requests to begin returning ENODATA */
50487@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
50488 * result, in which case there may be data available */
50489 spin_lock(&object->lock);
50490 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
50491- fscache_stat(&fscache_n_object_lookups_positive);
50492+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
50493
50494 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
50495
50496@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
50497 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
50498 } else {
50499 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
50500- fscache_stat(&fscache_n_object_created);
50501+ fscache_stat_unchecked(&fscache_n_object_created);
50502
50503 object->state = FSCACHE_OBJECT_AVAILABLE;
50504 spin_unlock(&object->lock);
50505@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
50506 fscache_enqueue_dependents(object);
50507
50508 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
50509- fscache_stat(&fscache_n_object_avail);
50510+ fscache_stat_unchecked(&fscache_n_object_avail);
50511
50512 _leave("");
50513 }
50514@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50515 enum fscache_checkaux result;
50516
50517 if (!object->cookie->def->check_aux) {
50518- fscache_stat(&fscache_n_checkaux_none);
50519+ fscache_stat_unchecked(&fscache_n_checkaux_none);
50520 return FSCACHE_CHECKAUX_OKAY;
50521 }
50522
50523@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
50524 switch (result) {
50525 /* entry okay as is */
50526 case FSCACHE_CHECKAUX_OKAY:
50527- fscache_stat(&fscache_n_checkaux_okay);
50528+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
50529 break;
50530
50531 /* entry requires update */
50532 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
50533- fscache_stat(&fscache_n_checkaux_update);
50534+ fscache_stat_unchecked(&fscache_n_checkaux_update);
50535 break;
50536
50537 /* entry requires deletion */
50538 case FSCACHE_CHECKAUX_OBSOLETE:
50539- fscache_stat(&fscache_n_checkaux_obsolete);
50540+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
50541 break;
50542
50543 default:
50544diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
50545index 313e79a..775240f 100644
50546--- a/fs/fscache/operation.c
50547+++ b/fs/fscache/operation.c
50548@@ -16,7 +16,7 @@
50549 #include <linux/seq_file.h>
50550 #include "internal.h"
50551
50552-atomic_t fscache_op_debug_id;
50553+atomic_unchecked_t fscache_op_debug_id;
50554 EXPORT_SYMBOL(fscache_op_debug_id);
50555
50556 /**
50557@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
50558 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
50559 ASSERTCMP(atomic_read(&op->usage), >, 0);
50560
50561- fscache_stat(&fscache_n_op_enqueue);
50562+ fscache_stat_unchecked(&fscache_n_op_enqueue);
50563 switch (op->flags & FSCACHE_OP_TYPE) {
50564 case FSCACHE_OP_FAST:
50565 _debug("queue fast");
50566@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
50567 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
50568 if (op->processor)
50569 fscache_enqueue_operation(op);
50570- fscache_stat(&fscache_n_op_run);
50571+ fscache_stat_unchecked(&fscache_n_op_run);
50572 }
50573
50574 /*
50575@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50576 if (object->n_ops > 0) {
50577 atomic_inc(&op->usage);
50578 list_add_tail(&op->pend_link, &object->pending_ops);
50579- fscache_stat(&fscache_n_op_pend);
50580+ fscache_stat_unchecked(&fscache_n_op_pend);
50581 } else if (!list_empty(&object->pending_ops)) {
50582 atomic_inc(&op->usage);
50583 list_add_tail(&op->pend_link, &object->pending_ops);
50584- fscache_stat(&fscache_n_op_pend);
50585+ fscache_stat_unchecked(&fscache_n_op_pend);
50586 fscache_start_operations(object);
50587 } else {
50588 ASSERTCMP(object->n_in_progress, ==, 0);
50589@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50590 object->n_exclusive++; /* reads and writes must wait */
50591 atomic_inc(&op->usage);
50592 list_add_tail(&op->pend_link, &object->pending_ops);
50593- fscache_stat(&fscache_n_op_pend);
50594+ fscache_stat_unchecked(&fscache_n_op_pend);
50595 ret = 0;
50596 } else {
50597 /* not allowed to submit ops in any other state */
50598@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
50599 if (object->n_exclusive > 0) {
50600 atomic_inc(&op->usage);
50601 list_add_tail(&op->pend_link, &object->pending_ops);
50602- fscache_stat(&fscache_n_op_pend);
50603+ fscache_stat_unchecked(&fscache_n_op_pend);
50604 } else if (!list_empty(&object->pending_ops)) {
50605 atomic_inc(&op->usage);
50606 list_add_tail(&op->pend_link, &object->pending_ops);
50607- fscache_stat(&fscache_n_op_pend);
50608+ fscache_stat_unchecked(&fscache_n_op_pend);
50609 fscache_start_operations(object);
50610 } else {
50611 ASSERTCMP(object->n_exclusive, ==, 0);
50612@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50613 object->n_ops++;
50614 atomic_inc(&op->usage);
50615 list_add_tail(&op->pend_link, &object->pending_ops);
50616- fscache_stat(&fscache_n_op_pend);
50617+ fscache_stat_unchecked(&fscache_n_op_pend);
50618 ret = 0;
50619 } else if (object->state == FSCACHE_OBJECT_DYING ||
50620 object->state == FSCACHE_OBJECT_LC_DYING ||
50621 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50622- fscache_stat(&fscache_n_op_rejected);
50623+ fscache_stat_unchecked(&fscache_n_op_rejected);
50624 ret = -ENOBUFS;
50625 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50626 fscache_report_unexpected_submission(object, op, ostate);
50627@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50628
50629 ret = -EBUSY;
50630 if (!list_empty(&op->pend_link)) {
50631- fscache_stat(&fscache_n_op_cancelled);
50632+ fscache_stat_unchecked(&fscache_n_op_cancelled);
50633 list_del_init(&op->pend_link);
50634 object->n_ops--;
50635 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50636@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50637 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50638 BUG();
50639
50640- fscache_stat(&fscache_n_op_release);
50641+ fscache_stat_unchecked(&fscache_n_op_release);
50642
50643 if (op->release) {
50644 op->release(op);
50645@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50646 * lock, and defer it otherwise */
50647 if (!spin_trylock(&object->lock)) {
50648 _debug("defer put");
50649- fscache_stat(&fscache_n_op_deferred_release);
50650+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
50651
50652 cache = object->cache;
50653 spin_lock(&cache->op_gc_list_lock);
50654@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50655
50656 _debug("GC DEFERRED REL OBJ%x OP%x",
50657 object->debug_id, op->debug_id);
50658- fscache_stat(&fscache_n_op_gc);
50659+ fscache_stat_unchecked(&fscache_n_op_gc);
50660
50661 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50662
50663diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50664index c598ea4..6aac13e 100644
50665--- a/fs/fscache/page.c
50666+++ b/fs/fscache/page.c
50667@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50668 val = radix_tree_lookup(&cookie->stores, page->index);
50669 if (!val) {
50670 rcu_read_unlock();
50671- fscache_stat(&fscache_n_store_vmscan_not_storing);
50672+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50673 __fscache_uncache_page(cookie, page);
50674 return true;
50675 }
50676@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50677 spin_unlock(&cookie->stores_lock);
50678
50679 if (xpage) {
50680- fscache_stat(&fscache_n_store_vmscan_cancelled);
50681- fscache_stat(&fscache_n_store_radix_deletes);
50682+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50683+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50684 ASSERTCMP(xpage, ==, page);
50685 } else {
50686- fscache_stat(&fscache_n_store_vmscan_gone);
50687+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50688 }
50689
50690 wake_up_bit(&cookie->flags, 0);
50691@@ -106,7 +106,7 @@ page_busy:
50692 /* we might want to wait here, but that could deadlock the allocator as
50693 * the slow-work threads writing to the cache may all end up sleeping
50694 * on memory allocation */
50695- fscache_stat(&fscache_n_store_vmscan_busy);
50696+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50697 return false;
50698 }
50699 EXPORT_SYMBOL(__fscache_maybe_release_page);
50700@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50701 FSCACHE_COOKIE_STORING_TAG);
50702 if (!radix_tree_tag_get(&cookie->stores, page->index,
50703 FSCACHE_COOKIE_PENDING_TAG)) {
50704- fscache_stat(&fscache_n_store_radix_deletes);
50705+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50706 xpage = radix_tree_delete(&cookie->stores, page->index);
50707 }
50708 spin_unlock(&cookie->stores_lock);
50709@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50710
50711 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50712
50713- fscache_stat(&fscache_n_attr_changed_calls);
50714+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50715
50716 if (fscache_object_is_active(object)) {
50717 fscache_set_op_state(op, "CallFS");
50718@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50719
50720 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50721
50722- fscache_stat(&fscache_n_attr_changed);
50723+ fscache_stat_unchecked(&fscache_n_attr_changed);
50724
50725 op = kzalloc(sizeof(*op), GFP_KERNEL);
50726 if (!op) {
50727- fscache_stat(&fscache_n_attr_changed_nomem);
50728+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50729 _leave(" = -ENOMEM");
50730 return -ENOMEM;
50731 }
50732@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50733 if (fscache_submit_exclusive_op(object, op) < 0)
50734 goto nobufs;
50735 spin_unlock(&cookie->lock);
50736- fscache_stat(&fscache_n_attr_changed_ok);
50737+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50738 fscache_put_operation(op);
50739 _leave(" = 0");
50740 return 0;
50741@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50742 nobufs:
50743 spin_unlock(&cookie->lock);
50744 kfree(op);
50745- fscache_stat(&fscache_n_attr_changed_nobufs);
50746+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50747 _leave(" = %d", -ENOBUFS);
50748 return -ENOBUFS;
50749 }
50750@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50751 /* allocate a retrieval operation and attempt to submit it */
50752 op = kzalloc(sizeof(*op), GFP_NOIO);
50753 if (!op) {
50754- fscache_stat(&fscache_n_retrievals_nomem);
50755+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50756 return NULL;
50757 }
50758
50759@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50760 return 0;
50761 }
50762
50763- fscache_stat(&fscache_n_retrievals_wait);
50764+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
50765
50766 jif = jiffies;
50767 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50768 fscache_wait_bit_interruptible,
50769 TASK_INTERRUPTIBLE) != 0) {
50770- fscache_stat(&fscache_n_retrievals_intr);
50771+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50772 _leave(" = -ERESTARTSYS");
50773 return -ERESTARTSYS;
50774 }
50775@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50776 */
50777 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50778 struct fscache_retrieval *op,
50779- atomic_t *stat_op_waits,
50780- atomic_t *stat_object_dead)
50781+ atomic_unchecked_t *stat_op_waits,
50782+ atomic_unchecked_t *stat_object_dead)
50783 {
50784 int ret;
50785
50786@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50787 goto check_if_dead;
50788
50789 _debug(">>> WT");
50790- fscache_stat(stat_op_waits);
50791+ fscache_stat_unchecked(stat_op_waits);
50792 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50793 fscache_wait_bit_interruptible,
50794 TASK_INTERRUPTIBLE) < 0) {
50795@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50796
50797 check_if_dead:
50798 if (unlikely(fscache_object_is_dead(object))) {
50799- fscache_stat(stat_object_dead);
50800+ fscache_stat_unchecked(stat_object_dead);
50801 return -ENOBUFS;
50802 }
50803 return 0;
50804@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50805
50806 _enter("%p,%p,,,", cookie, page);
50807
50808- fscache_stat(&fscache_n_retrievals);
50809+ fscache_stat_unchecked(&fscache_n_retrievals);
50810
50811 if (hlist_empty(&cookie->backing_objects))
50812 goto nobufs;
50813@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50814 goto nobufs_unlock;
50815 spin_unlock(&cookie->lock);
50816
50817- fscache_stat(&fscache_n_retrieval_ops);
50818+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50819
50820 /* pin the netfs read context in case we need to do the actual netfs
50821 * read because we've encountered a cache read failure */
50822@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50823
50824 error:
50825 if (ret == -ENOMEM)
50826- fscache_stat(&fscache_n_retrievals_nomem);
50827+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50828 else if (ret == -ERESTARTSYS)
50829- fscache_stat(&fscache_n_retrievals_intr);
50830+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50831 else if (ret == -ENODATA)
50832- fscache_stat(&fscache_n_retrievals_nodata);
50833+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50834 else if (ret < 0)
50835- fscache_stat(&fscache_n_retrievals_nobufs);
50836+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50837 else
50838- fscache_stat(&fscache_n_retrievals_ok);
50839+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50840
50841 fscache_put_retrieval(op);
50842 _leave(" = %d", ret);
50843@@ -453,7 +453,7 @@ nobufs_unlock:
50844 spin_unlock(&cookie->lock);
50845 kfree(op);
50846 nobufs:
50847- fscache_stat(&fscache_n_retrievals_nobufs);
50848+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50849 _leave(" = -ENOBUFS");
50850 return -ENOBUFS;
50851 }
50852@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50853
50854 _enter("%p,,%d,,,", cookie, *nr_pages);
50855
50856- fscache_stat(&fscache_n_retrievals);
50857+ fscache_stat_unchecked(&fscache_n_retrievals);
50858
50859 if (hlist_empty(&cookie->backing_objects))
50860 goto nobufs;
50861@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50862 goto nobufs_unlock;
50863 spin_unlock(&cookie->lock);
50864
50865- fscache_stat(&fscache_n_retrieval_ops);
50866+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50867
50868 /* pin the netfs read context in case we need to do the actual netfs
50869 * read because we've encountered a cache read failure */
50870@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50871
50872 error:
50873 if (ret == -ENOMEM)
50874- fscache_stat(&fscache_n_retrievals_nomem);
50875+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50876 else if (ret == -ERESTARTSYS)
50877- fscache_stat(&fscache_n_retrievals_intr);
50878+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50879 else if (ret == -ENODATA)
50880- fscache_stat(&fscache_n_retrievals_nodata);
50881+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50882 else if (ret < 0)
50883- fscache_stat(&fscache_n_retrievals_nobufs);
50884+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50885 else
50886- fscache_stat(&fscache_n_retrievals_ok);
50887+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50888
50889 fscache_put_retrieval(op);
50890 _leave(" = %d", ret);
50891@@ -570,7 +570,7 @@ nobufs_unlock:
50892 spin_unlock(&cookie->lock);
50893 kfree(op);
50894 nobufs:
50895- fscache_stat(&fscache_n_retrievals_nobufs);
50896+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50897 _leave(" = -ENOBUFS");
50898 return -ENOBUFS;
50899 }
50900@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50901
50902 _enter("%p,%p,,,", cookie, page);
50903
50904- fscache_stat(&fscache_n_allocs);
50905+ fscache_stat_unchecked(&fscache_n_allocs);
50906
50907 if (hlist_empty(&cookie->backing_objects))
50908 goto nobufs;
50909@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50910 goto nobufs_unlock;
50911 spin_unlock(&cookie->lock);
50912
50913- fscache_stat(&fscache_n_alloc_ops);
50914+ fscache_stat_unchecked(&fscache_n_alloc_ops);
50915
50916 ret = fscache_wait_for_retrieval_activation(
50917 object, op,
50918@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50919
50920 error:
50921 if (ret == -ERESTARTSYS)
50922- fscache_stat(&fscache_n_allocs_intr);
50923+ fscache_stat_unchecked(&fscache_n_allocs_intr);
50924 else if (ret < 0)
50925- fscache_stat(&fscache_n_allocs_nobufs);
50926+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50927 else
50928- fscache_stat(&fscache_n_allocs_ok);
50929+ fscache_stat_unchecked(&fscache_n_allocs_ok);
50930
50931 fscache_put_retrieval(op);
50932 _leave(" = %d", ret);
50933@@ -651,7 +651,7 @@ nobufs_unlock:
50934 spin_unlock(&cookie->lock);
50935 kfree(op);
50936 nobufs:
50937- fscache_stat(&fscache_n_allocs_nobufs);
50938+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50939 _leave(" = -ENOBUFS");
50940 return -ENOBUFS;
50941 }
50942@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50943
50944 spin_lock(&cookie->stores_lock);
50945
50946- fscache_stat(&fscache_n_store_calls);
50947+ fscache_stat_unchecked(&fscache_n_store_calls);
50948
50949 /* find a page to store */
50950 page = NULL;
50951@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50952 page = results[0];
50953 _debug("gang %d [%lx]", n, page->index);
50954 if (page->index > op->store_limit) {
50955- fscache_stat(&fscache_n_store_pages_over_limit);
50956+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50957 goto superseded;
50958 }
50959
50960@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50961
50962 if (page) {
50963 fscache_set_op_state(&op->op, "Store");
50964- fscache_stat(&fscache_n_store_pages);
50965+ fscache_stat_unchecked(&fscache_n_store_pages);
50966 fscache_stat(&fscache_n_cop_write_page);
50967 ret = object->cache->ops->write_page(op, page);
50968 fscache_stat_d(&fscache_n_cop_write_page);
50969@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50970 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50971 ASSERT(PageFsCache(page));
50972
50973- fscache_stat(&fscache_n_stores);
50974+ fscache_stat_unchecked(&fscache_n_stores);
50975
50976 op = kzalloc(sizeof(*op), GFP_NOIO);
50977 if (!op)
50978@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50979 spin_unlock(&cookie->stores_lock);
50980 spin_unlock(&object->lock);
50981
50982- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50983+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50984 op->store_limit = object->store_limit;
50985
50986 if (fscache_submit_op(object, &op->op) < 0)
50987@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50988
50989 spin_unlock(&cookie->lock);
50990 radix_tree_preload_end();
50991- fscache_stat(&fscache_n_store_ops);
50992- fscache_stat(&fscache_n_stores_ok);
50993+ fscache_stat_unchecked(&fscache_n_store_ops);
50994+ fscache_stat_unchecked(&fscache_n_stores_ok);
50995
50996 /* the slow work queue now carries its own ref on the object */
50997 fscache_put_operation(&op->op);
50998@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50999 return 0;
51000
51001 already_queued:
51002- fscache_stat(&fscache_n_stores_again);
51003+ fscache_stat_unchecked(&fscache_n_stores_again);
51004 already_pending:
51005 spin_unlock(&cookie->stores_lock);
51006 spin_unlock(&object->lock);
51007 spin_unlock(&cookie->lock);
51008 radix_tree_preload_end();
51009 kfree(op);
51010- fscache_stat(&fscache_n_stores_ok);
51011+ fscache_stat_unchecked(&fscache_n_stores_ok);
51012 _leave(" = 0");
51013 return 0;
51014
51015@@ -886,14 +886,14 @@ nobufs:
51016 spin_unlock(&cookie->lock);
51017 radix_tree_preload_end();
51018 kfree(op);
51019- fscache_stat(&fscache_n_stores_nobufs);
51020+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
51021 _leave(" = -ENOBUFS");
51022 return -ENOBUFS;
51023
51024 nomem_free:
51025 kfree(op);
51026 nomem:
51027- fscache_stat(&fscache_n_stores_oom);
51028+ fscache_stat_unchecked(&fscache_n_stores_oom);
51029 _leave(" = -ENOMEM");
51030 return -ENOMEM;
51031 }
51032@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
51033 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51034 ASSERTCMP(page, !=, NULL);
51035
51036- fscache_stat(&fscache_n_uncaches);
51037+ fscache_stat_unchecked(&fscache_n_uncaches);
51038
51039 /* cache withdrawal may beat us to it */
51040 if (!PageFsCache(page))
51041@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
51042 unsigned long loop;
51043
51044 #ifdef CONFIG_FSCACHE_STATS
51045- atomic_add(pagevec->nr, &fscache_n_marks);
51046+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
51047 #endif
51048
51049 for (loop = 0; loop < pagevec->nr; loop++) {
51050diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
51051index 46435f3..8cddf18 100644
51052--- a/fs/fscache/stats.c
51053+++ b/fs/fscache/stats.c
51054@@ -18,95 +18,95 @@
51055 /*
51056 * operation counters
51057 */
51058-atomic_t fscache_n_op_pend;
51059-atomic_t fscache_n_op_run;
51060-atomic_t fscache_n_op_enqueue;
51061-atomic_t fscache_n_op_requeue;
51062-atomic_t fscache_n_op_deferred_release;
51063-atomic_t fscache_n_op_release;
51064-atomic_t fscache_n_op_gc;
51065-atomic_t fscache_n_op_cancelled;
51066-atomic_t fscache_n_op_rejected;
51067+atomic_unchecked_t fscache_n_op_pend;
51068+atomic_unchecked_t fscache_n_op_run;
51069+atomic_unchecked_t fscache_n_op_enqueue;
51070+atomic_unchecked_t fscache_n_op_requeue;
51071+atomic_unchecked_t fscache_n_op_deferred_release;
51072+atomic_unchecked_t fscache_n_op_release;
51073+atomic_unchecked_t fscache_n_op_gc;
51074+atomic_unchecked_t fscache_n_op_cancelled;
51075+atomic_unchecked_t fscache_n_op_rejected;
51076
51077-atomic_t fscache_n_attr_changed;
51078-atomic_t fscache_n_attr_changed_ok;
51079-atomic_t fscache_n_attr_changed_nobufs;
51080-atomic_t fscache_n_attr_changed_nomem;
51081-atomic_t fscache_n_attr_changed_calls;
51082+atomic_unchecked_t fscache_n_attr_changed;
51083+atomic_unchecked_t fscache_n_attr_changed_ok;
51084+atomic_unchecked_t fscache_n_attr_changed_nobufs;
51085+atomic_unchecked_t fscache_n_attr_changed_nomem;
51086+atomic_unchecked_t fscache_n_attr_changed_calls;
51087
51088-atomic_t fscache_n_allocs;
51089-atomic_t fscache_n_allocs_ok;
51090-atomic_t fscache_n_allocs_wait;
51091-atomic_t fscache_n_allocs_nobufs;
51092-atomic_t fscache_n_allocs_intr;
51093-atomic_t fscache_n_allocs_object_dead;
51094-atomic_t fscache_n_alloc_ops;
51095-atomic_t fscache_n_alloc_op_waits;
51096+atomic_unchecked_t fscache_n_allocs;
51097+atomic_unchecked_t fscache_n_allocs_ok;
51098+atomic_unchecked_t fscache_n_allocs_wait;
51099+atomic_unchecked_t fscache_n_allocs_nobufs;
51100+atomic_unchecked_t fscache_n_allocs_intr;
51101+atomic_unchecked_t fscache_n_allocs_object_dead;
51102+atomic_unchecked_t fscache_n_alloc_ops;
51103+atomic_unchecked_t fscache_n_alloc_op_waits;
51104
51105-atomic_t fscache_n_retrievals;
51106-atomic_t fscache_n_retrievals_ok;
51107-atomic_t fscache_n_retrievals_wait;
51108-atomic_t fscache_n_retrievals_nodata;
51109-atomic_t fscache_n_retrievals_nobufs;
51110-atomic_t fscache_n_retrievals_intr;
51111-atomic_t fscache_n_retrievals_nomem;
51112-atomic_t fscache_n_retrievals_object_dead;
51113-atomic_t fscache_n_retrieval_ops;
51114-atomic_t fscache_n_retrieval_op_waits;
51115+atomic_unchecked_t fscache_n_retrievals;
51116+atomic_unchecked_t fscache_n_retrievals_ok;
51117+atomic_unchecked_t fscache_n_retrievals_wait;
51118+atomic_unchecked_t fscache_n_retrievals_nodata;
51119+atomic_unchecked_t fscache_n_retrievals_nobufs;
51120+atomic_unchecked_t fscache_n_retrievals_intr;
51121+atomic_unchecked_t fscache_n_retrievals_nomem;
51122+atomic_unchecked_t fscache_n_retrievals_object_dead;
51123+atomic_unchecked_t fscache_n_retrieval_ops;
51124+atomic_unchecked_t fscache_n_retrieval_op_waits;
51125
51126-atomic_t fscache_n_stores;
51127-atomic_t fscache_n_stores_ok;
51128-atomic_t fscache_n_stores_again;
51129-atomic_t fscache_n_stores_nobufs;
51130-atomic_t fscache_n_stores_oom;
51131-atomic_t fscache_n_store_ops;
51132-atomic_t fscache_n_store_calls;
51133-atomic_t fscache_n_store_pages;
51134-atomic_t fscache_n_store_radix_deletes;
51135-atomic_t fscache_n_store_pages_over_limit;
51136+atomic_unchecked_t fscache_n_stores;
51137+atomic_unchecked_t fscache_n_stores_ok;
51138+atomic_unchecked_t fscache_n_stores_again;
51139+atomic_unchecked_t fscache_n_stores_nobufs;
51140+atomic_unchecked_t fscache_n_stores_oom;
51141+atomic_unchecked_t fscache_n_store_ops;
51142+atomic_unchecked_t fscache_n_store_calls;
51143+atomic_unchecked_t fscache_n_store_pages;
51144+atomic_unchecked_t fscache_n_store_radix_deletes;
51145+atomic_unchecked_t fscache_n_store_pages_over_limit;
51146
51147-atomic_t fscache_n_store_vmscan_not_storing;
51148-atomic_t fscache_n_store_vmscan_gone;
51149-atomic_t fscache_n_store_vmscan_busy;
51150-atomic_t fscache_n_store_vmscan_cancelled;
51151+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51152+atomic_unchecked_t fscache_n_store_vmscan_gone;
51153+atomic_unchecked_t fscache_n_store_vmscan_busy;
51154+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51155
51156-atomic_t fscache_n_marks;
51157-atomic_t fscache_n_uncaches;
51158+atomic_unchecked_t fscache_n_marks;
51159+atomic_unchecked_t fscache_n_uncaches;
51160
51161-atomic_t fscache_n_acquires;
51162-atomic_t fscache_n_acquires_null;
51163-atomic_t fscache_n_acquires_no_cache;
51164-atomic_t fscache_n_acquires_ok;
51165-atomic_t fscache_n_acquires_nobufs;
51166-atomic_t fscache_n_acquires_oom;
51167+atomic_unchecked_t fscache_n_acquires;
51168+atomic_unchecked_t fscache_n_acquires_null;
51169+atomic_unchecked_t fscache_n_acquires_no_cache;
51170+atomic_unchecked_t fscache_n_acquires_ok;
51171+atomic_unchecked_t fscache_n_acquires_nobufs;
51172+atomic_unchecked_t fscache_n_acquires_oom;
51173
51174-atomic_t fscache_n_updates;
51175-atomic_t fscache_n_updates_null;
51176-atomic_t fscache_n_updates_run;
51177+atomic_unchecked_t fscache_n_updates;
51178+atomic_unchecked_t fscache_n_updates_null;
51179+atomic_unchecked_t fscache_n_updates_run;
51180
51181-atomic_t fscache_n_relinquishes;
51182-atomic_t fscache_n_relinquishes_null;
51183-atomic_t fscache_n_relinquishes_waitcrt;
51184-atomic_t fscache_n_relinquishes_retire;
51185+atomic_unchecked_t fscache_n_relinquishes;
51186+atomic_unchecked_t fscache_n_relinquishes_null;
51187+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51188+atomic_unchecked_t fscache_n_relinquishes_retire;
51189
51190-atomic_t fscache_n_cookie_index;
51191-atomic_t fscache_n_cookie_data;
51192-atomic_t fscache_n_cookie_special;
51193+atomic_unchecked_t fscache_n_cookie_index;
51194+atomic_unchecked_t fscache_n_cookie_data;
51195+atomic_unchecked_t fscache_n_cookie_special;
51196
51197-atomic_t fscache_n_object_alloc;
51198-atomic_t fscache_n_object_no_alloc;
51199-atomic_t fscache_n_object_lookups;
51200-atomic_t fscache_n_object_lookups_negative;
51201-atomic_t fscache_n_object_lookups_positive;
51202-atomic_t fscache_n_object_lookups_timed_out;
51203-atomic_t fscache_n_object_created;
51204-atomic_t fscache_n_object_avail;
51205-atomic_t fscache_n_object_dead;
51206+atomic_unchecked_t fscache_n_object_alloc;
51207+atomic_unchecked_t fscache_n_object_no_alloc;
51208+atomic_unchecked_t fscache_n_object_lookups;
51209+atomic_unchecked_t fscache_n_object_lookups_negative;
51210+atomic_unchecked_t fscache_n_object_lookups_positive;
51211+atomic_unchecked_t fscache_n_object_lookups_timed_out;
51212+atomic_unchecked_t fscache_n_object_created;
51213+atomic_unchecked_t fscache_n_object_avail;
51214+atomic_unchecked_t fscache_n_object_dead;
51215
51216-atomic_t fscache_n_checkaux_none;
51217-atomic_t fscache_n_checkaux_okay;
51218-atomic_t fscache_n_checkaux_update;
51219-atomic_t fscache_n_checkaux_obsolete;
51220+atomic_unchecked_t fscache_n_checkaux_none;
51221+atomic_unchecked_t fscache_n_checkaux_okay;
51222+atomic_unchecked_t fscache_n_checkaux_update;
51223+atomic_unchecked_t fscache_n_checkaux_obsolete;
51224
51225 atomic_t fscache_n_cop_alloc_object;
51226 atomic_t fscache_n_cop_lookup_object;
51227@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
51228 seq_puts(m, "FS-Cache statistics\n");
51229
51230 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
51231- atomic_read(&fscache_n_cookie_index),
51232- atomic_read(&fscache_n_cookie_data),
51233- atomic_read(&fscache_n_cookie_special));
51234+ atomic_read_unchecked(&fscache_n_cookie_index),
51235+ atomic_read_unchecked(&fscache_n_cookie_data),
51236+ atomic_read_unchecked(&fscache_n_cookie_special));
51237
51238 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
51239- atomic_read(&fscache_n_object_alloc),
51240- atomic_read(&fscache_n_object_no_alloc),
51241- atomic_read(&fscache_n_object_avail),
51242- atomic_read(&fscache_n_object_dead));
51243+ atomic_read_unchecked(&fscache_n_object_alloc),
51244+ atomic_read_unchecked(&fscache_n_object_no_alloc),
51245+ atomic_read_unchecked(&fscache_n_object_avail),
51246+ atomic_read_unchecked(&fscache_n_object_dead));
51247 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
51248- atomic_read(&fscache_n_checkaux_none),
51249- atomic_read(&fscache_n_checkaux_okay),
51250- atomic_read(&fscache_n_checkaux_update),
51251- atomic_read(&fscache_n_checkaux_obsolete));
51252+ atomic_read_unchecked(&fscache_n_checkaux_none),
51253+ atomic_read_unchecked(&fscache_n_checkaux_okay),
51254+ atomic_read_unchecked(&fscache_n_checkaux_update),
51255+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
51256
51257 seq_printf(m, "Pages : mrk=%u unc=%u\n",
51258- atomic_read(&fscache_n_marks),
51259- atomic_read(&fscache_n_uncaches));
51260+ atomic_read_unchecked(&fscache_n_marks),
51261+ atomic_read_unchecked(&fscache_n_uncaches));
51262
51263 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
51264 " oom=%u\n",
51265- atomic_read(&fscache_n_acquires),
51266- atomic_read(&fscache_n_acquires_null),
51267- atomic_read(&fscache_n_acquires_no_cache),
51268- atomic_read(&fscache_n_acquires_ok),
51269- atomic_read(&fscache_n_acquires_nobufs),
51270- atomic_read(&fscache_n_acquires_oom));
51271+ atomic_read_unchecked(&fscache_n_acquires),
51272+ atomic_read_unchecked(&fscache_n_acquires_null),
51273+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
51274+ atomic_read_unchecked(&fscache_n_acquires_ok),
51275+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
51276+ atomic_read_unchecked(&fscache_n_acquires_oom));
51277
51278 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
51279- atomic_read(&fscache_n_object_lookups),
51280- atomic_read(&fscache_n_object_lookups_negative),
51281- atomic_read(&fscache_n_object_lookups_positive),
51282- atomic_read(&fscache_n_object_lookups_timed_out),
51283- atomic_read(&fscache_n_object_created));
51284+ atomic_read_unchecked(&fscache_n_object_lookups),
51285+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
51286+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
51287+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
51288+ atomic_read_unchecked(&fscache_n_object_created));
51289
51290 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
51291- atomic_read(&fscache_n_updates),
51292- atomic_read(&fscache_n_updates_null),
51293- atomic_read(&fscache_n_updates_run));
51294+ atomic_read_unchecked(&fscache_n_updates),
51295+ atomic_read_unchecked(&fscache_n_updates_null),
51296+ atomic_read_unchecked(&fscache_n_updates_run));
51297
51298 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51299- atomic_read(&fscache_n_relinquishes),
51300- atomic_read(&fscache_n_relinquishes_null),
51301- atomic_read(&fscache_n_relinquishes_waitcrt),
51302- atomic_read(&fscache_n_relinquishes_retire));
51303+ atomic_read_unchecked(&fscache_n_relinquishes),
51304+ atomic_read_unchecked(&fscache_n_relinquishes_null),
51305+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51306+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
51307
51308 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
51309- atomic_read(&fscache_n_attr_changed),
51310- atomic_read(&fscache_n_attr_changed_ok),
51311- atomic_read(&fscache_n_attr_changed_nobufs),
51312- atomic_read(&fscache_n_attr_changed_nomem),
51313- atomic_read(&fscache_n_attr_changed_calls));
51314+ atomic_read_unchecked(&fscache_n_attr_changed),
51315+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
51316+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
51317+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
51318+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
51319
51320 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
51321- atomic_read(&fscache_n_allocs),
51322- atomic_read(&fscache_n_allocs_ok),
51323- atomic_read(&fscache_n_allocs_wait),
51324- atomic_read(&fscache_n_allocs_nobufs),
51325- atomic_read(&fscache_n_allocs_intr));
51326+ atomic_read_unchecked(&fscache_n_allocs),
51327+ atomic_read_unchecked(&fscache_n_allocs_ok),
51328+ atomic_read_unchecked(&fscache_n_allocs_wait),
51329+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
51330+ atomic_read_unchecked(&fscache_n_allocs_intr));
51331 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
51332- atomic_read(&fscache_n_alloc_ops),
51333- atomic_read(&fscache_n_alloc_op_waits),
51334- atomic_read(&fscache_n_allocs_object_dead));
51335+ atomic_read_unchecked(&fscache_n_alloc_ops),
51336+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
51337+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
51338
51339 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
51340 " int=%u oom=%u\n",
51341- atomic_read(&fscache_n_retrievals),
51342- atomic_read(&fscache_n_retrievals_ok),
51343- atomic_read(&fscache_n_retrievals_wait),
51344- atomic_read(&fscache_n_retrievals_nodata),
51345- atomic_read(&fscache_n_retrievals_nobufs),
51346- atomic_read(&fscache_n_retrievals_intr),
51347- atomic_read(&fscache_n_retrievals_nomem));
51348+ atomic_read_unchecked(&fscache_n_retrievals),
51349+ atomic_read_unchecked(&fscache_n_retrievals_ok),
51350+ atomic_read_unchecked(&fscache_n_retrievals_wait),
51351+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
51352+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
51353+ atomic_read_unchecked(&fscache_n_retrievals_intr),
51354+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
51355 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
51356- atomic_read(&fscache_n_retrieval_ops),
51357- atomic_read(&fscache_n_retrieval_op_waits),
51358- atomic_read(&fscache_n_retrievals_object_dead));
51359+ atomic_read_unchecked(&fscache_n_retrieval_ops),
51360+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
51361+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
51362
51363 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
51364- atomic_read(&fscache_n_stores),
51365- atomic_read(&fscache_n_stores_ok),
51366- atomic_read(&fscache_n_stores_again),
51367- atomic_read(&fscache_n_stores_nobufs),
51368- atomic_read(&fscache_n_stores_oom));
51369+ atomic_read_unchecked(&fscache_n_stores),
51370+ atomic_read_unchecked(&fscache_n_stores_ok),
51371+ atomic_read_unchecked(&fscache_n_stores_again),
51372+ atomic_read_unchecked(&fscache_n_stores_nobufs),
51373+ atomic_read_unchecked(&fscache_n_stores_oom));
51374 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
51375- atomic_read(&fscache_n_store_ops),
51376- atomic_read(&fscache_n_store_calls),
51377- atomic_read(&fscache_n_store_pages),
51378- atomic_read(&fscache_n_store_radix_deletes),
51379- atomic_read(&fscache_n_store_pages_over_limit));
51380+ atomic_read_unchecked(&fscache_n_store_ops),
51381+ atomic_read_unchecked(&fscache_n_store_calls),
51382+ atomic_read_unchecked(&fscache_n_store_pages),
51383+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
51384+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
51385
51386 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
51387- atomic_read(&fscache_n_store_vmscan_not_storing),
51388- atomic_read(&fscache_n_store_vmscan_gone),
51389- atomic_read(&fscache_n_store_vmscan_busy),
51390- atomic_read(&fscache_n_store_vmscan_cancelled));
51391+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
51392+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
51393+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
51394+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
51395
51396 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
51397- atomic_read(&fscache_n_op_pend),
51398- atomic_read(&fscache_n_op_run),
51399- atomic_read(&fscache_n_op_enqueue),
51400- atomic_read(&fscache_n_op_cancelled),
51401- atomic_read(&fscache_n_op_rejected));
51402+ atomic_read_unchecked(&fscache_n_op_pend),
51403+ atomic_read_unchecked(&fscache_n_op_run),
51404+ atomic_read_unchecked(&fscache_n_op_enqueue),
51405+ atomic_read_unchecked(&fscache_n_op_cancelled),
51406+ atomic_read_unchecked(&fscache_n_op_rejected));
51407 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
51408- atomic_read(&fscache_n_op_deferred_release),
51409- atomic_read(&fscache_n_op_release),
51410- atomic_read(&fscache_n_op_gc));
51411+ atomic_read_unchecked(&fscache_n_op_deferred_release),
51412+ atomic_read_unchecked(&fscache_n_op_release),
51413+ atomic_read_unchecked(&fscache_n_op_gc));
51414
51415 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
51416 atomic_read(&fscache_n_cop_alloc_object),
51417diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
51418index de792dc..448b532 100644
51419--- a/fs/fuse/cuse.c
51420+++ b/fs/fuse/cuse.c
51421@@ -576,10 +576,12 @@ static int __init cuse_init(void)
51422 INIT_LIST_HEAD(&cuse_conntbl[i]);
51423
51424 /* inherit and extend fuse_dev_operations */
51425- cuse_channel_fops = fuse_dev_operations;
51426- cuse_channel_fops.owner = THIS_MODULE;
51427- cuse_channel_fops.open = cuse_channel_open;
51428- cuse_channel_fops.release = cuse_channel_release;
51429+ pax_open_kernel();
51430+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
51431+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
51432+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
51433+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
51434+ pax_close_kernel();
51435
51436 cuse_class = class_create(THIS_MODULE, "cuse");
51437 if (IS_ERR(cuse_class))
51438diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
51439index 1facb39..7f48557 100644
51440--- a/fs/fuse/dev.c
51441+++ b/fs/fuse/dev.c
51442@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51443 {
51444 struct fuse_notify_inval_entry_out outarg;
51445 int err = -EINVAL;
51446- char buf[FUSE_NAME_MAX+1];
51447+ char *buf = NULL;
51448 struct qstr name;
51449
51450 if (size < sizeof(outarg))
51451@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51452 if (outarg.namelen > FUSE_NAME_MAX)
51453 goto err;
51454
51455+ err = -ENOMEM;
51456+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
51457+ if (!buf)
51458+ goto err;
51459+
51460 err = -EINVAL;
51461 if (size != sizeof(outarg) + outarg.namelen + 1)
51462 goto err;
51463@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
51464
51465 down_read(&fc->killsb);
51466 err = -ENOENT;
51467- if (!fc->sb)
51468- goto err_unlock;
51469-
51470- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51471-
51472-err_unlock:
51473+ if (fc->sb)
51474+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
51475 up_read(&fc->killsb);
51476+ kfree(buf);
51477 return err;
51478
51479 err:
51480 fuse_copy_finish(cs);
51481+ kfree(buf);
51482 return err;
51483 }
51484
51485diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
51486index 4787ae6..73efff7 100644
51487--- a/fs/fuse/dir.c
51488+++ b/fs/fuse/dir.c
51489@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
51490 return link;
51491 }
51492
51493-static void free_link(char *link)
51494+static void free_link(const char *link)
51495 {
51496 if (!IS_ERR(link))
51497 free_page((unsigned long) link);
51498diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
51499index 247436c..e650ccb 100644
51500--- a/fs/gfs2/ops_inode.c
51501+++ b/fs/gfs2/ops_inode.c
51502@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
51503 unsigned int x;
51504 int error;
51505
51506+ pax_track_stack();
51507+
51508 if (ndentry->d_inode) {
51509 nip = GFS2_I(ndentry->d_inode);
51510 if (ip == nip)
51511diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
51512index 4463297..4fed53b 100644
51513--- a/fs/gfs2/sys.c
51514+++ b/fs/gfs2/sys.c
51515@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
51516 return a->store ? a->store(sdp, buf, len) : len;
51517 }
51518
51519-static struct sysfs_ops gfs2_attr_ops = {
51520+static const struct sysfs_ops gfs2_attr_ops = {
51521 .show = gfs2_attr_show,
51522 .store = gfs2_attr_store,
51523 };
51524@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
51525 return 0;
51526 }
51527
51528-static struct kset_uevent_ops gfs2_uevent_ops = {
51529+static const struct kset_uevent_ops gfs2_uevent_ops = {
51530 .uevent = gfs2_uevent,
51531 };
51532
51533diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
51534index f6874ac..7cd98a8 100644
51535--- a/fs/hfsplus/catalog.c
51536+++ b/fs/hfsplus/catalog.c
51537@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
51538 int err;
51539 u16 type;
51540
51541+ pax_track_stack();
51542+
51543 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
51544 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
51545 if (err)
51546@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
51547 int entry_size;
51548 int err;
51549
51550+ pax_track_stack();
51551+
51552 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
51553 sb = dir->i_sb;
51554 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
51555@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
51556 int entry_size, type;
51557 int err = 0;
51558
51559+ pax_track_stack();
51560+
51561 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
51562 dst_dir->i_ino, dst_name->name);
51563 sb = src_dir->i_sb;
51564diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
51565index 5f40236..dac3421 100644
51566--- a/fs/hfsplus/dir.c
51567+++ b/fs/hfsplus/dir.c
51568@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
51569 struct hfsplus_readdir_data *rd;
51570 u16 type;
51571
51572+ pax_track_stack();
51573+
51574 if (filp->f_pos >= inode->i_size)
51575 return 0;
51576
51577diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
51578index 1bcf597..905a251 100644
51579--- a/fs/hfsplus/inode.c
51580+++ b/fs/hfsplus/inode.c
51581@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
51582 int res = 0;
51583 u16 type;
51584
51585+ pax_track_stack();
51586+
51587 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
51588
51589 HFSPLUS_I(inode).dev = 0;
51590@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
51591 struct hfs_find_data fd;
51592 hfsplus_cat_entry entry;
51593
51594+ pax_track_stack();
51595+
51596 if (HFSPLUS_IS_RSRC(inode))
51597 main_inode = HFSPLUS_I(inode).rsrc_inode;
51598
51599diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
51600index f457d2c..7ef4ad5 100644
51601--- a/fs/hfsplus/ioctl.c
51602+++ b/fs/hfsplus/ioctl.c
51603@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51604 struct hfsplus_cat_file *file;
51605 int res;
51606
51607+ pax_track_stack();
51608+
51609 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51610 return -EOPNOTSUPP;
51611
51612@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51613 struct hfsplus_cat_file *file;
51614 ssize_t res = 0;
51615
51616+ pax_track_stack();
51617+
51618 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51619 return -EOPNOTSUPP;
51620
51621diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51622index 43022f3..7298079 100644
51623--- a/fs/hfsplus/super.c
51624+++ b/fs/hfsplus/super.c
51625@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51626 struct nls_table *nls = NULL;
51627 int err = -EINVAL;
51628
51629+ pax_track_stack();
51630+
51631 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51632 if (!sbi)
51633 return -ENOMEM;
51634diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51635index 87a1258..5694d91 100644
51636--- a/fs/hugetlbfs/inode.c
51637+++ b/fs/hugetlbfs/inode.c
51638@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51639 .kill_sb = kill_litter_super,
51640 };
51641
51642-static struct vfsmount *hugetlbfs_vfsmount;
51643+struct vfsmount *hugetlbfs_vfsmount;
51644
51645 static int can_do_hugetlb_shm(void)
51646 {
51647diff --git a/fs/ioctl.c b/fs/ioctl.c
51648index 6c75110..19d2c3c 100644
51649--- a/fs/ioctl.c
51650+++ b/fs/ioctl.c
51651@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51652 u64 phys, u64 len, u32 flags)
51653 {
51654 struct fiemap_extent extent;
51655- struct fiemap_extent *dest = fieinfo->fi_extents_start;
51656+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51657
51658 /* only count the extents */
51659 if (fieinfo->fi_extents_max == 0) {
51660@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51661
51662 fieinfo.fi_flags = fiemap.fm_flags;
51663 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51664- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51665+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51666
51667 if (fiemap.fm_extent_count != 0 &&
51668 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51669@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51670 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51671 fiemap.fm_flags = fieinfo.fi_flags;
51672 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51673- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51674+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51675 error = -EFAULT;
51676
51677 return error;
51678diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51679index b0435dd..81ee0be 100644
51680--- a/fs/jbd/checkpoint.c
51681+++ b/fs/jbd/checkpoint.c
51682@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51683 tid_t this_tid;
51684 int result;
51685
51686+ pax_track_stack();
51687+
51688 jbd_debug(1, "Start checkpoint\n");
51689
51690 /*
51691diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51692index 546d153..736896c 100644
51693--- a/fs/jffs2/compr_rtime.c
51694+++ b/fs/jffs2/compr_rtime.c
51695@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51696 int outpos = 0;
51697 int pos=0;
51698
51699+ pax_track_stack();
51700+
51701 memset(positions,0,sizeof(positions));
51702
51703 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51704@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51705 int outpos = 0;
51706 int pos=0;
51707
51708+ pax_track_stack();
51709+
51710 memset(positions,0,sizeof(positions));
51711
51712 while (outpos<destlen) {
51713diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51714index 170d289..3254b98 100644
51715--- a/fs/jffs2/compr_rubin.c
51716+++ b/fs/jffs2/compr_rubin.c
51717@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51718 int ret;
51719 uint32_t mysrclen, mydstlen;
51720
51721+ pax_track_stack();
51722+
51723 mysrclen = *sourcelen;
51724 mydstlen = *dstlen - 8;
51725
51726diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51727index b47679b..00d65d3 100644
51728--- a/fs/jffs2/erase.c
51729+++ b/fs/jffs2/erase.c
51730@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51731 struct jffs2_unknown_node marker = {
51732 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51733 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51734- .totlen = cpu_to_je32(c->cleanmarker_size)
51735+ .totlen = cpu_to_je32(c->cleanmarker_size),
51736+ .hdr_crc = cpu_to_je32(0)
51737 };
51738
51739 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51740diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51741index 5ef7bac..4fd1e3c 100644
51742--- a/fs/jffs2/wbuf.c
51743+++ b/fs/jffs2/wbuf.c
51744@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51745 {
51746 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51747 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51748- .totlen = constant_cpu_to_je32(8)
51749+ .totlen = constant_cpu_to_je32(8),
51750+ .hdr_crc = constant_cpu_to_je32(0)
51751 };
51752
51753 /*
51754diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51755index 082e844..52012a1 100644
51756--- a/fs/jffs2/xattr.c
51757+++ b/fs/jffs2/xattr.c
51758@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51759
51760 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51761
51762+ pax_track_stack();
51763+
51764 /* Phase.1 : Merge same xref */
51765 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51766 xref_tmphash[i] = NULL;
51767diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51768index 2234c73..f6e6e6b 100644
51769--- a/fs/jfs/super.c
51770+++ b/fs/jfs/super.c
51771@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51772
51773 jfs_inode_cachep =
51774 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51775- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51776+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51777 init_once);
51778 if (jfs_inode_cachep == NULL)
51779 return -ENOMEM;
51780diff --git a/fs/libfs.c b/fs/libfs.c
51781index ba36e93..3153fce 100644
51782--- a/fs/libfs.c
51783+++ b/fs/libfs.c
51784@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51785
51786 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51787 struct dentry *next;
51788+ char d_name[sizeof(next->d_iname)];
51789+ const unsigned char *name;
51790+
51791 next = list_entry(p, struct dentry, d_u.d_child);
51792 if (d_unhashed(next) || !next->d_inode)
51793 continue;
51794
51795 spin_unlock(&dcache_lock);
51796- if (filldir(dirent, next->d_name.name,
51797+ name = next->d_name.name;
51798+ if (name == next->d_iname) {
51799+ memcpy(d_name, name, next->d_name.len);
51800+ name = d_name;
51801+ }
51802+ if (filldir(dirent, name,
51803 next->d_name.len, filp->f_pos,
51804 next->d_inode->i_ino,
51805 dt_type(next->d_inode)) < 0)
51806diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51807index c325a83..d15b07b 100644
51808--- a/fs/lockd/clntproc.c
51809+++ b/fs/lockd/clntproc.c
51810@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51811 /*
51812 * Cookie counter for NLM requests
51813 */
51814-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51815+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51816
51817 void nlmclnt_next_cookie(struct nlm_cookie *c)
51818 {
51819- u32 cookie = atomic_inc_return(&nlm_cookie);
51820+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51821
51822 memcpy(c->data, &cookie, 4);
51823 c->len=4;
51824@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51825 struct nlm_rqst reqst, *req;
51826 int status;
51827
51828+ pax_track_stack();
51829+
51830 req = &reqst;
51831 memset(req, 0, sizeof(*req));
51832 locks_init_lock(&req->a_args.lock.fl);
51833diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51834index 1a54ae1..6a16c27 100644
51835--- a/fs/lockd/svc.c
51836+++ b/fs/lockd/svc.c
51837@@ -43,7 +43,7 @@
51838
51839 static struct svc_program nlmsvc_program;
51840
51841-struct nlmsvc_binding * nlmsvc_ops;
51842+const struct nlmsvc_binding * nlmsvc_ops;
51843 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51844
51845 static DEFINE_MUTEX(nlmsvc_mutex);
51846diff --git a/fs/locks.c b/fs/locks.c
51847index a8794f2..4041e55 100644
51848--- a/fs/locks.c
51849+++ b/fs/locks.c
51850@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51851
51852 static struct kmem_cache *filelock_cache __read_mostly;
51853
51854+static void locks_init_lock_always(struct file_lock *fl)
51855+{
51856+ fl->fl_next = NULL;
51857+ fl->fl_fasync = NULL;
51858+ fl->fl_owner = NULL;
51859+ fl->fl_pid = 0;
51860+ fl->fl_nspid = NULL;
51861+ fl->fl_file = NULL;
51862+ fl->fl_flags = 0;
51863+ fl->fl_type = 0;
51864+ fl->fl_start = fl->fl_end = 0;
51865+}
51866+
51867 /* Allocate an empty lock structure. */
51868 static struct file_lock *locks_alloc_lock(void)
51869 {
51870- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51871+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51872+
51873+ if (fl)
51874+ locks_init_lock_always(fl);
51875+
51876+ return fl;
51877 }
51878
51879 void locks_release_private(struct file_lock *fl)
51880@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51881 INIT_LIST_HEAD(&fl->fl_link);
51882 INIT_LIST_HEAD(&fl->fl_block);
51883 init_waitqueue_head(&fl->fl_wait);
51884- fl->fl_next = NULL;
51885- fl->fl_fasync = NULL;
51886- fl->fl_owner = NULL;
51887- fl->fl_pid = 0;
51888- fl->fl_nspid = NULL;
51889- fl->fl_file = NULL;
51890- fl->fl_flags = 0;
51891- fl->fl_type = 0;
51892- fl->fl_start = fl->fl_end = 0;
51893 fl->fl_ops = NULL;
51894 fl->fl_lmops = NULL;
51895+ locks_init_lock_always(fl);
51896 }
51897
51898 EXPORT_SYMBOL(locks_init_lock);
51899@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51900 return;
51901
51902 if (filp->f_op && filp->f_op->flock) {
51903- struct file_lock fl = {
51904+ struct file_lock flock = {
51905 .fl_pid = current->tgid,
51906 .fl_file = filp,
51907 .fl_flags = FL_FLOCK,
51908 .fl_type = F_UNLCK,
51909 .fl_end = OFFSET_MAX,
51910 };
51911- filp->f_op->flock(filp, F_SETLKW, &fl);
51912- if (fl.fl_ops && fl.fl_ops->fl_release_private)
51913- fl.fl_ops->fl_release_private(&fl);
51914+ filp->f_op->flock(filp, F_SETLKW, &flock);
51915+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
51916+ flock.fl_ops->fl_release_private(&flock);
51917 }
51918
51919 lock_kernel();
51920diff --git a/fs/mbcache.c b/fs/mbcache.c
51921index ec88ff3..b843a82 100644
51922--- a/fs/mbcache.c
51923+++ b/fs/mbcache.c
51924@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51925 if (!cache)
51926 goto fail;
51927 cache->c_name = name;
51928- cache->c_op.free = NULL;
51929+ *(void **)&cache->c_op.free = NULL;
51930 if (cache_op)
51931- cache->c_op.free = cache_op->free;
51932+ *(void **)&cache->c_op.free = cache_op->free;
51933 atomic_set(&cache->c_entry_count, 0);
51934 cache->c_bucket_bits = bucket_bits;
51935 #ifdef MB_CACHE_INDEXES_COUNT
51936diff --git a/fs/namei.c b/fs/namei.c
51937index b0afbd4..8d065a1 100644
51938--- a/fs/namei.c
51939+++ b/fs/namei.c
51940@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51941 return ret;
51942
51943 /*
51944+ * Searching includes executable on directories, else just read.
51945+ */
51946+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51947+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51948+ if (capable(CAP_DAC_READ_SEARCH))
51949+ return 0;
51950+
51951+ /*
51952 * Read/write DACs are always overridable.
51953 * Executable DACs are overridable if at least one exec bit is set.
51954 */
51955@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51956 if (capable(CAP_DAC_OVERRIDE))
51957 return 0;
51958
51959- /*
51960- * Searching includes executable on directories, else just read.
51961- */
51962- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51963- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51964- if (capable(CAP_DAC_READ_SEARCH))
51965- return 0;
51966-
51967 return -EACCES;
51968 }
51969
51970@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51971 if (!ret)
51972 goto ok;
51973
51974- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51975+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51976+ capable(CAP_DAC_OVERRIDE))
51977 goto ok;
51978
51979 return ret;
51980@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51981 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51982 error = PTR_ERR(cookie);
51983 if (!IS_ERR(cookie)) {
51984- char *s = nd_get_link(nd);
51985+ const char *s = nd_get_link(nd);
51986 error = 0;
51987 if (s)
51988 error = __vfs_follow_link(nd, s);
51989@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51990 err = security_inode_follow_link(path->dentry, nd);
51991 if (err)
51992 goto loop;
51993+
51994+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51995+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51996+ err = -EACCES;
51997+ goto loop;
51998+ }
51999+
52000 current->link_count++;
52001 current->total_link_count++;
52002 nd->depth++;
52003@@ -1016,11 +1024,19 @@ return_reval:
52004 break;
52005 }
52006 return_base:
52007+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
52008+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52009+ path_put(&nd->path);
52010+ return -ENOENT;
52011+ }
52012 return 0;
52013 out_dput:
52014 path_put_conditional(&next, nd);
52015 break;
52016 }
52017+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
52018+ err = -ENOENT;
52019+
52020 path_put(&nd->path);
52021 return_err:
52022 return err;
52023@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
52024 int retval = path_init(dfd, name, flags, nd);
52025 if (!retval)
52026 retval = path_walk(name, nd);
52027- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
52028- nd->path.dentry->d_inode))
52029- audit_inode(name, nd->path.dentry);
52030+
52031+ if (likely(!retval)) {
52032+ if (nd->path.dentry && nd->path.dentry->d_inode) {
52033+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
52034+ retval = -ENOENT;
52035+ if (!audit_dummy_context())
52036+ audit_inode(name, nd->path.dentry);
52037+ }
52038+ }
52039 if (nd->root.mnt) {
52040 path_put(&nd->root);
52041 nd->root.mnt = NULL;
52042 }
52043+
52044 return retval;
52045 }
52046
52047@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
52048 if (error)
52049 goto err_out;
52050
52051+
52052+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
52053+ error = -EPERM;
52054+ goto err_out;
52055+ }
52056+ if (gr_handle_rawio(inode)) {
52057+ error = -EPERM;
52058+ goto err_out;
52059+ }
52060+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
52061+ error = -EACCES;
52062+ goto err_out;
52063+ }
52064+
52065 if (flag & O_TRUNC) {
52066 error = get_write_access(inode);
52067 if (error)
52068@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
52069 {
52070 int error;
52071 struct dentry *dir = nd->path.dentry;
52072+ int acc_mode = ACC_MODE(flag);
52073+
52074+ if (flag & O_TRUNC)
52075+ acc_mode |= MAY_WRITE;
52076+ if (flag & O_APPEND)
52077+ acc_mode |= MAY_APPEND;
52078+
52079+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
52080+ error = -EACCES;
52081+ goto out_unlock;
52082+ }
52083
52084 if (!IS_POSIXACL(dir->d_inode))
52085 mode &= ~current_umask();
52086@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
52087 if (error)
52088 goto out_unlock;
52089 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
52090+ if (!error)
52091+ gr_handle_create(path->dentry, nd->path.mnt);
52092 out_unlock:
52093 mutex_unlock(&dir->d_inode->i_mutex);
52094 dput(nd->path.dentry);
52095@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
52096 &nd, flag);
52097 if (error)
52098 return ERR_PTR(error);
52099+
52100+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
52101+ error = -EPERM;
52102+ goto exit;
52103+ }
52104+
52105+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
52106+ error = -EPERM;
52107+ goto exit;
52108+ }
52109+
52110+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
52111+ error = -EACCES;
52112+ goto exit;
52113+ }
52114+
52115 goto ok;
52116 }
52117
52118@@ -1795,6 +1861,19 @@ do_last:
52119 /*
52120 * It already exists.
52121 */
52122+
52123+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
52124+ error = -ENOENT;
52125+ goto exit_mutex_unlock;
52126+ }
52127+
52128+ /* only check if O_CREAT is specified, all other checks need
52129+ to go into may_open */
52130+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
52131+ error = -EACCES;
52132+ goto exit_mutex_unlock;
52133+ }
52134+
52135 mutex_unlock(&dir->d_inode->i_mutex);
52136 audit_inode(pathname, path.dentry);
52137
52138@@ -1887,6 +1966,13 @@ do_link:
52139 error = security_inode_follow_link(path.dentry, &nd);
52140 if (error)
52141 goto exit_dput;
52142+
52143+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
52144+ path.dentry, nd.path.mnt)) {
52145+ error = -EACCES;
52146+ goto exit_dput;
52147+ }
52148+
52149 error = __do_follow_link(&path, &nd);
52150 if (error) {
52151 /* Does someone understand code flow here? Or it is only
52152@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
52153 }
52154 return dentry;
52155 eexist:
52156+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
52157+ dput(dentry);
52158+ return ERR_PTR(-ENOENT);
52159+ }
52160 dput(dentry);
52161 dentry = ERR_PTR(-EEXIST);
52162 fail:
52163@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
52164 error = may_mknod(mode);
52165 if (error)
52166 goto out_dput;
52167+
52168+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
52169+ error = -EPERM;
52170+ goto out_dput;
52171+ }
52172+
52173+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
52174+ error = -EACCES;
52175+ goto out_dput;
52176+ }
52177+
52178 error = mnt_want_write(nd.path.mnt);
52179 if (error)
52180 goto out_dput;
52181@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
52182 }
52183 out_drop_write:
52184 mnt_drop_write(nd.path.mnt);
52185+
52186+ if (!error)
52187+ gr_handle_create(dentry, nd.path.mnt);
52188 out_dput:
52189 dput(dentry);
52190 out_unlock:
52191@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
52192 if (IS_ERR(dentry))
52193 goto out_unlock;
52194
52195+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
52196+ error = -EACCES;
52197+ goto out_dput;
52198+ }
52199+
52200 if (!IS_POSIXACL(nd.path.dentry->d_inode))
52201 mode &= ~current_umask();
52202 error = mnt_want_write(nd.path.mnt);
52203@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
52204 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
52205 out_drop_write:
52206 mnt_drop_write(nd.path.mnt);
52207+
52208+ if (!error)
52209+ gr_handle_create(dentry, nd.path.mnt);
52210+
52211 out_dput:
52212 dput(dentry);
52213 out_unlock:
52214@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52215 char * name;
52216 struct dentry *dentry;
52217 struct nameidata nd;
52218+ ino_t saved_ino = 0;
52219+ dev_t saved_dev = 0;
52220
52221 error = user_path_parent(dfd, pathname, &nd, &name);
52222 if (error)
52223@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
52224 error = PTR_ERR(dentry);
52225 if (IS_ERR(dentry))
52226 goto exit2;
52227+
52228+ if (dentry->d_inode != NULL) {
52229+ saved_ino = dentry->d_inode->i_ino;
52230+ saved_dev = gr_get_dev_from_dentry(dentry);
52231+
52232+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
52233+ error = -EACCES;
52234+ goto exit3;
52235+ }
52236+ }
52237+
52238 error = mnt_want_write(nd.path.mnt);
52239 if (error)
52240 goto exit3;
52241@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52242 if (error)
52243 goto exit4;
52244 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
52245+ if (!error && (saved_dev || saved_ino))
52246+ gr_handle_delete(saved_ino, saved_dev);
52247 exit4:
52248 mnt_drop_write(nd.path.mnt);
52249 exit3:
52250@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52251 struct dentry *dentry;
52252 struct nameidata nd;
52253 struct inode *inode = NULL;
52254+ ino_t saved_ino = 0;
52255+ dev_t saved_dev = 0;
52256
52257 error = user_path_parent(dfd, pathname, &nd, &name);
52258 if (error)
52259@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52260 if (nd.last.name[nd.last.len])
52261 goto slashes;
52262 inode = dentry->d_inode;
52263- if (inode)
52264+ if (inode) {
52265+ if (inode->i_nlink <= 1) {
52266+ saved_ino = inode->i_ino;
52267+ saved_dev = gr_get_dev_from_dentry(dentry);
52268+ }
52269+
52270 atomic_inc(&inode->i_count);
52271+
52272+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
52273+ error = -EACCES;
52274+ goto exit2;
52275+ }
52276+ }
52277 error = mnt_want_write(nd.path.mnt);
52278 if (error)
52279 goto exit2;
52280@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52281 if (error)
52282 goto exit3;
52283 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
52284+ if (!error && (saved_ino || saved_dev))
52285+ gr_handle_delete(saved_ino, saved_dev);
52286 exit3:
52287 mnt_drop_write(nd.path.mnt);
52288 exit2:
52289@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52290 if (IS_ERR(dentry))
52291 goto out_unlock;
52292
52293+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
52294+ error = -EACCES;
52295+ goto out_dput;
52296+ }
52297+
52298 error = mnt_want_write(nd.path.mnt);
52299 if (error)
52300 goto out_dput;
52301@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
52302 if (error)
52303 goto out_drop_write;
52304 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
52305+ if (!error)
52306+ gr_handle_create(dentry, nd.path.mnt);
52307 out_drop_write:
52308 mnt_drop_write(nd.path.mnt);
52309 out_dput:
52310@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52311 error = PTR_ERR(new_dentry);
52312 if (IS_ERR(new_dentry))
52313 goto out_unlock;
52314+
52315+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52316+ old_path.dentry->d_inode,
52317+ old_path.dentry->d_inode->i_mode, to)) {
52318+ error = -EACCES;
52319+ goto out_dput;
52320+ }
52321+
52322+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
52323+ old_path.dentry, old_path.mnt, to)) {
52324+ error = -EACCES;
52325+ goto out_dput;
52326+ }
52327+
52328 error = mnt_want_write(nd.path.mnt);
52329 if (error)
52330 goto out_dput;
52331@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52332 if (error)
52333 goto out_drop_write;
52334 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
52335+ if (!error)
52336+ gr_handle_create(new_dentry, nd.path.mnt);
52337 out_drop_write:
52338 mnt_drop_write(nd.path.mnt);
52339 out_dput:
52340@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52341 char *to;
52342 int error;
52343
52344+ pax_track_stack();
52345+
52346 error = user_path_parent(olddfd, oldname, &oldnd, &from);
52347 if (error)
52348 goto exit;
52349@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52350 if (new_dentry == trap)
52351 goto exit5;
52352
52353+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52354+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
52355+ to);
52356+ if (error)
52357+ goto exit5;
52358+
52359 error = mnt_want_write(oldnd.path.mnt);
52360 if (error)
52361 goto exit5;
52362@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
52363 goto exit6;
52364 error = vfs_rename(old_dir->d_inode, old_dentry,
52365 new_dir->d_inode, new_dentry);
52366+ if (!error)
52367+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52368+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52369 exit6:
52370 mnt_drop_write(oldnd.path.mnt);
52371 exit5:
52372@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52373
52374 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52375 {
52376+ char tmpbuf[64];
52377+ const char *newlink;
52378 int len;
52379
52380 len = PTR_ERR(link);
52381@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52382 len = strlen(link);
52383 if (len > (unsigned) buflen)
52384 len = buflen;
52385- if (copy_to_user(buffer, link, len))
52386+
52387+ if (len < sizeof(tmpbuf)) {
52388+ memcpy(tmpbuf, link, len);
52389+ newlink = tmpbuf;
52390+ } else
52391+ newlink = link;
52392+
52393+ if (copy_to_user(buffer, newlink, len))
52394 len = -EFAULT;
52395 out:
52396 return len;
52397diff --git a/fs/namespace.c b/fs/namespace.c
52398index 2beb0fb..11a95a5 100644
52399--- a/fs/namespace.c
52400+++ b/fs/namespace.c
52401@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52402 if (!(sb->s_flags & MS_RDONLY))
52403 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52404 up_write(&sb->s_umount);
52405+
52406+ gr_log_remount(mnt->mnt_devname, retval);
52407+
52408 return retval;
52409 }
52410
52411@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
52412 security_sb_umount_busy(mnt);
52413 up_write(&namespace_sem);
52414 release_mounts(&umount_list);
52415+
52416+ gr_log_unmount(mnt->mnt_devname, retval);
52417+
52418 return retval;
52419 }
52420
52421@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52422 if (retval)
52423 goto dput_out;
52424
52425+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52426+ retval = -EPERM;
52427+ goto dput_out;
52428+ }
52429+
52430+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52431+ retval = -EPERM;
52432+ goto dput_out;
52433+ }
52434+
52435 if (flags & MS_REMOUNT)
52436 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52437 data_page);
52438@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
52439 dev_name, data_page);
52440 dput_out:
52441 path_put(&path);
52442+
52443+ gr_log_mount(dev_name, dir_name, retval);
52444+
52445 return retval;
52446 }
52447
52448@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52449 goto out1;
52450 }
52451
52452+ if (gr_handle_chroot_pivot()) {
52453+ error = -EPERM;
52454+ path_put(&old);
52455+ goto out1;
52456+ }
52457+
52458 read_lock(&current->fs->lock);
52459 root = current->fs->root;
52460 path_get(&current->fs->root);
52461diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
52462index b8b5b30..2bd9ccb 100644
52463--- a/fs/ncpfs/dir.c
52464+++ b/fs/ncpfs/dir.c
52465@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
52466 int res, val = 0, len;
52467 __u8 __name[NCP_MAXPATHLEN + 1];
52468
52469+ pax_track_stack();
52470+
52471 parent = dget_parent(dentry);
52472 dir = parent->d_inode;
52473
52474@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
52475 int error, res, len;
52476 __u8 __name[NCP_MAXPATHLEN + 1];
52477
52478+ pax_track_stack();
52479+
52480 lock_kernel();
52481 error = -EIO;
52482 if (!ncp_conn_valid(server))
52483@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
52484 int error, result, len;
52485 int opmode;
52486 __u8 __name[NCP_MAXPATHLEN + 1];
52487-
52488+
52489 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
52490 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
52491
52492+ pax_track_stack();
52493+
52494 error = -EIO;
52495 lock_kernel();
52496 if (!ncp_conn_valid(server))
52497@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52498 int error, len;
52499 __u8 __name[NCP_MAXPATHLEN + 1];
52500
52501+ pax_track_stack();
52502+
52503 DPRINTK("ncp_mkdir: making %s/%s\n",
52504 dentry->d_parent->d_name.name, dentry->d_name.name);
52505
52506@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
52507 if (!ncp_conn_valid(server))
52508 goto out;
52509
52510+ pax_track_stack();
52511+
52512 ncp_age_dentry(server, dentry);
52513 len = sizeof(__name);
52514 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
52515@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
52516 int old_len, new_len;
52517 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
52518
52519+ pax_track_stack();
52520+
52521 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
52522 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
52523 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
52524diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
52525index cf98da1..da890a9 100644
52526--- a/fs/ncpfs/inode.c
52527+++ b/fs/ncpfs/inode.c
52528@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
52529 #endif
52530 struct ncp_entry_info finfo;
52531
52532+ pax_track_stack();
52533+
52534 data.wdog_pid = NULL;
52535 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
52536 if (!server)
52537diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
52538index bfaef7b..e9d03ca 100644
52539--- a/fs/nfs/inode.c
52540+++ b/fs/nfs/inode.c
52541@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
52542 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
52543 nfsi->attrtimeo_timestamp = jiffies;
52544
52545- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
52546+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
52547 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
52548 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
52549 else
52550@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
52551 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
52552 }
52553
52554-static atomic_long_t nfs_attr_generation_counter;
52555+static atomic_long_unchecked_t nfs_attr_generation_counter;
52556
52557 static unsigned long nfs_read_attr_generation_counter(void)
52558 {
52559- return atomic_long_read(&nfs_attr_generation_counter);
52560+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
52561 }
52562
52563 unsigned long nfs_inc_attr_generation_counter(void)
52564 {
52565- return atomic_long_inc_return(&nfs_attr_generation_counter);
52566+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
52567 }
52568
52569 void nfs_fattr_init(struct nfs_fattr *fattr)
52570diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
52571index cc2f505..f6a236f 100644
52572--- a/fs/nfsd/lockd.c
52573+++ b/fs/nfsd/lockd.c
52574@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
52575 fput(filp);
52576 }
52577
52578-static struct nlmsvc_binding nfsd_nlm_ops = {
52579+static const struct nlmsvc_binding nfsd_nlm_ops = {
52580 .fopen = nlm_fopen, /* open file for locking */
52581 .fclose = nlm_fclose, /* close file */
52582 };
52583diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
52584index cfc3391..dcc083a 100644
52585--- a/fs/nfsd/nfs4state.c
52586+++ b/fs/nfsd/nfs4state.c
52587@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
52588 unsigned int cmd;
52589 int err;
52590
52591+ pax_track_stack();
52592+
52593 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
52594 (long long) lock->lk_offset,
52595 (long long) lock->lk_length);
52596diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
52597index 4a82a96..0d5fb49 100644
52598--- a/fs/nfsd/nfs4xdr.c
52599+++ b/fs/nfsd/nfs4xdr.c
52600@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52601 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52602 u32 minorversion = resp->cstate.minorversion;
52603
52604+ pax_track_stack();
52605+
52606 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52607 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52608 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52609diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52610index 2e09588..596421d 100644
52611--- a/fs/nfsd/vfs.c
52612+++ b/fs/nfsd/vfs.c
52613@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52614 } else {
52615 oldfs = get_fs();
52616 set_fs(KERNEL_DS);
52617- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52618+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52619 set_fs(oldfs);
52620 }
52621
52622@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52623
52624 /* Write the data. */
52625 oldfs = get_fs(); set_fs(KERNEL_DS);
52626- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52627+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52628 set_fs(oldfs);
52629 if (host_err < 0)
52630 goto out_nfserr;
52631@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52632 */
52633
52634 oldfs = get_fs(); set_fs(KERNEL_DS);
52635- host_err = inode->i_op->readlink(dentry, buf, *lenp);
52636+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52637 set_fs(oldfs);
52638
52639 if (host_err < 0)
52640diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52641index f6af760..d0adf34 100644
52642--- a/fs/nilfs2/ioctl.c
52643+++ b/fs/nilfs2/ioctl.c
52644@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52645 unsigned int cmd, void __user *argp)
52646 {
52647 struct nilfs_argv argv[5];
52648- const static size_t argsz[5] = {
52649+ static const size_t argsz[5] = {
52650 sizeof(struct nilfs_vdesc),
52651 sizeof(struct nilfs_period),
52652 sizeof(__u64),
52653@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52654 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52655 goto out_free;
52656
52657+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52658+ goto out_free;
52659+
52660 len = argv[n].v_size * argv[n].v_nmembs;
52661 base = (void __user *)(unsigned long)argv[n].v_base;
52662 if (len == 0) {
52663diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52664index 7e54e52..9337248 100644
52665--- a/fs/notify/dnotify/dnotify.c
52666+++ b/fs/notify/dnotify/dnotify.c
52667@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52668 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52669 }
52670
52671-static struct fsnotify_ops dnotify_fsnotify_ops = {
52672+static const struct fsnotify_ops dnotify_fsnotify_ops = {
52673 .handle_event = dnotify_handle_event,
52674 .should_send_event = dnotify_should_send_event,
52675 .free_group_priv = NULL,
52676diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52677index b8bf53b..c518688 100644
52678--- a/fs/notify/notification.c
52679+++ b/fs/notify/notification.c
52680@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52681 * get set to 0 so it will never get 'freed'
52682 */
52683 static struct fsnotify_event q_overflow_event;
52684-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52685+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52686
52687 /**
52688 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52689@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52690 */
52691 u32 fsnotify_get_cookie(void)
52692 {
52693- return atomic_inc_return(&fsnotify_sync_cookie);
52694+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52695 }
52696 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52697
52698diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52699index 5a9e344..0f8cd28 100644
52700--- a/fs/ntfs/dir.c
52701+++ b/fs/ntfs/dir.c
52702@@ -1328,7 +1328,7 @@ find_next_index_buffer:
52703 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52704 ~(s64)(ndir->itype.index.block_size - 1)));
52705 /* Bounds checks. */
52706- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52707+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52708 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52709 "inode 0x%lx or driver bug.", vdir->i_ino);
52710 goto err_out;
52711diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52712index 663c0e3..b6868e9 100644
52713--- a/fs/ntfs/file.c
52714+++ b/fs/ntfs/file.c
52715@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52716 #endif /* NTFS_RW */
52717 };
52718
52719-const struct file_operations ntfs_empty_file_ops = {};
52720+const struct file_operations ntfs_empty_file_ops __read_only;
52721
52722-const struct inode_operations ntfs_empty_inode_ops = {};
52723+const struct inode_operations ntfs_empty_inode_ops __read_only;
52724diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52725index 1cd2934..880b5d2 100644
52726--- a/fs/ocfs2/cluster/masklog.c
52727+++ b/fs/ocfs2/cluster/masklog.c
52728@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52729 return mlog_mask_store(mlog_attr->mask, buf, count);
52730 }
52731
52732-static struct sysfs_ops mlog_attr_ops = {
52733+static const struct sysfs_ops mlog_attr_ops = {
52734 .show = mlog_show,
52735 .store = mlog_store,
52736 };
52737diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52738index ac10f83..2cd2607 100644
52739--- a/fs/ocfs2/localalloc.c
52740+++ b/fs/ocfs2/localalloc.c
52741@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52742 goto bail;
52743 }
52744
52745- atomic_inc(&osb->alloc_stats.moves);
52746+ atomic_inc_unchecked(&osb->alloc_stats.moves);
52747
52748 status = 0;
52749 bail:
52750diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52751index f010b22..9f9ed34 100644
52752--- a/fs/ocfs2/namei.c
52753+++ b/fs/ocfs2/namei.c
52754@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52755 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52756 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52757
52758+ pax_track_stack();
52759+
52760 /* At some point it might be nice to break this function up a
52761 * bit. */
52762
52763diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52764index d963d86..914cfbd 100644
52765--- a/fs/ocfs2/ocfs2.h
52766+++ b/fs/ocfs2/ocfs2.h
52767@@ -217,11 +217,11 @@ enum ocfs2_vol_state
52768
52769 struct ocfs2_alloc_stats
52770 {
52771- atomic_t moves;
52772- atomic_t local_data;
52773- atomic_t bitmap_data;
52774- atomic_t bg_allocs;
52775- atomic_t bg_extends;
52776+ atomic_unchecked_t moves;
52777+ atomic_unchecked_t local_data;
52778+ atomic_unchecked_t bitmap_data;
52779+ atomic_unchecked_t bg_allocs;
52780+ atomic_unchecked_t bg_extends;
52781 };
52782
52783 enum ocfs2_local_alloc_state
52784diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52785index 79b5dac..d322952 100644
52786--- a/fs/ocfs2/suballoc.c
52787+++ b/fs/ocfs2/suballoc.c
52788@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52789 mlog_errno(status);
52790 goto bail;
52791 }
52792- atomic_inc(&osb->alloc_stats.bg_extends);
52793+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52794
52795 /* You should never ask for this much metadata */
52796 BUG_ON(bits_wanted >
52797@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52798 mlog_errno(status);
52799 goto bail;
52800 }
52801- atomic_inc(&osb->alloc_stats.bg_allocs);
52802+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52803
52804 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52805 ac->ac_bits_given += (*num_bits);
52806@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52807 mlog_errno(status);
52808 goto bail;
52809 }
52810- atomic_inc(&osb->alloc_stats.bg_allocs);
52811+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52812
52813 BUG_ON(num_bits != 1);
52814
52815@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52816 cluster_start,
52817 num_clusters);
52818 if (!status)
52819- atomic_inc(&osb->alloc_stats.local_data);
52820+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
52821 } else {
52822 if (min_clusters > (osb->bitmap_cpg - 1)) {
52823 /* The only paths asking for contiguousness
52824@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52825 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52826 bg_blkno,
52827 bg_bit_off);
52828- atomic_inc(&osb->alloc_stats.bitmap_data);
52829+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52830 }
52831 }
52832 if (status < 0) {
52833diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52834index 9f55be4..a3f8048 100644
52835--- a/fs/ocfs2/super.c
52836+++ b/fs/ocfs2/super.c
52837@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52838 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52839 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52840 "Stats",
52841- atomic_read(&osb->alloc_stats.bitmap_data),
52842- atomic_read(&osb->alloc_stats.local_data),
52843- atomic_read(&osb->alloc_stats.bg_allocs),
52844- atomic_read(&osb->alloc_stats.moves),
52845- atomic_read(&osb->alloc_stats.bg_extends));
52846+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52847+ atomic_read_unchecked(&osb->alloc_stats.local_data),
52848+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52849+ atomic_read_unchecked(&osb->alloc_stats.moves),
52850+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52851
52852 out += snprintf(buf + out, len - out,
52853 "%10s => State: %u Descriptor: %llu Size: %u bits "
52854@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52855 spin_lock_init(&osb->osb_xattr_lock);
52856 ocfs2_init_inode_steal_slot(osb);
52857
52858- atomic_set(&osb->alloc_stats.moves, 0);
52859- atomic_set(&osb->alloc_stats.local_data, 0);
52860- atomic_set(&osb->alloc_stats.bitmap_data, 0);
52861- atomic_set(&osb->alloc_stats.bg_allocs, 0);
52862- atomic_set(&osb->alloc_stats.bg_extends, 0);
52863+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52864+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52865+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52866+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52867+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52868
52869 /* Copy the blockcheck stats from the superblock probe */
52870 osb->osb_ecc_stats = *stats;
52871diff --git a/fs/open.c b/fs/open.c
52872index 4f01e06..2a8057a 100644
52873--- a/fs/open.c
52874+++ b/fs/open.c
52875@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52876 error = locks_verify_truncate(inode, NULL, length);
52877 if (!error)
52878 error = security_path_truncate(&path, length, 0);
52879+
52880+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52881+ error = -EACCES;
52882+
52883 if (!error) {
52884 vfs_dq_init(inode);
52885 error = do_truncate(path.dentry, length, 0, NULL);
52886@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52887 if (__mnt_is_readonly(path.mnt))
52888 res = -EROFS;
52889
52890+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52891+ res = -EACCES;
52892+
52893 out_path_release:
52894 path_put(&path);
52895 out:
52896@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52897 if (error)
52898 goto dput_and_out;
52899
52900+ gr_log_chdir(path.dentry, path.mnt);
52901+
52902 set_fs_pwd(current->fs, &path);
52903
52904 dput_and_out:
52905@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52906 goto out_putf;
52907
52908 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52909+
52910+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52911+ error = -EPERM;
52912+
52913+ if (!error)
52914+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52915+
52916 if (!error)
52917 set_fs_pwd(current->fs, &file->f_path);
52918 out_putf:
52919@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52920 if (!capable(CAP_SYS_CHROOT))
52921 goto dput_and_out;
52922
52923+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52924+ goto dput_and_out;
52925+
52926 set_fs_root(current->fs, &path);
52927+
52928+ gr_handle_chroot_chdir(&path);
52929+
52930 error = 0;
52931 dput_and_out:
52932 path_put(&path);
52933@@ -596,66 +618,57 @@ out:
52934 return error;
52935 }
52936
52937+static int chmod_common(struct path *path, umode_t mode)
52938+{
52939+ struct inode *inode = path->dentry->d_inode;
52940+ struct iattr newattrs;
52941+ int error;
52942+
52943+ error = mnt_want_write(path->mnt);
52944+ if (error)
52945+ return error;
52946+ mutex_lock(&inode->i_mutex);
52947+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
52948+ error = -EACCES;
52949+ goto out_unlock;
52950+ }
52951+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
52952+ error = -EPERM;
52953+ goto out_unlock;
52954+ }
52955+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52956+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52957+ error = notify_change(path->dentry, &newattrs);
52958+out_unlock:
52959+ mutex_unlock(&inode->i_mutex);
52960+ mnt_drop_write(path->mnt);
52961+ return error;
52962+}
52963+
52964 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52965 {
52966- struct inode * inode;
52967- struct dentry * dentry;
52968 struct file * file;
52969 int err = -EBADF;
52970- struct iattr newattrs;
52971
52972 file = fget(fd);
52973- if (!file)
52974- goto out;
52975-
52976- dentry = file->f_path.dentry;
52977- inode = dentry->d_inode;
52978-
52979- audit_inode(NULL, dentry);
52980-
52981- err = mnt_want_write_file(file);
52982- if (err)
52983- goto out_putf;
52984- mutex_lock(&inode->i_mutex);
52985- if (mode == (mode_t) -1)
52986- mode = inode->i_mode;
52987- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52988- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52989- err = notify_change(dentry, &newattrs);
52990- mutex_unlock(&inode->i_mutex);
52991- mnt_drop_write(file->f_path.mnt);
52992-out_putf:
52993- fput(file);
52994-out:
52995+ if (file) {
52996+ audit_inode(NULL, file->f_path.dentry);
52997+ err = chmod_common(&file->f_path, mode);
52998+ fput(file);
52999+ }
53000 return err;
53001 }
53002
53003 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
53004 {
53005 struct path path;
53006- struct inode *inode;
53007 int error;
53008- struct iattr newattrs;
53009
53010 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
53011- if (error)
53012- goto out;
53013- inode = path.dentry->d_inode;
53014-
53015- error = mnt_want_write(path.mnt);
53016- if (error)
53017- goto dput_and_out;
53018- mutex_lock(&inode->i_mutex);
53019- if (mode == (mode_t) -1)
53020- mode = inode->i_mode;
53021- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
53022- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
53023- error = notify_change(path.dentry, &newattrs);
53024- mutex_unlock(&inode->i_mutex);
53025- mnt_drop_write(path.mnt);
53026-dput_and_out:
53027- path_put(&path);
53028-out:
53029+ if (!error) {
53030+ error = chmod_common(&path, mode);
53031+ path_put(&path);
53032+ }
53033 return error;
53034 }
53035
53036@@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
53037 return sys_fchmodat(AT_FDCWD, filename, mode);
53038 }
53039
53040-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
53041+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
53042 {
53043 struct inode *inode = dentry->d_inode;
53044 int error;
53045 struct iattr newattrs;
53046
53047+ if (!gr_acl_handle_chown(dentry, mnt))
53048+ return -EACCES;
53049+
53050 newattrs.ia_valid = ATTR_CTIME;
53051 if (user != (uid_t) -1) {
53052 newattrs.ia_valid |= ATTR_UID;
53053@@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
53054 error = mnt_want_write(path.mnt);
53055 if (error)
53056 goto out_release;
53057- error = chown_common(path.dentry, user, group);
53058+ error = chown_common(path.dentry, user, group, path.mnt);
53059 mnt_drop_write(path.mnt);
53060 out_release:
53061 path_put(&path);
53062@@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
53063 error = mnt_want_write(path.mnt);
53064 if (error)
53065 goto out_release;
53066- error = chown_common(path.dentry, user, group);
53067+ error = chown_common(path.dentry, user, group, path.mnt);
53068 mnt_drop_write(path.mnt);
53069 out_release:
53070 path_put(&path);
53071@@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
53072 error = mnt_want_write(path.mnt);
53073 if (error)
53074 goto out_release;
53075- error = chown_common(path.dentry, user, group);
53076+ error = chown_common(path.dentry, user, group, path.mnt);
53077 mnt_drop_write(path.mnt);
53078 out_release:
53079 path_put(&path);
53080@@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
53081 goto out_fput;
53082 dentry = file->f_path.dentry;
53083 audit_inode(NULL, dentry);
53084- error = chown_common(dentry, user, group);
53085+ error = chown_common(dentry, user, group, file->f_path.mnt);
53086 mnt_drop_write(file->f_path.mnt);
53087 out_fput:
53088 fput(file);
53089@@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
53090 if (!IS_ERR(tmp)) {
53091 fd = get_unused_fd_flags(flags);
53092 if (fd >= 0) {
53093- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
53094+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
53095 if (IS_ERR(f)) {
53096 put_unused_fd(fd);
53097 fd = PTR_ERR(f);
53098diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
53099index 6ab70f4..f4103d1 100644
53100--- a/fs/partitions/efi.c
53101+++ b/fs/partitions/efi.c
53102@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
53103 if (!bdev || !gpt)
53104 return NULL;
53105
53106+ if (!le32_to_cpu(gpt->num_partition_entries))
53107+ return NULL;
53108+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
53109+ if (!pte)
53110+ return NULL;
53111+
53112 count = le32_to_cpu(gpt->num_partition_entries) *
53113 le32_to_cpu(gpt->sizeof_partition_entry);
53114- if (!count)
53115- return NULL;
53116- pte = kzalloc(count, GFP_KERNEL);
53117- if (!pte)
53118- return NULL;
53119-
53120 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
53121 (u8 *) pte,
53122 count) < count) {
53123diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
53124index dd6efdb..3babc6c 100644
53125--- a/fs/partitions/ldm.c
53126+++ b/fs/partitions/ldm.c
53127@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
53128 ldm_error ("A VBLK claims to have %d parts.", num);
53129 return false;
53130 }
53131+
53132 if (rec >= num) {
53133 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
53134 return false;
53135@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
53136 goto found;
53137 }
53138
53139- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
53140+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
53141 if (!f) {
53142 ldm_crit ("Out of memory.");
53143 return false;
53144diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
53145index 5765198..7f8e9e0 100644
53146--- a/fs/partitions/mac.c
53147+++ b/fs/partitions/mac.c
53148@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
53149 return 0; /* not a MacOS disk */
53150 }
53151 blocks_in_map = be32_to_cpu(part->map_count);
53152- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
53153- put_dev_sector(sect);
53154- return 0;
53155- }
53156 printk(" [mac]");
53157+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
53158+ put_dev_sector(sect);
53159+ return 0;
53160+ }
53161 for (slot = 1; slot <= blocks_in_map; ++slot) {
53162 int pos = slot * secsize;
53163 put_dev_sector(sect);
53164diff --git a/fs/pipe.c b/fs/pipe.c
53165index d0cc080..8a6f211 100644
53166--- a/fs/pipe.c
53167+++ b/fs/pipe.c
53168@@ -401,9 +401,9 @@ redo:
53169 }
53170 if (bufs) /* More to do? */
53171 continue;
53172- if (!pipe->writers)
53173+ if (!atomic_read(&pipe->writers))
53174 break;
53175- if (!pipe->waiting_writers) {
53176+ if (!atomic_read(&pipe->waiting_writers)) {
53177 /* syscall merging: Usually we must not sleep
53178 * if O_NONBLOCK is set, or if we got some data.
53179 * But if a writer sleeps in kernel space, then
53180@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
53181 mutex_lock(&inode->i_mutex);
53182 pipe = inode->i_pipe;
53183
53184- if (!pipe->readers) {
53185+ if (!atomic_read(&pipe->readers)) {
53186 send_sig(SIGPIPE, current, 0);
53187 ret = -EPIPE;
53188 goto out;
53189@@ -511,7 +511,7 @@ redo1:
53190 for (;;) {
53191 int bufs;
53192
53193- if (!pipe->readers) {
53194+ if (!atomic_read(&pipe->readers)) {
53195 send_sig(SIGPIPE, current, 0);
53196 if (!ret)
53197 ret = -EPIPE;
53198@@ -597,9 +597,9 @@ redo2:
53199 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
53200 do_wakeup = 0;
53201 }
53202- pipe->waiting_writers++;
53203+ atomic_inc(&pipe->waiting_writers);
53204 pipe_wait(pipe);
53205- pipe->waiting_writers--;
53206+ atomic_dec(&pipe->waiting_writers);
53207 }
53208 out:
53209 mutex_unlock(&inode->i_mutex);
53210@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53211 mask = 0;
53212 if (filp->f_mode & FMODE_READ) {
53213 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
53214- if (!pipe->writers && filp->f_version != pipe->w_counter)
53215+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
53216 mask |= POLLHUP;
53217 }
53218
53219@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53220 * Most Unices do not set POLLERR for FIFOs but on Linux they
53221 * behave exactly like pipes for poll().
53222 */
53223- if (!pipe->readers)
53224+ if (!atomic_read(&pipe->readers))
53225 mask |= POLLERR;
53226 }
53227
53228@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
53229
53230 mutex_lock(&inode->i_mutex);
53231 pipe = inode->i_pipe;
53232- pipe->readers -= decr;
53233- pipe->writers -= decw;
53234+ atomic_sub(decr, &pipe->readers);
53235+ atomic_sub(decw, &pipe->writers);
53236
53237- if (!pipe->readers && !pipe->writers) {
53238+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
53239 free_pipe_info(inode);
53240 } else {
53241 wake_up_interruptible_sync(&pipe->wait);
53242@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
53243
53244 if (inode->i_pipe) {
53245 ret = 0;
53246- inode->i_pipe->readers++;
53247+ atomic_inc(&inode->i_pipe->readers);
53248 }
53249
53250 mutex_unlock(&inode->i_mutex);
53251@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
53252
53253 if (inode->i_pipe) {
53254 ret = 0;
53255- inode->i_pipe->writers++;
53256+ atomic_inc(&inode->i_pipe->writers);
53257 }
53258
53259 mutex_unlock(&inode->i_mutex);
53260@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
53261 if (inode->i_pipe) {
53262 ret = 0;
53263 if (filp->f_mode & FMODE_READ)
53264- inode->i_pipe->readers++;
53265+ atomic_inc(&inode->i_pipe->readers);
53266 if (filp->f_mode & FMODE_WRITE)
53267- inode->i_pipe->writers++;
53268+ atomic_inc(&inode->i_pipe->writers);
53269 }
53270
53271 mutex_unlock(&inode->i_mutex);
53272@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
53273 inode->i_pipe = NULL;
53274 }
53275
53276-static struct vfsmount *pipe_mnt __read_mostly;
53277+struct vfsmount *pipe_mnt __read_mostly;
53278 static int pipefs_delete_dentry(struct dentry *dentry)
53279 {
53280 /*
53281@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
53282 goto fail_iput;
53283 inode->i_pipe = pipe;
53284
53285- pipe->readers = pipe->writers = 1;
53286+ atomic_set(&pipe->readers, 1);
53287+ atomic_set(&pipe->writers, 1);
53288 inode->i_fop = &rdwr_pipefifo_fops;
53289
53290 /*
53291diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53292index 50f8f06..c5755df 100644
53293--- a/fs/proc/Kconfig
53294+++ b/fs/proc/Kconfig
53295@@ -30,12 +30,12 @@ config PROC_FS
53296
53297 config PROC_KCORE
53298 bool "/proc/kcore support" if !ARM
53299- depends on PROC_FS && MMU
53300+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53301
53302 config PROC_VMCORE
53303 bool "/proc/vmcore support (EXPERIMENTAL)"
53304- depends on PROC_FS && CRASH_DUMP
53305- default y
53306+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53307+ default n
53308 help
53309 Exports the dump image of crashed kernel in ELF format.
53310
53311@@ -59,8 +59,8 @@ config PROC_SYSCTL
53312 limited in memory.
53313
53314 config PROC_PAGE_MONITOR
53315- default y
53316- depends on PROC_FS && MMU
53317+ default n
53318+ depends on PROC_FS && MMU && !GRKERNSEC
53319 bool "Enable /proc page monitoring" if EMBEDDED
53320 help
53321 Various /proc files exist to monitor process memory utilization:
53322diff --git a/fs/proc/array.c b/fs/proc/array.c
53323index c5ef152..28c94f7 100644
53324--- a/fs/proc/array.c
53325+++ b/fs/proc/array.c
53326@@ -60,6 +60,7 @@
53327 #include <linux/tty.h>
53328 #include <linux/string.h>
53329 #include <linux/mman.h>
53330+#include <linux/grsecurity.h>
53331 #include <linux/proc_fs.h>
53332 #include <linux/ioport.h>
53333 #include <linux/uaccess.h>
53334@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
53335 p->nivcsw);
53336 }
53337
53338+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53339+static inline void task_pax(struct seq_file *m, struct task_struct *p)
53340+{
53341+ if (p->mm)
53342+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53343+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53344+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53345+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53346+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53347+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53348+ else
53349+ seq_printf(m, "PaX:\t-----\n");
53350+}
53351+#endif
53352+
53353 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53354 struct pid *pid, struct task_struct *task)
53355 {
53356@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53357 task_cap(m, task);
53358 cpuset_task_status_allowed(m, task);
53359 task_context_switch_counts(m, task);
53360+
53361+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53362+ task_pax(m, task);
53363+#endif
53364+
53365+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53366+ task_grsec_rbac(m, task);
53367+#endif
53368+
53369 return 0;
53370 }
53371
53372+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53373+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53374+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53375+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53376+#endif
53377+
53378 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53379 struct pid *pid, struct task_struct *task, int whole)
53380 {
53381@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53382 cputime_t cutime, cstime, utime, stime;
53383 cputime_t cgtime, gtime;
53384 unsigned long rsslim = 0;
53385- char tcomm[sizeof(task->comm)];
53386+ char tcomm[sizeof(task->comm)] = { 0 };
53387 unsigned long flags;
53388
53389+ pax_track_stack();
53390+
53391+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53392+ if (current->exec_id != m->exec_id) {
53393+ gr_log_badprocpid("stat");
53394+ return 0;
53395+ }
53396+#endif
53397+
53398 state = *get_task_state(task);
53399 vsize = eip = esp = 0;
53400 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
53401@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53402 gtime = task_gtime(task);
53403 }
53404
53405+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53406+ if (PAX_RAND_FLAGS(mm)) {
53407+ eip = 0;
53408+ esp = 0;
53409+ wchan = 0;
53410+ }
53411+#endif
53412+#ifdef CONFIG_GRKERNSEC_HIDESYM
53413+ wchan = 0;
53414+ eip =0;
53415+ esp =0;
53416+#endif
53417+
53418 /* scale priority and nice values from timeslices to -20..20 */
53419 /* to make it look like a "normal" Unix priority/nice value */
53420 priority = task_prio(task);
53421@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53422 vsize,
53423 mm ? get_mm_rss(mm) : 0,
53424 rsslim,
53425+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53426+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
53427+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
53428+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
53429+#else
53430 mm ? (permitted ? mm->start_code : 1) : 0,
53431 mm ? (permitted ? mm->end_code : 1) : 0,
53432 (permitted && mm) ? mm->start_stack : 0,
53433+#endif
53434 esp,
53435 eip,
53436 /* The signal information here is obsolete.
53437@@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53438 struct pid *pid, struct task_struct *task)
53439 {
53440 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
53441- struct mm_struct *mm = get_task_mm(task);
53442+ struct mm_struct *mm;
53443
53444+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53445+ if (current->exec_id != m->exec_id) {
53446+ gr_log_badprocpid("statm");
53447+ return 0;
53448+ }
53449+#endif
53450+
53451+ mm = get_task_mm(task);
53452 if (mm) {
53453 size = task_statm(mm, &shared, &text, &data, &resident);
53454 mmput(mm);
53455@@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53456
53457 return 0;
53458 }
53459+
53460+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53461+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53462+{
53463+ u32 curr_ip = 0;
53464+ unsigned long flags;
53465+
53466+ if (lock_task_sighand(task, &flags)) {
53467+ curr_ip = task->signal->curr_ip;
53468+ unlock_task_sighand(task, &flags);
53469+ }
53470+
53471+ return sprintf(buffer, "%pI4\n", &curr_ip);
53472+}
53473+#endif
53474diff --git a/fs/proc/base.c b/fs/proc/base.c
53475index 67f7dc0..a86ad9a 100644
53476--- a/fs/proc/base.c
53477+++ b/fs/proc/base.c
53478@@ -102,6 +102,22 @@ struct pid_entry {
53479 union proc_op op;
53480 };
53481
53482+struct getdents_callback {
53483+ struct linux_dirent __user * current_dir;
53484+ struct linux_dirent __user * previous;
53485+ struct file * file;
53486+ int count;
53487+ int error;
53488+};
53489+
53490+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
53491+ loff_t offset, u64 ino, unsigned int d_type)
53492+{
53493+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
53494+ buf->error = -EINVAL;
53495+ return 0;
53496+}
53497+
53498 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53499 .name = (NAME), \
53500 .len = sizeof(NAME) - 1, \
53501@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
53502 if (task == current)
53503 return 0;
53504
53505+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
53506+ return -EPERM;
53507+
53508 /*
53509 * If current is actively ptrace'ing, and would also be
53510 * permitted to freshly attach with ptrace now, permit it.
53511@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53512 if (!mm->arg_end)
53513 goto out_mm; /* Shh! No looking before we're done */
53514
53515+ if (gr_acl_handle_procpidmem(task))
53516+ goto out_mm;
53517+
53518 len = mm->arg_end - mm->arg_start;
53519
53520 if (len > PAGE_SIZE)
53521@@ -287,12 +309,28 @@ out:
53522 return res;
53523 }
53524
53525+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53526+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53527+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53528+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53529+#endif
53530+
53531 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53532 {
53533 int res = 0;
53534 struct mm_struct *mm = get_task_mm(task);
53535 if (mm) {
53536 unsigned int nwords = 0;
53537+
53538+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53539+ /* allow if we're currently ptracing this task */
53540+ if (PAX_RAND_FLAGS(mm) &&
53541+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53542+ mmput(mm);
53543+ return 0;
53544+ }
53545+#endif
53546+
53547 do {
53548 nwords += 2;
53549 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53550@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53551 }
53552
53553
53554-#ifdef CONFIG_KALLSYMS
53555+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53556 /*
53557 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53558 * Returns the resolved symbol. If that fails, simply return the address.
53559@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
53560 mutex_unlock(&task->cred_guard_mutex);
53561 }
53562
53563-#ifdef CONFIG_STACKTRACE
53564+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53565
53566 #define MAX_STACK_TRACE_DEPTH 64
53567
53568@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53569 return count;
53570 }
53571
53572-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53573+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53574 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53575 {
53576 long nr;
53577@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53578 /************************************************************************/
53579
53580 /* permission checks */
53581-static int proc_fd_access_allowed(struct inode *inode)
53582+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53583 {
53584 struct task_struct *task;
53585 int allowed = 0;
53586@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53587 */
53588 task = get_proc_task(inode);
53589 if (task) {
53590- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53591+ if (log)
53592+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
53593+ else
53594+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53595 put_task_struct(task);
53596 }
53597 return allowed;
53598@@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
53599 static int mem_open(struct inode* inode, struct file* file)
53600 {
53601 file->private_data = (void*)((long)current->self_exec_id);
53602+
53603+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53604+ file->f_version = current->exec_id;
53605+#endif
53606+
53607 return 0;
53608 }
53609
53610+static int task_dumpable(struct task_struct *task);
53611+
53612 static ssize_t mem_read(struct file * file, char __user * buf,
53613 size_t count, loff_t *ppos)
53614 {
53615@@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
53616 int ret = -ESRCH;
53617 struct mm_struct *mm;
53618
53619+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53620+ if (file->f_version != current->exec_id) {
53621+ gr_log_badprocpid("mem");
53622+ return 0;
53623+ }
53624+#endif
53625+
53626 if (!task)
53627 goto out_no_task;
53628
53629@@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
53630 if (!task)
53631 goto out_no_task;
53632
53633+ if (gr_acl_handle_procpidmem(task))
53634+ goto out;
53635+
53636 if (!ptrace_may_access(task, PTRACE_MODE_READ))
53637 goto out;
53638
53639@@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
53640 path_put(&nd->path);
53641
53642 /* Are we allowed to snoop on the tasks file descriptors? */
53643- if (!proc_fd_access_allowed(inode))
53644+ if (!proc_fd_access_allowed(inode,0))
53645 goto out;
53646
53647 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
53648@@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
53649 struct path path;
53650
53651 /* Are we allowed to snoop on the tasks file descriptors? */
53652- if (!proc_fd_access_allowed(inode))
53653- goto out;
53654+ /* logging this is needed for learning on chromium to work properly,
53655+ but we don't want to flood the logs from 'ps' which does a readlink
53656+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
53657+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
53658+ */
53659+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
53660+ if (!proc_fd_access_allowed(inode,0))
53661+ goto out;
53662+ } else {
53663+ if (!proc_fd_access_allowed(inode,1))
53664+ goto out;
53665+ }
53666
53667 error = PROC_I(inode)->op.proc_get_link(inode, &path);
53668 if (error)
53669@@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
53670 rcu_read_lock();
53671 cred = __task_cred(task);
53672 inode->i_uid = cred->euid;
53673+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53674+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53675+#else
53676 inode->i_gid = cred->egid;
53677+#endif
53678 rcu_read_unlock();
53679 }
53680 security_task_to_inode(task, inode);
53681@@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53682 struct inode *inode = dentry->d_inode;
53683 struct task_struct *task;
53684 const struct cred *cred;
53685+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53686+ const struct cred *tmpcred = current_cred();
53687+#endif
53688
53689 generic_fillattr(inode, stat);
53690
53691@@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53692 stat->uid = 0;
53693 stat->gid = 0;
53694 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53695+
53696+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53697+ rcu_read_unlock();
53698+ return -ENOENT;
53699+ }
53700+
53701 if (task) {
53702+ cred = __task_cred(task);
53703+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53704+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53705+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53706+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53707+#endif
53708+ ) {
53709+#endif
53710 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53711+#ifdef CONFIG_GRKERNSEC_PROC_USER
53712+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53713+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53714+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53715+#endif
53716 task_dumpable(task)) {
53717- cred = __task_cred(task);
53718 stat->uid = cred->euid;
53719+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53720+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53721+#else
53722 stat->gid = cred->egid;
53723+#endif
53724 }
53725+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53726+ } else {
53727+ rcu_read_unlock();
53728+ return -ENOENT;
53729+ }
53730+#endif
53731 }
53732 rcu_read_unlock();
53733 return 0;
53734@@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53735
53736 if (task) {
53737 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53738+#ifdef CONFIG_GRKERNSEC_PROC_USER
53739+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53740+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53741+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53742+#endif
53743 task_dumpable(task)) {
53744 rcu_read_lock();
53745 cred = __task_cred(task);
53746 inode->i_uid = cred->euid;
53747+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53748+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53749+#else
53750 inode->i_gid = cred->egid;
53751+#endif
53752 rcu_read_unlock();
53753 } else {
53754 inode->i_uid = 0;
53755@@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53756 int fd = proc_fd(inode);
53757
53758 if (task) {
53759- files = get_files_struct(task);
53760+ if (!gr_acl_handle_procpidmem(task))
53761+ files = get_files_struct(task);
53762 put_task_struct(task);
53763 }
53764 if (files) {
53765@@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
53766 static int proc_fd_permission(struct inode *inode, int mask)
53767 {
53768 int rv;
53769+ struct task_struct *task;
53770
53771 rv = generic_permission(inode, mask, NULL);
53772- if (rv == 0)
53773- return 0;
53774+
53775 if (task_pid(current) == proc_pid(inode))
53776 rv = 0;
53777+
53778+ task = get_proc_task(inode);
53779+ if (task == NULL)
53780+ return rv;
53781+
53782+ if (gr_acl_handle_procpidmem(task))
53783+ rv = -EACCES;
53784+
53785+ put_task_struct(task);
53786+
53787 return rv;
53788 }
53789
53790@@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53791 if (!task)
53792 goto out_no_task;
53793
53794+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53795+ goto out;
53796+
53797 /*
53798 * Yes, it does not scale. And it should not. Don't add
53799 * new entries into /proc/<tgid>/ without very good reasons.
53800@@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
53801 if (!task)
53802 goto out_no_task;
53803
53804+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53805+ goto out;
53806+
53807 ret = 0;
53808 i = filp->f_pos;
53809 switch (i) {
53810@@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53811 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53812 void *cookie)
53813 {
53814- char *s = nd_get_link(nd);
53815+ const char *s = nd_get_link(nd);
53816 if (!IS_ERR(s))
53817 __putname(s);
53818 }
53819@@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53820 #ifdef CONFIG_SCHED_DEBUG
53821 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53822 #endif
53823-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53824+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53825 INF("syscall", S_IRUGO, proc_pid_syscall),
53826 #endif
53827 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53828@@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53829 #ifdef CONFIG_SECURITY
53830 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53831 #endif
53832-#ifdef CONFIG_KALLSYMS
53833+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53834 INF("wchan", S_IRUGO, proc_pid_wchan),
53835 #endif
53836-#ifdef CONFIG_STACKTRACE
53837+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53838 ONE("stack", S_IRUGO, proc_pid_stack),
53839 #endif
53840 #ifdef CONFIG_SCHEDSTATS
53841@@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53842 #ifdef CONFIG_TASK_IO_ACCOUNTING
53843 INF("io", S_IRUSR, proc_tgid_io_accounting),
53844 #endif
53845+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53846+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53847+#endif
53848 };
53849
53850 static int proc_tgid_base_readdir(struct file * filp,
53851@@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53852 if (!inode)
53853 goto out;
53854
53855+#ifdef CONFIG_GRKERNSEC_PROC_USER
53856+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53857+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53858+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53859+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53860+#else
53861 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53862+#endif
53863 inode->i_op = &proc_tgid_base_inode_operations;
53864 inode->i_fop = &proc_tgid_base_operations;
53865 inode->i_flags|=S_IMMUTABLE;
53866@@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53867 if (!task)
53868 goto out;
53869
53870+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53871+ goto out_put_task;
53872+
53873 result = proc_pid_instantiate(dir, dentry, task, NULL);
53874+out_put_task:
53875 put_task_struct(task);
53876 out:
53877 return result;
53878@@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53879 {
53880 unsigned int nr;
53881 struct task_struct *reaper;
53882+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53883+ const struct cred *tmpcred = current_cred();
53884+ const struct cred *itercred;
53885+#endif
53886+ filldir_t __filldir = filldir;
53887 struct tgid_iter iter;
53888 struct pid_namespace *ns;
53889
53890@@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53891 for (iter = next_tgid(ns, iter);
53892 iter.task;
53893 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53894+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53895+ rcu_read_lock();
53896+ itercred = __task_cred(iter.task);
53897+#endif
53898+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53899+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53900+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53901+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53902+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53903+#endif
53904+ )
53905+#endif
53906+ )
53907+ __filldir = &gr_fake_filldir;
53908+ else
53909+ __filldir = filldir;
53910+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53911+ rcu_read_unlock();
53912+#endif
53913 filp->f_pos = iter.tgid + TGID_OFFSET;
53914- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53915+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53916 put_task_struct(iter.task);
53917 goto out;
53918 }
53919@@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
53920 #ifdef CONFIG_SCHED_DEBUG
53921 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53922 #endif
53923-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53924+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53925 INF("syscall", S_IRUGO, proc_pid_syscall),
53926 #endif
53927 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53928@@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
53929 #ifdef CONFIG_SECURITY
53930 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53931 #endif
53932-#ifdef CONFIG_KALLSYMS
53933+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53934 INF("wchan", S_IRUGO, proc_pid_wchan),
53935 #endif
53936-#ifdef CONFIG_STACKTRACE
53937+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53938 ONE("stack", S_IRUGO, proc_pid_stack),
53939 #endif
53940 #ifdef CONFIG_SCHEDSTATS
53941diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53942index 82676e3..5f8518a 100644
53943--- a/fs/proc/cmdline.c
53944+++ b/fs/proc/cmdline.c
53945@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53946
53947 static int __init proc_cmdline_init(void)
53948 {
53949+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53950+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53951+#else
53952 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53953+#endif
53954 return 0;
53955 }
53956 module_init(proc_cmdline_init);
53957diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53958index 59ee7da..469b4b6 100644
53959--- a/fs/proc/devices.c
53960+++ b/fs/proc/devices.c
53961@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53962
53963 static int __init proc_devices_init(void)
53964 {
53965+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53966+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53967+#else
53968 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53969+#endif
53970 return 0;
53971 }
53972 module_init(proc_devices_init);
53973diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53974index d78ade3..81767f9 100644
53975--- a/fs/proc/inode.c
53976+++ b/fs/proc/inode.c
53977@@ -18,12 +18,19 @@
53978 #include <linux/module.h>
53979 #include <linux/smp_lock.h>
53980 #include <linux/sysctl.h>
53981+#include <linux/grsecurity.h>
53982
53983 #include <asm/system.h>
53984 #include <asm/uaccess.h>
53985
53986 #include "internal.h"
53987
53988+#ifdef CONFIG_PROC_SYSCTL
53989+extern const struct inode_operations proc_sys_inode_operations;
53990+extern const struct inode_operations proc_sys_dir_operations;
53991+#endif
53992+
53993+
53994 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53995 {
53996 atomic_inc(&de->count);
53997@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53998 de_put(de);
53999 if (PROC_I(inode)->sysctl)
54000 sysctl_head_put(PROC_I(inode)->sysctl);
54001+
54002+#ifdef CONFIG_PROC_SYSCTL
54003+ if (inode->i_op == &proc_sys_inode_operations ||
54004+ inode->i_op == &proc_sys_dir_operations)
54005+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
54006+#endif
54007+
54008 clear_inode(inode);
54009 }
54010
54011@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
54012 if (de->mode) {
54013 inode->i_mode = de->mode;
54014 inode->i_uid = de->uid;
54015+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54016+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
54017+#else
54018 inode->i_gid = de->gid;
54019+#endif
54020 }
54021 if (de->size)
54022 inode->i_size = de->size;
54023diff --git a/fs/proc/internal.h b/fs/proc/internal.h
54024index 753ca37..26bcf3b 100644
54025--- a/fs/proc/internal.h
54026+++ b/fs/proc/internal.h
54027@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54028 struct pid *pid, struct task_struct *task);
54029 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54030 struct pid *pid, struct task_struct *task);
54031+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54032+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
54033+#endif
54034 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
54035
54036 extern const struct file_operations proc_maps_operations;
54037diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
54038index b442dac..aab29cb 100644
54039--- a/fs/proc/kcore.c
54040+++ b/fs/proc/kcore.c
54041@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
54042 off_t offset = 0;
54043 struct kcore_list *m;
54044
54045+ pax_track_stack();
54046+
54047 /* setup ELF header */
54048 elf = (struct elfhdr *) bufp;
54049 bufp += sizeof(struct elfhdr);
54050@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54051 * the addresses in the elf_phdr on our list.
54052 */
54053 start = kc_offset_to_vaddr(*fpos - elf_buflen);
54054- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
54055+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
54056+ if (tsz > buflen)
54057 tsz = buflen;
54058-
54059+
54060 while (buflen) {
54061 struct kcore_list *m;
54062
54063@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54064 kfree(elf_buf);
54065 } else {
54066 if (kern_addr_valid(start)) {
54067- unsigned long n;
54068+ char *elf_buf;
54069+ mm_segment_t oldfs;
54070
54071- n = copy_to_user(buffer, (char *)start, tsz);
54072- /*
54073- * We cannot distingush between fault on source
54074- * and fault on destination. When this happens
54075- * we clear too and hope it will trigger the
54076- * EFAULT again.
54077- */
54078- if (n) {
54079- if (clear_user(buffer + tsz - n,
54080- n))
54081+ elf_buf = kmalloc(tsz, GFP_KERNEL);
54082+ if (!elf_buf)
54083+ return -ENOMEM;
54084+ oldfs = get_fs();
54085+ set_fs(KERNEL_DS);
54086+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
54087+ set_fs(oldfs);
54088+ if (copy_to_user(buffer, elf_buf, tsz)) {
54089+ kfree(elf_buf);
54090 return -EFAULT;
54091+ }
54092 }
54093+ set_fs(oldfs);
54094+ kfree(elf_buf);
54095 } else {
54096 if (clear_user(buffer, tsz))
54097 return -EFAULT;
54098@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54099
54100 static int open_kcore(struct inode *inode, struct file *filp)
54101 {
54102+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
54103+ return -EPERM;
54104+#endif
54105 if (!capable(CAP_SYS_RAWIO))
54106 return -EPERM;
54107 if (kcore_need_update)
54108diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
54109index 7ca7834..cfe90a4 100644
54110--- a/fs/proc/kmsg.c
54111+++ b/fs/proc/kmsg.c
54112@@ -12,37 +12,37 @@
54113 #include <linux/poll.h>
54114 #include <linux/proc_fs.h>
54115 #include <linux/fs.h>
54116+#include <linux/syslog.h>
54117
54118 #include <asm/uaccess.h>
54119 #include <asm/io.h>
54120
54121 extern wait_queue_head_t log_wait;
54122
54123-extern int do_syslog(int type, char __user *bug, int count);
54124-
54125 static int kmsg_open(struct inode * inode, struct file * file)
54126 {
54127- return do_syslog(1,NULL,0);
54128+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
54129 }
54130
54131 static int kmsg_release(struct inode * inode, struct file * file)
54132 {
54133- (void) do_syslog(0,NULL,0);
54134+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
54135 return 0;
54136 }
54137
54138 static ssize_t kmsg_read(struct file *file, char __user *buf,
54139 size_t count, loff_t *ppos)
54140 {
54141- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
54142+ if ((file->f_flags & O_NONBLOCK) &&
54143+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
54144 return -EAGAIN;
54145- return do_syslog(2, buf, count);
54146+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
54147 }
54148
54149 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
54150 {
54151 poll_wait(file, &log_wait, wait);
54152- if (do_syslog(9, NULL, 0))
54153+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
54154 return POLLIN | POLLRDNORM;
54155 return 0;
54156 }
54157diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
54158index a65239c..ad1182a 100644
54159--- a/fs/proc/meminfo.c
54160+++ b/fs/proc/meminfo.c
54161@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54162 unsigned long pages[NR_LRU_LISTS];
54163 int lru;
54164
54165+ pax_track_stack();
54166+
54167 /*
54168 * display in kilobytes.
54169 */
54170@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54171 vmi.used >> 10,
54172 vmi.largest_chunk >> 10
54173 #ifdef CONFIG_MEMORY_FAILURE
54174- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
54175+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
54176 #endif
54177 );
54178
54179diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
54180index 9fe7d7e..cdb62c9 100644
54181--- a/fs/proc/nommu.c
54182+++ b/fs/proc/nommu.c
54183@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
54184 if (len < 1)
54185 len = 1;
54186 seq_printf(m, "%*c", len, ' ');
54187- seq_path(m, &file->f_path, "");
54188+ seq_path(m, &file->f_path, "\n\\");
54189 }
54190
54191 seq_putc(m, '\n');
54192diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
54193index 04d1270..25e1173 100644
54194--- a/fs/proc/proc_net.c
54195+++ b/fs/proc/proc_net.c
54196@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
54197 struct task_struct *task;
54198 struct nsproxy *ns;
54199 struct net *net = NULL;
54200+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54201+ const struct cred *cred = current_cred();
54202+#endif
54203+
54204+#ifdef CONFIG_GRKERNSEC_PROC_USER
54205+ if (cred->fsuid)
54206+ return net;
54207+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54208+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
54209+ return net;
54210+#endif
54211
54212 rcu_read_lock();
54213 task = pid_task(proc_pid(dir), PIDTYPE_PID);
54214diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
54215index f667e8a..55f4d96 100644
54216--- a/fs/proc/proc_sysctl.c
54217+++ b/fs/proc/proc_sysctl.c
54218@@ -7,11 +7,13 @@
54219 #include <linux/security.h>
54220 #include "internal.h"
54221
54222+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
54223+
54224 static const struct dentry_operations proc_sys_dentry_operations;
54225 static const struct file_operations proc_sys_file_operations;
54226-static const struct inode_operations proc_sys_inode_operations;
54227+const struct inode_operations proc_sys_inode_operations;
54228 static const struct file_operations proc_sys_dir_file_operations;
54229-static const struct inode_operations proc_sys_dir_operations;
54230+const struct inode_operations proc_sys_dir_operations;
54231
54232 static struct inode *proc_sys_make_inode(struct super_block *sb,
54233 struct ctl_table_header *head, struct ctl_table *table)
54234@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54235 if (!p)
54236 goto out;
54237
54238+ if (gr_handle_sysctl(p, MAY_EXEC))
54239+ goto out;
54240+
54241 err = ERR_PTR(-ENOMEM);
54242 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
54243 if (h)
54244@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54245
54246 err = NULL;
54247 dentry->d_op = &proc_sys_dentry_operations;
54248+
54249+ gr_handle_proc_create(dentry, inode);
54250+
54251 d_add(dentry, inode);
54252
54253 out:
54254@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
54255 return -ENOMEM;
54256 } else {
54257 child->d_op = &proc_sys_dentry_operations;
54258+
54259+ gr_handle_proc_create(child, inode);
54260+
54261 d_add(child, inode);
54262 }
54263 } else {
54264@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
54265 if (*pos < file->f_pos)
54266 continue;
54267
54268+ if (gr_handle_sysctl(table, 0))
54269+ continue;
54270+
54271 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
54272 if (res)
54273 return res;
54274@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
54275 if (IS_ERR(head))
54276 return PTR_ERR(head);
54277
54278+ if (table && gr_handle_sysctl(table, MAY_EXEC))
54279+ return -ENOENT;
54280+
54281 generic_fillattr(inode, stat);
54282 if (table)
54283 stat->mode = (stat->mode & S_IFMT) | table->mode;
54284@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
54285 };
54286
54287 static const struct file_operations proc_sys_dir_file_operations = {
54288+ .read = generic_read_dir,
54289 .readdir = proc_sys_readdir,
54290 .llseek = generic_file_llseek,
54291 };
54292
54293-static const struct inode_operations proc_sys_inode_operations = {
54294+const struct inode_operations proc_sys_inode_operations = {
54295 .permission = proc_sys_permission,
54296 .setattr = proc_sys_setattr,
54297 .getattr = proc_sys_getattr,
54298 };
54299
54300-static const struct inode_operations proc_sys_dir_operations = {
54301+const struct inode_operations proc_sys_dir_operations = {
54302 .lookup = proc_sys_lookup,
54303 .permission = proc_sys_permission,
54304 .setattr = proc_sys_setattr,
54305diff --git a/fs/proc/root.c b/fs/proc/root.c
54306index b080b79..d957e63 100644
54307--- a/fs/proc/root.c
54308+++ b/fs/proc/root.c
54309@@ -134,7 +134,15 @@ void __init proc_root_init(void)
54310 #ifdef CONFIG_PROC_DEVICETREE
54311 proc_device_tree_init();
54312 #endif
54313+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54314+#ifdef CONFIG_GRKERNSEC_PROC_USER
54315+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54316+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54317+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54318+#endif
54319+#else
54320 proc_mkdir("bus", NULL);
54321+#endif
54322 proc_sys_init();
54323 }
54324
54325diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54326index 3b7b82a..4b420b0 100644
54327--- a/fs/proc/task_mmu.c
54328+++ b/fs/proc/task_mmu.c
54329@@ -8,6 +8,7 @@
54330 #include <linux/mempolicy.h>
54331 #include <linux/swap.h>
54332 #include <linux/swapops.h>
54333+#include <linux/grsecurity.h>
54334
54335 #include <asm/elf.h>
54336 #include <asm/uaccess.h>
54337@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54338 "VmStk:\t%8lu kB\n"
54339 "VmExe:\t%8lu kB\n"
54340 "VmLib:\t%8lu kB\n"
54341- "VmPTE:\t%8lu kB\n",
54342- hiwater_vm << (PAGE_SHIFT-10),
54343+ "VmPTE:\t%8lu kB\n"
54344+
54345+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54346+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54347+#endif
54348+
54349+ ,hiwater_vm << (PAGE_SHIFT-10),
54350 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
54351 mm->locked_vm << (PAGE_SHIFT-10),
54352 hiwater_rss << (PAGE_SHIFT-10),
54353 total_rss << (PAGE_SHIFT-10),
54354 data << (PAGE_SHIFT-10),
54355 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54356- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
54357+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
54358+
54359+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54360+ , mm->context.user_cs_base, mm->context.user_cs_limit
54361+#endif
54362+
54363+ );
54364 }
54365
54366 unsigned long task_vsize(struct mm_struct *mm)
54367@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
54368 struct proc_maps_private *priv = m->private;
54369 struct vm_area_struct *vma = v;
54370
54371- vma_stop(priv, vma);
54372+ if (!IS_ERR(vma))
54373+ vma_stop(priv, vma);
54374 if (priv->task)
54375 put_task_struct(priv->task);
54376 }
54377@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
54378 return ret;
54379 }
54380
54381+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54382+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54383+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54384+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54385+#endif
54386+
54387 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54388 {
54389 struct mm_struct *mm = vma->vm_mm;
54390@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54391 int flags = vma->vm_flags;
54392 unsigned long ino = 0;
54393 unsigned long long pgoff = 0;
54394- unsigned long start;
54395 dev_t dev = 0;
54396 int len;
54397
54398@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54399 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54400 }
54401
54402- /* We don't show the stack guard page in /proc/maps */
54403- start = vma->vm_start;
54404- if (vma->vm_flags & VM_GROWSDOWN)
54405- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
54406- start += PAGE_SIZE;
54407-
54408 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54409- start,
54410+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54411+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
54412+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
54413+#else
54414+ vma->vm_start,
54415 vma->vm_end,
54416+#endif
54417 flags & VM_READ ? 'r' : '-',
54418 flags & VM_WRITE ? 'w' : '-',
54419 flags & VM_EXEC ? 'x' : '-',
54420 flags & VM_MAYSHARE ? 's' : 'p',
54421+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54422+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54423+#else
54424 pgoff,
54425+#endif
54426 MAJOR(dev), MINOR(dev), ino, &len);
54427
54428 /*
54429@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54430 */
54431 if (file) {
54432 pad_len_spaces(m, len);
54433- seq_path(m, &file->f_path, "\n");
54434+ seq_path(m, &file->f_path, "\n\\");
54435 } else {
54436 const char *name = arch_vma_name(vma);
54437 if (!name) {
54438@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
54439 if (vma->vm_start <= mm->brk &&
54440 vma->vm_end >= mm->start_brk) {
54441 name = "[heap]";
54442- } else if (vma->vm_start <= mm->start_stack &&
54443- vma->vm_end >= mm->start_stack) {
54444+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54445+ (vma->vm_start <= mm->start_stack &&
54446+ vma->vm_end >= mm->start_stack)) {
54447 name = "[stack]";
54448 }
54449 } else {
54450@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
54451 struct proc_maps_private *priv = m->private;
54452 struct task_struct *task = priv->task;
54453
54454+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54455+ if (current->exec_id != m->exec_id) {
54456+ gr_log_badprocpid("maps");
54457+ return 0;
54458+ }
54459+#endif
54460+
54461 show_map_vma(m, vma);
54462
54463 if (m->count < m->size) /* vma is copied successfully */
54464@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
54465 .private = &mss,
54466 };
54467
54468+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54469+ if (current->exec_id != m->exec_id) {
54470+ gr_log_badprocpid("smaps");
54471+ return 0;
54472+ }
54473+#endif
54474 memset(&mss, 0, sizeof mss);
54475- mss.vma = vma;
54476- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54477- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54478+
54479+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54480+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54481+#endif
54482+ mss.vma = vma;
54483+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54484+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54485+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54486+ }
54487+#endif
54488
54489 show_map_vma(m, vma);
54490
54491@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
54492 "Swap: %8lu kB\n"
54493 "KernelPageSize: %8lu kB\n"
54494 "MMUPageSize: %8lu kB\n",
54495+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54496+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54497+#else
54498 (vma->vm_end - vma->vm_start) >> 10,
54499+#endif
54500 mss.resident >> 10,
54501 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54502 mss.shared_clean >> 10,
54503diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54504index 8f5c05d..c99c76d 100644
54505--- a/fs/proc/task_nommu.c
54506+++ b/fs/proc/task_nommu.c
54507@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54508 else
54509 bytes += kobjsize(mm);
54510
54511- if (current->fs && current->fs->users > 1)
54512+ if (current->fs && atomic_read(&current->fs->users) > 1)
54513 sbytes += kobjsize(current->fs);
54514 else
54515 bytes += kobjsize(current->fs);
54516@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
54517 if (len < 1)
54518 len = 1;
54519 seq_printf(m, "%*c", len, ' ');
54520- seq_path(m, &file->f_path, "");
54521+ seq_path(m, &file->f_path, "\n\\");
54522 }
54523
54524 seq_putc(m, '\n');
54525diff --git a/fs/readdir.c b/fs/readdir.c
54526index 7723401..30059a6 100644
54527--- a/fs/readdir.c
54528+++ b/fs/readdir.c
54529@@ -16,6 +16,7 @@
54530 #include <linux/security.h>
54531 #include <linux/syscalls.h>
54532 #include <linux/unistd.h>
54533+#include <linux/namei.h>
54534
54535 #include <asm/uaccess.h>
54536
54537@@ -67,6 +68,7 @@ struct old_linux_dirent {
54538
54539 struct readdir_callback {
54540 struct old_linux_dirent __user * dirent;
54541+ struct file * file;
54542 int result;
54543 };
54544
54545@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54546 buf->result = -EOVERFLOW;
54547 return -EOVERFLOW;
54548 }
54549+
54550+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54551+ return 0;
54552+
54553 buf->result++;
54554 dirent = buf->dirent;
54555 if (!access_ok(VERIFY_WRITE, dirent,
54556@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54557
54558 buf.result = 0;
54559 buf.dirent = dirent;
54560+ buf.file = file;
54561
54562 error = vfs_readdir(file, fillonedir, &buf);
54563 if (buf.result)
54564@@ -142,6 +149,7 @@ struct linux_dirent {
54565 struct getdents_callback {
54566 struct linux_dirent __user * current_dir;
54567 struct linux_dirent __user * previous;
54568+ struct file * file;
54569 int count;
54570 int error;
54571 };
54572@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
54573 buf->error = -EOVERFLOW;
54574 return -EOVERFLOW;
54575 }
54576+
54577+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54578+ return 0;
54579+
54580 dirent = buf->previous;
54581 if (dirent) {
54582 if (__put_user(offset, &dirent->d_off))
54583@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
54584 buf.previous = NULL;
54585 buf.count = count;
54586 buf.error = 0;
54587+ buf.file = file;
54588
54589 error = vfs_readdir(file, filldir, &buf);
54590 if (error >= 0)
54591@@ -228,6 +241,7 @@ out:
54592 struct getdents_callback64 {
54593 struct linux_dirent64 __user * current_dir;
54594 struct linux_dirent64 __user * previous;
54595+ struct file *file;
54596 int count;
54597 int error;
54598 };
54599@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
54600 buf->error = -EINVAL; /* only used if we fail.. */
54601 if (reclen > buf->count)
54602 return -EINVAL;
54603+
54604+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54605+ return 0;
54606+
54607 dirent = buf->previous;
54608 if (dirent) {
54609 if (__put_user(offset, &dirent->d_off))
54610@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54611
54612 buf.current_dir = dirent;
54613 buf.previous = NULL;
54614+ buf.file = file;
54615 buf.count = count;
54616 buf.error = 0;
54617
54618@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54619 error = buf.error;
54620 lastdirent = buf.previous;
54621 if (lastdirent) {
54622- typeof(lastdirent->d_off) d_off = file->f_pos;
54623+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
54624 if (__put_user(d_off, &lastdirent->d_off))
54625 error = -EFAULT;
54626 else
54627diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
54628index d42c30c..4fd8718 100644
54629--- a/fs/reiserfs/dir.c
54630+++ b/fs/reiserfs/dir.c
54631@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
54632 struct reiserfs_dir_entry de;
54633 int ret = 0;
54634
54635+ pax_track_stack();
54636+
54637 reiserfs_write_lock(inode->i_sb);
54638
54639 reiserfs_check_lock_depth(inode->i_sb, "readdir");
54640diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
54641index 128d3f7..8840d44 100644
54642--- a/fs/reiserfs/do_balan.c
54643+++ b/fs/reiserfs/do_balan.c
54644@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
54645 return;
54646 }
54647
54648- atomic_inc(&(fs_generation(tb->tb_sb)));
54649+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
54650 do_balance_starts(tb);
54651
54652 /* balance leaf returns 0 except if combining L R and S into
54653diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
54654index 72cb1cc..d0e3181 100644
54655--- a/fs/reiserfs/item_ops.c
54656+++ b/fs/reiserfs/item_ops.c
54657@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
54658 vi->vi_index, vi->vi_type, vi->vi_ih);
54659 }
54660
54661-static struct item_operations stat_data_ops = {
54662+static const struct item_operations stat_data_ops = {
54663 .bytes_number = sd_bytes_number,
54664 .decrement_key = sd_decrement_key,
54665 .is_left_mergeable = sd_is_left_mergeable,
54666@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
54667 vi->vi_index, vi->vi_type, vi->vi_ih);
54668 }
54669
54670-static struct item_operations direct_ops = {
54671+static const struct item_operations direct_ops = {
54672 .bytes_number = direct_bytes_number,
54673 .decrement_key = direct_decrement_key,
54674 .is_left_mergeable = direct_is_left_mergeable,
54675@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
54676 vi->vi_index, vi->vi_type, vi->vi_ih);
54677 }
54678
54679-static struct item_operations indirect_ops = {
54680+static const struct item_operations indirect_ops = {
54681 .bytes_number = indirect_bytes_number,
54682 .decrement_key = indirect_decrement_key,
54683 .is_left_mergeable = indirect_is_left_mergeable,
54684@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
54685 printk("\n");
54686 }
54687
54688-static struct item_operations direntry_ops = {
54689+static const struct item_operations direntry_ops = {
54690 .bytes_number = direntry_bytes_number,
54691 .decrement_key = direntry_decrement_key,
54692 .is_left_mergeable = direntry_is_left_mergeable,
54693@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54694 "Invalid item type observed, run fsck ASAP");
54695 }
54696
54697-static struct item_operations errcatch_ops = {
54698+static const struct item_operations errcatch_ops = {
54699 errcatch_bytes_number,
54700 errcatch_decrement_key,
54701 errcatch_is_left_mergeable,
54702@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54703 #error Item types must use disk-format assigned values.
54704 #endif
54705
54706-struct item_operations *item_ops[TYPE_ANY + 1] = {
54707+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54708 &stat_data_ops,
54709 &indirect_ops,
54710 &direct_ops,
54711diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54712index b5fe0aa..e0e25c4 100644
54713--- a/fs/reiserfs/journal.c
54714+++ b/fs/reiserfs/journal.c
54715@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54716 struct buffer_head *bh;
54717 int i, j;
54718
54719+ pax_track_stack();
54720+
54721 bh = __getblk(dev, block, bufsize);
54722 if (buffer_uptodate(bh))
54723 return (bh);
54724diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54725index 2715791..b8996db 100644
54726--- a/fs/reiserfs/namei.c
54727+++ b/fs/reiserfs/namei.c
54728@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54729 unsigned long savelink = 1;
54730 struct timespec ctime;
54731
54732+ pax_track_stack();
54733+
54734 /* three balancings: (1) old name removal, (2) new name insertion
54735 and (3) maybe "save" link insertion
54736 stat data updates: (1) old directory,
54737diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54738index 9229e55..3d2e3b7 100644
54739--- a/fs/reiserfs/procfs.c
54740+++ b/fs/reiserfs/procfs.c
54741@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54742 "SMALL_TAILS " : "NO_TAILS ",
54743 replay_only(sb) ? "REPLAY_ONLY " : "",
54744 convert_reiserfs(sb) ? "CONV " : "",
54745- atomic_read(&r->s_generation_counter),
54746+ atomic_read_unchecked(&r->s_generation_counter),
54747 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54748 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54749 SF(s_good_search_by_key_reada), SF(s_bmaps),
54750@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54751 struct journal_params *jp = &rs->s_v1.s_journal;
54752 char b[BDEVNAME_SIZE];
54753
54754+ pax_track_stack();
54755+
54756 seq_printf(m, /* on-disk fields */
54757 "jp_journal_1st_block: \t%i\n"
54758 "jp_journal_dev: \t%s[%x]\n"
54759diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54760index d036ee5..4c7dca1 100644
54761--- a/fs/reiserfs/stree.c
54762+++ b/fs/reiserfs/stree.c
54763@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54764 int iter = 0;
54765 #endif
54766
54767+ pax_track_stack();
54768+
54769 BUG_ON(!th->t_trans_id);
54770
54771 init_tb_struct(th, &s_del_balance, sb, path,
54772@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54773 int retval;
54774 int quota_cut_bytes = 0;
54775
54776+ pax_track_stack();
54777+
54778 BUG_ON(!th->t_trans_id);
54779
54780 le_key2cpu_key(&cpu_key, key);
54781@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54782 int quota_cut_bytes;
54783 loff_t tail_pos = 0;
54784
54785+ pax_track_stack();
54786+
54787 BUG_ON(!th->t_trans_id);
54788
54789 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54790@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54791 int retval;
54792 int fs_gen;
54793
54794+ pax_track_stack();
54795+
54796 BUG_ON(!th->t_trans_id);
54797
54798 fs_gen = get_generation(inode->i_sb);
54799@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54800 int fs_gen = 0;
54801 int quota_bytes = 0;
54802
54803+ pax_track_stack();
54804+
54805 BUG_ON(!th->t_trans_id);
54806
54807 if (inode) { /* Do we count quotas for item? */
54808diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54809index 7cb1285..c726cd0 100644
54810--- a/fs/reiserfs/super.c
54811+++ b/fs/reiserfs/super.c
54812@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54813 {.option_name = NULL}
54814 };
54815
54816+ pax_track_stack();
54817+
54818 *blocks = 0;
54819 if (!options || !*options)
54820 /* use default configuration: create tails, journaling on, no
54821diff --git a/fs/select.c b/fs/select.c
54822index fd38ce2..f5381b8 100644
54823--- a/fs/select.c
54824+++ b/fs/select.c
54825@@ -20,6 +20,7 @@
54826 #include <linux/module.h>
54827 #include <linux/slab.h>
54828 #include <linux/poll.h>
54829+#include <linux/security.h>
54830 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54831 #include <linux/file.h>
54832 #include <linux/fdtable.h>
54833@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54834 int retval, i, timed_out = 0;
54835 unsigned long slack = 0;
54836
54837+ pax_track_stack();
54838+
54839 rcu_read_lock();
54840 retval = max_select_fd(n, fds);
54841 rcu_read_unlock();
54842@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54843 /* Allocate small arguments on the stack to save memory and be faster */
54844 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54845
54846+ pax_track_stack();
54847+
54848 ret = -EINVAL;
54849 if (n < 0)
54850 goto out_nofds;
54851@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54852 struct poll_list *walk = head;
54853 unsigned long todo = nfds;
54854
54855+ pax_track_stack();
54856+
54857+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54858 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54859 return -EINVAL;
54860
54861diff --git a/fs/seq_file.c b/fs/seq_file.c
54862index eae7d9d..4ddabe2 100644
54863--- a/fs/seq_file.c
54864+++ b/fs/seq_file.c
54865@@ -9,6 +9,7 @@
54866 #include <linux/module.h>
54867 #include <linux/seq_file.h>
54868 #include <linux/slab.h>
54869+#include <linux/sched.h>
54870
54871 #include <asm/uaccess.h>
54872 #include <asm/page.h>
54873@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
54874 memset(p, 0, sizeof(*p));
54875 mutex_init(&p->lock);
54876 p->op = op;
54877+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54878+ p->exec_id = current->exec_id;
54879+#endif
54880
54881 /*
54882 * Wrappers around seq_open(e.g. swaps_open) need to be
54883@@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54884 return 0;
54885 }
54886 if (!m->buf) {
54887- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54888+ m->size = PAGE_SIZE;
54889+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54890 if (!m->buf)
54891 return -ENOMEM;
54892 }
54893@@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54894 Eoverflow:
54895 m->op->stop(m, p);
54896 kfree(m->buf);
54897- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54898+ m->size <<= 1;
54899+ m->buf = kmalloc(m->size, GFP_KERNEL);
54900 return !m->buf ? -ENOMEM : -EAGAIN;
54901 }
54902
54903@@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54904 m->version = file->f_version;
54905 /* grab buffer if we didn't have one */
54906 if (!m->buf) {
54907- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54908+ m->size = PAGE_SIZE;
54909+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54910 if (!m->buf)
54911 goto Enomem;
54912 }
54913@@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54914 goto Fill;
54915 m->op->stop(m, p);
54916 kfree(m->buf);
54917- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54918+ m->size <<= 1;
54919+ m->buf = kmalloc(m->size, GFP_KERNEL);
54920 if (!m->buf)
54921 goto Enomem;
54922 m->count = 0;
54923@@ -551,7 +559,7 @@ static void single_stop(struct seq_file *p, void *v)
54924 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54925 void *data)
54926 {
54927- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54928+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54929 int res = -ENOMEM;
54930
54931 if (op) {
54932diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54933index 71c29b6..54694dd 100644
54934--- a/fs/smbfs/proc.c
54935+++ b/fs/smbfs/proc.c
54936@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54937
54938 out:
54939 if (server->local_nls != NULL && server->remote_nls != NULL)
54940- server->ops->convert = convert_cp;
54941+ *(void **)&server->ops->convert = convert_cp;
54942 else
54943- server->ops->convert = convert_memcpy;
54944+ *(void **)&server->ops->convert = convert_memcpy;
54945
54946 smb_unlock_server(server);
54947 return n;
54948@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54949
54950 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54951 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54952- server->ops->getattr = smb_proc_getattr_core;
54953+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
54954 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54955- server->ops->getattr = smb_proc_getattr_ff;
54956+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54957 }
54958
54959 /* Decode server capabilities */
54960@@ -3439,7 +3439,7 @@ out:
54961 static void
54962 install_ops(struct smb_ops *dst, struct smb_ops *src)
54963 {
54964- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54965+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54966 }
54967
54968 /* < LANMAN2 */
54969diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54970index 00b2909..2ace383 100644
54971--- a/fs/smbfs/symlink.c
54972+++ b/fs/smbfs/symlink.c
54973@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54974
54975 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54976 {
54977- char *s = nd_get_link(nd);
54978+ const char *s = nd_get_link(nd);
54979 if (!IS_ERR(s))
54980 __putname(s);
54981 }
54982diff --git a/fs/splice.c b/fs/splice.c
54983index bb92b7c..5aa72b0 100644
54984--- a/fs/splice.c
54985+++ b/fs/splice.c
54986@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54987 pipe_lock(pipe);
54988
54989 for (;;) {
54990- if (!pipe->readers) {
54991+ if (!atomic_read(&pipe->readers)) {
54992 send_sig(SIGPIPE, current, 0);
54993 if (!ret)
54994 ret = -EPIPE;
54995@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54996 do_wakeup = 0;
54997 }
54998
54999- pipe->waiting_writers++;
55000+ atomic_inc(&pipe->waiting_writers);
55001 pipe_wait(pipe);
55002- pipe->waiting_writers--;
55003+ atomic_dec(&pipe->waiting_writers);
55004 }
55005
55006 pipe_unlock(pipe);
55007@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
55008 .spd_release = spd_release_page,
55009 };
55010
55011+ pax_track_stack();
55012+
55013 index = *ppos >> PAGE_CACHE_SHIFT;
55014 loff = *ppos & ~PAGE_CACHE_MASK;
55015 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
55016@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
55017 old_fs = get_fs();
55018 set_fs(get_ds());
55019 /* The cast to a user pointer is valid due to the set_fs() */
55020- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
55021+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
55022 set_fs(old_fs);
55023
55024 return res;
55025@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
55026 old_fs = get_fs();
55027 set_fs(get_ds());
55028 /* The cast to a user pointer is valid due to the set_fs() */
55029- res = vfs_write(file, (const char __user *)buf, count, &pos);
55030+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
55031 set_fs(old_fs);
55032
55033 return res;
55034@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
55035 .spd_release = spd_release_page,
55036 };
55037
55038+ pax_track_stack();
55039+
55040 index = *ppos >> PAGE_CACHE_SHIFT;
55041 offset = *ppos & ~PAGE_CACHE_MASK;
55042 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
55043@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
55044 goto err;
55045
55046 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
55047- vec[i].iov_base = (void __user *) page_address(page);
55048+ vec[i].iov_base = (__force void __user *) page_address(page);
55049 vec[i].iov_len = this_len;
55050 pages[i] = page;
55051 spd.nr_pages++;
55052@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
55053 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
55054 {
55055 while (!pipe->nrbufs) {
55056- if (!pipe->writers)
55057+ if (!atomic_read(&pipe->writers))
55058 return 0;
55059
55060- if (!pipe->waiting_writers && sd->num_spliced)
55061+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
55062 return 0;
55063
55064 if (sd->flags & SPLICE_F_NONBLOCK)
55065@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
55066 * out of the pipe right after the splice_to_pipe(). So set
55067 * PIPE_READERS appropriately.
55068 */
55069- pipe->readers = 1;
55070+ atomic_set(&pipe->readers, 1);
55071
55072 current->splice_pipe = pipe;
55073 }
55074@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
55075 .spd_release = spd_release_page,
55076 };
55077
55078+ pax_track_stack();
55079+
55080 pipe = pipe_info(file->f_path.dentry->d_inode);
55081 if (!pipe)
55082 return -EBADF;
55083@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55084 ret = -ERESTARTSYS;
55085 break;
55086 }
55087- if (!pipe->writers)
55088+ if (!atomic_read(&pipe->writers))
55089 break;
55090- if (!pipe->waiting_writers) {
55091+ if (!atomic_read(&pipe->waiting_writers)) {
55092 if (flags & SPLICE_F_NONBLOCK) {
55093 ret = -EAGAIN;
55094 break;
55095@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55096 pipe_lock(pipe);
55097
55098 while (pipe->nrbufs >= PIPE_BUFFERS) {
55099- if (!pipe->readers) {
55100+ if (!atomic_read(&pipe->readers)) {
55101 send_sig(SIGPIPE, current, 0);
55102 ret = -EPIPE;
55103 break;
55104@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55105 ret = -ERESTARTSYS;
55106 break;
55107 }
55108- pipe->waiting_writers++;
55109+ atomic_inc(&pipe->waiting_writers);
55110 pipe_wait(pipe);
55111- pipe->waiting_writers--;
55112+ atomic_dec(&pipe->waiting_writers);
55113 }
55114
55115 pipe_unlock(pipe);
55116@@ -1786,14 +1792,14 @@ retry:
55117 pipe_double_lock(ipipe, opipe);
55118
55119 do {
55120- if (!opipe->readers) {
55121+ if (!atomic_read(&opipe->readers)) {
55122 send_sig(SIGPIPE, current, 0);
55123 if (!ret)
55124 ret = -EPIPE;
55125 break;
55126 }
55127
55128- if (!ipipe->nrbufs && !ipipe->writers)
55129+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
55130 break;
55131
55132 /*
55133@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55134 pipe_double_lock(ipipe, opipe);
55135
55136 do {
55137- if (!opipe->readers) {
55138+ if (!atomic_read(&opipe->readers)) {
55139 send_sig(SIGPIPE, current, 0);
55140 if (!ret)
55141 ret = -EPIPE;
55142@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55143 * return EAGAIN if we have the potential of some data in the
55144 * future, otherwise just return 0
55145 */
55146- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
55147+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
55148 ret = -EAGAIN;
55149
55150 pipe_unlock(ipipe);
55151diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
55152index e020183..18d64b4 100644
55153--- a/fs/sysfs/dir.c
55154+++ b/fs/sysfs/dir.c
55155@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
55156 struct sysfs_dirent *sd;
55157 int rc;
55158
55159+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55160+ const char *parent_name = parent_sd->s_name;
55161+
55162+ mode = S_IFDIR | S_IRWXU;
55163+
55164+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
55165+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
55166+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
55167+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
55168+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
55169+#endif
55170+
55171 /* allocate */
55172 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
55173 if (!sd)
55174diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
55175index 7118a38..70af853 100644
55176--- a/fs/sysfs/file.c
55177+++ b/fs/sysfs/file.c
55178@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
55179
55180 struct sysfs_open_dirent {
55181 atomic_t refcnt;
55182- atomic_t event;
55183+ atomic_unchecked_t event;
55184 wait_queue_head_t poll;
55185 struct list_head buffers; /* goes through sysfs_buffer.list */
55186 };
55187@@ -53,7 +53,7 @@ struct sysfs_buffer {
55188 size_t count;
55189 loff_t pos;
55190 char * page;
55191- struct sysfs_ops * ops;
55192+ const struct sysfs_ops * ops;
55193 struct mutex mutex;
55194 int needs_read_fill;
55195 int event;
55196@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55197 {
55198 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
55199 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55200- struct sysfs_ops * ops = buffer->ops;
55201+ const struct sysfs_ops * ops = buffer->ops;
55202 int ret = 0;
55203 ssize_t count;
55204
55205@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55206 if (!sysfs_get_active_two(attr_sd))
55207 return -ENODEV;
55208
55209- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
55210+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
55211 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
55212
55213 sysfs_put_active_two(attr_sd);
55214@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
55215 {
55216 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
55217 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55218- struct sysfs_ops * ops = buffer->ops;
55219+ const struct sysfs_ops * ops = buffer->ops;
55220 int rc;
55221
55222 /* need attr_sd for attr and ops, its parent for kobj */
55223@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
55224 return -ENOMEM;
55225
55226 atomic_set(&new_od->refcnt, 0);
55227- atomic_set(&new_od->event, 1);
55228+ atomic_set_unchecked(&new_od->event, 1);
55229 init_waitqueue_head(&new_od->poll);
55230 INIT_LIST_HEAD(&new_od->buffers);
55231 goto retry;
55232@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
55233 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
55234 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
55235 struct sysfs_buffer *buffer;
55236- struct sysfs_ops *ops;
55237+ const struct sysfs_ops *ops;
55238 int error = -EACCES;
55239 char *p;
55240
55241@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
55242
55243 sysfs_put_active_two(attr_sd);
55244
55245- if (buffer->event != atomic_read(&od->event))
55246+ if (buffer->event != atomic_read_unchecked(&od->event))
55247 goto trigger;
55248
55249 return DEFAULT_POLLMASK;
55250@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
55251
55252 od = sd->s_attr.open;
55253 if (od) {
55254- atomic_inc(&od->event);
55255+ atomic_inc_unchecked(&od->event);
55256 wake_up_interruptible(&od->poll);
55257 }
55258
55259diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
55260index c5081ad..342ea86 100644
55261--- a/fs/sysfs/symlink.c
55262+++ b/fs/sysfs/symlink.c
55263@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55264
55265 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
55266 {
55267- char *page = nd_get_link(nd);
55268+ const char *page = nd_get_link(nd);
55269 if (!IS_ERR(page))
55270 free_page((unsigned long)page);
55271 }
55272diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
55273index 1e06853..b06d325 100644
55274--- a/fs/udf/balloc.c
55275+++ b/fs/udf/balloc.c
55276@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
55277
55278 mutex_lock(&sbi->s_alloc_mutex);
55279 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55280- if (bloc->logicalBlockNum < 0 ||
55281- (bloc->logicalBlockNum + count) >
55282- partmap->s_partition_len) {
55283+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55284 udf_debug("%d < %d || %d + %d > %d\n",
55285 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
55286 count, partmap->s_partition_len);
55287@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
55288
55289 mutex_lock(&sbi->s_alloc_mutex);
55290 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
55291- if (bloc->logicalBlockNum < 0 ||
55292- (bloc->logicalBlockNum + count) >
55293- partmap->s_partition_len) {
55294+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
55295 udf_debug("%d < %d || %d + %d > %d\n",
55296 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
55297 partmap->s_partition_len);
55298diff --git a/fs/udf/inode.c b/fs/udf/inode.c
55299index 6d24c2c..fff470f 100644
55300--- a/fs/udf/inode.c
55301+++ b/fs/udf/inode.c
55302@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
55303 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
55304 int lastblock = 0;
55305
55306+ pax_track_stack();
55307+
55308 prev_epos.offset = udf_file_entry_alloc_offset(inode);
55309 prev_epos.block = iinfo->i_location;
55310 prev_epos.bh = NULL;
55311diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55312index 9215700..bf1f68e 100644
55313--- a/fs/udf/misc.c
55314+++ b/fs/udf/misc.c
55315@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55316
55317 u8 udf_tag_checksum(const struct tag *t)
55318 {
55319- u8 *data = (u8 *)t;
55320+ const u8 *data = (const u8 *)t;
55321 u8 checksum = 0;
55322 int i;
55323 for (i = 0; i < sizeof(struct tag); ++i)
55324diff --git a/fs/utimes.c b/fs/utimes.c
55325index e4c75db..b4df0e0 100644
55326--- a/fs/utimes.c
55327+++ b/fs/utimes.c
55328@@ -1,6 +1,7 @@
55329 #include <linux/compiler.h>
55330 #include <linux/file.h>
55331 #include <linux/fs.h>
55332+#include <linux/security.h>
55333 #include <linux/linkage.h>
55334 #include <linux/mount.h>
55335 #include <linux/namei.h>
55336@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55337 goto mnt_drop_write_and_out;
55338 }
55339 }
55340+
55341+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55342+ error = -EACCES;
55343+ goto mnt_drop_write_and_out;
55344+ }
55345+
55346 mutex_lock(&inode->i_mutex);
55347 error = notify_change(path->dentry, &newattrs);
55348 mutex_unlock(&inode->i_mutex);
55349diff --git a/fs/xattr.c b/fs/xattr.c
55350index 6d4f6d3..cda3958 100644
55351--- a/fs/xattr.c
55352+++ b/fs/xattr.c
55353@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55354 * Extended attribute SET operations
55355 */
55356 static long
55357-setxattr(struct dentry *d, const char __user *name, const void __user *value,
55358+setxattr(struct path *path, const char __user *name, const void __user *value,
55359 size_t size, int flags)
55360 {
55361 int error;
55362@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55363 return PTR_ERR(kvalue);
55364 }
55365
55366- error = vfs_setxattr(d, kname, kvalue, size, flags);
55367+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55368+ error = -EACCES;
55369+ goto out;
55370+ }
55371+
55372+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55373+out:
55374 kfree(kvalue);
55375 return error;
55376 }
55377@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
55378 return error;
55379 error = mnt_want_write(path.mnt);
55380 if (!error) {
55381- error = setxattr(path.dentry, name, value, size, flags);
55382+ error = setxattr(&path, name, value, size, flags);
55383 mnt_drop_write(path.mnt);
55384 }
55385 path_put(&path);
55386@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
55387 return error;
55388 error = mnt_want_write(path.mnt);
55389 if (!error) {
55390- error = setxattr(path.dentry, name, value, size, flags);
55391+ error = setxattr(&path, name, value, size, flags);
55392 mnt_drop_write(path.mnt);
55393 }
55394 path_put(&path);
55395@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55396 const void __user *,value, size_t, size, int, flags)
55397 {
55398 struct file *f;
55399- struct dentry *dentry;
55400 int error = -EBADF;
55401
55402 f = fget(fd);
55403 if (!f)
55404 return error;
55405- dentry = f->f_path.dentry;
55406- audit_inode(NULL, dentry);
55407+ audit_inode(NULL, f->f_path.dentry);
55408 error = mnt_want_write_file(f);
55409 if (!error) {
55410- error = setxattr(dentry, name, value, size, flags);
55411+ error = setxattr(&f->f_path, name, value, size, flags);
55412 mnt_drop_write(f->f_path.mnt);
55413 }
55414 fput(f);
55415diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55416index c6ad7c7..f2847a7 100644
55417--- a/fs/xattr_acl.c
55418+++ b/fs/xattr_acl.c
55419@@ -17,8 +17,8 @@
55420 struct posix_acl *
55421 posix_acl_from_xattr(const void *value, size_t size)
55422 {
55423- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55424- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55425+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55426+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55427 int count;
55428 struct posix_acl *acl;
55429 struct posix_acl_entry *acl_e;
55430diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
55431index 942362f..88f96f5 100644
55432--- a/fs/xfs/linux-2.6/xfs_ioctl.c
55433+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
55434@@ -134,7 +134,7 @@ xfs_find_handle(
55435 }
55436
55437 error = -EFAULT;
55438- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55439+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55440 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55441 goto out_put;
55442
55443@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
55444 if (IS_ERR(dentry))
55445 return PTR_ERR(dentry);
55446
55447- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
55448+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
55449 if (!kbuf)
55450 goto out_dput;
55451
55452@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
55453 xfs_mount_t *mp,
55454 void __user *arg)
55455 {
55456- xfs_fsop_geom_t fsgeo;
55457+ xfs_fsop_geom_t fsgeo;
55458 int error;
55459
55460 error = xfs_fs_geometry(mp, &fsgeo, 3);
55461diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
55462index bad485a..479bd32 100644
55463--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
55464+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
55465@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
55466 xfs_fsop_geom_t fsgeo;
55467 int error;
55468
55469+ memset(&fsgeo, 0, sizeof(fsgeo));
55470 error = xfs_fs_geometry(mp, &fsgeo, 3);
55471 if (error)
55472 return -error;
55473diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
55474index 1f3b4b8..6102f6d 100644
55475--- a/fs/xfs/linux-2.6/xfs_iops.c
55476+++ b/fs/xfs/linux-2.6/xfs_iops.c
55477@@ -468,7 +468,7 @@ xfs_vn_put_link(
55478 struct nameidata *nd,
55479 void *p)
55480 {
55481- char *s = nd_get_link(nd);
55482+ const char *s = nd_get_link(nd);
55483
55484 if (!IS_ERR(s))
55485 kfree(s);
55486diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55487index 8971fb0..5fc1eb2 100644
55488--- a/fs/xfs/xfs_bmap.c
55489+++ b/fs/xfs/xfs_bmap.c
55490@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
55491 int nmap,
55492 int ret_nmap);
55493 #else
55494-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55495+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55496 #endif /* DEBUG */
55497
55498 #if defined(XFS_RW_TRACE)
55499diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55500index e89734e..5e84d8d 100644
55501--- a/fs/xfs/xfs_dir2_sf.c
55502+++ b/fs/xfs/xfs_dir2_sf.c
55503@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
55504 }
55505
55506 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
55507- if (filldir(dirent, sfep->name, sfep->namelen,
55508+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55509+ char name[sfep->namelen];
55510+ memcpy(name, sfep->name, sfep->namelen);
55511+ if (filldir(dirent, name, sfep->namelen,
55512+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
55513+ *offset = off & 0x7fffffff;
55514+ return 0;
55515+ }
55516+ } else if (filldir(dirent, sfep->name, sfep->namelen,
55517 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55518 *offset = off & 0x7fffffff;
55519 return 0;
55520diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
55521index 8f32f50..b6a41e8 100644
55522--- a/fs/xfs/xfs_vnodeops.c
55523+++ b/fs/xfs/xfs_vnodeops.c
55524@@ -564,13 +564,18 @@ xfs_readlink(
55525
55526 xfs_ilock(ip, XFS_ILOCK_SHARED);
55527
55528- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
55529- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
55530-
55531 pathlen = ip->i_d.di_size;
55532 if (!pathlen)
55533 goto out;
55534
55535+ if (pathlen > MAXPATHLEN) {
55536+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
55537+ __func__, (unsigned long long)ip->i_ino, pathlen);
55538+ ASSERT(0);
55539+ error = XFS_ERROR(EFSCORRUPTED);
55540+ goto out;
55541+ }
55542+
55543 if (ip->i_df.if_flags & XFS_IFINLINE) {
55544 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
55545 link[pathlen] = '\0';
55546diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55547new file mode 100644
55548index 0000000..7026cbd
55549--- /dev/null
55550+++ b/grsecurity/Kconfig
55551@@ -0,0 +1,1074 @@
55552+#
55553+# grecurity configuration
55554+#
55555+
55556+menu "Grsecurity"
55557+
55558+config GRKERNSEC
55559+ bool "Grsecurity"
55560+ select CRYPTO
55561+ select CRYPTO_SHA256
55562+ help
55563+ If you say Y here, you will be able to configure many features
55564+ that will enhance the security of your system. It is highly
55565+ recommended that you say Y here and read through the help
55566+ for each option so that you fully understand the features and
55567+ can evaluate their usefulness for your machine.
55568+
55569+choice
55570+ prompt "Security Level"
55571+ depends on GRKERNSEC
55572+ default GRKERNSEC_CUSTOM
55573+
55574+config GRKERNSEC_LOW
55575+ bool "Low"
55576+ select GRKERNSEC_LINK
55577+ select GRKERNSEC_FIFO
55578+ select GRKERNSEC_RANDNET
55579+ select GRKERNSEC_DMESG
55580+ select GRKERNSEC_CHROOT
55581+ select GRKERNSEC_CHROOT_CHDIR
55582+
55583+ help
55584+ If you choose this option, several of the grsecurity options will
55585+ be enabled that will give you greater protection against a number
55586+ of attacks, while assuring that none of your software will have any
55587+ conflicts with the additional security measures. If you run a lot
55588+ of unusual software, or you are having problems with the higher
55589+ security levels, you should say Y here. With this option, the
55590+ following features are enabled:
55591+
55592+ - Linking restrictions
55593+ - FIFO restrictions
55594+ - Restricted dmesg
55595+ - Enforced chdir("/") on chroot
55596+ - Runtime module disabling
55597+
55598+config GRKERNSEC_MEDIUM
55599+ bool "Medium"
55600+ select PAX
55601+ select PAX_EI_PAX
55602+ select PAX_PT_PAX_FLAGS
55603+ select PAX_HAVE_ACL_FLAGS
55604+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55605+ select GRKERNSEC_CHROOT
55606+ select GRKERNSEC_CHROOT_SYSCTL
55607+ select GRKERNSEC_LINK
55608+ select GRKERNSEC_FIFO
55609+ select GRKERNSEC_DMESG
55610+ select GRKERNSEC_RANDNET
55611+ select GRKERNSEC_FORKFAIL
55612+ select GRKERNSEC_TIME
55613+ select GRKERNSEC_SIGNAL
55614+ select GRKERNSEC_CHROOT
55615+ select GRKERNSEC_CHROOT_UNIX
55616+ select GRKERNSEC_CHROOT_MOUNT
55617+ select GRKERNSEC_CHROOT_PIVOT
55618+ select GRKERNSEC_CHROOT_DOUBLE
55619+ select GRKERNSEC_CHROOT_CHDIR
55620+ select GRKERNSEC_CHROOT_MKNOD
55621+ select GRKERNSEC_PROC
55622+ select GRKERNSEC_PROC_USERGROUP
55623+ select PAX_RANDUSTACK
55624+ select PAX_ASLR
55625+ select PAX_RANDMMAP
55626+ select PAX_REFCOUNT if (X86 || SPARC64)
55627+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55628+
55629+ help
55630+ If you say Y here, several features in addition to those included
55631+ in the low additional security level will be enabled. These
55632+ features provide even more security to your system, though in rare
55633+ cases they may be incompatible with very old or poorly written
55634+ software. If you enable this option, make sure that your auth
55635+ service (identd) is running as gid 1001. With this option,
55636+ the following features (in addition to those provided in the
55637+ low additional security level) will be enabled:
55638+
55639+ - Failed fork logging
55640+ - Time change logging
55641+ - Signal logging
55642+ - Deny mounts in chroot
55643+ - Deny double chrooting
55644+ - Deny sysctl writes in chroot
55645+ - Deny mknod in chroot
55646+ - Deny access to abstract AF_UNIX sockets out of chroot
55647+ - Deny pivot_root in chroot
55648+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55649+ - /proc restrictions with special GID set to 10 (usually wheel)
55650+ - Address Space Layout Randomization (ASLR)
55651+ - Prevent exploitation of most refcount overflows
55652+ - Bounds checking of copying between the kernel and userland
55653+
55654+config GRKERNSEC_HIGH
55655+ bool "High"
55656+ select GRKERNSEC_LINK
55657+ select GRKERNSEC_FIFO
55658+ select GRKERNSEC_DMESG
55659+ select GRKERNSEC_FORKFAIL
55660+ select GRKERNSEC_TIME
55661+ select GRKERNSEC_SIGNAL
55662+ select GRKERNSEC_CHROOT
55663+ select GRKERNSEC_CHROOT_SHMAT
55664+ select GRKERNSEC_CHROOT_UNIX
55665+ select GRKERNSEC_CHROOT_MOUNT
55666+ select GRKERNSEC_CHROOT_FCHDIR
55667+ select GRKERNSEC_CHROOT_PIVOT
55668+ select GRKERNSEC_CHROOT_DOUBLE
55669+ select GRKERNSEC_CHROOT_CHDIR
55670+ select GRKERNSEC_CHROOT_MKNOD
55671+ select GRKERNSEC_CHROOT_CAPS
55672+ select GRKERNSEC_CHROOT_SYSCTL
55673+ select GRKERNSEC_CHROOT_FINDTASK
55674+ select GRKERNSEC_SYSFS_RESTRICT
55675+ select GRKERNSEC_PROC
55676+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55677+ select GRKERNSEC_HIDESYM
55678+ select GRKERNSEC_BRUTE
55679+ select GRKERNSEC_PROC_USERGROUP
55680+ select GRKERNSEC_KMEM
55681+ select GRKERNSEC_RESLOG
55682+ select GRKERNSEC_RANDNET
55683+ select GRKERNSEC_PROC_ADD
55684+ select GRKERNSEC_CHROOT_CHMOD
55685+ select GRKERNSEC_CHROOT_NICE
55686+ select GRKERNSEC_SETXID
55687+ select GRKERNSEC_AUDIT_MOUNT
55688+ select GRKERNSEC_MODHARDEN if (MODULES)
55689+ select GRKERNSEC_HARDEN_PTRACE
55690+ select GRKERNSEC_PTRACE_READEXEC
55691+ select GRKERNSEC_VM86 if (X86_32)
55692+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55693+ select PAX
55694+ select PAX_RANDUSTACK
55695+ select PAX_ASLR
55696+ select PAX_RANDMMAP
55697+ select PAX_NOEXEC
55698+ select PAX_MPROTECT
55699+ select PAX_EI_PAX
55700+ select PAX_PT_PAX_FLAGS
55701+ select PAX_HAVE_ACL_FLAGS
55702+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55703+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55704+ select PAX_RANDKSTACK if (X86_TSC && X86)
55705+ select PAX_SEGMEXEC if (X86_32)
55706+ select PAX_PAGEEXEC
55707+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55708+ select PAX_EMUTRAMP if (PARISC)
55709+ select PAX_EMUSIGRT if (PARISC)
55710+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55711+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55712+ select PAX_REFCOUNT if (X86 || SPARC64)
55713+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55714+ help
55715+ If you say Y here, many of the features of grsecurity will be
55716+ enabled, which will protect you against many kinds of attacks
55717+ against your system. The heightened security comes at a cost
55718+ of an increased chance of incompatibilities with rare software
55719+ on your machine. Since this security level enables PaX, you should
55720+ view <http://pax.grsecurity.net> and read about the PaX
55721+ project. While you are there, download chpax and run it on
55722+ binaries that cause problems with PaX. Also remember that
55723+ since the /proc restrictions are enabled, you must run your
55724+ identd as gid 1001. This security level enables the following
55725+ features in addition to those listed in the low and medium
55726+ security levels:
55727+
55728+ - Additional /proc restrictions
55729+ - Chmod restrictions in chroot
55730+ - No signals, ptrace, or viewing of processes outside of chroot
55731+ - Capability restrictions in chroot
55732+ - Deny fchdir out of chroot
55733+ - Priority restrictions in chroot
55734+ - Segmentation-based implementation of PaX
55735+ - Mprotect restrictions
55736+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55737+ - Kernel stack randomization
55738+ - Mount/unmount/remount logging
55739+ - Kernel symbol hiding
55740+ - Hardening of module auto-loading
55741+ - Ptrace restrictions
55742+ - Restricted vm86 mode
55743+ - Restricted sysfs/debugfs
55744+ - Active kernel exploit response
55745+
55746+config GRKERNSEC_CUSTOM
55747+ bool "Custom"
55748+ help
55749+ If you say Y here, you will be able to configure every grsecurity
55750+ option, which allows you to enable many more features that aren't
55751+ covered in the basic security levels. These additional features
55752+ include TPE, socket restrictions, and the sysctl system for
55753+ grsecurity. It is advised that you read through the help for
55754+ each option to determine its usefulness in your situation.
55755+
55756+endchoice
55757+
55758+menu "Memory Protections"
55759+depends on GRKERNSEC
55760+
55761+config GRKERNSEC_KMEM
55762+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55763+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55764+ help
55765+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55766+ be written to or read from to modify or leak the contents of the running
55767+ kernel. /dev/port will also not be allowed to be opened. If you have module
55768+ support disabled, enabling this will close up four ways that are
55769+ currently used to insert malicious code into the running kernel.
55770+ Even with all these features enabled, we still highly recommend that
55771+ you use the RBAC system, as it is still possible for an attacker to
55772+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55773+ If you are not using XFree86, you may be able to stop this additional
55774+ case by enabling the 'Disable privileged I/O' option. Though nothing
55775+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55776+ but only to video memory, which is the only writing we allow in this
55777+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55778+ not be allowed to mprotect it with PROT_WRITE later.
55779+ It is highly recommended that you say Y here if you meet all the
55780+ conditions above.
55781+
55782+config GRKERNSEC_VM86
55783+ bool "Restrict VM86 mode"
55784+ depends on X86_32
55785+
55786+ help
55787+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55788+ make use of a special execution mode on 32bit x86 processors called
55789+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55790+ video cards and will still work with this option enabled. The purpose
55791+ of the option is to prevent exploitation of emulation errors in
55792+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55793+ Nearly all users should be able to enable this option.
55794+
55795+config GRKERNSEC_IO
55796+ bool "Disable privileged I/O"
55797+ depends on X86
55798+ select RTC_CLASS
55799+ select RTC_INTF_DEV
55800+ select RTC_DRV_CMOS
55801+
55802+ help
55803+ If you say Y here, all ioperm and iopl calls will return an error.
55804+ Ioperm and iopl can be used to modify the running kernel.
55805+ Unfortunately, some programs need this access to operate properly,
55806+ the most notable of which are XFree86 and hwclock. hwclock can be
55807+ remedied by having RTC support in the kernel, so real-time
55808+ clock support is enabled if this option is enabled, to ensure
55809+ that hwclock operates correctly. XFree86 still will not
55810+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55811+ IF YOU USE XFree86. If you use XFree86 and you still want to
55812+ protect your kernel against modification, use the RBAC system.
55813+
55814+config GRKERNSEC_PROC_MEMMAP
55815+ bool "Harden ASLR against information leaks and entropy reduction"
55816+ default y if (PAX_NOEXEC || PAX_ASLR)
55817+ depends on PAX_NOEXEC || PAX_ASLR
55818+ help
55819+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55820+ give no information about the addresses of its mappings if
55821+ PaX features that rely on random addresses are enabled on the task.
55822+ In addition to sanitizing this information and disabling other
55823+ dangerous sources of information, this option causes reads of sensitive
55824+ /proc/<pid> entries to be restricted where the file descriptor was opened
55825+ in a different task than the one performing the read. Such attempts are logged.
55826+ Finally, this option limits argv/env strings for suid/sgid binaries
55827+ to 1MB to prevent a complete exhaustion of the stack entropy provided
55828+ by ASLR.
55829+ If you use PaX it is essential that you say Y here as it closes up
55830+ several holes that make full ASLR useless for suid/sgid binaries.
55831+
55832+config GRKERNSEC_BRUTE
55833+ bool "Deter exploit bruteforcing"
55834+ help
55835+ If you say Y here, attempts to bruteforce exploits against forking
55836+ daemons such as apache or sshd, as well as against suid/sgid binaries
55837+ will be deterred. When a child of a forking daemon is killed by PaX
55838+ or crashes due to an illegal instruction or other suspicious signal,
55839+ the parent process will be delayed 30 seconds upon every subsequent
55840+ fork until the administrator is able to assess the situation and
55841+ restart the daemon.
55842+ In the suid/sgid case, the attempt is logged, the user has all their
55843+ processes terminated, and they are prevented from executing any further
55844+ processes for 15 minutes.
55845+ It is recommended that you also enable signal logging in the auditing
55846+ section so that logs are generated when a process triggers a suspicious
55847+ signal.
55848+ If the sysctl option is enabled, a sysctl option with name
55849+ "deter_bruteforce" is created.
55850+
55851+config GRKERNSEC_MODHARDEN
55852+ bool "Harden module auto-loading"
55853+ depends on MODULES
55854+ help
55855+ If you say Y here, module auto-loading in response to use of some
55856+ feature implemented by an unloaded module will be restricted to
55857+ root users. Enabling this option helps defend against attacks
55858+ by unprivileged users who abuse the auto-loading behavior to
55859+ cause a vulnerable module to load that is then exploited.
55860+
55861+ If this option prevents a legitimate use of auto-loading for a
55862+ non-root user, the administrator can execute modprobe manually
55863+ with the exact name of the module mentioned in the alert log.
55864+ Alternatively, the administrator can add the module to the list
55865+ of modules loaded at boot by modifying init scripts.
55866+
55867+ Modification of init scripts will most likely be needed on
55868+ Ubuntu servers with encrypted home directory support enabled,
55869+ as the first non-root user logging in will cause the ecb(aes),
55870+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55871+
55872+config GRKERNSEC_HIDESYM
55873+ bool "Hide kernel symbols"
55874+ help
55875+ If you say Y here, getting information on loaded modules, and
55876+ displaying all kernel symbols through a syscall will be restricted
55877+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55878+ /proc/kallsyms will be restricted to the root user. The RBAC
55879+ system can hide that entry even from root.
55880+
55881+ This option also prevents leaking of kernel addresses through
55882+ several /proc entries.
55883+
55884+ Note that this option is only effective provided the following
55885+ conditions are met:
55886+ 1) The kernel using grsecurity is not precompiled by some distribution
55887+ 2) You have also enabled GRKERNSEC_DMESG
55888+ 3) You are using the RBAC system and hiding other files such as your
55889+ kernel image and System.map. Alternatively, enabling this option
55890+ causes the permissions on /boot, /lib/modules, and the kernel
55891+ source directory to change at compile time to prevent
55892+ reading by non-root users.
55893+ If the above conditions are met, this option will aid in providing a
55894+ useful protection against local kernel exploitation of overflows
55895+ and arbitrary read/write vulnerabilities.
55896+
55897+config GRKERNSEC_KERN_LOCKOUT
55898+ bool "Active kernel exploit response"
55899+ depends on X86 || ARM || PPC || SPARC
55900+ help
55901+ If you say Y here, when a PaX alert is triggered due to suspicious
55902+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55903+ or an OOPs occurs due to bad memory accesses, instead of just
55904+ terminating the offending process (and potentially allowing
55905+ a subsequent exploit from the same user), we will take one of two
55906+ actions:
55907+ If the user was root, we will panic the system
55908+ If the user was non-root, we will log the attempt, terminate
55909+ all processes owned by the user, then prevent them from creating
55910+ any new processes until the system is restarted
55911+ This deters repeated kernel exploitation/bruteforcing attempts
55912+ and is useful for later forensics.
55913+
55914+endmenu
55915+menu "Role Based Access Control Options"
55916+depends on GRKERNSEC
55917+
55918+config GRKERNSEC_RBAC_DEBUG
55919+ bool
55920+
55921+config GRKERNSEC_NO_RBAC
55922+ bool "Disable RBAC system"
55923+ help
55924+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55925+ preventing the RBAC system from being enabled. You should only say Y
55926+ here if you have no intention of using the RBAC system, so as to prevent
55927+ an attacker with root access from misusing the RBAC system to hide files
55928+ and processes when loadable module support and /dev/[k]mem have been
55929+ locked down.
55930+
55931+config GRKERNSEC_ACL_HIDEKERN
55932+ bool "Hide kernel processes"
55933+ help
55934+ If you say Y here, all kernel threads will be hidden to all
55935+ processes but those whose subject has the "view hidden processes"
55936+ flag.
55937+
55938+config GRKERNSEC_ACL_MAXTRIES
55939+ int "Maximum tries before password lockout"
55940+ default 3
55941+ help
55942+ This option enforces the maximum number of times a user can attempt
55943+ to authorize themselves with the grsecurity RBAC system before being
55944+ denied the ability to attempt authorization again for a specified time.
55945+ The lower the number, the harder it will be to brute-force a password.
55946+
55947+config GRKERNSEC_ACL_TIMEOUT
55948+ int "Time to wait after max password tries, in seconds"
55949+ default 30
55950+ help
55951+ This option specifies the time the user must wait after attempting to
55952+ authorize to the RBAC system with the maximum number of invalid
55953+ passwords. The higher the number, the harder it will be to brute-force
55954+ a password.
55955+
55956+endmenu
55957+menu "Filesystem Protections"
55958+depends on GRKERNSEC
55959+
55960+config GRKERNSEC_PROC
55961+ bool "Proc restrictions"
55962+ help
55963+ If you say Y here, the permissions of the /proc filesystem
55964+ will be altered to enhance system security and privacy. You MUST
55965+ choose either a user only restriction or a user and group restriction.
55966+ Depending upon the option you choose, you can either restrict users to
55967+ see only the processes they themselves run, or choose a group that can
55968+ view all processes and files normally restricted to root if you choose
55969+ the "restrict to user only" option. NOTE: If you're running identd as
55970+ a non-root user, you will have to run it as the group you specify here.
55971+
55972+config GRKERNSEC_PROC_USER
55973+ bool "Restrict /proc to user only"
55974+ depends on GRKERNSEC_PROC
55975+ help
55976+ If you say Y here, non-root users will only be able to view their own
55977+ processes, and will be restricted from viewing network-related information,
55978+ and viewing kernel symbol and module information.
55979+
55980+config GRKERNSEC_PROC_USERGROUP
55981+ bool "Allow special group"
55982+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55983+ help
55984+ If you say Y here, you will be able to select a group that will be
55985+ able to view all processes and network-related information. If you've
55986+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55987+ remain hidden. This option is useful if you want to run identd as
55988+ a non-root user.
55989+
55990+config GRKERNSEC_PROC_GID
55991+ int "GID for special group"
55992+ depends on GRKERNSEC_PROC_USERGROUP
55993+ default 1001
55994+
55995+config GRKERNSEC_PROC_ADD
55996+ bool "Additional restrictions"
55997+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55998+ help
55999+ If you say Y here, additional restrictions will be placed on
56000+ /proc that keep normal users from viewing device information and
56001+ slabinfo information that could be useful for exploits.
56002+
56003+config GRKERNSEC_LINK
56004+ bool "Linking restrictions"
56005+ help
56006+ If you say Y here, /tmp race exploits will be prevented, since users
56007+ will no longer be able to follow symlinks owned by other users in
56008+ world-writable +t directories (e.g. /tmp), unless the owner of the
56009+ symlink is the owner of the directory. Users will also not be
56010+ able to hardlink to files they do not own. If the sysctl option is
56011+ enabled, a sysctl option with name "linking_restrictions" is created.
56012+
56013+config GRKERNSEC_FIFO
56014+ bool "FIFO restrictions"
56015+ help
56016+ If you say Y here, users will not be able to write to FIFOs they don't
56017+ own in world-writable +t directories (e.g. /tmp), unless the owner of
56018+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
56019+ option is enabled, a sysctl option with name "fifo_restrictions" is
56020+ created.
56021+
56022+config GRKERNSEC_SYSFS_RESTRICT
56023+ bool "Sysfs/debugfs restriction"
56024+ depends on SYSFS
56025+ help
56026+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56027+ any filesystem normally mounted under it (e.g. debugfs) will be
56028+ mostly accessible only by root. These filesystems generally provide access
56029+ to hardware and debug information that isn't appropriate for unprivileged
56030+ users of the system. Sysfs and debugfs have also become a large source
56031+ of new vulnerabilities, ranging from infoleaks to local compromise.
56032+ There has been very little oversight with an eye toward security involved
56033+ in adding new exporters of information to these filesystems, so their
56034+ use is discouraged.
56035+ For reasons of compatibility, a few directories have been whitelisted
56036+ for access by non-root users:
56037+ /sys/fs/selinux
56038+ /sys/fs/fuse
56039+ /sys/devices/system/cpu
56040+
56041+config GRKERNSEC_ROFS
56042+ bool "Runtime read-only mount protection"
56043+ help
56044+ If you say Y here, a sysctl option with name "romount_protect" will
56045+ be created. By setting this option to 1 at runtime, filesystems
56046+ will be protected in the following ways:
56047+ * No new writable mounts will be allowed
56048+ * Existing read-only mounts won't be able to be remounted read/write
56049+ * Write operations will be denied on all block devices
56050+ This option acts independently of grsec_lock: once it is set to 1,
56051+ it cannot be turned off. Therefore, please be mindful of the resulting
56052+ behavior if this option is enabled in an init script on a read-only
56053+ filesystem. This feature is mainly intended for secure embedded systems.
56054+
56055+config GRKERNSEC_CHROOT
56056+ bool "Chroot jail restrictions"
56057+ help
56058+ If you say Y here, you will be able to choose several options that will
56059+ make breaking out of a chrooted jail much more difficult. If you
56060+ encounter no software incompatibilities with the following options, it
56061+ is recommended that you enable each one.
56062+
56063+config GRKERNSEC_CHROOT_MOUNT
56064+ bool "Deny mounts"
56065+ depends on GRKERNSEC_CHROOT
56066+ help
56067+ If you say Y here, processes inside a chroot will not be able to
56068+ mount or remount filesystems. If the sysctl option is enabled, a
56069+ sysctl option with name "chroot_deny_mount" is created.
56070+
56071+config GRKERNSEC_CHROOT_DOUBLE
56072+ bool "Deny double-chroots"
56073+ depends on GRKERNSEC_CHROOT
56074+ help
56075+ If you say Y here, processes inside a chroot will not be able to chroot
56076+ again outside the chroot. This is a widely used method of breaking
56077+ out of a chroot jail and should not be allowed. If the sysctl
56078+ option is enabled, a sysctl option with name
56079+ "chroot_deny_chroot" is created.
56080+
56081+config GRKERNSEC_CHROOT_PIVOT
56082+ bool "Deny pivot_root in chroot"
56083+ depends on GRKERNSEC_CHROOT
56084+ help
56085+ If you say Y here, processes inside a chroot will not be able to use
56086+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56087+ works similar to chroot in that it changes the root filesystem. This
56088+ function could be misused in a chrooted process to attempt to break out
56089+ of the chroot, and therefore should not be allowed. If the sysctl
56090+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56091+ created.
56092+
56093+config GRKERNSEC_CHROOT_CHDIR
56094+ bool "Enforce chdir(\"/\") on all chroots"
56095+ depends on GRKERNSEC_CHROOT
56096+ help
56097+ If you say Y here, the current working directory of all newly-chrooted
56098+ applications will be set to the root directory of the chroot.
56099+ The man page on chroot(2) states:
56100+ Note that this call does not change the current working
56101+ directory, so that `.' can be outside the tree rooted at
56102+ `/'. In particular, the super-user can escape from a
56103+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56104+
56105+ It is recommended that you say Y here, since it's not known to break
56106+ any software. If the sysctl option is enabled, a sysctl option with
56107+ name "chroot_enforce_chdir" is created.
56108+
56109+config GRKERNSEC_CHROOT_CHMOD
56110+ bool "Deny (f)chmod +s"
56111+ depends on GRKERNSEC_CHROOT
56112+ help
56113+ If you say Y here, processes inside a chroot will not be able to chmod
56114+ or fchmod files to make them have suid or sgid bits. This protects
56115+ against another published method of breaking a chroot. If the sysctl
56116+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56117+ created.
56118+
56119+config GRKERNSEC_CHROOT_FCHDIR
56120+ bool "Deny fchdir out of chroot"
56121+ depends on GRKERNSEC_CHROOT
56122+ help
56123+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56124+ to a file descriptor of the chrooting process that points to a directory
56125+ outside the filesystem will be stopped. If the sysctl option
56126+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56127+
56128+config GRKERNSEC_CHROOT_MKNOD
56129+ bool "Deny mknod"
56130+ depends on GRKERNSEC_CHROOT
56131+ help
56132+ If you say Y here, processes inside a chroot will not be allowed to
56133+ mknod. The problem with using mknod inside a chroot is that it
56134+ would allow an attacker to create a device entry that is the same
56135+ as one on the physical root of your system, which could range from
56136+ anything from the console device to a device for your harddrive (which
56137+ they could then use to wipe the drive or steal data). It is recommended
56138+ that you say Y here, unless you run into software incompatibilities.
56139+ If the sysctl option is enabled, a sysctl option with name
56140+ "chroot_deny_mknod" is created.
56141+
56142+config GRKERNSEC_CHROOT_SHMAT
56143+ bool "Deny shmat() out of chroot"
56144+ depends on GRKERNSEC_CHROOT
56145+ help
56146+ If you say Y here, processes inside a chroot will not be able to attach
56147+ to shared memory segments that were created outside of the chroot jail.
56148+ It is recommended that you say Y here. If the sysctl option is enabled,
56149+ a sysctl option with name "chroot_deny_shmat" is created.
56150+
56151+config GRKERNSEC_CHROOT_UNIX
56152+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56153+ depends on GRKERNSEC_CHROOT
56154+ help
56155+ If you say Y here, processes inside a chroot will not be able to
56156+ connect to abstract (meaning not belonging to a filesystem) Unix
56157+ domain sockets that were bound outside of a chroot. It is recommended
56158+ that you say Y here. If the sysctl option is enabled, a sysctl option
56159+ with name "chroot_deny_unix" is created.
56160+
56161+config GRKERNSEC_CHROOT_FINDTASK
56162+ bool "Protect outside processes"
56163+ depends on GRKERNSEC_CHROOT
56164+ help
56165+ If you say Y here, processes inside a chroot will not be able to
56166+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56167+ getsid, or view any process outside of the chroot. If the sysctl
56168+ option is enabled, a sysctl option with name "chroot_findtask" is
56169+ created.
56170+
56171+config GRKERNSEC_CHROOT_NICE
56172+ bool "Restrict priority changes"
56173+ depends on GRKERNSEC_CHROOT
56174+ help
56175+ If you say Y here, processes inside a chroot will not be able to raise
56176+ the priority of processes in the chroot, or alter the priority of
56177+ processes outside the chroot. This provides more security than simply
56178+ removing CAP_SYS_NICE from the process' capability set. If the
56179+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56180+ is created.
56181+
56182+config GRKERNSEC_CHROOT_SYSCTL
56183+ bool "Deny sysctl writes"
56184+ depends on GRKERNSEC_CHROOT
56185+ help
56186+ If you say Y here, an attacker in a chroot will not be able to
56187+ write to sysctl entries, either by sysctl(2) or through a /proc
56188+ interface. It is strongly recommended that you say Y here. If the
56189+ sysctl option is enabled, a sysctl option with name
56190+ "chroot_deny_sysctl" is created.
56191+
56192+config GRKERNSEC_CHROOT_CAPS
56193+ bool "Capability restrictions"
56194+ depends on GRKERNSEC_CHROOT
56195+ help
56196+ If you say Y here, the capabilities on all processes within a
56197+ chroot jail will be lowered to stop module insertion, raw i/o,
56198+ system and net admin tasks, rebooting the system, modifying immutable
56199+ files, modifying IPC owned by another, and changing the system time.
56200+ This is left an option because it can break some apps. Disable this
56201+ if your chrooted apps are having problems performing those kinds of
56202+ tasks. If the sysctl option is enabled, a sysctl option with
56203+ name "chroot_caps" is created.
56204+
56205+endmenu
56206+menu "Kernel Auditing"
56207+depends on GRKERNSEC
56208+
56209+config GRKERNSEC_AUDIT_GROUP
56210+ bool "Single group for auditing"
56211+ help
56212+ If you say Y here, the exec, chdir, and (un)mount logging features
56213+ will only operate on a group you specify. This option is recommended
56214+ if you only want to watch certain users instead of having a large
56215+ amount of logs from the entire system. If the sysctl option is enabled,
56216+ a sysctl option with name "audit_group" is created.
56217+
56218+config GRKERNSEC_AUDIT_GID
56219+ int "GID for auditing"
56220+ depends on GRKERNSEC_AUDIT_GROUP
56221+ default 1007
56222+
56223+config GRKERNSEC_EXECLOG
56224+ bool "Exec logging"
56225+ help
56226+ If you say Y here, all execve() calls will be logged (since the
56227+ other exec*() calls are frontends to execve(), all execution
56228+ will be logged). Useful for shell-servers that like to keep track
56229+ of their users. If the sysctl option is enabled, a sysctl option with
56230+ name "exec_logging" is created.
56231+ WARNING: This option when enabled will produce a LOT of logs, especially
56232+ on an active system.
56233+
56234+config GRKERNSEC_RESLOG
56235+ bool "Resource logging"
56236+ help
56237+ If you say Y here, all attempts to overstep resource limits will
56238+ be logged with the resource name, the requested size, and the current
56239+ limit. It is highly recommended that you say Y here. If the sysctl
56240+ option is enabled, a sysctl option with name "resource_logging" is
56241+ created. If the RBAC system is enabled, the sysctl value is ignored.
56242+
56243+config GRKERNSEC_CHROOT_EXECLOG
56244+ bool "Log execs within chroot"
56245+ help
56246+ If you say Y here, all executions inside a chroot jail will be logged
56247+ to syslog. This can cause a large amount of logs if certain
56248+ applications (eg. djb's daemontools) are installed on the system, and
56249+ is therefore left as an option. If the sysctl option is enabled, a
56250+ sysctl option with name "chroot_execlog" is created.
56251+
56252+config GRKERNSEC_AUDIT_PTRACE
56253+ bool "Ptrace logging"
56254+ help
56255+ If you say Y here, all attempts to attach to a process via ptrace
56256+ will be logged. If the sysctl option is enabled, a sysctl option
56257+ with name "audit_ptrace" is created.
56258+
56259+config GRKERNSEC_AUDIT_CHDIR
56260+ bool "Chdir logging"
56261+ help
56262+ If you say Y here, all chdir() calls will be logged. If the sysctl
56263+ option is enabled, a sysctl option with name "audit_chdir" is created.
56264+
56265+config GRKERNSEC_AUDIT_MOUNT
56266+ bool "(Un)Mount logging"
56267+ help
56268+ If you say Y here, all mounts and unmounts will be logged. If the
56269+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56270+ created.
56271+
56272+config GRKERNSEC_SIGNAL
56273+ bool "Signal logging"
56274+ help
56275+ If you say Y here, certain important signals will be logged, such as
56276+ SIGSEGV, which will as a result inform you of when an error in a program
56277+ occurred, which in some cases could mean a possible exploit attempt.
56278+ If the sysctl option is enabled, a sysctl option with name
56279+ "signal_logging" is created.
56280+
56281+config GRKERNSEC_FORKFAIL
56282+ bool "Fork failure logging"
56283+ help
56284+ If you say Y here, all failed fork() attempts will be logged.
56285+ This could suggest a fork bomb, or someone attempting to overstep
56286+ their process limit. If the sysctl option is enabled, a sysctl option
56287+ with name "forkfail_logging" is created.
56288+
56289+config GRKERNSEC_TIME
56290+ bool "Time change logging"
56291+ help
56292+ If you say Y here, any changes of the system clock will be logged.
56293+ If the sysctl option is enabled, a sysctl option with name
56294+ "timechange_logging" is created.
56295+
56296+config GRKERNSEC_PROC_IPADDR
56297+ bool "/proc/<pid>/ipaddr support"
56298+ help
56299+ If you say Y here, a new entry will be added to each /proc/<pid>
56300+ directory that contains the IP address of the person using the task.
56301+ The IP is carried across local TCP and AF_UNIX stream sockets.
56302+ This information can be useful for IDS/IPSes to perform remote response
56303+ to a local attack. The entry is readable by only the owner of the
56304+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56305+ the RBAC system), and thus does not create privacy concerns.
56306+
56307+config GRKERNSEC_RWXMAP_LOG
56308+ bool 'Denied RWX mmap/mprotect logging'
56309+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56310+ help
56311+ If you say Y here, calls to mmap() and mprotect() with explicit
56312+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56313+ denied by the PAX_MPROTECT feature. If the sysctl option is
56314+ enabled, a sysctl option with name "rwxmap_logging" is created.
56315+
56316+config GRKERNSEC_AUDIT_TEXTREL
56317+ bool 'ELF text relocations logging (READ HELP)'
56318+ depends on PAX_MPROTECT
56319+ help
56320+ If you say Y here, text relocations will be logged with the filename
56321+ of the offending library or binary. The purpose of the feature is
56322+ to help Linux distribution developers get rid of libraries and
56323+ binaries that need text relocations which hinder the future progress
56324+ of PaX. Only Linux distribution developers should say Y here, and
56325+ never on a production machine, as this option creates an information
56326+ leak that could aid an attacker in defeating the randomization of
56327+ a single memory region. If the sysctl option is enabled, a sysctl
56328+ option with name "audit_textrel" is created.
56329+
56330+endmenu
56331+
56332+menu "Executable Protections"
56333+depends on GRKERNSEC
56334+
56335+config GRKERNSEC_DMESG
56336+ bool "Dmesg(8) restriction"
56337+ help
56338+ If you say Y here, non-root users will not be able to use dmesg(8)
56339+ to view up to the last 4kb of messages in the kernel's log buffer.
56340+ The kernel's log buffer often contains kernel addresses and other
56341+ identifying information useful to an attacker in fingerprinting a
56342+ system for a targeted exploit.
56343+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56344+ created.
56345+
56346+config GRKERNSEC_HARDEN_PTRACE
56347+ bool "Deter ptrace-based process snooping"
56348+ help
56349+ If you say Y here, TTY sniffers and other malicious monitoring
56350+ programs implemented through ptrace will be defeated. If you
56351+ have been using the RBAC system, this option has already been
56352+ enabled for several years for all users, with the ability to make
56353+ fine-grained exceptions.
56354+
56355+ This option only affects the ability of non-root users to ptrace
56356+ processes that are not a descendent of the ptracing process.
56357+ This means that strace ./binary and gdb ./binary will still work,
56358+ but attaching to arbitrary processes will not. If the sysctl
56359+ option is enabled, a sysctl option with name "harden_ptrace" is
56360+ created.
56361+
56362+config GRKERNSEC_PTRACE_READEXEC
56363+ bool "Require read access to ptrace sensitive binaries"
56364+ help
56365+ If you say Y here, unprivileged users will not be able to ptrace unreadable
56366+ binaries. This option is useful in environments that
56367+ remove the read bits (e.g. file mode 4711) from suid binaries to
56368+ prevent infoleaking of their contents. This option adds
56369+ consistency to the use of that file mode, as the binary could normally
56370+ be read out when run without privileges while ptracing.
56371+
56372+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56373+ is created.
56374+
56375+config GRKERNSEC_SETXID
56376+ bool "Enforce consistent multithreaded privileges"
56377+ help
56378+ If you say Y here, a change from a root uid to a non-root uid
56379+ in a multithreaded application will cause the resulting uids,
56380+ gids, supplementary groups, and capabilities in that thread
56381+ to be propagated to the other threads of the process. In most
56382+ cases this is unnecessary, as glibc will emulate this behavior
56383+ on behalf of the application. Other libcs do not act in the
56384+ same way, allowing the other threads of the process to continue
56385+ running with root privileges. If the sysctl option is enabled,
56386+ a sysctl option with name "consistent_setxid" is created.
56387+
56388+config GRKERNSEC_TPE
56389+ bool "Trusted Path Execution (TPE)"
56390+ help
56391+ If you say Y here, you will be able to choose a gid to add to the
56392+ supplementary groups of users you want to mark as "untrusted."
56393+ These users will not be able to execute any files that are not in
56394+ root-owned directories writable only by root. If the sysctl option
56395+ is enabled, a sysctl option with name "tpe" is created.
56396+
56397+config GRKERNSEC_TPE_ALL
56398+ bool "Partially restrict all non-root users"
56399+ depends on GRKERNSEC_TPE
56400+ help
56401+ If you say Y here, all non-root users will be covered under
56402+ a weaker TPE restriction. This is separate from, and in addition to,
56403+ the main TPE options that you have selected elsewhere. Thus, if a
56404+ "trusted" GID is chosen, this restriction applies to even that GID.
56405+ Under this restriction, all non-root users will only be allowed to
56406+ execute files in directories they own that are not group or
56407+ world-writable, or in directories owned by root and writable only by
56408+ root. If the sysctl option is enabled, a sysctl option with name
56409+ "tpe_restrict_all" is created.
56410+
56411+config GRKERNSEC_TPE_INVERT
56412+ bool "Invert GID option"
56413+ depends on GRKERNSEC_TPE
56414+ help
56415+ If you say Y here, the group you specify in the TPE configuration will
56416+ decide what group TPE restrictions will be *disabled* for. This
56417+ option is useful if you want TPE restrictions to be applied to most
56418+ users on the system. If the sysctl option is enabled, a sysctl option
56419+ with name "tpe_invert" is created. Unlike other sysctl options, this
56420+ entry will default to on for backward-compatibility.
56421+
56422+config GRKERNSEC_TPE_GID
56423+ int "GID for untrusted users"
56424+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56425+ default 1005
56426+ help
56427+ Setting this GID determines what group TPE restrictions will be
56428+ *enabled* for. If the sysctl option is enabled, a sysctl option
56429+ with name "tpe_gid" is created.
56430+
56431+config GRKERNSEC_TPE_GID
56432+ int "GID for trusted users"
56433+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56434+ default 1005
56435+ help
56436+ Setting this GID determines what group TPE restrictions will be
56437+ *disabled* for. If the sysctl option is enabled, a sysctl option
56438+ with name "tpe_gid" is created.
56439+
56440+endmenu
56441+menu "Network Protections"
56442+depends on GRKERNSEC
56443+
56444+config GRKERNSEC_RANDNET
56445+ bool "Larger entropy pools"
56446+ help
56447+ If you say Y here, the entropy pools used for many features of Linux
56448+ and grsecurity will be doubled in size. Since several grsecurity
56449+ features use additional randomness, it is recommended that you say Y
56450+ here. Saying Y here has a similar effect as modifying
56451+ /proc/sys/kernel/random/poolsize.
56452+
56453+config GRKERNSEC_BLACKHOLE
56454+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56455+ depends on NET
56456+ help
56457+ If you say Y here, neither TCP resets nor ICMP
56458+ destination-unreachable packets will be sent in response to packets
56459+ sent to ports for which no associated listening process exists.
56460+ This feature supports both IPV4 and IPV6 and exempts the
56461+ loopback interface from blackholing. Enabling this feature
56462+ makes a host more resilient to DoS attacks and reduces network
56463+ visibility against scanners.
56464+
56465+ The blackhole feature as-implemented is equivalent to the FreeBSD
56466+ blackhole feature, as it prevents RST responses to all packets, not
56467+ just SYNs. Under most application behavior this causes no
56468+ problems, but applications (like haproxy) may not close certain
56469+ connections in a way that cleanly terminates them on the remote
56470+ end, leaving the remote host in LAST_ACK state. Because of this
56471+ side-effect and to prevent intentional LAST_ACK DoSes, this
56472+ feature also adds automatic mitigation against such attacks.
56473+ The mitigation drastically reduces the amount of time a socket
56474+ can spend in LAST_ACK state. If you're using haproxy and not
56475+ all servers it connects to have this option enabled, consider
56476+ disabling this feature on the haproxy host.
56477+
56478+ If the sysctl option is enabled, two sysctl options with names
56479+ "ip_blackhole" and "lastack_retries" will be created.
56480+ While "ip_blackhole" takes the standard zero/non-zero on/off
56481+ toggle, "lastack_retries" uses the same kinds of values as
56482+ "tcp_retries1" and "tcp_retries2". The default value of 4
56483+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56484+ state.
56485+
56486+config GRKERNSEC_SOCKET
56487+ bool "Socket restrictions"
56488+ depends on NET
56489+ help
56490+ If you say Y here, you will be able to choose from several options.
56491+ If you assign a GID on your system and add it to the supplementary
56492+ groups of users you want to restrict socket access to, this patch
56493+ will perform up to three things, based on the option(s) you choose.
56494+
56495+config GRKERNSEC_SOCKET_ALL
56496+ bool "Deny any sockets to group"
56497+ depends on GRKERNSEC_SOCKET
56498+ help
56499+ If you say Y here, you will be able to choose a GID of whose users will
56500+ be unable to connect to other hosts from your machine or run server
56501+ applications from your machine. If the sysctl option is enabled, a
56502+ sysctl option with name "socket_all" is created.
56503+
56504+config GRKERNSEC_SOCKET_ALL_GID
56505+ int "GID to deny all sockets for"
56506+ depends on GRKERNSEC_SOCKET_ALL
56507+ default 1004
56508+ help
56509+ Here you can choose the GID to disable socket access for. Remember to
56510+ add the users you want socket access disabled for to the GID
56511+ specified here. If the sysctl option is enabled, a sysctl option
56512+ with name "socket_all_gid" is created.
56513+
56514+config GRKERNSEC_SOCKET_CLIENT
56515+ bool "Deny client sockets to group"
56516+ depends on GRKERNSEC_SOCKET
56517+ help
56518+ If you say Y here, you will be able to choose a GID of whose users will
56519+ be unable to connect to other hosts from your machine, but will be
56520+ able to run servers. If this option is enabled, all users in the group
56521+ you specify will have to use passive mode when initiating ftp transfers
56522+ from the shell on your machine. If the sysctl option is enabled, a
56523+ sysctl option with name "socket_client" is created.
56524+
56525+config GRKERNSEC_SOCKET_CLIENT_GID
56526+ int "GID to deny client sockets for"
56527+ depends on GRKERNSEC_SOCKET_CLIENT
56528+ default 1003
56529+ help
56530+ Here you can choose the GID to disable client socket access for.
56531+ Remember to add the users you want client socket access disabled for to
56532+ the GID specified here. If the sysctl option is enabled, a sysctl
56533+ option with name "socket_client_gid" is created.
56534+
56535+config GRKERNSEC_SOCKET_SERVER
56536+ bool "Deny server sockets to group"
56537+ depends on GRKERNSEC_SOCKET
56538+ help
56539+ If you say Y here, you will be able to choose a GID of whose users will
56540+ be unable to run server applications from your machine. If the sysctl
56541+ option is enabled, a sysctl option with name "socket_server" is created.
56542+
56543+config GRKERNSEC_SOCKET_SERVER_GID
56544+ int "GID to deny server sockets for"
56545+ depends on GRKERNSEC_SOCKET_SERVER
56546+ default 1002
56547+ help
56548+ Here you can choose the GID to disable server socket access for.
56549+ Remember to add the users you want server socket access disabled for to
56550+ the GID specified here. If the sysctl option is enabled, a sysctl
56551+ option with name "socket_server_gid" is created.
56552+
56553+endmenu
56554+menu "Sysctl support"
56555+depends on GRKERNSEC && SYSCTL
56556+
56557+config GRKERNSEC_SYSCTL
56558+ bool "Sysctl support"
56559+ help
56560+ If you say Y here, you will be able to change the options that
56561+ grsecurity runs with at bootup, without having to recompile your
56562+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56563+ to enable (1) or disable (0) various features. All the sysctl entries
56564+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56565+ All features enabled in the kernel configuration are disabled at boot
56566+ if you do not say Y to the "Turn on features by default" option.
56567+ All options should be set at startup, and the grsec_lock entry should
56568+ be set to a non-zero value after all the options are set.
56569+ *THIS IS EXTREMELY IMPORTANT*
56570+
56571+config GRKERNSEC_SYSCTL_DISTRO
56572+ bool "Extra sysctl support for distro makers (READ HELP)"
56573+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56574+ help
56575+ If you say Y here, additional sysctl options will be created
56576+ for features that affect processes running as root. Therefore,
56577+ it is critical when using this option that the grsec_lock entry be
56578+ enabled after boot. Only distros with prebuilt kernel packages
56579+ with this option enabled that can ensure grsec_lock is enabled
56580+ after boot should use this option.
56581+ *Failure to set grsec_lock after boot makes all grsec features
56582+ this option covers useless*
56583+
56584+ Currently this option creates the following sysctl entries:
56585+ "Disable Privileged I/O": "disable_priv_io"
56586+
56587+config GRKERNSEC_SYSCTL_ON
56588+ bool "Turn on features by default"
56589+ depends on GRKERNSEC_SYSCTL
56590+ help
56591+ If you say Y here, instead of having all features enabled in the
56592+ kernel configuration disabled at boot time, the features will be
56593+ enabled at boot time. It is recommended you say Y here unless
56594+ there is some reason you would want all sysctl-tunable features to
56595+ be disabled by default. As mentioned elsewhere, it is important
56596+ to enable the grsec_lock entry once you have finished modifying
56597+ the sysctl entries.
56598+
56599+endmenu
56600+menu "Logging Options"
56601+depends on GRKERNSEC
56602+
56603+config GRKERNSEC_FLOODTIME
56604+ int "Seconds in between log messages (minimum)"
56605+ default 10
56606+ help
56607+ This option allows you to enforce the number of seconds between
56608+ grsecurity log messages. The default should be suitable for most
56609+ people, however, if you choose to change it, choose a value small enough
56610+ to allow informative logs to be produced, but large enough to
56611+ prevent flooding.
56612+
56613+config GRKERNSEC_FLOODBURST
56614+ int "Number of messages in a burst (maximum)"
56615+ default 6
56616+ help
56617+ This option allows you to choose the maximum number of messages allowed
56618+ within the flood time interval you chose in a separate option. The
56619+ default should be suitable for most people, however if you find that
56620+ many of your logs are being interpreted as flooding, you may want to
56621+ raise this value.
56622+
56623+endmenu
56624+
56625+endmenu
56626diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56627new file mode 100644
56628index 0000000..1b9afa9
56629--- /dev/null
56630+++ b/grsecurity/Makefile
56631@@ -0,0 +1,38 @@
56632+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56633+# during 2001-2009 it has been completely redesigned by Brad Spengler
56634+# into an RBAC system
56635+#
56636+# All code in this directory and various hooks inserted throughout the kernel
56637+# are copyright Brad Spengler - Open Source Security, Inc., and released
56638+# under the GPL v2 or higher
56639+
56640+KBUILD_CFLAGS += -Werror
56641+
56642+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56643+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56644+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56645+
56646+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56647+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56648+ gracl_learn.o grsec_log.o
56649+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56650+
56651+ifdef CONFIG_NET
56652+obj-y += grsec_sock.o
56653+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56654+endif
56655+
56656+ifndef CONFIG_GRKERNSEC
56657+obj-y += grsec_disabled.o
56658+endif
56659+
56660+ifdef CONFIG_GRKERNSEC_HIDESYM
56661+extra-y := grsec_hidesym.o
56662+$(obj)/grsec_hidesym.o:
56663+ @-chmod -f 500 /boot
56664+ @-chmod -f 500 /lib/modules
56665+ @-chmod -f 500 /lib64/modules
56666+ @-chmod -f 500 /lib32/modules
56667+ @-chmod -f 700 .
56668+ @echo ' grsec: protected kernel image paths'
56669+endif
56670diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56671new file mode 100644
56672index 0000000..b1c4f4d
56673--- /dev/null
56674+++ b/grsecurity/gracl.c
56675@@ -0,0 +1,4149 @@
56676+#include <linux/kernel.h>
56677+#include <linux/module.h>
56678+#include <linux/sched.h>
56679+#include <linux/mm.h>
56680+#include <linux/file.h>
56681+#include <linux/fs.h>
56682+#include <linux/namei.h>
56683+#include <linux/mount.h>
56684+#include <linux/tty.h>
56685+#include <linux/proc_fs.h>
56686+#include <linux/smp_lock.h>
56687+#include <linux/slab.h>
56688+#include <linux/vmalloc.h>
56689+#include <linux/types.h>
56690+#include <linux/sysctl.h>
56691+#include <linux/netdevice.h>
56692+#include <linux/ptrace.h>
56693+#include <linux/gracl.h>
56694+#include <linux/gralloc.h>
56695+#include <linux/security.h>
56696+#include <linux/grinternal.h>
56697+#include <linux/pid_namespace.h>
56698+#include <linux/fdtable.h>
56699+#include <linux/percpu.h>
56700+
56701+#include <asm/uaccess.h>
56702+#include <asm/errno.h>
56703+#include <asm/mman.h>
56704+
56705+static struct acl_role_db acl_role_set;
56706+static struct name_db name_set;
56707+static struct inodev_db inodev_set;
56708+
56709+/* for keeping track of userspace pointers used for subjects, so we
56710+ can share references in the kernel as well
56711+*/
56712+
56713+static struct dentry *real_root;
56714+static struct vfsmount *real_root_mnt;
56715+
56716+static struct acl_subj_map_db subj_map_set;
56717+
56718+static struct acl_role_label *default_role;
56719+
56720+static struct acl_role_label *role_list;
56721+
56722+static u16 acl_sp_role_value;
56723+
56724+extern char *gr_shared_page[4];
56725+static DEFINE_MUTEX(gr_dev_mutex);
56726+DEFINE_RWLOCK(gr_inode_lock);
56727+
56728+struct gr_arg *gr_usermode;
56729+
56730+static unsigned int gr_status __read_only = GR_STATUS_INIT;
56731+
56732+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56733+extern void gr_clear_learn_entries(void);
56734+
56735+#ifdef CONFIG_GRKERNSEC_RESLOG
56736+extern void gr_log_resource(const struct task_struct *task,
56737+ const int res, const unsigned long wanted, const int gt);
56738+#endif
56739+
56740+unsigned char *gr_system_salt;
56741+unsigned char *gr_system_sum;
56742+
56743+static struct sprole_pw **acl_special_roles = NULL;
56744+static __u16 num_sprole_pws = 0;
56745+
56746+static struct acl_role_label *kernel_role = NULL;
56747+
56748+static unsigned int gr_auth_attempts = 0;
56749+static unsigned long gr_auth_expires = 0UL;
56750+
56751+#ifdef CONFIG_NET
56752+extern struct vfsmount *sock_mnt;
56753+#endif
56754+extern struct vfsmount *pipe_mnt;
56755+extern struct vfsmount *shm_mnt;
56756+#ifdef CONFIG_HUGETLBFS
56757+extern struct vfsmount *hugetlbfs_vfsmount;
56758+#endif
56759+
56760+static struct acl_object_label *fakefs_obj_rw;
56761+static struct acl_object_label *fakefs_obj_rwx;
56762+
56763+extern int gr_init_uidset(void);
56764+extern void gr_free_uidset(void);
56765+extern void gr_remove_uid(uid_t uid);
56766+extern int gr_find_uid(uid_t uid);
56767+
56768+__inline__ int
56769+gr_acl_is_enabled(void)
56770+{
56771+ return (gr_status & GR_READY);
56772+}
56773+
56774+#ifdef CONFIG_BTRFS_FS
56775+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56776+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56777+#endif
56778+
56779+static inline dev_t __get_dev(const struct dentry *dentry)
56780+{
56781+#ifdef CONFIG_BTRFS_FS
56782+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56783+ return get_btrfs_dev_from_inode(dentry->d_inode);
56784+ else
56785+#endif
56786+ return dentry->d_inode->i_sb->s_dev;
56787+}
56788+
56789+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56790+{
56791+ return __get_dev(dentry);
56792+}
56793+
56794+static char gr_task_roletype_to_char(struct task_struct *task)
56795+{
56796+ switch (task->role->roletype &
56797+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56798+ GR_ROLE_SPECIAL)) {
56799+ case GR_ROLE_DEFAULT:
56800+ return 'D';
56801+ case GR_ROLE_USER:
56802+ return 'U';
56803+ case GR_ROLE_GROUP:
56804+ return 'G';
56805+ case GR_ROLE_SPECIAL:
56806+ return 'S';
56807+ }
56808+
56809+ return 'X';
56810+}
56811+
56812+char gr_roletype_to_char(void)
56813+{
56814+ return gr_task_roletype_to_char(current);
56815+}
56816+
56817+__inline__ int
56818+gr_acl_tpe_check(void)
56819+{
56820+ if (unlikely(!(gr_status & GR_READY)))
56821+ return 0;
56822+ if (current->role->roletype & GR_ROLE_TPE)
56823+ return 1;
56824+ else
56825+ return 0;
56826+}
56827+
56828+int
56829+gr_handle_rawio(const struct inode *inode)
56830+{
56831+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56832+ if (inode && S_ISBLK(inode->i_mode) &&
56833+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56834+ !capable(CAP_SYS_RAWIO))
56835+ return 1;
56836+#endif
56837+ return 0;
56838+}
56839+
56840+static int
56841+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56842+{
56843+ if (likely(lena != lenb))
56844+ return 0;
56845+
56846+ return !memcmp(a, b, lena);
56847+}
56848+
56849+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56850+{
56851+ *buflen -= namelen;
56852+ if (*buflen < 0)
56853+ return -ENAMETOOLONG;
56854+ *buffer -= namelen;
56855+ memcpy(*buffer, str, namelen);
56856+ return 0;
56857+}
56858+
56859+/* this must be called with vfsmount_lock and dcache_lock held */
56860+
56861+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56862+ struct dentry *root, struct vfsmount *rootmnt,
56863+ char *buffer, int buflen)
56864+{
56865+ char * end = buffer+buflen;
56866+ char * retval;
56867+ int namelen;
56868+
56869+ *--end = '\0';
56870+ buflen--;
56871+
56872+ if (buflen < 1)
56873+ goto Elong;
56874+ /* Get '/' right */
56875+ retval = end-1;
56876+ *retval = '/';
56877+
56878+ for (;;) {
56879+ struct dentry * parent;
56880+
56881+ if (dentry == root && vfsmnt == rootmnt)
56882+ break;
56883+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56884+ /* Global root? */
56885+ if (vfsmnt->mnt_parent == vfsmnt)
56886+ goto global_root;
56887+ dentry = vfsmnt->mnt_mountpoint;
56888+ vfsmnt = vfsmnt->mnt_parent;
56889+ continue;
56890+ }
56891+ parent = dentry->d_parent;
56892+ prefetch(parent);
56893+ namelen = dentry->d_name.len;
56894+ buflen -= namelen + 1;
56895+ if (buflen < 0)
56896+ goto Elong;
56897+ end -= namelen;
56898+ memcpy(end, dentry->d_name.name, namelen);
56899+ *--end = '/';
56900+ retval = end;
56901+ dentry = parent;
56902+ }
56903+
56904+out:
56905+ return retval;
56906+
56907+global_root:
56908+ namelen = dentry->d_name.len;
56909+ buflen -= namelen;
56910+ if (buflen < 0)
56911+ goto Elong;
56912+ retval -= namelen-1; /* hit the slash */
56913+ memcpy(retval, dentry->d_name.name, namelen);
56914+ goto out;
56915+Elong:
56916+ retval = ERR_PTR(-ENAMETOOLONG);
56917+ goto out;
56918+}
56919+
56920+static char *
56921+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56922+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56923+{
56924+ char *retval;
56925+
56926+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56927+ if (unlikely(IS_ERR(retval)))
56928+ retval = strcpy(buf, "<path too long>");
56929+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56930+ retval[1] = '\0';
56931+
56932+ return retval;
56933+}
56934+
56935+static char *
56936+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56937+ char *buf, int buflen)
56938+{
56939+ char *res;
56940+
56941+ /* we can use real_root, real_root_mnt, because this is only called
56942+ by the RBAC system */
56943+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56944+
56945+ return res;
56946+}
56947+
56948+static char *
56949+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56950+ char *buf, int buflen)
56951+{
56952+ char *res;
56953+ struct dentry *root;
56954+ struct vfsmount *rootmnt;
56955+ struct task_struct *reaper = &init_task;
56956+
56957+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56958+ read_lock(&reaper->fs->lock);
56959+ root = dget(reaper->fs->root.dentry);
56960+ rootmnt = mntget(reaper->fs->root.mnt);
56961+ read_unlock(&reaper->fs->lock);
56962+
56963+ spin_lock(&dcache_lock);
56964+ spin_lock(&vfsmount_lock);
56965+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56966+ spin_unlock(&vfsmount_lock);
56967+ spin_unlock(&dcache_lock);
56968+
56969+ dput(root);
56970+ mntput(rootmnt);
56971+ return res;
56972+}
56973+
56974+static char *
56975+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56976+{
56977+ char *ret;
56978+ spin_lock(&dcache_lock);
56979+ spin_lock(&vfsmount_lock);
56980+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56981+ PAGE_SIZE);
56982+ spin_unlock(&vfsmount_lock);
56983+ spin_unlock(&dcache_lock);
56984+ return ret;
56985+}
56986+
56987+static char *
56988+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56989+{
56990+ char *ret;
56991+ char *buf;
56992+ int buflen;
56993+
56994+ spin_lock(&dcache_lock);
56995+ spin_lock(&vfsmount_lock);
56996+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56997+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56998+ buflen = (int)(ret - buf);
56999+ if (buflen >= 5)
57000+ prepend(&ret, &buflen, "/proc", 5);
57001+ else
57002+ ret = strcpy(buf, "<path too long>");
57003+ spin_unlock(&vfsmount_lock);
57004+ spin_unlock(&dcache_lock);
57005+ return ret;
57006+}
57007+
57008+char *
57009+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
57010+{
57011+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57012+ PAGE_SIZE);
57013+}
57014+
57015+char *
57016+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
57017+{
57018+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57019+ PAGE_SIZE);
57020+}
57021+
57022+char *
57023+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
57024+{
57025+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
57026+ PAGE_SIZE);
57027+}
57028+
57029+char *
57030+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
57031+{
57032+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
57033+ PAGE_SIZE);
57034+}
57035+
57036+char *
57037+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
57038+{
57039+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
57040+ PAGE_SIZE);
57041+}
57042+
57043+__inline__ __u32
57044+to_gr_audit(const __u32 reqmode)
57045+{
57046+ /* masks off auditable permission flags, then shifts them to create
57047+ auditing flags, and adds the special case of append auditing if
57048+ we're requesting write */
57049+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
57050+}
57051+
57052+struct acl_subject_label *
57053+lookup_subject_map(const struct acl_subject_label *userp)
57054+{
57055+ unsigned int index = shash(userp, subj_map_set.s_size);
57056+ struct subject_map *match;
57057+
57058+ match = subj_map_set.s_hash[index];
57059+
57060+ while (match && match->user != userp)
57061+ match = match->next;
57062+
57063+ if (match != NULL)
57064+ return match->kernel;
57065+ else
57066+ return NULL;
57067+}
57068+
57069+static void
57070+insert_subj_map_entry(struct subject_map *subjmap)
57071+{
57072+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
57073+ struct subject_map **curr;
57074+
57075+ subjmap->prev = NULL;
57076+
57077+ curr = &subj_map_set.s_hash[index];
57078+ if (*curr != NULL)
57079+ (*curr)->prev = subjmap;
57080+
57081+ subjmap->next = *curr;
57082+ *curr = subjmap;
57083+
57084+ return;
57085+}
57086+
57087+static struct acl_role_label *
57088+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
57089+ const gid_t gid)
57090+{
57091+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
57092+ struct acl_role_label *match;
57093+ struct role_allowed_ip *ipp;
57094+ unsigned int x;
57095+ u32 curr_ip = task->signal->curr_ip;
57096+
57097+ task->signal->saved_ip = curr_ip;
57098+
57099+ match = acl_role_set.r_hash[index];
57100+
57101+ while (match) {
57102+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
57103+ for (x = 0; x < match->domain_child_num; x++) {
57104+ if (match->domain_children[x] == uid)
57105+ goto found;
57106+ }
57107+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
57108+ break;
57109+ match = match->next;
57110+ }
57111+found:
57112+ if (match == NULL) {
57113+ try_group:
57114+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
57115+ match = acl_role_set.r_hash[index];
57116+
57117+ while (match) {
57118+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
57119+ for (x = 0; x < match->domain_child_num; x++) {
57120+ if (match->domain_children[x] == gid)
57121+ goto found2;
57122+ }
57123+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
57124+ break;
57125+ match = match->next;
57126+ }
57127+found2:
57128+ if (match == NULL)
57129+ match = default_role;
57130+ if (match->allowed_ips == NULL)
57131+ return match;
57132+ else {
57133+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57134+ if (likely
57135+ ((ntohl(curr_ip) & ipp->netmask) ==
57136+ (ntohl(ipp->addr) & ipp->netmask)))
57137+ return match;
57138+ }
57139+ match = default_role;
57140+ }
57141+ } else if (match->allowed_ips == NULL) {
57142+ return match;
57143+ } else {
57144+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57145+ if (likely
57146+ ((ntohl(curr_ip) & ipp->netmask) ==
57147+ (ntohl(ipp->addr) & ipp->netmask)))
57148+ return match;
57149+ }
57150+ goto try_group;
57151+ }
57152+
57153+ return match;
57154+}
57155+
57156+struct acl_subject_label *
57157+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
57158+ const struct acl_role_label *role)
57159+{
57160+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
57161+ struct acl_subject_label *match;
57162+
57163+ match = role->subj_hash[index];
57164+
57165+ while (match && (match->inode != ino || match->device != dev ||
57166+ (match->mode & GR_DELETED))) {
57167+ match = match->next;
57168+ }
57169+
57170+ if (match && !(match->mode & GR_DELETED))
57171+ return match;
57172+ else
57173+ return NULL;
57174+}
57175+
57176+struct acl_subject_label *
57177+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
57178+ const struct acl_role_label *role)
57179+{
57180+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
57181+ struct acl_subject_label *match;
57182+
57183+ match = role->subj_hash[index];
57184+
57185+ while (match && (match->inode != ino || match->device != dev ||
57186+ !(match->mode & GR_DELETED))) {
57187+ match = match->next;
57188+ }
57189+
57190+ if (match && (match->mode & GR_DELETED))
57191+ return match;
57192+ else
57193+ return NULL;
57194+}
57195+
57196+static struct acl_object_label *
57197+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
57198+ const struct acl_subject_label *subj)
57199+{
57200+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
57201+ struct acl_object_label *match;
57202+
57203+ match = subj->obj_hash[index];
57204+
57205+ while (match && (match->inode != ino || match->device != dev ||
57206+ (match->mode & GR_DELETED))) {
57207+ match = match->next;
57208+ }
57209+
57210+ if (match && !(match->mode & GR_DELETED))
57211+ return match;
57212+ else
57213+ return NULL;
57214+}
57215+
57216+static struct acl_object_label *
57217+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
57218+ const struct acl_subject_label *subj)
57219+{
57220+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
57221+ struct acl_object_label *match;
57222+
57223+ match = subj->obj_hash[index];
57224+
57225+ while (match && (match->inode != ino || match->device != dev ||
57226+ !(match->mode & GR_DELETED))) {
57227+ match = match->next;
57228+ }
57229+
57230+ if (match && (match->mode & GR_DELETED))
57231+ return match;
57232+
57233+ match = subj->obj_hash[index];
57234+
57235+ while (match && (match->inode != ino || match->device != dev ||
57236+ (match->mode & GR_DELETED))) {
57237+ match = match->next;
57238+ }
57239+
57240+ if (match && !(match->mode & GR_DELETED))
57241+ return match;
57242+ else
57243+ return NULL;
57244+}
57245+
57246+static struct name_entry *
57247+lookup_name_entry(const char *name)
57248+{
57249+ unsigned int len = strlen(name);
57250+ unsigned int key = full_name_hash(name, len);
57251+ unsigned int index = key % name_set.n_size;
57252+ struct name_entry *match;
57253+
57254+ match = name_set.n_hash[index];
57255+
57256+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
57257+ match = match->next;
57258+
57259+ return match;
57260+}
57261+
57262+static struct name_entry *
57263+lookup_name_entry_create(const char *name)
57264+{
57265+ unsigned int len = strlen(name);
57266+ unsigned int key = full_name_hash(name, len);
57267+ unsigned int index = key % name_set.n_size;
57268+ struct name_entry *match;
57269+
57270+ match = name_set.n_hash[index];
57271+
57272+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57273+ !match->deleted))
57274+ match = match->next;
57275+
57276+ if (match && match->deleted)
57277+ return match;
57278+
57279+ match = name_set.n_hash[index];
57280+
57281+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57282+ match->deleted))
57283+ match = match->next;
57284+
57285+ if (match && !match->deleted)
57286+ return match;
57287+ else
57288+ return NULL;
57289+}
57290+
57291+static struct inodev_entry *
57292+lookup_inodev_entry(const ino_t ino, const dev_t dev)
57293+{
57294+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
57295+ struct inodev_entry *match;
57296+
57297+ match = inodev_set.i_hash[index];
57298+
57299+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57300+ match = match->next;
57301+
57302+ return match;
57303+}
57304+
57305+static void
57306+insert_inodev_entry(struct inodev_entry *entry)
57307+{
57308+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
57309+ inodev_set.i_size);
57310+ struct inodev_entry **curr;
57311+
57312+ entry->prev = NULL;
57313+
57314+ curr = &inodev_set.i_hash[index];
57315+ if (*curr != NULL)
57316+ (*curr)->prev = entry;
57317+
57318+ entry->next = *curr;
57319+ *curr = entry;
57320+
57321+ return;
57322+}
57323+
57324+static void
57325+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57326+{
57327+ unsigned int index =
57328+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57329+ struct acl_role_label **curr;
57330+ struct acl_role_label *tmp;
57331+
57332+ curr = &acl_role_set.r_hash[index];
57333+
57334+ /* if role was already inserted due to domains and already has
57335+ a role in the same bucket as it attached, then we need to
57336+ combine these two buckets
57337+ */
57338+ if (role->next) {
57339+ tmp = role->next;
57340+ while (tmp->next)
57341+ tmp = tmp->next;
57342+ tmp->next = *curr;
57343+ } else
57344+ role->next = *curr;
57345+ *curr = role;
57346+
57347+ return;
57348+}
57349+
57350+static void
57351+insert_acl_role_label(struct acl_role_label *role)
57352+{
57353+ int i;
57354+
57355+ if (role_list == NULL) {
57356+ role_list = role;
57357+ role->prev = NULL;
57358+ } else {
57359+ role->prev = role_list;
57360+ role_list = role;
57361+ }
57362+
57363+ /* used for hash chains */
57364+ role->next = NULL;
57365+
57366+ if (role->roletype & GR_ROLE_DOMAIN) {
57367+ for (i = 0; i < role->domain_child_num; i++)
57368+ __insert_acl_role_label(role, role->domain_children[i]);
57369+ } else
57370+ __insert_acl_role_label(role, role->uidgid);
57371+}
57372+
57373+static int
57374+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57375+{
57376+ struct name_entry **curr, *nentry;
57377+ struct inodev_entry *ientry;
57378+ unsigned int len = strlen(name);
57379+ unsigned int key = full_name_hash(name, len);
57380+ unsigned int index = key % name_set.n_size;
57381+
57382+ curr = &name_set.n_hash[index];
57383+
57384+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57385+ curr = &((*curr)->next);
57386+
57387+ if (*curr != NULL)
57388+ return 1;
57389+
57390+ nentry = acl_alloc(sizeof (struct name_entry));
57391+ if (nentry == NULL)
57392+ return 0;
57393+ ientry = acl_alloc(sizeof (struct inodev_entry));
57394+ if (ientry == NULL)
57395+ return 0;
57396+ ientry->nentry = nentry;
57397+
57398+ nentry->key = key;
57399+ nentry->name = name;
57400+ nentry->inode = inode;
57401+ nentry->device = device;
57402+ nentry->len = len;
57403+ nentry->deleted = deleted;
57404+
57405+ nentry->prev = NULL;
57406+ curr = &name_set.n_hash[index];
57407+ if (*curr != NULL)
57408+ (*curr)->prev = nentry;
57409+ nentry->next = *curr;
57410+ *curr = nentry;
57411+
57412+ /* insert us into the table searchable by inode/dev */
57413+ insert_inodev_entry(ientry);
57414+
57415+ return 1;
57416+}
57417+
57418+static void
57419+insert_acl_obj_label(struct acl_object_label *obj,
57420+ struct acl_subject_label *subj)
57421+{
57422+ unsigned int index =
57423+ fhash(obj->inode, obj->device, subj->obj_hash_size);
57424+ struct acl_object_label **curr;
57425+
57426+
57427+ obj->prev = NULL;
57428+
57429+ curr = &subj->obj_hash[index];
57430+ if (*curr != NULL)
57431+ (*curr)->prev = obj;
57432+
57433+ obj->next = *curr;
57434+ *curr = obj;
57435+
57436+ return;
57437+}
57438+
57439+static void
57440+insert_acl_subj_label(struct acl_subject_label *obj,
57441+ struct acl_role_label *role)
57442+{
57443+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
57444+ struct acl_subject_label **curr;
57445+
57446+ obj->prev = NULL;
57447+
57448+ curr = &role->subj_hash[index];
57449+ if (*curr != NULL)
57450+ (*curr)->prev = obj;
57451+
57452+ obj->next = *curr;
57453+ *curr = obj;
57454+
57455+ return;
57456+}
57457+
57458+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57459+
57460+static void *
57461+create_table(__u32 * len, int elementsize)
57462+{
57463+ unsigned int table_sizes[] = {
57464+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57465+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57466+ 4194301, 8388593, 16777213, 33554393, 67108859
57467+ };
57468+ void *newtable = NULL;
57469+ unsigned int pwr = 0;
57470+
57471+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57472+ table_sizes[pwr] <= *len)
57473+ pwr++;
57474+
57475+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57476+ return newtable;
57477+
57478+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57479+ newtable =
57480+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57481+ else
57482+ newtable = vmalloc(table_sizes[pwr] * elementsize);
57483+
57484+ *len = table_sizes[pwr];
57485+
57486+ return newtable;
57487+}
57488+
57489+static int
57490+init_variables(const struct gr_arg *arg)
57491+{
57492+ struct task_struct *reaper = &init_task;
57493+ unsigned int stacksize;
57494+
57495+ subj_map_set.s_size = arg->role_db.num_subjects;
57496+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57497+ name_set.n_size = arg->role_db.num_objects;
57498+ inodev_set.i_size = arg->role_db.num_objects;
57499+
57500+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
57501+ !name_set.n_size || !inodev_set.i_size)
57502+ return 1;
57503+
57504+ if (!gr_init_uidset())
57505+ return 1;
57506+
57507+ /* set up the stack that holds allocation info */
57508+
57509+ stacksize = arg->role_db.num_pointers + 5;
57510+
57511+ if (!acl_alloc_stack_init(stacksize))
57512+ return 1;
57513+
57514+ /* grab reference for the real root dentry and vfsmount */
57515+ read_lock(&reaper->fs->lock);
57516+ real_root = dget(reaper->fs->root.dentry);
57517+ real_root_mnt = mntget(reaper->fs->root.mnt);
57518+ read_unlock(&reaper->fs->lock);
57519+
57520+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57521+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
57522+#endif
57523+
57524+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57525+ if (fakefs_obj_rw == NULL)
57526+ return 1;
57527+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57528+
57529+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57530+ if (fakefs_obj_rwx == NULL)
57531+ return 1;
57532+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57533+
57534+ subj_map_set.s_hash =
57535+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57536+ acl_role_set.r_hash =
57537+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57538+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57539+ inodev_set.i_hash =
57540+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57541+
57542+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57543+ !name_set.n_hash || !inodev_set.i_hash)
57544+ return 1;
57545+
57546+ memset(subj_map_set.s_hash, 0,
57547+ sizeof(struct subject_map *) * subj_map_set.s_size);
57548+ memset(acl_role_set.r_hash, 0,
57549+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
57550+ memset(name_set.n_hash, 0,
57551+ sizeof (struct name_entry *) * name_set.n_size);
57552+ memset(inodev_set.i_hash, 0,
57553+ sizeof (struct inodev_entry *) * inodev_set.i_size);
57554+
57555+ return 0;
57556+}
57557+
57558+/* free information not needed after startup
57559+ currently contains user->kernel pointer mappings for subjects
57560+*/
57561+
57562+static void
57563+free_init_variables(void)
57564+{
57565+ __u32 i;
57566+
57567+ if (subj_map_set.s_hash) {
57568+ for (i = 0; i < subj_map_set.s_size; i++) {
57569+ if (subj_map_set.s_hash[i]) {
57570+ kfree(subj_map_set.s_hash[i]);
57571+ subj_map_set.s_hash[i] = NULL;
57572+ }
57573+ }
57574+
57575+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57576+ PAGE_SIZE)
57577+ kfree(subj_map_set.s_hash);
57578+ else
57579+ vfree(subj_map_set.s_hash);
57580+ }
57581+
57582+ return;
57583+}
57584+
57585+static void
57586+free_variables(void)
57587+{
57588+ struct acl_subject_label *s;
57589+ struct acl_role_label *r;
57590+ struct task_struct *task, *task2;
57591+ unsigned int x;
57592+
57593+ gr_clear_learn_entries();
57594+
57595+ read_lock(&tasklist_lock);
57596+ do_each_thread(task2, task) {
57597+ task->acl_sp_role = 0;
57598+ task->acl_role_id = 0;
57599+ task->acl = NULL;
57600+ task->role = NULL;
57601+ } while_each_thread(task2, task);
57602+ read_unlock(&tasklist_lock);
57603+
57604+ /* release the reference to the real root dentry and vfsmount */
57605+ if (real_root)
57606+ dput(real_root);
57607+ real_root = NULL;
57608+ if (real_root_mnt)
57609+ mntput(real_root_mnt);
57610+ real_root_mnt = NULL;
57611+
57612+ /* free all object hash tables */
57613+
57614+ FOR_EACH_ROLE_START(r)
57615+ if (r->subj_hash == NULL)
57616+ goto next_role;
57617+ FOR_EACH_SUBJECT_START(r, s, x)
57618+ if (s->obj_hash == NULL)
57619+ break;
57620+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57621+ kfree(s->obj_hash);
57622+ else
57623+ vfree(s->obj_hash);
57624+ FOR_EACH_SUBJECT_END(s, x)
57625+ FOR_EACH_NESTED_SUBJECT_START(r, s)
57626+ if (s->obj_hash == NULL)
57627+ break;
57628+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57629+ kfree(s->obj_hash);
57630+ else
57631+ vfree(s->obj_hash);
57632+ FOR_EACH_NESTED_SUBJECT_END(s)
57633+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57634+ kfree(r->subj_hash);
57635+ else
57636+ vfree(r->subj_hash);
57637+ r->subj_hash = NULL;
57638+next_role:
57639+ FOR_EACH_ROLE_END(r)
57640+
57641+ acl_free_all();
57642+
57643+ if (acl_role_set.r_hash) {
57644+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57645+ PAGE_SIZE)
57646+ kfree(acl_role_set.r_hash);
57647+ else
57648+ vfree(acl_role_set.r_hash);
57649+ }
57650+ if (name_set.n_hash) {
57651+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
57652+ PAGE_SIZE)
57653+ kfree(name_set.n_hash);
57654+ else
57655+ vfree(name_set.n_hash);
57656+ }
57657+
57658+ if (inodev_set.i_hash) {
57659+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57660+ PAGE_SIZE)
57661+ kfree(inodev_set.i_hash);
57662+ else
57663+ vfree(inodev_set.i_hash);
57664+ }
57665+
57666+ gr_free_uidset();
57667+
57668+ memset(&name_set, 0, sizeof (struct name_db));
57669+ memset(&inodev_set, 0, sizeof (struct inodev_db));
57670+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57671+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57672+
57673+ default_role = NULL;
57674+ role_list = NULL;
57675+
57676+ return;
57677+}
57678+
57679+static __u32
57680+count_user_objs(struct acl_object_label *userp)
57681+{
57682+ struct acl_object_label o_tmp;
57683+ __u32 num = 0;
57684+
57685+ while (userp) {
57686+ if (copy_from_user(&o_tmp, userp,
57687+ sizeof (struct acl_object_label)))
57688+ break;
57689+
57690+ userp = o_tmp.prev;
57691+ num++;
57692+ }
57693+
57694+ return num;
57695+}
57696+
57697+static struct acl_subject_label *
57698+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57699+
57700+static int
57701+copy_user_glob(struct acl_object_label *obj)
57702+{
57703+ struct acl_object_label *g_tmp, **guser;
57704+ unsigned int len;
57705+ char *tmp;
57706+
57707+ if (obj->globbed == NULL)
57708+ return 0;
57709+
57710+ guser = &obj->globbed;
57711+ while (*guser) {
57712+ g_tmp = (struct acl_object_label *)
57713+ acl_alloc(sizeof (struct acl_object_label));
57714+ if (g_tmp == NULL)
57715+ return -ENOMEM;
57716+
57717+ if (copy_from_user(g_tmp, *guser,
57718+ sizeof (struct acl_object_label)))
57719+ return -EFAULT;
57720+
57721+ len = strnlen_user(g_tmp->filename, PATH_MAX);
57722+
57723+ if (!len || len >= PATH_MAX)
57724+ return -EINVAL;
57725+
57726+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57727+ return -ENOMEM;
57728+
57729+ if (copy_from_user(tmp, g_tmp->filename, len))
57730+ return -EFAULT;
57731+ tmp[len-1] = '\0';
57732+ g_tmp->filename = tmp;
57733+
57734+ *guser = g_tmp;
57735+ guser = &(g_tmp->next);
57736+ }
57737+
57738+ return 0;
57739+}
57740+
57741+static int
57742+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57743+ struct acl_role_label *role)
57744+{
57745+ struct acl_object_label *o_tmp;
57746+ unsigned int len;
57747+ int ret;
57748+ char *tmp;
57749+
57750+ while (userp) {
57751+ if ((o_tmp = (struct acl_object_label *)
57752+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
57753+ return -ENOMEM;
57754+
57755+ if (copy_from_user(o_tmp, userp,
57756+ sizeof (struct acl_object_label)))
57757+ return -EFAULT;
57758+
57759+ userp = o_tmp->prev;
57760+
57761+ len = strnlen_user(o_tmp->filename, PATH_MAX);
57762+
57763+ if (!len || len >= PATH_MAX)
57764+ return -EINVAL;
57765+
57766+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57767+ return -ENOMEM;
57768+
57769+ if (copy_from_user(tmp, o_tmp->filename, len))
57770+ return -EFAULT;
57771+ tmp[len-1] = '\0';
57772+ o_tmp->filename = tmp;
57773+
57774+ insert_acl_obj_label(o_tmp, subj);
57775+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57776+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57777+ return -ENOMEM;
57778+
57779+ ret = copy_user_glob(o_tmp);
57780+ if (ret)
57781+ return ret;
57782+
57783+ if (o_tmp->nested) {
57784+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57785+ if (IS_ERR(o_tmp->nested))
57786+ return PTR_ERR(o_tmp->nested);
57787+
57788+ /* insert into nested subject list */
57789+ o_tmp->nested->next = role->hash->first;
57790+ role->hash->first = o_tmp->nested;
57791+ }
57792+ }
57793+
57794+ return 0;
57795+}
57796+
57797+static __u32
57798+count_user_subjs(struct acl_subject_label *userp)
57799+{
57800+ struct acl_subject_label s_tmp;
57801+ __u32 num = 0;
57802+
57803+ while (userp) {
57804+ if (copy_from_user(&s_tmp, userp,
57805+ sizeof (struct acl_subject_label)))
57806+ break;
57807+
57808+ userp = s_tmp.prev;
57809+ /* do not count nested subjects against this count, since
57810+ they are not included in the hash table, but are
57811+ attached to objects. We have already counted
57812+ the subjects in userspace for the allocation
57813+ stack
57814+ */
57815+ if (!(s_tmp.mode & GR_NESTED))
57816+ num++;
57817+ }
57818+
57819+ return num;
57820+}
57821+
57822+static int
57823+copy_user_allowedips(struct acl_role_label *rolep)
57824+{
57825+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57826+
57827+ ruserip = rolep->allowed_ips;
57828+
57829+ while (ruserip) {
57830+ rlast = rtmp;
57831+
57832+ if ((rtmp = (struct role_allowed_ip *)
57833+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57834+ return -ENOMEM;
57835+
57836+ if (copy_from_user(rtmp, ruserip,
57837+ sizeof (struct role_allowed_ip)))
57838+ return -EFAULT;
57839+
57840+ ruserip = rtmp->prev;
57841+
57842+ if (!rlast) {
57843+ rtmp->prev = NULL;
57844+ rolep->allowed_ips = rtmp;
57845+ } else {
57846+ rlast->next = rtmp;
57847+ rtmp->prev = rlast;
57848+ }
57849+
57850+ if (!ruserip)
57851+ rtmp->next = NULL;
57852+ }
57853+
57854+ return 0;
57855+}
57856+
57857+static int
57858+copy_user_transitions(struct acl_role_label *rolep)
57859+{
57860+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
57861+
57862+ unsigned int len;
57863+ char *tmp;
57864+
57865+ rusertp = rolep->transitions;
57866+
57867+ while (rusertp) {
57868+ rlast = rtmp;
57869+
57870+ if ((rtmp = (struct role_transition *)
57871+ acl_alloc(sizeof (struct role_transition))) == NULL)
57872+ return -ENOMEM;
57873+
57874+ if (copy_from_user(rtmp, rusertp,
57875+ sizeof (struct role_transition)))
57876+ return -EFAULT;
57877+
57878+ rusertp = rtmp->prev;
57879+
57880+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57881+
57882+ if (!len || len >= GR_SPROLE_LEN)
57883+ return -EINVAL;
57884+
57885+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57886+ return -ENOMEM;
57887+
57888+ if (copy_from_user(tmp, rtmp->rolename, len))
57889+ return -EFAULT;
57890+ tmp[len-1] = '\0';
57891+ rtmp->rolename = tmp;
57892+
57893+ if (!rlast) {
57894+ rtmp->prev = NULL;
57895+ rolep->transitions = rtmp;
57896+ } else {
57897+ rlast->next = rtmp;
57898+ rtmp->prev = rlast;
57899+ }
57900+
57901+ if (!rusertp)
57902+ rtmp->next = NULL;
57903+ }
57904+
57905+ return 0;
57906+}
57907+
57908+static struct acl_subject_label *
57909+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57910+{
57911+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57912+ unsigned int len;
57913+ char *tmp;
57914+ __u32 num_objs;
57915+ struct acl_ip_label **i_tmp, *i_utmp2;
57916+ struct gr_hash_struct ghash;
57917+ struct subject_map *subjmap;
57918+ unsigned int i_num;
57919+ int err;
57920+
57921+ s_tmp = lookup_subject_map(userp);
57922+
57923+ /* we've already copied this subject into the kernel, just return
57924+ the reference to it, and don't copy it over again
57925+ */
57926+ if (s_tmp)
57927+ return(s_tmp);
57928+
57929+ if ((s_tmp = (struct acl_subject_label *)
57930+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57931+ return ERR_PTR(-ENOMEM);
57932+
57933+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57934+ if (subjmap == NULL)
57935+ return ERR_PTR(-ENOMEM);
57936+
57937+ subjmap->user = userp;
57938+ subjmap->kernel = s_tmp;
57939+ insert_subj_map_entry(subjmap);
57940+
57941+ if (copy_from_user(s_tmp, userp,
57942+ sizeof (struct acl_subject_label)))
57943+ return ERR_PTR(-EFAULT);
57944+
57945+ len = strnlen_user(s_tmp->filename, PATH_MAX);
57946+
57947+ if (!len || len >= PATH_MAX)
57948+ return ERR_PTR(-EINVAL);
57949+
57950+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57951+ return ERR_PTR(-ENOMEM);
57952+
57953+ if (copy_from_user(tmp, s_tmp->filename, len))
57954+ return ERR_PTR(-EFAULT);
57955+ tmp[len-1] = '\0';
57956+ s_tmp->filename = tmp;
57957+
57958+ if (!strcmp(s_tmp->filename, "/"))
57959+ role->root_label = s_tmp;
57960+
57961+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57962+ return ERR_PTR(-EFAULT);
57963+
57964+ /* copy user and group transition tables */
57965+
57966+ if (s_tmp->user_trans_num) {
57967+ uid_t *uidlist;
57968+
57969+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57970+ if (uidlist == NULL)
57971+ return ERR_PTR(-ENOMEM);
57972+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57973+ return ERR_PTR(-EFAULT);
57974+
57975+ s_tmp->user_transitions = uidlist;
57976+ }
57977+
57978+ if (s_tmp->group_trans_num) {
57979+ gid_t *gidlist;
57980+
57981+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57982+ if (gidlist == NULL)
57983+ return ERR_PTR(-ENOMEM);
57984+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57985+ return ERR_PTR(-EFAULT);
57986+
57987+ s_tmp->group_transitions = gidlist;
57988+ }
57989+
57990+ /* set up object hash table */
57991+ num_objs = count_user_objs(ghash.first);
57992+
57993+ s_tmp->obj_hash_size = num_objs;
57994+ s_tmp->obj_hash =
57995+ (struct acl_object_label **)
57996+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57997+
57998+ if (!s_tmp->obj_hash)
57999+ return ERR_PTR(-ENOMEM);
58000+
58001+ memset(s_tmp->obj_hash, 0,
58002+ s_tmp->obj_hash_size *
58003+ sizeof (struct acl_object_label *));
58004+
58005+ /* add in objects */
58006+ err = copy_user_objs(ghash.first, s_tmp, role);
58007+
58008+ if (err)
58009+ return ERR_PTR(err);
58010+
58011+ /* set pointer for parent subject */
58012+ if (s_tmp->parent_subject) {
58013+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
58014+
58015+ if (IS_ERR(s_tmp2))
58016+ return s_tmp2;
58017+
58018+ s_tmp->parent_subject = s_tmp2;
58019+ }
58020+
58021+ /* add in ip acls */
58022+
58023+ if (!s_tmp->ip_num) {
58024+ s_tmp->ips = NULL;
58025+ goto insert;
58026+ }
58027+
58028+ i_tmp =
58029+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
58030+ sizeof (struct acl_ip_label *));
58031+
58032+ if (!i_tmp)
58033+ return ERR_PTR(-ENOMEM);
58034+
58035+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
58036+ *(i_tmp + i_num) =
58037+ (struct acl_ip_label *)
58038+ acl_alloc(sizeof (struct acl_ip_label));
58039+ if (!*(i_tmp + i_num))
58040+ return ERR_PTR(-ENOMEM);
58041+
58042+ if (copy_from_user
58043+ (&i_utmp2, s_tmp->ips + i_num,
58044+ sizeof (struct acl_ip_label *)))
58045+ return ERR_PTR(-EFAULT);
58046+
58047+ if (copy_from_user
58048+ (*(i_tmp + i_num), i_utmp2,
58049+ sizeof (struct acl_ip_label)))
58050+ return ERR_PTR(-EFAULT);
58051+
58052+ if ((*(i_tmp + i_num))->iface == NULL)
58053+ continue;
58054+
58055+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
58056+ if (!len || len >= IFNAMSIZ)
58057+ return ERR_PTR(-EINVAL);
58058+ tmp = acl_alloc(len);
58059+ if (tmp == NULL)
58060+ return ERR_PTR(-ENOMEM);
58061+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
58062+ return ERR_PTR(-EFAULT);
58063+ (*(i_tmp + i_num))->iface = tmp;
58064+ }
58065+
58066+ s_tmp->ips = i_tmp;
58067+
58068+insert:
58069+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
58070+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
58071+ return ERR_PTR(-ENOMEM);
58072+
58073+ return s_tmp;
58074+}
58075+
58076+static int
58077+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
58078+{
58079+ struct acl_subject_label s_pre;
58080+ struct acl_subject_label * ret;
58081+ int err;
58082+
58083+ while (userp) {
58084+ if (copy_from_user(&s_pre, userp,
58085+ sizeof (struct acl_subject_label)))
58086+ return -EFAULT;
58087+
58088+ /* do not add nested subjects here, add
58089+ while parsing objects
58090+ */
58091+
58092+ if (s_pre.mode & GR_NESTED) {
58093+ userp = s_pre.prev;
58094+ continue;
58095+ }
58096+
58097+ ret = do_copy_user_subj(userp, role);
58098+
58099+ err = PTR_ERR(ret);
58100+ if (IS_ERR(ret))
58101+ return err;
58102+
58103+ insert_acl_subj_label(ret, role);
58104+
58105+ userp = s_pre.prev;
58106+ }
58107+
58108+ return 0;
58109+}
58110+
58111+static int
58112+copy_user_acl(struct gr_arg *arg)
58113+{
58114+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
58115+ struct sprole_pw *sptmp;
58116+ struct gr_hash_struct *ghash;
58117+ uid_t *domainlist;
58118+ unsigned int r_num;
58119+ unsigned int len;
58120+ char *tmp;
58121+ int err = 0;
58122+ __u16 i;
58123+ __u32 num_subjs;
58124+
58125+ /* we need a default and kernel role */
58126+ if (arg->role_db.num_roles < 2)
58127+ return -EINVAL;
58128+
58129+ /* copy special role authentication info from userspace */
58130+
58131+ num_sprole_pws = arg->num_sprole_pws;
58132+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
58133+
58134+ if (!acl_special_roles) {
58135+ err = -ENOMEM;
58136+ goto cleanup;
58137+ }
58138+
58139+ for (i = 0; i < num_sprole_pws; i++) {
58140+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
58141+ if (!sptmp) {
58142+ err = -ENOMEM;
58143+ goto cleanup;
58144+ }
58145+ if (copy_from_user(sptmp, arg->sprole_pws + i,
58146+ sizeof (struct sprole_pw))) {
58147+ err = -EFAULT;
58148+ goto cleanup;
58149+ }
58150+
58151+ len =
58152+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58153+
58154+ if (!len || len >= GR_SPROLE_LEN) {
58155+ err = -EINVAL;
58156+ goto cleanup;
58157+ }
58158+
58159+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
58160+ err = -ENOMEM;
58161+ goto cleanup;
58162+ }
58163+
58164+ if (copy_from_user(tmp, sptmp->rolename, len)) {
58165+ err = -EFAULT;
58166+ goto cleanup;
58167+ }
58168+ tmp[len-1] = '\0';
58169+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58170+ printk(KERN_ALERT "Copying special role %s\n", tmp);
58171+#endif
58172+ sptmp->rolename = tmp;
58173+ acl_special_roles[i] = sptmp;
58174+ }
58175+
58176+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
58177+
58178+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
58179+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
58180+
58181+ if (!r_tmp) {
58182+ err = -ENOMEM;
58183+ goto cleanup;
58184+ }
58185+
58186+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
58187+ sizeof (struct acl_role_label *))) {
58188+ err = -EFAULT;
58189+ goto cleanup;
58190+ }
58191+
58192+ if (copy_from_user(r_tmp, r_utmp2,
58193+ sizeof (struct acl_role_label))) {
58194+ err = -EFAULT;
58195+ goto cleanup;
58196+ }
58197+
58198+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
58199+
58200+ if (!len || len >= PATH_MAX) {
58201+ err = -EINVAL;
58202+ goto cleanup;
58203+ }
58204+
58205+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
58206+ err = -ENOMEM;
58207+ goto cleanup;
58208+ }
58209+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
58210+ err = -EFAULT;
58211+ goto cleanup;
58212+ }
58213+ tmp[len-1] = '\0';
58214+ r_tmp->rolename = tmp;
58215+
58216+ if (!strcmp(r_tmp->rolename, "default")
58217+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
58218+ default_role = r_tmp;
58219+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
58220+ kernel_role = r_tmp;
58221+ }
58222+
58223+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
58224+ err = -ENOMEM;
58225+ goto cleanup;
58226+ }
58227+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
58228+ err = -EFAULT;
58229+ goto cleanup;
58230+ }
58231+
58232+ r_tmp->hash = ghash;
58233+
58234+ num_subjs = count_user_subjs(r_tmp->hash->first);
58235+
58236+ r_tmp->subj_hash_size = num_subjs;
58237+ r_tmp->subj_hash =
58238+ (struct acl_subject_label **)
58239+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
58240+
58241+ if (!r_tmp->subj_hash) {
58242+ err = -ENOMEM;
58243+ goto cleanup;
58244+ }
58245+
58246+ err = copy_user_allowedips(r_tmp);
58247+ if (err)
58248+ goto cleanup;
58249+
58250+ /* copy domain info */
58251+ if (r_tmp->domain_children != NULL) {
58252+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
58253+ if (domainlist == NULL) {
58254+ err = -ENOMEM;
58255+ goto cleanup;
58256+ }
58257+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
58258+ err = -EFAULT;
58259+ goto cleanup;
58260+ }
58261+ r_tmp->domain_children = domainlist;
58262+ }
58263+
58264+ err = copy_user_transitions(r_tmp);
58265+ if (err)
58266+ goto cleanup;
58267+
58268+ memset(r_tmp->subj_hash, 0,
58269+ r_tmp->subj_hash_size *
58270+ sizeof (struct acl_subject_label *));
58271+
58272+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
58273+
58274+ if (err)
58275+ goto cleanup;
58276+
58277+ /* set nested subject list to null */
58278+ r_tmp->hash->first = NULL;
58279+
58280+ insert_acl_role_label(r_tmp);
58281+ }
58282+
58283+ goto return_err;
58284+ cleanup:
58285+ free_variables();
58286+ return_err:
58287+ return err;
58288+
58289+}
58290+
58291+static int
58292+gracl_init(struct gr_arg *args)
58293+{
58294+ int error = 0;
58295+
58296+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58297+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58298+
58299+ if (init_variables(args)) {
58300+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58301+ error = -ENOMEM;
58302+ free_variables();
58303+ goto out;
58304+ }
58305+
58306+ error = copy_user_acl(args);
58307+ free_init_variables();
58308+ if (error) {
58309+ free_variables();
58310+ goto out;
58311+ }
58312+
58313+ if ((error = gr_set_acls(0))) {
58314+ free_variables();
58315+ goto out;
58316+ }
58317+
58318+ pax_open_kernel();
58319+ gr_status |= GR_READY;
58320+ pax_close_kernel();
58321+
58322+ out:
58323+ return error;
58324+}
58325+
58326+/* derived from glibc fnmatch() 0: match, 1: no match*/
58327+
58328+static int
58329+glob_match(const char *p, const char *n)
58330+{
58331+ char c;
58332+
58333+ while ((c = *p++) != '\0') {
58334+ switch (c) {
58335+ case '?':
58336+ if (*n == '\0')
58337+ return 1;
58338+ else if (*n == '/')
58339+ return 1;
58340+ break;
58341+ case '\\':
58342+ if (*n != c)
58343+ return 1;
58344+ break;
58345+ case '*':
58346+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
58347+ if (*n == '/')
58348+ return 1;
58349+ else if (c == '?') {
58350+ if (*n == '\0')
58351+ return 1;
58352+ else
58353+ ++n;
58354+ }
58355+ }
58356+ if (c == '\0') {
58357+ return 0;
58358+ } else {
58359+ const char *endp;
58360+
58361+ if ((endp = strchr(n, '/')) == NULL)
58362+ endp = n + strlen(n);
58363+
58364+ if (c == '[') {
58365+ for (--p; n < endp; ++n)
58366+ if (!glob_match(p, n))
58367+ return 0;
58368+ } else if (c == '/') {
58369+ while (*n != '\0' && *n != '/')
58370+ ++n;
58371+ if (*n == '/' && !glob_match(p, n + 1))
58372+ return 0;
58373+ } else {
58374+ for (--p; n < endp; ++n)
58375+ if (*n == c && !glob_match(p, n))
58376+ return 0;
58377+ }
58378+
58379+ return 1;
58380+ }
58381+ case '[':
58382+ {
58383+ int not;
58384+ char cold;
58385+
58386+ if (*n == '\0' || *n == '/')
58387+ return 1;
58388+
58389+ not = (*p == '!' || *p == '^');
58390+ if (not)
58391+ ++p;
58392+
58393+ c = *p++;
58394+ for (;;) {
58395+ unsigned char fn = (unsigned char)*n;
58396+
58397+ if (c == '\0')
58398+ return 1;
58399+ else {
58400+ if (c == fn)
58401+ goto matched;
58402+ cold = c;
58403+ c = *p++;
58404+
58405+ if (c == '-' && *p != ']') {
58406+ unsigned char cend = *p++;
58407+
58408+ if (cend == '\0')
58409+ return 1;
58410+
58411+ if (cold <= fn && fn <= cend)
58412+ goto matched;
58413+
58414+ c = *p++;
58415+ }
58416+ }
58417+
58418+ if (c == ']')
58419+ break;
58420+ }
58421+ if (!not)
58422+ return 1;
58423+ break;
58424+ matched:
58425+ while (c != ']') {
58426+ if (c == '\0')
58427+ return 1;
58428+
58429+ c = *p++;
58430+ }
58431+ if (not)
58432+ return 1;
58433+ }
58434+ break;
58435+ default:
58436+ if (c != *n)
58437+ return 1;
58438+ }
58439+
58440+ ++n;
58441+ }
58442+
58443+ if (*n == '\0')
58444+ return 0;
58445+
58446+ if (*n == '/')
58447+ return 0;
58448+
58449+ return 1;
58450+}
58451+
58452+static struct acl_object_label *
58453+chk_glob_label(struct acl_object_label *globbed,
58454+ struct dentry *dentry, struct vfsmount *mnt, char **path)
58455+{
58456+ struct acl_object_label *tmp;
58457+
58458+ if (*path == NULL)
58459+ *path = gr_to_filename_nolock(dentry, mnt);
58460+
58461+ tmp = globbed;
58462+
58463+ while (tmp) {
58464+ if (!glob_match(tmp->filename, *path))
58465+ return tmp;
58466+ tmp = tmp->next;
58467+ }
58468+
58469+ return NULL;
58470+}
58471+
58472+static struct acl_object_label *
58473+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58474+ const ino_t curr_ino, const dev_t curr_dev,
58475+ const struct acl_subject_label *subj, char **path, const int checkglob)
58476+{
58477+ struct acl_subject_label *tmpsubj;
58478+ struct acl_object_label *retval;
58479+ struct acl_object_label *retval2;
58480+
58481+ tmpsubj = (struct acl_subject_label *) subj;
58482+ read_lock(&gr_inode_lock);
58483+ do {
58484+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58485+ if (retval) {
58486+ if (checkglob && retval->globbed) {
58487+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
58488+ (struct vfsmount *)orig_mnt, path);
58489+ if (retval2)
58490+ retval = retval2;
58491+ }
58492+ break;
58493+ }
58494+ } while ((tmpsubj = tmpsubj->parent_subject));
58495+ read_unlock(&gr_inode_lock);
58496+
58497+ return retval;
58498+}
58499+
58500+static __inline__ struct acl_object_label *
58501+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58502+ const struct dentry *curr_dentry,
58503+ const struct acl_subject_label *subj, char **path, const int checkglob)
58504+{
58505+ int newglob = checkglob;
58506+
58507+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58508+ as we don't want a / * rule to match instead of the / object
58509+ don't do this for create lookups that call this function though, since they're looking up
58510+ on the parent and thus need globbing checks on all paths
58511+ */
58512+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58513+ newglob = GR_NO_GLOB;
58514+
58515+ return __full_lookup(orig_dentry, orig_mnt,
58516+ curr_dentry->d_inode->i_ino,
58517+ __get_dev(curr_dentry), subj, path, newglob);
58518+}
58519+
58520+static struct acl_object_label *
58521+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58522+ const struct acl_subject_label *subj, char *path, const int checkglob)
58523+{
58524+ struct dentry *dentry = (struct dentry *) l_dentry;
58525+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58526+ struct acl_object_label *retval;
58527+
58528+ spin_lock(&dcache_lock);
58529+ spin_lock(&vfsmount_lock);
58530+
58531+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58532+#ifdef CONFIG_NET
58533+ mnt == sock_mnt ||
58534+#endif
58535+#ifdef CONFIG_HUGETLBFS
58536+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
58537+#endif
58538+ /* ignore Eric Biederman */
58539+ IS_PRIVATE(l_dentry->d_inode))) {
58540+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58541+ goto out;
58542+ }
58543+
58544+ for (;;) {
58545+ if (dentry == real_root && mnt == real_root_mnt)
58546+ break;
58547+
58548+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58549+ if (mnt->mnt_parent == mnt)
58550+ break;
58551+
58552+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58553+ if (retval != NULL)
58554+ goto out;
58555+
58556+ dentry = mnt->mnt_mountpoint;
58557+ mnt = mnt->mnt_parent;
58558+ continue;
58559+ }
58560+
58561+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58562+ if (retval != NULL)
58563+ goto out;
58564+
58565+ dentry = dentry->d_parent;
58566+ }
58567+
58568+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58569+
58570+ if (retval == NULL)
58571+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
58572+out:
58573+ spin_unlock(&vfsmount_lock);
58574+ spin_unlock(&dcache_lock);
58575+
58576+ BUG_ON(retval == NULL);
58577+
58578+ return retval;
58579+}
58580+
58581+static __inline__ struct acl_object_label *
58582+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58583+ const struct acl_subject_label *subj)
58584+{
58585+ char *path = NULL;
58586+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58587+}
58588+
58589+static __inline__ struct acl_object_label *
58590+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58591+ const struct acl_subject_label *subj)
58592+{
58593+ char *path = NULL;
58594+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58595+}
58596+
58597+static __inline__ struct acl_object_label *
58598+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58599+ const struct acl_subject_label *subj, char *path)
58600+{
58601+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58602+}
58603+
58604+static struct acl_subject_label *
58605+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58606+ const struct acl_role_label *role)
58607+{
58608+ struct dentry *dentry = (struct dentry *) l_dentry;
58609+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58610+ struct acl_subject_label *retval;
58611+
58612+ spin_lock(&dcache_lock);
58613+ spin_lock(&vfsmount_lock);
58614+
58615+ for (;;) {
58616+ if (dentry == real_root && mnt == real_root_mnt)
58617+ break;
58618+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58619+ if (mnt->mnt_parent == mnt)
58620+ break;
58621+
58622+ read_lock(&gr_inode_lock);
58623+ retval =
58624+ lookup_acl_subj_label(dentry->d_inode->i_ino,
58625+ __get_dev(dentry), role);
58626+ read_unlock(&gr_inode_lock);
58627+ if (retval != NULL)
58628+ goto out;
58629+
58630+ dentry = mnt->mnt_mountpoint;
58631+ mnt = mnt->mnt_parent;
58632+ continue;
58633+ }
58634+
58635+ read_lock(&gr_inode_lock);
58636+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58637+ __get_dev(dentry), role);
58638+ read_unlock(&gr_inode_lock);
58639+ if (retval != NULL)
58640+ goto out;
58641+
58642+ dentry = dentry->d_parent;
58643+ }
58644+
58645+ read_lock(&gr_inode_lock);
58646+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58647+ __get_dev(dentry), role);
58648+ read_unlock(&gr_inode_lock);
58649+
58650+ if (unlikely(retval == NULL)) {
58651+ read_lock(&gr_inode_lock);
58652+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
58653+ __get_dev(real_root), role);
58654+ read_unlock(&gr_inode_lock);
58655+ }
58656+out:
58657+ spin_unlock(&vfsmount_lock);
58658+ spin_unlock(&dcache_lock);
58659+
58660+ BUG_ON(retval == NULL);
58661+
58662+ return retval;
58663+}
58664+
58665+static void
58666+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58667+{
58668+ struct task_struct *task = current;
58669+ const struct cred *cred = current_cred();
58670+
58671+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58672+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58673+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58674+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58675+
58676+ return;
58677+}
58678+
58679+static void
58680+gr_log_learn_sysctl(const char *path, const __u32 mode)
58681+{
58682+ struct task_struct *task = current;
58683+ const struct cred *cred = current_cred();
58684+
58685+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58686+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58687+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58688+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58689+
58690+ return;
58691+}
58692+
58693+static void
58694+gr_log_learn_id_change(const char type, const unsigned int real,
58695+ const unsigned int effective, const unsigned int fs)
58696+{
58697+ struct task_struct *task = current;
58698+ const struct cred *cred = current_cred();
58699+
58700+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58701+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58702+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58703+ type, real, effective, fs, &task->signal->saved_ip);
58704+
58705+ return;
58706+}
58707+
58708+__u32
58709+gr_search_file(const struct dentry * dentry, const __u32 mode,
58710+ const struct vfsmount * mnt)
58711+{
58712+ __u32 retval = mode;
58713+ struct acl_subject_label *curracl;
58714+ struct acl_object_label *currobj;
58715+
58716+ if (unlikely(!(gr_status & GR_READY)))
58717+ return (mode & ~GR_AUDITS);
58718+
58719+ curracl = current->acl;
58720+
58721+ currobj = chk_obj_label(dentry, mnt, curracl);
58722+ retval = currobj->mode & mode;
58723+
58724+ /* if we're opening a specified transfer file for writing
58725+ (e.g. /dev/initctl), then transfer our role to init
58726+ */
58727+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58728+ current->role->roletype & GR_ROLE_PERSIST)) {
58729+ struct task_struct *task = init_pid_ns.child_reaper;
58730+
58731+ if (task->role != current->role) {
58732+ task->acl_sp_role = 0;
58733+ task->acl_role_id = current->acl_role_id;
58734+ task->role = current->role;
58735+ rcu_read_lock();
58736+ read_lock(&grsec_exec_file_lock);
58737+ gr_apply_subject_to_task(task);
58738+ read_unlock(&grsec_exec_file_lock);
58739+ rcu_read_unlock();
58740+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58741+ }
58742+ }
58743+
58744+ if (unlikely
58745+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58746+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58747+ __u32 new_mode = mode;
58748+
58749+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58750+
58751+ retval = new_mode;
58752+
58753+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58754+ new_mode |= GR_INHERIT;
58755+
58756+ if (!(mode & GR_NOLEARN))
58757+ gr_log_learn(dentry, mnt, new_mode);
58758+ }
58759+
58760+ return retval;
58761+}
58762+
58763+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58764+ const struct dentry *parent,
58765+ const struct vfsmount *mnt)
58766+{
58767+ struct name_entry *match;
58768+ struct acl_object_label *matchpo;
58769+ struct acl_subject_label *curracl;
58770+ char *path;
58771+
58772+ if (unlikely(!(gr_status & GR_READY)))
58773+ return NULL;
58774+
58775+ preempt_disable();
58776+ path = gr_to_filename_rbac(new_dentry, mnt);
58777+ match = lookup_name_entry_create(path);
58778+
58779+ curracl = current->acl;
58780+
58781+ if (match) {
58782+ read_lock(&gr_inode_lock);
58783+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58784+ read_unlock(&gr_inode_lock);
58785+
58786+ if (matchpo) {
58787+ preempt_enable();
58788+ return matchpo;
58789+ }
58790+ }
58791+
58792+ // lookup parent
58793+
58794+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58795+
58796+ preempt_enable();
58797+ return matchpo;
58798+}
58799+
58800+__u32
58801+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58802+ const struct vfsmount * mnt, const __u32 mode)
58803+{
58804+ struct acl_object_label *matchpo;
58805+ __u32 retval;
58806+
58807+ if (unlikely(!(gr_status & GR_READY)))
58808+ return (mode & ~GR_AUDITS);
58809+
58810+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
58811+
58812+ retval = matchpo->mode & mode;
58813+
58814+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58815+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58816+ __u32 new_mode = mode;
58817+
58818+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58819+
58820+ gr_log_learn(new_dentry, mnt, new_mode);
58821+ return new_mode;
58822+ }
58823+
58824+ return retval;
58825+}
58826+
58827+__u32
58828+gr_check_link(const struct dentry * new_dentry,
58829+ const struct dentry * parent_dentry,
58830+ const struct vfsmount * parent_mnt,
58831+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58832+{
58833+ struct acl_object_label *obj;
58834+ __u32 oldmode, newmode;
58835+ __u32 needmode;
58836+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58837+ GR_DELETE | GR_INHERIT;
58838+
58839+ if (unlikely(!(gr_status & GR_READY)))
58840+ return (GR_CREATE | GR_LINK);
58841+
58842+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58843+ oldmode = obj->mode;
58844+
58845+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58846+ newmode = obj->mode;
58847+
58848+ needmode = newmode & checkmodes;
58849+
58850+ // old name for hardlink must have at least the permissions of the new name
58851+ if ((oldmode & needmode) != needmode)
58852+ goto bad;
58853+
58854+ // if old name had restrictions/auditing, make sure the new name does as well
58855+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58856+
58857+ // don't allow hardlinking of suid/sgid files without permission
58858+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58859+ needmode |= GR_SETID;
58860+
58861+ if ((newmode & needmode) != needmode)
58862+ goto bad;
58863+
58864+ // enforce minimum permissions
58865+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58866+ return newmode;
58867+bad:
58868+ needmode = oldmode;
58869+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58870+ needmode |= GR_SETID;
58871+
58872+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58873+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58874+ return (GR_CREATE | GR_LINK);
58875+ } else if (newmode & GR_SUPPRESS)
58876+ return GR_SUPPRESS;
58877+ else
58878+ return 0;
58879+}
58880+
58881+int
58882+gr_check_hidden_task(const struct task_struct *task)
58883+{
58884+ if (unlikely(!(gr_status & GR_READY)))
58885+ return 0;
58886+
58887+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58888+ return 1;
58889+
58890+ return 0;
58891+}
58892+
58893+int
58894+gr_check_protected_task(const struct task_struct *task)
58895+{
58896+ if (unlikely(!(gr_status & GR_READY) || !task))
58897+ return 0;
58898+
58899+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58900+ task->acl != current->acl)
58901+ return 1;
58902+
58903+ return 0;
58904+}
58905+
58906+int
58907+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58908+{
58909+ struct task_struct *p;
58910+ int ret = 0;
58911+
58912+ if (unlikely(!(gr_status & GR_READY) || !pid))
58913+ return ret;
58914+
58915+ read_lock(&tasklist_lock);
58916+ do_each_pid_task(pid, type, p) {
58917+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58918+ p->acl != current->acl) {
58919+ ret = 1;
58920+ goto out;
58921+ }
58922+ } while_each_pid_task(pid, type, p);
58923+out:
58924+ read_unlock(&tasklist_lock);
58925+
58926+ return ret;
58927+}
58928+
58929+void
58930+gr_copy_label(struct task_struct *tsk)
58931+{
58932+ /* plain copying of fields is already done by dup_task_struct */
58933+ tsk->signal->used_accept = 0;
58934+ tsk->acl_sp_role = 0;
58935+ //tsk->acl_role_id = current->acl_role_id;
58936+ //tsk->acl = current->acl;
58937+ //tsk->role = current->role;
58938+ tsk->signal->curr_ip = current->signal->curr_ip;
58939+ tsk->signal->saved_ip = current->signal->saved_ip;
58940+ if (current->exec_file)
58941+ get_file(current->exec_file);
58942+ //tsk->exec_file = current->exec_file;
58943+ //tsk->is_writable = current->is_writable;
58944+ if (unlikely(current->signal->used_accept)) {
58945+ current->signal->curr_ip = 0;
58946+ current->signal->saved_ip = 0;
58947+ }
58948+
58949+ return;
58950+}
58951+
58952+static void
58953+gr_set_proc_res(struct task_struct *task)
58954+{
58955+ struct acl_subject_label *proc;
58956+ unsigned short i;
58957+
58958+ proc = task->acl;
58959+
58960+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58961+ return;
58962+
58963+ for (i = 0; i < RLIM_NLIMITS; i++) {
58964+ if (!(proc->resmask & (1 << i)))
58965+ continue;
58966+
58967+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58968+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58969+ }
58970+
58971+ return;
58972+}
58973+
58974+extern int __gr_process_user_ban(struct user_struct *user);
58975+
58976+int
58977+gr_check_user_change(int real, int effective, int fs)
58978+{
58979+ unsigned int i;
58980+ __u16 num;
58981+ uid_t *uidlist;
58982+ int curuid;
58983+ int realok = 0;
58984+ int effectiveok = 0;
58985+ int fsok = 0;
58986+
58987+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58988+ struct user_struct *user;
58989+
58990+ if (real == -1)
58991+ goto skipit;
58992+
58993+ user = find_user(real);
58994+ if (user == NULL)
58995+ goto skipit;
58996+
58997+ if (__gr_process_user_ban(user)) {
58998+ /* for find_user */
58999+ free_uid(user);
59000+ return 1;
59001+ }
59002+
59003+ /* for find_user */
59004+ free_uid(user);
59005+
59006+skipit:
59007+#endif
59008+
59009+ if (unlikely(!(gr_status & GR_READY)))
59010+ return 0;
59011+
59012+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59013+ gr_log_learn_id_change('u', real, effective, fs);
59014+
59015+ num = current->acl->user_trans_num;
59016+ uidlist = current->acl->user_transitions;
59017+
59018+ if (uidlist == NULL)
59019+ return 0;
59020+
59021+ if (real == -1)
59022+ realok = 1;
59023+ if (effective == -1)
59024+ effectiveok = 1;
59025+ if (fs == -1)
59026+ fsok = 1;
59027+
59028+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
59029+ for (i = 0; i < num; i++) {
59030+ curuid = (int)uidlist[i];
59031+ if (real == curuid)
59032+ realok = 1;
59033+ if (effective == curuid)
59034+ effectiveok = 1;
59035+ if (fs == curuid)
59036+ fsok = 1;
59037+ }
59038+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
59039+ for (i = 0; i < num; i++) {
59040+ curuid = (int)uidlist[i];
59041+ if (real == curuid)
59042+ break;
59043+ if (effective == curuid)
59044+ break;
59045+ if (fs == curuid)
59046+ break;
59047+ }
59048+ /* not in deny list */
59049+ if (i == num) {
59050+ realok = 1;
59051+ effectiveok = 1;
59052+ fsok = 1;
59053+ }
59054+ }
59055+
59056+ if (realok && effectiveok && fsok)
59057+ return 0;
59058+ else {
59059+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
59060+ return 1;
59061+ }
59062+}
59063+
59064+int
59065+gr_check_group_change(int real, int effective, int fs)
59066+{
59067+ unsigned int i;
59068+ __u16 num;
59069+ gid_t *gidlist;
59070+ int curgid;
59071+ int realok = 0;
59072+ int effectiveok = 0;
59073+ int fsok = 0;
59074+
59075+ if (unlikely(!(gr_status & GR_READY)))
59076+ return 0;
59077+
59078+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59079+ gr_log_learn_id_change('g', real, effective, fs);
59080+
59081+ num = current->acl->group_trans_num;
59082+ gidlist = current->acl->group_transitions;
59083+
59084+ if (gidlist == NULL)
59085+ return 0;
59086+
59087+ if (real == -1)
59088+ realok = 1;
59089+ if (effective == -1)
59090+ effectiveok = 1;
59091+ if (fs == -1)
59092+ fsok = 1;
59093+
59094+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
59095+ for (i = 0; i < num; i++) {
59096+ curgid = (int)gidlist[i];
59097+ if (real == curgid)
59098+ realok = 1;
59099+ if (effective == curgid)
59100+ effectiveok = 1;
59101+ if (fs == curgid)
59102+ fsok = 1;
59103+ }
59104+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
59105+ for (i = 0; i < num; i++) {
59106+ curgid = (int)gidlist[i];
59107+ if (real == curgid)
59108+ break;
59109+ if (effective == curgid)
59110+ break;
59111+ if (fs == curgid)
59112+ break;
59113+ }
59114+ /* not in deny list */
59115+ if (i == num) {
59116+ realok = 1;
59117+ effectiveok = 1;
59118+ fsok = 1;
59119+ }
59120+ }
59121+
59122+ if (realok && effectiveok && fsok)
59123+ return 0;
59124+ else {
59125+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
59126+ return 1;
59127+ }
59128+}
59129+
59130+extern int gr_acl_is_capable(const int cap);
59131+
59132+void
59133+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
59134+{
59135+ struct acl_role_label *role = task->role;
59136+ struct acl_subject_label *subj = NULL;
59137+ struct acl_object_label *obj;
59138+ struct file *filp;
59139+
59140+ if (unlikely(!(gr_status & GR_READY)))
59141+ return;
59142+
59143+ filp = task->exec_file;
59144+
59145+ /* kernel process, we'll give them the kernel role */
59146+ if (unlikely(!filp)) {
59147+ task->role = kernel_role;
59148+ task->acl = kernel_role->root_label;
59149+ return;
59150+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
59151+ role = lookup_acl_role_label(task, uid, gid);
59152+
59153+ /* don't change the role if we're not a privileged process */
59154+ if (role && task->role != role &&
59155+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
59156+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
59157+ return;
59158+
59159+ /* perform subject lookup in possibly new role
59160+ we can use this result below in the case where role == task->role
59161+ */
59162+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
59163+
59164+ /* if we changed uid/gid, but result in the same role
59165+ and are using inheritance, don't lose the inherited subject
59166+ if current subject is other than what normal lookup
59167+ would result in, we arrived via inheritance, don't
59168+ lose subject
59169+ */
59170+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
59171+ (subj == task->acl)))
59172+ task->acl = subj;
59173+
59174+ task->role = role;
59175+
59176+ task->is_writable = 0;
59177+
59178+ /* ignore additional mmap checks for processes that are writable
59179+ by the default ACL */
59180+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59181+ if (unlikely(obj->mode & GR_WRITE))
59182+ task->is_writable = 1;
59183+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59184+ if (unlikely(obj->mode & GR_WRITE))
59185+ task->is_writable = 1;
59186+
59187+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59188+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59189+#endif
59190+
59191+ gr_set_proc_res(task);
59192+
59193+ return;
59194+}
59195+
59196+int
59197+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59198+ const int unsafe_flags)
59199+{
59200+ struct task_struct *task = current;
59201+ struct acl_subject_label *newacl;
59202+ struct acl_object_label *obj;
59203+ __u32 retmode;
59204+
59205+ if (unlikely(!(gr_status & GR_READY)))
59206+ return 0;
59207+
59208+ newacl = chk_subj_label(dentry, mnt, task->role);
59209+
59210+ task_lock(task);
59211+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
59212+ !(task->role->roletype & GR_ROLE_GOD) &&
59213+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
59214+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59215+ task_unlock(task);
59216+ if (unsafe_flags & LSM_UNSAFE_SHARE)
59217+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
59218+ else
59219+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
59220+ return -EACCES;
59221+ }
59222+ task_unlock(task);
59223+
59224+ obj = chk_obj_label(dentry, mnt, task->acl);
59225+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
59226+
59227+ if (!(task->acl->mode & GR_INHERITLEARN) &&
59228+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
59229+ if (obj->nested)
59230+ task->acl = obj->nested;
59231+ else
59232+ task->acl = newacl;
59233+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
59234+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
59235+
59236+ task->is_writable = 0;
59237+
59238+ /* ignore additional mmap checks for processes that are writable
59239+ by the default ACL */
59240+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
59241+ if (unlikely(obj->mode & GR_WRITE))
59242+ task->is_writable = 1;
59243+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
59244+ if (unlikely(obj->mode & GR_WRITE))
59245+ task->is_writable = 1;
59246+
59247+ gr_set_proc_res(task);
59248+
59249+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59250+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59251+#endif
59252+ return 0;
59253+}
59254+
59255+/* always called with valid inodev ptr */
59256+static void
59257+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
59258+{
59259+ struct acl_object_label *matchpo;
59260+ struct acl_subject_label *matchps;
59261+ struct acl_subject_label *subj;
59262+ struct acl_role_label *role;
59263+ unsigned int x;
59264+
59265+ FOR_EACH_ROLE_START(role)
59266+ FOR_EACH_SUBJECT_START(role, subj, x)
59267+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59268+ matchpo->mode |= GR_DELETED;
59269+ FOR_EACH_SUBJECT_END(subj,x)
59270+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
59271+ if (subj->inode == ino && subj->device == dev)
59272+ subj->mode |= GR_DELETED;
59273+ FOR_EACH_NESTED_SUBJECT_END(subj)
59274+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
59275+ matchps->mode |= GR_DELETED;
59276+ FOR_EACH_ROLE_END(role)
59277+
59278+ inodev->nentry->deleted = 1;
59279+
59280+ return;
59281+}
59282+
59283+void
59284+gr_handle_delete(const ino_t ino, const dev_t dev)
59285+{
59286+ struct inodev_entry *inodev;
59287+
59288+ if (unlikely(!(gr_status & GR_READY)))
59289+ return;
59290+
59291+ write_lock(&gr_inode_lock);
59292+ inodev = lookup_inodev_entry(ino, dev);
59293+ if (inodev != NULL)
59294+ do_handle_delete(inodev, ino, dev);
59295+ write_unlock(&gr_inode_lock);
59296+
59297+ return;
59298+}
59299+
59300+static void
59301+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59302+ const ino_t newinode, const dev_t newdevice,
59303+ struct acl_subject_label *subj)
59304+{
59305+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
59306+ struct acl_object_label *match;
59307+
59308+ match = subj->obj_hash[index];
59309+
59310+ while (match && (match->inode != oldinode ||
59311+ match->device != olddevice ||
59312+ !(match->mode & GR_DELETED)))
59313+ match = match->next;
59314+
59315+ if (match && (match->inode == oldinode)
59316+ && (match->device == olddevice)
59317+ && (match->mode & GR_DELETED)) {
59318+ if (match->prev == NULL) {
59319+ subj->obj_hash[index] = match->next;
59320+ if (match->next != NULL)
59321+ match->next->prev = NULL;
59322+ } else {
59323+ match->prev->next = match->next;
59324+ if (match->next != NULL)
59325+ match->next->prev = match->prev;
59326+ }
59327+ match->prev = NULL;
59328+ match->next = NULL;
59329+ match->inode = newinode;
59330+ match->device = newdevice;
59331+ match->mode &= ~GR_DELETED;
59332+
59333+ insert_acl_obj_label(match, subj);
59334+ }
59335+
59336+ return;
59337+}
59338+
59339+static void
59340+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59341+ const ino_t newinode, const dev_t newdevice,
59342+ struct acl_role_label *role)
59343+{
59344+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
59345+ struct acl_subject_label *match;
59346+
59347+ match = role->subj_hash[index];
59348+
59349+ while (match && (match->inode != oldinode ||
59350+ match->device != olddevice ||
59351+ !(match->mode & GR_DELETED)))
59352+ match = match->next;
59353+
59354+ if (match && (match->inode == oldinode)
59355+ && (match->device == olddevice)
59356+ && (match->mode & GR_DELETED)) {
59357+ if (match->prev == NULL) {
59358+ role->subj_hash[index] = match->next;
59359+ if (match->next != NULL)
59360+ match->next->prev = NULL;
59361+ } else {
59362+ match->prev->next = match->next;
59363+ if (match->next != NULL)
59364+ match->next->prev = match->prev;
59365+ }
59366+ match->prev = NULL;
59367+ match->next = NULL;
59368+ match->inode = newinode;
59369+ match->device = newdevice;
59370+ match->mode &= ~GR_DELETED;
59371+
59372+ insert_acl_subj_label(match, role);
59373+ }
59374+
59375+ return;
59376+}
59377+
59378+static void
59379+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59380+ const ino_t newinode, const dev_t newdevice)
59381+{
59382+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
59383+ struct inodev_entry *match;
59384+
59385+ match = inodev_set.i_hash[index];
59386+
59387+ while (match && (match->nentry->inode != oldinode ||
59388+ match->nentry->device != olddevice || !match->nentry->deleted))
59389+ match = match->next;
59390+
59391+ if (match && (match->nentry->inode == oldinode)
59392+ && (match->nentry->device == olddevice) &&
59393+ match->nentry->deleted) {
59394+ if (match->prev == NULL) {
59395+ inodev_set.i_hash[index] = match->next;
59396+ if (match->next != NULL)
59397+ match->next->prev = NULL;
59398+ } else {
59399+ match->prev->next = match->next;
59400+ if (match->next != NULL)
59401+ match->next->prev = match->prev;
59402+ }
59403+ match->prev = NULL;
59404+ match->next = NULL;
59405+ match->nentry->inode = newinode;
59406+ match->nentry->device = newdevice;
59407+ match->nentry->deleted = 0;
59408+
59409+ insert_inodev_entry(match);
59410+ }
59411+
59412+ return;
59413+}
59414+
59415+static void
59416+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
59417+{
59418+ struct acl_subject_label *subj;
59419+ struct acl_role_label *role;
59420+ unsigned int x;
59421+
59422+ FOR_EACH_ROLE_START(role)
59423+ update_acl_subj_label(matchn->inode, matchn->device,
59424+ inode, dev, role);
59425+
59426+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
59427+ if ((subj->inode == inode) && (subj->device == dev)) {
59428+ subj->inode = inode;
59429+ subj->device = dev;
59430+ }
59431+ FOR_EACH_NESTED_SUBJECT_END(subj)
59432+ FOR_EACH_SUBJECT_START(role, subj, x)
59433+ update_acl_obj_label(matchn->inode, matchn->device,
59434+ inode, dev, subj);
59435+ FOR_EACH_SUBJECT_END(subj,x)
59436+ FOR_EACH_ROLE_END(role)
59437+
59438+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
59439+
59440+ return;
59441+}
59442+
59443+static void
59444+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59445+ const struct vfsmount *mnt)
59446+{
59447+ ino_t ino = dentry->d_inode->i_ino;
59448+ dev_t dev = __get_dev(dentry);
59449+
59450+ __do_handle_create(matchn, ino, dev);
59451+
59452+ return;
59453+}
59454+
59455+void
59456+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59457+{
59458+ struct name_entry *matchn;
59459+
59460+ if (unlikely(!(gr_status & GR_READY)))
59461+ return;
59462+
59463+ preempt_disable();
59464+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59465+
59466+ if (unlikely((unsigned long)matchn)) {
59467+ write_lock(&gr_inode_lock);
59468+ do_handle_create(matchn, dentry, mnt);
59469+ write_unlock(&gr_inode_lock);
59470+ }
59471+ preempt_enable();
59472+
59473+ return;
59474+}
59475+
59476+void
59477+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59478+{
59479+ struct name_entry *matchn;
59480+
59481+ if (unlikely(!(gr_status & GR_READY)))
59482+ return;
59483+
59484+ preempt_disable();
59485+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59486+
59487+ if (unlikely((unsigned long)matchn)) {
59488+ write_lock(&gr_inode_lock);
59489+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59490+ write_unlock(&gr_inode_lock);
59491+ }
59492+ preempt_enable();
59493+
59494+ return;
59495+}
59496+
59497+void
59498+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59499+ struct dentry *old_dentry,
59500+ struct dentry *new_dentry,
59501+ struct vfsmount *mnt, const __u8 replace)
59502+{
59503+ struct name_entry *matchn;
59504+ struct inodev_entry *inodev;
59505+ struct inode *inode = new_dentry->d_inode;
59506+ ino_t oldinode = old_dentry->d_inode->i_ino;
59507+ dev_t olddev = __get_dev(old_dentry);
59508+
59509+ /* vfs_rename swaps the name and parent link for old_dentry and
59510+ new_dentry
59511+ at this point, old_dentry has the new name, parent link, and inode
59512+ for the renamed file
59513+ if a file is being replaced by a rename, new_dentry has the inode
59514+ and name for the replaced file
59515+ */
59516+
59517+ if (unlikely(!(gr_status & GR_READY)))
59518+ return;
59519+
59520+ preempt_disable();
59521+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59522+
59523+ /* we wouldn't have to check d_inode if it weren't for
59524+ NFS silly-renaming
59525+ */
59526+
59527+ write_lock(&gr_inode_lock);
59528+ if (unlikely(replace && inode)) {
59529+ ino_t newinode = inode->i_ino;
59530+ dev_t newdev = __get_dev(new_dentry);
59531+ inodev = lookup_inodev_entry(newinode, newdev);
59532+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59533+ do_handle_delete(inodev, newinode, newdev);
59534+ }
59535+
59536+ inodev = lookup_inodev_entry(oldinode, olddev);
59537+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59538+ do_handle_delete(inodev, oldinode, olddev);
59539+
59540+ if (unlikely((unsigned long)matchn))
59541+ do_handle_create(matchn, old_dentry, mnt);
59542+
59543+ write_unlock(&gr_inode_lock);
59544+ preempt_enable();
59545+
59546+ return;
59547+}
59548+
59549+static int
59550+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59551+ unsigned char **sum)
59552+{
59553+ struct acl_role_label *r;
59554+ struct role_allowed_ip *ipp;
59555+ struct role_transition *trans;
59556+ unsigned int i;
59557+ int found = 0;
59558+ u32 curr_ip = current->signal->curr_ip;
59559+
59560+ current->signal->saved_ip = curr_ip;
59561+
59562+ /* check transition table */
59563+
59564+ for (trans = current->role->transitions; trans; trans = trans->next) {
59565+ if (!strcmp(rolename, trans->rolename)) {
59566+ found = 1;
59567+ break;
59568+ }
59569+ }
59570+
59571+ if (!found)
59572+ return 0;
59573+
59574+ /* handle special roles that do not require authentication
59575+ and check ip */
59576+
59577+ FOR_EACH_ROLE_START(r)
59578+ if (!strcmp(rolename, r->rolename) &&
59579+ (r->roletype & GR_ROLE_SPECIAL)) {
59580+ found = 0;
59581+ if (r->allowed_ips != NULL) {
59582+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59583+ if ((ntohl(curr_ip) & ipp->netmask) ==
59584+ (ntohl(ipp->addr) & ipp->netmask))
59585+ found = 1;
59586+ }
59587+ } else
59588+ found = 2;
59589+ if (!found)
59590+ return 0;
59591+
59592+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59593+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59594+ *salt = NULL;
59595+ *sum = NULL;
59596+ return 1;
59597+ }
59598+ }
59599+ FOR_EACH_ROLE_END(r)
59600+
59601+ for (i = 0; i < num_sprole_pws; i++) {
59602+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59603+ *salt = acl_special_roles[i]->salt;
59604+ *sum = acl_special_roles[i]->sum;
59605+ return 1;
59606+ }
59607+ }
59608+
59609+ return 0;
59610+}
59611+
59612+static void
59613+assign_special_role(char *rolename)
59614+{
59615+ struct acl_object_label *obj;
59616+ struct acl_role_label *r;
59617+ struct acl_role_label *assigned = NULL;
59618+ struct task_struct *tsk;
59619+ struct file *filp;
59620+
59621+ FOR_EACH_ROLE_START(r)
59622+ if (!strcmp(rolename, r->rolename) &&
59623+ (r->roletype & GR_ROLE_SPECIAL)) {
59624+ assigned = r;
59625+ break;
59626+ }
59627+ FOR_EACH_ROLE_END(r)
59628+
59629+ if (!assigned)
59630+ return;
59631+
59632+ read_lock(&tasklist_lock);
59633+ read_lock(&grsec_exec_file_lock);
59634+
59635+ tsk = current->real_parent;
59636+ if (tsk == NULL)
59637+ goto out_unlock;
59638+
59639+ filp = tsk->exec_file;
59640+ if (filp == NULL)
59641+ goto out_unlock;
59642+
59643+ tsk->is_writable = 0;
59644+
59645+ tsk->acl_sp_role = 1;
59646+ tsk->acl_role_id = ++acl_sp_role_value;
59647+ tsk->role = assigned;
59648+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59649+
59650+ /* ignore additional mmap checks for processes that are writable
59651+ by the default ACL */
59652+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59653+ if (unlikely(obj->mode & GR_WRITE))
59654+ tsk->is_writable = 1;
59655+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59656+ if (unlikely(obj->mode & GR_WRITE))
59657+ tsk->is_writable = 1;
59658+
59659+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59660+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
59661+#endif
59662+
59663+out_unlock:
59664+ read_unlock(&grsec_exec_file_lock);
59665+ read_unlock(&tasklist_lock);
59666+ return;
59667+}
59668+
59669+int gr_check_secure_terminal(struct task_struct *task)
59670+{
59671+ struct task_struct *p, *p2, *p3;
59672+ struct files_struct *files;
59673+ struct fdtable *fdt;
59674+ struct file *our_file = NULL, *file;
59675+ int i;
59676+
59677+ if (task->signal->tty == NULL)
59678+ return 1;
59679+
59680+ files = get_files_struct(task);
59681+ if (files != NULL) {
59682+ rcu_read_lock();
59683+ fdt = files_fdtable(files);
59684+ for (i=0; i < fdt->max_fds; i++) {
59685+ file = fcheck_files(files, i);
59686+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59687+ get_file(file);
59688+ our_file = file;
59689+ }
59690+ }
59691+ rcu_read_unlock();
59692+ put_files_struct(files);
59693+ }
59694+
59695+ if (our_file == NULL)
59696+ return 1;
59697+
59698+ read_lock(&tasklist_lock);
59699+ do_each_thread(p2, p) {
59700+ files = get_files_struct(p);
59701+ if (files == NULL ||
59702+ (p->signal && p->signal->tty == task->signal->tty)) {
59703+ if (files != NULL)
59704+ put_files_struct(files);
59705+ continue;
59706+ }
59707+ rcu_read_lock();
59708+ fdt = files_fdtable(files);
59709+ for (i=0; i < fdt->max_fds; i++) {
59710+ file = fcheck_files(files, i);
59711+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59712+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59713+ p3 = task;
59714+ while (p3->pid > 0) {
59715+ if (p3 == p)
59716+ break;
59717+ p3 = p3->real_parent;
59718+ }
59719+ if (p3 == p)
59720+ break;
59721+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59722+ gr_handle_alertkill(p);
59723+ rcu_read_unlock();
59724+ put_files_struct(files);
59725+ read_unlock(&tasklist_lock);
59726+ fput(our_file);
59727+ return 0;
59728+ }
59729+ }
59730+ rcu_read_unlock();
59731+ put_files_struct(files);
59732+ } while_each_thread(p2, p);
59733+ read_unlock(&tasklist_lock);
59734+
59735+ fput(our_file);
59736+ return 1;
59737+}
59738+
59739+ssize_t
59740+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59741+{
59742+ struct gr_arg_wrapper uwrap;
59743+ unsigned char *sprole_salt = NULL;
59744+ unsigned char *sprole_sum = NULL;
59745+ int error = sizeof (struct gr_arg_wrapper);
59746+ int error2 = 0;
59747+
59748+ mutex_lock(&gr_dev_mutex);
59749+
59750+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59751+ error = -EPERM;
59752+ goto out;
59753+ }
59754+
59755+ if (count != sizeof (struct gr_arg_wrapper)) {
59756+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59757+ error = -EINVAL;
59758+ goto out;
59759+ }
59760+
59761+
59762+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59763+ gr_auth_expires = 0;
59764+ gr_auth_attempts = 0;
59765+ }
59766+
59767+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59768+ error = -EFAULT;
59769+ goto out;
59770+ }
59771+
59772+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59773+ error = -EINVAL;
59774+ goto out;
59775+ }
59776+
59777+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59778+ error = -EFAULT;
59779+ goto out;
59780+ }
59781+
59782+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59783+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59784+ time_after(gr_auth_expires, get_seconds())) {
59785+ error = -EBUSY;
59786+ goto out;
59787+ }
59788+
59789+ /* if non-root trying to do anything other than use a special role,
59790+ do not attempt authentication, do not count towards authentication
59791+ locking
59792+ */
59793+
59794+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59795+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59796+ current_uid()) {
59797+ error = -EPERM;
59798+ goto out;
59799+ }
59800+
59801+ /* ensure pw and special role name are null terminated */
59802+
59803+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59804+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59805+
59806+ /* Okay.
59807+ * We have our enough of the argument structure..(we have yet
59808+ * to copy_from_user the tables themselves) . Copy the tables
59809+ * only if we need them, i.e. for loading operations. */
59810+
59811+ switch (gr_usermode->mode) {
59812+ case GR_STATUS:
59813+ if (gr_status & GR_READY) {
59814+ error = 1;
59815+ if (!gr_check_secure_terminal(current))
59816+ error = 3;
59817+ } else
59818+ error = 2;
59819+ goto out;
59820+ case GR_SHUTDOWN:
59821+ if ((gr_status & GR_READY)
59822+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59823+ pax_open_kernel();
59824+ gr_status &= ~GR_READY;
59825+ pax_close_kernel();
59826+
59827+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59828+ free_variables();
59829+ memset(gr_usermode, 0, sizeof (struct gr_arg));
59830+ memset(gr_system_salt, 0, GR_SALT_LEN);
59831+ memset(gr_system_sum, 0, GR_SHA_LEN);
59832+ } else if (gr_status & GR_READY) {
59833+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59834+ error = -EPERM;
59835+ } else {
59836+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59837+ error = -EAGAIN;
59838+ }
59839+ break;
59840+ case GR_ENABLE:
59841+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59842+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59843+ else {
59844+ if (gr_status & GR_READY)
59845+ error = -EAGAIN;
59846+ else
59847+ error = error2;
59848+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59849+ }
59850+ break;
59851+ case GR_RELOAD:
59852+ if (!(gr_status & GR_READY)) {
59853+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59854+ error = -EAGAIN;
59855+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59856+ lock_kernel();
59857+
59858+ pax_open_kernel();
59859+ gr_status &= ~GR_READY;
59860+ pax_close_kernel();
59861+
59862+ free_variables();
59863+ if (!(error2 = gracl_init(gr_usermode))) {
59864+ unlock_kernel();
59865+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59866+ } else {
59867+ unlock_kernel();
59868+ error = error2;
59869+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59870+ }
59871+ } else {
59872+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59873+ error = -EPERM;
59874+ }
59875+ break;
59876+ case GR_SEGVMOD:
59877+ if (unlikely(!(gr_status & GR_READY))) {
59878+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59879+ error = -EAGAIN;
59880+ break;
59881+ }
59882+
59883+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59884+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59885+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59886+ struct acl_subject_label *segvacl;
59887+ segvacl =
59888+ lookup_acl_subj_label(gr_usermode->segv_inode,
59889+ gr_usermode->segv_device,
59890+ current->role);
59891+ if (segvacl) {
59892+ segvacl->crashes = 0;
59893+ segvacl->expires = 0;
59894+ }
59895+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59896+ gr_remove_uid(gr_usermode->segv_uid);
59897+ }
59898+ } else {
59899+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59900+ error = -EPERM;
59901+ }
59902+ break;
59903+ case GR_SPROLE:
59904+ case GR_SPROLEPAM:
59905+ if (unlikely(!(gr_status & GR_READY))) {
59906+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59907+ error = -EAGAIN;
59908+ break;
59909+ }
59910+
59911+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59912+ current->role->expires = 0;
59913+ current->role->auth_attempts = 0;
59914+ }
59915+
59916+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59917+ time_after(current->role->expires, get_seconds())) {
59918+ error = -EBUSY;
59919+ goto out;
59920+ }
59921+
59922+ if (lookup_special_role_auth
59923+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59924+ && ((!sprole_salt && !sprole_sum)
59925+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59926+ char *p = "";
59927+ assign_special_role(gr_usermode->sp_role);
59928+ read_lock(&tasklist_lock);
59929+ if (current->real_parent)
59930+ p = current->real_parent->role->rolename;
59931+ read_unlock(&tasklist_lock);
59932+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59933+ p, acl_sp_role_value);
59934+ } else {
59935+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59936+ error = -EPERM;
59937+ if(!(current->role->auth_attempts++))
59938+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59939+
59940+ goto out;
59941+ }
59942+ break;
59943+ case GR_UNSPROLE:
59944+ if (unlikely(!(gr_status & GR_READY))) {
59945+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59946+ error = -EAGAIN;
59947+ break;
59948+ }
59949+
59950+ if (current->role->roletype & GR_ROLE_SPECIAL) {
59951+ char *p = "";
59952+ int i = 0;
59953+
59954+ read_lock(&tasklist_lock);
59955+ if (current->real_parent) {
59956+ p = current->real_parent->role->rolename;
59957+ i = current->real_parent->acl_role_id;
59958+ }
59959+ read_unlock(&tasklist_lock);
59960+
59961+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59962+ gr_set_acls(1);
59963+ } else {
59964+ error = -EPERM;
59965+ goto out;
59966+ }
59967+ break;
59968+ default:
59969+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59970+ error = -EINVAL;
59971+ break;
59972+ }
59973+
59974+ if (error != -EPERM)
59975+ goto out;
59976+
59977+ if(!(gr_auth_attempts++))
59978+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59979+
59980+ out:
59981+ mutex_unlock(&gr_dev_mutex);
59982+ return error;
59983+}
59984+
59985+/* must be called with
59986+ rcu_read_lock();
59987+ read_lock(&tasklist_lock);
59988+ read_lock(&grsec_exec_file_lock);
59989+*/
59990+int gr_apply_subject_to_task(struct task_struct *task)
59991+{
59992+ struct acl_object_label *obj;
59993+ char *tmpname;
59994+ struct acl_subject_label *tmpsubj;
59995+ struct file *filp;
59996+ struct name_entry *nmatch;
59997+
59998+ filp = task->exec_file;
59999+ if (filp == NULL)
60000+ return 0;
60001+
60002+ /* the following is to apply the correct subject
60003+ on binaries running when the RBAC system
60004+ is enabled, when the binaries have been
60005+ replaced or deleted since their execution
60006+ -----
60007+ when the RBAC system starts, the inode/dev
60008+ from exec_file will be one the RBAC system
60009+ is unaware of. It only knows the inode/dev
60010+ of the present file on disk, or the absence
60011+ of it.
60012+ */
60013+ preempt_disable();
60014+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
60015+
60016+ nmatch = lookup_name_entry(tmpname);
60017+ preempt_enable();
60018+ tmpsubj = NULL;
60019+ if (nmatch) {
60020+ if (nmatch->deleted)
60021+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
60022+ else
60023+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
60024+ if (tmpsubj != NULL)
60025+ task->acl = tmpsubj;
60026+ }
60027+ if (tmpsubj == NULL)
60028+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
60029+ task->role);
60030+ if (task->acl) {
60031+ task->is_writable = 0;
60032+ /* ignore additional mmap checks for processes that are writable
60033+ by the default ACL */
60034+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60035+ if (unlikely(obj->mode & GR_WRITE))
60036+ task->is_writable = 1;
60037+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60038+ if (unlikely(obj->mode & GR_WRITE))
60039+ task->is_writable = 1;
60040+
60041+ gr_set_proc_res(task);
60042+
60043+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60044+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
60045+#endif
60046+ } else {
60047+ return 1;
60048+ }
60049+
60050+ return 0;
60051+}
60052+
60053+int
60054+gr_set_acls(const int type)
60055+{
60056+ struct task_struct *task, *task2;
60057+ struct acl_role_label *role = current->role;
60058+ __u16 acl_role_id = current->acl_role_id;
60059+ const struct cred *cred;
60060+ int ret;
60061+
60062+ rcu_read_lock();
60063+ read_lock(&tasklist_lock);
60064+ read_lock(&grsec_exec_file_lock);
60065+ do_each_thread(task2, task) {
60066+ /* check to see if we're called from the exit handler,
60067+ if so, only replace ACLs that have inherited the admin
60068+ ACL */
60069+
60070+ if (type && (task->role != role ||
60071+ task->acl_role_id != acl_role_id))
60072+ continue;
60073+
60074+ task->acl_role_id = 0;
60075+ task->acl_sp_role = 0;
60076+
60077+ if (task->exec_file) {
60078+ cred = __task_cred(task);
60079+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
60080+
60081+ ret = gr_apply_subject_to_task(task);
60082+ if (ret) {
60083+ read_unlock(&grsec_exec_file_lock);
60084+ read_unlock(&tasklist_lock);
60085+ rcu_read_unlock();
60086+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
60087+ return ret;
60088+ }
60089+ } else {
60090+ // it's a kernel process
60091+ task->role = kernel_role;
60092+ task->acl = kernel_role->root_label;
60093+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
60094+ task->acl->mode &= ~GR_PROCFIND;
60095+#endif
60096+ }
60097+ } while_each_thread(task2, task);
60098+ read_unlock(&grsec_exec_file_lock);
60099+ read_unlock(&tasklist_lock);
60100+ rcu_read_unlock();
60101+
60102+ return 0;
60103+}
60104+
60105+void
60106+gr_learn_resource(const struct task_struct *task,
60107+ const int res, const unsigned long wanted, const int gt)
60108+{
60109+ struct acl_subject_label *acl;
60110+ const struct cred *cred;
60111+
60112+ if (unlikely((gr_status & GR_READY) &&
60113+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
60114+ goto skip_reslog;
60115+
60116+#ifdef CONFIG_GRKERNSEC_RESLOG
60117+ gr_log_resource(task, res, wanted, gt);
60118+#endif
60119+ skip_reslog:
60120+
60121+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
60122+ return;
60123+
60124+ acl = task->acl;
60125+
60126+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
60127+ !(acl->resmask & (1 << (unsigned short) res))))
60128+ return;
60129+
60130+ if (wanted >= acl->res[res].rlim_cur) {
60131+ unsigned long res_add;
60132+
60133+ res_add = wanted;
60134+ switch (res) {
60135+ case RLIMIT_CPU:
60136+ res_add += GR_RLIM_CPU_BUMP;
60137+ break;
60138+ case RLIMIT_FSIZE:
60139+ res_add += GR_RLIM_FSIZE_BUMP;
60140+ break;
60141+ case RLIMIT_DATA:
60142+ res_add += GR_RLIM_DATA_BUMP;
60143+ break;
60144+ case RLIMIT_STACK:
60145+ res_add += GR_RLIM_STACK_BUMP;
60146+ break;
60147+ case RLIMIT_CORE:
60148+ res_add += GR_RLIM_CORE_BUMP;
60149+ break;
60150+ case RLIMIT_RSS:
60151+ res_add += GR_RLIM_RSS_BUMP;
60152+ break;
60153+ case RLIMIT_NPROC:
60154+ res_add += GR_RLIM_NPROC_BUMP;
60155+ break;
60156+ case RLIMIT_NOFILE:
60157+ res_add += GR_RLIM_NOFILE_BUMP;
60158+ break;
60159+ case RLIMIT_MEMLOCK:
60160+ res_add += GR_RLIM_MEMLOCK_BUMP;
60161+ break;
60162+ case RLIMIT_AS:
60163+ res_add += GR_RLIM_AS_BUMP;
60164+ break;
60165+ case RLIMIT_LOCKS:
60166+ res_add += GR_RLIM_LOCKS_BUMP;
60167+ break;
60168+ case RLIMIT_SIGPENDING:
60169+ res_add += GR_RLIM_SIGPENDING_BUMP;
60170+ break;
60171+ case RLIMIT_MSGQUEUE:
60172+ res_add += GR_RLIM_MSGQUEUE_BUMP;
60173+ break;
60174+ case RLIMIT_NICE:
60175+ res_add += GR_RLIM_NICE_BUMP;
60176+ break;
60177+ case RLIMIT_RTPRIO:
60178+ res_add += GR_RLIM_RTPRIO_BUMP;
60179+ break;
60180+ case RLIMIT_RTTIME:
60181+ res_add += GR_RLIM_RTTIME_BUMP;
60182+ break;
60183+ }
60184+
60185+ acl->res[res].rlim_cur = res_add;
60186+
60187+ if (wanted > acl->res[res].rlim_max)
60188+ acl->res[res].rlim_max = res_add;
60189+
60190+ /* only log the subject filename, since resource logging is supported for
60191+ single-subject learning only */
60192+ rcu_read_lock();
60193+ cred = __task_cred(task);
60194+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60195+ task->role->roletype, cred->uid, cred->gid, acl->filename,
60196+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
60197+ "", (unsigned long) res, &task->signal->saved_ip);
60198+ rcu_read_unlock();
60199+ }
60200+
60201+ return;
60202+}
60203+
60204+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
60205+void
60206+pax_set_initial_flags(struct linux_binprm *bprm)
60207+{
60208+ struct task_struct *task = current;
60209+ struct acl_subject_label *proc;
60210+ unsigned long flags;
60211+
60212+ if (unlikely(!(gr_status & GR_READY)))
60213+ return;
60214+
60215+ flags = pax_get_flags(task);
60216+
60217+ proc = task->acl;
60218+
60219+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
60220+ flags &= ~MF_PAX_PAGEEXEC;
60221+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
60222+ flags &= ~MF_PAX_SEGMEXEC;
60223+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
60224+ flags &= ~MF_PAX_RANDMMAP;
60225+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
60226+ flags &= ~MF_PAX_EMUTRAMP;
60227+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
60228+ flags &= ~MF_PAX_MPROTECT;
60229+
60230+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
60231+ flags |= MF_PAX_PAGEEXEC;
60232+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
60233+ flags |= MF_PAX_SEGMEXEC;
60234+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
60235+ flags |= MF_PAX_RANDMMAP;
60236+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
60237+ flags |= MF_PAX_EMUTRAMP;
60238+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
60239+ flags |= MF_PAX_MPROTECT;
60240+
60241+ pax_set_flags(task, flags);
60242+
60243+ return;
60244+}
60245+#endif
60246+
60247+#ifdef CONFIG_SYSCTL
60248+/* Eric Biederman likes breaking userland ABI and every inode-based security
60249+ system to save 35kb of memory */
60250+
60251+/* we modify the passed in filename, but adjust it back before returning */
60252+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
60253+{
60254+ struct name_entry *nmatch;
60255+ char *p, *lastp = NULL;
60256+ struct acl_object_label *obj = NULL, *tmp;
60257+ struct acl_subject_label *tmpsubj;
60258+ char c = '\0';
60259+
60260+ read_lock(&gr_inode_lock);
60261+
60262+ p = name + len - 1;
60263+ do {
60264+ nmatch = lookup_name_entry(name);
60265+ if (lastp != NULL)
60266+ *lastp = c;
60267+
60268+ if (nmatch == NULL)
60269+ goto next_component;
60270+ tmpsubj = current->acl;
60271+ do {
60272+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
60273+ if (obj != NULL) {
60274+ tmp = obj->globbed;
60275+ while (tmp) {
60276+ if (!glob_match(tmp->filename, name)) {
60277+ obj = tmp;
60278+ goto found_obj;
60279+ }
60280+ tmp = tmp->next;
60281+ }
60282+ goto found_obj;
60283+ }
60284+ } while ((tmpsubj = tmpsubj->parent_subject));
60285+next_component:
60286+ /* end case */
60287+ if (p == name)
60288+ break;
60289+
60290+ while (*p != '/')
60291+ p--;
60292+ if (p == name)
60293+ lastp = p + 1;
60294+ else {
60295+ lastp = p;
60296+ p--;
60297+ }
60298+ c = *lastp;
60299+ *lastp = '\0';
60300+ } while (1);
60301+found_obj:
60302+ read_unlock(&gr_inode_lock);
60303+ /* obj returned will always be non-null */
60304+ return obj;
60305+}
60306+
60307+/* returns 0 when allowing, non-zero on error
60308+ op of 0 is used for readdir, so we don't log the names of hidden files
60309+*/
60310+__u32
60311+gr_handle_sysctl(const struct ctl_table *table, const int op)
60312+{
60313+ ctl_table *tmp;
60314+ const char *proc_sys = "/proc/sys";
60315+ char *path;
60316+ struct acl_object_label *obj;
60317+ unsigned short len = 0, pos = 0, depth = 0, i;
60318+ __u32 err = 0;
60319+ __u32 mode = 0;
60320+
60321+ if (unlikely(!(gr_status & GR_READY)))
60322+ return 0;
60323+
60324+ /* for now, ignore operations on non-sysctl entries if it's not a
60325+ readdir*/
60326+ if (table->child != NULL && op != 0)
60327+ return 0;
60328+
60329+ mode |= GR_FIND;
60330+ /* it's only a read if it's an entry, read on dirs is for readdir */
60331+ if (op & MAY_READ)
60332+ mode |= GR_READ;
60333+ if (op & MAY_WRITE)
60334+ mode |= GR_WRITE;
60335+
60336+ preempt_disable();
60337+
60338+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
60339+
60340+ /* it's only a read/write if it's an actual entry, not a dir
60341+ (which are opened for readdir)
60342+ */
60343+
60344+ /* convert the requested sysctl entry into a pathname */
60345+
60346+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60347+ len += strlen(tmp->procname);
60348+ len++;
60349+ depth++;
60350+ }
60351+
60352+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
60353+ /* deny */
60354+ goto out;
60355+ }
60356+
60357+ memset(path, 0, PAGE_SIZE);
60358+
60359+ memcpy(path, proc_sys, strlen(proc_sys));
60360+
60361+ pos += strlen(proc_sys);
60362+
60363+ for (; depth > 0; depth--) {
60364+ path[pos] = '/';
60365+ pos++;
60366+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
60367+ if (depth == i) {
60368+ memcpy(path + pos, tmp->procname,
60369+ strlen(tmp->procname));
60370+ pos += strlen(tmp->procname);
60371+ }
60372+ i++;
60373+ }
60374+ }
60375+
60376+ obj = gr_lookup_by_name(path, pos);
60377+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
60378+
60379+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
60380+ ((err & mode) != mode))) {
60381+ __u32 new_mode = mode;
60382+
60383+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60384+
60385+ err = 0;
60386+ gr_log_learn_sysctl(path, new_mode);
60387+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
60388+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
60389+ err = -ENOENT;
60390+ } else if (!(err & GR_FIND)) {
60391+ err = -ENOENT;
60392+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
60393+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
60394+ path, (mode & GR_READ) ? " reading" : "",
60395+ (mode & GR_WRITE) ? " writing" : "");
60396+ err = -EACCES;
60397+ } else if ((err & mode) != mode) {
60398+ err = -EACCES;
60399+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
60400+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
60401+ path, (mode & GR_READ) ? " reading" : "",
60402+ (mode & GR_WRITE) ? " writing" : "");
60403+ err = 0;
60404+ } else
60405+ err = 0;
60406+
60407+ out:
60408+ preempt_enable();
60409+
60410+ return err;
60411+}
60412+#endif
60413+
60414+int
60415+gr_handle_proc_ptrace(struct task_struct *task)
60416+{
60417+ struct file *filp;
60418+ struct task_struct *tmp = task;
60419+ struct task_struct *curtemp = current;
60420+ __u32 retmode;
60421+
60422+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60423+ if (unlikely(!(gr_status & GR_READY)))
60424+ return 0;
60425+#endif
60426+
60427+ read_lock(&tasklist_lock);
60428+ read_lock(&grsec_exec_file_lock);
60429+ filp = task->exec_file;
60430+
60431+ while (tmp->pid > 0) {
60432+ if (tmp == curtemp)
60433+ break;
60434+ tmp = tmp->real_parent;
60435+ }
60436+
60437+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60438+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60439+ read_unlock(&grsec_exec_file_lock);
60440+ read_unlock(&tasklist_lock);
60441+ return 1;
60442+ }
60443+
60444+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60445+ if (!(gr_status & GR_READY)) {
60446+ read_unlock(&grsec_exec_file_lock);
60447+ read_unlock(&tasklist_lock);
60448+ return 0;
60449+ }
60450+#endif
60451+
60452+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60453+ read_unlock(&grsec_exec_file_lock);
60454+ read_unlock(&tasklist_lock);
60455+
60456+ if (retmode & GR_NOPTRACE)
60457+ return 1;
60458+
60459+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60460+ && (current->acl != task->acl || (current->acl != current->role->root_label
60461+ && current->pid != task->pid)))
60462+ return 1;
60463+
60464+ return 0;
60465+}
60466+
60467+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60468+{
60469+ if (unlikely(!(gr_status & GR_READY)))
60470+ return;
60471+
60472+ if (!(current->role->roletype & GR_ROLE_GOD))
60473+ return;
60474+
60475+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60476+ p->role->rolename, gr_task_roletype_to_char(p),
60477+ p->acl->filename);
60478+}
60479+
60480+int
60481+gr_handle_ptrace(struct task_struct *task, const long request)
60482+{
60483+ struct task_struct *tmp = task;
60484+ struct task_struct *curtemp = current;
60485+ __u32 retmode;
60486+
60487+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60488+ if (unlikely(!(gr_status & GR_READY)))
60489+ return 0;
60490+#endif
60491+
60492+ read_lock(&tasklist_lock);
60493+ while (tmp->pid > 0) {
60494+ if (tmp == curtemp)
60495+ break;
60496+ tmp = tmp->real_parent;
60497+ }
60498+
60499+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
60500+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60501+ read_unlock(&tasklist_lock);
60502+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60503+ return 1;
60504+ }
60505+ read_unlock(&tasklist_lock);
60506+
60507+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60508+ if (!(gr_status & GR_READY))
60509+ return 0;
60510+#endif
60511+
60512+ read_lock(&grsec_exec_file_lock);
60513+ if (unlikely(!task->exec_file)) {
60514+ read_unlock(&grsec_exec_file_lock);
60515+ return 0;
60516+ }
60517+
60518+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60519+ read_unlock(&grsec_exec_file_lock);
60520+
60521+ if (retmode & GR_NOPTRACE) {
60522+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60523+ return 1;
60524+ }
60525+
60526+ if (retmode & GR_PTRACERD) {
60527+ switch (request) {
60528+ case PTRACE_POKETEXT:
60529+ case PTRACE_POKEDATA:
60530+ case PTRACE_POKEUSR:
60531+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60532+ case PTRACE_SETREGS:
60533+ case PTRACE_SETFPREGS:
60534+#endif
60535+#ifdef CONFIG_X86
60536+ case PTRACE_SETFPXREGS:
60537+#endif
60538+#ifdef CONFIG_ALTIVEC
60539+ case PTRACE_SETVRREGS:
60540+#endif
60541+ return 1;
60542+ default:
60543+ return 0;
60544+ }
60545+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
60546+ !(current->role->roletype & GR_ROLE_GOD) &&
60547+ (current->acl != task->acl)) {
60548+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60549+ return 1;
60550+ }
60551+
60552+ return 0;
60553+}
60554+
60555+static int is_writable_mmap(const struct file *filp)
60556+{
60557+ struct task_struct *task = current;
60558+ struct acl_object_label *obj, *obj2;
60559+
60560+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60561+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60562+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60563+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60564+ task->role->root_label);
60565+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60566+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60567+ return 1;
60568+ }
60569+ }
60570+ return 0;
60571+}
60572+
60573+int
60574+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60575+{
60576+ __u32 mode;
60577+
60578+ if (unlikely(!file || !(prot & PROT_EXEC)))
60579+ return 1;
60580+
60581+ if (is_writable_mmap(file))
60582+ return 0;
60583+
60584+ mode =
60585+ gr_search_file(file->f_path.dentry,
60586+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60587+ file->f_path.mnt);
60588+
60589+ if (!gr_tpe_allow(file))
60590+ return 0;
60591+
60592+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60593+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60594+ return 0;
60595+ } else if (unlikely(!(mode & GR_EXEC))) {
60596+ return 0;
60597+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60598+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60599+ return 1;
60600+ }
60601+
60602+ return 1;
60603+}
60604+
60605+int
60606+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60607+{
60608+ __u32 mode;
60609+
60610+ if (unlikely(!file || !(prot & PROT_EXEC)))
60611+ return 1;
60612+
60613+ if (is_writable_mmap(file))
60614+ return 0;
60615+
60616+ mode =
60617+ gr_search_file(file->f_path.dentry,
60618+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60619+ file->f_path.mnt);
60620+
60621+ if (!gr_tpe_allow(file))
60622+ return 0;
60623+
60624+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60625+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60626+ return 0;
60627+ } else if (unlikely(!(mode & GR_EXEC))) {
60628+ return 0;
60629+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60630+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60631+ return 1;
60632+ }
60633+
60634+ return 1;
60635+}
60636+
60637+void
60638+gr_acl_handle_psacct(struct task_struct *task, const long code)
60639+{
60640+ unsigned long runtime;
60641+ unsigned long cputime;
60642+ unsigned int wday, cday;
60643+ __u8 whr, chr;
60644+ __u8 wmin, cmin;
60645+ __u8 wsec, csec;
60646+ struct timespec timeval;
60647+
60648+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60649+ !(task->acl->mode & GR_PROCACCT)))
60650+ return;
60651+
60652+ do_posix_clock_monotonic_gettime(&timeval);
60653+ runtime = timeval.tv_sec - task->start_time.tv_sec;
60654+ wday = runtime / (3600 * 24);
60655+ runtime -= wday * (3600 * 24);
60656+ whr = runtime / 3600;
60657+ runtime -= whr * 3600;
60658+ wmin = runtime / 60;
60659+ runtime -= wmin * 60;
60660+ wsec = runtime;
60661+
60662+ cputime = (task->utime + task->stime) / HZ;
60663+ cday = cputime / (3600 * 24);
60664+ cputime -= cday * (3600 * 24);
60665+ chr = cputime / 3600;
60666+ cputime -= chr * 3600;
60667+ cmin = cputime / 60;
60668+ cputime -= cmin * 60;
60669+ csec = cputime;
60670+
60671+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60672+
60673+ return;
60674+}
60675+
60676+void gr_set_kernel_label(struct task_struct *task)
60677+{
60678+ if (gr_status & GR_READY) {
60679+ task->role = kernel_role;
60680+ task->acl = kernel_role->root_label;
60681+ }
60682+ return;
60683+}
60684+
60685+#ifdef CONFIG_TASKSTATS
60686+int gr_is_taskstats_denied(int pid)
60687+{
60688+ struct task_struct *task;
60689+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60690+ const struct cred *cred;
60691+#endif
60692+ int ret = 0;
60693+
60694+ /* restrict taskstats viewing to un-chrooted root users
60695+ who have the 'view' subject flag if the RBAC system is enabled
60696+ */
60697+
60698+ rcu_read_lock();
60699+ read_lock(&tasklist_lock);
60700+ task = find_task_by_vpid(pid);
60701+ if (task) {
60702+#ifdef CONFIG_GRKERNSEC_CHROOT
60703+ if (proc_is_chrooted(task))
60704+ ret = -EACCES;
60705+#endif
60706+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60707+ cred = __task_cred(task);
60708+#ifdef CONFIG_GRKERNSEC_PROC_USER
60709+ if (cred->uid != 0)
60710+ ret = -EACCES;
60711+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60712+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60713+ ret = -EACCES;
60714+#endif
60715+#endif
60716+ if (gr_status & GR_READY) {
60717+ if (!(task->acl->mode & GR_VIEW))
60718+ ret = -EACCES;
60719+ }
60720+ } else
60721+ ret = -ENOENT;
60722+
60723+ read_unlock(&tasklist_lock);
60724+ rcu_read_unlock();
60725+
60726+ return ret;
60727+}
60728+#endif
60729+
60730+/* AUXV entries are filled via a descendant of search_binary_handler
60731+ after we've already applied the subject for the target
60732+*/
60733+int gr_acl_enable_at_secure(void)
60734+{
60735+ if (unlikely(!(gr_status & GR_READY)))
60736+ return 0;
60737+
60738+ if (current->acl->mode & GR_ATSECURE)
60739+ return 1;
60740+
60741+ return 0;
60742+}
60743+
60744+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60745+{
60746+ struct task_struct *task = current;
60747+ struct dentry *dentry = file->f_path.dentry;
60748+ struct vfsmount *mnt = file->f_path.mnt;
60749+ struct acl_object_label *obj, *tmp;
60750+ struct acl_subject_label *subj;
60751+ unsigned int bufsize;
60752+ int is_not_root;
60753+ char *path;
60754+ dev_t dev = __get_dev(dentry);
60755+
60756+ if (unlikely(!(gr_status & GR_READY)))
60757+ return 1;
60758+
60759+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60760+ return 1;
60761+
60762+ /* ignore Eric Biederman */
60763+ if (IS_PRIVATE(dentry->d_inode))
60764+ return 1;
60765+
60766+ subj = task->acl;
60767+ do {
60768+ obj = lookup_acl_obj_label(ino, dev, subj);
60769+ if (obj != NULL)
60770+ return (obj->mode & GR_FIND) ? 1 : 0;
60771+ } while ((subj = subj->parent_subject));
60772+
60773+ /* this is purely an optimization since we're looking for an object
60774+ for the directory we're doing a readdir on
60775+ if it's possible for any globbed object to match the entry we're
60776+ filling into the directory, then the object we find here will be
60777+ an anchor point with attached globbed objects
60778+ */
60779+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60780+ if (obj->globbed == NULL)
60781+ return (obj->mode & GR_FIND) ? 1 : 0;
60782+
60783+ is_not_root = ((obj->filename[0] == '/') &&
60784+ (obj->filename[1] == '\0')) ? 0 : 1;
60785+ bufsize = PAGE_SIZE - namelen - is_not_root;
60786+
60787+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
60788+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60789+ return 1;
60790+
60791+ preempt_disable();
60792+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60793+ bufsize);
60794+
60795+ bufsize = strlen(path);
60796+
60797+ /* if base is "/", don't append an additional slash */
60798+ if (is_not_root)
60799+ *(path + bufsize) = '/';
60800+ memcpy(path + bufsize + is_not_root, name, namelen);
60801+ *(path + bufsize + namelen + is_not_root) = '\0';
60802+
60803+ tmp = obj->globbed;
60804+ while (tmp) {
60805+ if (!glob_match(tmp->filename, path)) {
60806+ preempt_enable();
60807+ return (tmp->mode & GR_FIND) ? 1 : 0;
60808+ }
60809+ tmp = tmp->next;
60810+ }
60811+ preempt_enable();
60812+ return (obj->mode & GR_FIND) ? 1 : 0;
60813+}
60814+
60815+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60816+EXPORT_SYMBOL(gr_acl_is_enabled);
60817+#endif
60818+EXPORT_SYMBOL(gr_learn_resource);
60819+EXPORT_SYMBOL(gr_set_kernel_label);
60820+#ifdef CONFIG_SECURITY
60821+EXPORT_SYMBOL(gr_check_user_change);
60822+EXPORT_SYMBOL(gr_check_group_change);
60823+#endif
60824+
60825diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60826new file mode 100644
60827index 0000000..34fefda
60828--- /dev/null
60829+++ b/grsecurity/gracl_alloc.c
60830@@ -0,0 +1,105 @@
60831+#include <linux/kernel.h>
60832+#include <linux/mm.h>
60833+#include <linux/slab.h>
60834+#include <linux/vmalloc.h>
60835+#include <linux/gracl.h>
60836+#include <linux/grsecurity.h>
60837+
60838+static unsigned long alloc_stack_next = 1;
60839+static unsigned long alloc_stack_size = 1;
60840+static void **alloc_stack;
60841+
60842+static __inline__ int
60843+alloc_pop(void)
60844+{
60845+ if (alloc_stack_next == 1)
60846+ return 0;
60847+
60848+ kfree(alloc_stack[alloc_stack_next - 2]);
60849+
60850+ alloc_stack_next--;
60851+
60852+ return 1;
60853+}
60854+
60855+static __inline__ int
60856+alloc_push(void *buf)
60857+{
60858+ if (alloc_stack_next >= alloc_stack_size)
60859+ return 1;
60860+
60861+ alloc_stack[alloc_stack_next - 1] = buf;
60862+
60863+ alloc_stack_next++;
60864+
60865+ return 0;
60866+}
60867+
60868+void *
60869+acl_alloc(unsigned long len)
60870+{
60871+ void *ret = NULL;
60872+
60873+ if (!len || len > PAGE_SIZE)
60874+ goto out;
60875+
60876+ ret = kmalloc(len, GFP_KERNEL);
60877+
60878+ if (ret) {
60879+ if (alloc_push(ret)) {
60880+ kfree(ret);
60881+ ret = NULL;
60882+ }
60883+ }
60884+
60885+out:
60886+ return ret;
60887+}
60888+
60889+void *
60890+acl_alloc_num(unsigned long num, unsigned long len)
60891+{
60892+ if (!len || (num > (PAGE_SIZE / len)))
60893+ return NULL;
60894+
60895+ return acl_alloc(num * len);
60896+}
60897+
60898+void
60899+acl_free_all(void)
60900+{
60901+ if (gr_acl_is_enabled() || !alloc_stack)
60902+ return;
60903+
60904+ while (alloc_pop()) ;
60905+
60906+ if (alloc_stack) {
60907+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60908+ kfree(alloc_stack);
60909+ else
60910+ vfree(alloc_stack);
60911+ }
60912+
60913+ alloc_stack = NULL;
60914+ alloc_stack_size = 1;
60915+ alloc_stack_next = 1;
60916+
60917+ return;
60918+}
60919+
60920+int
60921+acl_alloc_stack_init(unsigned long size)
60922+{
60923+ if ((size * sizeof (void *)) <= PAGE_SIZE)
60924+ alloc_stack =
60925+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60926+ else
60927+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
60928+
60929+ alloc_stack_size = size;
60930+
60931+ if (!alloc_stack)
60932+ return 0;
60933+ else
60934+ return 1;
60935+}
60936diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60937new file mode 100644
60938index 0000000..955ddfb
60939--- /dev/null
60940+++ b/grsecurity/gracl_cap.c
60941@@ -0,0 +1,101 @@
60942+#include <linux/kernel.h>
60943+#include <linux/module.h>
60944+#include <linux/sched.h>
60945+#include <linux/gracl.h>
60946+#include <linux/grsecurity.h>
60947+#include <linux/grinternal.h>
60948+
60949+extern const char *captab_log[];
60950+extern int captab_log_entries;
60951+
60952+int
60953+gr_acl_is_capable(const int cap)
60954+{
60955+ struct task_struct *task = current;
60956+ const struct cred *cred = current_cred();
60957+ struct acl_subject_label *curracl;
60958+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60959+ kernel_cap_t cap_audit = __cap_empty_set;
60960+
60961+ if (!gr_acl_is_enabled())
60962+ return 1;
60963+
60964+ curracl = task->acl;
60965+
60966+ cap_drop = curracl->cap_lower;
60967+ cap_mask = curracl->cap_mask;
60968+ cap_audit = curracl->cap_invert_audit;
60969+
60970+ while ((curracl = curracl->parent_subject)) {
60971+ /* if the cap isn't specified in the current computed mask but is specified in the
60972+ current level subject, and is lowered in the current level subject, then add
60973+ it to the set of dropped capabilities
60974+ otherwise, add the current level subject's mask to the current computed mask
60975+ */
60976+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60977+ cap_raise(cap_mask, cap);
60978+ if (cap_raised(curracl->cap_lower, cap))
60979+ cap_raise(cap_drop, cap);
60980+ if (cap_raised(curracl->cap_invert_audit, cap))
60981+ cap_raise(cap_audit, cap);
60982+ }
60983+ }
60984+
60985+ if (!cap_raised(cap_drop, cap)) {
60986+ if (cap_raised(cap_audit, cap))
60987+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60988+ return 1;
60989+ }
60990+
60991+ curracl = task->acl;
60992+
60993+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60994+ && cap_raised(cred->cap_effective, cap)) {
60995+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60996+ task->role->roletype, cred->uid,
60997+ cred->gid, task->exec_file ?
60998+ gr_to_filename(task->exec_file->f_path.dentry,
60999+ task->exec_file->f_path.mnt) : curracl->filename,
61000+ curracl->filename, 0UL,
61001+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
61002+ return 1;
61003+ }
61004+
61005+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
61006+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
61007+ return 0;
61008+}
61009+
61010+int
61011+gr_acl_is_capable_nolog(const int cap)
61012+{
61013+ struct acl_subject_label *curracl;
61014+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
61015+
61016+ if (!gr_acl_is_enabled())
61017+ return 1;
61018+
61019+ curracl = current->acl;
61020+
61021+ cap_drop = curracl->cap_lower;
61022+ cap_mask = curracl->cap_mask;
61023+
61024+ while ((curracl = curracl->parent_subject)) {
61025+ /* if the cap isn't specified in the current computed mask but is specified in the
61026+ current level subject, and is lowered in the current level subject, then add
61027+ it to the set of dropped capabilities
61028+ otherwise, add the current level subject's mask to the current computed mask
61029+ */
61030+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
61031+ cap_raise(cap_mask, cap);
61032+ if (cap_raised(curracl->cap_lower, cap))
61033+ cap_raise(cap_drop, cap);
61034+ }
61035+ }
61036+
61037+ if (!cap_raised(cap_drop, cap))
61038+ return 1;
61039+
61040+ return 0;
61041+}
61042+
61043diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
61044new file mode 100644
61045index 0000000..523e7e8
61046--- /dev/null
61047+++ b/grsecurity/gracl_fs.c
61048@@ -0,0 +1,435 @@
61049+#include <linux/kernel.h>
61050+#include <linux/sched.h>
61051+#include <linux/types.h>
61052+#include <linux/fs.h>
61053+#include <linux/file.h>
61054+#include <linux/stat.h>
61055+#include <linux/grsecurity.h>
61056+#include <linux/grinternal.h>
61057+#include <linux/gracl.h>
61058+
61059+umode_t
61060+gr_acl_umask(void)
61061+{
61062+ if (unlikely(!gr_acl_is_enabled()))
61063+ return 0;
61064+
61065+ return current->role->umask;
61066+}
61067+
61068+__u32
61069+gr_acl_handle_hidden_file(const struct dentry * dentry,
61070+ const struct vfsmount * mnt)
61071+{
61072+ __u32 mode;
61073+
61074+ if (unlikely(!dentry->d_inode))
61075+ return GR_FIND;
61076+
61077+ mode =
61078+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
61079+
61080+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
61081+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61082+ return mode;
61083+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
61084+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61085+ return 0;
61086+ } else if (unlikely(!(mode & GR_FIND)))
61087+ return 0;
61088+
61089+ return GR_FIND;
61090+}
61091+
61092+__u32
61093+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61094+ int acc_mode)
61095+{
61096+ __u32 reqmode = GR_FIND;
61097+ __u32 mode;
61098+
61099+ if (unlikely(!dentry->d_inode))
61100+ return reqmode;
61101+
61102+ if (acc_mode & MAY_APPEND)
61103+ reqmode |= GR_APPEND;
61104+ else if (acc_mode & MAY_WRITE)
61105+ reqmode |= GR_WRITE;
61106+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
61107+ reqmode |= GR_READ;
61108+
61109+ mode =
61110+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61111+ mnt);
61112+
61113+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61114+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61115+ reqmode & GR_READ ? " reading" : "",
61116+ reqmode & GR_WRITE ? " writing" : reqmode &
61117+ GR_APPEND ? " appending" : "");
61118+ return reqmode;
61119+ } else
61120+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61121+ {
61122+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61123+ reqmode & GR_READ ? " reading" : "",
61124+ reqmode & GR_WRITE ? " writing" : reqmode &
61125+ GR_APPEND ? " appending" : "");
61126+ return 0;
61127+ } else if (unlikely((mode & reqmode) != reqmode))
61128+ return 0;
61129+
61130+ return reqmode;
61131+}
61132+
61133+__u32
61134+gr_acl_handle_creat(const struct dentry * dentry,
61135+ const struct dentry * p_dentry,
61136+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61137+ const int imode)
61138+{
61139+ __u32 reqmode = GR_WRITE | GR_CREATE;
61140+ __u32 mode;
61141+
61142+ if (acc_mode & MAY_APPEND)
61143+ reqmode |= GR_APPEND;
61144+ // if a directory was required or the directory already exists, then
61145+ // don't count this open as a read
61146+ if ((acc_mode & MAY_READ) &&
61147+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
61148+ reqmode |= GR_READ;
61149+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
61150+ reqmode |= GR_SETID;
61151+
61152+ mode =
61153+ gr_check_create(dentry, p_dentry, p_mnt,
61154+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61155+
61156+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61157+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61158+ reqmode & GR_READ ? " reading" : "",
61159+ reqmode & GR_WRITE ? " writing" : reqmode &
61160+ GR_APPEND ? " appending" : "");
61161+ return reqmode;
61162+ } else
61163+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61164+ {
61165+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61166+ reqmode & GR_READ ? " reading" : "",
61167+ reqmode & GR_WRITE ? " writing" : reqmode &
61168+ GR_APPEND ? " appending" : "");
61169+ return 0;
61170+ } else if (unlikely((mode & reqmode) != reqmode))
61171+ return 0;
61172+
61173+ return reqmode;
61174+}
61175+
61176+__u32
61177+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
61178+ const int fmode)
61179+{
61180+ __u32 mode, reqmode = GR_FIND;
61181+
61182+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
61183+ reqmode |= GR_EXEC;
61184+ if (fmode & S_IWOTH)
61185+ reqmode |= GR_WRITE;
61186+ if (fmode & S_IROTH)
61187+ reqmode |= GR_READ;
61188+
61189+ mode =
61190+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61191+ mnt);
61192+
61193+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61194+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61195+ reqmode & GR_READ ? " reading" : "",
61196+ reqmode & GR_WRITE ? " writing" : "",
61197+ reqmode & GR_EXEC ? " executing" : "");
61198+ return reqmode;
61199+ } else
61200+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61201+ {
61202+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61203+ reqmode & GR_READ ? " reading" : "",
61204+ reqmode & GR_WRITE ? " writing" : "",
61205+ reqmode & GR_EXEC ? " executing" : "");
61206+ return 0;
61207+ } else if (unlikely((mode & reqmode) != reqmode))
61208+ return 0;
61209+
61210+ return reqmode;
61211+}
61212+
61213+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
61214+{
61215+ __u32 mode;
61216+
61217+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
61218+
61219+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61220+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
61221+ return mode;
61222+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61223+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
61224+ return 0;
61225+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
61226+ return 0;
61227+
61228+ return (reqmode);
61229+}
61230+
61231+__u32
61232+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61233+{
61234+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
61235+}
61236+
61237+__u32
61238+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
61239+{
61240+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
61241+}
61242+
61243+__u32
61244+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
61245+{
61246+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
61247+}
61248+
61249+__u32
61250+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
61251+{
61252+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
61253+}
61254+
61255+__u32
61256+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
61257+ umode_t *modeptr)
61258+{
61259+ mode_t mode;
61260+
61261+ *modeptr &= ~(mode_t)gr_acl_umask();
61262+ mode = *modeptr;
61263+
61264+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61265+ return 1;
61266+
61267+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
61268+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61269+ GR_CHMOD_ACL_MSG);
61270+ } else {
61271+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61272+ }
61273+}
61274+
61275+__u32
61276+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61277+{
61278+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61279+}
61280+
61281+__u32
61282+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61283+{
61284+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61285+}
61286+
61287+__u32
61288+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61289+{
61290+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61291+}
61292+
61293+__u32
61294+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61295+{
61296+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61297+ GR_UNIXCONNECT_ACL_MSG);
61298+}
61299+
61300+/* hardlinks require at minimum create and link permission,
61301+ any additional privilege required is based on the
61302+ privilege of the file being linked to
61303+*/
61304+__u32
61305+gr_acl_handle_link(const struct dentry * new_dentry,
61306+ const struct dentry * parent_dentry,
61307+ const struct vfsmount * parent_mnt,
61308+ const struct dentry * old_dentry,
61309+ const struct vfsmount * old_mnt, const char *to)
61310+{
61311+ __u32 mode;
61312+ __u32 needmode = GR_CREATE | GR_LINK;
61313+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61314+
61315+ mode =
61316+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61317+ old_mnt);
61318+
61319+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61320+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61321+ return mode;
61322+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61323+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
61324+ return 0;
61325+ } else if (unlikely((mode & needmode) != needmode))
61326+ return 0;
61327+
61328+ return 1;
61329+}
61330+
61331+__u32
61332+gr_acl_handle_symlink(const struct dentry * new_dentry,
61333+ const struct dentry * parent_dentry,
61334+ const struct vfsmount * parent_mnt, const char *from)
61335+{
61336+ __u32 needmode = GR_WRITE | GR_CREATE;
61337+ __u32 mode;
61338+
61339+ mode =
61340+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
61341+ GR_CREATE | GR_AUDIT_CREATE |
61342+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61343+
61344+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61345+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61346+ return mode;
61347+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61348+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
61349+ return 0;
61350+ } else if (unlikely((mode & needmode) != needmode))
61351+ return 0;
61352+
61353+ return (GR_WRITE | GR_CREATE);
61354+}
61355+
61356+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61357+{
61358+ __u32 mode;
61359+
61360+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61361+
61362+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61363+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61364+ return mode;
61365+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61366+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61367+ return 0;
61368+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
61369+ return 0;
61370+
61371+ return (reqmode);
61372+}
61373+
61374+__u32
61375+gr_acl_handle_mknod(const struct dentry * new_dentry,
61376+ const struct dentry * parent_dentry,
61377+ const struct vfsmount * parent_mnt,
61378+ const int mode)
61379+{
61380+ __u32 reqmode = GR_WRITE | GR_CREATE;
61381+ if (unlikely(mode & (S_ISUID | S_ISGID)))
61382+ reqmode |= GR_SETID;
61383+
61384+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61385+ reqmode, GR_MKNOD_ACL_MSG);
61386+}
61387+
61388+__u32
61389+gr_acl_handle_mkdir(const struct dentry *new_dentry,
61390+ const struct dentry *parent_dentry,
61391+ const struct vfsmount *parent_mnt)
61392+{
61393+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61394+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61395+}
61396+
61397+#define RENAME_CHECK_SUCCESS(old, new) \
61398+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61399+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61400+
61401+int
61402+gr_acl_handle_rename(struct dentry *new_dentry,
61403+ struct dentry *parent_dentry,
61404+ const struct vfsmount *parent_mnt,
61405+ struct dentry *old_dentry,
61406+ struct inode *old_parent_inode,
61407+ struct vfsmount *old_mnt, const char *newname)
61408+{
61409+ __u32 comp1, comp2;
61410+ int error = 0;
61411+
61412+ if (unlikely(!gr_acl_is_enabled()))
61413+ return 0;
61414+
61415+ if (!new_dentry->d_inode) {
61416+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61417+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61418+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61419+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61420+ GR_DELETE | GR_AUDIT_DELETE |
61421+ GR_AUDIT_READ | GR_AUDIT_WRITE |
61422+ GR_SUPPRESS, old_mnt);
61423+ } else {
61424+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61425+ GR_CREATE | GR_DELETE |
61426+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61427+ GR_AUDIT_READ | GR_AUDIT_WRITE |
61428+ GR_SUPPRESS, parent_mnt);
61429+ comp2 =
61430+ gr_search_file(old_dentry,
61431+ GR_READ | GR_WRITE | GR_AUDIT_READ |
61432+ GR_DELETE | GR_AUDIT_DELETE |
61433+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61434+ }
61435+
61436+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61437+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61438+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61439+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61440+ && !(comp2 & GR_SUPPRESS)) {
61441+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
61442+ error = -EACCES;
61443+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61444+ error = -EACCES;
61445+
61446+ return error;
61447+}
61448+
61449+void
61450+gr_acl_handle_exit(void)
61451+{
61452+ u16 id;
61453+ char *rolename;
61454+ struct file *exec_file;
61455+
61456+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61457+ !(current->role->roletype & GR_ROLE_PERSIST))) {
61458+ id = current->acl_role_id;
61459+ rolename = current->role->rolename;
61460+ gr_set_acls(1);
61461+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61462+ }
61463+
61464+ write_lock(&grsec_exec_file_lock);
61465+ exec_file = current->exec_file;
61466+ current->exec_file = NULL;
61467+ write_unlock(&grsec_exec_file_lock);
61468+
61469+ if (exec_file)
61470+ fput(exec_file);
61471+}
61472+
61473+int
61474+gr_acl_handle_procpidmem(const struct task_struct *task)
61475+{
61476+ if (unlikely(!gr_acl_is_enabled()))
61477+ return 0;
61478+
61479+ if (task != current && task->acl->mode & GR_PROTPROCFD)
61480+ return -EACCES;
61481+
61482+ return 0;
61483+}
61484diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61485new file mode 100644
61486index 0000000..cd07b96
61487--- /dev/null
61488+++ b/grsecurity/gracl_ip.c
61489@@ -0,0 +1,382 @@
61490+#include <linux/kernel.h>
61491+#include <asm/uaccess.h>
61492+#include <asm/errno.h>
61493+#include <net/sock.h>
61494+#include <linux/file.h>
61495+#include <linux/fs.h>
61496+#include <linux/net.h>
61497+#include <linux/in.h>
61498+#include <linux/skbuff.h>
61499+#include <linux/ip.h>
61500+#include <linux/udp.h>
61501+#include <linux/smp_lock.h>
61502+#include <linux/types.h>
61503+#include <linux/sched.h>
61504+#include <linux/netdevice.h>
61505+#include <linux/inetdevice.h>
61506+#include <linux/gracl.h>
61507+#include <linux/grsecurity.h>
61508+#include <linux/grinternal.h>
61509+
61510+#define GR_BIND 0x01
61511+#define GR_CONNECT 0x02
61512+#define GR_INVERT 0x04
61513+#define GR_BINDOVERRIDE 0x08
61514+#define GR_CONNECTOVERRIDE 0x10
61515+#define GR_SOCK_FAMILY 0x20
61516+
61517+static const char * gr_protocols[IPPROTO_MAX] = {
61518+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61519+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61520+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61521+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61522+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61523+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61524+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61525+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61526+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61527+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61528+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61529+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61530+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61531+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61532+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61533+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61534+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61535+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61536+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61537+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61538+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61539+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61540+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61541+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61542+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61543+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61544+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61545+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61546+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61547+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61548+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61549+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61550+ };
61551+
61552+static const char * gr_socktypes[SOCK_MAX] = {
61553+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61554+ "unknown:7", "unknown:8", "unknown:9", "packet"
61555+ };
61556+
61557+static const char * gr_sockfamilies[AF_MAX+1] = {
61558+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61559+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61560+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61561+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
61562+ };
61563+
61564+const char *
61565+gr_proto_to_name(unsigned char proto)
61566+{
61567+ return gr_protocols[proto];
61568+}
61569+
61570+const char *
61571+gr_socktype_to_name(unsigned char type)
61572+{
61573+ return gr_socktypes[type];
61574+}
61575+
61576+const char *
61577+gr_sockfamily_to_name(unsigned char family)
61578+{
61579+ return gr_sockfamilies[family];
61580+}
61581+
61582+int
61583+gr_search_socket(const int domain, const int type, const int protocol)
61584+{
61585+ struct acl_subject_label *curr;
61586+ const struct cred *cred = current_cred();
61587+
61588+ if (unlikely(!gr_acl_is_enabled()))
61589+ goto exit;
61590+
61591+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
61592+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61593+ goto exit; // let the kernel handle it
61594+
61595+ curr = current->acl;
61596+
61597+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61598+ /* the family is allowed, if this is PF_INET allow it only if
61599+ the extra sock type/protocol checks pass */
61600+ if (domain == PF_INET)
61601+ goto inet_check;
61602+ goto exit;
61603+ } else {
61604+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61605+ __u32 fakeip = 0;
61606+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61607+ current->role->roletype, cred->uid,
61608+ cred->gid, current->exec_file ?
61609+ gr_to_filename(current->exec_file->f_path.dentry,
61610+ current->exec_file->f_path.mnt) :
61611+ curr->filename, curr->filename,
61612+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61613+ &current->signal->saved_ip);
61614+ goto exit;
61615+ }
61616+ goto exit_fail;
61617+ }
61618+
61619+inet_check:
61620+ /* the rest of this checking is for IPv4 only */
61621+ if (!curr->ips)
61622+ goto exit;
61623+
61624+ if ((curr->ip_type & (1 << type)) &&
61625+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61626+ goto exit;
61627+
61628+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61629+ /* we don't place acls on raw sockets , and sometimes
61630+ dgram/ip sockets are opened for ioctl and not
61631+ bind/connect, so we'll fake a bind learn log */
61632+ if (type == SOCK_RAW || type == SOCK_PACKET) {
61633+ __u32 fakeip = 0;
61634+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61635+ current->role->roletype, cred->uid,
61636+ cred->gid, current->exec_file ?
61637+ gr_to_filename(current->exec_file->f_path.dentry,
61638+ current->exec_file->f_path.mnt) :
61639+ curr->filename, curr->filename,
61640+ &fakeip, 0, type,
61641+ protocol, GR_CONNECT, &current->signal->saved_ip);
61642+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61643+ __u32 fakeip = 0;
61644+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61645+ current->role->roletype, cred->uid,
61646+ cred->gid, current->exec_file ?
61647+ gr_to_filename(current->exec_file->f_path.dentry,
61648+ current->exec_file->f_path.mnt) :
61649+ curr->filename, curr->filename,
61650+ &fakeip, 0, type,
61651+ protocol, GR_BIND, &current->signal->saved_ip);
61652+ }
61653+ /* we'll log when they use connect or bind */
61654+ goto exit;
61655+ }
61656+
61657+exit_fail:
61658+ if (domain == PF_INET)
61659+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61660+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
61661+ else
61662+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61663+ gr_socktype_to_name(type), protocol);
61664+
61665+ return 0;
61666+exit:
61667+ return 1;
61668+}
61669+
61670+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61671+{
61672+ if ((ip->mode & mode) &&
61673+ (ip_port >= ip->low) &&
61674+ (ip_port <= ip->high) &&
61675+ ((ntohl(ip_addr) & our_netmask) ==
61676+ (ntohl(our_addr) & our_netmask))
61677+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61678+ && (ip->type & (1 << type))) {
61679+ if (ip->mode & GR_INVERT)
61680+ return 2; // specifically denied
61681+ else
61682+ return 1; // allowed
61683+ }
61684+
61685+ return 0; // not specifically allowed, may continue parsing
61686+}
61687+
61688+static int
61689+gr_search_connectbind(const int full_mode, struct sock *sk,
61690+ struct sockaddr_in *addr, const int type)
61691+{
61692+ char iface[IFNAMSIZ] = {0};
61693+ struct acl_subject_label *curr;
61694+ struct acl_ip_label *ip;
61695+ struct inet_sock *isk;
61696+ struct net_device *dev;
61697+ struct in_device *idev;
61698+ unsigned long i;
61699+ int ret;
61700+ int mode = full_mode & (GR_BIND | GR_CONNECT);
61701+ __u32 ip_addr = 0;
61702+ __u32 our_addr;
61703+ __u32 our_netmask;
61704+ char *p;
61705+ __u16 ip_port = 0;
61706+ const struct cred *cred = current_cred();
61707+
61708+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61709+ return 0;
61710+
61711+ curr = current->acl;
61712+ isk = inet_sk(sk);
61713+
61714+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61715+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61716+ addr->sin_addr.s_addr = curr->inaddr_any_override;
61717+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61718+ struct sockaddr_in saddr;
61719+ int err;
61720+
61721+ saddr.sin_family = AF_INET;
61722+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
61723+ saddr.sin_port = isk->sport;
61724+
61725+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61726+ if (err)
61727+ return err;
61728+
61729+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61730+ if (err)
61731+ return err;
61732+ }
61733+
61734+ if (!curr->ips)
61735+ return 0;
61736+
61737+ ip_addr = addr->sin_addr.s_addr;
61738+ ip_port = ntohs(addr->sin_port);
61739+
61740+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61741+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61742+ current->role->roletype, cred->uid,
61743+ cred->gid, current->exec_file ?
61744+ gr_to_filename(current->exec_file->f_path.dentry,
61745+ current->exec_file->f_path.mnt) :
61746+ curr->filename, curr->filename,
61747+ &ip_addr, ip_port, type,
61748+ sk->sk_protocol, mode, &current->signal->saved_ip);
61749+ return 0;
61750+ }
61751+
61752+ for (i = 0; i < curr->ip_num; i++) {
61753+ ip = *(curr->ips + i);
61754+ if (ip->iface != NULL) {
61755+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
61756+ p = strchr(iface, ':');
61757+ if (p != NULL)
61758+ *p = '\0';
61759+ dev = dev_get_by_name(sock_net(sk), iface);
61760+ if (dev == NULL)
61761+ continue;
61762+ idev = in_dev_get(dev);
61763+ if (idev == NULL) {
61764+ dev_put(dev);
61765+ continue;
61766+ }
61767+ rcu_read_lock();
61768+ for_ifa(idev) {
61769+ if (!strcmp(ip->iface, ifa->ifa_label)) {
61770+ our_addr = ifa->ifa_address;
61771+ our_netmask = 0xffffffff;
61772+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61773+ if (ret == 1) {
61774+ rcu_read_unlock();
61775+ in_dev_put(idev);
61776+ dev_put(dev);
61777+ return 0;
61778+ } else if (ret == 2) {
61779+ rcu_read_unlock();
61780+ in_dev_put(idev);
61781+ dev_put(dev);
61782+ goto denied;
61783+ }
61784+ }
61785+ } endfor_ifa(idev);
61786+ rcu_read_unlock();
61787+ in_dev_put(idev);
61788+ dev_put(dev);
61789+ } else {
61790+ our_addr = ip->addr;
61791+ our_netmask = ip->netmask;
61792+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61793+ if (ret == 1)
61794+ return 0;
61795+ else if (ret == 2)
61796+ goto denied;
61797+ }
61798+ }
61799+
61800+denied:
61801+ if (mode == GR_BIND)
61802+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61803+ else if (mode == GR_CONNECT)
61804+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61805+
61806+ return -EACCES;
61807+}
61808+
61809+int
61810+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61811+{
61812+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61813+}
61814+
61815+int
61816+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61817+{
61818+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61819+}
61820+
61821+int gr_search_listen(struct socket *sock)
61822+{
61823+ struct sock *sk = sock->sk;
61824+ struct sockaddr_in addr;
61825+
61826+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61827+ addr.sin_port = inet_sk(sk)->sport;
61828+
61829+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61830+}
61831+
61832+int gr_search_accept(struct socket *sock)
61833+{
61834+ struct sock *sk = sock->sk;
61835+ struct sockaddr_in addr;
61836+
61837+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61838+ addr.sin_port = inet_sk(sk)->sport;
61839+
61840+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61841+}
61842+
61843+int
61844+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61845+{
61846+ if (addr)
61847+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61848+ else {
61849+ struct sockaddr_in sin;
61850+ const struct inet_sock *inet = inet_sk(sk);
61851+
61852+ sin.sin_addr.s_addr = inet->daddr;
61853+ sin.sin_port = inet->dport;
61854+
61855+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61856+ }
61857+}
61858+
61859+int
61860+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61861+{
61862+ struct sockaddr_in sin;
61863+
61864+ if (unlikely(skb->len < sizeof (struct udphdr)))
61865+ return 0; // skip this packet
61866+
61867+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61868+ sin.sin_port = udp_hdr(skb)->source;
61869+
61870+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61871+}
61872diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61873new file mode 100644
61874index 0000000..34bdd46
61875--- /dev/null
61876+++ b/grsecurity/gracl_learn.c
61877@@ -0,0 +1,208 @@
61878+#include <linux/kernel.h>
61879+#include <linux/mm.h>
61880+#include <linux/sched.h>
61881+#include <linux/poll.h>
61882+#include <linux/smp_lock.h>
61883+#include <linux/string.h>
61884+#include <linux/file.h>
61885+#include <linux/types.h>
61886+#include <linux/vmalloc.h>
61887+#include <linux/grinternal.h>
61888+
61889+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61890+ size_t count, loff_t *ppos);
61891+extern int gr_acl_is_enabled(void);
61892+
61893+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61894+static int gr_learn_attached;
61895+
61896+/* use a 512k buffer */
61897+#define LEARN_BUFFER_SIZE (512 * 1024)
61898+
61899+static DEFINE_SPINLOCK(gr_learn_lock);
61900+static DEFINE_MUTEX(gr_learn_user_mutex);
61901+
61902+/* we need to maintain two buffers, so that the kernel context of grlearn
61903+ uses a semaphore around the userspace copying, and the other kernel contexts
61904+ use a spinlock when copying into the buffer, since they cannot sleep
61905+*/
61906+static char *learn_buffer;
61907+static char *learn_buffer_user;
61908+static int learn_buffer_len;
61909+static int learn_buffer_user_len;
61910+
61911+static ssize_t
61912+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61913+{
61914+ DECLARE_WAITQUEUE(wait, current);
61915+ ssize_t retval = 0;
61916+
61917+ add_wait_queue(&learn_wait, &wait);
61918+ set_current_state(TASK_INTERRUPTIBLE);
61919+ do {
61920+ mutex_lock(&gr_learn_user_mutex);
61921+ spin_lock(&gr_learn_lock);
61922+ if (learn_buffer_len)
61923+ break;
61924+ spin_unlock(&gr_learn_lock);
61925+ mutex_unlock(&gr_learn_user_mutex);
61926+ if (file->f_flags & O_NONBLOCK) {
61927+ retval = -EAGAIN;
61928+ goto out;
61929+ }
61930+ if (signal_pending(current)) {
61931+ retval = -ERESTARTSYS;
61932+ goto out;
61933+ }
61934+
61935+ schedule();
61936+ } while (1);
61937+
61938+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61939+ learn_buffer_user_len = learn_buffer_len;
61940+ retval = learn_buffer_len;
61941+ learn_buffer_len = 0;
61942+
61943+ spin_unlock(&gr_learn_lock);
61944+
61945+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61946+ retval = -EFAULT;
61947+
61948+ mutex_unlock(&gr_learn_user_mutex);
61949+out:
61950+ set_current_state(TASK_RUNNING);
61951+ remove_wait_queue(&learn_wait, &wait);
61952+ return retval;
61953+}
61954+
61955+static unsigned int
61956+poll_learn(struct file * file, poll_table * wait)
61957+{
61958+ poll_wait(file, &learn_wait, wait);
61959+
61960+ if (learn_buffer_len)
61961+ return (POLLIN | POLLRDNORM);
61962+
61963+ return 0;
61964+}
61965+
61966+void
61967+gr_clear_learn_entries(void)
61968+{
61969+ char *tmp;
61970+
61971+ mutex_lock(&gr_learn_user_mutex);
61972+ spin_lock(&gr_learn_lock);
61973+ tmp = learn_buffer;
61974+ learn_buffer = NULL;
61975+ spin_unlock(&gr_learn_lock);
61976+ if (tmp)
61977+ vfree(tmp);
61978+ if (learn_buffer_user != NULL) {
61979+ vfree(learn_buffer_user);
61980+ learn_buffer_user = NULL;
61981+ }
61982+ learn_buffer_len = 0;
61983+ mutex_unlock(&gr_learn_user_mutex);
61984+
61985+ return;
61986+}
61987+
61988+void
61989+gr_add_learn_entry(const char *fmt, ...)
61990+{
61991+ va_list args;
61992+ unsigned int len;
61993+
61994+ if (!gr_learn_attached)
61995+ return;
61996+
61997+ spin_lock(&gr_learn_lock);
61998+
61999+ /* leave a gap at the end so we know when it's "full" but don't have to
62000+ compute the exact length of the string we're trying to append
62001+ */
62002+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
62003+ spin_unlock(&gr_learn_lock);
62004+ wake_up_interruptible(&learn_wait);
62005+ return;
62006+ }
62007+ if (learn_buffer == NULL) {
62008+ spin_unlock(&gr_learn_lock);
62009+ return;
62010+ }
62011+
62012+ va_start(args, fmt);
62013+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
62014+ va_end(args);
62015+
62016+ learn_buffer_len += len + 1;
62017+
62018+ spin_unlock(&gr_learn_lock);
62019+ wake_up_interruptible(&learn_wait);
62020+
62021+ return;
62022+}
62023+
62024+static int
62025+open_learn(struct inode *inode, struct file *file)
62026+{
62027+ if (file->f_mode & FMODE_READ && gr_learn_attached)
62028+ return -EBUSY;
62029+ if (file->f_mode & FMODE_READ) {
62030+ int retval = 0;
62031+ mutex_lock(&gr_learn_user_mutex);
62032+ if (learn_buffer == NULL)
62033+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
62034+ if (learn_buffer_user == NULL)
62035+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
62036+ if (learn_buffer == NULL) {
62037+ retval = -ENOMEM;
62038+ goto out_error;
62039+ }
62040+ if (learn_buffer_user == NULL) {
62041+ retval = -ENOMEM;
62042+ goto out_error;
62043+ }
62044+ learn_buffer_len = 0;
62045+ learn_buffer_user_len = 0;
62046+ gr_learn_attached = 1;
62047+out_error:
62048+ mutex_unlock(&gr_learn_user_mutex);
62049+ return retval;
62050+ }
62051+ return 0;
62052+}
62053+
62054+static int
62055+close_learn(struct inode *inode, struct file *file)
62056+{
62057+ if (file->f_mode & FMODE_READ) {
62058+ char *tmp = NULL;
62059+ mutex_lock(&gr_learn_user_mutex);
62060+ spin_lock(&gr_learn_lock);
62061+ tmp = learn_buffer;
62062+ learn_buffer = NULL;
62063+ spin_unlock(&gr_learn_lock);
62064+ if (tmp)
62065+ vfree(tmp);
62066+ if (learn_buffer_user != NULL) {
62067+ vfree(learn_buffer_user);
62068+ learn_buffer_user = NULL;
62069+ }
62070+ learn_buffer_len = 0;
62071+ learn_buffer_user_len = 0;
62072+ gr_learn_attached = 0;
62073+ mutex_unlock(&gr_learn_user_mutex);
62074+ }
62075+
62076+ return 0;
62077+}
62078+
62079+const struct file_operations grsec_fops = {
62080+ .read = read_learn,
62081+ .write = write_grsec_handler,
62082+ .open = open_learn,
62083+ .release = close_learn,
62084+ .poll = poll_learn,
62085+};
62086diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
62087new file mode 100644
62088index 0000000..70b2179
62089--- /dev/null
62090+++ b/grsecurity/gracl_res.c
62091@@ -0,0 +1,67 @@
62092+#include <linux/kernel.h>
62093+#include <linux/sched.h>
62094+#include <linux/gracl.h>
62095+#include <linux/grinternal.h>
62096+
62097+static const char *restab_log[] = {
62098+ [RLIMIT_CPU] = "RLIMIT_CPU",
62099+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
62100+ [RLIMIT_DATA] = "RLIMIT_DATA",
62101+ [RLIMIT_STACK] = "RLIMIT_STACK",
62102+ [RLIMIT_CORE] = "RLIMIT_CORE",
62103+ [RLIMIT_RSS] = "RLIMIT_RSS",
62104+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
62105+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
62106+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
62107+ [RLIMIT_AS] = "RLIMIT_AS",
62108+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
62109+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
62110+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
62111+ [RLIMIT_NICE] = "RLIMIT_NICE",
62112+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
62113+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
62114+ [GR_CRASH_RES] = "RLIMIT_CRASH"
62115+};
62116+
62117+void
62118+gr_log_resource(const struct task_struct *task,
62119+ const int res, const unsigned long wanted, const int gt)
62120+{
62121+ const struct cred *cred;
62122+ unsigned long rlim;
62123+
62124+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
62125+ return;
62126+
62127+ // not yet supported resource
62128+ if (unlikely(!restab_log[res]))
62129+ return;
62130+
62131+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
62132+ rlim = task->signal->rlim[res].rlim_max;
62133+ else
62134+ rlim = task->signal->rlim[res].rlim_cur;
62135+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
62136+ return;
62137+
62138+ rcu_read_lock();
62139+ cred = __task_cred(task);
62140+
62141+ if (res == RLIMIT_NPROC &&
62142+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
62143+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
62144+ goto out_rcu_unlock;
62145+ else if (res == RLIMIT_MEMLOCK &&
62146+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
62147+ goto out_rcu_unlock;
62148+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
62149+ goto out_rcu_unlock;
62150+ rcu_read_unlock();
62151+
62152+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
62153+
62154+ return;
62155+out_rcu_unlock:
62156+ rcu_read_unlock();
62157+ return;
62158+}
62159diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
62160new file mode 100644
62161index 0000000..1d1b734
62162--- /dev/null
62163+++ b/grsecurity/gracl_segv.c
62164@@ -0,0 +1,284 @@
62165+#include <linux/kernel.h>
62166+#include <linux/mm.h>
62167+#include <asm/uaccess.h>
62168+#include <asm/errno.h>
62169+#include <asm/mman.h>
62170+#include <net/sock.h>
62171+#include <linux/file.h>
62172+#include <linux/fs.h>
62173+#include <linux/net.h>
62174+#include <linux/in.h>
62175+#include <linux/smp_lock.h>
62176+#include <linux/slab.h>
62177+#include <linux/types.h>
62178+#include <linux/sched.h>
62179+#include <linux/timer.h>
62180+#include <linux/gracl.h>
62181+#include <linux/grsecurity.h>
62182+#include <linux/grinternal.h>
62183+
62184+static struct crash_uid *uid_set;
62185+static unsigned short uid_used;
62186+static DEFINE_SPINLOCK(gr_uid_lock);
62187+extern rwlock_t gr_inode_lock;
62188+extern struct acl_subject_label *
62189+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
62190+ struct acl_role_label *role);
62191+extern int gr_fake_force_sig(int sig, struct task_struct *t);
62192+
62193+int
62194+gr_init_uidset(void)
62195+{
62196+ uid_set =
62197+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
62198+ uid_used = 0;
62199+
62200+ return uid_set ? 1 : 0;
62201+}
62202+
62203+void
62204+gr_free_uidset(void)
62205+{
62206+ if (uid_set)
62207+ kfree(uid_set);
62208+
62209+ return;
62210+}
62211+
62212+int
62213+gr_find_uid(const uid_t uid)
62214+{
62215+ struct crash_uid *tmp = uid_set;
62216+ uid_t buid;
62217+ int low = 0, high = uid_used - 1, mid;
62218+
62219+ while (high >= low) {
62220+ mid = (low + high) >> 1;
62221+ buid = tmp[mid].uid;
62222+ if (buid == uid)
62223+ return mid;
62224+ if (buid > uid)
62225+ high = mid - 1;
62226+ if (buid < uid)
62227+ low = mid + 1;
62228+ }
62229+
62230+ return -1;
62231+}
62232+
62233+static __inline__ void
62234+gr_insertsort(void)
62235+{
62236+ unsigned short i, j;
62237+ struct crash_uid index;
62238+
62239+ for (i = 1; i < uid_used; i++) {
62240+ index = uid_set[i];
62241+ j = i;
62242+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
62243+ uid_set[j] = uid_set[j - 1];
62244+ j--;
62245+ }
62246+ uid_set[j] = index;
62247+ }
62248+
62249+ return;
62250+}
62251+
62252+static __inline__ void
62253+gr_insert_uid(const uid_t uid, const unsigned long expires)
62254+{
62255+ int loc;
62256+
62257+ if (uid_used == GR_UIDTABLE_MAX)
62258+ return;
62259+
62260+ loc = gr_find_uid(uid);
62261+
62262+ if (loc >= 0) {
62263+ uid_set[loc].expires = expires;
62264+ return;
62265+ }
62266+
62267+ uid_set[uid_used].uid = uid;
62268+ uid_set[uid_used].expires = expires;
62269+ uid_used++;
62270+
62271+ gr_insertsort();
62272+
62273+ return;
62274+}
62275+
62276+void
62277+gr_remove_uid(const unsigned short loc)
62278+{
62279+ unsigned short i;
62280+
62281+ for (i = loc + 1; i < uid_used; i++)
62282+ uid_set[i - 1] = uid_set[i];
62283+
62284+ uid_used--;
62285+
62286+ return;
62287+}
62288+
62289+int
62290+gr_check_crash_uid(const uid_t uid)
62291+{
62292+ int loc;
62293+ int ret = 0;
62294+
62295+ if (unlikely(!gr_acl_is_enabled()))
62296+ return 0;
62297+
62298+ spin_lock(&gr_uid_lock);
62299+ loc = gr_find_uid(uid);
62300+
62301+ if (loc < 0)
62302+ goto out_unlock;
62303+
62304+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
62305+ gr_remove_uid(loc);
62306+ else
62307+ ret = 1;
62308+
62309+out_unlock:
62310+ spin_unlock(&gr_uid_lock);
62311+ return ret;
62312+}
62313+
62314+static __inline__ int
62315+proc_is_setxid(const struct cred *cred)
62316+{
62317+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
62318+ cred->uid != cred->fsuid)
62319+ return 1;
62320+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
62321+ cred->gid != cred->fsgid)
62322+ return 1;
62323+
62324+ return 0;
62325+}
62326+
62327+void
62328+gr_handle_crash(struct task_struct *task, const int sig)
62329+{
62330+ struct acl_subject_label *curr;
62331+ struct task_struct *tsk, *tsk2;
62332+ const struct cred *cred;
62333+ const struct cred *cred2;
62334+
62335+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62336+ return;
62337+
62338+ if (unlikely(!gr_acl_is_enabled()))
62339+ return;
62340+
62341+ curr = task->acl;
62342+
62343+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
62344+ return;
62345+
62346+ if (time_before_eq(curr->expires, get_seconds())) {
62347+ curr->expires = 0;
62348+ curr->crashes = 0;
62349+ }
62350+
62351+ curr->crashes++;
62352+
62353+ if (!curr->expires)
62354+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62355+
62356+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62357+ time_after(curr->expires, get_seconds())) {
62358+ rcu_read_lock();
62359+ cred = __task_cred(task);
62360+ if (cred->uid && proc_is_setxid(cred)) {
62361+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62362+ spin_lock(&gr_uid_lock);
62363+ gr_insert_uid(cred->uid, curr->expires);
62364+ spin_unlock(&gr_uid_lock);
62365+ curr->expires = 0;
62366+ curr->crashes = 0;
62367+ read_lock(&tasklist_lock);
62368+ do_each_thread(tsk2, tsk) {
62369+ cred2 = __task_cred(tsk);
62370+ if (tsk != task && cred2->uid == cred->uid)
62371+ gr_fake_force_sig(SIGKILL, tsk);
62372+ } while_each_thread(tsk2, tsk);
62373+ read_unlock(&tasklist_lock);
62374+ } else {
62375+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62376+ read_lock(&tasklist_lock);
62377+ read_lock(&grsec_exec_file_lock);
62378+ do_each_thread(tsk2, tsk) {
62379+ if (likely(tsk != task)) {
62380+ // if this thread has the same subject as the one that triggered
62381+ // RES_CRASH and it's the same binary, kill it
62382+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62383+ gr_fake_force_sig(SIGKILL, tsk);
62384+ }
62385+ } while_each_thread(tsk2, tsk);
62386+ read_unlock(&grsec_exec_file_lock);
62387+ read_unlock(&tasklist_lock);
62388+ }
62389+ rcu_read_unlock();
62390+ }
62391+
62392+ return;
62393+}
62394+
62395+int
62396+gr_check_crash_exec(const struct file *filp)
62397+{
62398+ struct acl_subject_label *curr;
62399+
62400+ if (unlikely(!gr_acl_is_enabled()))
62401+ return 0;
62402+
62403+ read_lock(&gr_inode_lock);
62404+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62405+ filp->f_path.dentry->d_inode->i_sb->s_dev,
62406+ current->role);
62407+ read_unlock(&gr_inode_lock);
62408+
62409+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62410+ (!curr->crashes && !curr->expires))
62411+ return 0;
62412+
62413+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62414+ time_after(curr->expires, get_seconds()))
62415+ return 1;
62416+ else if (time_before_eq(curr->expires, get_seconds())) {
62417+ curr->crashes = 0;
62418+ curr->expires = 0;
62419+ }
62420+
62421+ return 0;
62422+}
62423+
62424+void
62425+gr_handle_alertkill(struct task_struct *task)
62426+{
62427+ struct acl_subject_label *curracl;
62428+ __u32 curr_ip;
62429+ struct task_struct *p, *p2;
62430+
62431+ if (unlikely(!gr_acl_is_enabled()))
62432+ return;
62433+
62434+ curracl = task->acl;
62435+ curr_ip = task->signal->curr_ip;
62436+
62437+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62438+ read_lock(&tasklist_lock);
62439+ do_each_thread(p2, p) {
62440+ if (p->signal->curr_ip == curr_ip)
62441+ gr_fake_force_sig(SIGKILL, p);
62442+ } while_each_thread(p2, p);
62443+ read_unlock(&tasklist_lock);
62444+ } else if (curracl->mode & GR_KILLPROC)
62445+ gr_fake_force_sig(SIGKILL, task);
62446+
62447+ return;
62448+}
62449diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62450new file mode 100644
62451index 0000000..9d83a69
62452--- /dev/null
62453+++ b/grsecurity/gracl_shm.c
62454@@ -0,0 +1,40 @@
62455+#include <linux/kernel.h>
62456+#include <linux/mm.h>
62457+#include <linux/sched.h>
62458+#include <linux/file.h>
62459+#include <linux/ipc.h>
62460+#include <linux/gracl.h>
62461+#include <linux/grsecurity.h>
62462+#include <linux/grinternal.h>
62463+
62464+int
62465+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62466+ const time_t shm_createtime, const uid_t cuid, const int shmid)
62467+{
62468+ struct task_struct *task;
62469+
62470+ if (!gr_acl_is_enabled())
62471+ return 1;
62472+
62473+ rcu_read_lock();
62474+ read_lock(&tasklist_lock);
62475+
62476+ task = find_task_by_vpid(shm_cprid);
62477+
62478+ if (unlikely(!task))
62479+ task = find_task_by_vpid(shm_lapid);
62480+
62481+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62482+ (task->pid == shm_lapid)) &&
62483+ (task->acl->mode & GR_PROTSHM) &&
62484+ (task->acl != current->acl))) {
62485+ read_unlock(&tasklist_lock);
62486+ rcu_read_unlock();
62487+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
62488+ return 0;
62489+ }
62490+ read_unlock(&tasklist_lock);
62491+ rcu_read_unlock();
62492+
62493+ return 1;
62494+}
62495diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62496new file mode 100644
62497index 0000000..bc0be01
62498--- /dev/null
62499+++ b/grsecurity/grsec_chdir.c
62500@@ -0,0 +1,19 @@
62501+#include <linux/kernel.h>
62502+#include <linux/sched.h>
62503+#include <linux/fs.h>
62504+#include <linux/file.h>
62505+#include <linux/grsecurity.h>
62506+#include <linux/grinternal.h>
62507+
62508+void
62509+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62510+{
62511+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62512+ if ((grsec_enable_chdir && grsec_enable_group &&
62513+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62514+ !grsec_enable_group)) {
62515+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62516+ }
62517+#endif
62518+ return;
62519+}
62520diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62521new file mode 100644
62522index 0000000..197bdd5
62523--- /dev/null
62524+++ b/grsecurity/grsec_chroot.c
62525@@ -0,0 +1,386 @@
62526+#include <linux/kernel.h>
62527+#include <linux/module.h>
62528+#include <linux/sched.h>
62529+#include <linux/file.h>
62530+#include <linux/fs.h>
62531+#include <linux/mount.h>
62532+#include <linux/types.h>
62533+#include <linux/pid_namespace.h>
62534+#include <linux/grsecurity.h>
62535+#include <linux/grinternal.h>
62536+
62537+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62538+{
62539+#ifdef CONFIG_GRKERNSEC
62540+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
62541+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
62542+ task->gr_is_chrooted = 1;
62543+ else
62544+ task->gr_is_chrooted = 0;
62545+
62546+ task->gr_chroot_dentry = path->dentry;
62547+#endif
62548+ return;
62549+}
62550+
62551+void gr_clear_chroot_entries(struct task_struct *task)
62552+{
62553+#ifdef CONFIG_GRKERNSEC
62554+ task->gr_is_chrooted = 0;
62555+ task->gr_chroot_dentry = NULL;
62556+#endif
62557+ return;
62558+}
62559+
62560+int
62561+gr_handle_chroot_unix(const pid_t pid)
62562+{
62563+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62564+ struct task_struct *p;
62565+
62566+ if (unlikely(!grsec_enable_chroot_unix))
62567+ return 1;
62568+
62569+ if (likely(!proc_is_chrooted(current)))
62570+ return 1;
62571+
62572+ rcu_read_lock();
62573+ read_lock(&tasklist_lock);
62574+
62575+ p = find_task_by_vpid_unrestricted(pid);
62576+ if (unlikely(p && !have_same_root(current, p))) {
62577+ read_unlock(&tasklist_lock);
62578+ rcu_read_unlock();
62579+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62580+ return 0;
62581+ }
62582+ read_unlock(&tasklist_lock);
62583+ rcu_read_unlock();
62584+#endif
62585+ return 1;
62586+}
62587+
62588+int
62589+gr_handle_chroot_nice(void)
62590+{
62591+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62592+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62593+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62594+ return -EPERM;
62595+ }
62596+#endif
62597+ return 0;
62598+}
62599+
62600+int
62601+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62602+{
62603+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62604+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62605+ && proc_is_chrooted(current)) {
62606+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
62607+ return -EACCES;
62608+ }
62609+#endif
62610+ return 0;
62611+}
62612+
62613+int
62614+gr_handle_chroot_rawio(const struct inode *inode)
62615+{
62616+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62617+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62618+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
62619+ return 1;
62620+#endif
62621+ return 0;
62622+}
62623+
62624+int
62625+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62626+{
62627+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62628+ struct task_struct *p;
62629+ int ret = 0;
62630+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62631+ return ret;
62632+
62633+ read_lock(&tasklist_lock);
62634+ do_each_pid_task(pid, type, p) {
62635+ if (!have_same_root(current, p)) {
62636+ ret = 1;
62637+ goto out;
62638+ }
62639+ } while_each_pid_task(pid, type, p);
62640+out:
62641+ read_unlock(&tasklist_lock);
62642+ return ret;
62643+#endif
62644+ return 0;
62645+}
62646+
62647+int
62648+gr_pid_is_chrooted(struct task_struct *p)
62649+{
62650+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62651+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62652+ return 0;
62653+
62654+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62655+ !have_same_root(current, p)) {
62656+ return 1;
62657+ }
62658+#endif
62659+ return 0;
62660+}
62661+
62662+EXPORT_SYMBOL(gr_pid_is_chrooted);
62663+
62664+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62665+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62666+{
62667+ struct dentry *dentry = (struct dentry *)u_dentry;
62668+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
62669+ struct dentry *realroot;
62670+ struct vfsmount *realrootmnt;
62671+ struct dentry *currentroot;
62672+ struct vfsmount *currentmnt;
62673+ struct task_struct *reaper = &init_task;
62674+ int ret = 1;
62675+
62676+ read_lock(&reaper->fs->lock);
62677+ realrootmnt = mntget(reaper->fs->root.mnt);
62678+ realroot = dget(reaper->fs->root.dentry);
62679+ read_unlock(&reaper->fs->lock);
62680+
62681+ read_lock(&current->fs->lock);
62682+ currentmnt = mntget(current->fs->root.mnt);
62683+ currentroot = dget(current->fs->root.dentry);
62684+ read_unlock(&current->fs->lock);
62685+
62686+ spin_lock(&dcache_lock);
62687+ for (;;) {
62688+ if (unlikely((dentry == realroot && mnt == realrootmnt)
62689+ || (dentry == currentroot && mnt == currentmnt)))
62690+ break;
62691+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
62692+ if (mnt->mnt_parent == mnt)
62693+ break;
62694+ dentry = mnt->mnt_mountpoint;
62695+ mnt = mnt->mnt_parent;
62696+ continue;
62697+ }
62698+ dentry = dentry->d_parent;
62699+ }
62700+ spin_unlock(&dcache_lock);
62701+
62702+ dput(currentroot);
62703+ mntput(currentmnt);
62704+
62705+ /* access is outside of chroot */
62706+ if (dentry == realroot && mnt == realrootmnt)
62707+ ret = 0;
62708+
62709+ dput(realroot);
62710+ mntput(realrootmnt);
62711+ return ret;
62712+}
62713+#endif
62714+
62715+int
62716+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62717+{
62718+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62719+ if (!grsec_enable_chroot_fchdir)
62720+ return 1;
62721+
62722+ if (!proc_is_chrooted(current))
62723+ return 1;
62724+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62725+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62726+ return 0;
62727+ }
62728+#endif
62729+ return 1;
62730+}
62731+
62732+int
62733+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62734+ const time_t shm_createtime)
62735+{
62736+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62737+ struct task_struct *p;
62738+ time_t starttime;
62739+
62740+ if (unlikely(!grsec_enable_chroot_shmat))
62741+ return 1;
62742+
62743+ if (likely(!proc_is_chrooted(current)))
62744+ return 1;
62745+
62746+ rcu_read_lock();
62747+ read_lock(&tasklist_lock);
62748+
62749+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62750+ starttime = p->start_time.tv_sec;
62751+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62752+ if (have_same_root(current, p)) {
62753+ goto allow;
62754+ } else {
62755+ read_unlock(&tasklist_lock);
62756+ rcu_read_unlock();
62757+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62758+ return 0;
62759+ }
62760+ }
62761+ /* creator exited, pid reuse, fall through to next check */
62762+ }
62763+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62764+ if (unlikely(!have_same_root(current, p))) {
62765+ read_unlock(&tasklist_lock);
62766+ rcu_read_unlock();
62767+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62768+ return 0;
62769+ }
62770+ }
62771+
62772+allow:
62773+ read_unlock(&tasklist_lock);
62774+ rcu_read_unlock();
62775+#endif
62776+ return 1;
62777+}
62778+
62779+void
62780+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62781+{
62782+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62783+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62784+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62785+#endif
62786+ return;
62787+}
62788+
62789+int
62790+gr_handle_chroot_mknod(const struct dentry *dentry,
62791+ const struct vfsmount *mnt, const int mode)
62792+{
62793+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62794+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62795+ proc_is_chrooted(current)) {
62796+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62797+ return -EPERM;
62798+ }
62799+#endif
62800+ return 0;
62801+}
62802+
62803+int
62804+gr_handle_chroot_mount(const struct dentry *dentry,
62805+ const struct vfsmount *mnt, const char *dev_name)
62806+{
62807+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62808+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62809+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62810+ return -EPERM;
62811+ }
62812+#endif
62813+ return 0;
62814+}
62815+
62816+int
62817+gr_handle_chroot_pivot(void)
62818+{
62819+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62820+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62821+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62822+ return -EPERM;
62823+ }
62824+#endif
62825+ return 0;
62826+}
62827+
62828+int
62829+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62830+{
62831+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62832+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62833+ !gr_is_outside_chroot(dentry, mnt)) {
62834+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62835+ return -EPERM;
62836+ }
62837+#endif
62838+ return 0;
62839+}
62840+
62841+extern const char *captab_log[];
62842+extern int captab_log_entries;
62843+
62844+int
62845+gr_chroot_is_capable(const int cap)
62846+{
62847+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62848+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62849+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62850+ if (cap_raised(chroot_caps, cap)) {
62851+ const struct cred *creds = current_cred();
62852+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62853+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62854+ }
62855+ return 0;
62856+ }
62857+ }
62858+#endif
62859+ return 1;
62860+}
62861+
62862+int
62863+gr_chroot_is_capable_nolog(const int cap)
62864+{
62865+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62866+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62867+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62868+ if (cap_raised(chroot_caps, cap)) {
62869+ return 0;
62870+ }
62871+ }
62872+#endif
62873+ return 1;
62874+}
62875+
62876+int
62877+gr_handle_chroot_sysctl(const int op)
62878+{
62879+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62880+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62881+ && (op & MAY_WRITE))
62882+ return -EACCES;
62883+#endif
62884+ return 0;
62885+}
62886+
62887+void
62888+gr_handle_chroot_chdir(struct path *path)
62889+{
62890+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62891+ if (grsec_enable_chroot_chdir)
62892+ set_fs_pwd(current->fs, path);
62893+#endif
62894+ return;
62895+}
62896+
62897+int
62898+gr_handle_chroot_chmod(const struct dentry *dentry,
62899+ const struct vfsmount *mnt, const int mode)
62900+{
62901+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62902+ /* allow chmod +s on directories, but not on files */
62903+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62904+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62905+ proc_is_chrooted(current)) {
62906+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62907+ return -EPERM;
62908+ }
62909+#endif
62910+ return 0;
62911+}
62912diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62913new file mode 100644
62914index 0000000..40545bf
62915--- /dev/null
62916+++ b/grsecurity/grsec_disabled.c
62917@@ -0,0 +1,437 @@
62918+#include <linux/kernel.h>
62919+#include <linux/module.h>
62920+#include <linux/sched.h>
62921+#include <linux/file.h>
62922+#include <linux/fs.h>
62923+#include <linux/kdev_t.h>
62924+#include <linux/net.h>
62925+#include <linux/in.h>
62926+#include <linux/ip.h>
62927+#include <linux/skbuff.h>
62928+#include <linux/sysctl.h>
62929+
62930+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62931+void
62932+pax_set_initial_flags(struct linux_binprm *bprm)
62933+{
62934+ return;
62935+}
62936+#endif
62937+
62938+#ifdef CONFIG_SYSCTL
62939+__u32
62940+gr_handle_sysctl(const struct ctl_table * table, const int op)
62941+{
62942+ return 0;
62943+}
62944+#endif
62945+
62946+#ifdef CONFIG_TASKSTATS
62947+int gr_is_taskstats_denied(int pid)
62948+{
62949+ return 0;
62950+}
62951+#endif
62952+
62953+int
62954+gr_acl_is_enabled(void)
62955+{
62956+ return 0;
62957+}
62958+
62959+void
62960+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62961+{
62962+ return;
62963+}
62964+
62965+int
62966+gr_handle_rawio(const struct inode *inode)
62967+{
62968+ return 0;
62969+}
62970+
62971+void
62972+gr_acl_handle_psacct(struct task_struct *task, const long code)
62973+{
62974+ return;
62975+}
62976+
62977+int
62978+gr_handle_ptrace(struct task_struct *task, const long request)
62979+{
62980+ return 0;
62981+}
62982+
62983+int
62984+gr_handle_proc_ptrace(struct task_struct *task)
62985+{
62986+ return 0;
62987+}
62988+
62989+void
62990+gr_learn_resource(const struct task_struct *task,
62991+ const int res, const unsigned long wanted, const int gt)
62992+{
62993+ return;
62994+}
62995+
62996+int
62997+gr_set_acls(const int type)
62998+{
62999+ return 0;
63000+}
63001+
63002+int
63003+gr_check_hidden_task(const struct task_struct *tsk)
63004+{
63005+ return 0;
63006+}
63007+
63008+int
63009+gr_check_protected_task(const struct task_struct *task)
63010+{
63011+ return 0;
63012+}
63013+
63014+int
63015+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
63016+{
63017+ return 0;
63018+}
63019+
63020+void
63021+gr_copy_label(struct task_struct *tsk)
63022+{
63023+ return;
63024+}
63025+
63026+void
63027+gr_set_pax_flags(struct task_struct *task)
63028+{
63029+ return;
63030+}
63031+
63032+int
63033+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
63034+ const int unsafe_share)
63035+{
63036+ return 0;
63037+}
63038+
63039+void
63040+gr_handle_delete(const ino_t ino, const dev_t dev)
63041+{
63042+ return;
63043+}
63044+
63045+void
63046+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
63047+{
63048+ return;
63049+}
63050+
63051+void
63052+gr_handle_crash(struct task_struct *task, const int sig)
63053+{
63054+ return;
63055+}
63056+
63057+int
63058+gr_check_crash_exec(const struct file *filp)
63059+{
63060+ return 0;
63061+}
63062+
63063+int
63064+gr_check_crash_uid(const uid_t uid)
63065+{
63066+ return 0;
63067+}
63068+
63069+void
63070+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
63071+ struct dentry *old_dentry,
63072+ struct dentry *new_dentry,
63073+ struct vfsmount *mnt, const __u8 replace)
63074+{
63075+ return;
63076+}
63077+
63078+int
63079+gr_search_socket(const int family, const int type, const int protocol)
63080+{
63081+ return 1;
63082+}
63083+
63084+int
63085+gr_search_connectbind(const int mode, const struct socket *sock,
63086+ const struct sockaddr_in *addr)
63087+{
63088+ return 0;
63089+}
63090+
63091+void
63092+gr_handle_alertkill(struct task_struct *task)
63093+{
63094+ return;
63095+}
63096+
63097+__u32
63098+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
63099+{
63100+ return 1;
63101+}
63102+
63103+__u32
63104+gr_acl_handle_hidden_file(const struct dentry * dentry,
63105+ const struct vfsmount * mnt)
63106+{
63107+ return 1;
63108+}
63109+
63110+__u32
63111+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
63112+ int acc_mode)
63113+{
63114+ return 1;
63115+}
63116+
63117+__u32
63118+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
63119+{
63120+ return 1;
63121+}
63122+
63123+__u32
63124+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
63125+{
63126+ return 1;
63127+}
63128+
63129+int
63130+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
63131+ unsigned int *vm_flags)
63132+{
63133+ return 1;
63134+}
63135+
63136+__u32
63137+gr_acl_handle_truncate(const struct dentry * dentry,
63138+ const struct vfsmount * mnt)
63139+{
63140+ return 1;
63141+}
63142+
63143+__u32
63144+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
63145+{
63146+ return 1;
63147+}
63148+
63149+__u32
63150+gr_acl_handle_access(const struct dentry * dentry,
63151+ const struct vfsmount * mnt, const int fmode)
63152+{
63153+ return 1;
63154+}
63155+
63156+__u32
63157+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
63158+ umode_t *mode)
63159+{
63160+ return 1;
63161+}
63162+
63163+__u32
63164+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
63165+{
63166+ return 1;
63167+}
63168+
63169+__u32
63170+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
63171+{
63172+ return 1;
63173+}
63174+
63175+void
63176+grsecurity_init(void)
63177+{
63178+ return;
63179+}
63180+
63181+umode_t gr_acl_umask(void)
63182+{
63183+ return 0;
63184+}
63185+
63186+__u32
63187+gr_acl_handle_mknod(const struct dentry * new_dentry,
63188+ const struct dentry * parent_dentry,
63189+ const struct vfsmount * parent_mnt,
63190+ const int mode)
63191+{
63192+ return 1;
63193+}
63194+
63195+__u32
63196+gr_acl_handle_mkdir(const struct dentry * new_dentry,
63197+ const struct dentry * parent_dentry,
63198+ const struct vfsmount * parent_mnt)
63199+{
63200+ return 1;
63201+}
63202+
63203+__u32
63204+gr_acl_handle_symlink(const struct dentry * new_dentry,
63205+ const struct dentry * parent_dentry,
63206+ const struct vfsmount * parent_mnt, const char *from)
63207+{
63208+ return 1;
63209+}
63210+
63211+__u32
63212+gr_acl_handle_link(const struct dentry * new_dentry,
63213+ const struct dentry * parent_dentry,
63214+ const struct vfsmount * parent_mnt,
63215+ const struct dentry * old_dentry,
63216+ const struct vfsmount * old_mnt, const char *to)
63217+{
63218+ return 1;
63219+}
63220+
63221+int
63222+gr_acl_handle_rename(const struct dentry *new_dentry,
63223+ const struct dentry *parent_dentry,
63224+ const struct vfsmount *parent_mnt,
63225+ const struct dentry *old_dentry,
63226+ const struct inode *old_parent_inode,
63227+ const struct vfsmount *old_mnt, const char *newname)
63228+{
63229+ return 0;
63230+}
63231+
63232+int
63233+gr_acl_handle_filldir(const struct file *file, const char *name,
63234+ const int namelen, const ino_t ino)
63235+{
63236+ return 1;
63237+}
63238+
63239+int
63240+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63241+ const time_t shm_createtime, const uid_t cuid, const int shmid)
63242+{
63243+ return 1;
63244+}
63245+
63246+int
63247+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
63248+{
63249+ return 0;
63250+}
63251+
63252+int
63253+gr_search_accept(const struct socket *sock)
63254+{
63255+ return 0;
63256+}
63257+
63258+int
63259+gr_search_listen(const struct socket *sock)
63260+{
63261+ return 0;
63262+}
63263+
63264+int
63265+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63266+{
63267+ return 0;
63268+}
63269+
63270+__u32
63271+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63272+{
63273+ return 1;
63274+}
63275+
63276+__u32
63277+gr_acl_handle_creat(const struct dentry * dentry,
63278+ const struct dentry * p_dentry,
63279+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63280+ const int imode)
63281+{
63282+ return 1;
63283+}
63284+
63285+void
63286+gr_acl_handle_exit(void)
63287+{
63288+ return;
63289+}
63290+
63291+int
63292+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63293+{
63294+ return 1;
63295+}
63296+
63297+void
63298+gr_set_role_label(const uid_t uid, const gid_t gid)
63299+{
63300+ return;
63301+}
63302+
63303+int
63304+gr_acl_handle_procpidmem(const struct task_struct *task)
63305+{
63306+ return 0;
63307+}
63308+
63309+int
63310+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63311+{
63312+ return 0;
63313+}
63314+
63315+int
63316+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63317+{
63318+ return 0;
63319+}
63320+
63321+void
63322+gr_set_kernel_label(struct task_struct *task)
63323+{
63324+ return;
63325+}
63326+
63327+int
63328+gr_check_user_change(int real, int effective, int fs)
63329+{
63330+ return 0;
63331+}
63332+
63333+int
63334+gr_check_group_change(int real, int effective, int fs)
63335+{
63336+ return 0;
63337+}
63338+
63339+int gr_acl_enable_at_secure(void)
63340+{
63341+ return 0;
63342+}
63343+
63344+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63345+{
63346+ return dentry->d_inode->i_sb->s_dev;
63347+}
63348+
63349+EXPORT_SYMBOL(gr_learn_resource);
63350+EXPORT_SYMBOL(gr_set_kernel_label);
63351+#ifdef CONFIG_SECURITY
63352+EXPORT_SYMBOL(gr_check_user_change);
63353+EXPORT_SYMBOL(gr_check_group_change);
63354+#endif
63355diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63356new file mode 100644
63357index 0000000..a96e155
63358--- /dev/null
63359+++ b/grsecurity/grsec_exec.c
63360@@ -0,0 +1,204 @@
63361+#include <linux/kernel.h>
63362+#include <linux/sched.h>
63363+#include <linux/file.h>
63364+#include <linux/binfmts.h>
63365+#include <linux/smp_lock.h>
63366+#include <linux/fs.h>
63367+#include <linux/types.h>
63368+#include <linux/grdefs.h>
63369+#include <linux/grinternal.h>
63370+#include <linux/capability.h>
63371+#include <linux/compat.h>
63372+#include <linux/module.h>
63373+
63374+#include <asm/uaccess.h>
63375+
63376+#ifdef CONFIG_GRKERNSEC_EXECLOG
63377+static char gr_exec_arg_buf[132];
63378+static DEFINE_MUTEX(gr_exec_arg_mutex);
63379+#endif
63380+
63381+void
63382+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
63383+{
63384+#ifdef CONFIG_GRKERNSEC_EXECLOG
63385+ char *grarg = gr_exec_arg_buf;
63386+ unsigned int i, x, execlen = 0;
63387+ char c;
63388+
63389+ if (!((grsec_enable_execlog && grsec_enable_group &&
63390+ in_group_p(grsec_audit_gid))
63391+ || (grsec_enable_execlog && !grsec_enable_group)))
63392+ return;
63393+
63394+ mutex_lock(&gr_exec_arg_mutex);
63395+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
63396+
63397+ if (unlikely(argv == NULL))
63398+ goto log;
63399+
63400+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
63401+ const char __user *p;
63402+ unsigned int len;
63403+
63404+ if (copy_from_user(&p, argv + i, sizeof(p)))
63405+ goto log;
63406+ if (!p)
63407+ goto log;
63408+ len = strnlen_user(p, 128 - execlen);
63409+ if (len > 128 - execlen)
63410+ len = 128 - execlen;
63411+ else if (len > 0)
63412+ len--;
63413+ if (copy_from_user(grarg + execlen, p, len))
63414+ goto log;
63415+
63416+ /* rewrite unprintable characters */
63417+ for (x = 0; x < len; x++) {
63418+ c = *(grarg + execlen + x);
63419+ if (c < 32 || c > 126)
63420+ *(grarg + execlen + x) = ' ';
63421+ }
63422+
63423+ execlen += len;
63424+ *(grarg + execlen) = ' ';
63425+ *(grarg + execlen + 1) = '\0';
63426+ execlen++;
63427+ }
63428+
63429+ log:
63430+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63431+ bprm->file->f_path.mnt, grarg);
63432+ mutex_unlock(&gr_exec_arg_mutex);
63433+#endif
63434+ return;
63435+}
63436+
63437+#ifdef CONFIG_COMPAT
63438+void
63439+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
63440+{
63441+#ifdef CONFIG_GRKERNSEC_EXECLOG
63442+ char *grarg = gr_exec_arg_buf;
63443+ unsigned int i, x, execlen = 0;
63444+ char c;
63445+
63446+ if (!((grsec_enable_execlog && grsec_enable_group &&
63447+ in_group_p(grsec_audit_gid))
63448+ || (grsec_enable_execlog && !grsec_enable_group)))
63449+ return;
63450+
63451+ mutex_lock(&gr_exec_arg_mutex);
63452+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
63453+
63454+ if (unlikely(argv == NULL))
63455+ goto log;
63456+
63457+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
63458+ compat_uptr_t p;
63459+ unsigned int len;
63460+
63461+ if (get_user(p, argv + i))
63462+ goto log;
63463+ len = strnlen_user(compat_ptr(p), 128 - execlen);
63464+ if (len > 128 - execlen)
63465+ len = 128 - execlen;
63466+ else if (len > 0)
63467+ len--;
63468+ else
63469+ goto log;
63470+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
63471+ goto log;
63472+
63473+ /* rewrite unprintable characters */
63474+ for (x = 0; x < len; x++) {
63475+ c = *(grarg + execlen + x);
63476+ if (c < 32 || c > 126)
63477+ *(grarg + execlen + x) = ' ';
63478+ }
63479+
63480+ execlen += len;
63481+ *(grarg + execlen) = ' ';
63482+ *(grarg + execlen + 1) = '\0';
63483+ execlen++;
63484+ }
63485+
63486+ log:
63487+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63488+ bprm->file->f_path.mnt, grarg);
63489+ mutex_unlock(&gr_exec_arg_mutex);
63490+#endif
63491+ return;
63492+}
63493+#endif
63494+
63495+#ifdef CONFIG_GRKERNSEC
63496+extern int gr_acl_is_capable(const int cap);
63497+extern int gr_acl_is_capable_nolog(const int cap);
63498+extern int gr_chroot_is_capable(const int cap);
63499+extern int gr_chroot_is_capable_nolog(const int cap);
63500+#endif
63501+
63502+const char *captab_log[] = {
63503+ "CAP_CHOWN",
63504+ "CAP_DAC_OVERRIDE",
63505+ "CAP_DAC_READ_SEARCH",
63506+ "CAP_FOWNER",
63507+ "CAP_FSETID",
63508+ "CAP_KILL",
63509+ "CAP_SETGID",
63510+ "CAP_SETUID",
63511+ "CAP_SETPCAP",
63512+ "CAP_LINUX_IMMUTABLE",
63513+ "CAP_NET_BIND_SERVICE",
63514+ "CAP_NET_BROADCAST",
63515+ "CAP_NET_ADMIN",
63516+ "CAP_NET_RAW",
63517+ "CAP_IPC_LOCK",
63518+ "CAP_IPC_OWNER",
63519+ "CAP_SYS_MODULE",
63520+ "CAP_SYS_RAWIO",
63521+ "CAP_SYS_CHROOT",
63522+ "CAP_SYS_PTRACE",
63523+ "CAP_SYS_PACCT",
63524+ "CAP_SYS_ADMIN",
63525+ "CAP_SYS_BOOT",
63526+ "CAP_SYS_NICE",
63527+ "CAP_SYS_RESOURCE",
63528+ "CAP_SYS_TIME",
63529+ "CAP_SYS_TTY_CONFIG",
63530+ "CAP_MKNOD",
63531+ "CAP_LEASE",
63532+ "CAP_AUDIT_WRITE",
63533+ "CAP_AUDIT_CONTROL",
63534+ "CAP_SETFCAP",
63535+ "CAP_MAC_OVERRIDE",
63536+ "CAP_MAC_ADMIN"
63537+};
63538+
63539+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63540+
63541+int gr_is_capable(const int cap)
63542+{
63543+#ifdef CONFIG_GRKERNSEC
63544+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63545+ return 1;
63546+ return 0;
63547+#else
63548+ return 1;
63549+#endif
63550+}
63551+
63552+int gr_is_capable_nolog(const int cap)
63553+{
63554+#ifdef CONFIG_GRKERNSEC
63555+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63556+ return 1;
63557+ return 0;
63558+#else
63559+ return 1;
63560+#endif
63561+}
63562+
63563+EXPORT_SYMBOL(gr_is_capable);
63564+EXPORT_SYMBOL(gr_is_capable_nolog);
63565diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63566new file mode 100644
63567index 0000000..d3ee748
63568--- /dev/null
63569+++ b/grsecurity/grsec_fifo.c
63570@@ -0,0 +1,24 @@
63571+#include <linux/kernel.h>
63572+#include <linux/sched.h>
63573+#include <linux/fs.h>
63574+#include <linux/file.h>
63575+#include <linux/grinternal.h>
63576+
63577+int
63578+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63579+ const struct dentry *dir, const int flag, const int acc_mode)
63580+{
63581+#ifdef CONFIG_GRKERNSEC_FIFO
63582+ const struct cred *cred = current_cred();
63583+
63584+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63585+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63586+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
63587+ (cred->fsuid != dentry->d_inode->i_uid)) {
63588+ if (!inode_permission(dentry->d_inode, acc_mode))
63589+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
63590+ return -EACCES;
63591+ }
63592+#endif
63593+ return 0;
63594+}
63595diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63596new file mode 100644
63597index 0000000..8ca18bf
63598--- /dev/null
63599+++ b/grsecurity/grsec_fork.c
63600@@ -0,0 +1,23 @@
63601+#include <linux/kernel.h>
63602+#include <linux/sched.h>
63603+#include <linux/grsecurity.h>
63604+#include <linux/grinternal.h>
63605+#include <linux/errno.h>
63606+
63607+void
63608+gr_log_forkfail(const int retval)
63609+{
63610+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63611+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63612+ switch (retval) {
63613+ case -EAGAIN:
63614+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63615+ break;
63616+ case -ENOMEM:
63617+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63618+ break;
63619+ }
63620+ }
63621+#endif
63622+ return;
63623+}
63624diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63625new file mode 100644
63626index 0000000..1e995d3
63627--- /dev/null
63628+++ b/grsecurity/grsec_init.c
63629@@ -0,0 +1,278 @@
63630+#include <linux/kernel.h>
63631+#include <linux/sched.h>
63632+#include <linux/mm.h>
63633+#include <linux/smp_lock.h>
63634+#include <linux/gracl.h>
63635+#include <linux/slab.h>
63636+#include <linux/vmalloc.h>
63637+#include <linux/percpu.h>
63638+#include <linux/module.h>
63639+
63640+int grsec_enable_ptrace_readexec;
63641+int grsec_enable_setxid;
63642+int grsec_enable_brute;
63643+int grsec_enable_link;
63644+int grsec_enable_dmesg;
63645+int grsec_enable_harden_ptrace;
63646+int grsec_enable_fifo;
63647+int grsec_enable_execlog;
63648+int grsec_enable_signal;
63649+int grsec_enable_forkfail;
63650+int grsec_enable_audit_ptrace;
63651+int grsec_enable_time;
63652+int grsec_enable_audit_textrel;
63653+int grsec_enable_group;
63654+int grsec_audit_gid;
63655+int grsec_enable_chdir;
63656+int grsec_enable_mount;
63657+int grsec_enable_rofs;
63658+int grsec_enable_chroot_findtask;
63659+int grsec_enable_chroot_mount;
63660+int grsec_enable_chroot_shmat;
63661+int grsec_enable_chroot_fchdir;
63662+int grsec_enable_chroot_double;
63663+int grsec_enable_chroot_pivot;
63664+int grsec_enable_chroot_chdir;
63665+int grsec_enable_chroot_chmod;
63666+int grsec_enable_chroot_mknod;
63667+int grsec_enable_chroot_nice;
63668+int grsec_enable_chroot_execlog;
63669+int grsec_enable_chroot_caps;
63670+int grsec_enable_chroot_sysctl;
63671+int grsec_enable_chroot_unix;
63672+int grsec_enable_tpe;
63673+int grsec_tpe_gid;
63674+int grsec_enable_blackhole;
63675+#ifdef CONFIG_IPV6_MODULE
63676+EXPORT_SYMBOL(grsec_enable_blackhole);
63677+#endif
63678+int grsec_lastack_retries;
63679+int grsec_enable_tpe_all;
63680+int grsec_enable_tpe_invert;
63681+int grsec_enable_socket_all;
63682+int grsec_socket_all_gid;
63683+int grsec_enable_socket_client;
63684+int grsec_socket_client_gid;
63685+int grsec_enable_socket_server;
63686+int grsec_socket_server_gid;
63687+int grsec_resource_logging;
63688+int grsec_disable_privio;
63689+int grsec_enable_log_rwxmaps;
63690+int grsec_lock;
63691+
63692+DEFINE_SPINLOCK(grsec_alert_lock);
63693+unsigned long grsec_alert_wtime = 0;
63694+unsigned long grsec_alert_fyet = 0;
63695+
63696+DEFINE_SPINLOCK(grsec_audit_lock);
63697+
63698+DEFINE_RWLOCK(grsec_exec_file_lock);
63699+
63700+char *gr_shared_page[4];
63701+
63702+char *gr_alert_log_fmt;
63703+char *gr_audit_log_fmt;
63704+char *gr_alert_log_buf;
63705+char *gr_audit_log_buf;
63706+
63707+extern struct gr_arg *gr_usermode;
63708+extern unsigned char *gr_system_salt;
63709+extern unsigned char *gr_system_sum;
63710+
63711+void __init
63712+grsecurity_init(void)
63713+{
63714+ int j;
63715+ /* create the per-cpu shared pages */
63716+
63717+#ifdef CONFIG_X86
63718+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63719+#endif
63720+
63721+ for (j = 0; j < 4; j++) {
63722+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63723+ if (gr_shared_page[j] == NULL) {
63724+ panic("Unable to allocate grsecurity shared page");
63725+ return;
63726+ }
63727+ }
63728+
63729+ /* allocate log buffers */
63730+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63731+ if (!gr_alert_log_fmt) {
63732+ panic("Unable to allocate grsecurity alert log format buffer");
63733+ return;
63734+ }
63735+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63736+ if (!gr_audit_log_fmt) {
63737+ panic("Unable to allocate grsecurity audit log format buffer");
63738+ return;
63739+ }
63740+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63741+ if (!gr_alert_log_buf) {
63742+ panic("Unable to allocate grsecurity alert log buffer");
63743+ return;
63744+ }
63745+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63746+ if (!gr_audit_log_buf) {
63747+ panic("Unable to allocate grsecurity audit log buffer");
63748+ return;
63749+ }
63750+
63751+ /* allocate memory for authentication structure */
63752+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63753+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63754+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63755+
63756+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63757+ panic("Unable to allocate grsecurity authentication structure");
63758+ return;
63759+ }
63760+
63761+
63762+#ifdef CONFIG_GRKERNSEC_IO
63763+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63764+ grsec_disable_privio = 1;
63765+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63766+ grsec_disable_privio = 1;
63767+#else
63768+ grsec_disable_privio = 0;
63769+#endif
63770+#endif
63771+
63772+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63773+ /* for backward compatibility, tpe_invert always defaults to on if
63774+ enabled in the kernel
63775+ */
63776+ grsec_enable_tpe_invert = 1;
63777+#endif
63778+
63779+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63780+#ifndef CONFIG_GRKERNSEC_SYSCTL
63781+ grsec_lock = 1;
63782+#endif
63783+
63784+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63785+ grsec_enable_audit_textrel = 1;
63786+#endif
63787+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63788+ grsec_enable_log_rwxmaps = 1;
63789+#endif
63790+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63791+ grsec_enable_group = 1;
63792+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63793+#endif
63794+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63795+ grsec_enable_chdir = 1;
63796+#endif
63797+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63798+ grsec_enable_harden_ptrace = 1;
63799+#endif
63800+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63801+ grsec_enable_mount = 1;
63802+#endif
63803+#ifdef CONFIG_GRKERNSEC_LINK
63804+ grsec_enable_link = 1;
63805+#endif
63806+#ifdef CONFIG_GRKERNSEC_BRUTE
63807+ grsec_enable_brute = 1;
63808+#endif
63809+#ifdef CONFIG_GRKERNSEC_DMESG
63810+ grsec_enable_dmesg = 1;
63811+#endif
63812+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63813+ grsec_enable_blackhole = 1;
63814+ grsec_lastack_retries = 4;
63815+#endif
63816+#ifdef CONFIG_GRKERNSEC_FIFO
63817+ grsec_enable_fifo = 1;
63818+#endif
63819+#ifdef CONFIG_GRKERNSEC_EXECLOG
63820+ grsec_enable_execlog = 1;
63821+#endif
63822+#ifdef CONFIG_GRKERNSEC_SETXID
63823+ grsec_enable_setxid = 1;
63824+#endif
63825+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63826+ grsec_enable_ptrace_readexec = 1;
63827+#endif
63828+#ifdef CONFIG_GRKERNSEC_SIGNAL
63829+ grsec_enable_signal = 1;
63830+#endif
63831+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63832+ grsec_enable_forkfail = 1;
63833+#endif
63834+#ifdef CONFIG_GRKERNSEC_TIME
63835+ grsec_enable_time = 1;
63836+#endif
63837+#ifdef CONFIG_GRKERNSEC_RESLOG
63838+ grsec_resource_logging = 1;
63839+#endif
63840+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63841+ grsec_enable_chroot_findtask = 1;
63842+#endif
63843+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63844+ grsec_enable_chroot_unix = 1;
63845+#endif
63846+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63847+ grsec_enable_chroot_mount = 1;
63848+#endif
63849+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63850+ grsec_enable_chroot_fchdir = 1;
63851+#endif
63852+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63853+ grsec_enable_chroot_shmat = 1;
63854+#endif
63855+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63856+ grsec_enable_audit_ptrace = 1;
63857+#endif
63858+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63859+ grsec_enable_chroot_double = 1;
63860+#endif
63861+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63862+ grsec_enable_chroot_pivot = 1;
63863+#endif
63864+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63865+ grsec_enable_chroot_chdir = 1;
63866+#endif
63867+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63868+ grsec_enable_chroot_chmod = 1;
63869+#endif
63870+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63871+ grsec_enable_chroot_mknod = 1;
63872+#endif
63873+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63874+ grsec_enable_chroot_nice = 1;
63875+#endif
63876+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63877+ grsec_enable_chroot_execlog = 1;
63878+#endif
63879+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63880+ grsec_enable_chroot_caps = 1;
63881+#endif
63882+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63883+ grsec_enable_chroot_sysctl = 1;
63884+#endif
63885+#ifdef CONFIG_GRKERNSEC_TPE
63886+ grsec_enable_tpe = 1;
63887+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63888+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63889+ grsec_enable_tpe_all = 1;
63890+#endif
63891+#endif
63892+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63893+ grsec_enable_socket_all = 1;
63894+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63895+#endif
63896+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63897+ grsec_enable_socket_client = 1;
63898+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63899+#endif
63900+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63901+ grsec_enable_socket_server = 1;
63902+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63903+#endif
63904+#endif
63905+
63906+ return;
63907+}
63908diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63909new file mode 100644
63910index 0000000..3efe141
63911--- /dev/null
63912+++ b/grsecurity/grsec_link.c
63913@@ -0,0 +1,43 @@
63914+#include <linux/kernel.h>
63915+#include <linux/sched.h>
63916+#include <linux/fs.h>
63917+#include <linux/file.h>
63918+#include <linux/grinternal.h>
63919+
63920+int
63921+gr_handle_follow_link(const struct inode *parent,
63922+ const struct inode *inode,
63923+ const struct dentry *dentry, const struct vfsmount *mnt)
63924+{
63925+#ifdef CONFIG_GRKERNSEC_LINK
63926+ const struct cred *cred = current_cred();
63927+
63928+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63929+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63930+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63931+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63932+ return -EACCES;
63933+ }
63934+#endif
63935+ return 0;
63936+}
63937+
63938+int
63939+gr_handle_hardlink(const struct dentry *dentry,
63940+ const struct vfsmount *mnt,
63941+ struct inode *inode, const int mode, const char *to)
63942+{
63943+#ifdef CONFIG_GRKERNSEC_LINK
63944+ const struct cred *cred = current_cred();
63945+
63946+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63947+ (!S_ISREG(mode) || (mode & S_ISUID) ||
63948+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63949+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63950+ !capable(CAP_FOWNER) && cred->uid) {
63951+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63952+ return -EPERM;
63953+ }
63954+#endif
63955+ return 0;
63956+}
63957diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63958new file mode 100644
63959index 0000000..a45d2e9
63960--- /dev/null
63961+++ b/grsecurity/grsec_log.c
63962@@ -0,0 +1,322 @@
63963+#include <linux/kernel.h>
63964+#include <linux/sched.h>
63965+#include <linux/file.h>
63966+#include <linux/tty.h>
63967+#include <linux/fs.h>
63968+#include <linux/grinternal.h>
63969+
63970+#ifdef CONFIG_TREE_PREEMPT_RCU
63971+#define DISABLE_PREEMPT() preempt_disable()
63972+#define ENABLE_PREEMPT() preempt_enable()
63973+#else
63974+#define DISABLE_PREEMPT()
63975+#define ENABLE_PREEMPT()
63976+#endif
63977+
63978+#define BEGIN_LOCKS(x) \
63979+ DISABLE_PREEMPT(); \
63980+ rcu_read_lock(); \
63981+ read_lock(&tasklist_lock); \
63982+ read_lock(&grsec_exec_file_lock); \
63983+ if (x != GR_DO_AUDIT) \
63984+ spin_lock(&grsec_alert_lock); \
63985+ else \
63986+ spin_lock(&grsec_audit_lock)
63987+
63988+#define END_LOCKS(x) \
63989+ if (x != GR_DO_AUDIT) \
63990+ spin_unlock(&grsec_alert_lock); \
63991+ else \
63992+ spin_unlock(&grsec_audit_lock); \
63993+ read_unlock(&grsec_exec_file_lock); \
63994+ read_unlock(&tasklist_lock); \
63995+ rcu_read_unlock(); \
63996+ ENABLE_PREEMPT(); \
63997+ if (x == GR_DONT_AUDIT) \
63998+ gr_handle_alertkill(current)
63999+
64000+enum {
64001+ FLOODING,
64002+ NO_FLOODING
64003+};
64004+
64005+extern char *gr_alert_log_fmt;
64006+extern char *gr_audit_log_fmt;
64007+extern char *gr_alert_log_buf;
64008+extern char *gr_audit_log_buf;
64009+
64010+static int gr_log_start(int audit)
64011+{
64012+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
64013+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
64014+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64015+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
64016+ unsigned long curr_secs = get_seconds();
64017+
64018+ if (audit == GR_DO_AUDIT)
64019+ goto set_fmt;
64020+
64021+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
64022+ grsec_alert_wtime = curr_secs;
64023+ grsec_alert_fyet = 0;
64024+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
64025+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
64026+ grsec_alert_fyet++;
64027+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
64028+ grsec_alert_wtime = curr_secs;
64029+ grsec_alert_fyet++;
64030+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
64031+ return FLOODING;
64032+ }
64033+ else return FLOODING;
64034+
64035+set_fmt:
64036+#endif
64037+ memset(buf, 0, PAGE_SIZE);
64038+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
64039+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
64040+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
64041+ } else if (current->signal->curr_ip) {
64042+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
64043+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
64044+ } else if (gr_acl_is_enabled()) {
64045+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
64046+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
64047+ } else {
64048+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
64049+ strcpy(buf, fmt);
64050+ }
64051+
64052+ return NO_FLOODING;
64053+}
64054+
64055+static void gr_log_middle(int audit, const char *msg, va_list ap)
64056+ __attribute__ ((format (printf, 2, 0)));
64057+
64058+static void gr_log_middle(int audit, const char *msg, va_list ap)
64059+{
64060+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64061+ unsigned int len = strlen(buf);
64062+
64063+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64064+
64065+ return;
64066+}
64067+
64068+static void gr_log_middle_varargs(int audit, const char *msg, ...)
64069+ __attribute__ ((format (printf, 2, 3)));
64070+
64071+static void gr_log_middle_varargs(int audit, const char *msg, ...)
64072+{
64073+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64074+ unsigned int len = strlen(buf);
64075+ va_list ap;
64076+
64077+ va_start(ap, msg);
64078+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64079+ va_end(ap);
64080+
64081+ return;
64082+}
64083+
64084+static void gr_log_end(int audit, int append_default)
64085+{
64086+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64087+
64088+ if (append_default) {
64089+ unsigned int len = strlen(buf);
64090+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
64091+ }
64092+
64093+ printk("%s\n", buf);
64094+
64095+ return;
64096+}
64097+
64098+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
64099+{
64100+ int logtype;
64101+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
64102+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
64103+ void *voidptr = NULL;
64104+ int num1 = 0, num2 = 0;
64105+ unsigned long ulong1 = 0, ulong2 = 0;
64106+ struct dentry *dentry = NULL;
64107+ struct vfsmount *mnt = NULL;
64108+ struct file *file = NULL;
64109+ struct task_struct *task = NULL;
64110+ const struct cred *cred, *pcred;
64111+ va_list ap;
64112+
64113+ BEGIN_LOCKS(audit);
64114+ logtype = gr_log_start(audit);
64115+ if (logtype == FLOODING) {
64116+ END_LOCKS(audit);
64117+ return;
64118+ }
64119+ va_start(ap, argtypes);
64120+ switch (argtypes) {
64121+ case GR_TTYSNIFF:
64122+ task = va_arg(ap, struct task_struct *);
64123+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
64124+ break;
64125+ case GR_SYSCTL_HIDDEN:
64126+ str1 = va_arg(ap, char *);
64127+ gr_log_middle_varargs(audit, msg, result, str1);
64128+ break;
64129+ case GR_RBAC:
64130+ dentry = va_arg(ap, struct dentry *);
64131+ mnt = va_arg(ap, struct vfsmount *);
64132+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
64133+ break;
64134+ case GR_RBAC_STR:
64135+ dentry = va_arg(ap, struct dentry *);
64136+ mnt = va_arg(ap, struct vfsmount *);
64137+ str1 = va_arg(ap, char *);
64138+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
64139+ break;
64140+ case GR_STR_RBAC:
64141+ str1 = va_arg(ap, char *);
64142+ dentry = va_arg(ap, struct dentry *);
64143+ mnt = va_arg(ap, struct vfsmount *);
64144+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
64145+ break;
64146+ case GR_RBAC_MODE2:
64147+ dentry = va_arg(ap, struct dentry *);
64148+ mnt = va_arg(ap, struct vfsmount *);
64149+ str1 = va_arg(ap, char *);
64150+ str2 = va_arg(ap, char *);
64151+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
64152+ break;
64153+ case GR_RBAC_MODE3:
64154+ dentry = va_arg(ap, struct dentry *);
64155+ mnt = va_arg(ap, struct vfsmount *);
64156+ str1 = va_arg(ap, char *);
64157+ str2 = va_arg(ap, char *);
64158+ str3 = va_arg(ap, char *);
64159+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
64160+ break;
64161+ case GR_FILENAME:
64162+ dentry = va_arg(ap, struct dentry *);
64163+ mnt = va_arg(ap, struct vfsmount *);
64164+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
64165+ break;
64166+ case GR_STR_FILENAME:
64167+ str1 = va_arg(ap, char *);
64168+ dentry = va_arg(ap, struct dentry *);
64169+ mnt = va_arg(ap, struct vfsmount *);
64170+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
64171+ break;
64172+ case GR_FILENAME_STR:
64173+ dentry = va_arg(ap, struct dentry *);
64174+ mnt = va_arg(ap, struct vfsmount *);
64175+ str1 = va_arg(ap, char *);
64176+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
64177+ break;
64178+ case GR_FILENAME_TWO_INT:
64179+ dentry = va_arg(ap, struct dentry *);
64180+ mnt = va_arg(ap, struct vfsmount *);
64181+ num1 = va_arg(ap, int);
64182+ num2 = va_arg(ap, int);
64183+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
64184+ break;
64185+ case GR_FILENAME_TWO_INT_STR:
64186+ dentry = va_arg(ap, struct dentry *);
64187+ mnt = va_arg(ap, struct vfsmount *);
64188+ num1 = va_arg(ap, int);
64189+ num2 = va_arg(ap, int);
64190+ str1 = va_arg(ap, char *);
64191+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
64192+ break;
64193+ case GR_TEXTREL:
64194+ file = va_arg(ap, struct file *);
64195+ ulong1 = va_arg(ap, unsigned long);
64196+ ulong2 = va_arg(ap, unsigned long);
64197+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
64198+ break;
64199+ case GR_PTRACE:
64200+ task = va_arg(ap, struct task_struct *);
64201+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
64202+ break;
64203+ case GR_RESOURCE:
64204+ task = va_arg(ap, struct task_struct *);
64205+ cred = __task_cred(task);
64206+ pcred = __task_cred(task->real_parent);
64207+ ulong1 = va_arg(ap, unsigned long);
64208+ str1 = va_arg(ap, char *);
64209+ ulong2 = va_arg(ap, unsigned long);
64210+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64211+ break;
64212+ case GR_CAP:
64213+ task = va_arg(ap, struct task_struct *);
64214+ cred = __task_cred(task);
64215+ pcred = __task_cred(task->real_parent);
64216+ str1 = va_arg(ap, char *);
64217+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64218+ break;
64219+ case GR_SIG:
64220+ str1 = va_arg(ap, char *);
64221+ voidptr = va_arg(ap, void *);
64222+ gr_log_middle_varargs(audit, msg, str1, voidptr);
64223+ break;
64224+ case GR_SIG2:
64225+ task = va_arg(ap, struct task_struct *);
64226+ cred = __task_cred(task);
64227+ pcred = __task_cred(task->real_parent);
64228+ num1 = va_arg(ap, int);
64229+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64230+ break;
64231+ case GR_CRASH1:
64232+ task = va_arg(ap, struct task_struct *);
64233+ cred = __task_cred(task);
64234+ pcred = __task_cred(task->real_parent);
64235+ ulong1 = va_arg(ap, unsigned long);
64236+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
64237+ break;
64238+ case GR_CRASH2:
64239+ task = va_arg(ap, struct task_struct *);
64240+ cred = __task_cred(task);
64241+ pcred = __task_cred(task->real_parent);
64242+ ulong1 = va_arg(ap, unsigned long);
64243+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
64244+ break;
64245+ case GR_RWXMAP:
64246+ file = va_arg(ap, struct file *);
64247+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
64248+ break;
64249+ case GR_PSACCT:
64250+ {
64251+ unsigned int wday, cday;
64252+ __u8 whr, chr;
64253+ __u8 wmin, cmin;
64254+ __u8 wsec, csec;
64255+ char cur_tty[64] = { 0 };
64256+ char parent_tty[64] = { 0 };
64257+
64258+ task = va_arg(ap, struct task_struct *);
64259+ wday = va_arg(ap, unsigned int);
64260+ cday = va_arg(ap, unsigned int);
64261+ whr = va_arg(ap, int);
64262+ chr = va_arg(ap, int);
64263+ wmin = va_arg(ap, int);
64264+ cmin = va_arg(ap, int);
64265+ wsec = va_arg(ap, int);
64266+ csec = va_arg(ap, int);
64267+ ulong1 = va_arg(ap, unsigned long);
64268+ cred = __task_cred(task);
64269+ pcred = __task_cred(task->real_parent);
64270+
64271+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
64272+ }
64273+ break;
64274+ default:
64275+ gr_log_middle(audit, msg, ap);
64276+ }
64277+ va_end(ap);
64278+ // these don't need DEFAULTSECARGS printed on the end
64279+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64280+ gr_log_end(audit, 0);
64281+ else
64282+ gr_log_end(audit, 1);
64283+ END_LOCKS(audit);
64284+}
64285diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64286new file mode 100644
64287index 0000000..f536303
64288--- /dev/null
64289+++ b/grsecurity/grsec_mem.c
64290@@ -0,0 +1,40 @@
64291+#include <linux/kernel.h>
64292+#include <linux/sched.h>
64293+#include <linux/mm.h>
64294+#include <linux/mman.h>
64295+#include <linux/grinternal.h>
64296+
64297+void
64298+gr_handle_ioperm(void)
64299+{
64300+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64301+ return;
64302+}
64303+
64304+void
64305+gr_handle_iopl(void)
64306+{
64307+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64308+ return;
64309+}
64310+
64311+void
64312+gr_handle_mem_readwrite(u64 from, u64 to)
64313+{
64314+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64315+ return;
64316+}
64317+
64318+void
64319+gr_handle_vm86(void)
64320+{
64321+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64322+ return;
64323+}
64324+
64325+void
64326+gr_log_badprocpid(const char *entry)
64327+{
64328+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64329+ return;
64330+}
64331diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64332new file mode 100644
64333index 0000000..2131422
64334--- /dev/null
64335+++ b/grsecurity/grsec_mount.c
64336@@ -0,0 +1,62 @@
64337+#include <linux/kernel.h>
64338+#include <linux/sched.h>
64339+#include <linux/mount.h>
64340+#include <linux/grsecurity.h>
64341+#include <linux/grinternal.h>
64342+
64343+void
64344+gr_log_remount(const char *devname, const int retval)
64345+{
64346+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64347+ if (grsec_enable_mount && (retval >= 0))
64348+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64349+#endif
64350+ return;
64351+}
64352+
64353+void
64354+gr_log_unmount(const char *devname, const int retval)
64355+{
64356+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64357+ if (grsec_enable_mount && (retval >= 0))
64358+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64359+#endif
64360+ return;
64361+}
64362+
64363+void
64364+gr_log_mount(const char *from, const char *to, const int retval)
64365+{
64366+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64367+ if (grsec_enable_mount && (retval >= 0))
64368+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64369+#endif
64370+ return;
64371+}
64372+
64373+int
64374+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64375+{
64376+#ifdef CONFIG_GRKERNSEC_ROFS
64377+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64378+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64379+ return -EPERM;
64380+ } else
64381+ return 0;
64382+#endif
64383+ return 0;
64384+}
64385+
64386+int
64387+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64388+{
64389+#ifdef CONFIG_GRKERNSEC_ROFS
64390+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64391+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64392+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64393+ return -EPERM;
64394+ } else
64395+ return 0;
64396+#endif
64397+ return 0;
64398+}
64399diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64400new file mode 100644
64401index 0000000..a3b12a0
64402--- /dev/null
64403+++ b/grsecurity/grsec_pax.c
64404@@ -0,0 +1,36 @@
64405+#include <linux/kernel.h>
64406+#include <linux/sched.h>
64407+#include <linux/mm.h>
64408+#include <linux/file.h>
64409+#include <linux/grinternal.h>
64410+#include <linux/grsecurity.h>
64411+
64412+void
64413+gr_log_textrel(struct vm_area_struct * vma)
64414+{
64415+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64416+ if (grsec_enable_audit_textrel)
64417+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64418+#endif
64419+ return;
64420+}
64421+
64422+void
64423+gr_log_rwxmmap(struct file *file)
64424+{
64425+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64426+ if (grsec_enable_log_rwxmaps)
64427+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64428+#endif
64429+ return;
64430+}
64431+
64432+void
64433+gr_log_rwxmprotect(struct file *file)
64434+{
64435+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64436+ if (grsec_enable_log_rwxmaps)
64437+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64438+#endif
64439+ return;
64440+}
64441diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64442new file mode 100644
64443index 0000000..78f8733
64444--- /dev/null
64445+++ b/grsecurity/grsec_ptrace.c
64446@@ -0,0 +1,30 @@
64447+#include <linux/kernel.h>
64448+#include <linux/sched.h>
64449+#include <linux/grinternal.h>
64450+#include <linux/security.h>
64451+
64452+void
64453+gr_audit_ptrace(struct task_struct *task)
64454+{
64455+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64456+ if (grsec_enable_audit_ptrace)
64457+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64458+#endif
64459+ return;
64460+}
64461+
64462+int
64463+gr_ptrace_readexec(struct file *file, int unsafe_flags)
64464+{
64465+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64466+ const struct dentry *dentry = file->f_path.dentry;
64467+ const struct vfsmount *mnt = file->f_path.mnt;
64468+
64469+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64470+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64471+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64472+ return -EACCES;
64473+ }
64474+#endif
64475+ return 0;
64476+}
64477diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64478new file mode 100644
64479index 0000000..c648492
64480--- /dev/null
64481+++ b/grsecurity/grsec_sig.c
64482@@ -0,0 +1,206 @@
64483+#include <linux/kernel.h>
64484+#include <linux/sched.h>
64485+#include <linux/delay.h>
64486+#include <linux/grsecurity.h>
64487+#include <linux/grinternal.h>
64488+#include <linux/hardirq.h>
64489+
64490+char *signames[] = {
64491+ [SIGSEGV] = "Segmentation fault",
64492+ [SIGILL] = "Illegal instruction",
64493+ [SIGABRT] = "Abort",
64494+ [SIGBUS] = "Invalid alignment/Bus error"
64495+};
64496+
64497+void
64498+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64499+{
64500+#ifdef CONFIG_GRKERNSEC_SIGNAL
64501+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64502+ (sig == SIGABRT) || (sig == SIGBUS))) {
64503+ if (t->pid == current->pid) {
64504+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64505+ } else {
64506+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64507+ }
64508+ }
64509+#endif
64510+ return;
64511+}
64512+
64513+int
64514+gr_handle_signal(const struct task_struct *p, const int sig)
64515+{
64516+#ifdef CONFIG_GRKERNSEC
64517+ /* ignore the 0 signal for protected task checks */
64518+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
64519+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64520+ return -EPERM;
64521+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64522+ return -EPERM;
64523+ }
64524+#endif
64525+ return 0;
64526+}
64527+
64528+#ifdef CONFIG_GRKERNSEC
64529+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64530+
64531+int gr_fake_force_sig(int sig, struct task_struct *t)
64532+{
64533+ unsigned long int flags;
64534+ int ret, blocked, ignored;
64535+ struct k_sigaction *action;
64536+
64537+ spin_lock_irqsave(&t->sighand->siglock, flags);
64538+ action = &t->sighand->action[sig-1];
64539+ ignored = action->sa.sa_handler == SIG_IGN;
64540+ blocked = sigismember(&t->blocked, sig);
64541+ if (blocked || ignored) {
64542+ action->sa.sa_handler = SIG_DFL;
64543+ if (blocked) {
64544+ sigdelset(&t->blocked, sig);
64545+ recalc_sigpending_and_wake(t);
64546+ }
64547+ }
64548+ if (action->sa.sa_handler == SIG_DFL)
64549+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
64550+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64551+
64552+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
64553+
64554+ return ret;
64555+}
64556+#endif
64557+
64558+#ifdef CONFIG_GRKERNSEC_BRUTE
64559+#define GR_USER_BAN_TIME (15 * 60)
64560+
64561+static int __get_dumpable(unsigned long mm_flags)
64562+{
64563+ int ret;
64564+
64565+ ret = mm_flags & MMF_DUMPABLE_MASK;
64566+ return (ret >= 2) ? 2 : ret;
64567+}
64568+#endif
64569+
64570+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
64571+{
64572+#ifdef CONFIG_GRKERNSEC_BRUTE
64573+ uid_t uid = 0;
64574+
64575+ if (!grsec_enable_brute)
64576+ return;
64577+
64578+ rcu_read_lock();
64579+ read_lock(&tasklist_lock);
64580+ read_lock(&grsec_exec_file_lock);
64581+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
64582+ p->real_parent->brute = 1;
64583+ else {
64584+ const struct cred *cred = __task_cred(p), *cred2;
64585+ struct task_struct *tsk, *tsk2;
64586+
64587+ if (!__get_dumpable(mm_flags) && cred->uid) {
64588+ struct user_struct *user;
64589+
64590+ uid = cred->uid;
64591+
64592+ /* this is put upon execution past expiration */
64593+ user = find_user(uid);
64594+ if (user == NULL)
64595+ goto unlock;
64596+ user->banned = 1;
64597+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64598+ if (user->ban_expires == ~0UL)
64599+ user->ban_expires--;
64600+
64601+ do_each_thread(tsk2, tsk) {
64602+ cred2 = __task_cred(tsk);
64603+ if (tsk != p && cred2->uid == uid)
64604+ gr_fake_force_sig(SIGKILL, tsk);
64605+ } while_each_thread(tsk2, tsk);
64606+ }
64607+ }
64608+unlock:
64609+ read_unlock(&grsec_exec_file_lock);
64610+ read_unlock(&tasklist_lock);
64611+ rcu_read_unlock();
64612+
64613+ if (uid)
64614+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
64615+#endif
64616+ return;
64617+}
64618+
64619+void gr_handle_brute_check(void)
64620+{
64621+#ifdef CONFIG_GRKERNSEC_BRUTE
64622+ if (current->brute)
64623+ msleep(30 * 1000);
64624+#endif
64625+ return;
64626+}
64627+
64628+void gr_handle_kernel_exploit(void)
64629+{
64630+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64631+ const struct cred *cred;
64632+ struct task_struct *tsk, *tsk2;
64633+ struct user_struct *user;
64634+ uid_t uid;
64635+
64636+ if (in_irq() || in_serving_softirq() || in_nmi())
64637+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64638+
64639+ uid = current_uid();
64640+
64641+ if (uid == 0)
64642+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
64643+ else {
64644+ /* kill all the processes of this user, hold a reference
64645+ to their creds struct, and prevent them from creating
64646+ another process until system reset
64647+ */
64648+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
64649+ /* we intentionally leak this ref */
64650+ user = get_uid(current->cred->user);
64651+ if (user) {
64652+ user->banned = 1;
64653+ user->ban_expires = ~0UL;
64654+ }
64655+
64656+ read_lock(&tasklist_lock);
64657+ do_each_thread(tsk2, tsk) {
64658+ cred = __task_cred(tsk);
64659+ if (cred->uid == uid)
64660+ gr_fake_force_sig(SIGKILL, tsk);
64661+ } while_each_thread(tsk2, tsk);
64662+ read_unlock(&tasklist_lock);
64663+ }
64664+#endif
64665+}
64666+
64667+int __gr_process_user_ban(struct user_struct *user)
64668+{
64669+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64670+ if (unlikely(user->banned)) {
64671+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64672+ user->banned = 0;
64673+ user->ban_expires = 0;
64674+ free_uid(user);
64675+ } else
64676+ return -EPERM;
64677+ }
64678+#endif
64679+ return 0;
64680+}
64681+
64682+int gr_process_user_ban(void)
64683+{
64684+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64685+ return __gr_process_user_ban(current->cred->user);
64686+#endif
64687+ return 0;
64688+}
64689diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64690new file mode 100644
64691index 0000000..7512ea9
64692--- /dev/null
64693+++ b/grsecurity/grsec_sock.c
64694@@ -0,0 +1,275 @@
64695+#include <linux/kernel.h>
64696+#include <linux/module.h>
64697+#include <linux/sched.h>
64698+#include <linux/file.h>
64699+#include <linux/net.h>
64700+#include <linux/in.h>
64701+#include <linux/ip.h>
64702+#include <net/sock.h>
64703+#include <net/inet_sock.h>
64704+#include <linux/grsecurity.h>
64705+#include <linux/grinternal.h>
64706+#include <linux/gracl.h>
64707+
64708+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64709+EXPORT_SYMBOL(gr_cap_rtnetlink);
64710+
64711+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64712+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64713+
64714+EXPORT_SYMBOL(gr_search_udp_recvmsg);
64715+EXPORT_SYMBOL(gr_search_udp_sendmsg);
64716+
64717+#ifdef CONFIG_UNIX_MODULE
64718+EXPORT_SYMBOL(gr_acl_handle_unix);
64719+EXPORT_SYMBOL(gr_acl_handle_mknod);
64720+EXPORT_SYMBOL(gr_handle_chroot_unix);
64721+EXPORT_SYMBOL(gr_handle_create);
64722+#endif
64723+
64724+#ifdef CONFIG_GRKERNSEC
64725+#define gr_conn_table_size 32749
64726+struct conn_table_entry {
64727+ struct conn_table_entry *next;
64728+ struct signal_struct *sig;
64729+};
64730+
64731+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64732+DEFINE_SPINLOCK(gr_conn_table_lock);
64733+
64734+extern const char * gr_socktype_to_name(unsigned char type);
64735+extern const char * gr_proto_to_name(unsigned char proto);
64736+extern const char * gr_sockfamily_to_name(unsigned char family);
64737+
64738+static __inline__ int
64739+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64740+{
64741+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64742+}
64743+
64744+static __inline__ int
64745+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64746+ __u16 sport, __u16 dport)
64747+{
64748+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64749+ sig->gr_sport == sport && sig->gr_dport == dport))
64750+ return 1;
64751+ else
64752+ return 0;
64753+}
64754+
64755+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64756+{
64757+ struct conn_table_entry **match;
64758+ unsigned int index;
64759+
64760+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64761+ sig->gr_sport, sig->gr_dport,
64762+ gr_conn_table_size);
64763+
64764+ newent->sig = sig;
64765+
64766+ match = &gr_conn_table[index];
64767+ newent->next = *match;
64768+ *match = newent;
64769+
64770+ return;
64771+}
64772+
64773+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64774+{
64775+ struct conn_table_entry *match, *last = NULL;
64776+ unsigned int index;
64777+
64778+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64779+ sig->gr_sport, sig->gr_dport,
64780+ gr_conn_table_size);
64781+
64782+ match = gr_conn_table[index];
64783+ while (match && !conn_match(match->sig,
64784+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64785+ sig->gr_dport)) {
64786+ last = match;
64787+ match = match->next;
64788+ }
64789+
64790+ if (match) {
64791+ if (last)
64792+ last->next = match->next;
64793+ else
64794+ gr_conn_table[index] = NULL;
64795+ kfree(match);
64796+ }
64797+
64798+ return;
64799+}
64800+
64801+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64802+ __u16 sport, __u16 dport)
64803+{
64804+ struct conn_table_entry *match;
64805+ unsigned int index;
64806+
64807+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64808+
64809+ match = gr_conn_table[index];
64810+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64811+ match = match->next;
64812+
64813+ if (match)
64814+ return match->sig;
64815+ else
64816+ return NULL;
64817+}
64818+
64819+#endif
64820+
64821+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64822+{
64823+#ifdef CONFIG_GRKERNSEC
64824+ struct signal_struct *sig = task->signal;
64825+ struct conn_table_entry *newent;
64826+
64827+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64828+ if (newent == NULL)
64829+ return;
64830+ /* no bh lock needed since we are called with bh disabled */
64831+ spin_lock(&gr_conn_table_lock);
64832+ gr_del_task_from_ip_table_nolock(sig);
64833+ sig->gr_saddr = inet->rcv_saddr;
64834+ sig->gr_daddr = inet->daddr;
64835+ sig->gr_sport = inet->sport;
64836+ sig->gr_dport = inet->dport;
64837+ gr_add_to_task_ip_table_nolock(sig, newent);
64838+ spin_unlock(&gr_conn_table_lock);
64839+#endif
64840+ return;
64841+}
64842+
64843+void gr_del_task_from_ip_table(struct task_struct *task)
64844+{
64845+#ifdef CONFIG_GRKERNSEC
64846+ spin_lock_bh(&gr_conn_table_lock);
64847+ gr_del_task_from_ip_table_nolock(task->signal);
64848+ spin_unlock_bh(&gr_conn_table_lock);
64849+#endif
64850+ return;
64851+}
64852+
64853+void
64854+gr_attach_curr_ip(const struct sock *sk)
64855+{
64856+#ifdef CONFIG_GRKERNSEC
64857+ struct signal_struct *p, *set;
64858+ const struct inet_sock *inet = inet_sk(sk);
64859+
64860+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64861+ return;
64862+
64863+ set = current->signal;
64864+
64865+ spin_lock_bh(&gr_conn_table_lock);
64866+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64867+ inet->dport, inet->sport);
64868+ if (unlikely(p != NULL)) {
64869+ set->curr_ip = p->curr_ip;
64870+ set->used_accept = 1;
64871+ gr_del_task_from_ip_table_nolock(p);
64872+ spin_unlock_bh(&gr_conn_table_lock);
64873+ return;
64874+ }
64875+ spin_unlock_bh(&gr_conn_table_lock);
64876+
64877+ set->curr_ip = inet->daddr;
64878+ set->used_accept = 1;
64879+#endif
64880+ return;
64881+}
64882+
64883+int
64884+gr_handle_sock_all(const int family, const int type, const int protocol)
64885+{
64886+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64887+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64888+ (family != AF_UNIX)) {
64889+ if (family == AF_INET)
64890+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64891+ else
64892+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64893+ return -EACCES;
64894+ }
64895+#endif
64896+ return 0;
64897+}
64898+
64899+int
64900+gr_handle_sock_server(const struct sockaddr *sck)
64901+{
64902+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64903+ if (grsec_enable_socket_server &&
64904+ in_group_p(grsec_socket_server_gid) &&
64905+ sck && (sck->sa_family != AF_UNIX) &&
64906+ (sck->sa_family != AF_LOCAL)) {
64907+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64908+ return -EACCES;
64909+ }
64910+#endif
64911+ return 0;
64912+}
64913+
64914+int
64915+gr_handle_sock_server_other(const struct sock *sck)
64916+{
64917+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64918+ if (grsec_enable_socket_server &&
64919+ in_group_p(grsec_socket_server_gid) &&
64920+ sck && (sck->sk_family != AF_UNIX) &&
64921+ (sck->sk_family != AF_LOCAL)) {
64922+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64923+ return -EACCES;
64924+ }
64925+#endif
64926+ return 0;
64927+}
64928+
64929+int
64930+gr_handle_sock_client(const struct sockaddr *sck)
64931+{
64932+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64933+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64934+ sck && (sck->sa_family != AF_UNIX) &&
64935+ (sck->sa_family != AF_LOCAL)) {
64936+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64937+ return -EACCES;
64938+ }
64939+#endif
64940+ return 0;
64941+}
64942+
64943+kernel_cap_t
64944+gr_cap_rtnetlink(struct sock *sock)
64945+{
64946+#ifdef CONFIG_GRKERNSEC
64947+ if (!gr_acl_is_enabled())
64948+ return current_cap();
64949+ else if (sock->sk_protocol == NETLINK_ISCSI &&
64950+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64951+ gr_is_capable(CAP_SYS_ADMIN))
64952+ return current_cap();
64953+ else if (sock->sk_protocol == NETLINK_AUDIT &&
64954+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64955+ gr_is_capable(CAP_AUDIT_WRITE) &&
64956+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64957+ gr_is_capable(CAP_AUDIT_CONTROL))
64958+ return current_cap();
64959+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64960+ ((sock->sk_protocol == NETLINK_ROUTE) ?
64961+ gr_is_capable_nolog(CAP_NET_ADMIN) :
64962+ gr_is_capable(CAP_NET_ADMIN)))
64963+ return current_cap();
64964+ else
64965+ return __cap_empty_set;
64966+#else
64967+ return current_cap();
64968+#endif
64969+}
64970diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64971new file mode 100644
64972index 0000000..31f3258
64973--- /dev/null
64974+++ b/grsecurity/grsec_sysctl.c
64975@@ -0,0 +1,499 @@
64976+#include <linux/kernel.h>
64977+#include <linux/sched.h>
64978+#include <linux/sysctl.h>
64979+#include <linux/grsecurity.h>
64980+#include <linux/grinternal.h>
64981+
64982+int
64983+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64984+{
64985+#ifdef CONFIG_GRKERNSEC_SYSCTL
64986+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64987+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64988+ return -EACCES;
64989+ }
64990+#endif
64991+ return 0;
64992+}
64993+
64994+#ifdef CONFIG_GRKERNSEC_ROFS
64995+static int __maybe_unused one = 1;
64996+#endif
64997+
64998+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64999+ctl_table grsecurity_table[] = {
65000+#ifdef CONFIG_GRKERNSEC_SYSCTL
65001+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
65002+#ifdef CONFIG_GRKERNSEC_IO
65003+ {
65004+ .ctl_name = CTL_UNNUMBERED,
65005+ .procname = "disable_priv_io",
65006+ .data = &grsec_disable_privio,
65007+ .maxlen = sizeof(int),
65008+ .mode = 0600,
65009+ .proc_handler = &proc_dointvec,
65010+ },
65011+#endif
65012+#endif
65013+#ifdef CONFIG_GRKERNSEC_LINK
65014+ {
65015+ .ctl_name = CTL_UNNUMBERED,
65016+ .procname = "linking_restrictions",
65017+ .data = &grsec_enable_link,
65018+ .maxlen = sizeof(int),
65019+ .mode = 0600,
65020+ .proc_handler = &proc_dointvec,
65021+ },
65022+#endif
65023+#ifdef CONFIG_GRKERNSEC_BRUTE
65024+ {
65025+ .ctl_name = CTL_UNNUMBERED,
65026+ .procname = "deter_bruteforce",
65027+ .data = &grsec_enable_brute,
65028+ .maxlen = sizeof(int),
65029+ .mode = 0600,
65030+ .proc_handler = &proc_dointvec,
65031+ },
65032+#endif
65033+#ifdef CONFIG_GRKERNSEC_FIFO
65034+ {
65035+ .ctl_name = CTL_UNNUMBERED,
65036+ .procname = "fifo_restrictions",
65037+ .data = &grsec_enable_fifo,
65038+ .maxlen = sizeof(int),
65039+ .mode = 0600,
65040+ .proc_handler = &proc_dointvec,
65041+ },
65042+#endif
65043+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65044+ {
65045+ .ctl_name = CTL_UNNUMBERED,
65046+ .procname = "ptrace_readexec",
65047+ .data = &grsec_enable_ptrace_readexec,
65048+ .maxlen = sizeof(int),
65049+ .mode = 0600,
65050+ .proc_handler = &proc_dointvec,
65051+ },
65052+#endif
65053+#ifdef CONFIG_GRKERNSEC_SETXID
65054+ {
65055+ .ctl_name = CTL_UNNUMBERED,
65056+ .procname = "consistent_setxid",
65057+ .data = &grsec_enable_setxid,
65058+ .maxlen = sizeof(int),
65059+ .mode = 0600,
65060+ .proc_handler = &proc_dointvec,
65061+ },
65062+#endif
65063+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65064+ {
65065+ .ctl_name = CTL_UNNUMBERED,
65066+ .procname = "ip_blackhole",
65067+ .data = &grsec_enable_blackhole,
65068+ .maxlen = sizeof(int),
65069+ .mode = 0600,
65070+ .proc_handler = &proc_dointvec,
65071+ },
65072+ {
65073+ .ctl_name = CTL_UNNUMBERED,
65074+ .procname = "lastack_retries",
65075+ .data = &grsec_lastack_retries,
65076+ .maxlen = sizeof(int),
65077+ .mode = 0600,
65078+ .proc_handler = &proc_dointvec,
65079+ },
65080+#endif
65081+#ifdef CONFIG_GRKERNSEC_EXECLOG
65082+ {
65083+ .ctl_name = CTL_UNNUMBERED,
65084+ .procname = "exec_logging",
65085+ .data = &grsec_enable_execlog,
65086+ .maxlen = sizeof(int),
65087+ .mode = 0600,
65088+ .proc_handler = &proc_dointvec,
65089+ },
65090+#endif
65091+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65092+ {
65093+ .ctl_name = CTL_UNNUMBERED,
65094+ .procname = "rwxmap_logging",
65095+ .data = &grsec_enable_log_rwxmaps,
65096+ .maxlen = sizeof(int),
65097+ .mode = 0600,
65098+ .proc_handler = &proc_dointvec,
65099+ },
65100+#endif
65101+#ifdef CONFIG_GRKERNSEC_SIGNAL
65102+ {
65103+ .ctl_name = CTL_UNNUMBERED,
65104+ .procname = "signal_logging",
65105+ .data = &grsec_enable_signal,
65106+ .maxlen = sizeof(int),
65107+ .mode = 0600,
65108+ .proc_handler = &proc_dointvec,
65109+ },
65110+#endif
65111+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65112+ {
65113+ .ctl_name = CTL_UNNUMBERED,
65114+ .procname = "forkfail_logging",
65115+ .data = &grsec_enable_forkfail,
65116+ .maxlen = sizeof(int),
65117+ .mode = 0600,
65118+ .proc_handler = &proc_dointvec,
65119+ },
65120+#endif
65121+#ifdef CONFIG_GRKERNSEC_TIME
65122+ {
65123+ .ctl_name = CTL_UNNUMBERED,
65124+ .procname = "timechange_logging",
65125+ .data = &grsec_enable_time,
65126+ .maxlen = sizeof(int),
65127+ .mode = 0600,
65128+ .proc_handler = &proc_dointvec,
65129+ },
65130+#endif
65131+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65132+ {
65133+ .ctl_name = CTL_UNNUMBERED,
65134+ .procname = "chroot_deny_shmat",
65135+ .data = &grsec_enable_chroot_shmat,
65136+ .maxlen = sizeof(int),
65137+ .mode = 0600,
65138+ .proc_handler = &proc_dointvec,
65139+ },
65140+#endif
65141+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65142+ {
65143+ .ctl_name = CTL_UNNUMBERED,
65144+ .procname = "chroot_deny_unix",
65145+ .data = &grsec_enable_chroot_unix,
65146+ .maxlen = sizeof(int),
65147+ .mode = 0600,
65148+ .proc_handler = &proc_dointvec,
65149+ },
65150+#endif
65151+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65152+ {
65153+ .ctl_name = CTL_UNNUMBERED,
65154+ .procname = "chroot_deny_mount",
65155+ .data = &grsec_enable_chroot_mount,
65156+ .maxlen = sizeof(int),
65157+ .mode = 0600,
65158+ .proc_handler = &proc_dointvec,
65159+ },
65160+#endif
65161+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65162+ {
65163+ .ctl_name = CTL_UNNUMBERED,
65164+ .procname = "chroot_deny_fchdir",
65165+ .data = &grsec_enable_chroot_fchdir,
65166+ .maxlen = sizeof(int),
65167+ .mode = 0600,
65168+ .proc_handler = &proc_dointvec,
65169+ },
65170+#endif
65171+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65172+ {
65173+ .ctl_name = CTL_UNNUMBERED,
65174+ .procname = "chroot_deny_chroot",
65175+ .data = &grsec_enable_chroot_double,
65176+ .maxlen = sizeof(int),
65177+ .mode = 0600,
65178+ .proc_handler = &proc_dointvec,
65179+ },
65180+#endif
65181+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65182+ {
65183+ .ctl_name = CTL_UNNUMBERED,
65184+ .procname = "chroot_deny_pivot",
65185+ .data = &grsec_enable_chroot_pivot,
65186+ .maxlen = sizeof(int),
65187+ .mode = 0600,
65188+ .proc_handler = &proc_dointvec,
65189+ },
65190+#endif
65191+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65192+ {
65193+ .ctl_name = CTL_UNNUMBERED,
65194+ .procname = "chroot_enforce_chdir",
65195+ .data = &grsec_enable_chroot_chdir,
65196+ .maxlen = sizeof(int),
65197+ .mode = 0600,
65198+ .proc_handler = &proc_dointvec,
65199+ },
65200+#endif
65201+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65202+ {
65203+ .ctl_name = CTL_UNNUMBERED,
65204+ .procname = "chroot_deny_chmod",
65205+ .data = &grsec_enable_chroot_chmod,
65206+ .maxlen = sizeof(int),
65207+ .mode = 0600,
65208+ .proc_handler = &proc_dointvec,
65209+ },
65210+#endif
65211+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65212+ {
65213+ .ctl_name = CTL_UNNUMBERED,
65214+ .procname = "chroot_deny_mknod",
65215+ .data = &grsec_enable_chroot_mknod,
65216+ .maxlen = sizeof(int),
65217+ .mode = 0600,
65218+ .proc_handler = &proc_dointvec,
65219+ },
65220+#endif
65221+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65222+ {
65223+ .ctl_name = CTL_UNNUMBERED,
65224+ .procname = "chroot_restrict_nice",
65225+ .data = &grsec_enable_chroot_nice,
65226+ .maxlen = sizeof(int),
65227+ .mode = 0600,
65228+ .proc_handler = &proc_dointvec,
65229+ },
65230+#endif
65231+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65232+ {
65233+ .ctl_name = CTL_UNNUMBERED,
65234+ .procname = "chroot_execlog",
65235+ .data = &grsec_enable_chroot_execlog,
65236+ .maxlen = sizeof(int),
65237+ .mode = 0600,
65238+ .proc_handler = &proc_dointvec,
65239+ },
65240+#endif
65241+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65242+ {
65243+ .ctl_name = CTL_UNNUMBERED,
65244+ .procname = "chroot_caps",
65245+ .data = &grsec_enable_chroot_caps,
65246+ .maxlen = sizeof(int),
65247+ .mode = 0600,
65248+ .proc_handler = &proc_dointvec,
65249+ },
65250+#endif
65251+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65252+ {
65253+ .ctl_name = CTL_UNNUMBERED,
65254+ .procname = "chroot_deny_sysctl",
65255+ .data = &grsec_enable_chroot_sysctl,
65256+ .maxlen = sizeof(int),
65257+ .mode = 0600,
65258+ .proc_handler = &proc_dointvec,
65259+ },
65260+#endif
65261+#ifdef CONFIG_GRKERNSEC_TPE
65262+ {
65263+ .ctl_name = CTL_UNNUMBERED,
65264+ .procname = "tpe",
65265+ .data = &grsec_enable_tpe,
65266+ .maxlen = sizeof(int),
65267+ .mode = 0600,
65268+ .proc_handler = &proc_dointvec,
65269+ },
65270+ {
65271+ .ctl_name = CTL_UNNUMBERED,
65272+ .procname = "tpe_gid",
65273+ .data = &grsec_tpe_gid,
65274+ .maxlen = sizeof(int),
65275+ .mode = 0600,
65276+ .proc_handler = &proc_dointvec,
65277+ },
65278+#endif
65279+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65280+ {
65281+ .ctl_name = CTL_UNNUMBERED,
65282+ .procname = "tpe_invert",
65283+ .data = &grsec_enable_tpe_invert,
65284+ .maxlen = sizeof(int),
65285+ .mode = 0600,
65286+ .proc_handler = &proc_dointvec,
65287+ },
65288+#endif
65289+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65290+ {
65291+ .ctl_name = CTL_UNNUMBERED,
65292+ .procname = "tpe_restrict_all",
65293+ .data = &grsec_enable_tpe_all,
65294+ .maxlen = sizeof(int),
65295+ .mode = 0600,
65296+ .proc_handler = &proc_dointvec,
65297+ },
65298+#endif
65299+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65300+ {
65301+ .ctl_name = CTL_UNNUMBERED,
65302+ .procname = "socket_all",
65303+ .data = &grsec_enable_socket_all,
65304+ .maxlen = sizeof(int),
65305+ .mode = 0600,
65306+ .proc_handler = &proc_dointvec,
65307+ },
65308+ {
65309+ .ctl_name = CTL_UNNUMBERED,
65310+ .procname = "socket_all_gid",
65311+ .data = &grsec_socket_all_gid,
65312+ .maxlen = sizeof(int),
65313+ .mode = 0600,
65314+ .proc_handler = &proc_dointvec,
65315+ },
65316+#endif
65317+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65318+ {
65319+ .ctl_name = CTL_UNNUMBERED,
65320+ .procname = "socket_client",
65321+ .data = &grsec_enable_socket_client,
65322+ .maxlen = sizeof(int),
65323+ .mode = 0600,
65324+ .proc_handler = &proc_dointvec,
65325+ },
65326+ {
65327+ .ctl_name = CTL_UNNUMBERED,
65328+ .procname = "socket_client_gid",
65329+ .data = &grsec_socket_client_gid,
65330+ .maxlen = sizeof(int),
65331+ .mode = 0600,
65332+ .proc_handler = &proc_dointvec,
65333+ },
65334+#endif
65335+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65336+ {
65337+ .ctl_name = CTL_UNNUMBERED,
65338+ .procname = "socket_server",
65339+ .data = &grsec_enable_socket_server,
65340+ .maxlen = sizeof(int),
65341+ .mode = 0600,
65342+ .proc_handler = &proc_dointvec,
65343+ },
65344+ {
65345+ .ctl_name = CTL_UNNUMBERED,
65346+ .procname = "socket_server_gid",
65347+ .data = &grsec_socket_server_gid,
65348+ .maxlen = sizeof(int),
65349+ .mode = 0600,
65350+ .proc_handler = &proc_dointvec,
65351+ },
65352+#endif
65353+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65354+ {
65355+ .ctl_name = CTL_UNNUMBERED,
65356+ .procname = "audit_group",
65357+ .data = &grsec_enable_group,
65358+ .maxlen = sizeof(int),
65359+ .mode = 0600,
65360+ .proc_handler = &proc_dointvec,
65361+ },
65362+ {
65363+ .ctl_name = CTL_UNNUMBERED,
65364+ .procname = "audit_gid",
65365+ .data = &grsec_audit_gid,
65366+ .maxlen = sizeof(int),
65367+ .mode = 0600,
65368+ .proc_handler = &proc_dointvec,
65369+ },
65370+#endif
65371+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65372+ {
65373+ .ctl_name = CTL_UNNUMBERED,
65374+ .procname = "audit_chdir",
65375+ .data = &grsec_enable_chdir,
65376+ .maxlen = sizeof(int),
65377+ .mode = 0600,
65378+ .proc_handler = &proc_dointvec,
65379+ },
65380+#endif
65381+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65382+ {
65383+ .ctl_name = CTL_UNNUMBERED,
65384+ .procname = "audit_mount",
65385+ .data = &grsec_enable_mount,
65386+ .maxlen = sizeof(int),
65387+ .mode = 0600,
65388+ .proc_handler = &proc_dointvec,
65389+ },
65390+#endif
65391+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65392+ {
65393+ .ctl_name = CTL_UNNUMBERED,
65394+ .procname = "audit_textrel",
65395+ .data = &grsec_enable_audit_textrel,
65396+ .maxlen = sizeof(int),
65397+ .mode = 0600,
65398+ .proc_handler = &proc_dointvec,
65399+ },
65400+#endif
65401+#ifdef CONFIG_GRKERNSEC_DMESG
65402+ {
65403+ .ctl_name = CTL_UNNUMBERED,
65404+ .procname = "dmesg",
65405+ .data = &grsec_enable_dmesg,
65406+ .maxlen = sizeof(int),
65407+ .mode = 0600,
65408+ .proc_handler = &proc_dointvec,
65409+ },
65410+#endif
65411+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65412+ {
65413+ .ctl_name = CTL_UNNUMBERED,
65414+ .procname = "chroot_findtask",
65415+ .data = &grsec_enable_chroot_findtask,
65416+ .maxlen = sizeof(int),
65417+ .mode = 0600,
65418+ .proc_handler = &proc_dointvec,
65419+ },
65420+#endif
65421+#ifdef CONFIG_GRKERNSEC_RESLOG
65422+ {
65423+ .ctl_name = CTL_UNNUMBERED,
65424+ .procname = "resource_logging",
65425+ .data = &grsec_resource_logging,
65426+ .maxlen = sizeof(int),
65427+ .mode = 0600,
65428+ .proc_handler = &proc_dointvec,
65429+ },
65430+#endif
65431+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65432+ {
65433+ .ctl_name = CTL_UNNUMBERED,
65434+ .procname = "audit_ptrace",
65435+ .data = &grsec_enable_audit_ptrace,
65436+ .maxlen = sizeof(int),
65437+ .mode = 0600,
65438+ .proc_handler = &proc_dointvec,
65439+ },
65440+#endif
65441+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65442+ {
65443+ .ctl_name = CTL_UNNUMBERED,
65444+ .procname = "harden_ptrace",
65445+ .data = &grsec_enable_harden_ptrace,
65446+ .maxlen = sizeof(int),
65447+ .mode = 0600,
65448+ .proc_handler = &proc_dointvec,
65449+ },
65450+#endif
65451+ {
65452+ .ctl_name = CTL_UNNUMBERED,
65453+ .procname = "grsec_lock",
65454+ .data = &grsec_lock,
65455+ .maxlen = sizeof(int),
65456+ .mode = 0600,
65457+ .proc_handler = &proc_dointvec,
65458+ },
65459+#endif
65460+#ifdef CONFIG_GRKERNSEC_ROFS
65461+ {
65462+ .ctl_name = CTL_UNNUMBERED,
65463+ .procname = "romount_protect",
65464+ .data = &grsec_enable_rofs,
65465+ .maxlen = sizeof(int),
65466+ .mode = 0600,
65467+ .proc_handler = &proc_dointvec_minmax,
65468+ .extra1 = &one,
65469+ .extra2 = &one,
65470+ },
65471+#endif
65472+ { .ctl_name = 0 }
65473+};
65474+#endif
65475diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65476new file mode 100644
65477index 0000000..0dc13c3
65478--- /dev/null
65479+++ b/grsecurity/grsec_time.c
65480@@ -0,0 +1,16 @@
65481+#include <linux/kernel.h>
65482+#include <linux/sched.h>
65483+#include <linux/grinternal.h>
65484+#include <linux/module.h>
65485+
65486+void
65487+gr_log_timechange(void)
65488+{
65489+#ifdef CONFIG_GRKERNSEC_TIME
65490+ if (grsec_enable_time)
65491+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65492+#endif
65493+ return;
65494+}
65495+
65496+EXPORT_SYMBOL(gr_log_timechange);
65497diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65498new file mode 100644
65499index 0000000..07e0dc0
65500--- /dev/null
65501+++ b/grsecurity/grsec_tpe.c
65502@@ -0,0 +1,73 @@
65503+#include <linux/kernel.h>
65504+#include <linux/sched.h>
65505+#include <linux/file.h>
65506+#include <linux/fs.h>
65507+#include <linux/grinternal.h>
65508+
65509+extern int gr_acl_tpe_check(void);
65510+
65511+int
65512+gr_tpe_allow(const struct file *file)
65513+{
65514+#ifdef CONFIG_GRKERNSEC
65515+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65516+ const struct cred *cred = current_cred();
65517+ char *msg = NULL;
65518+ char *msg2 = NULL;
65519+
65520+ // never restrict root
65521+ if (!cred->uid)
65522+ return 1;
65523+
65524+ if (grsec_enable_tpe) {
65525+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65526+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65527+ msg = "not being in trusted group";
65528+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65529+ msg = "being in untrusted group";
65530+#else
65531+ if (in_group_p(grsec_tpe_gid))
65532+ msg = "being in untrusted group";
65533+#endif
65534+ }
65535+ if (!msg && gr_acl_tpe_check())
65536+ msg = "being in untrusted role";
65537+
65538+ // not in any affected group/role
65539+ if (!msg)
65540+ goto next_check;
65541+
65542+ if (inode->i_uid)
65543+ msg2 = "file in non-root-owned directory";
65544+ else if (inode->i_mode & S_IWOTH)
65545+ msg2 = "file in world-writable directory";
65546+ else if (inode->i_mode & S_IWGRP)
65547+ msg2 = "file in group-writable directory";
65548+
65549+ if (msg && msg2) {
65550+ char fullmsg[70] = {0};
65551+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65552+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65553+ return 0;
65554+ }
65555+ msg = NULL;
65556+next_check:
65557+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65558+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65559+ return 1;
65560+
65561+ if (inode->i_uid && (inode->i_uid != cred->uid))
65562+ msg = "directory not owned by user";
65563+ else if (inode->i_mode & S_IWOTH)
65564+ msg = "file in world-writable directory";
65565+ else if (inode->i_mode & S_IWGRP)
65566+ msg = "file in group-writable directory";
65567+
65568+ if (msg) {
65569+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65570+ return 0;
65571+ }
65572+#endif
65573+#endif
65574+ return 1;
65575+}
65576diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65577new file mode 100644
65578index 0000000..9f7b1ac
65579--- /dev/null
65580+++ b/grsecurity/grsum.c
65581@@ -0,0 +1,61 @@
65582+#include <linux/err.h>
65583+#include <linux/kernel.h>
65584+#include <linux/sched.h>
65585+#include <linux/mm.h>
65586+#include <linux/scatterlist.h>
65587+#include <linux/crypto.h>
65588+#include <linux/gracl.h>
65589+
65590+
65591+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65592+#error "crypto and sha256 must be built into the kernel"
65593+#endif
65594+
65595+int
65596+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65597+{
65598+ char *p;
65599+ struct crypto_hash *tfm;
65600+ struct hash_desc desc;
65601+ struct scatterlist sg;
65602+ unsigned char temp_sum[GR_SHA_LEN];
65603+ volatile int retval = 0;
65604+ volatile int dummy = 0;
65605+ unsigned int i;
65606+
65607+ sg_init_table(&sg, 1);
65608+
65609+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65610+ if (IS_ERR(tfm)) {
65611+ /* should never happen, since sha256 should be built in */
65612+ return 1;
65613+ }
65614+
65615+ desc.tfm = tfm;
65616+ desc.flags = 0;
65617+
65618+ crypto_hash_init(&desc);
65619+
65620+ p = salt;
65621+ sg_set_buf(&sg, p, GR_SALT_LEN);
65622+ crypto_hash_update(&desc, &sg, sg.length);
65623+
65624+ p = entry->pw;
65625+ sg_set_buf(&sg, p, strlen(p));
65626+
65627+ crypto_hash_update(&desc, &sg, sg.length);
65628+
65629+ crypto_hash_final(&desc, temp_sum);
65630+
65631+ memset(entry->pw, 0, GR_PW_LEN);
65632+
65633+ for (i = 0; i < GR_SHA_LEN; i++)
65634+ if (sum[i] != temp_sum[i])
65635+ retval = 1;
65636+ else
65637+ dummy = 1; // waste a cycle
65638+
65639+ crypto_free_hash(tfm);
65640+
65641+ return retval;
65642+}
65643diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
65644index 3cd9ccd..fe16d47 100644
65645--- a/include/acpi/acpi_bus.h
65646+++ b/include/acpi/acpi_bus.h
65647@@ -107,7 +107,7 @@ struct acpi_device_ops {
65648 acpi_op_bind bind;
65649 acpi_op_unbind unbind;
65650 acpi_op_notify notify;
65651-};
65652+} __no_const;
65653
65654 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
65655
65656diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
65657index f4906f6..71feb73 100644
65658--- a/include/acpi/acpi_drivers.h
65659+++ b/include/acpi/acpi_drivers.h
65660@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
65661 Dock Station
65662 -------------------------------------------------------------------------- */
65663 struct acpi_dock_ops {
65664- acpi_notify_handler handler;
65665- acpi_notify_handler uevent;
65666+ const acpi_notify_handler handler;
65667+ const acpi_notify_handler uevent;
65668 };
65669
65670 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
65671@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
65672 extern int register_dock_notifier(struct notifier_block *nb);
65673 extern void unregister_dock_notifier(struct notifier_block *nb);
65674 extern int register_hotplug_dock_device(acpi_handle handle,
65675- struct acpi_dock_ops *ops,
65676+ const struct acpi_dock_ops *ops,
65677 void *context);
65678 extern void unregister_hotplug_dock_device(acpi_handle handle);
65679 #else
65680@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
65681 {
65682 }
65683 static inline int register_hotplug_dock_device(acpi_handle handle,
65684- struct acpi_dock_ops *ops,
65685+ const struct acpi_dock_ops *ops,
65686 void *context)
65687 {
65688 return -ENODEV;
65689diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65690index b7babf0..a9ac9fc 100644
65691--- a/include/asm-generic/atomic-long.h
65692+++ b/include/asm-generic/atomic-long.h
65693@@ -22,6 +22,12 @@
65694
65695 typedef atomic64_t atomic_long_t;
65696
65697+#ifdef CONFIG_PAX_REFCOUNT
65698+typedef atomic64_unchecked_t atomic_long_unchecked_t;
65699+#else
65700+typedef atomic64_t atomic_long_unchecked_t;
65701+#endif
65702+
65703 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65704
65705 static inline long atomic_long_read(atomic_long_t *l)
65706@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65707 return (long)atomic64_read(v);
65708 }
65709
65710+#ifdef CONFIG_PAX_REFCOUNT
65711+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65712+{
65713+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65714+
65715+ return (long)atomic64_read_unchecked(v);
65716+}
65717+#endif
65718+
65719 static inline void atomic_long_set(atomic_long_t *l, long i)
65720 {
65721 atomic64_t *v = (atomic64_t *)l;
65722@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65723 atomic64_set(v, i);
65724 }
65725
65726+#ifdef CONFIG_PAX_REFCOUNT
65727+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65728+{
65729+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65730+
65731+ atomic64_set_unchecked(v, i);
65732+}
65733+#endif
65734+
65735 static inline void atomic_long_inc(atomic_long_t *l)
65736 {
65737 atomic64_t *v = (atomic64_t *)l;
65738@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65739 atomic64_inc(v);
65740 }
65741
65742+#ifdef CONFIG_PAX_REFCOUNT
65743+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65744+{
65745+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65746+
65747+ atomic64_inc_unchecked(v);
65748+}
65749+#endif
65750+
65751 static inline void atomic_long_dec(atomic_long_t *l)
65752 {
65753 atomic64_t *v = (atomic64_t *)l;
65754@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65755 atomic64_dec(v);
65756 }
65757
65758+#ifdef CONFIG_PAX_REFCOUNT
65759+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65760+{
65761+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65762+
65763+ atomic64_dec_unchecked(v);
65764+}
65765+#endif
65766+
65767 static inline void atomic_long_add(long i, atomic_long_t *l)
65768 {
65769 atomic64_t *v = (atomic64_t *)l;
65770@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65771 atomic64_add(i, v);
65772 }
65773
65774+#ifdef CONFIG_PAX_REFCOUNT
65775+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65776+{
65777+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65778+
65779+ atomic64_add_unchecked(i, v);
65780+}
65781+#endif
65782+
65783 static inline void atomic_long_sub(long i, atomic_long_t *l)
65784 {
65785 atomic64_t *v = (atomic64_t *)l;
65786@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65787 return (long)atomic64_inc_return(v);
65788 }
65789
65790+#ifdef CONFIG_PAX_REFCOUNT
65791+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65792+{
65793+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65794+
65795+ return (long)atomic64_inc_return_unchecked(v);
65796+}
65797+#endif
65798+
65799 static inline long atomic_long_dec_return(atomic_long_t *l)
65800 {
65801 atomic64_t *v = (atomic64_t *)l;
65802@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65803
65804 typedef atomic_t atomic_long_t;
65805
65806+#ifdef CONFIG_PAX_REFCOUNT
65807+typedef atomic_unchecked_t atomic_long_unchecked_t;
65808+#else
65809+typedef atomic_t atomic_long_unchecked_t;
65810+#endif
65811+
65812 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65813 static inline long atomic_long_read(atomic_long_t *l)
65814 {
65815@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65816 return (long)atomic_read(v);
65817 }
65818
65819+#ifdef CONFIG_PAX_REFCOUNT
65820+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65821+{
65822+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65823+
65824+ return (long)atomic_read_unchecked(v);
65825+}
65826+#endif
65827+
65828 static inline void atomic_long_set(atomic_long_t *l, long i)
65829 {
65830 atomic_t *v = (atomic_t *)l;
65831@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65832 atomic_set(v, i);
65833 }
65834
65835+#ifdef CONFIG_PAX_REFCOUNT
65836+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65837+{
65838+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65839+
65840+ atomic_set_unchecked(v, i);
65841+}
65842+#endif
65843+
65844 static inline void atomic_long_inc(atomic_long_t *l)
65845 {
65846 atomic_t *v = (atomic_t *)l;
65847@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65848 atomic_inc(v);
65849 }
65850
65851+#ifdef CONFIG_PAX_REFCOUNT
65852+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65853+{
65854+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65855+
65856+ atomic_inc_unchecked(v);
65857+}
65858+#endif
65859+
65860 static inline void atomic_long_dec(atomic_long_t *l)
65861 {
65862 atomic_t *v = (atomic_t *)l;
65863@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65864 atomic_dec(v);
65865 }
65866
65867+#ifdef CONFIG_PAX_REFCOUNT
65868+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65869+{
65870+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65871+
65872+ atomic_dec_unchecked(v);
65873+}
65874+#endif
65875+
65876 static inline void atomic_long_add(long i, atomic_long_t *l)
65877 {
65878 atomic_t *v = (atomic_t *)l;
65879@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65880 atomic_add(i, v);
65881 }
65882
65883+#ifdef CONFIG_PAX_REFCOUNT
65884+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65885+{
65886+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65887+
65888+ atomic_add_unchecked(i, v);
65889+}
65890+#endif
65891+
65892 static inline void atomic_long_sub(long i, atomic_long_t *l)
65893 {
65894 atomic_t *v = (atomic_t *)l;
65895@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65896 return (long)atomic_inc_return(v);
65897 }
65898
65899+#ifdef CONFIG_PAX_REFCOUNT
65900+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65901+{
65902+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65903+
65904+ return (long)atomic_inc_return_unchecked(v);
65905+}
65906+#endif
65907+
65908 static inline long atomic_long_dec_return(atomic_long_t *l)
65909 {
65910 atomic_t *v = (atomic_t *)l;
65911@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65912
65913 #endif /* BITS_PER_LONG == 64 */
65914
65915+#ifdef CONFIG_PAX_REFCOUNT
65916+static inline void pax_refcount_needs_these_functions(void)
65917+{
65918+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
65919+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65920+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65921+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65922+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65923+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65924+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65925+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65926+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65927+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65928+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65929+
65930+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65931+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65932+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65933+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65934+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65935+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65936+}
65937+#else
65938+#define atomic_read_unchecked(v) atomic_read(v)
65939+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65940+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65941+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65942+#define atomic_inc_unchecked(v) atomic_inc(v)
65943+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65944+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65945+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65946+#define atomic_dec_unchecked(v) atomic_dec(v)
65947+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65948+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65949+
65950+#define atomic_long_read_unchecked(v) atomic_long_read(v)
65951+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65952+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65953+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65954+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65955+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65956+#endif
65957+
65958 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65959diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65960index b18ce4f..2ee2843 100644
65961--- a/include/asm-generic/atomic64.h
65962+++ b/include/asm-generic/atomic64.h
65963@@ -16,6 +16,8 @@ typedef struct {
65964 long long counter;
65965 } atomic64_t;
65966
65967+typedef atomic64_t atomic64_unchecked_t;
65968+
65969 #define ATOMIC64_INIT(i) { (i) }
65970
65971 extern long long atomic64_read(const atomic64_t *v);
65972@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65973 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65974 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65975
65976+#define atomic64_read_unchecked(v) atomic64_read(v)
65977+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65978+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65979+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65980+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65981+#define atomic64_inc_unchecked(v) atomic64_inc(v)
65982+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65983+#define atomic64_dec_unchecked(v) atomic64_dec(v)
65984+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65985+
65986 #endif /* _ASM_GENERIC_ATOMIC64_H */
65987diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65988index d48ddf0..656a0ac 100644
65989--- a/include/asm-generic/bug.h
65990+++ b/include/asm-generic/bug.h
65991@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65992
65993 #else /* !CONFIG_BUG */
65994 #ifndef HAVE_ARCH_BUG
65995-#define BUG() do {} while(0)
65996+#define BUG() do { for (;;) ; } while(0)
65997 #endif
65998
65999 #ifndef HAVE_ARCH_BUG_ON
66000-#define BUG_ON(condition) do { if (condition) ; } while(0)
66001+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
66002 #endif
66003
66004 #ifndef HAVE_ARCH_WARN_ON
66005diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
66006index 1bfcfe5..e04c5c9 100644
66007--- a/include/asm-generic/cache.h
66008+++ b/include/asm-generic/cache.h
66009@@ -6,7 +6,7 @@
66010 * cache lines need to provide their own cache.h.
66011 */
66012
66013-#define L1_CACHE_SHIFT 5
66014-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
66015+#define L1_CACHE_SHIFT 5UL
66016+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
66017
66018 #endif /* __ASM_GENERIC_CACHE_H */
66019diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
66020index 6920695..41038bc 100644
66021--- a/include/asm-generic/dma-mapping-common.h
66022+++ b/include/asm-generic/dma-mapping-common.h
66023@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
66024 enum dma_data_direction dir,
66025 struct dma_attrs *attrs)
66026 {
66027- struct dma_map_ops *ops = get_dma_ops(dev);
66028+ const struct dma_map_ops *ops = get_dma_ops(dev);
66029 dma_addr_t addr;
66030
66031 kmemcheck_mark_initialized(ptr, size);
66032@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
66033 enum dma_data_direction dir,
66034 struct dma_attrs *attrs)
66035 {
66036- struct dma_map_ops *ops = get_dma_ops(dev);
66037+ const struct dma_map_ops *ops = get_dma_ops(dev);
66038
66039 BUG_ON(!valid_dma_direction(dir));
66040 if (ops->unmap_page)
66041@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
66042 int nents, enum dma_data_direction dir,
66043 struct dma_attrs *attrs)
66044 {
66045- struct dma_map_ops *ops = get_dma_ops(dev);
66046+ const struct dma_map_ops *ops = get_dma_ops(dev);
66047 int i, ents;
66048 struct scatterlist *s;
66049
66050@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
66051 int nents, enum dma_data_direction dir,
66052 struct dma_attrs *attrs)
66053 {
66054- struct dma_map_ops *ops = get_dma_ops(dev);
66055+ const struct dma_map_ops *ops = get_dma_ops(dev);
66056
66057 BUG_ON(!valid_dma_direction(dir));
66058 debug_dma_unmap_sg(dev, sg, nents, dir);
66059@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
66060 size_t offset, size_t size,
66061 enum dma_data_direction dir)
66062 {
66063- struct dma_map_ops *ops = get_dma_ops(dev);
66064+ const struct dma_map_ops *ops = get_dma_ops(dev);
66065 dma_addr_t addr;
66066
66067 kmemcheck_mark_initialized(page_address(page) + offset, size);
66068@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
66069 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
66070 size_t size, enum dma_data_direction dir)
66071 {
66072- struct dma_map_ops *ops = get_dma_ops(dev);
66073+ const struct dma_map_ops *ops = get_dma_ops(dev);
66074
66075 BUG_ON(!valid_dma_direction(dir));
66076 if (ops->unmap_page)
66077@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
66078 size_t size,
66079 enum dma_data_direction dir)
66080 {
66081- struct dma_map_ops *ops = get_dma_ops(dev);
66082+ const struct dma_map_ops *ops = get_dma_ops(dev);
66083
66084 BUG_ON(!valid_dma_direction(dir));
66085 if (ops->sync_single_for_cpu)
66086@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
66087 dma_addr_t addr, size_t size,
66088 enum dma_data_direction dir)
66089 {
66090- struct dma_map_ops *ops = get_dma_ops(dev);
66091+ const struct dma_map_ops *ops = get_dma_ops(dev);
66092
66093 BUG_ON(!valid_dma_direction(dir));
66094 if (ops->sync_single_for_device)
66095@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
66096 size_t size,
66097 enum dma_data_direction dir)
66098 {
66099- struct dma_map_ops *ops = get_dma_ops(dev);
66100+ const struct dma_map_ops *ops = get_dma_ops(dev);
66101
66102 BUG_ON(!valid_dma_direction(dir));
66103 if (ops->sync_single_range_for_cpu) {
66104@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
66105 size_t size,
66106 enum dma_data_direction dir)
66107 {
66108- struct dma_map_ops *ops = get_dma_ops(dev);
66109+ const struct dma_map_ops *ops = get_dma_ops(dev);
66110
66111 BUG_ON(!valid_dma_direction(dir));
66112 if (ops->sync_single_range_for_device) {
66113@@ -155,7 +155,7 @@ static inline void
66114 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
66115 int nelems, enum dma_data_direction dir)
66116 {
66117- struct dma_map_ops *ops = get_dma_ops(dev);
66118+ const struct dma_map_ops *ops = get_dma_ops(dev);
66119
66120 BUG_ON(!valid_dma_direction(dir));
66121 if (ops->sync_sg_for_cpu)
66122@@ -167,7 +167,7 @@ static inline void
66123 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
66124 int nelems, enum dma_data_direction dir)
66125 {
66126- struct dma_map_ops *ops = get_dma_ops(dev);
66127+ const struct dma_map_ops *ops = get_dma_ops(dev);
66128
66129 BUG_ON(!valid_dma_direction(dir));
66130 if (ops->sync_sg_for_device)
66131diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
66132index 0d68a1e..b74a761 100644
66133--- a/include/asm-generic/emergency-restart.h
66134+++ b/include/asm-generic/emergency-restart.h
66135@@ -1,7 +1,7 @@
66136 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
66137 #define _ASM_GENERIC_EMERGENCY_RESTART_H
66138
66139-static inline void machine_emergency_restart(void)
66140+static inline __noreturn void machine_emergency_restart(void)
66141 {
66142 machine_restart(NULL);
66143 }
66144diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
66145index 3c2344f..4590a7d 100644
66146--- a/include/asm-generic/futex.h
66147+++ b/include/asm-generic/futex.h
66148@@ -6,7 +6,7 @@
66149 #include <asm/errno.h>
66150
66151 static inline int
66152-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
66153+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
66154 {
66155 int op = (encoded_op >> 28) & 7;
66156 int cmp = (encoded_op >> 24) & 15;
66157@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
66158 }
66159
66160 static inline int
66161-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
66162+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
66163 {
66164 return -ENOSYS;
66165 }
66166diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
66167index 1ca3efc..e3dc852 100644
66168--- a/include/asm-generic/int-l64.h
66169+++ b/include/asm-generic/int-l64.h
66170@@ -46,6 +46,8 @@ typedef unsigned int u32;
66171 typedef signed long s64;
66172 typedef unsigned long u64;
66173
66174+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
66175+
66176 #define S8_C(x) x
66177 #define U8_C(x) x ## U
66178 #define S16_C(x) x
66179diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
66180index f394147..b6152b9 100644
66181--- a/include/asm-generic/int-ll64.h
66182+++ b/include/asm-generic/int-ll64.h
66183@@ -51,6 +51,8 @@ typedef unsigned int u32;
66184 typedef signed long long s64;
66185 typedef unsigned long long u64;
66186
66187+typedef unsigned long long intoverflow_t;
66188+
66189 #define S8_C(x) x
66190 #define U8_C(x) x ## U
66191 #define S16_C(x) x
66192diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
66193index e5f234a..cdb16b3 100644
66194--- a/include/asm-generic/kmap_types.h
66195+++ b/include/asm-generic/kmap_types.h
66196@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
66197 KMAP_D(16) KM_IRQ_PTE,
66198 KMAP_D(17) KM_NMI,
66199 KMAP_D(18) KM_NMI_PTE,
66200-KMAP_D(19) KM_TYPE_NR
66201+KMAP_D(19) KM_CLEARPAGE,
66202+KMAP_D(20) KM_TYPE_NR
66203 };
66204
66205 #undef KMAP_D
66206diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
66207index 725612b..9cc513a 100644
66208--- a/include/asm-generic/pgtable-nopmd.h
66209+++ b/include/asm-generic/pgtable-nopmd.h
66210@@ -1,14 +1,19 @@
66211 #ifndef _PGTABLE_NOPMD_H
66212 #define _PGTABLE_NOPMD_H
66213
66214-#ifndef __ASSEMBLY__
66215-
66216 #include <asm-generic/pgtable-nopud.h>
66217
66218-struct mm_struct;
66219-
66220 #define __PAGETABLE_PMD_FOLDED
66221
66222+#define PMD_SHIFT PUD_SHIFT
66223+#define PTRS_PER_PMD 1
66224+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
66225+#define PMD_MASK (~(PMD_SIZE-1))
66226+
66227+#ifndef __ASSEMBLY__
66228+
66229+struct mm_struct;
66230+
66231 /*
66232 * Having the pmd type consist of a pud gets the size right, and allows
66233 * us to conceptually access the pud entry that this pmd is folded into
66234@@ -16,11 +21,6 @@ struct mm_struct;
66235 */
66236 typedef struct { pud_t pud; } pmd_t;
66237
66238-#define PMD_SHIFT PUD_SHIFT
66239-#define PTRS_PER_PMD 1
66240-#define PMD_SIZE (1UL << PMD_SHIFT)
66241-#define PMD_MASK (~(PMD_SIZE-1))
66242-
66243 /*
66244 * The "pud_xxx()" functions here are trivial for a folded two-level
66245 * setup: the pmd is never bad, and a pmd always exists (as it's folded
66246diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
66247index 810431d..ccc3638 100644
66248--- a/include/asm-generic/pgtable-nopud.h
66249+++ b/include/asm-generic/pgtable-nopud.h
66250@@ -1,10 +1,15 @@
66251 #ifndef _PGTABLE_NOPUD_H
66252 #define _PGTABLE_NOPUD_H
66253
66254-#ifndef __ASSEMBLY__
66255-
66256 #define __PAGETABLE_PUD_FOLDED
66257
66258+#define PUD_SHIFT PGDIR_SHIFT
66259+#define PTRS_PER_PUD 1
66260+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66261+#define PUD_MASK (~(PUD_SIZE-1))
66262+
66263+#ifndef __ASSEMBLY__
66264+
66265 /*
66266 * Having the pud type consist of a pgd gets the size right, and allows
66267 * us to conceptually access the pgd entry that this pud is folded into
66268@@ -12,11 +17,6 @@
66269 */
66270 typedef struct { pgd_t pgd; } pud_t;
66271
66272-#define PUD_SHIFT PGDIR_SHIFT
66273-#define PTRS_PER_PUD 1
66274-#define PUD_SIZE (1UL << PUD_SHIFT)
66275-#define PUD_MASK (~(PUD_SIZE-1))
66276-
66277 /*
66278 * The "pgd_xxx()" functions here are trivial for a folded two-level
66279 * setup: the pud is never bad, and a pud always exists (as it's folded
66280diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66281index e2bd73e..fea8ed3 100644
66282--- a/include/asm-generic/pgtable.h
66283+++ b/include/asm-generic/pgtable.h
66284@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
66285 unsigned long size);
66286 #endif
66287
66288+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66289+static inline unsigned long pax_open_kernel(void) { return 0; }
66290+#endif
66291+
66292+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66293+static inline unsigned long pax_close_kernel(void) { return 0; }
66294+#endif
66295+
66296 #endif /* !__ASSEMBLY__ */
66297
66298 #endif /* _ASM_GENERIC_PGTABLE_H */
66299diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66300index b6e818f..21aa58a 100644
66301--- a/include/asm-generic/vmlinux.lds.h
66302+++ b/include/asm-generic/vmlinux.lds.h
66303@@ -199,6 +199,7 @@
66304 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66305 VMLINUX_SYMBOL(__start_rodata) = .; \
66306 *(.rodata) *(.rodata.*) \
66307+ *(.data.read_only) \
66308 *(__vermagic) /* Kernel version magic */ \
66309 *(__markers_strings) /* Markers: strings */ \
66310 *(__tracepoints_strings)/* Tracepoints: strings */ \
66311@@ -656,22 +657,24 @@
66312 * section in the linker script will go there too. @phdr should have
66313 * a leading colon.
66314 *
66315- * Note that this macros defines __per_cpu_load as an absolute symbol.
66316+ * Note that this macros defines per_cpu_load as an absolute symbol.
66317 * If there is no need to put the percpu section at a predetermined
66318 * address, use PERCPU().
66319 */
66320 #define PERCPU_VADDR(vaddr, phdr) \
66321- VMLINUX_SYMBOL(__per_cpu_load) = .; \
66322- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66323+ per_cpu_load = .; \
66324+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66325 - LOAD_OFFSET) { \
66326+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66327 VMLINUX_SYMBOL(__per_cpu_start) = .; \
66328 *(.data.percpu.first) \
66329- *(.data.percpu.page_aligned) \
66330 *(.data.percpu) \
66331+ . = ALIGN(PAGE_SIZE); \
66332+ *(.data.percpu.page_aligned) \
66333 *(.data.percpu.shared_aligned) \
66334 VMLINUX_SYMBOL(__per_cpu_end) = .; \
66335 } phdr \
66336- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
66337+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
66338
66339 /**
66340 * PERCPU - define output section for percpu area, simple version
66341diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66342index ebab6a6..351dba1 100644
66343--- a/include/drm/drmP.h
66344+++ b/include/drm/drmP.h
66345@@ -71,6 +71,7 @@
66346 #include <linux/workqueue.h>
66347 #include <linux/poll.h>
66348 #include <asm/pgalloc.h>
66349+#include <asm/local.h>
66350 #include "drm.h"
66351
66352 #include <linux/idr.h>
66353@@ -814,7 +815,7 @@ struct drm_driver {
66354 void (*vgaarb_irq)(struct drm_device *dev, bool state);
66355
66356 /* Driver private ops for this object */
66357- struct vm_operations_struct *gem_vm_ops;
66358+ const struct vm_operations_struct *gem_vm_ops;
66359
66360 int major;
66361 int minor;
66362@@ -917,7 +918,7 @@ struct drm_device {
66363
66364 /** \name Usage Counters */
66365 /*@{ */
66366- int open_count; /**< Outstanding files open */
66367+ local_t open_count; /**< Outstanding files open */
66368 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66369 atomic_t vma_count; /**< Outstanding vma areas open */
66370 int buf_use; /**< Buffers in use -- cannot alloc */
66371@@ -928,7 +929,7 @@ struct drm_device {
66372 /*@{ */
66373 unsigned long counters;
66374 enum drm_stat_type types[15];
66375- atomic_t counts[15];
66376+ atomic_unchecked_t counts[15];
66377 /*@} */
66378
66379 struct list_head filelist;
66380@@ -1016,7 +1017,7 @@ struct drm_device {
66381 struct pci_controller *hose;
66382 #endif
66383 struct drm_sg_mem *sg; /**< Scatter gather memory */
66384- unsigned int num_crtcs; /**< Number of CRTCs on this device */
66385+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
66386 void *dev_private; /**< device private data */
66387 void *mm_private;
66388 struct address_space *dev_mapping;
66389@@ -1042,11 +1043,11 @@ struct drm_device {
66390 spinlock_t object_name_lock;
66391 struct idr object_name_idr;
66392 atomic_t object_count;
66393- atomic_t object_memory;
66394+ atomic_unchecked_t object_memory;
66395 atomic_t pin_count;
66396- atomic_t pin_memory;
66397+ atomic_unchecked_t pin_memory;
66398 atomic_t gtt_count;
66399- atomic_t gtt_memory;
66400+ atomic_unchecked_t gtt_memory;
66401 uint32_t gtt_total;
66402 uint32_t invalidate_domains; /* domains pending invalidation */
66403 uint32_t flush_domains; /* domains pending flush */
66404diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66405index b29e201..3413cc9 100644
66406--- a/include/drm/drm_crtc_helper.h
66407+++ b/include/drm/drm_crtc_helper.h
66408@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
66409
66410 /* reload the current crtc LUT */
66411 void (*load_lut)(struct drm_crtc *crtc);
66412-};
66413+} __no_const;
66414
66415 struct drm_encoder_helper_funcs {
66416 void (*dpms)(struct drm_encoder *encoder, int mode);
66417@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
66418 struct drm_connector *connector);
66419 /* disable encoder when not in use - more explicit than dpms off */
66420 void (*disable)(struct drm_encoder *encoder);
66421-};
66422+} __no_const;
66423
66424 struct drm_connector_helper_funcs {
66425 int (*get_modes)(struct drm_connector *connector);
66426diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66427index b199170..6f9e64c 100644
66428--- a/include/drm/ttm/ttm_memory.h
66429+++ b/include/drm/ttm/ttm_memory.h
66430@@ -47,7 +47,7 @@
66431
66432 struct ttm_mem_shrink {
66433 int (*do_shrink) (struct ttm_mem_shrink *);
66434-};
66435+} __no_const;
66436
66437 /**
66438 * struct ttm_mem_global - Global memory accounting structure.
66439diff --git a/include/linux/a.out.h b/include/linux/a.out.h
66440index e86dfca..40cc55f 100644
66441--- a/include/linux/a.out.h
66442+++ b/include/linux/a.out.h
66443@@ -39,6 +39,14 @@ enum machine_type {
66444 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
66445 };
66446
66447+/* Constants for the N_FLAGS field */
66448+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66449+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
66450+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
66451+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
66452+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66453+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66454+
66455 #if !defined (N_MAGIC)
66456 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
66457 #endif
66458diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66459index 817b237..62c10bc 100644
66460--- a/include/linux/atmdev.h
66461+++ b/include/linux/atmdev.h
66462@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
66463 #endif
66464
66465 struct k_atm_aal_stats {
66466-#define __HANDLE_ITEM(i) atomic_t i
66467+#define __HANDLE_ITEM(i) atomic_unchecked_t i
66468 __AAL_STAT_ITEMS
66469 #undef __HANDLE_ITEM
66470 };
66471diff --git a/include/linux/backlight.h b/include/linux/backlight.h
66472index 0f5f578..8c4f884 100644
66473--- a/include/linux/backlight.h
66474+++ b/include/linux/backlight.h
66475@@ -36,18 +36,18 @@ struct backlight_device;
66476 struct fb_info;
66477
66478 struct backlight_ops {
66479- unsigned int options;
66480+ const unsigned int options;
66481
66482 #define BL_CORE_SUSPENDRESUME (1 << 0)
66483
66484 /* Notify the backlight driver some property has changed */
66485- int (*update_status)(struct backlight_device *);
66486+ int (* const update_status)(struct backlight_device *);
66487 /* Return the current backlight brightness (accounting for power,
66488 fb_blank etc.) */
66489- int (*get_brightness)(struct backlight_device *);
66490+ int (* const get_brightness)(struct backlight_device *);
66491 /* Check if given framebuffer device is the one bound to this backlight;
66492 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
66493- int (*check_fb)(struct fb_info *);
66494+ int (* const check_fb)(struct fb_info *);
66495 };
66496
66497 /* This structure defines all the properties of a backlight */
66498@@ -86,7 +86,7 @@ struct backlight_device {
66499 registered this device has been unloaded, and if class_get_devdata()
66500 points to something in the body of that driver, it is also invalid. */
66501 struct mutex ops_lock;
66502- struct backlight_ops *ops;
66503+ const struct backlight_ops *ops;
66504
66505 /* The framebuffer notifier block */
66506 struct notifier_block fb_notif;
66507@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
66508 }
66509
66510 extern struct backlight_device *backlight_device_register(const char *name,
66511- struct device *dev, void *devdata, struct backlight_ops *ops);
66512+ struct device *dev, void *devdata, const struct backlight_ops *ops);
66513 extern void backlight_device_unregister(struct backlight_device *bd);
66514 extern void backlight_force_update(struct backlight_device *bd,
66515 enum backlight_update_reason reason);
66516diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66517index a3d802e..93a2ef4 100644
66518--- a/include/linux/binfmts.h
66519+++ b/include/linux/binfmts.h
66520@@ -18,7 +18,7 @@ struct pt_regs;
66521 #define BINPRM_BUF_SIZE 128
66522
66523 #ifdef __KERNEL__
66524-#include <linux/list.h>
66525+#include <linux/sched.h>
66526
66527 #define CORENAME_MAX_SIZE 128
66528
66529@@ -58,6 +58,7 @@ struct linux_binprm{
66530 unsigned interp_flags;
66531 unsigned interp_data;
66532 unsigned long loader, exec;
66533+ char tcomm[TASK_COMM_LEN];
66534 };
66535
66536 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
66537@@ -83,6 +84,7 @@ struct linux_binfmt {
66538 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
66539 int (*load_shlib)(struct file *);
66540 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
66541+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66542 unsigned long min_coredump; /* minimal dump size */
66543 int hasvdso;
66544 };
66545diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66546index 5eb6cb0..a2906d2 100644
66547--- a/include/linux/blkdev.h
66548+++ b/include/linux/blkdev.h
66549@@ -1281,7 +1281,7 @@ struct block_device_operations {
66550 int (*revalidate_disk) (struct gendisk *);
66551 int (*getgeo)(struct block_device *, struct hd_geometry *);
66552 struct module *owner;
66553-};
66554+} __do_const;
66555
66556 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66557 unsigned long);
66558diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66559index 3b73b99..629d21b 100644
66560--- a/include/linux/blktrace_api.h
66561+++ b/include/linux/blktrace_api.h
66562@@ -160,7 +160,7 @@ struct blk_trace {
66563 struct dentry *dir;
66564 struct dentry *dropped_file;
66565 struct dentry *msg_file;
66566- atomic_t dropped;
66567+ atomic_unchecked_t dropped;
66568 };
66569
66570 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66571diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
66572index 83195fb..0b0f77d 100644
66573--- a/include/linux/byteorder/little_endian.h
66574+++ b/include/linux/byteorder/little_endian.h
66575@@ -42,51 +42,51 @@
66576
66577 static inline __le64 __cpu_to_le64p(const __u64 *p)
66578 {
66579- return (__force __le64)*p;
66580+ return (__force const __le64)*p;
66581 }
66582 static inline __u64 __le64_to_cpup(const __le64 *p)
66583 {
66584- return (__force __u64)*p;
66585+ return (__force const __u64)*p;
66586 }
66587 static inline __le32 __cpu_to_le32p(const __u32 *p)
66588 {
66589- return (__force __le32)*p;
66590+ return (__force const __le32)*p;
66591 }
66592 static inline __u32 __le32_to_cpup(const __le32 *p)
66593 {
66594- return (__force __u32)*p;
66595+ return (__force const __u32)*p;
66596 }
66597 static inline __le16 __cpu_to_le16p(const __u16 *p)
66598 {
66599- return (__force __le16)*p;
66600+ return (__force const __le16)*p;
66601 }
66602 static inline __u16 __le16_to_cpup(const __le16 *p)
66603 {
66604- return (__force __u16)*p;
66605+ return (__force const __u16)*p;
66606 }
66607 static inline __be64 __cpu_to_be64p(const __u64 *p)
66608 {
66609- return (__force __be64)__swab64p(p);
66610+ return (__force const __be64)__swab64p(p);
66611 }
66612 static inline __u64 __be64_to_cpup(const __be64 *p)
66613 {
66614- return __swab64p((__u64 *)p);
66615+ return __swab64p((const __u64 *)p);
66616 }
66617 static inline __be32 __cpu_to_be32p(const __u32 *p)
66618 {
66619- return (__force __be32)__swab32p(p);
66620+ return (__force const __be32)__swab32p(p);
66621 }
66622 static inline __u32 __be32_to_cpup(const __be32 *p)
66623 {
66624- return __swab32p((__u32 *)p);
66625+ return __swab32p((const __u32 *)p);
66626 }
66627 static inline __be16 __cpu_to_be16p(const __u16 *p)
66628 {
66629- return (__force __be16)__swab16p(p);
66630+ return (__force const __be16)__swab16p(p);
66631 }
66632 static inline __u16 __be16_to_cpup(const __be16 *p)
66633 {
66634- return __swab16p((__u16 *)p);
66635+ return __swab16p((const __u16 *)p);
66636 }
66637 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
66638 #define __le64_to_cpus(x) do { (void)(x); } while (0)
66639diff --git a/include/linux/cache.h b/include/linux/cache.h
66640index 97e2488..e7576b9 100644
66641--- a/include/linux/cache.h
66642+++ b/include/linux/cache.h
66643@@ -16,6 +16,10 @@
66644 #define __read_mostly
66645 #endif
66646
66647+#ifndef __read_only
66648+#define __read_only __read_mostly
66649+#endif
66650+
66651 #ifndef ____cacheline_aligned
66652 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66653 #endif
66654diff --git a/include/linux/capability.h b/include/linux/capability.h
66655index c8f2a5f7..1618a5c 100644
66656--- a/include/linux/capability.h
66657+++ b/include/linux/capability.h
66658@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
66659 (security_real_capable_noaudit((t), (cap)) == 0)
66660
66661 extern int capable(int cap);
66662+int capable_nolog(int cap);
66663
66664 /* audit system wants to get cap info from files as well */
66665 struct dentry;
66666diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66667index 450fa59..86019fb 100644
66668--- a/include/linux/compiler-gcc4.h
66669+++ b/include/linux/compiler-gcc4.h
66670@@ -36,4 +36,16 @@
66671 the kernel context */
66672 #define __cold __attribute__((__cold__))
66673
66674+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66675+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66676+#define __bos0(ptr) __bos((ptr), 0)
66677+#define __bos1(ptr) __bos((ptr), 1)
66678+
66679+#if __GNUC_MINOR__ >= 5
66680+#ifdef CONSTIFY_PLUGIN
66681+#define __no_const __attribute__((no_const))
66682+#define __do_const __attribute__((do_const))
66683+#endif
66684+#endif
66685+
66686 #endif
66687diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66688index 04fb513..fd6477b 100644
66689--- a/include/linux/compiler.h
66690+++ b/include/linux/compiler.h
66691@@ -5,11 +5,14 @@
66692
66693 #ifdef __CHECKER__
66694 # define __user __attribute__((noderef, address_space(1)))
66695+# define __force_user __force __user
66696 # define __kernel /* default address space */
66697+# define __force_kernel __force __kernel
66698 # define __safe __attribute__((safe))
66699 # define __force __attribute__((force))
66700 # define __nocast __attribute__((nocast))
66701 # define __iomem __attribute__((noderef, address_space(2)))
66702+# define __force_iomem __force __iomem
66703 # define __acquires(x) __attribute__((context(x,0,1)))
66704 # define __releases(x) __attribute__((context(x,1,0)))
66705 # define __acquire(x) __context__(x,1)
66706@@ -17,13 +20,34 @@
66707 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66708 extern void __chk_user_ptr(const volatile void __user *);
66709 extern void __chk_io_ptr(const volatile void __iomem *);
66710+#elif defined(CHECKER_PLUGIN)
66711+//# define __user
66712+//# define __force_user
66713+//# define __kernel
66714+//# define __force_kernel
66715+# define __safe
66716+# define __force
66717+# define __nocast
66718+# define __iomem
66719+# define __force_iomem
66720+# define __chk_user_ptr(x) (void)0
66721+# define __chk_io_ptr(x) (void)0
66722+# define __builtin_warning(x, y...) (1)
66723+# define __acquires(x)
66724+# define __releases(x)
66725+# define __acquire(x) (void)0
66726+# define __release(x) (void)0
66727+# define __cond_lock(x,c) (c)
66728 #else
66729 # define __user
66730+# define __force_user
66731 # define __kernel
66732+# define __force_kernel
66733 # define __safe
66734 # define __force
66735 # define __nocast
66736 # define __iomem
66737+# define __force_iomem
66738 # define __chk_user_ptr(x) (void)0
66739 # define __chk_io_ptr(x) (void)0
66740 # define __builtin_warning(x, y...) (1)
66741@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66742 # define __attribute_const__ /* unimplemented */
66743 #endif
66744
66745+#ifndef __no_const
66746+# define __no_const
66747+#endif
66748+
66749+#ifndef __do_const
66750+# define __do_const
66751+#endif
66752+
66753 /*
66754 * Tell gcc if a function is cold. The compiler will assume any path
66755 * directly leading to the call is unlikely.
66756@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66757 #define __cold
66758 #endif
66759
66760+#ifndef __alloc_size
66761+#define __alloc_size(...)
66762+#endif
66763+
66764+#ifndef __bos
66765+#define __bos(ptr, arg)
66766+#endif
66767+
66768+#ifndef __bos0
66769+#define __bos0(ptr)
66770+#endif
66771+
66772+#ifndef __bos1
66773+#define __bos1(ptr)
66774+#endif
66775+
66776 /* Simple shorthand for a section definition */
66777 #ifndef __section
66778 # define __section(S) __attribute__ ((__section__(#S)))
66779@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66780 * use is to mediate communication between process-level code and irq/NMI
66781 * handlers, all running on the same CPU.
66782 */
66783-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66784+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66785+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66786
66787 #endif /* __LINUX_COMPILER_H */
66788diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66789index fd92988..a3164bd 100644
66790--- a/include/linux/crypto.h
66791+++ b/include/linux/crypto.h
66792@@ -394,7 +394,7 @@ struct cipher_tfm {
66793 const u8 *key, unsigned int keylen);
66794 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66795 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66796-};
66797+} __no_const;
66798
66799 struct hash_tfm {
66800 int (*init)(struct hash_desc *desc);
66801@@ -415,13 +415,13 @@ struct compress_tfm {
66802 int (*cot_decompress)(struct crypto_tfm *tfm,
66803 const u8 *src, unsigned int slen,
66804 u8 *dst, unsigned int *dlen);
66805-};
66806+} __no_const;
66807
66808 struct rng_tfm {
66809 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66810 unsigned int dlen);
66811 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66812-};
66813+} __no_const;
66814
66815 #define crt_ablkcipher crt_u.ablkcipher
66816 #define crt_aead crt_u.aead
66817diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66818index 30b93b2..cd7a8db 100644
66819--- a/include/linux/dcache.h
66820+++ b/include/linux/dcache.h
66821@@ -119,6 +119,8 @@ struct dentry {
66822 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66823 };
66824
66825+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66826+
66827 /*
66828 * dentry->d_lock spinlock nesting subclasses:
66829 *
66830diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66831index 3e9bd6a..f4e1aa0 100644
66832--- a/include/linux/decompress/mm.h
66833+++ b/include/linux/decompress/mm.h
66834@@ -78,7 +78,7 @@ static void free(void *where)
66835 * warnings when not needed (indeed large_malloc / large_free are not
66836 * needed by inflate */
66837
66838-#define malloc(a) kmalloc(a, GFP_KERNEL)
66839+#define malloc(a) kmalloc((a), GFP_KERNEL)
66840 #define free(a) kfree(a)
66841
66842 #define large_malloc(a) vmalloc(a)
66843diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66844index 91b7618..92a93d32 100644
66845--- a/include/linux/dma-mapping.h
66846+++ b/include/linux/dma-mapping.h
66847@@ -16,51 +16,51 @@ enum dma_data_direction {
66848 };
66849
66850 struct dma_map_ops {
66851- void* (*alloc_coherent)(struct device *dev, size_t size,
66852+ void* (* const alloc_coherent)(struct device *dev, size_t size,
66853 dma_addr_t *dma_handle, gfp_t gfp);
66854- void (*free_coherent)(struct device *dev, size_t size,
66855+ void (* const free_coherent)(struct device *dev, size_t size,
66856 void *vaddr, dma_addr_t dma_handle);
66857- dma_addr_t (*map_page)(struct device *dev, struct page *page,
66858+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66859 unsigned long offset, size_t size,
66860 enum dma_data_direction dir,
66861 struct dma_attrs *attrs);
66862- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66863+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66864 size_t size, enum dma_data_direction dir,
66865 struct dma_attrs *attrs);
66866- int (*map_sg)(struct device *dev, struct scatterlist *sg,
66867+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66868 int nents, enum dma_data_direction dir,
66869 struct dma_attrs *attrs);
66870- void (*unmap_sg)(struct device *dev,
66871+ void (* const unmap_sg)(struct device *dev,
66872 struct scatterlist *sg, int nents,
66873 enum dma_data_direction dir,
66874 struct dma_attrs *attrs);
66875- void (*sync_single_for_cpu)(struct device *dev,
66876+ void (* const sync_single_for_cpu)(struct device *dev,
66877 dma_addr_t dma_handle, size_t size,
66878 enum dma_data_direction dir);
66879- void (*sync_single_for_device)(struct device *dev,
66880+ void (* const sync_single_for_device)(struct device *dev,
66881 dma_addr_t dma_handle, size_t size,
66882 enum dma_data_direction dir);
66883- void (*sync_single_range_for_cpu)(struct device *dev,
66884+ void (* const sync_single_range_for_cpu)(struct device *dev,
66885 dma_addr_t dma_handle,
66886 unsigned long offset,
66887 size_t size,
66888 enum dma_data_direction dir);
66889- void (*sync_single_range_for_device)(struct device *dev,
66890+ void (* const sync_single_range_for_device)(struct device *dev,
66891 dma_addr_t dma_handle,
66892 unsigned long offset,
66893 size_t size,
66894 enum dma_data_direction dir);
66895- void (*sync_sg_for_cpu)(struct device *dev,
66896+ void (* const sync_sg_for_cpu)(struct device *dev,
66897 struct scatterlist *sg, int nents,
66898 enum dma_data_direction dir);
66899- void (*sync_sg_for_device)(struct device *dev,
66900+ void (* const sync_sg_for_device)(struct device *dev,
66901 struct scatterlist *sg, int nents,
66902 enum dma_data_direction dir);
66903- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66904- int (*dma_supported)(struct device *dev, u64 mask);
66905+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66906+ int (* const dma_supported)(struct device *dev, u64 mask);
66907 int (*set_dma_mask)(struct device *dev, u64 mask);
66908 int is_phys;
66909-};
66910+} __do_const;
66911
66912 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66913
66914diff --git a/include/linux/dst.h b/include/linux/dst.h
66915index e26fed8..b976d9f 100644
66916--- a/include/linux/dst.h
66917+++ b/include/linux/dst.h
66918@@ -380,7 +380,7 @@ struct dst_node
66919 struct thread_pool *pool;
66920
66921 /* Transaction IDs live here */
66922- atomic_long_t gen;
66923+ atomic_long_unchecked_t gen;
66924
66925 /*
66926 * How frequently and how many times transaction
66927diff --git a/include/linux/elf.h b/include/linux/elf.h
66928index 90a4ed0..d652617 100644
66929--- a/include/linux/elf.h
66930+++ b/include/linux/elf.h
66931@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66932 #define PT_GNU_EH_FRAME 0x6474e550
66933
66934 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66935+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66936+
66937+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66938+
66939+/* Constants for the e_flags field */
66940+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66941+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66942+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66943+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66944+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66945+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66946
66947 /* These constants define the different elf file types */
66948 #define ET_NONE 0
66949@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66950 #define DT_DEBUG 21
66951 #define DT_TEXTREL 22
66952 #define DT_JMPREL 23
66953+#define DT_FLAGS 30
66954+ #define DF_TEXTREL 0x00000004
66955 #define DT_ENCODING 32
66956 #define OLD_DT_LOOS 0x60000000
66957 #define DT_LOOS 0x6000000d
66958@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66959 #define PF_W 0x2
66960 #define PF_X 0x1
66961
66962+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66963+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66964+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66965+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66966+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66967+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66968+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66969+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66970+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66971+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66972+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66973+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66974+
66975 typedef struct elf32_phdr{
66976 Elf32_Word p_type;
66977 Elf32_Off p_offset;
66978@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66979 #define EI_OSABI 7
66980 #define EI_PAD 8
66981
66982+#define EI_PAX 14
66983+
66984 #define ELFMAG0 0x7f /* EI_MAG */
66985 #define ELFMAG1 'E'
66986 #define ELFMAG2 'L'
66987@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66988 #define elf_phdr elf32_phdr
66989 #define elf_note elf32_note
66990 #define elf_addr_t Elf32_Off
66991+#define elf_dyn Elf32_Dyn
66992
66993 #else
66994
66995@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66996 #define elf_phdr elf64_phdr
66997 #define elf_note elf64_note
66998 #define elf_addr_t Elf64_Off
66999+#define elf_dyn Elf64_Dyn
67000
67001 #endif
67002
67003diff --git a/include/linux/fs.h b/include/linux/fs.h
67004index 1b9a47a..6fe2934 100644
67005--- a/include/linux/fs.h
67006+++ b/include/linux/fs.h
67007@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
67008 unsigned long, unsigned long);
67009
67010 struct address_space_operations {
67011- int (*writepage)(struct page *page, struct writeback_control *wbc);
67012- int (*readpage)(struct file *, struct page *);
67013- void (*sync_page)(struct page *);
67014+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
67015+ int (* const readpage)(struct file *, struct page *);
67016+ void (* const sync_page)(struct page *);
67017
67018 /* Write back some dirty pages from this mapping. */
67019- int (*writepages)(struct address_space *, struct writeback_control *);
67020+ int (* const writepages)(struct address_space *, struct writeback_control *);
67021
67022 /* Set a page dirty. Return true if this dirtied it */
67023- int (*set_page_dirty)(struct page *page);
67024+ int (* const set_page_dirty)(struct page *page);
67025
67026- int (*readpages)(struct file *filp, struct address_space *mapping,
67027+ int (* const readpages)(struct file *filp, struct address_space *mapping,
67028 struct list_head *pages, unsigned nr_pages);
67029
67030- int (*write_begin)(struct file *, struct address_space *mapping,
67031+ int (* const write_begin)(struct file *, struct address_space *mapping,
67032 loff_t pos, unsigned len, unsigned flags,
67033 struct page **pagep, void **fsdata);
67034- int (*write_end)(struct file *, struct address_space *mapping,
67035+ int (* const write_end)(struct file *, struct address_space *mapping,
67036 loff_t pos, unsigned len, unsigned copied,
67037 struct page *page, void *fsdata);
67038
67039 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
67040- sector_t (*bmap)(struct address_space *, sector_t);
67041- void (*invalidatepage) (struct page *, unsigned long);
67042- int (*releasepage) (struct page *, gfp_t);
67043- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
67044+ sector_t (* const bmap)(struct address_space *, sector_t);
67045+ void (* const invalidatepage) (struct page *, unsigned long);
67046+ int (* const releasepage) (struct page *, gfp_t);
67047+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
67048 loff_t offset, unsigned long nr_segs);
67049- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
67050+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
67051 void **, unsigned long *);
67052 /* migrate the contents of a page to the specified target */
67053- int (*migratepage) (struct address_space *,
67054+ int (* const migratepage) (struct address_space *,
67055 struct page *, struct page *);
67056- int (*launder_page) (struct page *);
67057- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
67058+ int (* const launder_page) (struct page *);
67059+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
67060 unsigned long);
67061- int (*error_remove_page)(struct address_space *, struct page *);
67062+ int (* const error_remove_page)(struct address_space *, struct page *);
67063 };
67064
67065 /*
67066@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
67067 typedef struct files_struct *fl_owner_t;
67068
67069 struct file_lock_operations {
67070- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
67071- void (*fl_release_private)(struct file_lock *);
67072+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
67073+ void (* const fl_release_private)(struct file_lock *);
67074 };
67075
67076 struct lock_manager_operations {
67077- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
67078- void (*fl_notify)(struct file_lock *); /* unblock callback */
67079- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
67080- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
67081- void (*fl_release_private)(struct file_lock *);
67082- void (*fl_break)(struct file_lock *);
67083- int (*fl_mylease)(struct file_lock *, struct file_lock *);
67084- int (*fl_change)(struct file_lock **, int);
67085+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
67086+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
67087+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
67088+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
67089+ void (* const fl_release_private)(struct file_lock *);
67090+ void (* const fl_break)(struct file_lock *);
67091+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
67092+ int (* const fl_change)(struct file_lock **, int);
67093 };
67094
67095 struct lock_manager {
67096@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
67097 unsigned int fi_flags; /* Flags as passed from user */
67098 unsigned int fi_extents_mapped; /* Number of mapped extents */
67099 unsigned int fi_extents_max; /* Size of fiemap_extent array */
67100- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
67101+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
67102 * array */
67103 };
67104 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
67105@@ -1512,7 +1512,8 @@ struct file_operations {
67106 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
67107 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
67108 int (*setlease)(struct file *, long, struct file_lock **);
67109-};
67110+} __do_const;
67111+typedef struct file_operations __no_const file_operations_no_const;
67112
67113 struct inode_operations {
67114 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
67115@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
67116 unsigned long, loff_t *);
67117
67118 struct super_operations {
67119- struct inode *(*alloc_inode)(struct super_block *sb);
67120- void (*destroy_inode)(struct inode *);
67121+ struct inode *(* const alloc_inode)(struct super_block *sb);
67122+ void (* const destroy_inode)(struct inode *);
67123
67124- void (*dirty_inode) (struct inode *);
67125- int (*write_inode) (struct inode *, int);
67126- void (*drop_inode) (struct inode *);
67127- void (*delete_inode) (struct inode *);
67128- void (*put_super) (struct super_block *);
67129- void (*write_super) (struct super_block *);
67130- int (*sync_fs)(struct super_block *sb, int wait);
67131- int (*freeze_fs) (struct super_block *);
67132- int (*unfreeze_fs) (struct super_block *);
67133- int (*statfs) (struct dentry *, struct kstatfs *);
67134- int (*remount_fs) (struct super_block *, int *, char *);
67135- void (*clear_inode) (struct inode *);
67136- void (*umount_begin) (struct super_block *);
67137+ void (* const dirty_inode) (struct inode *);
67138+ int (* const write_inode) (struct inode *, int);
67139+ void (* const drop_inode) (struct inode *);
67140+ void (* const delete_inode) (struct inode *);
67141+ void (* const put_super) (struct super_block *);
67142+ void (* const write_super) (struct super_block *);
67143+ int (* const sync_fs)(struct super_block *sb, int wait);
67144+ int (* const freeze_fs) (struct super_block *);
67145+ int (* const unfreeze_fs) (struct super_block *);
67146+ int (* const statfs) (struct dentry *, struct kstatfs *);
67147+ int (* const remount_fs) (struct super_block *, int *, char *);
67148+ void (* const clear_inode) (struct inode *);
67149+ void (* const umount_begin) (struct super_block *);
67150
67151- int (*show_options)(struct seq_file *, struct vfsmount *);
67152- int (*show_stats)(struct seq_file *, struct vfsmount *);
67153+ int (* const show_options)(struct seq_file *, struct vfsmount *);
67154+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
67155 #ifdef CONFIG_QUOTA
67156- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
67157- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
67158+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
67159+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
67160 #endif
67161- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
67162+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
67163 };
67164
67165 /*
67166diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
67167index 78a05bf..2a7d3e1 100644
67168--- a/include/linux/fs_struct.h
67169+++ b/include/linux/fs_struct.h
67170@@ -4,7 +4,7 @@
67171 #include <linux/path.h>
67172
67173 struct fs_struct {
67174- int users;
67175+ atomic_t users;
67176 rwlock_t lock;
67177 int umask;
67178 int in_exec;
67179diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
67180index 7be0c6f..2f63a2b 100644
67181--- a/include/linux/fscache-cache.h
67182+++ b/include/linux/fscache-cache.h
67183@@ -116,7 +116,7 @@ struct fscache_operation {
67184 #endif
67185 };
67186
67187-extern atomic_t fscache_op_debug_id;
67188+extern atomic_unchecked_t fscache_op_debug_id;
67189 extern const struct slow_work_ops fscache_op_slow_work_ops;
67190
67191 extern void fscache_enqueue_operation(struct fscache_operation *);
67192@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
67193 fscache_operation_release_t release)
67194 {
67195 atomic_set(&op->usage, 1);
67196- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
67197+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
67198 op->release = release;
67199 INIT_LIST_HEAD(&op->pend_link);
67200 fscache_set_op_state(op, "Init");
67201diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
67202index 4d6f47b..00bcedb 100644
67203--- a/include/linux/fsnotify_backend.h
67204+++ b/include/linux/fsnotify_backend.h
67205@@ -86,6 +86,7 @@ struct fsnotify_ops {
67206 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
67207 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
67208 };
67209+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
67210
67211 /*
67212 * A group is a "thing" that wants to receive notification about filesystem
67213diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
67214index 4ec5e67..42f1eb9 100644
67215--- a/include/linux/ftrace_event.h
67216+++ b/include/linux/ftrace_event.h
67217@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
67218 int filter_type);
67219 extern int trace_define_common_fields(struct ftrace_event_call *call);
67220
67221-#define is_signed_type(type) (((type)(-1)) < 0)
67222+#define is_signed_type(type) (((type)(-1)) < (type)1)
67223
67224 int trace_set_clr_event(const char *system, const char *event, int set);
67225
67226diff --git a/include/linux/genhd.h b/include/linux/genhd.h
67227index 297df45..b6a74ff 100644
67228--- a/include/linux/genhd.h
67229+++ b/include/linux/genhd.h
67230@@ -161,7 +161,7 @@ struct gendisk {
67231
67232 struct timer_rand_state *random;
67233
67234- atomic_t sync_io; /* RAID */
67235+ atomic_unchecked_t sync_io; /* RAID */
67236 struct work_struct async_notify;
67237 #ifdef CONFIG_BLK_DEV_INTEGRITY
67238 struct blk_integrity *integrity;
67239diff --git a/include/linux/gracl.h b/include/linux/gracl.h
67240new file mode 100644
67241index 0000000..af663cf
67242--- /dev/null
67243+++ b/include/linux/gracl.h
67244@@ -0,0 +1,319 @@
67245+#ifndef GR_ACL_H
67246+#define GR_ACL_H
67247+
67248+#include <linux/grdefs.h>
67249+#include <linux/resource.h>
67250+#include <linux/capability.h>
67251+#include <linux/dcache.h>
67252+#include <asm/resource.h>
67253+
67254+/* Major status information */
67255+
67256+#define GR_VERSION "grsecurity 2.9"
67257+#define GRSECURITY_VERSION 0x2900
67258+
67259+enum {
67260+ GR_SHUTDOWN = 0,
67261+ GR_ENABLE = 1,
67262+ GR_SPROLE = 2,
67263+ GR_RELOAD = 3,
67264+ GR_SEGVMOD = 4,
67265+ GR_STATUS = 5,
67266+ GR_UNSPROLE = 6,
67267+ GR_PASSSET = 7,
67268+ GR_SPROLEPAM = 8,
67269+};
67270+
67271+/* Password setup definitions
67272+ * kernel/grhash.c */
67273+enum {
67274+ GR_PW_LEN = 128,
67275+ GR_SALT_LEN = 16,
67276+ GR_SHA_LEN = 32,
67277+};
67278+
67279+enum {
67280+ GR_SPROLE_LEN = 64,
67281+};
67282+
67283+enum {
67284+ GR_NO_GLOB = 0,
67285+ GR_REG_GLOB,
67286+ GR_CREATE_GLOB
67287+};
67288+
67289+#define GR_NLIMITS 32
67290+
67291+/* Begin Data Structures */
67292+
67293+struct sprole_pw {
67294+ unsigned char *rolename;
67295+ unsigned char salt[GR_SALT_LEN];
67296+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67297+};
67298+
67299+struct name_entry {
67300+ __u32 key;
67301+ ino_t inode;
67302+ dev_t device;
67303+ char *name;
67304+ __u16 len;
67305+ __u8 deleted;
67306+ struct name_entry *prev;
67307+ struct name_entry *next;
67308+};
67309+
67310+struct inodev_entry {
67311+ struct name_entry *nentry;
67312+ struct inodev_entry *prev;
67313+ struct inodev_entry *next;
67314+};
67315+
67316+struct acl_role_db {
67317+ struct acl_role_label **r_hash;
67318+ __u32 r_size;
67319+};
67320+
67321+struct inodev_db {
67322+ struct inodev_entry **i_hash;
67323+ __u32 i_size;
67324+};
67325+
67326+struct name_db {
67327+ struct name_entry **n_hash;
67328+ __u32 n_size;
67329+};
67330+
67331+struct crash_uid {
67332+ uid_t uid;
67333+ unsigned long expires;
67334+};
67335+
67336+struct gr_hash_struct {
67337+ void **table;
67338+ void **nametable;
67339+ void *first;
67340+ __u32 table_size;
67341+ __u32 used_size;
67342+ int type;
67343+};
67344+
67345+/* Userspace Grsecurity ACL data structures */
67346+
67347+struct acl_subject_label {
67348+ char *filename;
67349+ ino_t inode;
67350+ dev_t device;
67351+ __u32 mode;
67352+ kernel_cap_t cap_mask;
67353+ kernel_cap_t cap_lower;
67354+ kernel_cap_t cap_invert_audit;
67355+
67356+ struct rlimit res[GR_NLIMITS];
67357+ __u32 resmask;
67358+
67359+ __u8 user_trans_type;
67360+ __u8 group_trans_type;
67361+ uid_t *user_transitions;
67362+ gid_t *group_transitions;
67363+ __u16 user_trans_num;
67364+ __u16 group_trans_num;
67365+
67366+ __u32 sock_families[2];
67367+ __u32 ip_proto[8];
67368+ __u32 ip_type;
67369+ struct acl_ip_label **ips;
67370+ __u32 ip_num;
67371+ __u32 inaddr_any_override;
67372+
67373+ __u32 crashes;
67374+ unsigned long expires;
67375+
67376+ struct acl_subject_label *parent_subject;
67377+ struct gr_hash_struct *hash;
67378+ struct acl_subject_label *prev;
67379+ struct acl_subject_label *next;
67380+
67381+ struct acl_object_label **obj_hash;
67382+ __u32 obj_hash_size;
67383+ __u16 pax_flags;
67384+};
67385+
67386+struct role_allowed_ip {
67387+ __u32 addr;
67388+ __u32 netmask;
67389+
67390+ struct role_allowed_ip *prev;
67391+ struct role_allowed_ip *next;
67392+};
67393+
67394+struct role_transition {
67395+ char *rolename;
67396+
67397+ struct role_transition *prev;
67398+ struct role_transition *next;
67399+};
67400+
67401+struct acl_role_label {
67402+ char *rolename;
67403+ uid_t uidgid;
67404+ __u16 roletype;
67405+
67406+ __u16 auth_attempts;
67407+ unsigned long expires;
67408+
67409+ struct acl_subject_label *root_label;
67410+ struct gr_hash_struct *hash;
67411+
67412+ struct acl_role_label *prev;
67413+ struct acl_role_label *next;
67414+
67415+ struct role_transition *transitions;
67416+ struct role_allowed_ip *allowed_ips;
67417+ uid_t *domain_children;
67418+ __u16 domain_child_num;
67419+
67420+ mode_t umask;
67421+
67422+ struct acl_subject_label **subj_hash;
67423+ __u32 subj_hash_size;
67424+};
67425+
67426+struct user_acl_role_db {
67427+ struct acl_role_label **r_table;
67428+ __u32 num_pointers; /* Number of allocations to track */
67429+ __u32 num_roles; /* Number of roles */
67430+ __u32 num_domain_children; /* Number of domain children */
67431+ __u32 num_subjects; /* Number of subjects */
67432+ __u32 num_objects; /* Number of objects */
67433+};
67434+
67435+struct acl_object_label {
67436+ char *filename;
67437+ ino_t inode;
67438+ dev_t device;
67439+ __u32 mode;
67440+
67441+ struct acl_subject_label *nested;
67442+ struct acl_object_label *globbed;
67443+
67444+ /* next two structures not used */
67445+
67446+ struct acl_object_label *prev;
67447+ struct acl_object_label *next;
67448+};
67449+
67450+struct acl_ip_label {
67451+ char *iface;
67452+ __u32 addr;
67453+ __u32 netmask;
67454+ __u16 low, high;
67455+ __u8 mode;
67456+ __u32 type;
67457+ __u32 proto[8];
67458+
67459+ /* next two structures not used */
67460+
67461+ struct acl_ip_label *prev;
67462+ struct acl_ip_label *next;
67463+};
67464+
67465+struct gr_arg {
67466+ struct user_acl_role_db role_db;
67467+ unsigned char pw[GR_PW_LEN];
67468+ unsigned char salt[GR_SALT_LEN];
67469+ unsigned char sum[GR_SHA_LEN];
67470+ unsigned char sp_role[GR_SPROLE_LEN];
67471+ struct sprole_pw *sprole_pws;
67472+ dev_t segv_device;
67473+ ino_t segv_inode;
67474+ uid_t segv_uid;
67475+ __u16 num_sprole_pws;
67476+ __u16 mode;
67477+};
67478+
67479+struct gr_arg_wrapper {
67480+ struct gr_arg *arg;
67481+ __u32 version;
67482+ __u32 size;
67483+};
67484+
67485+struct subject_map {
67486+ struct acl_subject_label *user;
67487+ struct acl_subject_label *kernel;
67488+ struct subject_map *prev;
67489+ struct subject_map *next;
67490+};
67491+
67492+struct acl_subj_map_db {
67493+ struct subject_map **s_hash;
67494+ __u32 s_size;
67495+};
67496+
67497+/* End Data Structures Section */
67498+
67499+/* Hash functions generated by empirical testing by Brad Spengler
67500+ Makes good use of the low bits of the inode. Generally 0-1 times
67501+ in loop for successful match. 0-3 for unsuccessful match.
67502+ Shift/add algorithm with modulus of table size and an XOR*/
67503+
67504+static __inline__ unsigned int
67505+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67506+{
67507+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
67508+}
67509+
67510+ static __inline__ unsigned int
67511+shash(const struct acl_subject_label *userp, const unsigned int sz)
67512+{
67513+ return ((const unsigned long)userp % sz);
67514+}
67515+
67516+static __inline__ unsigned int
67517+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67518+{
67519+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67520+}
67521+
67522+static __inline__ unsigned int
67523+nhash(const char *name, const __u16 len, const unsigned int sz)
67524+{
67525+ return full_name_hash((const unsigned char *)name, len) % sz;
67526+}
67527+
67528+#define FOR_EACH_ROLE_START(role) \
67529+ role = role_list; \
67530+ while (role) {
67531+
67532+#define FOR_EACH_ROLE_END(role) \
67533+ role = role->prev; \
67534+ }
67535+
67536+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67537+ subj = NULL; \
67538+ iter = 0; \
67539+ while (iter < role->subj_hash_size) { \
67540+ if (subj == NULL) \
67541+ subj = role->subj_hash[iter]; \
67542+ if (subj == NULL) { \
67543+ iter++; \
67544+ continue; \
67545+ }
67546+
67547+#define FOR_EACH_SUBJECT_END(subj,iter) \
67548+ subj = subj->next; \
67549+ if (subj == NULL) \
67550+ iter++; \
67551+ }
67552+
67553+
67554+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
67555+ subj = role->hash->first; \
67556+ while (subj != NULL) {
67557+
67558+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
67559+ subj = subj->next; \
67560+ }
67561+
67562+#endif
67563+
67564diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67565new file mode 100644
67566index 0000000..323ecf2
67567--- /dev/null
67568+++ b/include/linux/gralloc.h
67569@@ -0,0 +1,9 @@
67570+#ifndef __GRALLOC_H
67571+#define __GRALLOC_H
67572+
67573+void acl_free_all(void);
67574+int acl_alloc_stack_init(unsigned long size);
67575+void *acl_alloc(unsigned long len);
67576+void *acl_alloc_num(unsigned long num, unsigned long len);
67577+
67578+#endif
67579diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67580new file mode 100644
67581index 0000000..70d6cd5
67582--- /dev/null
67583+++ b/include/linux/grdefs.h
67584@@ -0,0 +1,140 @@
67585+#ifndef GRDEFS_H
67586+#define GRDEFS_H
67587+
67588+/* Begin grsecurity status declarations */
67589+
67590+enum {
67591+ GR_READY = 0x01,
67592+ GR_STATUS_INIT = 0x00 // disabled state
67593+};
67594+
67595+/* Begin ACL declarations */
67596+
67597+/* Role flags */
67598+
67599+enum {
67600+ GR_ROLE_USER = 0x0001,
67601+ GR_ROLE_GROUP = 0x0002,
67602+ GR_ROLE_DEFAULT = 0x0004,
67603+ GR_ROLE_SPECIAL = 0x0008,
67604+ GR_ROLE_AUTH = 0x0010,
67605+ GR_ROLE_NOPW = 0x0020,
67606+ GR_ROLE_GOD = 0x0040,
67607+ GR_ROLE_LEARN = 0x0080,
67608+ GR_ROLE_TPE = 0x0100,
67609+ GR_ROLE_DOMAIN = 0x0200,
67610+ GR_ROLE_PAM = 0x0400,
67611+ GR_ROLE_PERSIST = 0x800
67612+};
67613+
67614+/* ACL Subject and Object mode flags */
67615+enum {
67616+ GR_DELETED = 0x80000000
67617+};
67618+
67619+/* ACL Object-only mode flags */
67620+enum {
67621+ GR_READ = 0x00000001,
67622+ GR_APPEND = 0x00000002,
67623+ GR_WRITE = 0x00000004,
67624+ GR_EXEC = 0x00000008,
67625+ GR_FIND = 0x00000010,
67626+ GR_INHERIT = 0x00000020,
67627+ GR_SETID = 0x00000040,
67628+ GR_CREATE = 0x00000080,
67629+ GR_DELETE = 0x00000100,
67630+ GR_LINK = 0x00000200,
67631+ GR_AUDIT_READ = 0x00000400,
67632+ GR_AUDIT_APPEND = 0x00000800,
67633+ GR_AUDIT_WRITE = 0x00001000,
67634+ GR_AUDIT_EXEC = 0x00002000,
67635+ GR_AUDIT_FIND = 0x00004000,
67636+ GR_AUDIT_INHERIT= 0x00008000,
67637+ GR_AUDIT_SETID = 0x00010000,
67638+ GR_AUDIT_CREATE = 0x00020000,
67639+ GR_AUDIT_DELETE = 0x00040000,
67640+ GR_AUDIT_LINK = 0x00080000,
67641+ GR_PTRACERD = 0x00100000,
67642+ GR_NOPTRACE = 0x00200000,
67643+ GR_SUPPRESS = 0x00400000,
67644+ GR_NOLEARN = 0x00800000,
67645+ GR_INIT_TRANSFER= 0x01000000
67646+};
67647+
67648+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67649+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67650+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67651+
67652+/* ACL subject-only mode flags */
67653+enum {
67654+ GR_KILL = 0x00000001,
67655+ GR_VIEW = 0x00000002,
67656+ GR_PROTECTED = 0x00000004,
67657+ GR_LEARN = 0x00000008,
67658+ GR_OVERRIDE = 0x00000010,
67659+ /* just a placeholder, this mode is only used in userspace */
67660+ GR_DUMMY = 0x00000020,
67661+ GR_PROTSHM = 0x00000040,
67662+ GR_KILLPROC = 0x00000080,
67663+ GR_KILLIPPROC = 0x00000100,
67664+ /* just a placeholder, this mode is only used in userspace */
67665+ GR_NOTROJAN = 0x00000200,
67666+ GR_PROTPROCFD = 0x00000400,
67667+ GR_PROCACCT = 0x00000800,
67668+ GR_RELAXPTRACE = 0x00001000,
67669+ GR_NESTED = 0x00002000,
67670+ GR_INHERITLEARN = 0x00004000,
67671+ GR_PROCFIND = 0x00008000,
67672+ GR_POVERRIDE = 0x00010000,
67673+ GR_KERNELAUTH = 0x00020000,
67674+ GR_ATSECURE = 0x00040000,
67675+ GR_SHMEXEC = 0x00080000
67676+};
67677+
67678+enum {
67679+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67680+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67681+ GR_PAX_ENABLE_MPROTECT = 0x0004,
67682+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
67683+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67684+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67685+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67686+ GR_PAX_DISABLE_MPROTECT = 0x0400,
67687+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
67688+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67689+};
67690+
67691+enum {
67692+ GR_ID_USER = 0x01,
67693+ GR_ID_GROUP = 0x02,
67694+};
67695+
67696+enum {
67697+ GR_ID_ALLOW = 0x01,
67698+ GR_ID_DENY = 0x02,
67699+};
67700+
67701+#define GR_CRASH_RES 31
67702+#define GR_UIDTABLE_MAX 500
67703+
67704+/* begin resource learning section */
67705+enum {
67706+ GR_RLIM_CPU_BUMP = 60,
67707+ GR_RLIM_FSIZE_BUMP = 50000,
67708+ GR_RLIM_DATA_BUMP = 10000,
67709+ GR_RLIM_STACK_BUMP = 1000,
67710+ GR_RLIM_CORE_BUMP = 10000,
67711+ GR_RLIM_RSS_BUMP = 500000,
67712+ GR_RLIM_NPROC_BUMP = 1,
67713+ GR_RLIM_NOFILE_BUMP = 5,
67714+ GR_RLIM_MEMLOCK_BUMP = 50000,
67715+ GR_RLIM_AS_BUMP = 500000,
67716+ GR_RLIM_LOCKS_BUMP = 2,
67717+ GR_RLIM_SIGPENDING_BUMP = 5,
67718+ GR_RLIM_MSGQUEUE_BUMP = 10000,
67719+ GR_RLIM_NICE_BUMP = 1,
67720+ GR_RLIM_RTPRIO_BUMP = 1,
67721+ GR_RLIM_RTTIME_BUMP = 1000000
67722+};
67723+
67724+#endif
67725diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67726new file mode 100644
67727index 0000000..3826b91
67728--- /dev/null
67729+++ b/include/linux/grinternal.h
67730@@ -0,0 +1,219 @@
67731+#ifndef __GRINTERNAL_H
67732+#define __GRINTERNAL_H
67733+
67734+#ifdef CONFIG_GRKERNSEC
67735+
67736+#include <linux/fs.h>
67737+#include <linux/mnt_namespace.h>
67738+#include <linux/nsproxy.h>
67739+#include <linux/gracl.h>
67740+#include <linux/grdefs.h>
67741+#include <linux/grmsg.h>
67742+
67743+void gr_add_learn_entry(const char *fmt, ...)
67744+ __attribute__ ((format (printf, 1, 2)));
67745+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67746+ const struct vfsmount *mnt);
67747+__u32 gr_check_create(const struct dentry *new_dentry,
67748+ const struct dentry *parent,
67749+ const struct vfsmount *mnt, const __u32 mode);
67750+int gr_check_protected_task(const struct task_struct *task);
67751+__u32 to_gr_audit(const __u32 reqmode);
67752+int gr_set_acls(const int type);
67753+int gr_apply_subject_to_task(struct task_struct *task);
67754+int gr_acl_is_enabled(void);
67755+char gr_roletype_to_char(void);
67756+
67757+void gr_handle_alertkill(struct task_struct *task);
67758+char *gr_to_filename(const struct dentry *dentry,
67759+ const struct vfsmount *mnt);
67760+char *gr_to_filename1(const struct dentry *dentry,
67761+ const struct vfsmount *mnt);
67762+char *gr_to_filename2(const struct dentry *dentry,
67763+ const struct vfsmount *mnt);
67764+char *gr_to_filename3(const struct dentry *dentry,
67765+ const struct vfsmount *mnt);
67766+
67767+extern int grsec_enable_ptrace_readexec;
67768+extern int grsec_enable_harden_ptrace;
67769+extern int grsec_enable_link;
67770+extern int grsec_enable_fifo;
67771+extern int grsec_enable_shm;
67772+extern int grsec_enable_execlog;
67773+extern int grsec_enable_signal;
67774+extern int grsec_enable_audit_ptrace;
67775+extern int grsec_enable_forkfail;
67776+extern int grsec_enable_time;
67777+extern int grsec_enable_rofs;
67778+extern int grsec_enable_chroot_shmat;
67779+extern int grsec_enable_chroot_mount;
67780+extern int grsec_enable_chroot_double;
67781+extern int grsec_enable_chroot_pivot;
67782+extern int grsec_enable_chroot_chdir;
67783+extern int grsec_enable_chroot_chmod;
67784+extern int grsec_enable_chroot_mknod;
67785+extern int grsec_enable_chroot_fchdir;
67786+extern int grsec_enable_chroot_nice;
67787+extern int grsec_enable_chroot_execlog;
67788+extern int grsec_enable_chroot_caps;
67789+extern int grsec_enable_chroot_sysctl;
67790+extern int grsec_enable_chroot_unix;
67791+extern int grsec_enable_tpe;
67792+extern int grsec_tpe_gid;
67793+extern int grsec_enable_tpe_all;
67794+extern int grsec_enable_tpe_invert;
67795+extern int grsec_enable_socket_all;
67796+extern int grsec_socket_all_gid;
67797+extern int grsec_enable_socket_client;
67798+extern int grsec_socket_client_gid;
67799+extern int grsec_enable_socket_server;
67800+extern int grsec_socket_server_gid;
67801+extern int grsec_audit_gid;
67802+extern int grsec_enable_group;
67803+extern int grsec_enable_audit_textrel;
67804+extern int grsec_enable_log_rwxmaps;
67805+extern int grsec_enable_mount;
67806+extern int grsec_enable_chdir;
67807+extern int grsec_resource_logging;
67808+extern int grsec_enable_blackhole;
67809+extern int grsec_lastack_retries;
67810+extern int grsec_enable_brute;
67811+extern int grsec_lock;
67812+
67813+extern spinlock_t grsec_alert_lock;
67814+extern unsigned long grsec_alert_wtime;
67815+extern unsigned long grsec_alert_fyet;
67816+
67817+extern spinlock_t grsec_audit_lock;
67818+
67819+extern rwlock_t grsec_exec_file_lock;
67820+
67821+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67822+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67823+ (tsk)->exec_file->f_vfsmnt) : "/")
67824+
67825+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67826+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67827+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67828+
67829+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67830+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
67831+ (tsk)->exec_file->f_vfsmnt) : "/")
67832+
67833+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67834+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67835+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67836+
67837+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67838+
67839+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67840+
67841+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67842+ (task)->pid, (cred)->uid, \
67843+ (cred)->euid, (cred)->gid, (cred)->egid, \
67844+ gr_parent_task_fullpath(task), \
67845+ (task)->real_parent->comm, (task)->real_parent->pid, \
67846+ (pcred)->uid, (pcred)->euid, \
67847+ (pcred)->gid, (pcred)->egid
67848+
67849+#define GR_CHROOT_CAPS {{ \
67850+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67851+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67852+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67853+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67854+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67855+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67856+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
67857+
67858+#define security_learn(normal_msg,args...) \
67859+({ \
67860+ read_lock(&grsec_exec_file_lock); \
67861+ gr_add_learn_entry(normal_msg "\n", ## args); \
67862+ read_unlock(&grsec_exec_file_lock); \
67863+})
67864+
67865+enum {
67866+ GR_DO_AUDIT,
67867+ GR_DONT_AUDIT,
67868+ GR_DONT_AUDIT_GOOD
67869+};
67870+
67871+enum {
67872+ GR_TTYSNIFF,
67873+ GR_RBAC,
67874+ GR_RBAC_STR,
67875+ GR_STR_RBAC,
67876+ GR_RBAC_MODE2,
67877+ GR_RBAC_MODE3,
67878+ GR_FILENAME,
67879+ GR_SYSCTL_HIDDEN,
67880+ GR_NOARGS,
67881+ GR_ONE_INT,
67882+ GR_ONE_INT_TWO_STR,
67883+ GR_ONE_STR,
67884+ GR_STR_INT,
67885+ GR_TWO_STR_INT,
67886+ GR_TWO_INT,
67887+ GR_TWO_U64,
67888+ GR_THREE_INT,
67889+ GR_FIVE_INT_TWO_STR,
67890+ GR_TWO_STR,
67891+ GR_THREE_STR,
67892+ GR_FOUR_STR,
67893+ GR_STR_FILENAME,
67894+ GR_FILENAME_STR,
67895+ GR_FILENAME_TWO_INT,
67896+ GR_FILENAME_TWO_INT_STR,
67897+ GR_TEXTREL,
67898+ GR_PTRACE,
67899+ GR_RESOURCE,
67900+ GR_CAP,
67901+ GR_SIG,
67902+ GR_SIG2,
67903+ GR_CRASH1,
67904+ GR_CRASH2,
67905+ GR_PSACCT,
67906+ GR_RWXMAP
67907+};
67908+
67909+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67910+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67911+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67912+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67913+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67914+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67915+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67916+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67917+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67918+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67919+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67920+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67921+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67922+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67923+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67924+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67925+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67926+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67927+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67928+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67929+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67930+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67931+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67932+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67933+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67934+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67935+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67936+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67937+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67938+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67939+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67940+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67941+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67942+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67943+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67944+
67945+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67946+
67947+#endif
67948+
67949+#endif
67950diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67951new file mode 100644
67952index 0000000..f885406
67953--- /dev/null
67954+++ b/include/linux/grmsg.h
67955@@ -0,0 +1,109 @@
67956+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67957+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67958+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67959+#define GR_STOPMOD_MSG "denied modification of module state by "
67960+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67961+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67962+#define GR_IOPERM_MSG "denied use of ioperm() by "
67963+#define GR_IOPL_MSG "denied use of iopl() by "
67964+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67965+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67966+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67967+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67968+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67969+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67970+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67971+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67972+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67973+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67974+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67975+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67976+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67977+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67978+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67979+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67980+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67981+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67982+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67983+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67984+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67985+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67986+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67987+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67988+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67989+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67990+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67991+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67992+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67993+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67994+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67995+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67996+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67997+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67998+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67999+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
68000+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
68001+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
68002+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
68003+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
68004+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
68005+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
68006+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
68007+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
68008+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
68009+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
68010+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
68011+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
68012+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
68013+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
68014+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
68015+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
68016+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
68017+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
68018+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
68019+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
68020+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
68021+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
68022+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
68023+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
68024+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
68025+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
68026+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
68027+#define GR_FAILFORK_MSG "failed fork with errno %s by "
68028+#define GR_NICE_CHROOT_MSG "denied priority change by "
68029+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
68030+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
68031+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
68032+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
68033+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
68034+#define GR_TIME_MSG "time set by "
68035+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
68036+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
68037+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
68038+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
68039+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
68040+#define GR_BIND_MSG "denied bind() by "
68041+#define GR_CONNECT_MSG "denied connect() by "
68042+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
68043+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
68044+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
68045+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
68046+#define GR_CAP_ACL_MSG "use of %s denied for "
68047+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
68048+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
68049+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
68050+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
68051+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
68052+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
68053+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
68054+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
68055+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
68056+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
68057+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
68058+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
68059+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
68060+#define GR_VM86_MSG "denied use of vm86 by "
68061+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
68062+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
68063+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
68064+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
68065diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
68066new file mode 100644
68067index 0000000..c1793ae
68068--- /dev/null
68069+++ b/include/linux/grsecurity.h
68070@@ -0,0 +1,219 @@
68071+#ifndef GR_SECURITY_H
68072+#define GR_SECURITY_H
68073+#include <linux/fs.h>
68074+#include <linux/fs_struct.h>
68075+#include <linux/binfmts.h>
68076+#include <linux/gracl.h>
68077+#include <linux/compat.h>
68078+
68079+/* notify of brain-dead configs */
68080+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68081+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
68082+#endif
68083+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
68084+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
68085+#endif
68086+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
68087+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
68088+#endif
68089+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
68090+#error "CONFIG_PAX enabled, but no PaX options are enabled."
68091+#endif
68092+
68093+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
68094+void gr_handle_brute_check(void);
68095+void gr_handle_kernel_exploit(void);
68096+int gr_process_user_ban(void);
68097+
68098+char gr_roletype_to_char(void);
68099+
68100+int gr_acl_enable_at_secure(void);
68101+
68102+int gr_check_user_change(int real, int effective, int fs);
68103+int gr_check_group_change(int real, int effective, int fs);
68104+
68105+void gr_del_task_from_ip_table(struct task_struct *p);
68106+
68107+int gr_pid_is_chrooted(struct task_struct *p);
68108+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
68109+int gr_handle_chroot_nice(void);
68110+int gr_handle_chroot_sysctl(const int op);
68111+int gr_handle_chroot_setpriority(struct task_struct *p,
68112+ const int niceval);
68113+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
68114+int gr_handle_chroot_chroot(const struct dentry *dentry,
68115+ const struct vfsmount *mnt);
68116+void gr_handle_chroot_chdir(struct path *path);
68117+int gr_handle_chroot_chmod(const struct dentry *dentry,
68118+ const struct vfsmount *mnt, const int mode);
68119+int gr_handle_chroot_mknod(const struct dentry *dentry,
68120+ const struct vfsmount *mnt, const int mode);
68121+int gr_handle_chroot_mount(const struct dentry *dentry,
68122+ const struct vfsmount *mnt,
68123+ const char *dev_name);
68124+int gr_handle_chroot_pivot(void);
68125+int gr_handle_chroot_unix(const pid_t pid);
68126+
68127+int gr_handle_rawio(const struct inode *inode);
68128+
68129+void gr_handle_ioperm(void);
68130+void gr_handle_iopl(void);
68131+
68132+umode_t gr_acl_umask(void);
68133+
68134+int gr_tpe_allow(const struct file *file);
68135+
68136+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
68137+void gr_clear_chroot_entries(struct task_struct *task);
68138+
68139+void gr_log_forkfail(const int retval);
68140+void gr_log_timechange(void);
68141+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
68142+void gr_log_chdir(const struct dentry *dentry,
68143+ const struct vfsmount *mnt);
68144+void gr_log_chroot_exec(const struct dentry *dentry,
68145+ const struct vfsmount *mnt);
68146+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
68147+#ifdef CONFIG_COMPAT
68148+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
68149+#endif
68150+void gr_log_remount(const char *devname, const int retval);
68151+void gr_log_unmount(const char *devname, const int retval);
68152+void gr_log_mount(const char *from, const char *to, const int retval);
68153+void gr_log_textrel(struct vm_area_struct *vma);
68154+void gr_log_rwxmmap(struct file *file);
68155+void gr_log_rwxmprotect(struct file *file);
68156+
68157+int gr_handle_follow_link(const struct inode *parent,
68158+ const struct inode *inode,
68159+ const struct dentry *dentry,
68160+ const struct vfsmount *mnt);
68161+int gr_handle_fifo(const struct dentry *dentry,
68162+ const struct vfsmount *mnt,
68163+ const struct dentry *dir, const int flag,
68164+ const int acc_mode);
68165+int gr_handle_hardlink(const struct dentry *dentry,
68166+ const struct vfsmount *mnt,
68167+ struct inode *inode,
68168+ const int mode, const char *to);
68169+
68170+int gr_is_capable(const int cap);
68171+int gr_is_capable_nolog(const int cap);
68172+void gr_learn_resource(const struct task_struct *task, const int limit,
68173+ const unsigned long wanted, const int gt);
68174+void gr_copy_label(struct task_struct *tsk);
68175+void gr_handle_crash(struct task_struct *task, const int sig);
68176+int gr_handle_signal(const struct task_struct *p, const int sig);
68177+int gr_check_crash_uid(const uid_t uid);
68178+int gr_check_protected_task(const struct task_struct *task);
68179+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
68180+int gr_acl_handle_mmap(const struct file *file,
68181+ const unsigned long prot);
68182+int gr_acl_handle_mprotect(const struct file *file,
68183+ const unsigned long prot);
68184+int gr_check_hidden_task(const struct task_struct *tsk);
68185+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
68186+ const struct vfsmount *mnt);
68187+__u32 gr_acl_handle_utime(const struct dentry *dentry,
68188+ const struct vfsmount *mnt);
68189+__u32 gr_acl_handle_access(const struct dentry *dentry,
68190+ const struct vfsmount *mnt, const int fmode);
68191+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
68192+ const struct vfsmount *mnt, umode_t *mode);
68193+__u32 gr_acl_handle_chown(const struct dentry *dentry,
68194+ const struct vfsmount *mnt);
68195+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
68196+ const struct vfsmount *mnt);
68197+int gr_handle_ptrace(struct task_struct *task, const long request);
68198+int gr_handle_proc_ptrace(struct task_struct *task);
68199+__u32 gr_acl_handle_execve(const struct dentry *dentry,
68200+ const struct vfsmount *mnt);
68201+int gr_check_crash_exec(const struct file *filp);
68202+int gr_acl_is_enabled(void);
68203+void gr_set_kernel_label(struct task_struct *task);
68204+void gr_set_role_label(struct task_struct *task, const uid_t uid,
68205+ const gid_t gid);
68206+int gr_set_proc_label(const struct dentry *dentry,
68207+ const struct vfsmount *mnt,
68208+ const int unsafe_flags);
68209+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
68210+ const struct vfsmount *mnt);
68211+__u32 gr_acl_handle_open(const struct dentry *dentry,
68212+ const struct vfsmount *mnt, int acc_mode);
68213+__u32 gr_acl_handle_creat(const struct dentry *dentry,
68214+ const struct dentry *p_dentry,
68215+ const struct vfsmount *p_mnt,
68216+ int open_flags, int acc_mode, const int imode);
68217+void gr_handle_create(const struct dentry *dentry,
68218+ const struct vfsmount *mnt);
68219+void gr_handle_proc_create(const struct dentry *dentry,
68220+ const struct inode *inode);
68221+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
68222+ const struct dentry *parent_dentry,
68223+ const struct vfsmount *parent_mnt,
68224+ const int mode);
68225+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
68226+ const struct dentry *parent_dentry,
68227+ const struct vfsmount *parent_mnt);
68228+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
68229+ const struct vfsmount *mnt);
68230+void gr_handle_delete(const ino_t ino, const dev_t dev);
68231+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
68232+ const struct vfsmount *mnt);
68233+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
68234+ const struct dentry *parent_dentry,
68235+ const struct vfsmount *parent_mnt,
68236+ const char *from);
68237+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
68238+ const struct dentry *parent_dentry,
68239+ const struct vfsmount *parent_mnt,
68240+ const struct dentry *old_dentry,
68241+ const struct vfsmount *old_mnt, const char *to);
68242+int gr_acl_handle_rename(struct dentry *new_dentry,
68243+ struct dentry *parent_dentry,
68244+ const struct vfsmount *parent_mnt,
68245+ struct dentry *old_dentry,
68246+ struct inode *old_parent_inode,
68247+ struct vfsmount *old_mnt, const char *newname);
68248+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68249+ struct dentry *old_dentry,
68250+ struct dentry *new_dentry,
68251+ struct vfsmount *mnt, const __u8 replace);
68252+__u32 gr_check_link(const struct dentry *new_dentry,
68253+ const struct dentry *parent_dentry,
68254+ const struct vfsmount *parent_mnt,
68255+ const struct dentry *old_dentry,
68256+ const struct vfsmount *old_mnt);
68257+int gr_acl_handle_filldir(const struct file *file, const char *name,
68258+ const unsigned int namelen, const ino_t ino);
68259+
68260+__u32 gr_acl_handle_unix(const struct dentry *dentry,
68261+ const struct vfsmount *mnt);
68262+void gr_acl_handle_exit(void);
68263+void gr_acl_handle_psacct(struct task_struct *task, const long code);
68264+int gr_acl_handle_procpidmem(const struct task_struct *task);
68265+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68266+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68267+void gr_audit_ptrace(struct task_struct *task);
68268+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68269+
68270+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68271+
68272+#ifdef CONFIG_GRKERNSEC
68273+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68274+void gr_handle_vm86(void);
68275+void gr_handle_mem_readwrite(u64 from, u64 to);
68276+
68277+void gr_log_badprocpid(const char *entry);
68278+
68279+extern int grsec_enable_dmesg;
68280+extern int grsec_disable_privio;
68281+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68282+extern int grsec_enable_chroot_findtask;
68283+#endif
68284+#ifdef CONFIG_GRKERNSEC_SETXID
68285+extern int grsec_enable_setxid;
68286+#endif
68287+#endif
68288+
68289+#endif
68290diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
68291index 6a87154..a3ce57b 100644
68292--- a/include/linux/hdpu_features.h
68293+++ b/include/linux/hdpu_features.h
68294@@ -3,7 +3,7 @@
68295 struct cpustate_t {
68296 spinlock_t lock;
68297 int excl;
68298- int open_count;
68299+ atomic_t open_count;
68300 unsigned char cached_val;
68301 int inited;
68302 unsigned long *set_addr;
68303diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68304index 211ff44..00ab6d7 100644
68305--- a/include/linux/highmem.h
68306+++ b/include/linux/highmem.h
68307@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
68308 kunmap_atomic(kaddr, KM_USER0);
68309 }
68310
68311+static inline void sanitize_highpage(struct page *page)
68312+{
68313+ void *kaddr;
68314+ unsigned long flags;
68315+
68316+ local_irq_save(flags);
68317+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
68318+ clear_page(kaddr);
68319+ kunmap_atomic(kaddr, KM_CLEARPAGE);
68320+ local_irq_restore(flags);
68321+}
68322+
68323 static inline void zero_user_segments(struct page *page,
68324 unsigned start1, unsigned end1,
68325 unsigned start2, unsigned end2)
68326diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68327index 7b40cda..24eb44e 100644
68328--- a/include/linux/i2c.h
68329+++ b/include/linux/i2c.h
68330@@ -325,6 +325,7 @@ struct i2c_algorithm {
68331 /* To determine what the adapter supports */
68332 u32 (*functionality) (struct i2c_adapter *);
68333 };
68334+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68335
68336 /*
68337 * i2c_adapter is the structure used to identify a physical i2c bus along
68338diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68339index 4c4e57d..f3c5303 100644
68340--- a/include/linux/i2o.h
68341+++ b/include/linux/i2o.h
68342@@ -564,7 +564,7 @@ struct i2o_controller {
68343 struct i2o_device *exec; /* Executive */
68344 #if BITS_PER_LONG == 64
68345 spinlock_t context_list_lock; /* lock for context_list */
68346- atomic_t context_list_counter; /* needed for unique contexts */
68347+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68348 struct list_head context_list; /* list of context id's
68349 and pointers */
68350 #endif
68351diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68352index 21a6f5d..dc42eab 100644
68353--- a/include/linux/init_task.h
68354+++ b/include/linux/init_task.h
68355@@ -83,6 +83,12 @@ extern struct group_info init_groups;
68356 #define INIT_IDS
68357 #endif
68358
68359+#ifdef CONFIG_X86
68360+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68361+#else
68362+#define INIT_TASK_THREAD_INFO
68363+#endif
68364+
68365 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
68366 /*
68367 * Because of the reduced scope of CAP_SETPCAP when filesystem
68368@@ -156,6 +162,7 @@ extern struct cred init_cred;
68369 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
68370 .comm = "swapper", \
68371 .thread = INIT_THREAD, \
68372+ INIT_TASK_THREAD_INFO \
68373 .fs = &init_fs, \
68374 .files = &init_files, \
68375 .signal = &init_signals, \
68376diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
68377index 4f0a72a..a849599 100644
68378--- a/include/linux/intel-iommu.h
68379+++ b/include/linux/intel-iommu.h
68380@@ -296,7 +296,7 @@ struct iommu_flush {
68381 u8 fm, u64 type);
68382 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
68383 unsigned int size_order, u64 type);
68384-};
68385+} __no_const;
68386
68387 enum {
68388 SR_DMAR_FECTL_REG,
68389diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68390index c739150..be577b5 100644
68391--- a/include/linux/interrupt.h
68392+++ b/include/linux/interrupt.h
68393@@ -369,7 +369,7 @@ enum
68394 /* map softirq index to softirq name. update 'softirq_to_name' in
68395 * kernel/softirq.c when adding a new softirq.
68396 */
68397-extern char *softirq_to_name[NR_SOFTIRQS];
68398+extern const char * const softirq_to_name[NR_SOFTIRQS];
68399
68400 /* softirq mask and active fields moved to irq_cpustat_t in
68401 * asm/hardirq.h to get better cache usage. KAO
68402@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68403
68404 struct softirq_action
68405 {
68406- void (*action)(struct softirq_action *);
68407+ void (*action)(void);
68408 };
68409
68410 asmlinkage void do_softirq(void);
68411 asmlinkage void __do_softirq(void);
68412-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68413+extern void open_softirq(int nr, void (*action)(void));
68414 extern void softirq_init(void);
68415 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
68416 extern void raise_softirq_irqoff(unsigned int nr);
68417diff --git a/include/linux/irq.h b/include/linux/irq.h
68418index 9e5f45a..025865b 100644
68419--- a/include/linux/irq.h
68420+++ b/include/linux/irq.h
68421@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
68422 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
68423 bool boot)
68424 {
68425+#ifdef CONFIG_CPUMASK_OFFSTACK
68426 gfp_t gfp = GFP_ATOMIC;
68427
68428 if (boot)
68429 gfp = GFP_NOWAIT;
68430
68431-#ifdef CONFIG_CPUMASK_OFFSTACK
68432 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
68433 return false;
68434
68435diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68436index 7922742..27306a2 100644
68437--- a/include/linux/kallsyms.h
68438+++ b/include/linux/kallsyms.h
68439@@ -15,7 +15,8 @@
68440
68441 struct module;
68442
68443-#ifdef CONFIG_KALLSYMS
68444+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68445+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68446 /* Lookup the address for a symbol. Returns 0 if not found. */
68447 unsigned long kallsyms_lookup_name(const char *name);
68448
68449@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68450 /* Stupid that this does nothing, but I didn't create this mess. */
68451 #define __print_symbol(fmt, addr)
68452 #endif /*CONFIG_KALLSYMS*/
68453+#else /* when included by kallsyms.c, vsnprintf.c, or
68454+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68455+extern void __print_symbol(const char *fmt, unsigned long address);
68456+extern int sprint_symbol(char *buffer, unsigned long address);
68457+const char *kallsyms_lookup(unsigned long addr,
68458+ unsigned long *symbolsize,
68459+ unsigned long *offset,
68460+ char **modname, char *namebuf);
68461+#endif
68462
68463 /* This macro allows us to keep printk typechecking */
68464 static void __check_printsym_format(const char *fmt, ...)
68465diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68466index 6adcc29..13369e8 100644
68467--- a/include/linux/kgdb.h
68468+++ b/include/linux/kgdb.h
68469@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
68470
68471 extern int kgdb_connected;
68472
68473-extern atomic_t kgdb_setting_breakpoint;
68474-extern atomic_t kgdb_cpu_doing_single_step;
68475+extern atomic_unchecked_t kgdb_setting_breakpoint;
68476+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68477
68478 extern struct task_struct *kgdb_usethread;
68479 extern struct task_struct *kgdb_contthread;
68480@@ -235,7 +235,7 @@ struct kgdb_arch {
68481 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
68482 void (*remove_all_hw_break)(void);
68483 void (*correct_hw_break)(void);
68484-};
68485+} __do_const;
68486
68487 /**
68488 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68489@@ -257,14 +257,14 @@ struct kgdb_io {
68490 int (*init) (void);
68491 void (*pre_exception) (void);
68492 void (*post_exception) (void);
68493-};
68494+} __do_const;
68495
68496-extern struct kgdb_arch arch_kgdb_ops;
68497+extern const struct kgdb_arch arch_kgdb_ops;
68498
68499 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
68500
68501-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
68502-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
68503+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
68504+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
68505
68506 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
68507 extern int kgdb_mem2hex(char *mem, char *buf, int count);
68508diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68509index 384ca8b..83dd97d 100644
68510--- a/include/linux/kmod.h
68511+++ b/include/linux/kmod.h
68512@@ -31,6 +31,8 @@
68513 * usually useless though. */
68514 extern int __request_module(bool wait, const char *name, ...) \
68515 __attribute__((format(printf, 2, 3)));
68516+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
68517+ __attribute__((format(printf, 3, 4)));
68518 #define request_module(mod...) __request_module(true, mod)
68519 #define request_module_nowait(mod...) __request_module(false, mod)
68520 #define try_then_request_module(x, mod...) \
68521diff --git a/include/linux/kobject.h b/include/linux/kobject.h
68522index 58ae8e0..3950d3c 100644
68523--- a/include/linux/kobject.h
68524+++ b/include/linux/kobject.h
68525@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
68526
68527 struct kobj_type {
68528 void (*release)(struct kobject *kobj);
68529- struct sysfs_ops *sysfs_ops;
68530+ const struct sysfs_ops *sysfs_ops;
68531 struct attribute **default_attrs;
68532 };
68533
68534@@ -118,9 +118,9 @@ struct kobj_uevent_env {
68535 };
68536
68537 struct kset_uevent_ops {
68538- int (*filter)(struct kset *kset, struct kobject *kobj);
68539- const char *(*name)(struct kset *kset, struct kobject *kobj);
68540- int (*uevent)(struct kset *kset, struct kobject *kobj,
68541+ int (* const filter)(struct kset *kset, struct kobject *kobj);
68542+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
68543+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
68544 struct kobj_uevent_env *env);
68545 };
68546
68547@@ -132,7 +132,7 @@ struct kobj_attribute {
68548 const char *buf, size_t count);
68549 };
68550
68551-extern struct sysfs_ops kobj_sysfs_ops;
68552+extern const struct sysfs_ops kobj_sysfs_ops;
68553
68554 /**
68555 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
68556@@ -155,14 +155,14 @@ struct kset {
68557 struct list_head list;
68558 spinlock_t list_lock;
68559 struct kobject kobj;
68560- struct kset_uevent_ops *uevent_ops;
68561+ const struct kset_uevent_ops *uevent_ops;
68562 };
68563
68564 extern void kset_init(struct kset *kset);
68565 extern int __must_check kset_register(struct kset *kset);
68566 extern void kset_unregister(struct kset *kset);
68567 extern struct kset * __must_check kset_create_and_add(const char *name,
68568- struct kset_uevent_ops *u,
68569+ const struct kset_uevent_ops *u,
68570 struct kobject *parent_kobj);
68571
68572 static inline struct kset *to_kset(struct kobject *kobj)
68573diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68574index c728a50..752d821 100644
68575--- a/include/linux/kvm_host.h
68576+++ b/include/linux/kvm_host.h
68577@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68578 void vcpu_load(struct kvm_vcpu *vcpu);
68579 void vcpu_put(struct kvm_vcpu *vcpu);
68580
68581-int kvm_init(void *opaque, unsigned int vcpu_size,
68582+int kvm_init(const void *opaque, unsigned int vcpu_size,
68583 struct module *module);
68584 void kvm_exit(void);
68585
68586@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68587 struct kvm_guest_debug *dbg);
68588 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68589
68590-int kvm_arch_init(void *opaque);
68591+int kvm_arch_init(const void *opaque);
68592 void kvm_arch_exit(void);
68593
68594 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68595diff --git a/include/linux/libata.h b/include/linux/libata.h
68596index a069916..223edde 100644
68597--- a/include/linux/libata.h
68598+++ b/include/linux/libata.h
68599@@ -525,11 +525,11 @@ struct ata_ioports {
68600
68601 struct ata_host {
68602 spinlock_t lock;
68603- struct device *dev;
68604+ struct device *dev;
68605 void __iomem * const *iomap;
68606 unsigned int n_ports;
68607 void *private_data;
68608- struct ata_port_operations *ops;
68609+ const struct ata_port_operations *ops;
68610 unsigned long flags;
68611 #ifdef CONFIG_ATA_ACPI
68612 acpi_handle acpi_handle;
68613@@ -710,7 +710,7 @@ struct ata_link {
68614
68615 struct ata_port {
68616 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
68617- struct ata_port_operations *ops;
68618+ const struct ata_port_operations *ops;
68619 spinlock_t *lock;
68620 /* Flags owned by the EH context. Only EH should touch these once the
68621 port is active */
68622@@ -884,7 +884,7 @@ struct ata_port_operations {
68623 * fields must be pointers.
68624 */
68625 const struct ata_port_operations *inherits;
68626-};
68627+} __do_const;
68628
68629 struct ata_port_info {
68630 unsigned long flags;
68631@@ -892,7 +892,7 @@ struct ata_port_info {
68632 unsigned long pio_mask;
68633 unsigned long mwdma_mask;
68634 unsigned long udma_mask;
68635- struct ata_port_operations *port_ops;
68636+ const struct ata_port_operations *port_ops;
68637 void *private_data;
68638 };
68639
68640@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
68641 extern const unsigned long sata_deb_timing_hotplug[];
68642 extern const unsigned long sata_deb_timing_long[];
68643
68644-extern struct ata_port_operations ata_dummy_port_ops;
68645+extern const struct ata_port_operations ata_dummy_port_ops;
68646 extern const struct ata_port_info ata_dummy_port_info;
68647
68648 static inline const unsigned long *
68649@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
68650 struct scsi_host_template *sht);
68651 extern void ata_host_detach(struct ata_host *host);
68652 extern void ata_host_init(struct ata_host *, struct device *,
68653- unsigned long, struct ata_port_operations *);
68654+ unsigned long, const struct ata_port_operations *);
68655 extern int ata_scsi_detect(struct scsi_host_template *sht);
68656 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
68657 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
68658diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
68659index fbc48f8..0886e57 100644
68660--- a/include/linux/lockd/bind.h
68661+++ b/include/linux/lockd/bind.h
68662@@ -23,13 +23,13 @@ struct svc_rqst;
68663 * This is the set of functions for lockd->nfsd communication
68664 */
68665 struct nlmsvc_binding {
68666- __be32 (*fopen)(struct svc_rqst *,
68667+ __be32 (* const fopen)(struct svc_rqst *,
68668 struct nfs_fh *,
68669 struct file **);
68670- void (*fclose)(struct file *);
68671+ void (* const fclose)(struct file *);
68672 };
68673
68674-extern struct nlmsvc_binding * nlmsvc_ops;
68675+extern const struct nlmsvc_binding * nlmsvc_ops;
68676
68677 /*
68678 * Similar to nfs_client_initdata, but without the NFS-specific
68679diff --git a/include/linux/mca.h b/include/linux/mca.h
68680index 3797270..7765ede 100644
68681--- a/include/linux/mca.h
68682+++ b/include/linux/mca.h
68683@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
68684 int region);
68685 void * (*mca_transform_memory)(struct mca_device *,
68686 void *memory);
68687-};
68688+} __no_const;
68689
68690 struct mca_bus {
68691 u64 default_dma_mask;
68692diff --git a/include/linux/memory.h b/include/linux/memory.h
68693index 37fa19b..b597c85 100644
68694--- a/include/linux/memory.h
68695+++ b/include/linux/memory.h
68696@@ -108,7 +108,7 @@ struct memory_accessor {
68697 size_t count);
68698 ssize_t (*write)(struct memory_accessor *, const char *buf,
68699 off_t offset, size_t count);
68700-};
68701+} __no_const;
68702
68703 /*
68704 * Kernel text modification mutex, used for code patching. Users of this lock
68705diff --git a/include/linux/mm.h b/include/linux/mm.h
68706index 11e5be6..1ff2423 100644
68707--- a/include/linux/mm.h
68708+++ b/include/linux/mm.h
68709@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
68710
68711 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
68712 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68713+
68714+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68715+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
68716+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
68717+#else
68718 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
68719+#endif
68720+
68721 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
68722 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
68723
68724@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68725 int set_page_dirty_lock(struct page *page);
68726 int clear_page_dirty_for_io(struct page *page);
68727
68728-/* Is the vma a continuation of the stack vma above it? */
68729-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68730-{
68731- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68732-}
68733-
68734 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68735 unsigned long old_addr, struct vm_area_struct *new_vma,
68736 unsigned long new_addr, unsigned long len);
68737@@ -890,6 +891,8 @@ struct shrinker {
68738 extern void register_shrinker(struct shrinker *);
68739 extern void unregister_shrinker(struct shrinker *);
68740
68741+pgprot_t vm_get_page_prot(unsigned long vm_flags);
68742+
68743 int vma_wants_writenotify(struct vm_area_struct *vma);
68744
68745 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68746@@ -1162,6 +1165,7 @@ out:
68747 }
68748
68749 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68750+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68751
68752 extern unsigned long do_brk(unsigned long, unsigned long);
68753
68754@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68755 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68756 struct vm_area_struct **pprev);
68757
68758+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68759+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68760+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68761+
68762 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68763 NULL if none. Assume start_addr < end_addr. */
68764 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68765@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68766 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68767 }
68768
68769-pgprot_t vm_get_page_prot(unsigned long vm_flags);
68770 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68771 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68772 unsigned long pfn, unsigned long size, pgprot_t);
68773@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68774 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68775 extern int sysctl_memory_failure_early_kill;
68776 extern int sysctl_memory_failure_recovery;
68777-extern atomic_long_t mce_bad_pages;
68778+extern atomic_long_unchecked_t mce_bad_pages;
68779+
68780+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68781+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68782+#else
68783+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68784+#endif
68785
68786 #endif /* __KERNEL__ */
68787 #endif /* _LINUX_MM_H */
68788diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68789index 9d12ed5..6d9707a 100644
68790--- a/include/linux/mm_types.h
68791+++ b/include/linux/mm_types.h
68792@@ -186,6 +186,8 @@ struct vm_area_struct {
68793 #ifdef CONFIG_NUMA
68794 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68795 #endif
68796+
68797+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68798 };
68799
68800 struct core_thread {
68801@@ -287,6 +289,24 @@ struct mm_struct {
68802 #ifdef CONFIG_MMU_NOTIFIER
68803 struct mmu_notifier_mm *mmu_notifier_mm;
68804 #endif
68805+
68806+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68807+ unsigned long pax_flags;
68808+#endif
68809+
68810+#ifdef CONFIG_PAX_DLRESOLVE
68811+ unsigned long call_dl_resolve;
68812+#endif
68813+
68814+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68815+ unsigned long call_syscall;
68816+#endif
68817+
68818+#ifdef CONFIG_PAX_ASLR
68819+ unsigned long delta_mmap; /* randomized offset */
68820+ unsigned long delta_stack; /* randomized offset */
68821+#endif
68822+
68823 };
68824
68825 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68826diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68827index 4e02ee2..afb159e 100644
68828--- a/include/linux/mmu_notifier.h
68829+++ b/include/linux/mmu_notifier.h
68830@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68831 */
68832 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68833 ({ \
68834- pte_t __pte; \
68835+ pte_t ___pte; \
68836 struct vm_area_struct *___vma = __vma; \
68837 unsigned long ___address = __address; \
68838- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68839+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68840 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68841- __pte; \
68842+ ___pte; \
68843 })
68844
68845 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68846diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68847index 6c31a2a..4b0e930 100644
68848--- a/include/linux/mmzone.h
68849+++ b/include/linux/mmzone.h
68850@@ -350,7 +350,7 @@ struct zone {
68851 unsigned long flags; /* zone flags, see below */
68852
68853 /* Zone statistics */
68854- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68855+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68856
68857 /*
68858 * prev_priority holds the scanning priority for this zone. It is
68859diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68860index f58e9d8..3503935 100644
68861--- a/include/linux/mod_devicetable.h
68862+++ b/include/linux/mod_devicetable.h
68863@@ -12,7 +12,7 @@
68864 typedef unsigned long kernel_ulong_t;
68865 #endif
68866
68867-#define PCI_ANY_ID (~0)
68868+#define PCI_ANY_ID ((__u16)~0)
68869
68870 struct pci_device_id {
68871 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68872@@ -131,7 +131,7 @@ struct usb_device_id {
68873 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68874 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68875
68876-#define HID_ANY_ID (~0)
68877+#define HID_ANY_ID (~0U)
68878
68879 struct hid_device_id {
68880 __u16 bus;
68881diff --git a/include/linux/module.h b/include/linux/module.h
68882index 482efc8..642032b 100644
68883--- a/include/linux/module.h
68884+++ b/include/linux/module.h
68885@@ -16,6 +16,7 @@
68886 #include <linux/kobject.h>
68887 #include <linux/moduleparam.h>
68888 #include <linux/tracepoint.h>
68889+#include <linux/fs.h>
68890
68891 #include <asm/local.h>
68892 #include <asm/module.h>
68893@@ -287,16 +288,16 @@ struct module
68894 int (*init)(void);
68895
68896 /* If this is non-NULL, vfree after init() returns */
68897- void *module_init;
68898+ void *module_init_rx, *module_init_rw;
68899
68900 /* Here is the actual code + data, vfree'd on unload. */
68901- void *module_core;
68902+ void *module_core_rx, *module_core_rw;
68903
68904 /* Here are the sizes of the init and core sections */
68905- unsigned int init_size, core_size;
68906+ unsigned int init_size_rw, core_size_rw;
68907
68908 /* The size of the executable code in each section. */
68909- unsigned int init_text_size, core_text_size;
68910+ unsigned int init_size_rx, core_size_rx;
68911
68912 /* Arch-specific module values */
68913 struct mod_arch_specific arch;
68914@@ -345,6 +346,10 @@ struct module
68915 #ifdef CONFIG_EVENT_TRACING
68916 struct ftrace_event_call *trace_events;
68917 unsigned int num_trace_events;
68918+ struct file_operations trace_id;
68919+ struct file_operations trace_enable;
68920+ struct file_operations trace_format;
68921+ struct file_operations trace_filter;
68922 #endif
68923 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68924 unsigned long *ftrace_callsites;
68925@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68926 bool is_module_address(unsigned long addr);
68927 bool is_module_text_address(unsigned long addr);
68928
68929+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68930+{
68931+
68932+#ifdef CONFIG_PAX_KERNEXEC
68933+ if (ktla_ktva(addr) >= (unsigned long)start &&
68934+ ktla_ktva(addr) < (unsigned long)start + size)
68935+ return 1;
68936+#endif
68937+
68938+ return ((void *)addr >= start && (void *)addr < start + size);
68939+}
68940+
68941+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68942+{
68943+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68944+}
68945+
68946+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68947+{
68948+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68949+}
68950+
68951+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68952+{
68953+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68954+}
68955+
68956+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68957+{
68958+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68959+}
68960+
68961 static inline int within_module_core(unsigned long addr, struct module *mod)
68962 {
68963- return (unsigned long)mod->module_core <= addr &&
68964- addr < (unsigned long)mod->module_core + mod->core_size;
68965+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68966 }
68967
68968 static inline int within_module_init(unsigned long addr, struct module *mod)
68969 {
68970- return (unsigned long)mod->module_init <= addr &&
68971- addr < (unsigned long)mod->module_init + mod->init_size;
68972+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68973 }
68974
68975 /* Search for module by name: must hold module_mutex. */
68976diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68977index c1f40c2..682ca53 100644
68978--- a/include/linux/moduleloader.h
68979+++ b/include/linux/moduleloader.h
68980@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68981 sections. Returns NULL on failure. */
68982 void *module_alloc(unsigned long size);
68983
68984+#ifdef CONFIG_PAX_KERNEXEC
68985+void *module_alloc_exec(unsigned long size);
68986+#else
68987+#define module_alloc_exec(x) module_alloc(x)
68988+#endif
68989+
68990 /* Free memory returned from module_alloc. */
68991 void module_free(struct module *mod, void *module_region);
68992
68993+#ifdef CONFIG_PAX_KERNEXEC
68994+void module_free_exec(struct module *mod, void *module_region);
68995+#else
68996+#define module_free_exec(x, y) module_free((x), (y))
68997+#endif
68998+
68999 /* Apply the given relocation to the (simplified) ELF. Return -error
69000 or 0. */
69001 int apply_relocate(Elf_Shdr *sechdrs,
69002diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
69003index 82a9124..8a5f622 100644
69004--- a/include/linux/moduleparam.h
69005+++ b/include/linux/moduleparam.h
69006@@ -132,7 +132,7 @@ struct kparam_array
69007
69008 /* Actually copy string: maxlen param is usually sizeof(string). */
69009 #define module_param_string(name, string, len, perm) \
69010- static const struct kparam_string __param_string_##name \
69011+ static const struct kparam_string __param_string_##name __used \
69012 = { len, string }; \
69013 __module_param_call(MODULE_PARAM_PREFIX, name, \
69014 param_set_copystring, param_get_string, \
69015@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
69016
69017 /* Comma-separated array: *nump is set to number they actually specified. */
69018 #define module_param_array_named(name, array, type, nump, perm) \
69019- static const struct kparam_array __param_arr_##name \
69020+ static const struct kparam_array __param_arr_##name __used \
69021 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
69022 sizeof(array[0]), array }; \
69023 __module_param_call(MODULE_PARAM_PREFIX, name, \
69024diff --git a/include/linux/mutex.h b/include/linux/mutex.h
69025index 878cab4..c92cb3e 100644
69026--- a/include/linux/mutex.h
69027+++ b/include/linux/mutex.h
69028@@ -51,7 +51,7 @@ struct mutex {
69029 spinlock_t wait_lock;
69030 struct list_head wait_list;
69031 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
69032- struct thread_info *owner;
69033+ struct task_struct *owner;
69034 #endif
69035 #ifdef CONFIG_DEBUG_MUTEXES
69036 const char *name;
69037diff --git a/include/linux/namei.h b/include/linux/namei.h
69038index ec0f607..d19e675 100644
69039--- a/include/linux/namei.h
69040+++ b/include/linux/namei.h
69041@@ -22,7 +22,7 @@ struct nameidata {
69042 unsigned int flags;
69043 int last_type;
69044 unsigned depth;
69045- char *saved_names[MAX_NESTED_LINKS + 1];
69046+ const char *saved_names[MAX_NESTED_LINKS + 1];
69047
69048 /* Intent data */
69049 union {
69050@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
69051 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
69052 extern void unlock_rename(struct dentry *, struct dentry *);
69053
69054-static inline void nd_set_link(struct nameidata *nd, char *path)
69055+static inline void nd_set_link(struct nameidata *nd, const char *path)
69056 {
69057 nd->saved_names[nd->depth] = path;
69058 }
69059
69060-static inline char *nd_get_link(struct nameidata *nd)
69061+static inline const char *nd_get_link(const struct nameidata *nd)
69062 {
69063 return nd->saved_names[nd->depth];
69064 }
69065diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
69066index 9d7e8f7..04428c5 100644
69067--- a/include/linux/netdevice.h
69068+++ b/include/linux/netdevice.h
69069@@ -637,6 +637,7 @@ struct net_device_ops {
69070 u16 xid);
69071 #endif
69072 };
69073+typedef struct net_device_ops __no_const net_device_ops_no_const;
69074
69075 /*
69076 * The DEVICE structure.
69077diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
69078new file mode 100644
69079index 0000000..33f4af8
69080--- /dev/null
69081+++ b/include/linux/netfilter/xt_gradm.h
69082@@ -0,0 +1,9 @@
69083+#ifndef _LINUX_NETFILTER_XT_GRADM_H
69084+#define _LINUX_NETFILTER_XT_GRADM_H 1
69085+
69086+struct xt_gradm_mtinfo {
69087+ __u16 flags;
69088+ __u16 invflags;
69089+};
69090+
69091+#endif
69092diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
69093index b359c4a..c08b334 100644
69094--- a/include/linux/nodemask.h
69095+++ b/include/linux/nodemask.h
69096@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
69097
69098 #define any_online_node(mask) \
69099 ({ \
69100- int node; \
69101- for_each_node_mask(node, (mask)) \
69102- if (node_online(node)) \
69103+ int __node; \
69104+ for_each_node_mask(__node, (mask)) \
69105+ if (node_online(__node)) \
69106 break; \
69107- node; \
69108+ __node; \
69109 })
69110
69111 #define num_online_nodes() num_node_state(N_ONLINE)
69112diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
69113index 5171639..7cf4235 100644
69114--- a/include/linux/oprofile.h
69115+++ b/include/linux/oprofile.h
69116@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
69117 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
69118 char const * name, ulong * val);
69119
69120-/** Create a file for read-only access to an atomic_t. */
69121+/** Create a file for read-only access to an atomic_unchecked_t. */
69122 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
69123- char const * name, atomic_t * val);
69124+ char const * name, atomic_unchecked_t * val);
69125
69126 /** create a directory */
69127 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
69128diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
69129index 3c62ed4..8924c7c 100644
69130--- a/include/linux/pagemap.h
69131+++ b/include/linux/pagemap.h
69132@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
69133 if (((unsigned long)uaddr & PAGE_MASK) !=
69134 ((unsigned long)end & PAGE_MASK))
69135 ret = __get_user(c, end);
69136+ (void)c;
69137 }
69138+ (void)c;
69139 return ret;
69140 }
69141
69142diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
69143index 81c9689..a567a55 100644
69144--- a/include/linux/perf_event.h
69145+++ b/include/linux/perf_event.h
69146@@ -476,7 +476,7 @@ struct hw_perf_event {
69147 struct hrtimer hrtimer;
69148 };
69149 };
69150- atomic64_t prev_count;
69151+ atomic64_unchecked_t prev_count;
69152 u64 sample_period;
69153 u64 last_period;
69154 atomic64_t period_left;
69155@@ -557,7 +557,7 @@ struct perf_event {
69156 const struct pmu *pmu;
69157
69158 enum perf_event_active_state state;
69159- atomic64_t count;
69160+ atomic64_unchecked_t count;
69161
69162 /*
69163 * These are the total time in nanoseconds that the event
69164@@ -595,8 +595,8 @@ struct perf_event {
69165 * These accumulate total time (in nanoseconds) that children
69166 * events have been enabled and running, respectively.
69167 */
69168- atomic64_t child_total_time_enabled;
69169- atomic64_t child_total_time_running;
69170+ atomic64_unchecked_t child_total_time_enabled;
69171+ atomic64_unchecked_t child_total_time_running;
69172
69173 /*
69174 * Protect attach/detach and child_list:
69175diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
69176index b43a9e0..b77d869 100644
69177--- a/include/linux/pipe_fs_i.h
69178+++ b/include/linux/pipe_fs_i.h
69179@@ -46,9 +46,9 @@ struct pipe_inode_info {
69180 wait_queue_head_t wait;
69181 unsigned int nrbufs, curbuf;
69182 struct page *tmp_page;
69183- unsigned int readers;
69184- unsigned int writers;
69185- unsigned int waiting_writers;
69186+ atomic_t readers;
69187+ atomic_t writers;
69188+ atomic_t waiting_writers;
69189 unsigned int r_counter;
69190 unsigned int w_counter;
69191 struct fasync_struct *fasync_readers;
69192diff --git a/include/linux/poison.h b/include/linux/poison.h
69193index 34066ff..e95d744 100644
69194--- a/include/linux/poison.h
69195+++ b/include/linux/poison.h
69196@@ -19,8 +19,8 @@
69197 * under normal circumstances, used to verify that nobody uses
69198 * non-initialized list entries.
69199 */
69200-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
69201-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
69202+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
69203+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
69204
69205 /********** include/linux/timer.h **********/
69206 /*
69207diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
69208index 4f71bf4..cd2f68e 100644
69209--- a/include/linux/posix-timers.h
69210+++ b/include/linux/posix-timers.h
69211@@ -82,7 +82,8 @@ struct k_clock {
69212 #define TIMER_RETRY 1
69213 void (*timer_get) (struct k_itimer * timr,
69214 struct itimerspec * cur_setting);
69215-};
69216+} __do_const;
69217+typedef struct k_clock __no_const k_clock_no_const;
69218
69219 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
69220
69221diff --git a/include/linux/preempt.h b/include/linux/preempt.h
69222index 72b1a10..13303a9 100644
69223--- a/include/linux/preempt.h
69224+++ b/include/linux/preempt.h
69225@@ -110,7 +110,7 @@ struct preempt_ops {
69226 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
69227 void (*sched_out)(struct preempt_notifier *notifier,
69228 struct task_struct *next);
69229-};
69230+} __no_const;
69231
69232 /**
69233 * preempt_notifier - key for installing preemption notifiers
69234diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
69235index 379eaed..1bf73e3 100644
69236--- a/include/linux/proc_fs.h
69237+++ b/include/linux/proc_fs.h
69238@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
69239 return proc_create_data(name, mode, parent, proc_fops, NULL);
69240 }
69241
69242+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
69243+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
69244+{
69245+#ifdef CONFIG_GRKERNSEC_PROC_USER
69246+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
69247+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69248+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
69249+#else
69250+ return proc_create_data(name, mode, parent, proc_fops, NULL);
69251+#endif
69252+}
69253+
69254+
69255 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
69256 mode_t mode, struct proc_dir_entry *base,
69257 read_proc_t *read_proc, void * data)
69258@@ -256,7 +269,7 @@ union proc_op {
69259 int (*proc_show)(struct seq_file *m,
69260 struct pid_namespace *ns, struct pid *pid,
69261 struct task_struct *task);
69262-};
69263+} __no_const;
69264
69265 struct ctl_table_header;
69266 struct ctl_table;
69267diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
69268index 7456d7d..6c1cfc9 100644
69269--- a/include/linux/ptrace.h
69270+++ b/include/linux/ptrace.h
69271@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
69272 extern void exit_ptrace(struct task_struct *tracer);
69273 #define PTRACE_MODE_READ 1
69274 #define PTRACE_MODE_ATTACH 2
69275-/* Returns 0 on success, -errno on denial. */
69276-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
69277 /* Returns true on success, false on denial. */
69278 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
69279+/* Returns true on success, false on denial. */
69280+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
69281
69282 static inline int ptrace_reparented(struct task_struct *child)
69283 {
69284diff --git a/include/linux/random.h b/include/linux/random.h
69285index 2948046..3262567 100644
69286--- a/include/linux/random.h
69287+++ b/include/linux/random.h
69288@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
69289 u32 random32(void);
69290 void srandom32(u32 seed);
69291
69292+static inline unsigned long pax_get_random_long(void)
69293+{
69294+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
69295+}
69296+
69297 #endif /* __KERNEL___ */
69298
69299 #endif /* _LINUX_RANDOM_H */
69300diff --git a/include/linux/reboot.h b/include/linux/reboot.h
69301index 988e55f..17cb4ef 100644
69302--- a/include/linux/reboot.h
69303+++ b/include/linux/reboot.h
69304@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69305 * Architecture-specific implementations of sys_reboot commands.
69306 */
69307
69308-extern void machine_restart(char *cmd);
69309-extern void machine_halt(void);
69310-extern void machine_power_off(void);
69311+extern void machine_restart(char *cmd) __noreturn;
69312+extern void machine_halt(void) __noreturn;
69313+extern void machine_power_off(void) __noreturn;
69314
69315 extern void machine_shutdown(void);
69316 struct pt_regs;
69317@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69318 */
69319
69320 extern void kernel_restart_prepare(char *cmd);
69321-extern void kernel_restart(char *cmd);
69322-extern void kernel_halt(void);
69323-extern void kernel_power_off(void);
69324+extern void kernel_restart(char *cmd) __noreturn;
69325+extern void kernel_halt(void) __noreturn;
69326+extern void kernel_power_off(void) __noreturn;
69327
69328 void ctrl_alt_del(void);
69329
69330@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
69331 * Emergency restart, callable from an interrupt handler.
69332 */
69333
69334-extern void emergency_restart(void);
69335+extern void emergency_restart(void) __noreturn;
69336 #include <asm/emergency-restart.h>
69337
69338 #endif
69339diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
69340index dd31e7b..5b03c5c 100644
69341--- a/include/linux/reiserfs_fs.h
69342+++ b/include/linux/reiserfs_fs.h
69343@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
69344 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
69345
69346 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
69347-#define get_generation(s) atomic_read (&fs_generation(s))
69348+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
69349 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
69350 #define __fs_changed(gen,s) (gen != get_generation (s))
69351 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
69352@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
69353 */
69354
69355 struct item_operations {
69356- int (*bytes_number) (struct item_head * ih, int block_size);
69357- void (*decrement_key) (struct cpu_key *);
69358- int (*is_left_mergeable) (struct reiserfs_key * ih,
69359+ int (* const bytes_number) (struct item_head * ih, int block_size);
69360+ void (* const decrement_key) (struct cpu_key *);
69361+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
69362 unsigned long bsize);
69363- void (*print_item) (struct item_head *, char *item);
69364- void (*check_item) (struct item_head *, char *item);
69365+ void (* const print_item) (struct item_head *, char *item);
69366+ void (* const check_item) (struct item_head *, char *item);
69367
69368- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69369+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
69370 int is_affected, int insert_size);
69371- int (*check_left) (struct virtual_item * vi, int free,
69372+ int (* const check_left) (struct virtual_item * vi, int free,
69373 int start_skip, int end_skip);
69374- int (*check_right) (struct virtual_item * vi, int free);
69375- int (*part_size) (struct virtual_item * vi, int from, int to);
69376- int (*unit_num) (struct virtual_item * vi);
69377- void (*print_vi) (struct virtual_item * vi);
69378+ int (* const check_right) (struct virtual_item * vi, int free);
69379+ int (* const part_size) (struct virtual_item * vi, int from, int to);
69380+ int (* const unit_num) (struct virtual_item * vi);
69381+ void (* const print_vi) (struct virtual_item * vi);
69382 };
69383
69384-extern struct item_operations *item_ops[TYPE_ANY + 1];
69385+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
69386
69387 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
69388 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
69389diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
69390index dab68bb..0688727 100644
69391--- a/include/linux/reiserfs_fs_sb.h
69392+++ b/include/linux/reiserfs_fs_sb.h
69393@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
69394 /* Comment? -Hans */
69395 wait_queue_head_t s_wait;
69396 /* To be obsoleted soon by per buffer seals.. -Hans */
69397- atomic_t s_generation_counter; // increased by one every time the
69398+ atomic_unchecked_t s_generation_counter; // increased by one every time the
69399 // tree gets re-balanced
69400 unsigned long s_properties; /* File system properties. Currently holds
69401 on-disk FS format */
69402diff --git a/include/linux/relay.h b/include/linux/relay.h
69403index 14a86bc..17d0700 100644
69404--- a/include/linux/relay.h
69405+++ b/include/linux/relay.h
69406@@ -159,7 +159,7 @@ struct rchan_callbacks
69407 * The callback should return 0 if successful, negative if not.
69408 */
69409 int (*remove_buf_file)(struct dentry *dentry);
69410-};
69411+} __no_const;
69412
69413 /*
69414 * CONFIG_RELAY kernel API, kernel/relay.c
69415diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
69416index 3392c59..a746428 100644
69417--- a/include/linux/rfkill.h
69418+++ b/include/linux/rfkill.h
69419@@ -144,6 +144,7 @@ struct rfkill_ops {
69420 void (*query)(struct rfkill *rfkill, void *data);
69421 int (*set_block)(void *data, bool blocked);
69422 };
69423+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
69424
69425 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
69426 /**
69427diff --git a/include/linux/sched.h b/include/linux/sched.h
69428index 71849bf..2ef383dc3 100644
69429--- a/include/linux/sched.h
69430+++ b/include/linux/sched.h
69431@@ -101,6 +101,7 @@ struct bio;
69432 struct fs_struct;
69433 struct bts_context;
69434 struct perf_event_context;
69435+struct linux_binprm;
69436
69437 /*
69438 * List of flags we want to share for kernel threads,
69439@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
69440 extern signed long schedule_timeout_uninterruptible(signed long timeout);
69441 asmlinkage void __schedule(void);
69442 asmlinkage void schedule(void);
69443-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
69444+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
69445
69446 struct nsproxy;
69447 struct user_namespace;
69448@@ -371,9 +372,12 @@ struct user_namespace;
69449 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69450
69451 extern int sysctl_max_map_count;
69452+extern unsigned long sysctl_heap_stack_gap;
69453
69454 #include <linux/aio.h>
69455
69456+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
69457+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
69458 extern unsigned long
69459 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69460 unsigned long, unsigned long);
69461@@ -666,6 +670,16 @@ struct signal_struct {
69462 struct tty_audit_buf *tty_audit_buf;
69463 #endif
69464
69465+#ifdef CONFIG_GRKERNSEC
69466+ u32 curr_ip;
69467+ u32 saved_ip;
69468+ u32 gr_saddr;
69469+ u32 gr_daddr;
69470+ u16 gr_sport;
69471+ u16 gr_dport;
69472+ u8 used_accept:1;
69473+#endif
69474+
69475 int oom_adj; /* OOM kill score adjustment (bit shift) */
69476 };
69477
69478@@ -723,6 +737,11 @@ struct user_struct {
69479 struct key *session_keyring; /* UID's default session keyring */
69480 #endif
69481
69482+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69483+ unsigned int banned;
69484+ unsigned long ban_expires;
69485+#endif
69486+
69487 /* Hash table maintenance information */
69488 struct hlist_node uidhash_node;
69489 uid_t uid;
69490@@ -1328,8 +1347,8 @@ struct task_struct {
69491 struct list_head thread_group;
69492
69493 struct completion *vfork_done; /* for vfork() */
69494- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
69495- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69496+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
69497+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69498
69499 cputime_t utime, stime, utimescaled, stimescaled;
69500 cputime_t gtime;
69501@@ -1343,16 +1362,6 @@ struct task_struct {
69502 struct task_cputime cputime_expires;
69503 struct list_head cpu_timers[3];
69504
69505-/* process credentials */
69506- const struct cred *real_cred; /* objective and real subjective task
69507- * credentials (COW) */
69508- const struct cred *cred; /* effective (overridable) subjective task
69509- * credentials (COW) */
69510- struct mutex cred_guard_mutex; /* guard against foreign influences on
69511- * credential calculations
69512- * (notably. ptrace) */
69513- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69514-
69515 char comm[TASK_COMM_LEN]; /* executable name excluding path
69516 - access with [gs]et_task_comm (which lock
69517 it with task_lock())
69518@@ -1369,6 +1378,10 @@ struct task_struct {
69519 #endif
69520 /* CPU-specific state of this task */
69521 struct thread_struct thread;
69522+/* thread_info moved to task_struct */
69523+#ifdef CONFIG_X86
69524+ struct thread_info tinfo;
69525+#endif
69526 /* filesystem information */
69527 struct fs_struct *fs;
69528 /* open file information */
69529@@ -1436,6 +1449,15 @@ struct task_struct {
69530 int hardirq_context;
69531 int softirq_context;
69532 #endif
69533+
69534+/* process credentials */
69535+ const struct cred *real_cred; /* objective and real subjective task
69536+ * credentials (COW) */
69537+ struct mutex cred_guard_mutex; /* guard against foreign influences on
69538+ * credential calculations
69539+ * (notably. ptrace) */
69540+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
69541+
69542 #ifdef CONFIG_LOCKDEP
69543 # define MAX_LOCK_DEPTH 48UL
69544 u64 curr_chain_key;
69545@@ -1456,6 +1478,9 @@ struct task_struct {
69546
69547 struct backing_dev_info *backing_dev_info;
69548
69549+ const struct cred *cred; /* effective (overridable) subjective task
69550+ * credentials (COW) */
69551+
69552 struct io_context *io_context;
69553
69554 unsigned long ptrace_message;
69555@@ -1519,6 +1544,27 @@ struct task_struct {
69556 unsigned long default_timer_slack_ns;
69557
69558 struct list_head *scm_work_list;
69559+
69560+#ifdef CONFIG_GRKERNSEC
69561+ /* grsecurity */
69562+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69563+ u64 exec_id;
69564+#endif
69565+#ifdef CONFIG_GRKERNSEC_SETXID
69566+ const struct cred *delayed_cred;
69567+#endif
69568+ struct dentry *gr_chroot_dentry;
69569+ struct acl_subject_label *acl;
69570+ struct acl_role_label *role;
69571+ struct file *exec_file;
69572+ u16 acl_role_id;
69573+ /* is this the task that authenticated to the special role */
69574+ u8 acl_sp_role;
69575+ u8 is_writable;
69576+ u8 brute;
69577+ u8 gr_is_chrooted;
69578+#endif
69579+
69580 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
69581 /* Index of current stored adress in ret_stack */
69582 int curr_ret_stack;
69583@@ -1542,6 +1588,57 @@ struct task_struct {
69584 #endif /* CONFIG_TRACING */
69585 };
69586
69587+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69588+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69589+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69590+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69591+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69592+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69593+
69594+#ifdef CONFIG_PAX_SOFTMODE
69595+extern int pax_softmode;
69596+#endif
69597+
69598+extern int pax_check_flags(unsigned long *);
69599+
69600+/* if tsk != current then task_lock must be held on it */
69601+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69602+static inline unsigned long pax_get_flags(struct task_struct *tsk)
69603+{
69604+ if (likely(tsk->mm))
69605+ return tsk->mm->pax_flags;
69606+ else
69607+ return 0UL;
69608+}
69609+
69610+/* if tsk != current then task_lock must be held on it */
69611+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69612+{
69613+ if (likely(tsk->mm)) {
69614+ tsk->mm->pax_flags = flags;
69615+ return 0;
69616+ }
69617+ return -EINVAL;
69618+}
69619+#endif
69620+
69621+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69622+extern void pax_set_initial_flags(struct linux_binprm *bprm);
69623+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69624+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69625+#endif
69626+
69627+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69628+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69629+extern void pax_report_refcount_overflow(struct pt_regs *regs);
69630+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
69631+
69632+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69633+extern void pax_track_stack(void);
69634+#else
69635+static inline void pax_track_stack(void) {}
69636+#endif
69637+
69638 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69639 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
69640
69641@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
69642 #define PF_DUMPCORE 0x00000200 /* dumped core */
69643 #define PF_SIGNALED 0x00000400 /* killed by a signal */
69644 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
69645-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
69646+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
69647 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
69648 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
69649 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
69650@@ -1978,7 +2075,9 @@ void yield(void);
69651 extern struct exec_domain default_exec_domain;
69652
69653 union thread_union {
69654+#ifndef CONFIG_X86
69655 struct thread_info thread_info;
69656+#endif
69657 unsigned long stack[THREAD_SIZE/sizeof(long)];
69658 };
69659
69660@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
69661 */
69662
69663 extern struct task_struct *find_task_by_vpid(pid_t nr);
69664+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69665 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69666 struct pid_namespace *ns);
69667
69668@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69669 extern void exit_itimers(struct signal_struct *);
69670 extern void flush_itimer_signals(void);
69671
69672-extern NORET_TYPE void do_group_exit(int);
69673+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
69674
69675 extern void daemonize(const char *, ...);
69676 extern int allow_signal(int);
69677@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69678
69679 #endif
69680
69681-static inline int object_is_on_stack(void *obj)
69682+static inline int object_starts_on_stack(void *obj)
69683 {
69684- void *stack = task_stack_page(current);
69685+ const void *stack = task_stack_page(current);
69686
69687 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69688 }
69689
69690+#ifdef CONFIG_PAX_USERCOPY
69691+extern int object_is_on_stack(const void *obj, unsigned long len);
69692+#endif
69693+
69694 extern void thread_info_cache_init(void);
69695
69696 #ifdef CONFIG_DEBUG_STACK_USAGE
69697diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
69698index 1ee2c05..81b7ec4 100644
69699--- a/include/linux/screen_info.h
69700+++ b/include/linux/screen_info.h
69701@@ -42,7 +42,8 @@ struct screen_info {
69702 __u16 pages; /* 0x32 */
69703 __u16 vesa_attributes; /* 0x34 */
69704 __u32 capabilities; /* 0x36 */
69705- __u8 _reserved[6]; /* 0x3a */
69706+ __u16 vesapm_size; /* 0x3a */
69707+ __u8 _reserved[4]; /* 0x3c */
69708 } __attribute__((packed));
69709
69710 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
69711diff --git a/include/linux/security.h b/include/linux/security.h
69712index d40d23f..d739b08 100644
69713--- a/include/linux/security.h
69714+++ b/include/linux/security.h
69715@@ -34,6 +34,7 @@
69716 #include <linux/key.h>
69717 #include <linux/xfrm.h>
69718 #include <linux/gfp.h>
69719+#include <linux/grsecurity.h>
69720 #include <net/flow.h>
69721
69722 /* Maximum number of letters for an LSM name string */
69723@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
69724 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69725 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69726 extern int cap_task_setnice(struct task_struct *p, int nice);
69727-extern int cap_syslog(int type);
69728+extern int cap_syslog(int type, bool from_file);
69729 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69730
69731 struct msghdr;
69732@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69733 * logging to the console.
69734 * See the syslog(2) manual page for an explanation of the @type values.
69735 * @type contains the type of action.
69736+ * @from_file indicates the context of action (if it came from /proc).
69737 * Return 0 if permission is granted.
69738 * @settime:
69739 * Check permission to change the system time.
69740@@ -1445,7 +1447,7 @@ struct security_operations {
69741 int (*sysctl) (struct ctl_table *table, int op);
69742 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69743 int (*quota_on) (struct dentry *dentry);
69744- int (*syslog) (int type);
69745+ int (*syslog) (int type, bool from_file);
69746 int (*settime) (struct timespec *ts, struct timezone *tz);
69747 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69748
69749@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69750 int security_sysctl(struct ctl_table *table, int op);
69751 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69752 int security_quota_on(struct dentry *dentry);
69753-int security_syslog(int type);
69754+int security_syslog(int type, bool from_file);
69755 int security_settime(struct timespec *ts, struct timezone *tz);
69756 int security_vm_enough_memory(long pages);
69757 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69758@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69759 return 0;
69760 }
69761
69762-static inline int security_syslog(int type)
69763+static inline int security_syslog(int type, bool from_file)
69764 {
69765- return cap_syslog(type);
69766+ return cap_syslog(type, from_file);
69767 }
69768
69769 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69770diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69771index 8366d8f..cc5f9d6 100644
69772--- a/include/linux/seq_file.h
69773+++ b/include/linux/seq_file.h
69774@@ -23,6 +23,9 @@ struct seq_file {
69775 u64 version;
69776 struct mutex lock;
69777 const struct seq_operations *op;
69778+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69779+ u64 exec_id;
69780+#endif
69781 void *private;
69782 };
69783
69784@@ -32,6 +35,7 @@ struct seq_operations {
69785 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69786 int (*show) (struct seq_file *m, void *v);
69787 };
69788+typedef struct seq_operations __no_const seq_operations_no_const;
69789
69790 #define SEQ_SKIP 1
69791
69792diff --git a/include/linux/shm.h b/include/linux/shm.h
69793index eca6235..c7417ed 100644
69794--- a/include/linux/shm.h
69795+++ b/include/linux/shm.h
69796@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69797 pid_t shm_cprid;
69798 pid_t shm_lprid;
69799 struct user_struct *mlock_user;
69800+#ifdef CONFIG_GRKERNSEC
69801+ time_t shm_createtime;
69802+ pid_t shm_lapid;
69803+#endif
69804 };
69805
69806 /* shm_mode upper byte flags */
69807diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69808index bcdd660..6e12e11 100644
69809--- a/include/linux/skbuff.h
69810+++ b/include/linux/skbuff.h
69811@@ -14,6 +14,7 @@
69812 #ifndef _LINUX_SKBUFF_H
69813 #define _LINUX_SKBUFF_H
69814
69815+#include <linux/const.h>
69816 #include <linux/kernel.h>
69817 #include <linux/kmemcheck.h>
69818 #include <linux/compiler.h>
69819@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69820 */
69821 static inline int skb_queue_empty(const struct sk_buff_head *list)
69822 {
69823- return list->next == (struct sk_buff *)list;
69824+ return list->next == (const struct sk_buff *)list;
69825 }
69826
69827 /**
69828@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69829 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69830 const struct sk_buff *skb)
69831 {
69832- return (skb->next == (struct sk_buff *) list);
69833+ return (skb->next == (const struct sk_buff *) list);
69834 }
69835
69836 /**
69837@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69838 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69839 const struct sk_buff *skb)
69840 {
69841- return (skb->prev == (struct sk_buff *) list);
69842+ return (skb->prev == (const struct sk_buff *) list);
69843 }
69844
69845 /**
69846@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69847 * headroom, you should not reduce this.
69848 */
69849 #ifndef NET_SKB_PAD
69850-#define NET_SKB_PAD 32
69851+#define NET_SKB_PAD (_AC(32,UL))
69852 #endif
69853
69854 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69855diff --git a/include/linux/slab.h b/include/linux/slab.h
69856index 2da8372..a3be824 100644
69857--- a/include/linux/slab.h
69858+++ b/include/linux/slab.h
69859@@ -11,12 +11,20 @@
69860
69861 #include <linux/gfp.h>
69862 #include <linux/types.h>
69863+#include <linux/err.h>
69864
69865 /*
69866 * Flags to pass to kmem_cache_create().
69867 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69868 */
69869 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69870+
69871+#ifdef CONFIG_PAX_USERCOPY
69872+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69873+#else
69874+#define SLAB_USERCOPY 0x00000000UL
69875+#endif
69876+
69877 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69878 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69879 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69880@@ -82,10 +90,13 @@
69881 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69882 * Both make kfree a no-op.
69883 */
69884-#define ZERO_SIZE_PTR ((void *)16)
69885+#define ZERO_SIZE_PTR \
69886+({ \
69887+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69888+ (void *)(-MAX_ERRNO-1L); \
69889+})
69890
69891-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69892- (unsigned long)ZERO_SIZE_PTR)
69893+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69894
69895 /*
69896 * struct kmem_cache related prototypes
69897@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69898 void kfree(const void *);
69899 void kzfree(const void *);
69900 size_t ksize(const void *);
69901+void check_object_size(const void *ptr, unsigned long n, bool to);
69902
69903 /*
69904 * Allocator specific definitions. These are mainly used to establish optimized
69905@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69906
69907 void __init kmem_cache_init_late(void);
69908
69909+#define kmalloc(x, y) \
69910+({ \
69911+ void *___retval; \
69912+ intoverflow_t ___x = (intoverflow_t)x; \
69913+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69914+ ___retval = NULL; \
69915+ else \
69916+ ___retval = kmalloc((size_t)___x, (y)); \
69917+ ___retval; \
69918+})
69919+
69920+#define kmalloc_node(x, y, z) \
69921+({ \
69922+ void *___retval; \
69923+ intoverflow_t ___x = (intoverflow_t)x; \
69924+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69925+ ___retval = NULL; \
69926+ else \
69927+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
69928+ ___retval; \
69929+})
69930+
69931+#define kzalloc(x, y) \
69932+({ \
69933+ void *___retval; \
69934+ intoverflow_t ___x = (intoverflow_t)x; \
69935+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69936+ ___retval = NULL; \
69937+ else \
69938+ ___retval = kzalloc((size_t)___x, (y)); \
69939+ ___retval; \
69940+})
69941+
69942 #endif /* _LINUX_SLAB_H */
69943diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69944index 850d057..d9dfe3c 100644
69945--- a/include/linux/slab_def.h
69946+++ b/include/linux/slab_def.h
69947@@ -69,10 +69,10 @@ struct kmem_cache {
69948 unsigned long node_allocs;
69949 unsigned long node_frees;
69950 unsigned long node_overflow;
69951- atomic_t allochit;
69952- atomic_t allocmiss;
69953- atomic_t freehit;
69954- atomic_t freemiss;
69955+ atomic_unchecked_t allochit;
69956+ atomic_unchecked_t allocmiss;
69957+ atomic_unchecked_t freehit;
69958+ atomic_unchecked_t freemiss;
69959
69960 /*
69961 * If debugging is enabled, then the allocator can add additional
69962diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69963index 5ad70a6..57f9f65 100644
69964--- a/include/linux/slub_def.h
69965+++ b/include/linux/slub_def.h
69966@@ -86,7 +86,7 @@ struct kmem_cache {
69967 struct kmem_cache_order_objects max;
69968 struct kmem_cache_order_objects min;
69969 gfp_t allocflags; /* gfp flags to use on each alloc */
69970- int refcount; /* Refcount for slab cache destroy */
69971+ atomic_t refcount; /* Refcount for slab cache destroy */
69972 void (*ctor)(void *);
69973 int inuse; /* Offset to metadata */
69974 int align; /* Alignment */
69975@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69976 #endif
69977
69978 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69979-void *__kmalloc(size_t size, gfp_t flags);
69980+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69981
69982 #ifdef CONFIG_KMEMTRACE
69983 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69984diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69985index 67ad11f..0bbd8af 100644
69986--- a/include/linux/sonet.h
69987+++ b/include/linux/sonet.h
69988@@ -61,7 +61,7 @@ struct sonet_stats {
69989 #include <asm/atomic.h>
69990
69991 struct k_sonet_stats {
69992-#define __HANDLE_ITEM(i) atomic_t i
69993+#define __HANDLE_ITEM(i) atomic_unchecked_t i
69994 __SONET_ITEMS
69995 #undef __HANDLE_ITEM
69996 };
69997diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69998index 6f52b4d..5500323 100644
69999--- a/include/linux/sunrpc/cache.h
70000+++ b/include/linux/sunrpc/cache.h
70001@@ -125,7 +125,7 @@ struct cache_detail {
70002 */
70003 struct cache_req {
70004 struct cache_deferred_req *(*defer)(struct cache_req *req);
70005-};
70006+} __no_const;
70007 /* this must be embedded in a deferred_request that is being
70008 * delayed awaiting cache-fill
70009 */
70010diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
70011index 8ed9642..101ceab 100644
70012--- a/include/linux/sunrpc/clnt.h
70013+++ b/include/linux/sunrpc/clnt.h
70014@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
70015 {
70016 switch (sap->sa_family) {
70017 case AF_INET:
70018- return ntohs(((struct sockaddr_in *)sap)->sin_port);
70019+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
70020 case AF_INET6:
70021- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
70022+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
70023 }
70024 return 0;
70025 }
70026@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
70027 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
70028 const struct sockaddr *src)
70029 {
70030- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
70031+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
70032 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
70033
70034 dsin->sin_family = ssin->sin_family;
70035@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
70036 if (sa->sa_family != AF_INET6)
70037 return 0;
70038
70039- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
70040+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
70041 }
70042
70043 #endif /* __KERNEL__ */
70044diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
70045index c14fe86..393245e 100644
70046--- a/include/linux/sunrpc/svc_rdma.h
70047+++ b/include/linux/sunrpc/svc_rdma.h
70048@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
70049 extern unsigned int svcrdma_max_requests;
70050 extern unsigned int svcrdma_max_req_size;
70051
70052-extern atomic_t rdma_stat_recv;
70053-extern atomic_t rdma_stat_read;
70054-extern atomic_t rdma_stat_write;
70055-extern atomic_t rdma_stat_sq_starve;
70056-extern atomic_t rdma_stat_rq_starve;
70057-extern atomic_t rdma_stat_rq_poll;
70058-extern atomic_t rdma_stat_rq_prod;
70059-extern atomic_t rdma_stat_sq_poll;
70060-extern atomic_t rdma_stat_sq_prod;
70061+extern atomic_unchecked_t rdma_stat_recv;
70062+extern atomic_unchecked_t rdma_stat_read;
70063+extern atomic_unchecked_t rdma_stat_write;
70064+extern atomic_unchecked_t rdma_stat_sq_starve;
70065+extern atomic_unchecked_t rdma_stat_rq_starve;
70066+extern atomic_unchecked_t rdma_stat_rq_poll;
70067+extern atomic_unchecked_t rdma_stat_rq_prod;
70068+extern atomic_unchecked_t rdma_stat_sq_poll;
70069+extern atomic_unchecked_t rdma_stat_sq_prod;
70070
70071 #define RPCRDMA_VERSION 1
70072
70073diff --git a/include/linux/suspend.h b/include/linux/suspend.h
70074index 5e781d8..1e62818 100644
70075--- a/include/linux/suspend.h
70076+++ b/include/linux/suspend.h
70077@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
70078 * which require special recovery actions in that situation.
70079 */
70080 struct platform_suspend_ops {
70081- int (*valid)(suspend_state_t state);
70082- int (*begin)(suspend_state_t state);
70083- int (*prepare)(void);
70084- int (*prepare_late)(void);
70085- int (*enter)(suspend_state_t state);
70086- void (*wake)(void);
70087- void (*finish)(void);
70088- void (*end)(void);
70089- void (*recover)(void);
70090+ int (* const valid)(suspend_state_t state);
70091+ int (* const begin)(suspend_state_t state);
70092+ int (* const prepare)(void);
70093+ int (* const prepare_late)(void);
70094+ int (* const enter)(suspend_state_t state);
70095+ void (* const wake)(void);
70096+ void (* const finish)(void);
70097+ void (* const end)(void);
70098+ void (* const recover)(void);
70099 };
70100
70101 #ifdef CONFIG_SUSPEND
70102@@ -120,7 +120,7 @@ struct platform_suspend_ops {
70103 * suspend_set_ops - set platform dependent suspend operations
70104 * @ops: The new suspend operations to set.
70105 */
70106-extern void suspend_set_ops(struct platform_suspend_ops *ops);
70107+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
70108 extern int suspend_valid_only_mem(suspend_state_t state);
70109
70110 /**
70111@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
70112 #else /* !CONFIG_SUSPEND */
70113 #define suspend_valid_only_mem NULL
70114
70115-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
70116+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
70117 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
70118 #endif /* !CONFIG_SUSPEND */
70119
70120@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
70121 * platforms which require special recovery actions in that situation.
70122 */
70123 struct platform_hibernation_ops {
70124- int (*begin)(void);
70125- void (*end)(void);
70126- int (*pre_snapshot)(void);
70127- void (*finish)(void);
70128- int (*prepare)(void);
70129- int (*enter)(void);
70130- void (*leave)(void);
70131- int (*pre_restore)(void);
70132- void (*restore_cleanup)(void);
70133- void (*recover)(void);
70134+ int (* const begin)(void);
70135+ void (* const end)(void);
70136+ int (* const pre_snapshot)(void);
70137+ void (* const finish)(void);
70138+ int (* const prepare)(void);
70139+ int (* const enter)(void);
70140+ void (* const leave)(void);
70141+ int (* const pre_restore)(void);
70142+ void (* const restore_cleanup)(void);
70143+ void (* const recover)(void);
70144 };
70145
70146 #ifdef CONFIG_HIBERNATION
70147@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
70148 extern void swsusp_unset_page_free(struct page *);
70149 extern unsigned long get_safe_page(gfp_t gfp_mask);
70150
70151-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
70152+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
70153 extern int hibernate(void);
70154 extern bool system_entering_hibernation(void);
70155 #else /* CONFIG_HIBERNATION */
70156@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
70157 static inline void swsusp_set_page_free(struct page *p) {}
70158 static inline void swsusp_unset_page_free(struct page *p) {}
70159
70160-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
70161+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
70162 static inline int hibernate(void) { return -ENOSYS; }
70163 static inline bool system_entering_hibernation(void) { return false; }
70164 #endif /* CONFIG_HIBERNATION */
70165diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
70166index 0eb6942..a805cb6 100644
70167--- a/include/linux/sysctl.h
70168+++ b/include/linux/sysctl.h
70169@@ -164,7 +164,11 @@ enum
70170 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
70171 };
70172
70173-
70174+#ifdef CONFIG_PAX_SOFTMODE
70175+enum {
70176+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
70177+};
70178+#endif
70179
70180 /* CTL_VM names: */
70181 enum
70182@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
70183
70184 extern int proc_dostring(struct ctl_table *, int,
70185 void __user *, size_t *, loff_t *);
70186+extern int proc_dostring_modpriv(struct ctl_table *, int,
70187+ void __user *, size_t *, loff_t *);
70188 extern int proc_dointvec(struct ctl_table *, int,
70189 void __user *, size_t *, loff_t *);
70190 extern int proc_dointvec_minmax(struct ctl_table *, int,
70191@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
70192
70193 extern ctl_handler sysctl_data;
70194 extern ctl_handler sysctl_string;
70195+extern ctl_handler sysctl_string_modpriv;
70196 extern ctl_handler sysctl_intvec;
70197 extern ctl_handler sysctl_jiffies;
70198 extern ctl_handler sysctl_ms_jiffies;
70199diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
70200index 9d68fed..71f02cc 100644
70201--- a/include/linux/sysfs.h
70202+++ b/include/linux/sysfs.h
70203@@ -75,8 +75,8 @@ struct bin_attribute {
70204 };
70205
70206 struct sysfs_ops {
70207- ssize_t (*show)(struct kobject *, struct attribute *,char *);
70208- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
70209+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
70210+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
70211 };
70212
70213 struct sysfs_dirent;
70214diff --git a/include/linux/syslog.h b/include/linux/syslog.h
70215new file mode 100644
70216index 0000000..3891139
70217--- /dev/null
70218+++ b/include/linux/syslog.h
70219@@ -0,0 +1,52 @@
70220+/* Syslog internals
70221+ *
70222+ * Copyright 2010 Canonical, Ltd.
70223+ * Author: Kees Cook <kees.cook@canonical.com>
70224+ *
70225+ * This program is free software; you can redistribute it and/or modify
70226+ * it under the terms of the GNU General Public License as published by
70227+ * the Free Software Foundation; either version 2, or (at your option)
70228+ * any later version.
70229+ *
70230+ * This program is distributed in the hope that it will be useful,
70231+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
70232+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
70233+ * GNU General Public License for more details.
70234+ *
70235+ * You should have received a copy of the GNU General Public License
70236+ * along with this program; see the file COPYING. If not, write to
70237+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
70238+ */
70239+
70240+#ifndef _LINUX_SYSLOG_H
70241+#define _LINUX_SYSLOG_H
70242+
70243+/* Close the log. Currently a NOP. */
70244+#define SYSLOG_ACTION_CLOSE 0
70245+/* Open the log. Currently a NOP. */
70246+#define SYSLOG_ACTION_OPEN 1
70247+/* Read from the log. */
70248+#define SYSLOG_ACTION_READ 2
70249+/* Read all messages remaining in the ring buffer. */
70250+#define SYSLOG_ACTION_READ_ALL 3
70251+/* Read and clear all messages remaining in the ring buffer */
70252+#define SYSLOG_ACTION_READ_CLEAR 4
70253+/* Clear ring buffer. */
70254+#define SYSLOG_ACTION_CLEAR 5
70255+/* Disable printk's to console */
70256+#define SYSLOG_ACTION_CONSOLE_OFF 6
70257+/* Enable printk's to console */
70258+#define SYSLOG_ACTION_CONSOLE_ON 7
70259+/* Set level of messages printed to console */
70260+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
70261+/* Return number of unread characters in the log buffer */
70262+#define SYSLOG_ACTION_SIZE_UNREAD 9
70263+/* Return size of the log buffer */
70264+#define SYSLOG_ACTION_SIZE_BUFFER 10
70265+
70266+#define SYSLOG_FROM_CALL 0
70267+#define SYSLOG_FROM_FILE 1
70268+
70269+int do_syslog(int type, char __user *buf, int count, bool from_file);
70270+
70271+#endif /* _LINUX_SYSLOG_H */
70272diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
70273index a8cc4e1..98d3b85 100644
70274--- a/include/linux/thread_info.h
70275+++ b/include/linux/thread_info.h
70276@@ -23,7 +23,7 @@ struct restart_block {
70277 };
70278 /* For futex_wait and futex_wait_requeue_pi */
70279 struct {
70280- u32 *uaddr;
70281+ u32 __user *uaddr;
70282 u32 val;
70283 u32 flags;
70284 u32 bitset;
70285diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
70286index 1eb44a9..f582df3 100644
70287--- a/include/linux/tracehook.h
70288+++ b/include/linux/tracehook.h
70289@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
70290 /*
70291 * ptrace report for syscall entry and exit looks identical.
70292 */
70293-static inline void ptrace_report_syscall(struct pt_regs *regs)
70294+static inline int ptrace_report_syscall(struct pt_regs *regs)
70295 {
70296 int ptrace = task_ptrace(current);
70297
70298 if (!(ptrace & PT_PTRACED))
70299- return;
70300+ return 0;
70301
70302 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
70303
70304@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70305 send_sig(current->exit_code, current, 1);
70306 current->exit_code = 0;
70307 }
70308+
70309+ return fatal_signal_pending(current);
70310 }
70311
70312 /**
70313@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
70314 static inline __must_check int tracehook_report_syscall_entry(
70315 struct pt_regs *regs)
70316 {
70317- ptrace_report_syscall(regs);
70318- return 0;
70319+ return ptrace_report_syscall(regs);
70320 }
70321
70322 /**
70323diff --git a/include/linux/tty.h b/include/linux/tty.h
70324index e9c57e9..ee6d489 100644
70325--- a/include/linux/tty.h
70326+++ b/include/linux/tty.h
70327@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
70328 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
70329 extern void tty_ldisc_enable(struct tty_struct *tty);
70330
70331-
70332 /* n_tty.c */
70333 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
70334
70335diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70336index 0c4ee9b..9f7c426 100644
70337--- a/include/linux/tty_ldisc.h
70338+++ b/include/linux/tty_ldisc.h
70339@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
70340
70341 struct module *owner;
70342
70343- int refcount;
70344+ atomic_t refcount;
70345 };
70346
70347 struct tty_ldisc {
70348diff --git a/include/linux/types.h b/include/linux/types.h
70349index c42724f..d190eee 100644
70350--- a/include/linux/types.h
70351+++ b/include/linux/types.h
70352@@ -191,10 +191,26 @@ typedef struct {
70353 volatile int counter;
70354 } atomic_t;
70355
70356+#ifdef CONFIG_PAX_REFCOUNT
70357+typedef struct {
70358+ volatile int counter;
70359+} atomic_unchecked_t;
70360+#else
70361+typedef atomic_t atomic_unchecked_t;
70362+#endif
70363+
70364 #ifdef CONFIG_64BIT
70365 typedef struct {
70366 volatile long counter;
70367 } atomic64_t;
70368+
70369+#ifdef CONFIG_PAX_REFCOUNT
70370+typedef struct {
70371+ volatile long counter;
70372+} atomic64_unchecked_t;
70373+#else
70374+typedef atomic64_t atomic64_unchecked_t;
70375+#endif
70376 #endif
70377
70378 struct ustat {
70379diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70380index 6b58367..53a3e8e 100644
70381--- a/include/linux/uaccess.h
70382+++ b/include/linux/uaccess.h
70383@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70384 long ret; \
70385 mm_segment_t old_fs = get_fs(); \
70386 \
70387- set_fs(KERNEL_DS); \
70388 pagefault_disable(); \
70389- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70390- pagefault_enable(); \
70391+ set_fs(KERNEL_DS); \
70392+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70393 set_fs(old_fs); \
70394+ pagefault_enable(); \
70395 ret; \
70396 })
70397
70398@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70399 * Safely read from address @src to the buffer at @dst. If a kernel fault
70400 * happens, handle that and return -EFAULT.
70401 */
70402-extern long probe_kernel_read(void *dst, void *src, size_t size);
70403+extern long probe_kernel_read(void *dst, const void *src, size_t size);
70404
70405 /*
70406 * probe_kernel_write(): safely attempt to write to a location
70407@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
70408 * Safely write to address @dst from the buffer at @src. If a kernel fault
70409 * happens, handle that and return -EFAULT.
70410 */
70411-extern long probe_kernel_write(void *dst, void *src, size_t size);
70412+extern long probe_kernel_write(void *dst, const void *src, size_t size);
70413
70414 #endif /* __LINUX_UACCESS_H__ */
70415diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70416index 99c1b4d..bb94261 100644
70417--- a/include/linux/unaligned/access_ok.h
70418+++ b/include/linux/unaligned/access_ok.h
70419@@ -6,32 +6,32 @@
70420
70421 static inline u16 get_unaligned_le16(const void *p)
70422 {
70423- return le16_to_cpup((__le16 *)p);
70424+ return le16_to_cpup((const __le16 *)p);
70425 }
70426
70427 static inline u32 get_unaligned_le32(const void *p)
70428 {
70429- return le32_to_cpup((__le32 *)p);
70430+ return le32_to_cpup((const __le32 *)p);
70431 }
70432
70433 static inline u64 get_unaligned_le64(const void *p)
70434 {
70435- return le64_to_cpup((__le64 *)p);
70436+ return le64_to_cpup((const __le64 *)p);
70437 }
70438
70439 static inline u16 get_unaligned_be16(const void *p)
70440 {
70441- return be16_to_cpup((__be16 *)p);
70442+ return be16_to_cpup((const __be16 *)p);
70443 }
70444
70445 static inline u32 get_unaligned_be32(const void *p)
70446 {
70447- return be32_to_cpup((__be32 *)p);
70448+ return be32_to_cpup((const __be32 *)p);
70449 }
70450
70451 static inline u64 get_unaligned_be64(const void *p)
70452 {
70453- return be64_to_cpup((__be64 *)p);
70454+ return be64_to_cpup((const __be64 *)p);
70455 }
70456
70457 static inline void put_unaligned_le16(u16 val, void *p)
70458diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70459index 79b9837..b5a56f9 100644
70460--- a/include/linux/vermagic.h
70461+++ b/include/linux/vermagic.h
70462@@ -26,9 +26,35 @@
70463 #define MODULE_ARCH_VERMAGIC ""
70464 #endif
70465
70466+#ifdef CONFIG_PAX_REFCOUNT
70467+#define MODULE_PAX_REFCOUNT "REFCOUNT "
70468+#else
70469+#define MODULE_PAX_REFCOUNT ""
70470+#endif
70471+
70472+#ifdef CONSTIFY_PLUGIN
70473+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
70474+#else
70475+#define MODULE_CONSTIFY_PLUGIN ""
70476+#endif
70477+
70478+#ifdef STACKLEAK_PLUGIN
70479+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
70480+#else
70481+#define MODULE_STACKLEAK_PLUGIN ""
70482+#endif
70483+
70484+#ifdef CONFIG_GRKERNSEC
70485+#define MODULE_GRSEC "GRSEC "
70486+#else
70487+#define MODULE_GRSEC ""
70488+#endif
70489+
70490 #define VERMAGIC_STRING \
70491 UTS_RELEASE " " \
70492 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
70493 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
70494- MODULE_ARCH_VERMAGIC
70495+ MODULE_ARCH_VERMAGIC \
70496+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
70497+ MODULE_GRSEC
70498
70499diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
70500index 819a634..462ac12 100644
70501--- a/include/linux/vmalloc.h
70502+++ b/include/linux/vmalloc.h
70503@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
70504 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
70505 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
70506 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
70507+
70508+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70509+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
70510+#endif
70511+
70512 /* bits [20..32] reserved for arch specific ioremap internals */
70513
70514 /*
70515@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
70516
70517 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
70518
70519+#define vmalloc(x) \
70520+({ \
70521+ void *___retval; \
70522+ intoverflow_t ___x = (intoverflow_t)x; \
70523+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
70524+ ___retval = NULL; \
70525+ else \
70526+ ___retval = vmalloc((unsigned long)___x); \
70527+ ___retval; \
70528+})
70529+
70530+#define __vmalloc(x, y, z) \
70531+({ \
70532+ void *___retval; \
70533+ intoverflow_t ___x = (intoverflow_t)x; \
70534+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
70535+ ___retval = NULL; \
70536+ else \
70537+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
70538+ ___retval; \
70539+})
70540+
70541+#define vmalloc_user(x) \
70542+({ \
70543+ void *___retval; \
70544+ intoverflow_t ___x = (intoverflow_t)x; \
70545+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
70546+ ___retval = NULL; \
70547+ else \
70548+ ___retval = vmalloc_user((unsigned long)___x); \
70549+ ___retval; \
70550+})
70551+
70552+#define vmalloc_exec(x) \
70553+({ \
70554+ void *___retval; \
70555+ intoverflow_t ___x = (intoverflow_t)x; \
70556+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
70557+ ___retval = NULL; \
70558+ else \
70559+ ___retval = vmalloc_exec((unsigned long)___x); \
70560+ ___retval; \
70561+})
70562+
70563+#define vmalloc_node(x, y) \
70564+({ \
70565+ void *___retval; \
70566+ intoverflow_t ___x = (intoverflow_t)x; \
70567+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
70568+ ___retval = NULL; \
70569+ else \
70570+ ___retval = vmalloc_node((unsigned long)___x, (y));\
70571+ ___retval; \
70572+})
70573+
70574+#define vmalloc_32(x) \
70575+({ \
70576+ void *___retval; \
70577+ intoverflow_t ___x = (intoverflow_t)x; \
70578+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
70579+ ___retval = NULL; \
70580+ else \
70581+ ___retval = vmalloc_32((unsigned long)___x); \
70582+ ___retval; \
70583+})
70584+
70585+#define vmalloc_32_user(x) \
70586+({ \
70587+ void *___retval; \
70588+ intoverflow_t ___x = (intoverflow_t)x; \
70589+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
70590+ ___retval = NULL; \
70591+ else \
70592+ ___retval = vmalloc_32_user((unsigned long)___x);\
70593+ ___retval; \
70594+})
70595+
70596 #endif /* _LINUX_VMALLOC_H */
70597diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
70598index 13070d6..aa4159a 100644
70599--- a/include/linux/vmstat.h
70600+++ b/include/linux/vmstat.h
70601@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
70602 /*
70603 * Zone based page accounting with per cpu differentials.
70604 */
70605-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70606+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70607
70608 static inline void zone_page_state_add(long x, struct zone *zone,
70609 enum zone_stat_item item)
70610 {
70611- atomic_long_add(x, &zone->vm_stat[item]);
70612- atomic_long_add(x, &vm_stat[item]);
70613+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70614+ atomic_long_add_unchecked(x, &vm_stat[item]);
70615 }
70616
70617 static inline unsigned long global_page_state(enum zone_stat_item item)
70618 {
70619- long x = atomic_long_read(&vm_stat[item]);
70620+ long x = atomic_long_read_unchecked(&vm_stat[item]);
70621 #ifdef CONFIG_SMP
70622 if (x < 0)
70623 x = 0;
70624@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70625 static inline unsigned long zone_page_state(struct zone *zone,
70626 enum zone_stat_item item)
70627 {
70628- long x = atomic_long_read(&zone->vm_stat[item]);
70629+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70630 #ifdef CONFIG_SMP
70631 if (x < 0)
70632 x = 0;
70633@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70634 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70635 enum zone_stat_item item)
70636 {
70637- long x = atomic_long_read(&zone->vm_stat[item]);
70638+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70639
70640 #ifdef CONFIG_SMP
70641 int cpu;
70642@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70643
70644 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70645 {
70646- atomic_long_inc(&zone->vm_stat[item]);
70647- atomic_long_inc(&vm_stat[item]);
70648+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
70649+ atomic_long_inc_unchecked(&vm_stat[item]);
70650 }
70651
70652 static inline void __inc_zone_page_state(struct page *page,
70653@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
70654
70655 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70656 {
70657- atomic_long_dec(&zone->vm_stat[item]);
70658- atomic_long_dec(&vm_stat[item]);
70659+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
70660+ atomic_long_dec_unchecked(&vm_stat[item]);
70661 }
70662
70663 static inline void __dec_zone_page_state(struct page *page,
70664diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70665index 5c84af8..1a3b6e2 100644
70666--- a/include/linux/xattr.h
70667+++ b/include/linux/xattr.h
70668@@ -33,6 +33,11 @@
70669 #define XATTR_USER_PREFIX "user."
70670 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
70671
70672+/* User namespace */
70673+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
70674+#define XATTR_PAX_FLAGS_SUFFIX "flags"
70675+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
70676+
70677 struct inode;
70678 struct dentry;
70679
70680diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
70681index eed5fcc..5080d24 100644
70682--- a/include/media/saa7146_vv.h
70683+++ b/include/media/saa7146_vv.h
70684@@ -167,7 +167,7 @@ struct saa7146_ext_vv
70685 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
70686
70687 /* the extension can override this */
70688- struct v4l2_ioctl_ops ops;
70689+ v4l2_ioctl_ops_no_const ops;
70690 /* pointer to the saa7146 core ops */
70691 const struct v4l2_ioctl_ops *core_ops;
70692
70693diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70694index 73c9867..2da8837 100644
70695--- a/include/media/v4l2-dev.h
70696+++ b/include/media/v4l2-dev.h
70697@@ -34,7 +34,7 @@ struct v4l2_device;
70698 #define V4L2_FL_UNREGISTERED (0)
70699
70700 struct v4l2_file_operations {
70701- struct module *owner;
70702+ struct module * const owner;
70703 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
70704 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
70705 unsigned int (*poll) (struct file *, struct poll_table_struct *);
70706@@ -46,6 +46,7 @@ struct v4l2_file_operations {
70707 int (*open) (struct file *);
70708 int (*release) (struct file *);
70709 };
70710+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
70711
70712 /*
70713 * Newer version of video_device, handled by videodev2.c
70714diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
70715index 5d5d550..f559ef1 100644
70716--- a/include/media/v4l2-device.h
70717+++ b/include/media/v4l2-device.h
70718@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
70719 this function returns 0. If the name ends with a digit (e.g. cx18),
70720 then the name will be set to cx18-0 since cx180 looks really odd. */
70721 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
70722- atomic_t *instance);
70723+ atomic_unchecked_t *instance);
70724
70725 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70726 Since the parent disappears this ensures that v4l2_dev doesn't have an
70727diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70728index 7a4529d..7244290 100644
70729--- a/include/media/v4l2-ioctl.h
70730+++ b/include/media/v4l2-ioctl.h
70731@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70732 long (*vidioc_default) (struct file *file, void *fh,
70733 int cmd, void *arg);
70734 };
70735+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70736
70737
70738 /* v4l debugging and diagnostics */
70739diff --git a/include/net/flow.h b/include/net/flow.h
70740index 809970b..c3df4f3 100644
70741--- a/include/net/flow.h
70742+++ b/include/net/flow.h
70743@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70744 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70745 u8 dir, flow_resolve_t resolver);
70746 extern void flow_cache_flush(void);
70747-extern atomic_t flow_cache_genid;
70748+extern atomic_unchecked_t flow_cache_genid;
70749
70750 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70751 {
70752diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70753index 15e1f8fe..668837c 100644
70754--- a/include/net/inetpeer.h
70755+++ b/include/net/inetpeer.h
70756@@ -24,7 +24,7 @@ struct inet_peer
70757 __u32 dtime; /* the time of last use of not
70758 * referenced entries */
70759 atomic_t refcnt;
70760- atomic_t rid; /* Frag reception counter */
70761+ atomic_unchecked_t rid; /* Frag reception counter */
70762 __u32 tcp_ts;
70763 unsigned long tcp_ts_stamp;
70764 };
70765diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70766index 98978e7..2243a3d 100644
70767--- a/include/net/ip_vs.h
70768+++ b/include/net/ip_vs.h
70769@@ -365,7 +365,7 @@ struct ip_vs_conn {
70770 struct ip_vs_conn *control; /* Master control connection */
70771 atomic_t n_control; /* Number of controlled ones */
70772 struct ip_vs_dest *dest; /* real server */
70773- atomic_t in_pkts; /* incoming packet counter */
70774+ atomic_unchecked_t in_pkts; /* incoming packet counter */
70775
70776 /* packet transmitter for different forwarding methods. If it
70777 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70778@@ -466,7 +466,7 @@ struct ip_vs_dest {
70779 union nf_inet_addr addr; /* IP address of the server */
70780 __be16 port; /* port number of the server */
70781 volatile unsigned flags; /* dest status flags */
70782- atomic_t conn_flags; /* flags to copy to conn */
70783+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
70784 atomic_t weight; /* server weight */
70785
70786 atomic_t refcnt; /* reference counter */
70787diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70788index 69b610a..fe3962c 100644
70789--- a/include/net/irda/ircomm_core.h
70790+++ b/include/net/irda/ircomm_core.h
70791@@ -51,7 +51,7 @@ typedef struct {
70792 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70793 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70794 struct ircomm_info *);
70795-} call_t;
70796+} __no_const call_t;
70797
70798 struct ircomm_cb {
70799 irda_queue_t queue;
70800diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70801index eea2e61..08c692d 100644
70802--- a/include/net/irda/ircomm_tty.h
70803+++ b/include/net/irda/ircomm_tty.h
70804@@ -35,6 +35,7 @@
70805 #include <linux/termios.h>
70806 #include <linux/timer.h>
70807 #include <linux/tty.h> /* struct tty_struct */
70808+#include <asm/local.h>
70809
70810 #include <net/irda/irias_object.h>
70811 #include <net/irda/ircomm_core.h>
70812@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70813 unsigned short close_delay;
70814 unsigned short closing_wait; /* time to wait before closing */
70815
70816- int open_count;
70817- int blocked_open; /* # of blocked opens */
70818+ local_t open_count;
70819+ local_t blocked_open; /* # of blocked opens */
70820
70821 /* Protect concurent access to :
70822 * o self->open_count
70823diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70824index f82a1e8..82d81e8 100644
70825--- a/include/net/iucv/af_iucv.h
70826+++ b/include/net/iucv/af_iucv.h
70827@@ -87,7 +87,7 @@ struct iucv_sock {
70828 struct iucv_sock_list {
70829 struct hlist_head head;
70830 rwlock_t lock;
70831- atomic_t autobind_name;
70832+ atomic_unchecked_t autobind_name;
70833 };
70834
70835 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70836diff --git a/include/net/lapb.h b/include/net/lapb.h
70837index 96cb5dd..25e8d4f 100644
70838--- a/include/net/lapb.h
70839+++ b/include/net/lapb.h
70840@@ -95,7 +95,7 @@ struct lapb_cb {
70841 struct sk_buff_head write_queue;
70842 struct sk_buff_head ack_queue;
70843 unsigned char window;
70844- struct lapb_register_struct callbacks;
70845+ struct lapb_register_struct *callbacks;
70846
70847 /* FRMR control information */
70848 struct lapb_frame frmr_data;
70849diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70850index 3817fda..cdb2343 100644
70851--- a/include/net/neighbour.h
70852+++ b/include/net/neighbour.h
70853@@ -131,7 +131,7 @@ struct neigh_ops
70854 int (*connected_output)(struct sk_buff*);
70855 int (*hh_output)(struct sk_buff*);
70856 int (*queue_xmit)(struct sk_buff*);
70857-};
70858+} __do_const;
70859
70860 struct pneigh_entry
70861 {
70862diff --git a/include/net/netlink.h b/include/net/netlink.h
70863index c344646..4778c71 100644
70864--- a/include/net/netlink.h
70865+++ b/include/net/netlink.h
70866@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70867 {
70868 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70869 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70870- nlh->nlmsg_len <= remaining);
70871+ nlh->nlmsg_len <= (unsigned int)remaining);
70872 }
70873
70874 /**
70875@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70876 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70877 {
70878 if (mark)
70879- skb_trim(skb, (unsigned char *) mark - skb->data);
70880+ skb_trim(skb, (const unsigned char *) mark - skb->data);
70881 }
70882
70883 /**
70884diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70885index 9a4b8b7..e49e077 100644
70886--- a/include/net/netns/ipv4.h
70887+++ b/include/net/netns/ipv4.h
70888@@ -54,7 +54,7 @@ struct netns_ipv4 {
70889 int current_rt_cache_rebuild_count;
70890
70891 struct timer_list rt_secret_timer;
70892- atomic_t rt_genid;
70893+ atomic_unchecked_t rt_genid;
70894
70895 #ifdef CONFIG_IP_MROUTE
70896 struct sock *mroute_sk;
70897diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70898index 8a6d529..171f401 100644
70899--- a/include/net/sctp/sctp.h
70900+++ b/include/net/sctp/sctp.h
70901@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70902
70903 #else /* SCTP_DEBUG */
70904
70905-#define SCTP_DEBUG_PRINTK(whatever...)
70906-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70907+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70908+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70909 #define SCTP_ENABLE_DEBUG
70910 #define SCTP_DISABLE_DEBUG
70911 #define SCTP_ASSERT(expr, str, func)
70912diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70913index d97f689..f3b90ab 100644
70914--- a/include/net/secure_seq.h
70915+++ b/include/net/secure_seq.h
70916@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70917 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70918 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70919 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70920- __be16 dport);
70921+ __be16 dport);
70922 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70923 __be16 sport, __be16 dport);
70924 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70925- __be16 sport, __be16 dport);
70926+ __be16 sport, __be16 dport);
70927 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70928- __be16 sport, __be16 dport);
70929+ __be16 sport, __be16 dport);
70930 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70931- __be16 sport, __be16 dport);
70932+ __be16 sport, __be16 dport);
70933
70934 #endif /* _NET_SECURE_SEQ */
70935diff --git a/include/net/sock.h b/include/net/sock.h
70936index 78adf52..99afd29 100644
70937--- a/include/net/sock.h
70938+++ b/include/net/sock.h
70939@@ -272,7 +272,7 @@ struct sock {
70940 rwlock_t sk_callback_lock;
70941 int sk_err,
70942 sk_err_soft;
70943- atomic_t sk_drops;
70944+ atomic_unchecked_t sk_drops;
70945 unsigned short sk_ack_backlog;
70946 unsigned short sk_max_ack_backlog;
70947 __u32 sk_priority;
70948@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70949 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70950 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70951 #else
70952-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70953+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70954 int inc)
70955 {
70956 }
70957diff --git a/include/net/tcp.h b/include/net/tcp.h
70958index 6cfe18b..dd21acb 100644
70959--- a/include/net/tcp.h
70960+++ b/include/net/tcp.h
70961@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70962 struct tcp_seq_afinfo {
70963 char *name;
70964 sa_family_t family;
70965- struct file_operations seq_fops;
70966- struct seq_operations seq_ops;
70967+ file_operations_no_const seq_fops;
70968+ seq_operations_no_const seq_ops;
70969 };
70970
70971 struct tcp_iter_state {
70972diff --git a/include/net/udp.h b/include/net/udp.h
70973index f98abd2..b4b042f 100644
70974--- a/include/net/udp.h
70975+++ b/include/net/udp.h
70976@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70977 char *name;
70978 sa_family_t family;
70979 struct udp_table *udp_table;
70980- struct file_operations seq_fops;
70981- struct seq_operations seq_ops;
70982+ file_operations_no_const seq_fops;
70983+ seq_operations_no_const seq_ops;
70984 };
70985
70986 struct udp_iter_state {
70987diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70988index cbb822e..e9c1cbe 100644
70989--- a/include/rdma/iw_cm.h
70990+++ b/include/rdma/iw_cm.h
70991@@ -129,7 +129,7 @@ struct iw_cm_verbs {
70992 int backlog);
70993
70994 int (*destroy_listen)(struct iw_cm_id *cm_id);
70995-};
70996+} __no_const;
70997
70998 /**
70999 * iw_create_cm_id - Create an IW CM identifier.
71000diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
71001index 09a124b..caa8ca8 100644
71002--- a/include/scsi/libfc.h
71003+++ b/include/scsi/libfc.h
71004@@ -675,6 +675,7 @@ struct libfc_function_template {
71005 */
71006 void (*disc_stop_final) (struct fc_lport *);
71007 };
71008+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
71009
71010 /* information used by the discovery layer */
71011 struct fc_disc {
71012@@ -707,7 +708,7 @@ struct fc_lport {
71013 struct fc_disc disc;
71014
71015 /* Operational Information */
71016- struct libfc_function_template tt;
71017+ libfc_function_template_no_const tt;
71018 u8 link_up;
71019 u8 qfull;
71020 enum fc_lport_state state;
71021diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
71022index de8e180..f15e0d7 100644
71023--- a/include/scsi/scsi_device.h
71024+++ b/include/scsi/scsi_device.h
71025@@ -156,9 +156,9 @@ struct scsi_device {
71026 unsigned int max_device_blocked; /* what device_blocked counts down from */
71027 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
71028
71029- atomic_t iorequest_cnt;
71030- atomic_t iodone_cnt;
71031- atomic_t ioerr_cnt;
71032+ atomic_unchecked_t iorequest_cnt;
71033+ atomic_unchecked_t iodone_cnt;
71034+ atomic_unchecked_t ioerr_cnt;
71035
71036 struct device sdev_gendev,
71037 sdev_dev;
71038diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
71039index fc50bd6..81ba9cb 100644
71040--- a/include/scsi/scsi_transport_fc.h
71041+++ b/include/scsi/scsi_transport_fc.h
71042@@ -708,7 +708,7 @@ struct fc_function_template {
71043 unsigned long show_host_system_hostname:1;
71044
71045 unsigned long disable_target_scan:1;
71046-};
71047+} __do_const;
71048
71049
71050 /**
71051diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
71052index 3dae3f7..8440d6f 100644
71053--- a/include/sound/ac97_codec.h
71054+++ b/include/sound/ac97_codec.h
71055@@ -419,15 +419,15 @@
71056 struct snd_ac97;
71057
71058 struct snd_ac97_build_ops {
71059- int (*build_3d) (struct snd_ac97 *ac97);
71060- int (*build_specific) (struct snd_ac97 *ac97);
71061- int (*build_spdif) (struct snd_ac97 *ac97);
71062- int (*build_post_spdif) (struct snd_ac97 *ac97);
71063+ int (* const build_3d) (struct snd_ac97 *ac97);
71064+ int (* const build_specific) (struct snd_ac97 *ac97);
71065+ int (* const build_spdif) (struct snd_ac97 *ac97);
71066+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
71067 #ifdef CONFIG_PM
71068- void (*suspend) (struct snd_ac97 *ac97);
71069- void (*resume) (struct snd_ac97 *ac97);
71070+ void (* const suspend) (struct snd_ac97 *ac97);
71071+ void (* const resume) (struct snd_ac97 *ac97);
71072 #endif
71073- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
71074+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
71075 };
71076
71077 struct snd_ac97_bus_ops {
71078@@ -477,7 +477,7 @@ struct snd_ac97_template {
71079
71080 struct snd_ac97 {
71081 /* -- lowlevel (hardware) driver specific -- */
71082- struct snd_ac97_build_ops * build_ops;
71083+ const struct snd_ac97_build_ops * build_ops;
71084 void *private_data;
71085 void (*private_free) (struct snd_ac97 *ac97);
71086 /* --- */
71087diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
71088index 891cf1a..a94ba2b 100644
71089--- a/include/sound/ak4xxx-adda.h
71090+++ b/include/sound/ak4xxx-adda.h
71091@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
71092 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
71093 unsigned char val);
71094 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
71095-};
71096+} __no_const;
71097
71098 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
71099
71100diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
71101index 8c05e47..2b5df97 100644
71102--- a/include/sound/hwdep.h
71103+++ b/include/sound/hwdep.h
71104@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
71105 struct snd_hwdep_dsp_status *status);
71106 int (*dsp_load)(struct snd_hwdep *hw,
71107 struct snd_hwdep_dsp_image *image);
71108-};
71109+} __no_const;
71110
71111 struct snd_hwdep {
71112 struct snd_card *card;
71113diff --git a/include/sound/info.h b/include/sound/info.h
71114index 112e894..6fda5b5 100644
71115--- a/include/sound/info.h
71116+++ b/include/sound/info.h
71117@@ -44,7 +44,7 @@ struct snd_info_entry_text {
71118 struct snd_info_buffer *buffer);
71119 void (*write)(struct snd_info_entry *entry,
71120 struct snd_info_buffer *buffer);
71121-};
71122+} __no_const;
71123
71124 struct snd_info_entry_ops {
71125 int (*open)(struct snd_info_entry *entry,
71126diff --git a/include/sound/pcm.h b/include/sound/pcm.h
71127index de6d981..590a550 100644
71128--- a/include/sound/pcm.h
71129+++ b/include/sound/pcm.h
71130@@ -80,6 +80,7 @@ struct snd_pcm_ops {
71131 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
71132 int (*ack)(struct snd_pcm_substream *substream);
71133 };
71134+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
71135
71136 /*
71137 *
71138diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
71139index 736eac7..fe8a80f 100644
71140--- a/include/sound/sb16_csp.h
71141+++ b/include/sound/sb16_csp.h
71142@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
71143 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
71144 int (*csp_stop) (struct snd_sb_csp * p);
71145 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
71146-};
71147+} __no_const;
71148
71149 /*
71150 * CSP private data
71151diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
71152index 444cd6b..3327cc5 100644
71153--- a/include/sound/ymfpci.h
71154+++ b/include/sound/ymfpci.h
71155@@ -358,7 +358,7 @@ struct snd_ymfpci {
71156 spinlock_t reg_lock;
71157 spinlock_t voice_lock;
71158 wait_queue_head_t interrupt_sleep;
71159- atomic_t interrupt_sleep_count;
71160+ atomic_unchecked_t interrupt_sleep_count;
71161 struct snd_info_entry *proc_entry;
71162 const struct firmware *dsp_microcode;
71163 const struct firmware *controller_microcode;
71164diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
71165index b89f9db..f097b38 100644
71166--- a/include/trace/events/irq.h
71167+++ b/include/trace/events/irq.h
71168@@ -34,7 +34,7 @@
71169 */
71170 TRACE_EVENT(irq_handler_entry,
71171
71172- TP_PROTO(int irq, struct irqaction *action),
71173+ TP_PROTO(int irq, const struct irqaction *action),
71174
71175 TP_ARGS(irq, action),
71176
71177@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
71178 */
71179 TRACE_EVENT(irq_handler_exit,
71180
71181- TP_PROTO(int irq, struct irqaction *action, int ret),
71182+ TP_PROTO(int irq, const struct irqaction *action, int ret),
71183
71184 TP_ARGS(irq, action, ret),
71185
71186@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
71187 */
71188 TRACE_EVENT(softirq_entry,
71189
71190- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
71191+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
71192
71193 TP_ARGS(h, vec),
71194
71195@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
71196 */
71197 TRACE_EVENT(softirq_exit,
71198
71199- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
71200+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
71201
71202 TP_ARGS(h, vec),
71203
71204diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
71205index 0993a22..32ba2fe 100644
71206--- a/include/video/uvesafb.h
71207+++ b/include/video/uvesafb.h
71208@@ -177,6 +177,7 @@ struct uvesafb_par {
71209 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
71210 u8 pmi_setpal; /* PMI for palette changes */
71211 u16 *pmi_base; /* protected mode interface location */
71212+ u8 *pmi_code; /* protected mode code location */
71213 void *pmi_start;
71214 void *pmi_pal;
71215 u8 *vbe_state_orig; /*
71216diff --git a/init/Kconfig b/init/Kconfig
71217index d72691b..3996e54 100644
71218--- a/init/Kconfig
71219+++ b/init/Kconfig
71220@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
71221
71222 config COMPAT_BRK
71223 bool "Disable heap randomization"
71224- default y
71225+ default n
71226 help
71227 Randomizing heap placement makes heap exploits harder, but it
71228 also breaks ancient binaries (including anything libc5 based).
71229diff --git a/init/do_mounts.c b/init/do_mounts.c
71230index bb008d0..4fa3933 100644
71231--- a/init/do_mounts.c
71232+++ b/init/do_mounts.c
71233@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
71234
71235 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
71236 {
71237- int err = sys_mount(name, "/root", fs, flags, data);
71238+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
71239 if (err)
71240 return err;
71241
71242- sys_chdir("/root");
71243+ sys_chdir((__force const char __user *)"/root");
71244 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
71245 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
71246 current->fs->pwd.mnt->mnt_sb->s_type->name,
71247@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
71248 va_start(args, fmt);
71249 vsprintf(buf, fmt, args);
71250 va_end(args);
71251- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
71252+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
71253 if (fd >= 0) {
71254 sys_ioctl(fd, FDEJECT, 0);
71255 sys_close(fd);
71256 }
71257 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
71258- fd = sys_open("/dev/console", O_RDWR, 0);
71259+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
71260 if (fd >= 0) {
71261 sys_ioctl(fd, TCGETS, (long)&termios);
71262 termios.c_lflag &= ~ICANON;
71263 sys_ioctl(fd, TCSETSF, (long)&termios);
71264- sys_read(fd, &c, 1);
71265+ sys_read(fd, (char __user *)&c, 1);
71266 termios.c_lflag |= ICANON;
71267 sys_ioctl(fd, TCSETSF, (long)&termios);
71268 sys_close(fd);
71269@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
71270 mount_root();
71271 out:
71272 devtmpfs_mount("dev");
71273- sys_mount(".", "/", NULL, MS_MOVE, NULL);
71274- sys_chroot(".");
71275+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
71276+ sys_chroot((__force char __user *)".");
71277 }
71278diff --git a/init/do_mounts.h b/init/do_mounts.h
71279index f5b978a..69dbfe8 100644
71280--- a/init/do_mounts.h
71281+++ b/init/do_mounts.h
71282@@ -15,15 +15,15 @@ extern int root_mountflags;
71283
71284 static inline int create_dev(char *name, dev_t dev)
71285 {
71286- sys_unlink(name);
71287- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
71288+ sys_unlink((char __force_user *)name);
71289+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
71290 }
71291
71292 #if BITS_PER_LONG == 32
71293 static inline u32 bstat(char *name)
71294 {
71295 struct stat64 stat;
71296- if (sys_stat64(name, &stat) != 0)
71297+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
71298 return 0;
71299 if (!S_ISBLK(stat.st_mode))
71300 return 0;
71301@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
71302 static inline u32 bstat(char *name)
71303 {
71304 struct stat stat;
71305- if (sys_newstat(name, &stat) != 0)
71306+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
71307 return 0;
71308 if (!S_ISBLK(stat.st_mode))
71309 return 0;
71310diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
71311index 614241b..4da046b 100644
71312--- a/init/do_mounts_initrd.c
71313+++ b/init/do_mounts_initrd.c
71314@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
71315 sys_close(old_fd);sys_close(root_fd);
71316 sys_close(0);sys_close(1);sys_close(2);
71317 sys_setsid();
71318- (void) sys_open("/dev/console",O_RDWR,0);
71319+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
71320 (void) sys_dup(0);
71321 (void) sys_dup(0);
71322 return kernel_execve(shell, argv, envp_init);
71323@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
71324 create_dev("/dev/root.old", Root_RAM0);
71325 /* mount initrd on rootfs' /root */
71326 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
71327- sys_mkdir("/old", 0700);
71328- root_fd = sys_open("/", 0, 0);
71329- old_fd = sys_open("/old", 0, 0);
71330+ sys_mkdir((const char __force_user *)"/old", 0700);
71331+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
71332+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
71333 /* move initrd over / and chdir/chroot in initrd root */
71334- sys_chdir("/root");
71335- sys_mount(".", "/", NULL, MS_MOVE, NULL);
71336- sys_chroot(".");
71337+ sys_chdir((const char __force_user *)"/root");
71338+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
71339+ sys_chroot((const char __force_user *)".");
71340
71341 /*
71342 * In case that a resume from disk is carried out by linuxrc or one of
71343@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
71344
71345 /* move initrd to rootfs' /old */
71346 sys_fchdir(old_fd);
71347- sys_mount("/", ".", NULL, MS_MOVE, NULL);
71348+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
71349 /* switch root and cwd back to / of rootfs */
71350 sys_fchdir(root_fd);
71351- sys_chroot(".");
71352+ sys_chroot((const char __force_user *)".");
71353 sys_close(old_fd);
71354 sys_close(root_fd);
71355
71356 if (new_decode_dev(real_root_dev) == Root_RAM0) {
71357- sys_chdir("/old");
71358+ sys_chdir((const char __force_user *)"/old");
71359 return;
71360 }
71361
71362@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
71363 mount_root();
71364
71365 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
71366- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
71367+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
71368 if (!error)
71369 printk("okay\n");
71370 else {
71371- int fd = sys_open("/dev/root.old", O_RDWR, 0);
71372+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
71373 if (error == -ENOENT)
71374 printk("/initrd does not exist. Ignored.\n");
71375 else
71376 printk("failed\n");
71377 printk(KERN_NOTICE "Unmounting old root\n");
71378- sys_umount("/old", MNT_DETACH);
71379+ sys_umount((char __force_user *)"/old", MNT_DETACH);
71380 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
71381 if (fd < 0) {
71382 error = fd;
71383@@ -119,11 +119,11 @@ int __init initrd_load(void)
71384 * mounted in the normal path.
71385 */
71386 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
71387- sys_unlink("/initrd.image");
71388+ sys_unlink((const char __force_user *)"/initrd.image");
71389 handle_initrd();
71390 return 1;
71391 }
71392 }
71393- sys_unlink("/initrd.image");
71394+ sys_unlink((const char __force_user *)"/initrd.image");
71395 return 0;
71396 }
71397diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
71398index 69aebbf..c0bf6a7 100644
71399--- a/init/do_mounts_md.c
71400+++ b/init/do_mounts_md.c
71401@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
71402 partitioned ? "_d" : "", minor,
71403 md_setup_args[ent].device_names);
71404
71405- fd = sys_open(name, 0, 0);
71406+ fd = sys_open((char __force_user *)name, 0, 0);
71407 if (fd < 0) {
71408 printk(KERN_ERR "md: open failed - cannot start "
71409 "array %s\n", name);
71410@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
71411 * array without it
71412 */
71413 sys_close(fd);
71414- fd = sys_open(name, 0, 0);
71415+ fd = sys_open((char __force_user *)name, 0, 0);
71416 sys_ioctl(fd, BLKRRPART, 0);
71417 }
71418 sys_close(fd);
71419@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
71420
71421 wait_for_device_probe();
71422
71423- fd = sys_open("/dev/md0", 0, 0);
71424+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
71425 if (fd >= 0) {
71426 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
71427 sys_close(fd);
71428diff --git a/init/initramfs.c b/init/initramfs.c
71429index 1fd59b8..a01b079 100644
71430--- a/init/initramfs.c
71431+++ b/init/initramfs.c
71432@@ -74,7 +74,7 @@ static void __init free_hash(void)
71433 }
71434 }
71435
71436-static long __init do_utime(char __user *filename, time_t mtime)
71437+static long __init do_utime(__force char __user *filename, time_t mtime)
71438 {
71439 struct timespec t[2];
71440
71441@@ -109,7 +109,7 @@ static void __init dir_utime(void)
71442 struct dir_entry *de, *tmp;
71443 list_for_each_entry_safe(de, tmp, &dir_list, list) {
71444 list_del(&de->list);
71445- do_utime(de->name, de->mtime);
71446+ do_utime((char __force_user *)de->name, de->mtime);
71447 kfree(de->name);
71448 kfree(de);
71449 }
71450@@ -271,7 +271,7 @@ static int __init maybe_link(void)
71451 if (nlink >= 2) {
71452 char *old = find_link(major, minor, ino, mode, collected);
71453 if (old)
71454- return (sys_link(old, collected) < 0) ? -1 : 1;
71455+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
71456 }
71457 return 0;
71458 }
71459@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
71460 {
71461 struct stat st;
71462
71463- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
71464+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
71465 if (S_ISDIR(st.st_mode))
71466- sys_rmdir(path);
71467+ sys_rmdir((char __force_user *)path);
71468 else
71469- sys_unlink(path);
71470+ sys_unlink((char __force_user *)path);
71471 }
71472 }
71473
71474@@ -305,7 +305,7 @@ static int __init do_name(void)
71475 int openflags = O_WRONLY|O_CREAT;
71476 if (ml != 1)
71477 openflags |= O_TRUNC;
71478- wfd = sys_open(collected, openflags, mode);
71479+ wfd = sys_open((char __force_user *)collected, openflags, mode);
71480
71481 if (wfd >= 0) {
71482 sys_fchown(wfd, uid, gid);
71483@@ -317,17 +317,17 @@ static int __init do_name(void)
71484 }
71485 }
71486 } else if (S_ISDIR(mode)) {
71487- sys_mkdir(collected, mode);
71488- sys_chown(collected, uid, gid);
71489- sys_chmod(collected, mode);
71490+ sys_mkdir((char __force_user *)collected, mode);
71491+ sys_chown((char __force_user *)collected, uid, gid);
71492+ sys_chmod((char __force_user *)collected, mode);
71493 dir_add(collected, mtime);
71494 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
71495 S_ISFIFO(mode) || S_ISSOCK(mode)) {
71496 if (maybe_link() == 0) {
71497- sys_mknod(collected, mode, rdev);
71498- sys_chown(collected, uid, gid);
71499- sys_chmod(collected, mode);
71500- do_utime(collected, mtime);
71501+ sys_mknod((char __force_user *)collected, mode, rdev);
71502+ sys_chown((char __force_user *)collected, uid, gid);
71503+ sys_chmod((char __force_user *)collected, mode);
71504+ do_utime((char __force_user *)collected, mtime);
71505 }
71506 }
71507 return 0;
71508@@ -336,15 +336,15 @@ static int __init do_name(void)
71509 static int __init do_copy(void)
71510 {
71511 if (count >= body_len) {
71512- sys_write(wfd, victim, body_len);
71513+ sys_write(wfd, (char __force_user *)victim, body_len);
71514 sys_close(wfd);
71515- do_utime(vcollected, mtime);
71516+ do_utime((char __force_user *)vcollected, mtime);
71517 kfree(vcollected);
71518 eat(body_len);
71519 state = SkipIt;
71520 return 0;
71521 } else {
71522- sys_write(wfd, victim, count);
71523+ sys_write(wfd, (char __force_user *)victim, count);
71524 body_len -= count;
71525 eat(count);
71526 return 1;
71527@@ -355,9 +355,9 @@ static int __init do_symlink(void)
71528 {
71529 collected[N_ALIGN(name_len) + body_len] = '\0';
71530 clean_path(collected, 0);
71531- sys_symlink(collected + N_ALIGN(name_len), collected);
71532- sys_lchown(collected, uid, gid);
71533- do_utime(collected, mtime);
71534+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
71535+ sys_lchown((char __force_user *)collected, uid, gid);
71536+ do_utime((char __force_user *)collected, mtime);
71537 state = SkipIt;
71538 next_state = Reset;
71539 return 0;
71540diff --git a/init/main.c b/init/main.c
71541index 1eb4bd5..fea5bbe 100644
71542--- a/init/main.c
71543+++ b/init/main.c
71544@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
71545 #ifdef CONFIG_TC
71546 extern void tc_init(void);
71547 #endif
71548+extern void grsecurity_init(void);
71549
71550 enum system_states system_state __read_mostly;
71551 EXPORT_SYMBOL(system_state);
71552@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
71553
71554 __setup("reset_devices", set_reset_devices);
71555
71556+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
71557+extern char pax_enter_kernel_user[];
71558+extern char pax_exit_kernel_user[];
71559+extern pgdval_t clone_pgd_mask;
71560+#endif
71561+
71562+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
71563+static int __init setup_pax_nouderef(char *str)
71564+{
71565+#ifdef CONFIG_X86_32
71566+ unsigned int cpu;
71567+ struct desc_struct *gdt;
71568+
71569+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
71570+ gdt = get_cpu_gdt_table(cpu);
71571+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
71572+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
71573+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
71574+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
71575+ }
71576+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
71577+#else
71578+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
71579+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
71580+ clone_pgd_mask = ~(pgdval_t)0UL;
71581+#endif
71582+
71583+ return 0;
71584+}
71585+early_param("pax_nouderef", setup_pax_nouderef);
71586+#endif
71587+
71588+#ifdef CONFIG_PAX_SOFTMODE
71589+int pax_softmode;
71590+
71591+static int __init setup_pax_softmode(char *str)
71592+{
71593+ get_option(&str, &pax_softmode);
71594+ return 1;
71595+}
71596+__setup("pax_softmode=", setup_pax_softmode);
71597+#endif
71598+
71599 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
71600 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
71601 static const char *panic_later, *panic_param;
71602@@ -705,52 +749,53 @@ int initcall_debug;
71603 core_param(initcall_debug, initcall_debug, bool, 0644);
71604
71605 static char msgbuf[64];
71606-static struct boot_trace_call call;
71607-static struct boot_trace_ret ret;
71608+static struct boot_trace_call trace_call;
71609+static struct boot_trace_ret trace_ret;
71610
71611 int do_one_initcall(initcall_t fn)
71612 {
71613 int count = preempt_count();
71614 ktime_t calltime, delta, rettime;
71615+ const char *msg1 = "", *msg2 = "";
71616
71617 if (initcall_debug) {
71618- call.caller = task_pid_nr(current);
71619- printk("calling %pF @ %i\n", fn, call.caller);
71620+ trace_call.caller = task_pid_nr(current);
71621+ printk("calling %pF @ %i\n", fn, trace_call.caller);
71622 calltime = ktime_get();
71623- trace_boot_call(&call, fn);
71624+ trace_boot_call(&trace_call, fn);
71625 enable_boot_trace();
71626 }
71627
71628- ret.result = fn();
71629+ trace_ret.result = fn();
71630
71631 if (initcall_debug) {
71632 disable_boot_trace();
71633 rettime = ktime_get();
71634 delta = ktime_sub(rettime, calltime);
71635- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71636- trace_boot_ret(&ret, fn);
71637+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71638+ trace_boot_ret(&trace_ret, fn);
71639 printk("initcall %pF returned %d after %Ld usecs\n", fn,
71640- ret.result, ret.duration);
71641+ trace_ret.result, trace_ret.duration);
71642 }
71643
71644 msgbuf[0] = 0;
71645
71646- if (ret.result && ret.result != -ENODEV && initcall_debug)
71647- sprintf(msgbuf, "error code %d ", ret.result);
71648+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
71649+ sprintf(msgbuf, "error code %d ", trace_ret.result);
71650
71651 if (preempt_count() != count) {
71652- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
71653+ msg1 = " preemption imbalance";
71654 preempt_count() = count;
71655 }
71656 if (irqs_disabled()) {
71657- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
71658+ msg2 = " disabled interrupts";
71659 local_irq_enable();
71660 }
71661- if (msgbuf[0]) {
71662- printk("initcall %pF returned with %s\n", fn, msgbuf);
71663+ if (msgbuf[0] || *msg1 || *msg2) {
71664+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
71665 }
71666
71667- return ret.result;
71668+ return trace_ret.result;
71669 }
71670
71671
71672@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
71673 if (!ramdisk_execute_command)
71674 ramdisk_execute_command = "/init";
71675
71676- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
71677+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
71678 ramdisk_execute_command = NULL;
71679 prepare_namespace();
71680 }
71681
71682+ grsecurity_init();
71683+
71684 /*
71685 * Ok, we have completed the initial bootup, and
71686 * we're essentially up and running. Get rid of the
71687diff --git a/init/noinitramfs.c b/init/noinitramfs.c
71688index f4c1a3a..96c19bd 100644
71689--- a/init/noinitramfs.c
71690+++ b/init/noinitramfs.c
71691@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
71692 {
71693 int err;
71694
71695- err = sys_mkdir("/dev", 0755);
71696+ err = sys_mkdir((const char __user *)"/dev", 0755);
71697 if (err < 0)
71698 goto out;
71699
71700@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
71701 if (err < 0)
71702 goto out;
71703
71704- err = sys_mkdir("/root", 0700);
71705+ err = sys_mkdir((const char __user *)"/root", 0700);
71706 if (err < 0)
71707 goto out;
71708
71709diff --git a/ipc/mqueue.c b/ipc/mqueue.c
71710index d01bc14..8df81db 100644
71711--- a/ipc/mqueue.c
71712+++ b/ipc/mqueue.c
71713@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
71714 mq_bytes = (mq_msg_tblsz +
71715 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
71716
71717+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
71718 spin_lock(&mq_lock);
71719 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
71720 u->mq_bytes + mq_bytes >
71721diff --git a/ipc/msg.c b/ipc/msg.c
71722index 779f762..4af9e36 100644
71723--- a/ipc/msg.c
71724+++ b/ipc/msg.c
71725@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71726 return security_msg_queue_associate(msq, msgflg);
71727 }
71728
71729+static struct ipc_ops msg_ops = {
71730+ .getnew = newque,
71731+ .associate = msg_security,
71732+ .more_checks = NULL
71733+};
71734+
71735 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71736 {
71737 struct ipc_namespace *ns;
71738- struct ipc_ops msg_ops;
71739 struct ipc_params msg_params;
71740
71741 ns = current->nsproxy->ipc_ns;
71742
71743- msg_ops.getnew = newque;
71744- msg_ops.associate = msg_security;
71745- msg_ops.more_checks = NULL;
71746-
71747 msg_params.key = key;
71748 msg_params.flg = msgflg;
71749
71750diff --git a/ipc/sem.c b/ipc/sem.c
71751index b781007..f738b04 100644
71752--- a/ipc/sem.c
71753+++ b/ipc/sem.c
71754@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71755 return 0;
71756 }
71757
71758+static struct ipc_ops sem_ops = {
71759+ .getnew = newary,
71760+ .associate = sem_security,
71761+ .more_checks = sem_more_checks
71762+};
71763+
71764 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71765 {
71766 struct ipc_namespace *ns;
71767- struct ipc_ops sem_ops;
71768 struct ipc_params sem_params;
71769
71770 ns = current->nsproxy->ipc_ns;
71771@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71772 if (nsems < 0 || nsems > ns->sc_semmsl)
71773 return -EINVAL;
71774
71775- sem_ops.getnew = newary;
71776- sem_ops.associate = sem_security;
71777- sem_ops.more_checks = sem_more_checks;
71778-
71779 sem_params.key = key;
71780 sem_params.flg = semflg;
71781 sem_params.u.nsems = nsems;
71782@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71783 ushort* sem_io = fast_sem_io;
71784 int nsems;
71785
71786+ pax_track_stack();
71787+
71788 sma = sem_lock_check(ns, semid);
71789 if (IS_ERR(sma))
71790 return PTR_ERR(sma);
71791@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71792 unsigned long jiffies_left = 0;
71793 struct ipc_namespace *ns;
71794
71795+ pax_track_stack();
71796+
71797 ns = current->nsproxy->ipc_ns;
71798
71799 if (nsops < 1 || semid < 0)
71800diff --git a/ipc/shm.c b/ipc/shm.c
71801index d30732c..e4992cd 100644
71802--- a/ipc/shm.c
71803+++ b/ipc/shm.c
71804@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71805 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71806 #endif
71807
71808+#ifdef CONFIG_GRKERNSEC
71809+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71810+ const time_t shm_createtime, const uid_t cuid,
71811+ const int shmid);
71812+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71813+ const time_t shm_createtime);
71814+#endif
71815+
71816 void shm_init_ns(struct ipc_namespace *ns)
71817 {
71818 ns->shm_ctlmax = SHMMAX;
71819@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71820 shp->shm_lprid = 0;
71821 shp->shm_atim = shp->shm_dtim = 0;
71822 shp->shm_ctim = get_seconds();
71823+#ifdef CONFIG_GRKERNSEC
71824+ {
71825+ struct timespec timeval;
71826+ do_posix_clock_monotonic_gettime(&timeval);
71827+
71828+ shp->shm_createtime = timeval.tv_sec;
71829+ }
71830+#endif
71831 shp->shm_segsz = size;
71832 shp->shm_nattch = 0;
71833 shp->shm_file = file;
71834@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71835 return 0;
71836 }
71837
71838+static struct ipc_ops shm_ops = {
71839+ .getnew = newseg,
71840+ .associate = shm_security,
71841+ .more_checks = shm_more_checks
71842+};
71843+
71844 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71845 {
71846 struct ipc_namespace *ns;
71847- struct ipc_ops shm_ops;
71848 struct ipc_params shm_params;
71849
71850 ns = current->nsproxy->ipc_ns;
71851
71852- shm_ops.getnew = newseg;
71853- shm_ops.associate = shm_security;
71854- shm_ops.more_checks = shm_more_checks;
71855-
71856 shm_params.key = key;
71857 shm_params.flg = shmflg;
71858 shm_params.u.size = size;
71859@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71860 f_mode = FMODE_READ | FMODE_WRITE;
71861 }
71862 if (shmflg & SHM_EXEC) {
71863+
71864+#ifdef CONFIG_PAX_MPROTECT
71865+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
71866+ goto out;
71867+#endif
71868+
71869 prot |= PROT_EXEC;
71870 acc_mode |= S_IXUGO;
71871 }
71872@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71873 if (err)
71874 goto out_unlock;
71875
71876+#ifdef CONFIG_GRKERNSEC
71877+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71878+ shp->shm_perm.cuid, shmid) ||
71879+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71880+ err = -EACCES;
71881+ goto out_unlock;
71882+ }
71883+#endif
71884+
71885 path.dentry = dget(shp->shm_file->f_path.dentry);
71886 path.mnt = shp->shm_file->f_path.mnt;
71887 shp->shm_nattch++;
71888+#ifdef CONFIG_GRKERNSEC
71889+ shp->shm_lapid = current->pid;
71890+#endif
71891 size = i_size_read(path.dentry->d_inode);
71892 shm_unlock(shp);
71893
71894diff --git a/kernel/acct.c b/kernel/acct.c
71895index a6605ca..ca91111 100644
71896--- a/kernel/acct.c
71897+++ b/kernel/acct.c
71898@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71899 */
71900 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71901 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71902- file->f_op->write(file, (char *)&ac,
71903+ file->f_op->write(file, (char __force_user *)&ac,
71904 sizeof(acct_t), &file->f_pos);
71905 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71906 set_fs(fs);
71907diff --git a/kernel/audit.c b/kernel/audit.c
71908index 5feed23..48415fd 100644
71909--- a/kernel/audit.c
71910+++ b/kernel/audit.c
71911@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71912 3) suppressed due to audit_rate_limit
71913 4) suppressed due to audit_backlog_limit
71914 */
71915-static atomic_t audit_lost = ATOMIC_INIT(0);
71916+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71917
71918 /* The netlink socket. */
71919 static struct sock *audit_sock;
71920@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71921 unsigned long now;
71922 int print;
71923
71924- atomic_inc(&audit_lost);
71925+ atomic_inc_unchecked(&audit_lost);
71926
71927 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71928
71929@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71930 printk(KERN_WARNING
71931 "audit: audit_lost=%d audit_rate_limit=%d "
71932 "audit_backlog_limit=%d\n",
71933- atomic_read(&audit_lost),
71934+ atomic_read_unchecked(&audit_lost),
71935 audit_rate_limit,
71936 audit_backlog_limit);
71937 audit_panic(message);
71938@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71939 status_set.pid = audit_pid;
71940 status_set.rate_limit = audit_rate_limit;
71941 status_set.backlog_limit = audit_backlog_limit;
71942- status_set.lost = atomic_read(&audit_lost);
71943+ status_set.lost = atomic_read_unchecked(&audit_lost);
71944 status_set.backlog = skb_queue_len(&audit_skb_queue);
71945 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71946 &status_set, sizeof(status_set));
71947@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71948 spin_unlock_irq(&tsk->sighand->siglock);
71949 }
71950 read_unlock(&tasklist_lock);
71951- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71952- &s, sizeof(s));
71953+
71954+ if (!err)
71955+ audit_send_reply(NETLINK_CB(skb).pid, seq,
71956+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71957 break;
71958 }
71959 case AUDIT_TTY_SET: {
71960@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71961 avail = audit_expand(ab,
71962 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71963 if (!avail)
71964- goto out;
71965+ goto out_va_end;
71966 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71967 }
71968- va_end(args2);
71969 if (len > 0)
71970 skb_put(skb, len);
71971+out_va_end:
71972+ va_end(args2);
71973 out:
71974 return;
71975 }
71976diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71977index 267e484..ac41bc3 100644
71978--- a/kernel/auditsc.c
71979+++ b/kernel/auditsc.c
71980@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71981 struct audit_buffer **ab,
71982 struct audit_aux_data_execve *axi)
71983 {
71984- int i;
71985- size_t len, len_sent = 0;
71986+ int i, len;
71987+ size_t len_sent = 0;
71988 const char __user *p;
71989 char *buf;
71990
71991@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71992 }
71993
71994 /* global counter which is incremented every time something logs in */
71995-static atomic_t session_id = ATOMIC_INIT(0);
71996+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71997
71998 /**
71999 * audit_set_loginuid - set a task's audit_context loginuid
72000@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
72001 */
72002 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
72003 {
72004- unsigned int sessionid = atomic_inc_return(&session_id);
72005+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
72006 struct audit_context *context = task->audit_context;
72007
72008 if (context && context->in_syscall) {
72009diff --git a/kernel/capability.c b/kernel/capability.c
72010index 8a944f5..db5001e 100644
72011--- a/kernel/capability.c
72012+++ b/kernel/capability.c
72013@@ -305,10 +305,26 @@ int capable(int cap)
72014 BUG();
72015 }
72016
72017- if (security_capable(cap) == 0) {
72018+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
72019 current->flags |= PF_SUPERPRIV;
72020 return 1;
72021 }
72022 return 0;
72023 }
72024+
72025+int capable_nolog(int cap)
72026+{
72027+ if (unlikely(!cap_valid(cap))) {
72028+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
72029+ BUG();
72030+ }
72031+
72032+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
72033+ current->flags |= PF_SUPERPRIV;
72034+ return 1;
72035+ }
72036+ return 0;
72037+}
72038+
72039 EXPORT_SYMBOL(capable);
72040+EXPORT_SYMBOL(capable_nolog);
72041diff --git a/kernel/cgroup.c b/kernel/cgroup.c
72042index 1fbcc74..7000012 100644
72043--- a/kernel/cgroup.c
72044+++ b/kernel/cgroup.c
72045@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
72046 struct hlist_head *hhead;
72047 struct cg_cgroup_link *link;
72048
72049+ pax_track_stack();
72050+
72051 /* First see if we already have a cgroup group that matches
72052 * the desired set */
72053 read_lock(&css_set_lock);
72054diff --git a/kernel/compat.c b/kernel/compat.c
72055index 8bc5578..186e44a 100644
72056--- a/kernel/compat.c
72057+++ b/kernel/compat.c
72058@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
72059 mm_segment_t oldfs;
72060 long ret;
72061
72062- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
72063+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
72064 oldfs = get_fs();
72065 set_fs(KERNEL_DS);
72066 ret = hrtimer_nanosleep_restart(restart);
72067@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
72068 oldfs = get_fs();
72069 set_fs(KERNEL_DS);
72070 ret = hrtimer_nanosleep(&tu,
72071- rmtp ? (struct timespec __user *)&rmt : NULL,
72072+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
72073 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
72074 set_fs(oldfs);
72075
72076@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
72077 mm_segment_t old_fs = get_fs();
72078
72079 set_fs(KERNEL_DS);
72080- ret = sys_sigpending((old_sigset_t __user *) &s);
72081+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
72082 set_fs(old_fs);
72083 if (ret == 0)
72084 ret = put_user(s, set);
72085@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
72086 old_fs = get_fs();
72087 set_fs(KERNEL_DS);
72088 ret = sys_sigprocmask(how,
72089- set ? (old_sigset_t __user *) &s : NULL,
72090- oset ? (old_sigset_t __user *) &s : NULL);
72091+ set ? (old_sigset_t __force_user *) &s : NULL,
72092+ oset ? (old_sigset_t __force_user *) &s : NULL);
72093 set_fs(old_fs);
72094 if (ret == 0)
72095 if (oset)
72096@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
72097 mm_segment_t old_fs = get_fs();
72098
72099 set_fs(KERNEL_DS);
72100- ret = sys_old_getrlimit(resource, &r);
72101+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
72102 set_fs(old_fs);
72103
72104 if (!ret) {
72105@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
72106 mm_segment_t old_fs = get_fs();
72107
72108 set_fs(KERNEL_DS);
72109- ret = sys_getrusage(who, (struct rusage __user *) &r);
72110+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
72111 set_fs(old_fs);
72112
72113 if (ret)
72114@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
72115 set_fs (KERNEL_DS);
72116 ret = sys_wait4(pid,
72117 (stat_addr ?
72118- (unsigned int __user *) &status : NULL),
72119- options, (struct rusage __user *) &r);
72120+ (unsigned int __force_user *) &status : NULL),
72121+ options, (struct rusage __force_user *) &r);
72122 set_fs (old_fs);
72123
72124 if (ret > 0) {
72125@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
72126 memset(&info, 0, sizeof(info));
72127
72128 set_fs(KERNEL_DS);
72129- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
72130- uru ? (struct rusage __user *)&ru : NULL);
72131+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
72132+ uru ? (struct rusage __force_user *)&ru : NULL);
72133 set_fs(old_fs);
72134
72135 if ((ret < 0) || (info.si_signo == 0))
72136@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
72137 oldfs = get_fs();
72138 set_fs(KERNEL_DS);
72139 err = sys_timer_settime(timer_id, flags,
72140- (struct itimerspec __user *) &newts,
72141- (struct itimerspec __user *) &oldts);
72142+ (struct itimerspec __force_user *) &newts,
72143+ (struct itimerspec __force_user *) &oldts);
72144 set_fs(oldfs);
72145 if (!err && old && put_compat_itimerspec(old, &oldts))
72146 return -EFAULT;
72147@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
72148 oldfs = get_fs();
72149 set_fs(KERNEL_DS);
72150 err = sys_timer_gettime(timer_id,
72151- (struct itimerspec __user *) &ts);
72152+ (struct itimerspec __force_user *) &ts);
72153 set_fs(oldfs);
72154 if (!err && put_compat_itimerspec(setting, &ts))
72155 return -EFAULT;
72156@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
72157 oldfs = get_fs();
72158 set_fs(KERNEL_DS);
72159 err = sys_clock_settime(which_clock,
72160- (struct timespec __user *) &ts);
72161+ (struct timespec __force_user *) &ts);
72162 set_fs(oldfs);
72163 return err;
72164 }
72165@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
72166 oldfs = get_fs();
72167 set_fs(KERNEL_DS);
72168 err = sys_clock_gettime(which_clock,
72169- (struct timespec __user *) &ts);
72170+ (struct timespec __force_user *) &ts);
72171 set_fs(oldfs);
72172 if (!err && put_compat_timespec(&ts, tp))
72173 return -EFAULT;
72174@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
72175 oldfs = get_fs();
72176 set_fs(KERNEL_DS);
72177 err = sys_clock_getres(which_clock,
72178- (struct timespec __user *) &ts);
72179+ (struct timespec __force_user *) &ts);
72180 set_fs(oldfs);
72181 if (!err && tp && put_compat_timespec(&ts, tp))
72182 return -EFAULT;
72183@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
72184 long err;
72185 mm_segment_t oldfs;
72186 struct timespec tu;
72187- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
72188+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
72189
72190- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
72191+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
72192 oldfs = get_fs();
72193 set_fs(KERNEL_DS);
72194 err = clock_nanosleep_restart(restart);
72195@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
72196 oldfs = get_fs();
72197 set_fs(KERNEL_DS);
72198 err = sys_clock_nanosleep(which_clock, flags,
72199- (struct timespec __user *) &in,
72200- (struct timespec __user *) &out);
72201+ (struct timespec __force_user *) &in,
72202+ (struct timespec __force_user *) &out);
72203 set_fs(oldfs);
72204
72205 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
72206diff --git a/kernel/configs.c b/kernel/configs.c
72207index abaee68..047facd 100644
72208--- a/kernel/configs.c
72209+++ b/kernel/configs.c
72210@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
72211 struct proc_dir_entry *entry;
72212
72213 /* create the current config file */
72214+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
72215+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
72216+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
72217+ &ikconfig_file_ops);
72218+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72219+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
72220+ &ikconfig_file_ops);
72221+#endif
72222+#else
72223 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
72224 &ikconfig_file_ops);
72225+#endif
72226+
72227 if (!entry)
72228 return -ENOMEM;
72229
72230diff --git a/kernel/cpu.c b/kernel/cpu.c
72231index 3f2f04f..4e53ded 100644
72232--- a/kernel/cpu.c
72233+++ b/kernel/cpu.c
72234@@ -20,7 +20,7 @@
72235 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
72236 static DEFINE_MUTEX(cpu_add_remove_lock);
72237
72238-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
72239+static RAW_NOTIFIER_HEAD(cpu_chain);
72240
72241 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
72242 * Should always be manipulated under cpu_add_remove_lock
72243diff --git a/kernel/cred.c b/kernel/cred.c
72244index 0b5b5fc..f7fe51a 100644
72245--- a/kernel/cred.c
72246+++ b/kernel/cred.c
72247@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
72248 */
72249 void __put_cred(struct cred *cred)
72250 {
72251+ pax_track_stack();
72252+
72253 kdebug("__put_cred(%p{%d,%d})", cred,
72254 atomic_read(&cred->usage),
72255 read_cred_subscribers(cred));
72256@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
72257 {
72258 struct cred *cred;
72259
72260+ pax_track_stack();
72261+
72262 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
72263 atomic_read(&tsk->cred->usage),
72264 read_cred_subscribers(tsk->cred));
72265@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
72266 validate_creds(cred);
72267 put_cred(cred);
72268 }
72269+
72270+#ifdef CONFIG_GRKERNSEC_SETXID
72271+ cred = (struct cred *) tsk->delayed_cred;
72272+ if (cred) {
72273+ tsk->delayed_cred = NULL;
72274+ validate_creds(cred);
72275+ put_cred(cred);
72276+ }
72277+#endif
72278 }
72279
72280 /**
72281@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
72282 {
72283 const struct cred *cred;
72284
72285+ pax_track_stack();
72286+
72287 rcu_read_lock();
72288
72289 do {
72290@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
72291 {
72292 struct cred *new;
72293
72294+ pax_track_stack();
72295+
72296 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
72297 if (!new)
72298 return NULL;
72299@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
72300 const struct cred *old;
72301 struct cred *new;
72302
72303+ pax_track_stack();
72304+
72305 validate_process_creds();
72306
72307 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72308@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
72309 struct thread_group_cred *tgcred = NULL;
72310 struct cred *new;
72311
72312+ pax_track_stack();
72313+
72314 #ifdef CONFIG_KEYS
72315 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
72316 if (!tgcred)
72317@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
72318 struct cred *new;
72319 int ret;
72320
72321+ pax_track_stack();
72322+
72323 mutex_init(&p->cred_guard_mutex);
72324
72325 if (
72326@@ -523,11 +546,13 @@ error_put:
72327 * Always returns 0 thus allowing this function to be tail-called at the end
72328 * of, say, sys_setgid().
72329 */
72330-int commit_creds(struct cred *new)
72331+static int __commit_creds(struct cred *new)
72332 {
72333 struct task_struct *task = current;
72334 const struct cred *old = task->real_cred;
72335
72336+ pax_track_stack();
72337+
72338 kdebug("commit_creds(%p{%d,%d})", new,
72339 atomic_read(&new->usage),
72340 read_cred_subscribers(new));
72341@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
72342
72343 get_cred(new); /* we will require a ref for the subj creds too */
72344
72345+ gr_set_role_label(task, new->uid, new->gid);
72346+
72347 /* dumpability changes */
72348 if (old->euid != new->euid ||
72349 old->egid != new->egid ||
72350@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
72351 key_fsgid_changed(task);
72352
72353 /* do it
72354- * - What if a process setreuid()'s and this brings the
72355- * new uid over his NPROC rlimit? We can check this now
72356- * cheaply with the new uid cache, so if it matters
72357- * we should be checking for it. -DaveM
72358+ * RLIMIT_NPROC limits on user->processes have already been checked
72359+ * in set_user().
72360 */
72361 alter_cred_subscribers(new, 2);
72362 if (new->user != old->user)
72363@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
72364 put_cred(old);
72365 return 0;
72366 }
72367+
72368+#ifdef CONFIG_GRKERNSEC_SETXID
72369+extern int set_user(struct cred *new);
72370+
72371+void gr_delayed_cred_worker(void)
72372+{
72373+ const struct cred *new = current->delayed_cred;
72374+ struct cred *ncred;
72375+
72376+ current->delayed_cred = NULL;
72377+
72378+ if (current_uid() && new != NULL) {
72379+ // from doing get_cred on it when queueing this
72380+ put_cred(new);
72381+ return;
72382+ } else if (new == NULL)
72383+ return;
72384+
72385+ ncred = prepare_creds();
72386+ if (!ncred)
72387+ goto die;
72388+ // uids
72389+ ncred->uid = new->uid;
72390+ ncred->euid = new->euid;
72391+ ncred->suid = new->suid;
72392+ ncred->fsuid = new->fsuid;
72393+ // gids
72394+ ncred->gid = new->gid;
72395+ ncred->egid = new->egid;
72396+ ncred->sgid = new->sgid;
72397+ ncred->fsgid = new->fsgid;
72398+ // groups
72399+ if (set_groups(ncred, new->group_info) < 0) {
72400+ abort_creds(ncred);
72401+ goto die;
72402+ }
72403+ // caps
72404+ ncred->securebits = new->securebits;
72405+ ncred->cap_inheritable = new->cap_inheritable;
72406+ ncred->cap_permitted = new->cap_permitted;
72407+ ncred->cap_effective = new->cap_effective;
72408+ ncred->cap_bset = new->cap_bset;
72409+
72410+ if (set_user(ncred)) {
72411+ abort_creds(ncred);
72412+ goto die;
72413+ }
72414+
72415+ // from doing get_cred on it when queueing this
72416+ put_cred(new);
72417+
72418+ __commit_creds(ncred);
72419+ return;
72420+die:
72421+ // from doing get_cred on it when queueing this
72422+ put_cred(new);
72423+ do_group_exit(SIGKILL);
72424+}
72425+#endif
72426+
72427+int commit_creds(struct cred *new)
72428+{
72429+#ifdef CONFIG_GRKERNSEC_SETXID
72430+ struct task_struct *t;
72431+
72432+ /* we won't get called with tasklist_lock held for writing
72433+ and interrupts disabled as the cred struct in that case is
72434+ init_cred
72435+ */
72436+ if (grsec_enable_setxid && !current_is_single_threaded() &&
72437+ !current_uid() && new->uid) {
72438+ rcu_read_lock();
72439+ read_lock(&tasklist_lock);
72440+ for (t = next_thread(current); t != current;
72441+ t = next_thread(t)) {
72442+ if (t->delayed_cred == NULL) {
72443+ t->delayed_cred = get_cred(new);
72444+ set_tsk_need_resched(t);
72445+ }
72446+ }
72447+ read_unlock(&tasklist_lock);
72448+ rcu_read_unlock();
72449+ }
72450+#endif
72451+ return __commit_creds(new);
72452+}
72453+
72454 EXPORT_SYMBOL(commit_creds);
72455
72456+
72457 /**
72458 * abort_creds - Discard a set of credentials and unlock the current task
72459 * @new: The credentials that were going to be applied
72460@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
72461 */
72462 void abort_creds(struct cred *new)
72463 {
72464+ pax_track_stack();
72465+
72466 kdebug("abort_creds(%p{%d,%d})", new,
72467 atomic_read(&new->usage),
72468 read_cred_subscribers(new));
72469@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
72470 {
72471 const struct cred *old = current->cred;
72472
72473+ pax_track_stack();
72474+
72475 kdebug("override_creds(%p{%d,%d})", new,
72476 atomic_read(&new->usage),
72477 read_cred_subscribers(new));
72478@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
72479 {
72480 const struct cred *override = current->cred;
72481
72482+ pax_track_stack();
72483+
72484 kdebug("revert_creds(%p{%d,%d})", old,
72485 atomic_read(&old->usage),
72486 read_cred_subscribers(old));
72487@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
72488 const struct cred *old;
72489 struct cred *new;
72490
72491+ pax_track_stack();
72492+
72493 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
72494 if (!new)
72495 return NULL;
72496@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
72497 */
72498 int set_security_override(struct cred *new, u32 secid)
72499 {
72500+ pax_track_stack();
72501+
72502 return security_kernel_act_as(new, secid);
72503 }
72504 EXPORT_SYMBOL(set_security_override);
72505@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
72506 u32 secid;
72507 int ret;
72508
72509+ pax_track_stack();
72510+
72511 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
72512 if (ret < 0)
72513 return ret;
72514diff --git a/kernel/exit.c b/kernel/exit.c
72515index 0f8fae3..9344a56 100644
72516--- a/kernel/exit.c
72517+++ b/kernel/exit.c
72518@@ -55,6 +55,10 @@
72519 #include <asm/pgtable.h>
72520 #include <asm/mmu_context.h>
72521
72522+#ifdef CONFIG_GRKERNSEC
72523+extern rwlock_t grsec_exec_file_lock;
72524+#endif
72525+
72526 static void exit_mm(struct task_struct * tsk);
72527
72528 static void __unhash_process(struct task_struct *p)
72529@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
72530 struct task_struct *leader;
72531 int zap_leader;
72532 repeat:
72533+#ifdef CONFIG_NET
72534+ gr_del_task_from_ip_table(p);
72535+#endif
72536+
72537 tracehook_prepare_release_task(p);
72538 /* don't need to get the RCU readlock here - the process is dead and
72539 * can't be modifying its own credentials */
72540@@ -397,7 +405,7 @@ int allow_signal(int sig)
72541 * know it'll be handled, so that they don't get converted to
72542 * SIGKILL or just silently dropped.
72543 */
72544- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
72545+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
72546 recalc_sigpending();
72547 spin_unlock_irq(&current->sighand->siglock);
72548 return 0;
72549@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
72550 vsnprintf(current->comm, sizeof(current->comm), name, args);
72551 va_end(args);
72552
72553+#ifdef CONFIG_GRKERNSEC
72554+ write_lock(&grsec_exec_file_lock);
72555+ if (current->exec_file) {
72556+ fput(current->exec_file);
72557+ current->exec_file = NULL;
72558+ }
72559+ write_unlock(&grsec_exec_file_lock);
72560+#endif
72561+
72562+ gr_set_kernel_label(current);
72563+
72564 /*
72565 * If we were started as result of loading a module, close all of the
72566 * user space pages. We don't need them, and if we didn't close them
72567@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
72568 struct task_struct *tsk = current;
72569 int group_dead;
72570
72571- profile_task_exit(tsk);
72572-
72573- WARN_ON(atomic_read(&tsk->fs_excl));
72574-
72575+ /*
72576+ * Check this first since set_fs() below depends on
72577+ * current_thread_info(), which we better not access when we're in
72578+ * interrupt context. Other than that, we want to do the set_fs()
72579+ * as early as possible.
72580+ */
72581 if (unlikely(in_interrupt()))
72582 panic("Aiee, killing interrupt handler!");
72583- if (unlikely(!tsk->pid))
72584- panic("Attempted to kill the idle task!");
72585
72586 /*
72587- * If do_exit is called because this processes oopsed, it's possible
72588+ * If do_exit is called because this processes Oops'ed, it's possible
72589 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
72590 * continuing. Amongst other possible reasons, this is to prevent
72591 * mm_release()->clear_child_tid() from writing to a user-controlled
72592@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
72593 */
72594 set_fs(USER_DS);
72595
72596+ profile_task_exit(tsk);
72597+
72598+ WARN_ON(atomic_read(&tsk->fs_excl));
72599+
72600+ if (unlikely(!tsk->pid))
72601+ panic("Attempted to kill the idle task!");
72602+
72603 tracehook_report_exit(&code);
72604
72605 validate_creds_for_do_exit(tsk);
72606@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
72607 tsk->exit_code = code;
72608 taskstats_exit(tsk, group_dead);
72609
72610+ gr_acl_handle_psacct(tsk, code);
72611+ gr_acl_handle_exit();
72612+
72613 exit_mm(tsk);
72614
72615 if (group_dead)
72616@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
72617
72618 if (unlikely(wo->wo_flags & WNOWAIT)) {
72619 int exit_code = p->exit_code;
72620- int why, status;
72621+ int why;
72622
72623 get_task_struct(p);
72624 read_unlock(&tasklist_lock);
72625diff --git a/kernel/fork.c b/kernel/fork.c
72626index 4bde56f..29a9bab 100644
72627--- a/kernel/fork.c
72628+++ b/kernel/fork.c
72629@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
72630 *stackend = STACK_END_MAGIC; /* for overflow detection */
72631
72632 #ifdef CONFIG_CC_STACKPROTECTOR
72633- tsk->stack_canary = get_random_int();
72634+ tsk->stack_canary = pax_get_random_long();
72635 #endif
72636
72637 /* One for us, one for whoever does the "release_task()" (usually parent) */
72638@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72639 mm->locked_vm = 0;
72640 mm->mmap = NULL;
72641 mm->mmap_cache = NULL;
72642- mm->free_area_cache = oldmm->mmap_base;
72643- mm->cached_hole_size = ~0UL;
72644+ mm->free_area_cache = oldmm->free_area_cache;
72645+ mm->cached_hole_size = oldmm->cached_hole_size;
72646 mm->map_count = 0;
72647 cpumask_clear(mm_cpumask(mm));
72648 mm->mm_rb = RB_ROOT;
72649@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72650 tmp->vm_flags &= ~VM_LOCKED;
72651 tmp->vm_mm = mm;
72652 tmp->vm_next = tmp->vm_prev = NULL;
72653+ tmp->vm_mirror = NULL;
72654 anon_vma_link(tmp);
72655 file = tmp->vm_file;
72656 if (file) {
72657@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72658 if (retval)
72659 goto out;
72660 }
72661+
72662+#ifdef CONFIG_PAX_SEGMEXEC
72663+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
72664+ struct vm_area_struct *mpnt_m;
72665+
72666+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
72667+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
72668+
72669+ if (!mpnt->vm_mirror)
72670+ continue;
72671+
72672+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
72673+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
72674+ mpnt->vm_mirror = mpnt_m;
72675+ } else {
72676+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
72677+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
72678+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
72679+ mpnt->vm_mirror->vm_mirror = mpnt;
72680+ }
72681+ }
72682+ BUG_ON(mpnt_m);
72683+ }
72684+#endif
72685+
72686 /* a new mm has just been created */
72687 arch_dup_mmap(oldmm, mm);
72688 retval = 0;
72689@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
72690 write_unlock(&fs->lock);
72691 return -EAGAIN;
72692 }
72693- fs->users++;
72694+ atomic_inc(&fs->users);
72695 write_unlock(&fs->lock);
72696 return 0;
72697 }
72698 tsk->fs = copy_fs_struct(fs);
72699 if (!tsk->fs)
72700 return -ENOMEM;
72701+ gr_set_chroot_entries(tsk, &tsk->fs->root);
72702 return 0;
72703 }
72704
72705@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72706 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
72707 #endif
72708 retval = -EAGAIN;
72709+
72710+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
72711+
72712 if (atomic_read(&p->real_cred->user->processes) >=
72713 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
72714- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
72715- p->real_cred->user != INIT_USER)
72716+ if (p->real_cred->user != INIT_USER &&
72717+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
72718 goto bad_fork_free;
72719 }
72720+ current->flags &= ~PF_NPROC_EXCEEDED;
72721
72722 retval = copy_creds(p, clone_flags);
72723 if (retval < 0)
72724@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72725 goto bad_fork_free_pid;
72726 }
72727
72728+ gr_copy_label(p);
72729+
72730 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72731 /*
72732 * Clear TID on mm_release()?
72733@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
72734 bad_fork_free:
72735 free_task(p);
72736 fork_out:
72737+ gr_log_forkfail(retval);
72738+
72739 return ERR_PTR(retval);
72740 }
72741
72742@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
72743 if (clone_flags & CLONE_PARENT_SETTID)
72744 put_user(nr, parent_tidptr);
72745
72746+ gr_handle_brute_check();
72747+
72748 if (clone_flags & CLONE_VFORK) {
72749 p->vfork_done = &vfork;
72750 init_completion(&vfork);
72751@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72752 return 0;
72753
72754 /* don't need lock here; in the worst case we'll do useless copy */
72755- if (fs->users == 1)
72756+ if (atomic_read(&fs->users) == 1)
72757 return 0;
72758
72759 *new_fsp = copy_fs_struct(fs);
72760@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72761 fs = current->fs;
72762 write_lock(&fs->lock);
72763 current->fs = new_fs;
72764- if (--fs->users)
72765+ gr_set_chroot_entries(current, &current->fs->root);
72766+ if (atomic_dec_return(&fs->users))
72767 new_fs = NULL;
72768 else
72769 new_fs = fs;
72770diff --git a/kernel/futex.c b/kernel/futex.c
72771index fb98c9f..333faec 100644
72772--- a/kernel/futex.c
72773+++ b/kernel/futex.c
72774@@ -54,6 +54,7 @@
72775 #include <linux/mount.h>
72776 #include <linux/pagemap.h>
72777 #include <linux/syscalls.h>
72778+#include <linux/ptrace.h>
72779 #include <linux/signal.h>
72780 #include <linux/module.h>
72781 #include <linux/magic.h>
72782@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72783 struct page *page;
72784 int err, ro = 0;
72785
72786+#ifdef CONFIG_PAX_SEGMEXEC
72787+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72788+ return -EFAULT;
72789+#endif
72790+
72791 /*
72792 * The futex address must be "naturally" aligned.
72793 */
72794@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72795 struct futex_q q;
72796 int ret;
72797
72798+ pax_track_stack();
72799+
72800 if (!bitset)
72801 return -EINVAL;
72802
72803@@ -1871,7 +1879,7 @@ retry:
72804
72805 restart = &current_thread_info()->restart_block;
72806 restart->fn = futex_wait_restart;
72807- restart->futex.uaddr = (u32 *)uaddr;
72808+ restart->futex.uaddr = uaddr;
72809 restart->futex.val = val;
72810 restart->futex.time = abs_time->tv64;
72811 restart->futex.bitset = bitset;
72812@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72813 struct futex_q q;
72814 int res, ret;
72815
72816+ pax_track_stack();
72817+
72818 if (!bitset)
72819 return -EINVAL;
72820
72821@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72822 if (!p)
72823 goto err_unlock;
72824 ret = -EPERM;
72825+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72826+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72827+ goto err_unlock;
72828+#endif
72829 pcred = __task_cred(p);
72830 if (cred->euid != pcred->euid &&
72831 cred->euid != pcred->uid &&
72832@@ -2489,7 +2503,7 @@ retry:
72833 */
72834 static inline int fetch_robust_entry(struct robust_list __user **entry,
72835 struct robust_list __user * __user *head,
72836- int *pi)
72837+ unsigned int *pi)
72838 {
72839 unsigned long uentry;
72840
72841@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72842 {
72843 u32 curval;
72844 int i;
72845+ mm_segment_t oldfs;
72846
72847 /*
72848 * This will fail and we want it. Some arch implementations do
72849@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72850 * implementation, the non functional ones will return
72851 * -ENOSYS.
72852 */
72853+ oldfs = get_fs();
72854+ set_fs(USER_DS);
72855 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72856+ set_fs(oldfs);
72857 if (curval == -EFAULT)
72858 futex_cmpxchg_enabled = 1;
72859
72860diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72861index 2357165..eb25501 100644
72862--- a/kernel/futex_compat.c
72863+++ b/kernel/futex_compat.c
72864@@ -10,6 +10,7 @@
72865 #include <linux/compat.h>
72866 #include <linux/nsproxy.h>
72867 #include <linux/futex.h>
72868+#include <linux/ptrace.h>
72869
72870 #include <asm/uaccess.h>
72871
72872@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72873 {
72874 struct compat_robust_list_head __user *head;
72875 unsigned long ret;
72876- const struct cred *cred = current_cred(), *pcred;
72877+ const struct cred *cred = current_cred();
72878+ const struct cred *pcred;
72879
72880 if (!futex_cmpxchg_enabled)
72881 return -ENOSYS;
72882@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72883 if (!p)
72884 goto err_unlock;
72885 ret = -EPERM;
72886+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72887+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72888+ goto err_unlock;
72889+#endif
72890 pcred = __task_cred(p);
72891 if (cred->euid != pcred->euid &&
72892 cred->euid != pcred->uid &&
72893diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72894index 9b22d03..6295b62 100644
72895--- a/kernel/gcov/base.c
72896+++ b/kernel/gcov/base.c
72897@@ -102,11 +102,6 @@ void gcov_enable_events(void)
72898 }
72899
72900 #ifdef CONFIG_MODULES
72901-static inline int within(void *addr, void *start, unsigned long size)
72902-{
72903- return ((addr >= start) && (addr < start + size));
72904-}
72905-
72906 /* Update list and generate events when modules are unloaded. */
72907 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72908 void *data)
72909@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72910 prev = NULL;
72911 /* Remove entries located in module from linked list. */
72912 for (info = gcov_info_head; info; info = info->next) {
72913- if (within(info, mod->module_core, mod->core_size)) {
72914+ if (within_module_core_rw((unsigned long)info, mod)) {
72915 if (prev)
72916 prev->next = info->next;
72917 else
72918diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72919index a6e9d00..a0da4f9 100644
72920--- a/kernel/hrtimer.c
72921+++ b/kernel/hrtimer.c
72922@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72923 local_irq_restore(flags);
72924 }
72925
72926-static void run_hrtimer_softirq(struct softirq_action *h)
72927+static void run_hrtimer_softirq(void)
72928 {
72929 hrtimer_peek_ahead_timers();
72930 }
72931diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72932index 8b6b8b6..6bc87df 100644
72933--- a/kernel/kallsyms.c
72934+++ b/kernel/kallsyms.c
72935@@ -11,6 +11,9 @@
72936 * Changed the compression method from stem compression to "table lookup"
72937 * compression (see scripts/kallsyms.c for a more complete description)
72938 */
72939+#ifdef CONFIG_GRKERNSEC_HIDESYM
72940+#define __INCLUDED_BY_HIDESYM 1
72941+#endif
72942 #include <linux/kallsyms.h>
72943 #include <linux/module.h>
72944 #include <linux/init.h>
72945@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72946
72947 static inline int is_kernel_inittext(unsigned long addr)
72948 {
72949+ if (system_state != SYSTEM_BOOTING)
72950+ return 0;
72951+
72952 if (addr >= (unsigned long)_sinittext
72953 && addr <= (unsigned long)_einittext)
72954 return 1;
72955 return 0;
72956 }
72957
72958+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72959+#ifdef CONFIG_MODULES
72960+static inline int is_module_text(unsigned long addr)
72961+{
72962+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72963+ return 1;
72964+
72965+ addr = ktla_ktva(addr);
72966+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72967+}
72968+#else
72969+static inline int is_module_text(unsigned long addr)
72970+{
72971+ return 0;
72972+}
72973+#endif
72974+#endif
72975+
72976 static inline int is_kernel_text(unsigned long addr)
72977 {
72978 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72979@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72980
72981 static inline int is_kernel(unsigned long addr)
72982 {
72983+
72984+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72985+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
72986+ return 1;
72987+
72988+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72989+#else
72990 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72991+#endif
72992+
72993 return 1;
72994 return in_gate_area_no_task(addr);
72995 }
72996
72997 static int is_ksym_addr(unsigned long addr)
72998 {
72999+
73000+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73001+ if (is_module_text(addr))
73002+ return 0;
73003+#endif
73004+
73005 if (all_var)
73006 return is_kernel(addr);
73007
73008@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
73009
73010 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
73011 {
73012- iter->name[0] = '\0';
73013 iter->nameoff = get_symbol_offset(new_pos);
73014 iter->pos = new_pos;
73015 }
73016@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
73017 {
73018 struct kallsym_iter *iter = m->private;
73019
73020+#ifdef CONFIG_GRKERNSEC_HIDESYM
73021+ if (current_uid())
73022+ return 0;
73023+#endif
73024+
73025 /* Some debugging symbols have no name. Ignore them. */
73026 if (!iter->name[0])
73027 return 0;
73028@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
73029 struct kallsym_iter *iter;
73030 int ret;
73031
73032- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
73033+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
73034 if (!iter)
73035 return -ENOMEM;
73036 reset_iter(iter, 0);
73037diff --git a/kernel/kexec.c b/kernel/kexec.c
73038index f336e21..9c1c20b 100644
73039--- a/kernel/kexec.c
73040+++ b/kernel/kexec.c
73041@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
73042 unsigned long flags)
73043 {
73044 struct compat_kexec_segment in;
73045- struct kexec_segment out, __user *ksegments;
73046+ struct kexec_segment out;
73047+ struct kexec_segment __user *ksegments;
73048 unsigned long i, result;
73049
73050 /* Don't allow clients that don't understand the native
73051diff --git a/kernel/kgdb.c b/kernel/kgdb.c
73052index 53dae4b..9ba3743 100644
73053--- a/kernel/kgdb.c
73054+++ b/kernel/kgdb.c
73055@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
73056 /* Guard for recursive entry */
73057 static int exception_level;
73058
73059-static struct kgdb_io *kgdb_io_ops;
73060+static const struct kgdb_io *kgdb_io_ops;
73061 static DEFINE_SPINLOCK(kgdb_registration_lock);
73062
73063 /* kgdb console driver is loaded */
73064@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
73065 */
73066 static atomic_t passive_cpu_wait[NR_CPUS];
73067 static atomic_t cpu_in_kgdb[NR_CPUS];
73068-atomic_t kgdb_setting_breakpoint;
73069+atomic_unchecked_t kgdb_setting_breakpoint;
73070
73071 struct task_struct *kgdb_usethread;
73072 struct task_struct *kgdb_contthread;
73073@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
73074 sizeof(unsigned long)];
73075
73076 /* to keep track of the CPU which is doing the single stepping*/
73077-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73078+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73079
73080 /*
73081 * If you are debugging a problem where roundup (the collection of
73082@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
73083 return 0;
73084 if (kgdb_connected)
73085 return 1;
73086- if (atomic_read(&kgdb_setting_breakpoint))
73087+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
73088 return 1;
73089 if (print_wait)
73090 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
73091@@ -1426,8 +1426,8 @@ acquirelock:
73092 * instance of the exception handler wanted to come into the
73093 * debugger on a different CPU via a single step
73094 */
73095- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
73096- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
73097+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
73098+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
73099
73100 atomic_set(&kgdb_active, -1);
73101 touch_softlockup_watchdog();
73102@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
73103 *
73104 * Register it with the KGDB core.
73105 */
73106-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
73107+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
73108 {
73109 int err;
73110
73111@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
73112 *
73113 * Unregister it with the KGDB core.
73114 */
73115-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
73116+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
73117 {
73118 BUG_ON(kgdb_connected);
73119
73120@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
73121 */
73122 void kgdb_breakpoint(void)
73123 {
73124- atomic_set(&kgdb_setting_breakpoint, 1);
73125+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
73126 wmb(); /* Sync point before breakpoint */
73127 arch_kgdb_breakpoint();
73128 wmb(); /* Sync point after breakpoint */
73129- atomic_set(&kgdb_setting_breakpoint, 0);
73130+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
73131 }
73132 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
73133
73134diff --git a/kernel/kmod.c b/kernel/kmod.c
73135index d206078..e27ba6a 100644
73136--- a/kernel/kmod.c
73137+++ b/kernel/kmod.c
73138@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
73139 * If module auto-loading support is disabled then this function
73140 * becomes a no-operation.
73141 */
73142-int __request_module(bool wait, const char *fmt, ...)
73143+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
73144 {
73145- va_list args;
73146 char module_name[MODULE_NAME_LEN];
73147 unsigned int max_modprobes;
73148 int ret;
73149- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
73150+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
73151 static char *envp[] = { "HOME=/",
73152 "TERM=linux",
73153 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
73154@@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
73155 if (ret)
73156 return ret;
73157
73158- va_start(args, fmt);
73159- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
73160- va_end(args);
73161+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
73162 if (ret >= MODULE_NAME_LEN)
73163 return -ENAMETOOLONG;
73164
73165+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73166+ if (!current_uid()) {
73167+ /* hack to workaround consolekit/udisks stupidity */
73168+ read_lock(&tasklist_lock);
73169+ if (!strcmp(current->comm, "mount") &&
73170+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
73171+ read_unlock(&tasklist_lock);
73172+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
73173+ return -EPERM;
73174+ }
73175+ read_unlock(&tasklist_lock);
73176+ }
73177+#endif
73178+
73179 /* If modprobe needs a service that is in a module, we get a recursive
73180 * loop. Limit the number of running kmod threads to max_threads/2 or
73181 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
73182@@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
73183 atomic_dec(&kmod_concurrent);
73184 return ret;
73185 }
73186+
73187+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
73188+{
73189+ va_list args;
73190+ int ret;
73191+
73192+ va_start(args, fmt);
73193+ ret = ____request_module(wait, module_param, fmt, args);
73194+ va_end(args);
73195+
73196+ return ret;
73197+}
73198+
73199+int __request_module(bool wait, const char *fmt, ...)
73200+{
73201+ va_list args;
73202+ int ret;
73203+
73204+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73205+ if (current_uid()) {
73206+ char module_param[MODULE_NAME_LEN];
73207+
73208+ memset(module_param, 0, sizeof(module_param));
73209+
73210+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
73211+
73212+ va_start(args, fmt);
73213+ ret = ____request_module(wait, module_param, fmt, args);
73214+ va_end(args);
73215+
73216+ return ret;
73217+ }
73218+#endif
73219+
73220+ va_start(args, fmt);
73221+ ret = ____request_module(wait, NULL, fmt, args);
73222+ va_end(args);
73223+
73224+ return ret;
73225+}
73226+
73227+
73228 EXPORT_SYMBOL(__request_module);
73229 #endif /* CONFIG_MODULES */
73230
73231@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
73232 *
73233 * Thus the __user pointer cast is valid here.
73234 */
73235- sys_wait4(pid, (int __user *)&ret, 0, NULL);
73236+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
73237
73238 /*
73239 * If ret is 0, either ____call_usermodehelper failed and the
73240diff --git a/kernel/kprobes.c b/kernel/kprobes.c
73241index 176d825..77fa8ea 100644
73242--- a/kernel/kprobes.c
73243+++ b/kernel/kprobes.c
73244@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
73245 * kernel image and loaded module images reside. This is required
73246 * so x86_64 can correctly handle the %rip-relative fixups.
73247 */
73248- kip->insns = module_alloc(PAGE_SIZE);
73249+ kip->insns = module_alloc_exec(PAGE_SIZE);
73250 if (!kip->insns) {
73251 kfree(kip);
73252 return NULL;
73253@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
73254 */
73255 if (!list_is_singular(&kprobe_insn_pages)) {
73256 list_del(&kip->list);
73257- module_free(NULL, kip->insns);
73258+ module_free_exec(NULL, kip->insns);
73259 kfree(kip);
73260 }
73261 return 1;
73262@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
73263 {
73264 int i, err = 0;
73265 unsigned long offset = 0, size = 0;
73266- char *modname, namebuf[128];
73267+ char *modname, namebuf[KSYM_NAME_LEN];
73268 const char *symbol_name;
73269 void *addr;
73270 struct kprobe_blackpoint *kb;
73271@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
73272 const char *sym = NULL;
73273 unsigned int i = *(loff_t *) v;
73274 unsigned long offset = 0;
73275- char *modname, namebuf[128];
73276+ char *modname, namebuf[KSYM_NAME_LEN];
73277
73278 head = &kprobe_table[i];
73279 preempt_disable();
73280diff --git a/kernel/lockdep.c b/kernel/lockdep.c
73281index d86fe89..d12fc66 100644
73282--- a/kernel/lockdep.c
73283+++ b/kernel/lockdep.c
73284@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
73285 /*
73286 * Various lockdep statistics:
73287 */
73288-atomic_t chain_lookup_hits;
73289-atomic_t chain_lookup_misses;
73290-atomic_t hardirqs_on_events;
73291-atomic_t hardirqs_off_events;
73292-atomic_t redundant_hardirqs_on;
73293-atomic_t redundant_hardirqs_off;
73294-atomic_t softirqs_on_events;
73295-atomic_t softirqs_off_events;
73296-atomic_t redundant_softirqs_on;
73297-atomic_t redundant_softirqs_off;
73298-atomic_t nr_unused_locks;
73299-atomic_t nr_cyclic_checks;
73300-atomic_t nr_find_usage_forwards_checks;
73301-atomic_t nr_find_usage_backwards_checks;
73302+atomic_unchecked_t chain_lookup_hits;
73303+atomic_unchecked_t chain_lookup_misses;
73304+atomic_unchecked_t hardirqs_on_events;
73305+atomic_unchecked_t hardirqs_off_events;
73306+atomic_unchecked_t redundant_hardirqs_on;
73307+atomic_unchecked_t redundant_hardirqs_off;
73308+atomic_unchecked_t softirqs_on_events;
73309+atomic_unchecked_t softirqs_off_events;
73310+atomic_unchecked_t redundant_softirqs_on;
73311+atomic_unchecked_t redundant_softirqs_off;
73312+atomic_unchecked_t nr_unused_locks;
73313+atomic_unchecked_t nr_cyclic_checks;
73314+atomic_unchecked_t nr_find_usage_forwards_checks;
73315+atomic_unchecked_t nr_find_usage_backwards_checks;
73316 #endif
73317
73318 /*
73319@@ -577,6 +577,10 @@ static int static_obj(void *obj)
73320 int i;
73321 #endif
73322
73323+#ifdef CONFIG_PAX_KERNEXEC
73324+ start = ktla_ktva(start);
73325+#endif
73326+
73327 /*
73328 * static variable?
73329 */
73330@@ -592,8 +596,7 @@ static int static_obj(void *obj)
73331 */
73332 for_each_possible_cpu(i) {
73333 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
73334- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
73335- + per_cpu_offset(i);
73336+ end = start + PERCPU_ENOUGH_ROOM;
73337
73338 if ((addr >= start) && (addr < end))
73339 return 1;
73340@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
73341 if (!static_obj(lock->key)) {
73342 debug_locks_off();
73343 printk("INFO: trying to register non-static key.\n");
73344+ printk("lock:%pS key:%pS.\n", lock, lock->key);
73345 printk("the code is fine but needs lockdep annotation.\n");
73346 printk("turning off the locking correctness validator.\n");
73347 dump_stack();
73348@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
73349 if (!class)
73350 return 0;
73351 }
73352- debug_atomic_inc((atomic_t *)&class->ops);
73353+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
73354 if (very_verbose(class)) {
73355 printk("\nacquire class [%p] %s", class->key, class->name);
73356 if (class->name_version > 1)
73357diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
73358index a2ee95a..092f0f2 100644
73359--- a/kernel/lockdep_internals.h
73360+++ b/kernel/lockdep_internals.h
73361@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
73362 /*
73363 * Various lockdep statistics:
73364 */
73365-extern atomic_t chain_lookup_hits;
73366-extern atomic_t chain_lookup_misses;
73367-extern atomic_t hardirqs_on_events;
73368-extern atomic_t hardirqs_off_events;
73369-extern atomic_t redundant_hardirqs_on;
73370-extern atomic_t redundant_hardirqs_off;
73371-extern atomic_t softirqs_on_events;
73372-extern atomic_t softirqs_off_events;
73373-extern atomic_t redundant_softirqs_on;
73374-extern atomic_t redundant_softirqs_off;
73375-extern atomic_t nr_unused_locks;
73376-extern atomic_t nr_cyclic_checks;
73377-extern atomic_t nr_cyclic_check_recursions;
73378-extern atomic_t nr_find_usage_forwards_checks;
73379-extern atomic_t nr_find_usage_forwards_recursions;
73380-extern atomic_t nr_find_usage_backwards_checks;
73381-extern atomic_t nr_find_usage_backwards_recursions;
73382-# define debug_atomic_inc(ptr) atomic_inc(ptr)
73383-# define debug_atomic_dec(ptr) atomic_dec(ptr)
73384-# define debug_atomic_read(ptr) atomic_read(ptr)
73385+extern atomic_unchecked_t chain_lookup_hits;
73386+extern atomic_unchecked_t chain_lookup_misses;
73387+extern atomic_unchecked_t hardirqs_on_events;
73388+extern atomic_unchecked_t hardirqs_off_events;
73389+extern atomic_unchecked_t redundant_hardirqs_on;
73390+extern atomic_unchecked_t redundant_hardirqs_off;
73391+extern atomic_unchecked_t softirqs_on_events;
73392+extern atomic_unchecked_t softirqs_off_events;
73393+extern atomic_unchecked_t redundant_softirqs_on;
73394+extern atomic_unchecked_t redundant_softirqs_off;
73395+extern atomic_unchecked_t nr_unused_locks;
73396+extern atomic_unchecked_t nr_cyclic_checks;
73397+extern atomic_unchecked_t nr_cyclic_check_recursions;
73398+extern atomic_unchecked_t nr_find_usage_forwards_checks;
73399+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
73400+extern atomic_unchecked_t nr_find_usage_backwards_checks;
73401+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
73402+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
73403+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
73404+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
73405 #else
73406 # define debug_atomic_inc(ptr) do { } while (0)
73407 # define debug_atomic_dec(ptr) do { } while (0)
73408diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
73409index d4aba4f..02a353f 100644
73410--- a/kernel/lockdep_proc.c
73411+++ b/kernel/lockdep_proc.c
73412@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
73413
73414 static void print_name(struct seq_file *m, struct lock_class *class)
73415 {
73416- char str[128];
73417+ char str[KSYM_NAME_LEN];
73418 const char *name = class->name;
73419
73420 if (!name) {
73421diff --git a/kernel/module.c b/kernel/module.c
73422index 4b270e6..2226274 100644
73423--- a/kernel/module.c
73424+++ b/kernel/module.c
73425@@ -55,6 +55,7 @@
73426 #include <linux/async.h>
73427 #include <linux/percpu.h>
73428 #include <linux/kmemleak.h>
73429+#include <linux/grsecurity.h>
73430
73431 #define CREATE_TRACE_POINTS
73432 #include <trace/events/module.h>
73433@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
73434 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
73435
73436 /* Bounds of module allocation, for speeding __module_address */
73437-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
73438+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
73439+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
73440
73441 int register_module_notifier(struct notifier_block * nb)
73442 {
73443@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73444 return true;
73445
73446 list_for_each_entry_rcu(mod, &modules, list) {
73447- struct symsearch arr[] = {
73448+ struct symsearch modarr[] = {
73449 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
73450 NOT_GPL_ONLY, false },
73451 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
73452@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
73453 #endif
73454 };
73455
73456- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
73457+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
73458 return true;
73459 }
73460 return false;
73461@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
73462 void *ptr;
73463 int cpu;
73464
73465- if (align > PAGE_SIZE) {
73466+ if (align-1 >= PAGE_SIZE) {
73467 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
73468 name, align, PAGE_SIZE);
73469 align = PAGE_SIZE;
73470@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
73471 * /sys/module/foo/sections stuff
73472 * J. Corbet <corbet@lwn.net>
73473 */
73474-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
73475+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
73476
73477 static inline bool sect_empty(const Elf_Shdr *sect)
73478 {
73479@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
73480 destroy_params(mod->kp, mod->num_kp);
73481
73482 /* This may be NULL, but that's OK */
73483- module_free(mod, mod->module_init);
73484+ module_free(mod, mod->module_init_rw);
73485+ module_free_exec(mod, mod->module_init_rx);
73486 kfree(mod->args);
73487 if (mod->percpu)
73488 percpu_modfree(mod->percpu);
73489@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
73490 percpu_modfree(mod->refptr);
73491 #endif
73492 /* Free lock-classes: */
73493- lockdep_free_key_range(mod->module_core, mod->core_size);
73494+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
73495+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
73496
73497 /* Finally, free the core (containing the module structure) */
73498- module_free(mod, mod->module_core);
73499+ module_free_exec(mod, mod->module_core_rx);
73500+ module_free(mod, mod->module_core_rw);
73501
73502 #ifdef CONFIG_MPU
73503 update_protections(current->mm);
73504@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73505 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73506 int ret = 0;
73507 const struct kernel_symbol *ksym;
73508+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73509+ int is_fs_load = 0;
73510+ int register_filesystem_found = 0;
73511+ char *p;
73512+
73513+ p = strstr(mod->args, "grsec_modharden_fs");
73514+
73515+ if (p) {
73516+ char *endptr = p + strlen("grsec_modharden_fs");
73517+ /* copy \0 as well */
73518+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
73519+ is_fs_load = 1;
73520+ }
73521+#endif
73522+
73523
73524 for (i = 1; i < n; i++) {
73525+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73526+ const char *name = strtab + sym[i].st_name;
73527+
73528+ /* it's a real shame this will never get ripped and copied
73529+ upstream! ;(
73530+ */
73531+ if (is_fs_load && !strcmp(name, "register_filesystem"))
73532+ register_filesystem_found = 1;
73533+#endif
73534 switch (sym[i].st_shndx) {
73535 case SHN_COMMON:
73536 /* We compiled with -fno-common. These are not
73537@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73538 strtab + sym[i].st_name, mod);
73539 /* Ok if resolved. */
73540 if (ksym) {
73541+ pax_open_kernel();
73542 sym[i].st_value = ksym->value;
73543+ pax_close_kernel();
73544 break;
73545 }
73546
73547@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
73548 secbase = (unsigned long)mod->percpu;
73549 else
73550 secbase = sechdrs[sym[i].st_shndx].sh_addr;
73551+ pax_open_kernel();
73552 sym[i].st_value += secbase;
73553+ pax_close_kernel();
73554 break;
73555 }
73556 }
73557
73558+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73559+ if (is_fs_load && !register_filesystem_found) {
73560+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
73561+ ret = -EPERM;
73562+ }
73563+#endif
73564+
73565 return ret;
73566 }
73567
73568@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
73569 || s->sh_entsize != ~0UL
73570 || strstarts(secstrings + s->sh_name, ".init"))
73571 continue;
73572- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
73573+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73574+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
73575+ else
73576+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
73577 DEBUGP("\t%s\n", secstrings + s->sh_name);
73578 }
73579- if (m == 0)
73580- mod->core_text_size = mod->core_size;
73581 }
73582
73583 DEBUGP("Init section allocation order:\n");
73584@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
73585 || s->sh_entsize != ~0UL
73586 || !strstarts(secstrings + s->sh_name, ".init"))
73587 continue;
73588- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
73589- | INIT_OFFSET_MASK);
73590+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
73591+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
73592+ else
73593+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
73594+ s->sh_entsize |= INIT_OFFSET_MASK;
73595 DEBUGP("\t%s\n", secstrings + s->sh_name);
73596 }
73597- if (m == 0)
73598- mod->init_text_size = mod->init_size;
73599 }
73600 }
73601
73602@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
73603
73604 /* As per nm */
73605 static char elf_type(const Elf_Sym *sym,
73606- Elf_Shdr *sechdrs,
73607- const char *secstrings,
73608- struct module *mod)
73609+ const Elf_Shdr *sechdrs,
73610+ const char *secstrings)
73611 {
73612 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
73613 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
73614@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
73615
73616 /* Put symbol section at end of init part of module. */
73617 symsect->sh_flags |= SHF_ALLOC;
73618- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
73619+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
73620 symindex) | INIT_OFFSET_MASK;
73621 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
73622
73623@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
73624 }
73625
73626 /* Append room for core symbols at end of core part. */
73627- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
73628- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
73629+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
73630+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
73631
73632 /* Put string table section at end of init part of module. */
73633 strsect->sh_flags |= SHF_ALLOC;
73634- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
73635+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
73636 strindex) | INIT_OFFSET_MASK;
73637 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
73638
73639 /* Append room for core symbols' strings at end of core part. */
73640- *pstroffs = mod->core_size;
73641+ *pstroffs = mod->core_size_rx;
73642 __set_bit(0, strmap);
73643- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
73644+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
73645
73646 return symoffs;
73647 }
73648@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
73649 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73650 mod->strtab = (void *)sechdrs[strindex].sh_addr;
73651
73652+ pax_open_kernel();
73653+
73654 /* Set types up while we still have access to sections. */
73655 for (i = 0; i < mod->num_symtab; i++)
73656 mod->symtab[i].st_info
73657- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
73658+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
73659
73660- mod->core_symtab = dst = mod->module_core + symoffs;
73661+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
73662 src = mod->symtab;
73663 *dst = *src;
73664 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
73665@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
73666 }
73667 mod->core_num_syms = ndst;
73668
73669- mod->core_strtab = s = mod->module_core + stroffs;
73670+ mod->core_strtab = s = mod->module_core_rx + stroffs;
73671 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
73672 if (test_bit(i, strmap))
73673 *++s = mod->strtab[i];
73674+
73675+ pax_close_kernel();
73676 }
73677 #else
73678 static inline unsigned long layout_symtab(struct module *mod,
73679@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
73680 #endif
73681 }
73682
73683-static void *module_alloc_update_bounds(unsigned long size)
73684+static void *module_alloc_update_bounds_rw(unsigned long size)
73685 {
73686 void *ret = module_alloc(size);
73687
73688 if (ret) {
73689 /* Update module bounds. */
73690- if ((unsigned long)ret < module_addr_min)
73691- module_addr_min = (unsigned long)ret;
73692- if ((unsigned long)ret + size > module_addr_max)
73693- module_addr_max = (unsigned long)ret + size;
73694+ if ((unsigned long)ret < module_addr_min_rw)
73695+ module_addr_min_rw = (unsigned long)ret;
73696+ if ((unsigned long)ret + size > module_addr_max_rw)
73697+ module_addr_max_rw = (unsigned long)ret + size;
73698+ }
73699+ return ret;
73700+}
73701+
73702+static void *module_alloc_update_bounds_rx(unsigned long size)
73703+{
73704+ void *ret = module_alloc_exec(size);
73705+
73706+ if (ret) {
73707+ /* Update module bounds. */
73708+ if ((unsigned long)ret < module_addr_min_rx)
73709+ module_addr_min_rx = (unsigned long)ret;
73710+ if ((unsigned long)ret + size > module_addr_max_rx)
73711+ module_addr_max_rx = (unsigned long)ret + size;
73712 }
73713 return ret;
73714 }
73715@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73716 unsigned int i;
73717
73718 /* only scan the sections containing data */
73719- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
73720- (unsigned long)mod->module_core,
73721+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
73722+ (unsigned long)mod->module_core_rw,
73723 sizeof(struct module), GFP_KERNEL);
73724
73725 for (i = 1; i < hdr->e_shnum; i++) {
73726@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73727 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73728 continue;
73729
73730- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73731- (unsigned long)mod->module_core,
73732+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73733+ (unsigned long)mod->module_core_rw,
73734 sechdrs[i].sh_size, GFP_KERNEL);
73735 }
73736 }
73737@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73738 Elf_Ehdr *hdr;
73739 Elf_Shdr *sechdrs;
73740 char *secstrings, *args, *modmagic, *strtab = NULL;
73741- char *staging;
73742+ char *staging, *license;
73743 unsigned int i;
73744 unsigned int symindex = 0;
73745 unsigned int strindex = 0;
73746@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73747 goto free_hdr;
73748 }
73749
73750+ license = get_modinfo(sechdrs, infoindex, "license");
73751+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73752+ if (!license || !license_is_gpl_compatible(license)) {
73753+ err = -ENOEXEC;
73754+ goto free_hdr;
73755+ }
73756+#endif
73757+
73758 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73759 /* This is allowed: modprobe --force will invalidate it. */
73760 if (!modmagic) {
73761@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73762 secstrings, &stroffs, strmap);
73763
73764 /* Do the allocs. */
73765- ptr = module_alloc_update_bounds(mod->core_size);
73766+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73767 /*
73768 * The pointer to this block is stored in the module structure
73769 * which is inside the block. Just mark it as not being a
73770@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73771 err = -ENOMEM;
73772 goto free_percpu;
73773 }
73774- memset(ptr, 0, mod->core_size);
73775- mod->module_core = ptr;
73776+ memset(ptr, 0, mod->core_size_rw);
73777+ mod->module_core_rw = ptr;
73778
73779- ptr = module_alloc_update_bounds(mod->init_size);
73780+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73781 /*
73782 * The pointer to this block is stored in the module structure
73783 * which is inside the block. This block doesn't need to be
73784 * scanned as it contains data and code that will be freed
73785 * after the module is initialized.
73786 */
73787- kmemleak_ignore(ptr);
73788- if (!ptr && mod->init_size) {
73789+ kmemleak_not_leak(ptr);
73790+ if (!ptr && mod->init_size_rw) {
73791 err = -ENOMEM;
73792- goto free_core;
73793+ goto free_core_rw;
73794 }
73795- memset(ptr, 0, mod->init_size);
73796- mod->module_init = ptr;
73797+ memset(ptr, 0, mod->init_size_rw);
73798+ mod->module_init_rw = ptr;
73799+
73800+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73801+ kmemleak_not_leak(ptr);
73802+ if (!ptr) {
73803+ err = -ENOMEM;
73804+ goto free_init_rw;
73805+ }
73806+
73807+ pax_open_kernel();
73808+ memset(ptr, 0, mod->core_size_rx);
73809+ pax_close_kernel();
73810+ mod->module_core_rx = ptr;
73811+
73812+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73813+ kmemleak_not_leak(ptr);
73814+ if (!ptr && mod->init_size_rx) {
73815+ err = -ENOMEM;
73816+ goto free_core_rx;
73817+ }
73818+
73819+ pax_open_kernel();
73820+ memset(ptr, 0, mod->init_size_rx);
73821+ pax_close_kernel();
73822+ mod->module_init_rx = ptr;
73823
73824 /* Transfer each section which specifies SHF_ALLOC */
73825 DEBUGP("final section addresses:\n");
73826@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73827 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73828 continue;
73829
73830- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73831- dest = mod->module_init
73832- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73833- else
73834- dest = mod->module_core + sechdrs[i].sh_entsize;
73835+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73836+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73837+ dest = mod->module_init_rw
73838+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73839+ else
73840+ dest = mod->module_init_rx
73841+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73842+ } else {
73843+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73844+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73845+ else
73846+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73847+ }
73848
73849- if (sechdrs[i].sh_type != SHT_NOBITS)
73850- memcpy(dest, (void *)sechdrs[i].sh_addr,
73851- sechdrs[i].sh_size);
73852+ if (sechdrs[i].sh_type != SHT_NOBITS) {
73853+
73854+#ifdef CONFIG_PAX_KERNEXEC
73855+#ifdef CONFIG_X86_64
73856+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73857+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73858+#endif
73859+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73860+ pax_open_kernel();
73861+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73862+ pax_close_kernel();
73863+ } else
73864+#endif
73865+
73866+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73867+ }
73868 /* Update sh_addr to point to copy in image. */
73869- sechdrs[i].sh_addr = (unsigned long)dest;
73870+
73871+#ifdef CONFIG_PAX_KERNEXEC
73872+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73873+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73874+ else
73875+#endif
73876+
73877+ sechdrs[i].sh_addr = (unsigned long)dest;
73878 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73879 }
73880 /* Module has been moved. */
73881@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73882 mod->name);
73883 if (!mod->refptr) {
73884 err = -ENOMEM;
73885- goto free_init;
73886+ goto free_init_rx;
73887 }
73888 #endif
73889 /* Now we've moved module, initialize linked lists, etc. */
73890@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73891 goto free_unload;
73892
73893 /* Set up license info based on the info section */
73894- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73895+ set_license(mod, license);
73896
73897 /*
73898 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73899@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73900 /* Set up MODINFO_ATTR fields */
73901 setup_modinfo(mod, sechdrs, infoindex);
73902
73903+ mod->args = args;
73904+
73905+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73906+ {
73907+ char *p, *p2;
73908+
73909+ if (strstr(mod->args, "grsec_modharden_netdev")) {
73910+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73911+ err = -EPERM;
73912+ goto cleanup;
73913+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73914+ p += strlen("grsec_modharden_normal");
73915+ p2 = strstr(p, "_");
73916+ if (p2) {
73917+ *p2 = '\0';
73918+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73919+ *p2 = '_';
73920+ }
73921+ err = -EPERM;
73922+ goto cleanup;
73923+ }
73924+ }
73925+#endif
73926+
73927+
73928 /* Fix up syms, so that st_value is a pointer to location. */
73929 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73930 mod);
73931@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73932
73933 /* Now do relocations. */
73934 for (i = 1; i < hdr->e_shnum; i++) {
73935- const char *strtab = (char *)sechdrs[strindex].sh_addr;
73936 unsigned int info = sechdrs[i].sh_info;
73937+ strtab = (char *)sechdrs[strindex].sh_addr;
73938
73939 /* Not a valid relocation section? */
73940 if (info >= hdr->e_shnum)
73941@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73942 * Do it before processing of module parameters, so the module
73943 * can provide parameter accessor functions of its own.
73944 */
73945- if (mod->module_init)
73946- flush_icache_range((unsigned long)mod->module_init,
73947- (unsigned long)mod->module_init
73948- + mod->init_size);
73949- flush_icache_range((unsigned long)mod->module_core,
73950- (unsigned long)mod->module_core + mod->core_size);
73951+ if (mod->module_init_rx)
73952+ flush_icache_range((unsigned long)mod->module_init_rx,
73953+ (unsigned long)mod->module_init_rx
73954+ + mod->init_size_rx);
73955+ flush_icache_range((unsigned long)mod->module_core_rx,
73956+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
73957
73958 set_fs(old_fs);
73959
73960- mod->args = args;
73961 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73962 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73963 mod->name);
73964@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73965 free_unload:
73966 module_unload_free(mod);
73967 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73968+ free_init_rx:
73969 percpu_modfree(mod->refptr);
73970- free_init:
73971 #endif
73972- module_free(mod, mod->module_init);
73973- free_core:
73974- module_free(mod, mod->module_core);
73975+ module_free_exec(mod, mod->module_init_rx);
73976+ free_core_rx:
73977+ module_free_exec(mod, mod->module_core_rx);
73978+ free_init_rw:
73979+ module_free(mod, mod->module_init_rw);
73980+ free_core_rw:
73981+ module_free(mod, mod->module_core_rw);
73982 /* mod will be freed with core. Don't access it beyond this line! */
73983 free_percpu:
73984 if (percpu)
73985@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73986 mod->symtab = mod->core_symtab;
73987 mod->strtab = mod->core_strtab;
73988 #endif
73989- module_free(mod, mod->module_init);
73990- mod->module_init = NULL;
73991- mod->init_size = 0;
73992- mod->init_text_size = 0;
73993+ module_free(mod, mod->module_init_rw);
73994+ module_free_exec(mod, mod->module_init_rx);
73995+ mod->module_init_rw = NULL;
73996+ mod->module_init_rx = NULL;
73997+ mod->init_size_rw = 0;
73998+ mod->init_size_rx = 0;
73999 mutex_unlock(&module_mutex);
74000
74001 return 0;
74002@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
74003 unsigned long nextval;
74004
74005 /* At worse, next value is at end of module */
74006- if (within_module_init(addr, mod))
74007- nextval = (unsigned long)mod->module_init+mod->init_text_size;
74008+ if (within_module_init_rx(addr, mod))
74009+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
74010+ else if (within_module_init_rw(addr, mod))
74011+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
74012+ else if (within_module_core_rx(addr, mod))
74013+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
74014+ else if (within_module_core_rw(addr, mod))
74015+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
74016 else
74017- nextval = (unsigned long)mod->module_core+mod->core_text_size;
74018+ return NULL;
74019
74020 /* Scan for closest preceeding symbol, and next symbol. (ELF
74021 starts real symbols at 1). */
74022@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
74023 char buf[8];
74024
74025 seq_printf(m, "%s %u",
74026- mod->name, mod->init_size + mod->core_size);
74027+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
74028 print_unload_info(m, mod);
74029
74030 /* Informative for users. */
74031@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
74032 mod->state == MODULE_STATE_COMING ? "Loading":
74033 "Live");
74034 /* Used by oprofile and other similar tools. */
74035- seq_printf(m, " 0x%p", mod->module_core);
74036+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
74037
74038 /* Taints info */
74039 if (mod->taints)
74040@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
74041
74042 static int __init proc_modules_init(void)
74043 {
74044+#ifndef CONFIG_GRKERNSEC_HIDESYM
74045+#ifdef CONFIG_GRKERNSEC_PROC_USER
74046+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
74047+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74048+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
74049+#else
74050 proc_create("modules", 0, NULL, &proc_modules_operations);
74051+#endif
74052+#else
74053+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
74054+#endif
74055 return 0;
74056 }
74057 module_init(proc_modules_init);
74058@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
74059 {
74060 struct module *mod;
74061
74062- if (addr < module_addr_min || addr > module_addr_max)
74063+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
74064+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
74065 return NULL;
74066
74067 list_for_each_entry_rcu(mod, &modules, list)
74068- if (within_module_core(addr, mod)
74069- || within_module_init(addr, mod))
74070+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
74071 return mod;
74072 return NULL;
74073 }
74074@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
74075 */
74076 struct module *__module_text_address(unsigned long addr)
74077 {
74078- struct module *mod = __module_address(addr);
74079+ struct module *mod;
74080+
74081+#ifdef CONFIG_X86_32
74082+ addr = ktla_ktva(addr);
74083+#endif
74084+
74085+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
74086+ return NULL;
74087+
74088+ mod = __module_address(addr);
74089+
74090 if (mod) {
74091 /* Make sure it's within the text section. */
74092- if (!within(addr, mod->module_init, mod->init_text_size)
74093- && !within(addr, mod->module_core, mod->core_text_size))
74094+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
74095 mod = NULL;
74096 }
74097 return mod;
74098diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
74099index ec815a9..fe46e99 100644
74100--- a/kernel/mutex-debug.c
74101+++ b/kernel/mutex-debug.c
74102@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
74103 }
74104
74105 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74106- struct thread_info *ti)
74107+ struct task_struct *task)
74108 {
74109 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
74110
74111 /* Mark the current thread as blocked on the lock: */
74112- ti->task->blocked_on = waiter;
74113+ task->blocked_on = waiter;
74114 }
74115
74116 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74117- struct thread_info *ti)
74118+ struct task_struct *task)
74119 {
74120 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
74121- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
74122- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
74123- ti->task->blocked_on = NULL;
74124+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
74125+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
74126+ task->blocked_on = NULL;
74127
74128 list_del_init(&waiter->list);
74129 waiter->task = NULL;
74130@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
74131 return;
74132
74133 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
74134- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
74135+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
74136 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
74137 mutex_clear_owner(lock);
74138 }
74139diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
74140index 6b2d735..372d3c4 100644
74141--- a/kernel/mutex-debug.h
74142+++ b/kernel/mutex-debug.h
74143@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
74144 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
74145 extern void debug_mutex_add_waiter(struct mutex *lock,
74146 struct mutex_waiter *waiter,
74147- struct thread_info *ti);
74148+ struct task_struct *task);
74149 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
74150- struct thread_info *ti);
74151+ struct task_struct *task);
74152 extern void debug_mutex_unlock(struct mutex *lock);
74153 extern void debug_mutex_init(struct mutex *lock, const char *name,
74154 struct lock_class_key *key);
74155
74156 static inline void mutex_set_owner(struct mutex *lock)
74157 {
74158- lock->owner = current_thread_info();
74159+ lock->owner = current;
74160 }
74161
74162 static inline void mutex_clear_owner(struct mutex *lock)
74163diff --git a/kernel/mutex.c b/kernel/mutex.c
74164index f85644c..5ee9f77 100644
74165--- a/kernel/mutex.c
74166+++ b/kernel/mutex.c
74167@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74168 */
74169
74170 for (;;) {
74171- struct thread_info *owner;
74172+ struct task_struct *owner;
74173
74174 /*
74175 * If we own the BKL, then don't spin. The owner of
74176@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74177 spin_lock_mutex(&lock->wait_lock, flags);
74178
74179 debug_mutex_lock_common(lock, &waiter);
74180- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
74181+ debug_mutex_add_waiter(lock, &waiter, task);
74182
74183 /* add waiting tasks to the end of the waitqueue (FIFO): */
74184 list_add_tail(&waiter.list, &lock->wait_list);
74185@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74186 * TASK_UNINTERRUPTIBLE case.)
74187 */
74188 if (unlikely(signal_pending_state(state, task))) {
74189- mutex_remove_waiter(lock, &waiter,
74190- task_thread_info(task));
74191+ mutex_remove_waiter(lock, &waiter, task);
74192 mutex_release(&lock->dep_map, 1, ip);
74193 spin_unlock_mutex(&lock->wait_lock, flags);
74194
74195@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
74196 done:
74197 lock_acquired(&lock->dep_map, ip);
74198 /* got the lock - rejoice! */
74199- mutex_remove_waiter(lock, &waiter, current_thread_info());
74200+ mutex_remove_waiter(lock, &waiter, task);
74201 mutex_set_owner(lock);
74202
74203 /* set it to 0 if there are no waiters left: */
74204diff --git a/kernel/mutex.h b/kernel/mutex.h
74205index 67578ca..4115fbf 100644
74206--- a/kernel/mutex.h
74207+++ b/kernel/mutex.h
74208@@ -19,7 +19,7 @@
74209 #ifdef CONFIG_SMP
74210 static inline void mutex_set_owner(struct mutex *lock)
74211 {
74212- lock->owner = current_thread_info();
74213+ lock->owner = current;
74214 }
74215
74216 static inline void mutex_clear_owner(struct mutex *lock)
74217diff --git a/kernel/panic.c b/kernel/panic.c
74218index 96b45d0..ff70a46 100644
74219--- a/kernel/panic.c
74220+++ b/kernel/panic.c
74221@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
74222 va_end(args);
74223 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
74224 #ifdef CONFIG_DEBUG_BUGVERBOSE
74225- dump_stack();
74226+ /*
74227+ * Avoid nested stack-dumping if a panic occurs during oops processing
74228+ */
74229+ if (!oops_in_progress)
74230+ dump_stack();
74231 #endif
74232
74233 /*
74234@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
74235 const char *board;
74236
74237 printk(KERN_WARNING "------------[ cut here ]------------\n");
74238- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
74239+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
74240 board = dmi_get_system_info(DMI_PRODUCT_NAME);
74241 if (board)
74242 printk(KERN_WARNING "Hardware name: %s\n", board);
74243@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
74244 */
74245 void __stack_chk_fail(void)
74246 {
74247- panic("stack-protector: Kernel stack is corrupted in: %p\n",
74248+ dump_stack();
74249+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
74250 __builtin_return_address(0));
74251 }
74252 EXPORT_SYMBOL(__stack_chk_fail);
74253diff --git a/kernel/params.c b/kernel/params.c
74254index d656c27..21e452c 100644
74255--- a/kernel/params.c
74256+++ b/kernel/params.c
74257@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
74258 return ret;
74259 }
74260
74261-static struct sysfs_ops module_sysfs_ops = {
74262+static const struct sysfs_ops module_sysfs_ops = {
74263 .show = module_attr_show,
74264 .store = module_attr_store,
74265 };
74266@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
74267 return 0;
74268 }
74269
74270-static struct kset_uevent_ops module_uevent_ops = {
74271+static const struct kset_uevent_ops module_uevent_ops = {
74272 .filter = uevent_filter,
74273 };
74274
74275diff --git a/kernel/perf_event.c b/kernel/perf_event.c
74276index 37ebc14..9c121d9 100644
74277--- a/kernel/perf_event.c
74278+++ b/kernel/perf_event.c
74279@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
74280 */
74281 int sysctl_perf_event_sample_rate __read_mostly = 100000;
74282
74283-static atomic64_t perf_event_id;
74284+static atomic64_unchecked_t perf_event_id;
74285
74286 /*
74287 * Lock for (sysadmin-configurable) event reservations:
74288@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
74289 * In order to keep per-task stats reliable we need to flip the event
74290 * values when we flip the contexts.
74291 */
74292- value = atomic64_read(&next_event->count);
74293- value = atomic64_xchg(&event->count, value);
74294- atomic64_set(&next_event->count, value);
74295+ value = atomic64_read_unchecked(&next_event->count);
74296+ value = atomic64_xchg_unchecked(&event->count, value);
74297+ atomic64_set_unchecked(&next_event->count, value);
74298
74299 swap(event->total_time_enabled, next_event->total_time_enabled);
74300 swap(event->total_time_running, next_event->total_time_running);
74301@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
74302 update_event_times(event);
74303 }
74304
74305- return atomic64_read(&event->count);
74306+ return atomic64_read_unchecked(&event->count);
74307 }
74308
74309 /*
74310@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
74311 values[n++] = 1 + leader->nr_siblings;
74312 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74313 values[n++] = leader->total_time_enabled +
74314- atomic64_read(&leader->child_total_time_enabled);
74315+ atomic64_read_unchecked(&leader->child_total_time_enabled);
74316 }
74317 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74318 values[n++] = leader->total_time_running +
74319- atomic64_read(&leader->child_total_time_running);
74320+ atomic64_read_unchecked(&leader->child_total_time_running);
74321 }
74322
74323 size = n * sizeof(u64);
74324@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
74325 values[n++] = perf_event_read_value(event);
74326 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74327 values[n++] = event->total_time_enabled +
74328- atomic64_read(&event->child_total_time_enabled);
74329+ atomic64_read_unchecked(&event->child_total_time_enabled);
74330 }
74331 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74332 values[n++] = event->total_time_running +
74333- atomic64_read(&event->child_total_time_running);
74334+ atomic64_read_unchecked(&event->child_total_time_running);
74335 }
74336 if (read_format & PERF_FORMAT_ID)
74337 values[n++] = primary_event_id(event);
74338@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
74339 static void perf_event_reset(struct perf_event *event)
74340 {
74341 (void)perf_event_read(event);
74342- atomic64_set(&event->count, 0);
74343+ atomic64_set_unchecked(&event->count, 0);
74344 perf_event_update_userpage(event);
74345 }
74346
74347@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
74348 ++userpg->lock;
74349 barrier();
74350 userpg->index = perf_event_index(event);
74351- userpg->offset = atomic64_read(&event->count);
74352+ userpg->offset = atomic64_read_unchecked(&event->count);
74353 if (event->state == PERF_EVENT_STATE_ACTIVE)
74354- userpg->offset -= atomic64_read(&event->hw.prev_count);
74355+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
74356
74357 userpg->time_enabled = event->total_time_enabled +
74358- atomic64_read(&event->child_total_time_enabled);
74359+ atomic64_read_unchecked(&event->child_total_time_enabled);
74360
74361 userpg->time_running = event->total_time_running +
74362- atomic64_read(&event->child_total_time_running);
74363+ atomic64_read_unchecked(&event->child_total_time_running);
74364
74365 barrier();
74366 ++userpg->lock;
74367@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
74368 u64 values[4];
74369 int n = 0;
74370
74371- values[n++] = atomic64_read(&event->count);
74372+ values[n++] = atomic64_read_unchecked(&event->count);
74373 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
74374 values[n++] = event->total_time_enabled +
74375- atomic64_read(&event->child_total_time_enabled);
74376+ atomic64_read_unchecked(&event->child_total_time_enabled);
74377 }
74378 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
74379 values[n++] = event->total_time_running +
74380- atomic64_read(&event->child_total_time_running);
74381+ atomic64_read_unchecked(&event->child_total_time_running);
74382 }
74383 if (read_format & PERF_FORMAT_ID)
74384 values[n++] = primary_event_id(event);
74385@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74386 if (leader != event)
74387 leader->pmu->read(leader);
74388
74389- values[n++] = atomic64_read(&leader->count);
74390+ values[n++] = atomic64_read_unchecked(&leader->count);
74391 if (read_format & PERF_FORMAT_ID)
74392 values[n++] = primary_event_id(leader);
74393
74394@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
74395 if (sub != event)
74396 sub->pmu->read(sub);
74397
74398- values[n++] = atomic64_read(&sub->count);
74399+ values[n++] = atomic64_read_unchecked(&sub->count);
74400 if (read_format & PERF_FORMAT_ID)
74401 values[n++] = primary_event_id(sub);
74402
74403@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
74404 * need to add enough zero bytes after the string to handle
74405 * the 64bit alignment we do later.
74406 */
74407- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
74408+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
74409 if (!buf) {
74410 name = strncpy(tmp, "//enomem", sizeof(tmp));
74411 goto got_name;
74412 }
74413- name = d_path(&file->f_path, buf, PATH_MAX);
74414+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
74415 if (IS_ERR(name)) {
74416 name = strncpy(tmp, "//toolong", sizeof(tmp));
74417 goto got_name;
74418@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
74419 {
74420 struct hw_perf_event *hwc = &event->hw;
74421
74422- atomic64_add(nr, &event->count);
74423+ atomic64_add_unchecked(nr, &event->count);
74424
74425 if (!hwc->sample_period)
74426 return;
74427@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
74428 u64 now;
74429
74430 now = cpu_clock(cpu);
74431- prev = atomic64_read(&event->hw.prev_count);
74432- atomic64_set(&event->hw.prev_count, now);
74433- atomic64_add(now - prev, &event->count);
74434+ prev = atomic64_read_unchecked(&event->hw.prev_count);
74435+ atomic64_set_unchecked(&event->hw.prev_count, now);
74436+ atomic64_add_unchecked(now - prev, &event->count);
74437 }
74438
74439 static int cpu_clock_perf_event_enable(struct perf_event *event)
74440@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
74441 struct hw_perf_event *hwc = &event->hw;
74442 int cpu = raw_smp_processor_id();
74443
74444- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
74445+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
74446 perf_swevent_start_hrtimer(event);
74447
74448 return 0;
74449@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
74450 u64 prev;
74451 s64 delta;
74452
74453- prev = atomic64_xchg(&event->hw.prev_count, now);
74454+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
74455 delta = now - prev;
74456- atomic64_add(delta, &event->count);
74457+ atomic64_add_unchecked(delta, &event->count);
74458 }
74459
74460 static int task_clock_perf_event_enable(struct perf_event *event)
74461@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
74462
74463 now = event->ctx->time;
74464
74465- atomic64_set(&hwc->prev_count, now);
74466+ atomic64_set_unchecked(&hwc->prev_count, now);
74467
74468 perf_swevent_start_hrtimer(event);
74469
74470@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
74471 event->parent = parent_event;
74472
74473 event->ns = get_pid_ns(current->nsproxy->pid_ns);
74474- event->id = atomic64_inc_return(&perf_event_id);
74475+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
74476
74477 event->state = PERF_EVENT_STATE_INACTIVE;
74478
74479@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
74480 if (child_event->attr.inherit_stat)
74481 perf_event_read_event(child_event, child);
74482
74483- child_val = atomic64_read(&child_event->count);
74484+ child_val = atomic64_read_unchecked(&child_event->count);
74485
74486 /*
74487 * Add back the child's count to the parent's count:
74488 */
74489- atomic64_add(child_val, &parent_event->count);
74490- atomic64_add(child_event->total_time_enabled,
74491+ atomic64_add_unchecked(child_val, &parent_event->count);
74492+ atomic64_add_unchecked(child_event->total_time_enabled,
74493 &parent_event->child_total_time_enabled);
74494- atomic64_add(child_event->total_time_running,
74495+ atomic64_add_unchecked(child_event->total_time_running,
74496 &parent_event->child_total_time_running);
74497
74498 /*
74499diff --git a/kernel/pid.c b/kernel/pid.c
74500index fce7198..4f23a7e 100644
74501--- a/kernel/pid.c
74502+++ b/kernel/pid.c
74503@@ -33,6 +33,7 @@
74504 #include <linux/rculist.h>
74505 #include <linux/bootmem.h>
74506 #include <linux/hash.h>
74507+#include <linux/security.h>
74508 #include <linux/pid_namespace.h>
74509 #include <linux/init_task.h>
74510 #include <linux/syscalls.h>
74511@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
74512
74513 int pid_max = PID_MAX_DEFAULT;
74514
74515-#define RESERVED_PIDS 300
74516+#define RESERVED_PIDS 500
74517
74518 int pid_max_min = RESERVED_PIDS + 1;
74519 int pid_max_max = PID_MAX_LIMIT;
74520@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
74521 */
74522 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
74523 {
74524- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74525+ struct task_struct *task;
74526+
74527+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
74528+
74529+ if (gr_pid_is_chrooted(task))
74530+ return NULL;
74531+
74532+ return task;
74533 }
74534
74535 struct task_struct *find_task_by_vpid(pid_t vnr)
74536@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
74537 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
74538 }
74539
74540+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
74541+{
74542+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
74543+}
74544+
74545 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
74546 {
74547 struct pid *pid;
74548diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
74549index 5c9dc22..d271117 100644
74550--- a/kernel/posix-cpu-timers.c
74551+++ b/kernel/posix-cpu-timers.c
74552@@ -6,6 +6,7 @@
74553 #include <linux/posix-timers.h>
74554 #include <linux/errno.h>
74555 #include <linux/math64.h>
74556+#include <linux/security.h>
74557 #include <asm/uaccess.h>
74558 #include <linux/kernel_stat.h>
74559 #include <trace/events/timer.h>
74560@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
74561
74562 static __init int init_posix_cpu_timers(void)
74563 {
74564- struct k_clock process = {
74565+ static struct k_clock process = {
74566 .clock_getres = process_cpu_clock_getres,
74567 .clock_get = process_cpu_clock_get,
74568 .clock_set = do_posix_clock_nosettime,
74569@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
74570 .nsleep = process_cpu_nsleep,
74571 .nsleep_restart = process_cpu_nsleep_restart,
74572 };
74573- struct k_clock thread = {
74574+ static struct k_clock thread = {
74575 .clock_getres = thread_cpu_clock_getres,
74576 .clock_get = thread_cpu_clock_get,
74577 .clock_set = do_posix_clock_nosettime,
74578diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
74579index 5e76d22..cf1baeb 100644
74580--- a/kernel/posix-timers.c
74581+++ b/kernel/posix-timers.c
74582@@ -42,6 +42,7 @@
74583 #include <linux/compiler.h>
74584 #include <linux/idr.h>
74585 #include <linux/posix-timers.h>
74586+#include <linux/grsecurity.h>
74587 #include <linux/syscalls.h>
74588 #include <linux/wait.h>
74589 #include <linux/workqueue.h>
74590@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
74591 * which we beg off on and pass to do_sys_settimeofday().
74592 */
74593
74594-static struct k_clock posix_clocks[MAX_CLOCKS];
74595+static struct k_clock *posix_clocks[MAX_CLOCKS];
74596
74597 /*
74598 * These ones are defined below.
74599@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
74600 */
74601 #define CLOCK_DISPATCH(clock, call, arglist) \
74602 ((clock) < 0 ? posix_cpu_##call arglist : \
74603- (posix_clocks[clock].call != NULL \
74604- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
74605+ (posix_clocks[clock]->call != NULL \
74606+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
74607
74608 /*
74609 * Default clock hook functions when the struct k_clock passed
74610@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
74611 struct timespec *tp)
74612 {
74613 tp->tv_sec = 0;
74614- tp->tv_nsec = posix_clocks[which_clock].res;
74615+ tp->tv_nsec = posix_clocks[which_clock]->res;
74616 return 0;
74617 }
74618
74619@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
74620 return 0;
74621 if ((unsigned) which_clock >= MAX_CLOCKS)
74622 return 1;
74623- if (posix_clocks[which_clock].clock_getres != NULL)
74624+ if (posix_clocks[which_clock] == NULL)
74625 return 0;
74626- if (posix_clocks[which_clock].res != 0)
74627+ if (posix_clocks[which_clock]->clock_getres != NULL)
74628+ return 0;
74629+ if (posix_clocks[which_clock]->res != 0)
74630 return 0;
74631 return 1;
74632 }
74633@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
74634 */
74635 static __init int init_posix_timers(void)
74636 {
74637- struct k_clock clock_realtime = {
74638+ static struct k_clock clock_realtime = {
74639 .clock_getres = hrtimer_get_res,
74640 };
74641- struct k_clock clock_monotonic = {
74642+ static struct k_clock clock_monotonic = {
74643 .clock_getres = hrtimer_get_res,
74644 .clock_get = posix_ktime_get_ts,
74645 .clock_set = do_posix_clock_nosettime,
74646 };
74647- struct k_clock clock_monotonic_raw = {
74648+ static struct k_clock clock_monotonic_raw = {
74649 .clock_getres = hrtimer_get_res,
74650 .clock_get = posix_get_monotonic_raw,
74651 .clock_set = do_posix_clock_nosettime,
74652 .timer_create = no_timer_create,
74653 .nsleep = no_nsleep,
74654 };
74655- struct k_clock clock_realtime_coarse = {
74656+ static struct k_clock clock_realtime_coarse = {
74657 .clock_getres = posix_get_coarse_res,
74658 .clock_get = posix_get_realtime_coarse,
74659 .clock_set = do_posix_clock_nosettime,
74660 .timer_create = no_timer_create,
74661 .nsleep = no_nsleep,
74662 };
74663- struct k_clock clock_monotonic_coarse = {
74664+ static struct k_clock clock_monotonic_coarse = {
74665 .clock_getres = posix_get_coarse_res,
74666 .clock_get = posix_get_monotonic_coarse,
74667 .clock_set = do_posix_clock_nosettime,
74668@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
74669 .nsleep = no_nsleep,
74670 };
74671
74672+ pax_track_stack();
74673+
74674 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
74675 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
74676 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
74677@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
74678 return;
74679 }
74680
74681- posix_clocks[clock_id] = *new_clock;
74682+ posix_clocks[clock_id] = new_clock;
74683 }
74684 EXPORT_SYMBOL_GPL(register_posix_clock);
74685
74686@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
74687 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
74688 return -EFAULT;
74689
74690+ /* only the CLOCK_REALTIME clock can be set, all other clocks
74691+ have their clock_set fptr set to a nosettime dummy function
74692+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
74693+ call common_clock_set, which calls do_sys_settimeofday, which
74694+ we hook
74695+ */
74696+
74697 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
74698 }
74699
74700diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
74701index 04a9e90..bc355aa 100644
74702--- a/kernel/power/hibernate.c
74703+++ b/kernel/power/hibernate.c
74704@@ -48,14 +48,14 @@ enum {
74705
74706 static int hibernation_mode = HIBERNATION_SHUTDOWN;
74707
74708-static struct platform_hibernation_ops *hibernation_ops;
74709+static const struct platform_hibernation_ops *hibernation_ops;
74710
74711 /**
74712 * hibernation_set_ops - set the global hibernate operations
74713 * @ops: the hibernation operations to use in subsequent hibernation transitions
74714 */
74715
74716-void hibernation_set_ops(struct platform_hibernation_ops *ops)
74717+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
74718 {
74719 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
74720 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
74721diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
74722index e8b3370..484c2e4 100644
74723--- a/kernel/power/poweroff.c
74724+++ b/kernel/power/poweroff.c
74725@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74726 .enable_mask = SYSRQ_ENABLE_BOOT,
74727 };
74728
74729-static int pm_sysrq_init(void)
74730+static int __init pm_sysrq_init(void)
74731 {
74732 register_sysrq_key('o', &sysrq_poweroff_op);
74733 return 0;
74734diff --git a/kernel/power/process.c b/kernel/power/process.c
74735index e7cd671..56d5f459 100644
74736--- a/kernel/power/process.c
74737+++ b/kernel/power/process.c
74738@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74739 struct timeval start, end;
74740 u64 elapsed_csecs64;
74741 unsigned int elapsed_csecs;
74742+ bool timedout = false;
74743
74744 do_gettimeofday(&start);
74745
74746 end_time = jiffies + TIMEOUT;
74747 do {
74748 todo = 0;
74749+ if (time_after(jiffies, end_time))
74750+ timedout = true;
74751 read_lock(&tasklist_lock);
74752 do_each_thread(g, p) {
74753 if (frozen(p) || !freezeable(p))
74754@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74755 * It is "frozen enough". If the task does wake
74756 * up, it will immediately call try_to_freeze.
74757 */
74758- if (!task_is_stopped_or_traced(p) &&
74759- !freezer_should_skip(p))
74760+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74761 todo++;
74762+ if (timedout) {
74763+ printk(KERN_ERR "Task refusing to freeze:\n");
74764+ sched_show_task(p);
74765+ }
74766+ }
74767 } while_each_thread(g, p);
74768 read_unlock(&tasklist_lock);
74769 yield(); /* Yield is okay here */
74770- if (time_after(jiffies, end_time))
74771- break;
74772- } while (todo);
74773+ } while (todo && !timedout);
74774
74775 do_gettimeofday(&end);
74776 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74777diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74778index 40dd021..fb30ceb 100644
74779--- a/kernel/power/suspend.c
74780+++ b/kernel/power/suspend.c
74781@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74782 [PM_SUSPEND_MEM] = "mem",
74783 };
74784
74785-static struct platform_suspend_ops *suspend_ops;
74786+static const struct platform_suspend_ops *suspend_ops;
74787
74788 /**
74789 * suspend_set_ops - Set the global suspend method table.
74790 * @ops: Pointer to ops structure.
74791 */
74792-void suspend_set_ops(struct platform_suspend_ops *ops)
74793+void suspend_set_ops(const struct platform_suspend_ops *ops)
74794 {
74795 mutex_lock(&pm_mutex);
74796 suspend_ops = ops;
74797diff --git a/kernel/printk.c b/kernel/printk.c
74798index 4cade47..4d17900 100644
74799--- a/kernel/printk.c
74800+++ b/kernel/printk.c
74801@@ -33,6 +33,7 @@
74802 #include <linux/bootmem.h>
74803 #include <linux/syscalls.h>
74804 #include <linux/kexec.h>
74805+#include <linux/syslog.h>
74806
74807 #include <asm/uaccess.h>
74808
74809@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74810 }
74811 #endif
74812
74813-/*
74814- * Commands to do_syslog:
74815- *
74816- * 0 -- Close the log. Currently a NOP.
74817- * 1 -- Open the log. Currently a NOP.
74818- * 2 -- Read from the log.
74819- * 3 -- Read all messages remaining in the ring buffer.
74820- * 4 -- Read and clear all messages remaining in the ring buffer
74821- * 5 -- Clear ring buffer.
74822- * 6 -- Disable printk's to console
74823- * 7 -- Enable printk's to console
74824- * 8 -- Set level of messages printed to console
74825- * 9 -- Return number of unread characters in the log buffer
74826- * 10 -- Return size of the log buffer
74827- */
74828-int do_syslog(int type, char __user *buf, int len)
74829+int do_syslog(int type, char __user *buf, int len, bool from_file)
74830 {
74831 unsigned i, j, limit, count;
74832 int do_clear = 0;
74833 char c;
74834 int error = 0;
74835
74836- error = security_syslog(type);
74837+#ifdef CONFIG_GRKERNSEC_DMESG
74838+ if (grsec_enable_dmesg &&
74839+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74840+ !capable(CAP_SYS_ADMIN))
74841+ return -EPERM;
74842+#endif
74843+
74844+ error = security_syslog(type, from_file);
74845 if (error)
74846 return error;
74847
74848 switch (type) {
74849- case 0: /* Close log */
74850+ case SYSLOG_ACTION_CLOSE: /* Close log */
74851 break;
74852- case 1: /* Open log */
74853+ case SYSLOG_ACTION_OPEN: /* Open log */
74854 break;
74855- case 2: /* Read from log */
74856+ case SYSLOG_ACTION_READ: /* Read from log */
74857 error = -EINVAL;
74858 if (!buf || len < 0)
74859 goto out;
74860@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74861 if (!error)
74862 error = i;
74863 break;
74864- case 4: /* Read/clear last kernel messages */
74865+ /* Read/clear last kernel messages */
74866+ case SYSLOG_ACTION_READ_CLEAR:
74867 do_clear = 1;
74868 /* FALL THRU */
74869- case 3: /* Read last kernel messages */
74870+ /* Read last kernel messages */
74871+ case SYSLOG_ACTION_READ_ALL:
74872 error = -EINVAL;
74873 if (!buf || len < 0)
74874 goto out;
74875@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74876 }
74877 }
74878 break;
74879- case 5: /* Clear ring buffer */
74880+ /* Clear ring buffer */
74881+ case SYSLOG_ACTION_CLEAR:
74882 logged_chars = 0;
74883 break;
74884- case 6: /* Disable logging to console */
74885+ /* Disable logging to console */
74886+ case SYSLOG_ACTION_CONSOLE_OFF:
74887 if (saved_console_loglevel == -1)
74888 saved_console_loglevel = console_loglevel;
74889 console_loglevel = minimum_console_loglevel;
74890 break;
74891- case 7: /* Enable logging to console */
74892+ /* Enable logging to console */
74893+ case SYSLOG_ACTION_CONSOLE_ON:
74894 if (saved_console_loglevel != -1) {
74895 console_loglevel = saved_console_loglevel;
74896 saved_console_loglevel = -1;
74897 }
74898 break;
74899- case 8: /* Set level of messages printed to console */
74900+ /* Set level of messages printed to console */
74901+ case SYSLOG_ACTION_CONSOLE_LEVEL:
74902 error = -EINVAL;
74903 if (len < 1 || len > 8)
74904 goto out;
74905@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74906 saved_console_loglevel = -1;
74907 error = 0;
74908 break;
74909- case 9: /* Number of chars in the log buffer */
74910+ /* Number of chars in the log buffer */
74911+ case SYSLOG_ACTION_SIZE_UNREAD:
74912 error = log_end - log_start;
74913 break;
74914- case 10: /* Size of the log buffer */
74915+ /* Size of the log buffer */
74916+ case SYSLOG_ACTION_SIZE_BUFFER:
74917 error = log_buf_len;
74918 break;
74919 default:
74920@@ -415,7 +416,7 @@ out:
74921
74922 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74923 {
74924- return do_syslog(type, buf, len);
74925+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74926 }
74927
74928 /*
74929diff --git a/kernel/profile.c b/kernel/profile.c
74930index dfadc5b..7f59404 100644
74931--- a/kernel/profile.c
74932+++ b/kernel/profile.c
74933@@ -39,7 +39,7 @@ struct profile_hit {
74934 /* Oprofile timer tick hook */
74935 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74936
74937-static atomic_t *prof_buffer;
74938+static atomic_unchecked_t *prof_buffer;
74939 static unsigned long prof_len, prof_shift;
74940
74941 int prof_on __read_mostly;
74942@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74943 hits[i].pc = 0;
74944 continue;
74945 }
74946- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74947+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74948 hits[i].hits = hits[i].pc = 0;
74949 }
74950 }
74951@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74952 * Add the current hit(s) and flush the write-queue out
74953 * to the global buffer:
74954 */
74955- atomic_add(nr_hits, &prof_buffer[pc]);
74956+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74957 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74958- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74959+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74960 hits[i].pc = hits[i].hits = 0;
74961 }
74962 out:
74963@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74964 if (prof_on != type || !prof_buffer)
74965 return;
74966 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74967- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74968+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74969 }
74970 #endif /* !CONFIG_SMP */
74971 EXPORT_SYMBOL_GPL(profile_hits);
74972@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74973 return -EFAULT;
74974 buf++; p++; count--; read++;
74975 }
74976- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74977+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74978 if (copy_to_user(buf, (void *)pnt, count))
74979 return -EFAULT;
74980 read += count;
74981@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74982 }
74983 #endif
74984 profile_discard_flip_buffers();
74985- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74986+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74987 return count;
74988 }
74989
74990diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74991index 05625f6..733bf70 100644
74992--- a/kernel/ptrace.c
74993+++ b/kernel/ptrace.c
74994@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74995 return ret;
74996 }
74997
74998-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74999+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
75000+ unsigned int log)
75001 {
75002 const struct cred *cred = current_cred(), *tcred;
75003
75004@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
75005 cred->gid != tcred->egid ||
75006 cred->gid != tcred->sgid ||
75007 cred->gid != tcred->gid) &&
75008- !capable(CAP_SYS_PTRACE)) {
75009+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
75010+ (log && !capable(CAP_SYS_PTRACE)))
75011+ ) {
75012 rcu_read_unlock();
75013 return -EPERM;
75014 }
75015@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
75016 smp_rmb();
75017 if (task->mm)
75018 dumpable = get_dumpable(task->mm);
75019- if (!dumpable && !capable(CAP_SYS_PTRACE))
75020+ if (!dumpable &&
75021+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
75022+ (log && !capable(CAP_SYS_PTRACE))))
75023 return -EPERM;
75024
75025 return security_ptrace_access_check(task, mode);
75026@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
75027 {
75028 int err;
75029 task_lock(task);
75030- err = __ptrace_may_access(task, mode);
75031+ err = __ptrace_may_access(task, mode, 0);
75032+ task_unlock(task);
75033+ return !err;
75034+}
75035+
75036+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
75037+{
75038+ int err;
75039+ task_lock(task);
75040+ err = __ptrace_may_access(task, mode, 1);
75041 task_unlock(task);
75042 return !err;
75043 }
75044@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
75045 goto out;
75046
75047 task_lock(task);
75048- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
75049+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
75050 task_unlock(task);
75051 if (retval)
75052 goto unlock_creds;
75053@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
75054 goto unlock_tasklist;
75055
75056 task->ptrace = PT_PTRACED;
75057- if (capable(CAP_SYS_PTRACE))
75058+ if (capable_nolog(CAP_SYS_PTRACE))
75059 task->ptrace |= PT_PTRACE_CAP;
75060
75061 __ptrace_link(task, current);
75062@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
75063 {
75064 int copied = 0;
75065
75066+ pax_track_stack();
75067+
75068 while (len > 0) {
75069 char buf[128];
75070 int this_len, retval;
75071@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
75072 {
75073 int copied = 0;
75074
75075+ pax_track_stack();
75076+
75077 while (len > 0) {
75078 char buf[128];
75079 int this_len, retval;
75080@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
75081 int ret = -EIO;
75082 siginfo_t siginfo;
75083
75084+ pax_track_stack();
75085+
75086 switch (request) {
75087 case PTRACE_PEEKTEXT:
75088 case PTRACE_PEEKDATA:
75089@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
75090 ret = ptrace_setoptions(child, data);
75091 break;
75092 case PTRACE_GETEVENTMSG:
75093- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
75094+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
75095 break;
75096
75097 case PTRACE_GETSIGINFO:
75098 ret = ptrace_getsiginfo(child, &siginfo);
75099 if (!ret)
75100- ret = copy_siginfo_to_user((siginfo_t __user *) data,
75101+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
75102 &siginfo);
75103 break;
75104
75105 case PTRACE_SETSIGINFO:
75106- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
75107+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
75108 sizeof siginfo))
75109 ret = -EFAULT;
75110 else
75111@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
75112 goto out;
75113 }
75114
75115+ if (gr_handle_ptrace(child, request)) {
75116+ ret = -EPERM;
75117+ goto out_put_task_struct;
75118+ }
75119+
75120 if (request == PTRACE_ATTACH) {
75121 ret = ptrace_attach(child);
75122 /*
75123 * Some architectures need to do book-keeping after
75124 * a ptrace attach.
75125 */
75126- if (!ret)
75127+ if (!ret) {
75128 arch_ptrace_attach(child);
75129+ gr_audit_ptrace(child);
75130+ }
75131 goto out_put_task_struct;
75132 }
75133
75134@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
75135 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
75136 if (copied != sizeof(tmp))
75137 return -EIO;
75138- return put_user(tmp, (unsigned long __user *)data);
75139+ return put_user(tmp, (__force unsigned long __user *)data);
75140 }
75141
75142 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
75143@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
75144 siginfo_t siginfo;
75145 int ret;
75146
75147+ pax_track_stack();
75148+
75149 switch (request) {
75150 case PTRACE_PEEKTEXT:
75151 case PTRACE_PEEKDATA:
75152@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
75153 goto out;
75154 }
75155
75156+ if (gr_handle_ptrace(child, request)) {
75157+ ret = -EPERM;
75158+ goto out_put_task_struct;
75159+ }
75160+
75161 if (request == PTRACE_ATTACH) {
75162 ret = ptrace_attach(child);
75163 /*
75164 * Some architectures need to do book-keeping after
75165 * a ptrace attach.
75166 */
75167- if (!ret)
75168+ if (!ret) {
75169 arch_ptrace_attach(child);
75170+ gr_audit_ptrace(child);
75171+ }
75172 goto out_put_task_struct;
75173 }
75174
75175diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
75176index 697c0a0..2402696 100644
75177--- a/kernel/rcutorture.c
75178+++ b/kernel/rcutorture.c
75179@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
75180 { 0 };
75181 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
75182 { 0 };
75183-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75184-static atomic_t n_rcu_torture_alloc;
75185-static atomic_t n_rcu_torture_alloc_fail;
75186-static atomic_t n_rcu_torture_free;
75187-static atomic_t n_rcu_torture_mberror;
75188-static atomic_t n_rcu_torture_error;
75189+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75190+static atomic_unchecked_t n_rcu_torture_alloc;
75191+static atomic_unchecked_t n_rcu_torture_alloc_fail;
75192+static atomic_unchecked_t n_rcu_torture_free;
75193+static atomic_unchecked_t n_rcu_torture_mberror;
75194+static atomic_unchecked_t n_rcu_torture_error;
75195 static long n_rcu_torture_timers;
75196 static struct list_head rcu_torture_removed;
75197 static cpumask_var_t shuffle_tmp_mask;
75198@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
75199
75200 spin_lock_bh(&rcu_torture_lock);
75201 if (list_empty(&rcu_torture_freelist)) {
75202- atomic_inc(&n_rcu_torture_alloc_fail);
75203+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
75204 spin_unlock_bh(&rcu_torture_lock);
75205 return NULL;
75206 }
75207- atomic_inc(&n_rcu_torture_alloc);
75208+ atomic_inc_unchecked(&n_rcu_torture_alloc);
75209 p = rcu_torture_freelist.next;
75210 list_del_init(p);
75211 spin_unlock_bh(&rcu_torture_lock);
75212@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
75213 static void
75214 rcu_torture_free(struct rcu_torture *p)
75215 {
75216- atomic_inc(&n_rcu_torture_free);
75217+ atomic_inc_unchecked(&n_rcu_torture_free);
75218 spin_lock_bh(&rcu_torture_lock);
75219 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
75220 spin_unlock_bh(&rcu_torture_lock);
75221@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
75222 i = rp->rtort_pipe_count;
75223 if (i > RCU_TORTURE_PIPE_LEN)
75224 i = RCU_TORTURE_PIPE_LEN;
75225- atomic_inc(&rcu_torture_wcount[i]);
75226+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75227 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75228 rp->rtort_mbtest = 0;
75229 rcu_torture_free(rp);
75230@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
75231 i = rp->rtort_pipe_count;
75232 if (i > RCU_TORTURE_PIPE_LEN)
75233 i = RCU_TORTURE_PIPE_LEN;
75234- atomic_inc(&rcu_torture_wcount[i]);
75235+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75236 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75237 rp->rtort_mbtest = 0;
75238 list_del(&rp->rtort_free);
75239@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
75240 i = old_rp->rtort_pipe_count;
75241 if (i > RCU_TORTURE_PIPE_LEN)
75242 i = RCU_TORTURE_PIPE_LEN;
75243- atomic_inc(&rcu_torture_wcount[i]);
75244+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75245 old_rp->rtort_pipe_count++;
75246 cur_ops->deferred_free(old_rp);
75247 }
75248@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
75249 return;
75250 }
75251 if (p->rtort_mbtest == 0)
75252- atomic_inc(&n_rcu_torture_mberror);
75253+ atomic_inc_unchecked(&n_rcu_torture_mberror);
75254 spin_lock(&rand_lock);
75255 cur_ops->read_delay(&rand);
75256 n_rcu_torture_timers++;
75257@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
75258 continue;
75259 }
75260 if (p->rtort_mbtest == 0)
75261- atomic_inc(&n_rcu_torture_mberror);
75262+ atomic_inc_unchecked(&n_rcu_torture_mberror);
75263 cur_ops->read_delay(&rand);
75264 preempt_disable();
75265 pipe_count = p->rtort_pipe_count;
75266@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
75267 rcu_torture_current,
75268 rcu_torture_current_version,
75269 list_empty(&rcu_torture_freelist),
75270- atomic_read(&n_rcu_torture_alloc),
75271- atomic_read(&n_rcu_torture_alloc_fail),
75272- atomic_read(&n_rcu_torture_free),
75273- atomic_read(&n_rcu_torture_mberror),
75274+ atomic_read_unchecked(&n_rcu_torture_alloc),
75275+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
75276+ atomic_read_unchecked(&n_rcu_torture_free),
75277+ atomic_read_unchecked(&n_rcu_torture_mberror),
75278 n_rcu_torture_timers);
75279- if (atomic_read(&n_rcu_torture_mberror) != 0)
75280+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
75281 cnt += sprintf(&page[cnt], " !!!");
75282 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
75283 if (i > 1) {
75284 cnt += sprintf(&page[cnt], "!!! ");
75285- atomic_inc(&n_rcu_torture_error);
75286+ atomic_inc_unchecked(&n_rcu_torture_error);
75287 WARN_ON_ONCE(1);
75288 }
75289 cnt += sprintf(&page[cnt], "Reader Pipe: ");
75290@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
75291 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
75292 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75293 cnt += sprintf(&page[cnt], " %d",
75294- atomic_read(&rcu_torture_wcount[i]));
75295+ atomic_read_unchecked(&rcu_torture_wcount[i]));
75296 }
75297 cnt += sprintf(&page[cnt], "\n");
75298 if (cur_ops->stats)
75299@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
75300
75301 if (cur_ops->cleanup)
75302 cur_ops->cleanup();
75303- if (atomic_read(&n_rcu_torture_error))
75304+ if (atomic_read_unchecked(&n_rcu_torture_error))
75305 rcu_torture_print_module_parms("End of test: FAILURE");
75306 else
75307 rcu_torture_print_module_parms("End of test: SUCCESS");
75308@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
75309
75310 rcu_torture_current = NULL;
75311 rcu_torture_current_version = 0;
75312- atomic_set(&n_rcu_torture_alloc, 0);
75313- atomic_set(&n_rcu_torture_alloc_fail, 0);
75314- atomic_set(&n_rcu_torture_free, 0);
75315- atomic_set(&n_rcu_torture_mberror, 0);
75316- atomic_set(&n_rcu_torture_error, 0);
75317+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75318+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75319+ atomic_set_unchecked(&n_rcu_torture_free, 0);
75320+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75321+ atomic_set_unchecked(&n_rcu_torture_error, 0);
75322 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75323- atomic_set(&rcu_torture_wcount[i], 0);
75324+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75325 for_each_possible_cpu(cpu) {
75326 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75327 per_cpu(rcu_torture_count, cpu)[i] = 0;
75328diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75329index 683c4f3..97f54c6 100644
75330--- a/kernel/rcutree.c
75331+++ b/kernel/rcutree.c
75332@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
75333 /*
75334 * Do softirq processing for the current CPU.
75335 */
75336-static void rcu_process_callbacks(struct softirq_action *unused)
75337+static void rcu_process_callbacks(void)
75338 {
75339 /*
75340 * Memory references from any prior RCU read-side critical sections
75341diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
75342index c03edf7..ac1b341 100644
75343--- a/kernel/rcutree_plugin.h
75344+++ b/kernel/rcutree_plugin.h
75345@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
75346 */
75347 void __rcu_read_lock(void)
75348 {
75349- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
75350+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
75351 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
75352 }
75353 EXPORT_SYMBOL_GPL(__rcu_read_lock);
75354@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
75355 struct task_struct *t = current;
75356
75357 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
75358- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
75359+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
75360 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
75361 rcu_read_unlock_special(t);
75362 }
75363diff --git a/kernel/relay.c b/kernel/relay.c
75364index 760c262..908e9ee 100644
75365--- a/kernel/relay.c
75366+++ b/kernel/relay.c
75367@@ -171,10 +171,14 @@ depopulate:
75368 */
75369 static struct rchan_buf *relay_create_buf(struct rchan *chan)
75370 {
75371- struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
75372+ struct rchan_buf *buf;
75373+
75374+ if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
75375+ return NULL;
75376+
75377+ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
75378 if (!buf)
75379 return NULL;
75380-
75381 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
75382 if (!buf->padding)
75383 goto free_buf;
75384@@ -581,6 +585,8 @@ struct rchan *relay_open(const char *base_filename,
75385
75386 if (!(subbuf_size && n_subbufs))
75387 return NULL;
75388+ if (subbuf_size > UINT_MAX / n_subbufs)
75389+ return NULL;
75390
75391 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
75392 if (!chan)
75393@@ -1222,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
75394 unsigned int flags,
75395 int *nonpad_ret)
75396 {
75397- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
75398+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
75399 struct rchan_buf *rbuf = in->private_data;
75400 unsigned int subbuf_size = rbuf->chan->subbuf_size;
75401 uint64_t pos = (uint64_t) *ppos;
75402@@ -1241,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
75403 .ops = &relay_pipe_buf_ops,
75404 .spd_release = relay_page_release,
75405 };
75406+ ssize_t ret;
75407+
75408+ pax_track_stack();
75409
75410 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
75411 return 0;
75412diff --git a/kernel/resource.c b/kernel/resource.c
75413index fb11a58..4e61ae1 100644
75414--- a/kernel/resource.c
75415+++ b/kernel/resource.c
75416@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
75417
75418 static int __init ioresources_init(void)
75419 {
75420+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75421+#ifdef CONFIG_GRKERNSEC_PROC_USER
75422+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
75423+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
75424+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75425+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
75426+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
75427+#endif
75428+#else
75429 proc_create("ioports", 0, NULL, &proc_ioports_operations);
75430 proc_create("iomem", 0, NULL, &proc_iomem_operations);
75431+#endif
75432 return 0;
75433 }
75434 __initcall(ioresources_init);
75435diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
75436index a56f629..1fc4989 100644
75437--- a/kernel/rtmutex-tester.c
75438+++ b/kernel/rtmutex-tester.c
75439@@ -21,7 +21,7 @@
75440 #define MAX_RT_TEST_MUTEXES 8
75441
75442 static spinlock_t rttest_lock;
75443-static atomic_t rttest_event;
75444+static atomic_unchecked_t rttest_event;
75445
75446 struct test_thread_data {
75447 int opcode;
75448@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75449
75450 case RTTEST_LOCKCONT:
75451 td->mutexes[td->opdata] = 1;
75452- td->event = atomic_add_return(1, &rttest_event);
75453+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75454 return 0;
75455
75456 case RTTEST_RESET:
75457@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75458 return 0;
75459
75460 case RTTEST_RESETEVENT:
75461- atomic_set(&rttest_event, 0);
75462+ atomic_set_unchecked(&rttest_event, 0);
75463 return 0;
75464
75465 default:
75466@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75467 return ret;
75468
75469 td->mutexes[id] = 1;
75470- td->event = atomic_add_return(1, &rttest_event);
75471+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75472 rt_mutex_lock(&mutexes[id]);
75473- td->event = atomic_add_return(1, &rttest_event);
75474+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75475 td->mutexes[id] = 4;
75476 return 0;
75477
75478@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75479 return ret;
75480
75481 td->mutexes[id] = 1;
75482- td->event = atomic_add_return(1, &rttest_event);
75483+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75484 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
75485- td->event = atomic_add_return(1, &rttest_event);
75486+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75487 td->mutexes[id] = ret ? 0 : 4;
75488 return ret ? -EINTR : 0;
75489
75490@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
75491 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
75492 return ret;
75493
75494- td->event = atomic_add_return(1, &rttest_event);
75495+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75496 rt_mutex_unlock(&mutexes[id]);
75497- td->event = atomic_add_return(1, &rttest_event);
75498+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75499 td->mutexes[id] = 0;
75500 return 0;
75501
75502@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75503 break;
75504
75505 td->mutexes[dat] = 2;
75506- td->event = atomic_add_return(1, &rttest_event);
75507+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75508 break;
75509
75510 case RTTEST_LOCKBKL:
75511@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75512 return;
75513
75514 td->mutexes[dat] = 3;
75515- td->event = atomic_add_return(1, &rttest_event);
75516+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75517 break;
75518
75519 case RTTEST_LOCKNOWAIT:
75520@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
75521 return;
75522
75523 td->mutexes[dat] = 1;
75524- td->event = atomic_add_return(1, &rttest_event);
75525+ td->event = atomic_add_return_unchecked(1, &rttest_event);
75526 return;
75527
75528 case RTTEST_LOCKBKL:
75529diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
75530index 29bd4ba..8c5de90 100644
75531--- a/kernel/rtmutex.c
75532+++ b/kernel/rtmutex.c
75533@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
75534 */
75535 spin_lock_irqsave(&pendowner->pi_lock, flags);
75536
75537- WARN_ON(!pendowner->pi_blocked_on);
75538+ BUG_ON(!pendowner->pi_blocked_on);
75539 WARN_ON(pendowner->pi_blocked_on != waiter);
75540 WARN_ON(pendowner->pi_blocked_on->lock != lock);
75541
75542diff --git a/kernel/sched.c b/kernel/sched.c
75543index 0591df8..e3af3a4 100644
75544--- a/kernel/sched.c
75545+++ b/kernel/sched.c
75546@@ -5043,7 +5043,7 @@ out:
75547 * In CONFIG_NO_HZ case, the idle load balance owner will do the
75548 * rebalancing for all the cpus for whom scheduler ticks are stopped.
75549 */
75550-static void run_rebalance_domains(struct softirq_action *h)
75551+static void run_rebalance_domains(void)
75552 {
75553 int this_cpu = smp_processor_id();
75554 struct rq *this_rq = cpu_rq(this_cpu);
75555@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
75556 }
75557 }
75558
75559+#ifdef CONFIG_GRKERNSEC_SETXID
75560+extern void gr_delayed_cred_worker(void);
75561+static inline void gr_cred_schedule(void)
75562+{
75563+ if (unlikely(current->delayed_cred))
75564+ gr_delayed_cred_worker();
75565+}
75566+#else
75567+static inline void gr_cred_schedule(void)
75568+{
75569+}
75570+#endif
75571+
75572 /*
75573 * schedule() is the main scheduler function.
75574 */
75575@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
75576 struct rq *rq;
75577 int cpu;
75578
75579+ pax_track_stack();
75580+
75581 need_resched:
75582 preempt_disable();
75583 cpu = smp_processor_id();
75584@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
75585
75586 schedule_debug(prev);
75587
75588+ gr_cred_schedule();
75589+
75590 if (sched_feat(HRTICK))
75591 hrtick_clear(rq);
75592
75593@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
75594 * Look out! "owner" is an entirely speculative pointer
75595 * access and not reliable.
75596 */
75597-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75598+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
75599 {
75600 unsigned int cpu;
75601 struct rq *rq;
75602@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75603 * DEBUG_PAGEALLOC could have unmapped it if
75604 * the mutex owner just released it and exited.
75605 */
75606- if (probe_kernel_address(&owner->cpu, cpu))
75607+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
75608 return 0;
75609 #else
75610- cpu = owner->cpu;
75611+ cpu = task_thread_info(owner)->cpu;
75612 #endif
75613
75614 /*
75615@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75616 /*
75617 * Is that owner really running on that cpu?
75618 */
75619- if (task_thread_info(rq->curr) != owner || need_resched())
75620+ if (rq->curr != owner || need_resched())
75621 return 0;
75622
75623 cpu_relax();
75624@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
75625 /* convert nice value [19,-20] to rlimit style value [1,40] */
75626 int nice_rlim = 20 - nice;
75627
75628+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
75629+
75630 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
75631 capable(CAP_SYS_NICE));
75632 }
75633@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
75634 if (nice > 19)
75635 nice = 19;
75636
75637- if (increment < 0 && !can_nice(current, nice))
75638+ if (increment < 0 && (!can_nice(current, nice) ||
75639+ gr_handle_chroot_nice()))
75640 return -EPERM;
75641
75642 retval = security_task_setnice(current, nice);
75643@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
75644 long power;
75645 int weight;
75646
75647- WARN_ON(!sd || !sd->groups);
75648+ BUG_ON(!sd || !sd->groups);
75649
75650 if (cpu != group_first_cpu(sd->groups))
75651 return;
75652diff --git a/kernel/signal.c b/kernel/signal.c
75653index 2494827..cda80a0 100644
75654--- a/kernel/signal.c
75655+++ b/kernel/signal.c
75656@@ -41,12 +41,12 @@
75657
75658 static struct kmem_cache *sigqueue_cachep;
75659
75660-static void __user *sig_handler(struct task_struct *t, int sig)
75661+static __sighandler_t sig_handler(struct task_struct *t, int sig)
75662 {
75663 return t->sighand->action[sig - 1].sa.sa_handler;
75664 }
75665
75666-static int sig_handler_ignored(void __user *handler, int sig)
75667+static int sig_handler_ignored(__sighandler_t handler, int sig)
75668 {
75669 /* Is it explicitly or implicitly ignored? */
75670 return handler == SIG_IGN ||
75671@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
75672 static int sig_task_ignored(struct task_struct *t, int sig,
75673 int from_ancestor_ns)
75674 {
75675- void __user *handler;
75676+ __sighandler_t handler;
75677
75678 handler = sig_handler(t, sig);
75679
75680@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
75681 */
75682 user = get_uid(__task_cred(t)->user);
75683 atomic_inc(&user->sigpending);
75684+
75685+ if (!override_rlimit)
75686+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
75687 if (override_rlimit ||
75688 atomic_read(&user->sigpending) <=
75689 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
75690@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
75691
75692 int unhandled_signal(struct task_struct *tsk, int sig)
75693 {
75694- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
75695+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
75696 if (is_global_init(tsk))
75697 return 1;
75698 if (handler != SIG_IGN && handler != SIG_DFL)
75699@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
75700 }
75701 }
75702
75703+ /* allow glibc communication via tgkill to other threads in our
75704+ thread group */
75705+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
75706+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
75707+ && gr_handle_signal(t, sig))
75708+ return -EPERM;
75709+
75710 return security_task_kill(t, info, sig, 0);
75711 }
75712
75713@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75714 return send_signal(sig, info, p, 1);
75715 }
75716
75717-static int
75718+int
75719 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75720 {
75721 return send_signal(sig, info, t, 0);
75722@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75723 unsigned long int flags;
75724 int ret, blocked, ignored;
75725 struct k_sigaction *action;
75726+ int is_unhandled = 0;
75727
75728 spin_lock_irqsave(&t->sighand->siglock, flags);
75729 action = &t->sighand->action[sig-1];
75730@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75731 }
75732 if (action->sa.sa_handler == SIG_DFL)
75733 t->signal->flags &= ~SIGNAL_UNKILLABLE;
75734+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
75735+ is_unhandled = 1;
75736 ret = specific_send_sig_info(sig, info, t);
75737 spin_unlock_irqrestore(&t->sighand->siglock, flags);
75738
75739+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
75740+ normal operation */
75741+ if (is_unhandled) {
75742+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
75743+ gr_handle_crash(t, sig);
75744+ }
75745+
75746 return ret;
75747 }
75748
75749@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75750 {
75751 int ret = check_kill_permission(sig, info, p);
75752
75753- if (!ret && sig)
75754+ if (!ret && sig) {
75755 ret = do_send_sig_info(sig, info, p, true);
75756+ if (!ret)
75757+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75758+ }
75759
75760 return ret;
75761 }
75762@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75763 {
75764 siginfo_t info;
75765
75766+ pax_track_stack();
75767+
75768 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75769
75770 memset(&info, 0, sizeof info);
75771@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75772 int error = -ESRCH;
75773
75774 rcu_read_lock();
75775- p = find_task_by_vpid(pid);
75776+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75777+ /* allow glibc communication via tgkill to other threads in our
75778+ thread group */
75779+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75780+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
75781+ p = find_task_by_vpid_unrestricted(pid);
75782+ else
75783+#endif
75784+ p = find_task_by_vpid(pid);
75785 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75786 error = check_kill_permission(sig, info, p);
75787 /*
75788diff --git a/kernel/smp.c b/kernel/smp.c
75789index aa9cff3..631a0de 100644
75790--- a/kernel/smp.c
75791+++ b/kernel/smp.c
75792@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75793 }
75794 EXPORT_SYMBOL(smp_call_function);
75795
75796-void ipi_call_lock(void)
75797+void ipi_call_lock(void) __acquires(call_function.lock)
75798 {
75799 spin_lock(&call_function.lock);
75800 }
75801
75802-void ipi_call_unlock(void)
75803+void ipi_call_unlock(void) __releases(call_function.lock)
75804 {
75805 spin_unlock(&call_function.lock);
75806 }
75807
75808-void ipi_call_lock_irq(void)
75809+void ipi_call_lock_irq(void) __acquires(call_function.lock)
75810 {
75811 spin_lock_irq(&call_function.lock);
75812 }
75813
75814-void ipi_call_unlock_irq(void)
75815+void ipi_call_unlock_irq(void) __releases(call_function.lock)
75816 {
75817 spin_unlock_irq(&call_function.lock);
75818 }
75819diff --git a/kernel/softirq.c b/kernel/softirq.c
75820index 04a0252..580c512 100644
75821--- a/kernel/softirq.c
75822+++ b/kernel/softirq.c
75823@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75824
75825 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75826
75827-char *softirq_to_name[NR_SOFTIRQS] = {
75828+const char * const softirq_to_name[NR_SOFTIRQS] = {
75829 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75830 "TASKLET", "SCHED", "HRTIMER", "RCU"
75831 };
75832@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75833
75834 asmlinkage void __do_softirq(void)
75835 {
75836- struct softirq_action *h;
75837+ const struct softirq_action *h;
75838 __u32 pending;
75839 int max_restart = MAX_SOFTIRQ_RESTART;
75840 int cpu;
75841@@ -233,7 +233,7 @@ restart:
75842 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75843
75844 trace_softirq_entry(h, softirq_vec);
75845- h->action(h);
75846+ h->action();
75847 trace_softirq_exit(h, softirq_vec);
75848 if (unlikely(prev_count != preempt_count())) {
75849 printk(KERN_ERR "huh, entered softirq %td %s %p"
75850@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75851 local_irq_restore(flags);
75852 }
75853
75854-void open_softirq(int nr, void (*action)(struct softirq_action *))
75855+void open_softirq(int nr, void (*action)(void))
75856 {
75857- softirq_vec[nr].action = action;
75858+ pax_open_kernel();
75859+ *(void **)&softirq_vec[nr].action = action;
75860+ pax_close_kernel();
75861 }
75862
75863 /*
75864@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75865
75866 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75867
75868-static void tasklet_action(struct softirq_action *a)
75869+static void tasklet_action(void)
75870 {
75871 struct tasklet_struct *list;
75872
75873@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75874 }
75875 }
75876
75877-static void tasklet_hi_action(struct softirq_action *a)
75878+static void tasklet_hi_action(void)
75879 {
75880 struct tasklet_struct *list;
75881
75882diff --git a/kernel/sys.c b/kernel/sys.c
75883index e9512b1..f07185f 100644
75884--- a/kernel/sys.c
75885+++ b/kernel/sys.c
75886@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75887 error = -EACCES;
75888 goto out;
75889 }
75890+
75891+ if (gr_handle_chroot_setpriority(p, niceval)) {
75892+ error = -EACCES;
75893+ goto out;
75894+ }
75895+
75896 no_nice = security_task_setnice(p, niceval);
75897 if (no_nice) {
75898 error = no_nice;
75899@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75900 !(user = find_user(who)))
75901 goto out_unlock; /* No processes for this user */
75902
75903- do_each_thread(g, p)
75904+ do_each_thread(g, p) {
75905 if (__task_cred(p)->uid == who)
75906 error = set_one_prio(p, niceval, error);
75907- while_each_thread(g, p);
75908+ } while_each_thread(g, p);
75909 if (who != cred->uid)
75910 free_uid(user); /* For find_user() */
75911 break;
75912@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75913 !(user = find_user(who)))
75914 goto out_unlock; /* No processes for this user */
75915
75916- do_each_thread(g, p)
75917+ do_each_thread(g, p) {
75918 if (__task_cred(p)->uid == who) {
75919 niceval = 20 - task_nice(p);
75920 if (niceval > retval)
75921 retval = niceval;
75922 }
75923- while_each_thread(g, p);
75924+ } while_each_thread(g, p);
75925 if (who != cred->uid)
75926 free_uid(user); /* for find_user() */
75927 break;
75928@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75929 goto error;
75930 }
75931
75932+ if (gr_check_group_change(new->gid, new->egid, -1))
75933+ goto error;
75934+
75935 if (rgid != (gid_t) -1 ||
75936 (egid != (gid_t) -1 && egid != old->gid))
75937 new->sgid = new->egid;
75938@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75939 goto error;
75940
75941 retval = -EPERM;
75942+
75943+ if (gr_check_group_change(gid, gid, gid))
75944+ goto error;
75945+
75946 if (capable(CAP_SETGID))
75947 new->gid = new->egid = new->sgid = new->fsgid = gid;
75948 else if (gid == old->gid || gid == old->sgid)
75949@@ -559,7 +572,7 @@ error:
75950 /*
75951 * change the user struct in a credentials set to match the new UID
75952 */
75953-static int set_user(struct cred *new)
75954+int set_user(struct cred *new)
75955 {
75956 struct user_struct *new_user;
75957
75958@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75959 if (!new_user)
75960 return -EAGAIN;
75961
75962+ /*
75963+ * We don't fail in case of NPROC limit excess here because too many
75964+ * poorly written programs don't check set*uid() return code, assuming
75965+ * it never fails if called by root. We may still enforce NPROC limit
75966+ * for programs doing set*uid()+execve() by harmlessly deferring the
75967+ * failure to the execve() stage.
75968+ */
75969 if (atomic_read(&new_user->processes) >=
75970 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75971- new_user != INIT_USER) {
75972- free_uid(new_user);
75973- return -EAGAIN;
75974- }
75975+ new_user != INIT_USER)
75976+ current->flags |= PF_NPROC_EXCEEDED;
75977+ else
75978+ current->flags &= ~PF_NPROC_EXCEEDED;
75979
75980 free_uid(new->user);
75981 new->user = new_user;
75982@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75983 goto error;
75984 }
75985
75986+ if (gr_check_user_change(new->uid, new->euid, -1))
75987+ goto error;
75988+
75989 if (new->uid != old->uid) {
75990 retval = set_user(new);
75991 if (retval < 0)
75992@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75993 goto error;
75994
75995 retval = -EPERM;
75996+
75997+ if (gr_check_crash_uid(uid))
75998+ goto error;
75999+ if (gr_check_user_change(uid, uid, uid))
76000+ goto error;
76001+
76002 if (capable(CAP_SETUID)) {
76003 new->suid = new->uid = uid;
76004 if (uid != old->uid) {
76005@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
76006 goto error;
76007 }
76008
76009+ if (gr_check_user_change(ruid, euid, -1))
76010+ goto error;
76011+
76012 if (ruid != (uid_t) -1) {
76013 new->uid = ruid;
76014 if (ruid != old->uid) {
76015@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
76016 goto error;
76017 }
76018
76019+ if (gr_check_group_change(rgid, egid, -1))
76020+ goto error;
76021+
76022 if (rgid != (gid_t) -1)
76023 new->gid = rgid;
76024 if (egid != (gid_t) -1)
76025@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
76026 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
76027 goto error;
76028
76029+ if (gr_check_user_change(-1, -1, uid))
76030+ goto error;
76031+
76032 if (uid == old->uid || uid == old->euid ||
76033 uid == old->suid || uid == old->fsuid ||
76034 capable(CAP_SETUID)) {
76035@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
76036 if (gid == old->gid || gid == old->egid ||
76037 gid == old->sgid || gid == old->fsgid ||
76038 capable(CAP_SETGID)) {
76039+ if (gr_check_group_change(-1, -1, gid))
76040+ goto error;
76041+
76042 if (gid != old_fsgid) {
76043 new->fsgid = gid;
76044 goto change_okay;
76045@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
76046 error = get_dumpable(me->mm);
76047 break;
76048 case PR_SET_DUMPABLE:
76049- if (arg2 < 0 || arg2 > 1) {
76050+ if (arg2 > 1) {
76051 error = -EINVAL;
76052 break;
76053 }
76054diff --git a/kernel/sysctl.c b/kernel/sysctl.c
76055index b8bd058..ab6a76be 100644
76056--- a/kernel/sysctl.c
76057+++ b/kernel/sysctl.c
76058@@ -63,6 +63,13 @@
76059 static int deprecated_sysctl_warning(struct __sysctl_args *args);
76060
76061 #if defined(CONFIG_SYSCTL)
76062+#include <linux/grsecurity.h>
76063+#include <linux/grinternal.h>
76064+
76065+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
76066+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
76067+ const int op);
76068+extern int gr_handle_chroot_sysctl(const int op);
76069
76070 /* External variables not in a header file. */
76071 extern int C_A_D;
76072@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
76073 static int proc_taint(struct ctl_table *table, int write,
76074 void __user *buffer, size_t *lenp, loff_t *ppos);
76075 #endif
76076+extern ctl_table grsecurity_table[];
76077
76078 static struct ctl_table root_table[];
76079 static struct ctl_table_root sysctl_table_root;
76080@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
76081 int sysctl_legacy_va_layout;
76082 #endif
76083
76084+#ifdef CONFIG_PAX_SOFTMODE
76085+static ctl_table pax_table[] = {
76086+ {
76087+ .ctl_name = CTL_UNNUMBERED,
76088+ .procname = "softmode",
76089+ .data = &pax_softmode,
76090+ .maxlen = sizeof(unsigned int),
76091+ .mode = 0600,
76092+ .proc_handler = &proc_dointvec,
76093+ },
76094+
76095+ { .ctl_name = 0 }
76096+};
76097+#endif
76098+
76099 extern int prove_locking;
76100 extern int lock_stat;
76101
76102@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
76103 #endif
76104
76105 static struct ctl_table kern_table[] = {
76106+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
76107+ {
76108+ .ctl_name = CTL_UNNUMBERED,
76109+ .procname = "grsecurity",
76110+ .mode = 0500,
76111+ .child = grsecurity_table,
76112+ },
76113+#endif
76114+
76115+#ifdef CONFIG_PAX_SOFTMODE
76116+ {
76117+ .ctl_name = CTL_UNNUMBERED,
76118+ .procname = "pax",
76119+ .mode = 0500,
76120+ .child = pax_table,
76121+ },
76122+#endif
76123+
76124 {
76125 .ctl_name = CTL_UNNUMBERED,
76126 .procname = "sched_child_runs_first",
76127@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
76128 .data = &modprobe_path,
76129 .maxlen = KMOD_PATH_LEN,
76130 .mode = 0644,
76131- .proc_handler = &proc_dostring,
76132- .strategy = &sysctl_string,
76133+ .proc_handler = &proc_dostring_modpriv,
76134+ .strategy = &sysctl_string_modpriv,
76135 },
76136 {
76137 .ctl_name = CTL_UNNUMBERED,
76138@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
76139 .mode = 0644,
76140 .proc_handler = &proc_dointvec
76141 },
76142+ {
76143+ .procname = "heap_stack_gap",
76144+ .data = &sysctl_heap_stack_gap,
76145+ .maxlen = sizeof(sysctl_heap_stack_gap),
76146+ .mode = 0644,
76147+ .proc_handler = proc_doulongvec_minmax,
76148+ },
76149 #else
76150 {
76151 .ctl_name = CTL_UNNUMBERED,
76152@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
76153 return 0;
76154 }
76155
76156+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
76157+
76158 static int parse_table(int __user *name, int nlen,
76159 void __user *oldval, size_t __user *oldlenp,
76160 void __user *newval, size_t newlen,
76161@@ -1821,7 +1871,7 @@ repeat:
76162 if (n == table->ctl_name) {
76163 int error;
76164 if (table->child) {
76165- if (sysctl_perm(root, table, MAY_EXEC))
76166+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
76167 return -EPERM;
76168 name++;
76169 nlen--;
76170@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
76171 int error;
76172 int mode;
76173
76174+ if (table->parent != NULL && table->parent->procname != NULL &&
76175+ table->procname != NULL &&
76176+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
76177+ return -EACCES;
76178+ if (gr_handle_chroot_sysctl(op))
76179+ return -EACCES;
76180+ error = gr_handle_sysctl(table, op);
76181+ if (error)
76182+ return error;
76183+
76184+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
76185+ if (error)
76186+ return error;
76187+
76188+ if (root->permissions)
76189+ mode = root->permissions(root, current->nsproxy, table);
76190+ else
76191+ mode = table->mode;
76192+
76193+ return test_perm(mode, op);
76194+}
76195+
76196+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
76197+{
76198+ int error;
76199+ int mode;
76200+
76201 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
76202 if (error)
76203 return error;
76204@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
76205 buffer, lenp, ppos);
76206 }
76207
76208+int proc_dostring_modpriv(struct ctl_table *table, int write,
76209+ void __user *buffer, size_t *lenp, loff_t *ppos)
76210+{
76211+ if (write && !capable(CAP_SYS_MODULE))
76212+ return -EPERM;
76213+
76214+ return _proc_do_string(table->data, table->maxlen, write,
76215+ buffer, lenp, ppos);
76216+}
76217+
76218
76219 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
76220 int *valp,
76221@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
76222 vleft = table->maxlen / sizeof(unsigned long);
76223 left = *lenp;
76224
76225- for (; left && vleft--; i++, min++, max++, first=0) {
76226+ for (; left && vleft--; i++, first=0) {
76227 if (write) {
76228 while (left) {
76229 char c;
76230@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
76231 return -ENOSYS;
76232 }
76233
76234+int proc_dostring_modpriv(struct ctl_table *table, int write,
76235+ void __user *buffer, size_t *lenp, loff_t *ppos)
76236+{
76237+ return -ENOSYS;
76238+}
76239+
76240 int proc_dointvec(struct ctl_table *table, int write,
76241 void __user *buffer, size_t *lenp, loff_t *ppos)
76242 {
76243@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
76244 return 1;
76245 }
76246
76247+int sysctl_string_modpriv(struct ctl_table *table,
76248+ void __user *oldval, size_t __user *oldlenp,
76249+ void __user *newval, size_t newlen)
76250+{
76251+ if (newval && newlen && !capable(CAP_SYS_MODULE))
76252+ return -EPERM;
76253+
76254+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
76255+}
76256+
76257 /*
76258 * This function makes sure that all of the integers in the vector
76259 * are between the minimum and maximum values given in the arrays
76260@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
76261 return -ENOSYS;
76262 }
76263
76264+int sysctl_string_modpriv(struct ctl_table *table,
76265+ void __user *oldval, size_t __user *oldlenp,
76266+ void __user *newval, size_t newlen)
76267+{
76268+ return -ENOSYS;
76269+}
76270+
76271 int sysctl_intvec(struct ctl_table *table,
76272 void __user *oldval, size_t __user *oldlenp,
76273 void __user *newval, size_t newlen)
76274@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
76275 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
76276 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
76277 EXPORT_SYMBOL(proc_dostring);
76278+EXPORT_SYMBOL(proc_dostring_modpriv);
76279 EXPORT_SYMBOL(proc_doulongvec_minmax);
76280 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
76281 EXPORT_SYMBOL(register_sysctl_table);
76282@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
76283 EXPORT_SYMBOL(sysctl_jiffies);
76284 EXPORT_SYMBOL(sysctl_ms_jiffies);
76285 EXPORT_SYMBOL(sysctl_string);
76286+EXPORT_SYMBOL(sysctl_string_modpriv);
76287 EXPORT_SYMBOL(sysctl_data);
76288 EXPORT_SYMBOL(unregister_sysctl_table);
76289diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
76290index 469193c..ea3ecb2 100644
76291--- a/kernel/sysctl_check.c
76292+++ b/kernel/sysctl_check.c
76293@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
76294 } else {
76295 if ((table->strategy == sysctl_data) ||
76296 (table->strategy == sysctl_string) ||
76297+ (table->strategy == sysctl_string_modpriv) ||
76298 (table->strategy == sysctl_intvec) ||
76299 (table->strategy == sysctl_jiffies) ||
76300 (table->strategy == sysctl_ms_jiffies) ||
76301 (table->proc_handler == proc_dostring) ||
76302+ (table->proc_handler == proc_dostring_modpriv) ||
76303 (table->proc_handler == proc_dointvec) ||
76304 (table->proc_handler == proc_dointvec_minmax) ||
76305 (table->proc_handler == proc_dointvec_jiffies) ||
76306diff --git a/kernel/taskstats.c b/kernel/taskstats.c
76307index a4ef542..798bcd7 100644
76308--- a/kernel/taskstats.c
76309+++ b/kernel/taskstats.c
76310@@ -26,9 +26,12 @@
76311 #include <linux/cgroup.h>
76312 #include <linux/fs.h>
76313 #include <linux/file.h>
76314+#include <linux/grsecurity.h>
76315 #include <net/genetlink.h>
76316 #include <asm/atomic.h>
76317
76318+extern int gr_is_taskstats_denied(int pid);
76319+
76320 /*
76321 * Maximum length of a cpumask that can be specified in
76322 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
76323@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
76324 size_t size;
76325 cpumask_var_t mask;
76326
76327+ if (gr_is_taskstats_denied(current->pid))
76328+ return -EACCES;
76329+
76330 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
76331 return -ENOMEM;
76332
76333diff --git a/kernel/time.c b/kernel/time.c
76334index 33df60e..ca768bd 100644
76335--- a/kernel/time.c
76336+++ b/kernel/time.c
76337@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
76338 return error;
76339
76340 if (tz) {
76341+ /* we log in do_settimeofday called below, so don't log twice
76342+ */
76343+ if (!tv)
76344+ gr_log_timechange();
76345+
76346 /* SMP safe, global irq locking makes it work. */
76347 sys_tz = *tz;
76348 update_vsyscall_tz();
76349@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
76350 * Avoid unnecessary multiplications/divisions in the
76351 * two most common HZ cases:
76352 */
76353-unsigned int inline jiffies_to_msecs(const unsigned long j)
76354+inline unsigned int jiffies_to_msecs(const unsigned long j)
76355 {
76356 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
76357 return (MSEC_PER_SEC / HZ) * j;
76358@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
76359 }
76360 EXPORT_SYMBOL(jiffies_to_msecs);
76361
76362-unsigned int inline jiffies_to_usecs(const unsigned long j)
76363+inline unsigned int jiffies_to_usecs(const unsigned long j)
76364 {
76365 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
76366 return (USEC_PER_SEC / HZ) * j;
76367diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
76368index 57b953f..06f149f 100644
76369--- a/kernel/time/tick-broadcast.c
76370+++ b/kernel/time/tick-broadcast.c
76371@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
76372 * then clear the broadcast bit.
76373 */
76374 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
76375- int cpu = smp_processor_id();
76376+ cpu = smp_processor_id();
76377
76378 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
76379 tick_broadcast_clear_oneshot(cpu);
76380diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
76381index 4a71cff..ffb5548 100644
76382--- a/kernel/time/timekeeping.c
76383+++ b/kernel/time/timekeeping.c
76384@@ -14,6 +14,7 @@
76385 #include <linux/init.h>
76386 #include <linux/mm.h>
76387 #include <linux/sched.h>
76388+#include <linux/grsecurity.h>
76389 #include <linux/sysdev.h>
76390 #include <linux/clocksource.h>
76391 #include <linux/jiffies.h>
76392@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
76393 */
76394 struct timespec ts = xtime;
76395 timespec_add_ns(&ts, nsec);
76396- ACCESS_ONCE(xtime_cache) = ts;
76397+ ACCESS_ONCE_RW(xtime_cache) = ts;
76398 }
76399
76400 /* must hold xtime_lock */
76401@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
76402 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
76403 return -EINVAL;
76404
76405+ gr_log_timechange();
76406+
76407 write_seqlock_irqsave(&xtime_lock, flags);
76408
76409 timekeeping_forward_now();
76410diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
76411index 54c0dda..e9095d9 100644
76412--- a/kernel/time/timer_list.c
76413+++ b/kernel/time/timer_list.c
76414@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
76415
76416 static void print_name_offset(struct seq_file *m, void *sym)
76417 {
76418+#ifdef CONFIG_GRKERNSEC_HIDESYM
76419+ SEQ_printf(m, "<%p>", NULL);
76420+#else
76421 char symname[KSYM_NAME_LEN];
76422
76423 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
76424 SEQ_printf(m, "<%p>", sym);
76425 else
76426 SEQ_printf(m, "%s", symname);
76427+#endif
76428 }
76429
76430 static void
76431@@ -112,7 +116,11 @@ next_one:
76432 static void
76433 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
76434 {
76435+#ifdef CONFIG_GRKERNSEC_HIDESYM
76436+ SEQ_printf(m, " .base: %p\n", NULL);
76437+#else
76438 SEQ_printf(m, " .base: %p\n", base);
76439+#endif
76440 SEQ_printf(m, " .index: %d\n",
76441 base->index);
76442 SEQ_printf(m, " .resolution: %Lu nsecs\n",
76443@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
76444 {
76445 struct proc_dir_entry *pe;
76446
76447+#ifdef CONFIG_GRKERNSEC_PROC_ADD
76448+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
76449+#else
76450 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
76451+#endif
76452 if (!pe)
76453 return -ENOMEM;
76454 return 0;
76455diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
76456index ee5681f..634089b 100644
76457--- a/kernel/time/timer_stats.c
76458+++ b/kernel/time/timer_stats.c
76459@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
76460 static unsigned long nr_entries;
76461 static struct entry entries[MAX_ENTRIES];
76462
76463-static atomic_t overflow_count;
76464+static atomic_unchecked_t overflow_count;
76465
76466 /*
76467 * The entries are in a hash-table, for fast lookup:
76468@@ -140,7 +140,7 @@ static void reset_entries(void)
76469 nr_entries = 0;
76470 memset(entries, 0, sizeof(entries));
76471 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
76472- atomic_set(&overflow_count, 0);
76473+ atomic_set_unchecked(&overflow_count, 0);
76474 }
76475
76476 static struct entry *alloc_entry(void)
76477@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76478 if (likely(entry))
76479 entry->count++;
76480 else
76481- atomic_inc(&overflow_count);
76482+ atomic_inc_unchecked(&overflow_count);
76483
76484 out_unlock:
76485 spin_unlock_irqrestore(lock, flags);
76486@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
76487
76488 static void print_name_offset(struct seq_file *m, unsigned long addr)
76489 {
76490+#ifdef CONFIG_GRKERNSEC_HIDESYM
76491+ seq_printf(m, "<%p>", NULL);
76492+#else
76493 char symname[KSYM_NAME_LEN];
76494
76495 if (lookup_symbol_name(addr, symname) < 0)
76496 seq_printf(m, "<%p>", (void *)addr);
76497 else
76498 seq_printf(m, "%s", symname);
76499+#endif
76500 }
76501
76502 static int tstats_show(struct seq_file *m, void *v)
76503@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
76504
76505 seq_puts(m, "Timer Stats Version: v0.2\n");
76506 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
76507- if (atomic_read(&overflow_count))
76508+ if (atomic_read_unchecked(&overflow_count))
76509 seq_printf(m, "Overflow: %d entries\n",
76510- atomic_read(&overflow_count));
76511+ atomic_read_unchecked(&overflow_count));
76512
76513 for (i = 0; i < nr_entries; i++) {
76514 entry = entries + i;
76515@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
76516 {
76517 struct proc_dir_entry *pe;
76518
76519+#ifdef CONFIG_GRKERNSEC_PROC_ADD
76520+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
76521+#else
76522 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
76523+#endif
76524 if (!pe)
76525 return -ENOMEM;
76526 return 0;
76527diff --git a/kernel/timer.c b/kernel/timer.c
76528index cb3c1f1..8bf5526 100644
76529--- a/kernel/timer.c
76530+++ b/kernel/timer.c
76531@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
76532 /*
76533 * This function runs timers and the timer-tq in bottom half context.
76534 */
76535-static void run_timer_softirq(struct softirq_action *h)
76536+static void run_timer_softirq(void)
76537 {
76538 struct tvec_base *base = __get_cpu_var(tvec_bases);
76539
76540diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
76541index d9d6206..f19467e 100644
76542--- a/kernel/trace/blktrace.c
76543+++ b/kernel/trace/blktrace.c
76544@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
76545 struct blk_trace *bt = filp->private_data;
76546 char buf[16];
76547
76548- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
76549+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
76550
76551 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
76552 }
76553@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
76554 return 1;
76555
76556 bt = buf->chan->private_data;
76557- atomic_inc(&bt->dropped);
76558+ atomic_inc_unchecked(&bt->dropped);
76559 return 0;
76560 }
76561
76562@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
76563
76564 bt->dir = dir;
76565 bt->dev = dev;
76566- atomic_set(&bt->dropped, 0);
76567+ atomic_set_unchecked(&bt->dropped, 0);
76568
76569 ret = -EIO;
76570 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
76571diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
76572index 4872937..c794d40 100644
76573--- a/kernel/trace/ftrace.c
76574+++ b/kernel/trace/ftrace.c
76575@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
76576
76577 ip = rec->ip;
76578
76579+ ret = ftrace_arch_code_modify_prepare();
76580+ FTRACE_WARN_ON(ret);
76581+ if (ret)
76582+ return 0;
76583+
76584 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
76585+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
76586 if (ret) {
76587 ftrace_bug(ret, ip);
76588 rec->flags |= FTRACE_FL_FAILED;
76589- return 0;
76590 }
76591- return 1;
76592+ return ret ? 0 : 1;
76593 }
76594
76595 /*
76596diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
76597index e749a05..19c6e94 100644
76598--- a/kernel/trace/ring_buffer.c
76599+++ b/kernel/trace/ring_buffer.c
76600@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
76601 * the reader page). But if the next page is a header page,
76602 * its flags will be non zero.
76603 */
76604-static int inline
76605+static inline int
76606 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
76607 struct buffer_page *page, struct list_head *list)
76608 {
76609diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
76610index a2a2d1f..7f32b09 100644
76611--- a/kernel/trace/trace.c
76612+++ b/kernel/trace/trace.c
76613@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
76614 size_t rem;
76615 unsigned int i;
76616
76617+ pax_track_stack();
76618+
76619 /* copy the tracer to avoid using a global lock all around */
76620 mutex_lock(&trace_types_lock);
76621 if (unlikely(old_tracer != current_trace && current_trace)) {
76622@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
76623 int entries, size, i;
76624 size_t ret;
76625
76626+ pax_track_stack();
76627+
76628 if (*ppos & (PAGE_SIZE - 1)) {
76629 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
76630 return -EINVAL;
76631@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
76632 };
76633 #endif
76634
76635-static struct dentry *d_tracer;
76636-
76637 struct dentry *tracing_init_dentry(void)
76638 {
76639+ static struct dentry *d_tracer;
76640 static int once;
76641
76642 if (d_tracer)
76643@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
76644 return d_tracer;
76645 }
76646
76647-static struct dentry *d_percpu;
76648-
76649 struct dentry *tracing_dentry_percpu(void)
76650 {
76651+ static struct dentry *d_percpu;
76652 static int once;
76653 struct dentry *d_tracer;
76654
76655diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
76656index d128f65..f37b4af 100644
76657--- a/kernel/trace/trace_events.c
76658+++ b/kernel/trace/trace_events.c
76659@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
76660 * Modules must own their file_operations to keep up with
76661 * reference counting.
76662 */
76663+
76664 struct ftrace_module_file_ops {
76665 struct list_head list;
76666 struct module *mod;
76667- struct file_operations id;
76668- struct file_operations enable;
76669- struct file_operations format;
76670- struct file_operations filter;
76671 };
76672
76673 static void remove_subsystem_dir(const char *name)
76674@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
76675
76676 file_ops->mod = mod;
76677
76678- file_ops->id = ftrace_event_id_fops;
76679- file_ops->id.owner = mod;
76680-
76681- file_ops->enable = ftrace_enable_fops;
76682- file_ops->enable.owner = mod;
76683-
76684- file_ops->filter = ftrace_event_filter_fops;
76685- file_ops->filter.owner = mod;
76686-
76687- file_ops->format = ftrace_event_format_fops;
76688- file_ops->format.owner = mod;
76689+ pax_open_kernel();
76690+ *(void **)&mod->trace_id.owner = mod;
76691+ *(void **)&mod->trace_enable.owner = mod;
76692+ *(void **)&mod->trace_filter.owner = mod;
76693+ *(void **)&mod->trace_format.owner = mod;
76694+ pax_close_kernel();
76695
76696 list_add(&file_ops->list, &ftrace_module_file_list);
76697
76698@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
76699 call->mod = mod;
76700 list_add(&call->list, &ftrace_events);
76701 event_create_dir(call, d_events,
76702- &file_ops->id, &file_ops->enable,
76703- &file_ops->filter, &file_ops->format);
76704+ &mod->trace_id, &mod->trace_enable,
76705+ &mod->trace_filter, &mod->trace_format);
76706 }
76707 }
76708
76709diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76710index 0acd834..b800b56 100644
76711--- a/kernel/trace/trace_mmiotrace.c
76712+++ b/kernel/trace/trace_mmiotrace.c
76713@@ -23,7 +23,7 @@ struct header_iter {
76714 static struct trace_array *mmio_trace_array;
76715 static bool overrun_detected;
76716 static unsigned long prev_overruns;
76717-static atomic_t dropped_count;
76718+static atomic_unchecked_t dropped_count;
76719
76720 static void mmio_reset_data(struct trace_array *tr)
76721 {
76722@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76723
76724 static unsigned long count_overruns(struct trace_iterator *iter)
76725 {
76726- unsigned long cnt = atomic_xchg(&dropped_count, 0);
76727+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76728 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76729
76730 if (over > prev_overruns)
76731@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76732 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76733 sizeof(*entry), 0, pc);
76734 if (!event) {
76735- atomic_inc(&dropped_count);
76736+ atomic_inc_unchecked(&dropped_count);
76737 return;
76738 }
76739 entry = ring_buffer_event_data(event);
76740@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76741 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76742 sizeof(*entry), 0, pc);
76743 if (!event) {
76744- atomic_inc(&dropped_count);
76745+ atomic_inc_unchecked(&dropped_count);
76746 return;
76747 }
76748 entry = ring_buffer_event_data(event);
76749diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76750index b6c12c6..41fdc53 100644
76751--- a/kernel/trace/trace_output.c
76752+++ b/kernel/trace/trace_output.c
76753@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76754 return 0;
76755 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76756 if (!IS_ERR(p)) {
76757- p = mangle_path(s->buffer + s->len, p, "\n");
76758+ p = mangle_path(s->buffer + s->len, p, "\n\\");
76759 if (p) {
76760 s->len = p - s->buffer;
76761 return 1;
76762diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76763index 8504ac7..ecf0adb 100644
76764--- a/kernel/trace/trace_stack.c
76765+++ b/kernel/trace/trace_stack.c
76766@@ -50,7 +50,7 @@ static inline void check_stack(void)
76767 return;
76768
76769 /* we do not handle interrupt stacks yet */
76770- if (!object_is_on_stack(&this_size))
76771+ if (!object_starts_on_stack(&this_size))
76772 return;
76773
76774 local_irq_save(flags);
76775diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76776index 40cafb0..d5ead43 100644
76777--- a/kernel/trace/trace_workqueue.c
76778+++ b/kernel/trace/trace_workqueue.c
76779@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76780 int cpu;
76781 pid_t pid;
76782 /* Can be inserted from interrupt or user context, need to be atomic */
76783- atomic_t inserted;
76784+ atomic_unchecked_t inserted;
76785 /*
76786 * Don't need to be atomic, works are serialized in a single workqueue thread
76787 * on a single CPU.
76788@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76789 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76790 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76791 if (node->pid == wq_thread->pid) {
76792- atomic_inc(&node->inserted);
76793+ atomic_inc_unchecked(&node->inserted);
76794 goto found;
76795 }
76796 }
76797@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76798 tsk = get_pid_task(pid, PIDTYPE_PID);
76799 if (tsk) {
76800 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76801- atomic_read(&cws->inserted), cws->executed,
76802+ atomic_read_unchecked(&cws->inserted), cws->executed,
76803 tsk->comm);
76804 put_task_struct(tsk);
76805 }
76806diff --git a/kernel/user.c b/kernel/user.c
76807index 1b91701..8795237 100644
76808--- a/kernel/user.c
76809+++ b/kernel/user.c
76810@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76811 spin_lock_irq(&uidhash_lock);
76812 up = uid_hash_find(uid, hashent);
76813 if (up) {
76814+ put_user_ns(ns);
76815 key_put(new->uid_keyring);
76816 key_put(new->session_keyring);
76817 kmem_cache_free(uid_cachep, new);
76818diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76819index 234ceb1..ad74049 100644
76820--- a/lib/Kconfig.debug
76821+++ b/lib/Kconfig.debug
76822@@ -905,7 +905,7 @@ config LATENCYTOP
76823 select STACKTRACE
76824 select SCHEDSTATS
76825 select SCHED_DEBUG
76826- depends on HAVE_LATENCYTOP_SUPPORT
76827+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76828 help
76829 Enable this option if you want to use the LatencyTOP tool
76830 to find out which userspace is blocking on what kernel operations.
76831diff --git a/lib/bitmap.c b/lib/bitmap.c
76832index 7025658..8d14cab 100644
76833--- a/lib/bitmap.c
76834+++ b/lib/bitmap.c
76835@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76836 {
76837 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76838 u32 chunk;
76839- const char __user *ubuf = buf;
76840+ const char __user *ubuf = (const char __force_user *)buf;
76841
76842 bitmap_zero(maskp, nmaskbits);
76843
76844@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76845 {
76846 if (!access_ok(VERIFY_READ, ubuf, ulen))
76847 return -EFAULT;
76848- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76849+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76850 }
76851 EXPORT_SYMBOL(bitmap_parse_user);
76852
76853diff --git a/lib/bug.c b/lib/bug.c
76854index 300e41a..2779eb0 100644
76855--- a/lib/bug.c
76856+++ b/lib/bug.c
76857@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76858 return BUG_TRAP_TYPE_NONE;
76859
76860 bug = find_bug(bugaddr);
76861+ if (!bug)
76862+ return BUG_TRAP_TYPE_NONE;
76863
76864 printk(KERN_EMERG "------------[ cut here ]------------\n");
76865
76866diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76867index 2b413db..e21d207 100644
76868--- a/lib/debugobjects.c
76869+++ b/lib/debugobjects.c
76870@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76871 if (limit > 4)
76872 return;
76873
76874- is_on_stack = object_is_on_stack(addr);
76875+ is_on_stack = object_starts_on_stack(addr);
76876 if (is_on_stack == onstack)
76877 return;
76878
76879diff --git a/lib/devres.c b/lib/devres.c
76880index 72c8909..7543868 100644
76881--- a/lib/devres.c
76882+++ b/lib/devres.c
76883@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76884 {
76885 iounmap(addr);
76886 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76887- (void *)addr));
76888+ (void __force *)addr));
76889 }
76890 EXPORT_SYMBOL(devm_iounmap);
76891
76892@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76893 {
76894 ioport_unmap(addr);
76895 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76896- devm_ioport_map_match, (void *)addr));
76897+ devm_ioport_map_match, (void __force *)addr));
76898 }
76899 EXPORT_SYMBOL(devm_ioport_unmap);
76900
76901diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76902index 084e879..0674448 100644
76903--- a/lib/dma-debug.c
76904+++ b/lib/dma-debug.c
76905@@ -861,7 +861,7 @@ out:
76906
76907 static void check_for_stack(struct device *dev, void *addr)
76908 {
76909- if (object_is_on_stack(addr))
76910+ if (object_starts_on_stack(addr))
76911 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76912 "stack [addr=%p]\n", addr);
76913 }
76914diff --git a/lib/idr.c b/lib/idr.c
76915index eda7ba3..915dfae 100644
76916--- a/lib/idr.c
76917+++ b/lib/idr.c
76918@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76919 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76920
76921 /* if already at the top layer, we need to grow */
76922- if (id >= 1 << (idp->layers * IDR_BITS)) {
76923+ if (id >= (1 << (idp->layers * IDR_BITS))) {
76924 *starting_id = id;
76925 return IDR_NEED_TO_GROW;
76926 }
76927diff --git a/lib/inflate.c b/lib/inflate.c
76928index d102559..4215f31 100644
76929--- a/lib/inflate.c
76930+++ b/lib/inflate.c
76931@@ -266,7 +266,7 @@ static void free(void *where)
76932 malloc_ptr = free_mem_ptr;
76933 }
76934 #else
76935-#define malloc(a) kmalloc(a, GFP_KERNEL)
76936+#define malloc(a) kmalloc((a), GFP_KERNEL)
76937 #define free(a) kfree(a)
76938 #endif
76939
76940diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
76941index bd2bea9..6b3c95e 100644
76942--- a/lib/is_single_threaded.c
76943+++ b/lib/is_single_threaded.c
76944@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
76945 struct task_struct *p, *t;
76946 bool ret;
76947
76948+ if (!mm)
76949+ return true;
76950+
76951 if (atomic_read(&task->signal->live) != 1)
76952 return false;
76953
76954diff --git a/lib/kobject.c b/lib/kobject.c
76955index b512b74..8115eb1 100644
76956--- a/lib/kobject.c
76957+++ b/lib/kobject.c
76958@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76959 return ret;
76960 }
76961
76962-struct sysfs_ops kobj_sysfs_ops = {
76963+const struct sysfs_ops kobj_sysfs_ops = {
76964 .show = kobj_attr_show,
76965 .store = kobj_attr_store,
76966 };
76967@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76968 * If the kset was not able to be created, NULL will be returned.
76969 */
76970 static struct kset *kset_create(const char *name,
76971- struct kset_uevent_ops *uevent_ops,
76972+ const struct kset_uevent_ops *uevent_ops,
76973 struct kobject *parent_kobj)
76974 {
76975 struct kset *kset;
76976@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76977 * If the kset was not able to be created, NULL will be returned.
76978 */
76979 struct kset *kset_create_and_add(const char *name,
76980- struct kset_uevent_ops *uevent_ops,
76981+ const struct kset_uevent_ops *uevent_ops,
76982 struct kobject *parent_kobj)
76983 {
76984 struct kset *kset;
76985diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76986index 507b821..0bf8ed0 100644
76987--- a/lib/kobject_uevent.c
76988+++ b/lib/kobject_uevent.c
76989@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76990 const char *subsystem;
76991 struct kobject *top_kobj;
76992 struct kset *kset;
76993- struct kset_uevent_ops *uevent_ops;
76994+ const struct kset_uevent_ops *uevent_ops;
76995 u64 seq;
76996 int i = 0;
76997 int retval = 0;
76998diff --git a/lib/kref.c b/lib/kref.c
76999index 9ecd6e8..12c94c1 100644
77000--- a/lib/kref.c
77001+++ b/lib/kref.c
77002@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
77003 */
77004 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
77005 {
77006- WARN_ON(release == NULL);
77007+ BUG_ON(release == NULL);
77008 WARN_ON(release == (void (*)(struct kref *))kfree);
77009
77010 if (atomic_dec_and_test(&kref->refcount)) {
77011diff --git a/lib/parser.c b/lib/parser.c
77012index b00d020..1b34325 100644
77013--- a/lib/parser.c
77014+++ b/lib/parser.c
77015@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
77016 char *buf;
77017 int ret;
77018
77019- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
77020+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
77021 if (!buf)
77022 return -ENOMEM;
77023 memcpy(buf, s->from, s->to - s->from);
77024diff --git a/lib/radix-tree.c b/lib/radix-tree.c
77025index 92cdd99..a8149d7 100644
77026--- a/lib/radix-tree.c
77027+++ b/lib/radix-tree.c
77028@@ -81,7 +81,7 @@ struct radix_tree_preload {
77029 int nr;
77030 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
77031 };
77032-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
77033+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
77034
77035 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
77036 {
77037diff --git a/lib/random32.c b/lib/random32.c
77038index 217d5c4..45aba8a 100644
77039--- a/lib/random32.c
77040+++ b/lib/random32.c
77041@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
77042 */
77043 static inline u32 __seed(u32 x, u32 m)
77044 {
77045- return (x < m) ? x + m : x;
77046+ return (x <= m) ? x + m + 1 : x;
77047 }
77048
77049 /**
77050diff --git a/lib/vsprintf.c b/lib/vsprintf.c
77051index 33bed5e..1477e46 100644
77052--- a/lib/vsprintf.c
77053+++ b/lib/vsprintf.c
77054@@ -16,6 +16,9 @@
77055 * - scnprintf and vscnprintf
77056 */
77057
77058+#ifdef CONFIG_GRKERNSEC_HIDESYM
77059+#define __INCLUDED_BY_HIDESYM 1
77060+#endif
77061 #include <stdarg.h>
77062 #include <linux/module.h>
77063 #include <linux/types.h>
77064@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
77065 return buf;
77066 }
77067
77068-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
77069+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
77070 {
77071 int len, i;
77072
77073 if ((unsigned long)s < PAGE_SIZE)
77074- s = "<NULL>";
77075+ s = "(null)";
77076
77077 len = strnlen(s, spec.precision);
77078
77079@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
77080 unsigned long value = (unsigned long) ptr;
77081 #ifdef CONFIG_KALLSYMS
77082 char sym[KSYM_SYMBOL_LEN];
77083- if (ext != 'f' && ext != 's')
77084+ if (ext != 'f' && ext != 's' && ext != 'a')
77085 sprint_symbol(sym, value);
77086 else
77087 kallsyms_lookup(value, NULL, NULL, NULL, sym);
77088@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
77089 * - 'f' For simple symbolic function names without offset
77090 * - 'S' For symbolic direct pointers with offset
77091 * - 's' For symbolic direct pointers without offset
77092+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
77093+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
77094 * - 'R' For a struct resource pointer, it prints the range of
77095 * addresses (not the name nor the flags)
77096 * - 'M' For a 6-byte MAC address, it prints the address in the
77097@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
77098 struct printf_spec spec)
77099 {
77100 if (!ptr)
77101- return string(buf, end, "(null)", spec);
77102+ return string(buf, end, "(nil)", spec);
77103
77104 switch (*fmt) {
77105 case 'F':
77106@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
77107 case 's':
77108 /* Fallthrough */
77109 case 'S':
77110+#ifdef CONFIG_GRKERNSEC_HIDESYM
77111+ break;
77112+#else
77113+ return symbol_string(buf, end, ptr, spec, *fmt);
77114+#endif
77115+ case 'a':
77116+ /* Fallthrough */
77117+ case 'A':
77118 return symbol_string(buf, end, ptr, spec, *fmt);
77119 case 'R':
77120 return resource_string(buf, end, ptr, spec);
77121@@ -1445,7 +1458,7 @@ do { \
77122 size_t len;
77123 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
77124 || (unsigned long)save_str < PAGE_SIZE)
77125- save_str = "<NULL>";
77126+ save_str = "(null)";
77127 len = strlen(save_str);
77128 if (str + len + 1 < end)
77129 memcpy(str, save_str, len + 1);
77130@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
77131 typeof(type) value; \
77132 if (sizeof(type) == 8) { \
77133 args = PTR_ALIGN(args, sizeof(u32)); \
77134- *(u32 *)&value = *(u32 *)args; \
77135- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
77136+ *(u32 *)&value = *(const u32 *)args; \
77137+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
77138 } else { \
77139 args = PTR_ALIGN(args, sizeof(type)); \
77140- value = *(typeof(type) *)args; \
77141+ value = *(const typeof(type) *)args; \
77142 } \
77143 args += sizeof(type); \
77144 value; \
77145@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
77146 const char *str_arg = args;
77147 size_t len = strlen(str_arg);
77148 args += len + 1;
77149- str = string(str, end, (char *)str_arg, spec);
77150+ str = string(str, end, str_arg, spec);
77151 break;
77152 }
77153
77154diff --git a/localversion-grsec b/localversion-grsec
77155new file mode 100644
77156index 0000000..7cd6065
77157--- /dev/null
77158+++ b/localversion-grsec
77159@@ -0,0 +1 @@
77160+-grsec
77161diff --git a/mm/Kconfig b/mm/Kconfig
77162index 2c19c0b..f3c3f83 100644
77163--- a/mm/Kconfig
77164+++ b/mm/Kconfig
77165@@ -228,7 +228,7 @@ config KSM
77166 config DEFAULT_MMAP_MIN_ADDR
77167 int "Low address space to protect from user allocation"
77168 depends on MMU
77169- default 4096
77170+ default 65536
77171 help
77172 This is the portion of low virtual memory which should be protected
77173 from userspace allocation. Keeping a user from writing to low pages
77174diff --git a/mm/backing-dev.c b/mm/backing-dev.c
77175index 67a33a5..094dcf1 100644
77176--- a/mm/backing-dev.c
77177+++ b/mm/backing-dev.c
77178@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
77179 list_add_tail_rcu(&wb->list, &bdi->wb_list);
77180 spin_unlock(&bdi->wb_lock);
77181
77182- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
77183+ tsk->flags |= PF_SWAPWRITE;
77184 set_freezable();
77185
77186 /*
77187@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
77188 * Add the default flusher task that gets created for any bdi
77189 * that has dirty data pending writeout
77190 */
77191-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
77192+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
77193 {
77194 if (!bdi_cap_writeback_dirty(bdi))
77195 return;
77196diff --git a/mm/filemap.c b/mm/filemap.c
77197index a1fe378..e26702f 100644
77198--- a/mm/filemap.c
77199+++ b/mm/filemap.c
77200@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
77201 struct address_space *mapping = file->f_mapping;
77202
77203 if (!mapping->a_ops->readpage)
77204- return -ENOEXEC;
77205+ return -ENODEV;
77206 file_accessed(file);
77207 vma->vm_ops = &generic_file_vm_ops;
77208 vma->vm_flags |= VM_CAN_NONLINEAR;
77209@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
77210 *pos = i_size_read(inode);
77211
77212 if (limit != RLIM_INFINITY) {
77213+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
77214 if (*pos >= limit) {
77215 send_sig(SIGXFSZ, current, 0);
77216 return -EFBIG;
77217diff --git a/mm/fremap.c b/mm/fremap.c
77218index b6ec85a..a24ac22 100644
77219--- a/mm/fremap.c
77220+++ b/mm/fremap.c
77221@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
77222 retry:
77223 vma = find_vma(mm, start);
77224
77225+#ifdef CONFIG_PAX_SEGMEXEC
77226+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
77227+ goto out;
77228+#endif
77229+
77230 /*
77231 * Make sure the vma is shared, that it supports prefaulting,
77232 * and that the remapped range is valid and fully within
77233@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
77234 /*
77235 * drop PG_Mlocked flag for over-mapped range
77236 */
77237- unsigned int saved_flags = vma->vm_flags;
77238+ unsigned long saved_flags = vma->vm_flags;
77239 munlock_vma_pages_range(vma, start, start + size);
77240 vma->vm_flags = saved_flags;
77241 }
77242diff --git a/mm/highmem.c b/mm/highmem.c
77243index 9c1e627..5ca9447 100644
77244--- a/mm/highmem.c
77245+++ b/mm/highmem.c
77246@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
77247 * So no dangers, even with speculative execution.
77248 */
77249 page = pte_page(pkmap_page_table[i]);
77250+ pax_open_kernel();
77251 pte_clear(&init_mm, (unsigned long)page_address(page),
77252 &pkmap_page_table[i]);
77253-
77254+ pax_close_kernel();
77255 set_page_address(page, NULL);
77256 need_flush = 1;
77257 }
77258@@ -177,9 +178,11 @@ start:
77259 }
77260 }
77261 vaddr = PKMAP_ADDR(last_pkmap_nr);
77262+
77263+ pax_open_kernel();
77264 set_pte_at(&init_mm, vaddr,
77265 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
77266-
77267+ pax_close_kernel();
77268 pkmap_count[last_pkmap_nr] = 1;
77269 set_page_address(page, (void *)vaddr);
77270
77271diff --git a/mm/hugetlb.c b/mm/hugetlb.c
77272index 5e1e508..ac70275 100644
77273--- a/mm/hugetlb.c
77274+++ b/mm/hugetlb.c
77275@@ -869,6 +869,7 @@ free:
77276 list_del(&page->lru);
77277 enqueue_huge_page(h, page);
77278 }
77279+ spin_unlock(&hugetlb_lock);
77280
77281 /* Free unnecessary surplus pages to the buddy allocator */
77282 if (!list_empty(&surplus_list)) {
77283@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
77284 return 1;
77285 }
77286
77287+#ifdef CONFIG_PAX_SEGMEXEC
77288+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
77289+{
77290+ struct mm_struct *mm = vma->vm_mm;
77291+ struct vm_area_struct *vma_m;
77292+ unsigned long address_m;
77293+ pte_t *ptep_m;
77294+
77295+ vma_m = pax_find_mirror_vma(vma);
77296+ if (!vma_m)
77297+ return;
77298+
77299+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77300+ address_m = address + SEGMEXEC_TASK_SIZE;
77301+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
77302+ get_page(page_m);
77303+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
77304+}
77305+#endif
77306+
77307 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
77308 unsigned long address, pte_t *ptep, pte_t pte,
77309 struct page *pagecache_page)
77310@@ -2004,6 +2025,11 @@ retry_avoidcopy:
77311 huge_ptep_clear_flush(vma, address, ptep);
77312 set_huge_pte_at(mm, address, ptep,
77313 make_huge_pte(vma, new_page, 1));
77314+
77315+#ifdef CONFIG_PAX_SEGMEXEC
77316+ pax_mirror_huge_pte(vma, address, new_page);
77317+#endif
77318+
77319 /* Make the old page be freed below */
77320 new_page = old_page;
77321 }
77322@@ -2135,6 +2161,10 @@ retry:
77323 && (vma->vm_flags & VM_SHARED)));
77324 set_huge_pte_at(mm, address, ptep, new_pte);
77325
77326+#ifdef CONFIG_PAX_SEGMEXEC
77327+ pax_mirror_huge_pte(vma, address, page);
77328+#endif
77329+
77330 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
77331 /* Optimization, do the COW without a second fault */
77332 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
77333@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77334 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
77335 struct hstate *h = hstate_vma(vma);
77336
77337+#ifdef CONFIG_PAX_SEGMEXEC
77338+ struct vm_area_struct *vma_m;
77339+
77340+ vma_m = pax_find_mirror_vma(vma);
77341+ if (vma_m) {
77342+ unsigned long address_m;
77343+
77344+ if (vma->vm_start > vma_m->vm_start) {
77345+ address_m = address;
77346+ address -= SEGMEXEC_TASK_SIZE;
77347+ vma = vma_m;
77348+ h = hstate_vma(vma);
77349+ } else
77350+ address_m = address + SEGMEXEC_TASK_SIZE;
77351+
77352+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
77353+ return VM_FAULT_OOM;
77354+ address_m &= HPAGE_MASK;
77355+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
77356+ }
77357+#endif
77358+
77359 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
77360 if (!ptep)
77361 return VM_FAULT_OOM;
77362diff --git a/mm/internal.h b/mm/internal.h
77363index f03e8e2..7354343 100644
77364--- a/mm/internal.h
77365+++ b/mm/internal.h
77366@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
77367 * in mm/page_alloc.c
77368 */
77369 extern void __free_pages_bootmem(struct page *page, unsigned int order);
77370+extern void free_compound_page(struct page *page);
77371 extern void prep_compound_page(struct page *page, unsigned long order);
77372
77373
77374diff --git a/mm/kmemleak.c b/mm/kmemleak.c
77375index c346660..b47382f 100644
77376--- a/mm/kmemleak.c
77377+++ b/mm/kmemleak.c
77378@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
77379
77380 for (i = 0; i < object->trace_len; i++) {
77381 void *ptr = (void *)object->trace[i];
77382- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
77383+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
77384 }
77385 }
77386
77387diff --git a/mm/maccess.c b/mm/maccess.c
77388index 9073695..1127f348 100644
77389--- a/mm/maccess.c
77390+++ b/mm/maccess.c
77391@@ -14,7 +14,7 @@
77392 * Safely read from address @src to the buffer at @dst. If a kernel fault
77393 * happens, handle that and return -EFAULT.
77394 */
77395-long probe_kernel_read(void *dst, void *src, size_t size)
77396+long probe_kernel_read(void *dst, const void *src, size_t size)
77397 {
77398 long ret;
77399 mm_segment_t old_fs = get_fs();
77400@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
77401 set_fs(KERNEL_DS);
77402 pagefault_disable();
77403 ret = __copy_from_user_inatomic(dst,
77404- (__force const void __user *)src, size);
77405+ (const void __force_user *)src, size);
77406 pagefault_enable();
77407 set_fs(old_fs);
77408
77409@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
77410 * Safely write to address @dst from the buffer at @src. If a kernel fault
77411 * happens, handle that and return -EFAULT.
77412 */
77413-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
77414+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
77415 {
77416 long ret;
77417 mm_segment_t old_fs = get_fs();
77418
77419 set_fs(KERNEL_DS);
77420 pagefault_disable();
77421- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
77422+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
77423 pagefault_enable();
77424 set_fs(old_fs);
77425
77426diff --git a/mm/madvise.c b/mm/madvise.c
77427index 35b1479..499f7d4 100644
77428--- a/mm/madvise.c
77429+++ b/mm/madvise.c
77430@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
77431 pgoff_t pgoff;
77432 unsigned long new_flags = vma->vm_flags;
77433
77434+#ifdef CONFIG_PAX_SEGMEXEC
77435+ struct vm_area_struct *vma_m;
77436+#endif
77437+
77438 switch (behavior) {
77439 case MADV_NORMAL:
77440 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
77441@@ -103,6 +107,13 @@ success:
77442 /*
77443 * vm_flags is protected by the mmap_sem held in write mode.
77444 */
77445+
77446+#ifdef CONFIG_PAX_SEGMEXEC
77447+ vma_m = pax_find_mirror_vma(vma);
77448+ if (vma_m)
77449+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
77450+#endif
77451+
77452 vma->vm_flags = new_flags;
77453
77454 out:
77455@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77456 struct vm_area_struct ** prev,
77457 unsigned long start, unsigned long end)
77458 {
77459+
77460+#ifdef CONFIG_PAX_SEGMEXEC
77461+ struct vm_area_struct *vma_m;
77462+#endif
77463+
77464 *prev = vma;
77465 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
77466 return -EINVAL;
77467@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
77468 zap_page_range(vma, start, end - start, &details);
77469 } else
77470 zap_page_range(vma, start, end - start, NULL);
77471+
77472+#ifdef CONFIG_PAX_SEGMEXEC
77473+ vma_m = pax_find_mirror_vma(vma);
77474+ if (vma_m) {
77475+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
77476+ struct zap_details details = {
77477+ .nonlinear_vma = vma_m,
77478+ .last_index = ULONG_MAX,
77479+ };
77480+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
77481+ } else
77482+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
77483+ }
77484+#endif
77485+
77486 return 0;
77487 }
77488
77489@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
77490 if (end < start)
77491 goto out;
77492
77493+#ifdef CONFIG_PAX_SEGMEXEC
77494+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
77495+ if (end > SEGMEXEC_TASK_SIZE)
77496+ goto out;
77497+ } else
77498+#endif
77499+
77500+ if (end > TASK_SIZE)
77501+ goto out;
77502+
77503 error = 0;
77504 if (end == start)
77505 goto out;
77506diff --git a/mm/memory-failure.c b/mm/memory-failure.c
77507index 8aeba53..b4a4198 100644
77508--- a/mm/memory-failure.c
77509+++ b/mm/memory-failure.c
77510@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
77511
77512 int sysctl_memory_failure_recovery __read_mostly = 1;
77513
77514-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77515+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
77516
77517 /*
77518 * Send all the processes who have the page mapped an ``action optional''
77519@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
77520 si.si_signo = SIGBUS;
77521 si.si_errno = 0;
77522 si.si_code = BUS_MCEERR_AO;
77523- si.si_addr = (void *)addr;
77524+ si.si_addr = (void __user *)addr;
77525 #ifdef __ARCH_SI_TRAPNO
77526 si.si_trapno = trapno;
77527 #endif
77528@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
77529 return 0;
77530 }
77531
77532- atomic_long_add(1, &mce_bad_pages);
77533+ atomic_long_add_unchecked(1, &mce_bad_pages);
77534
77535 /*
77536 * We need/can do nothing about count=0 pages.
77537diff --git a/mm/memory.c b/mm/memory.c
77538index 6c836d3..48f3264 100644
77539--- a/mm/memory.c
77540+++ b/mm/memory.c
77541@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
77542 return;
77543
77544 pmd = pmd_offset(pud, start);
77545+
77546+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
77547 pud_clear(pud);
77548 pmd_free_tlb(tlb, pmd, start);
77549+#endif
77550+
77551 }
77552
77553 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77554@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
77555 if (end - 1 > ceiling - 1)
77556 return;
77557
77558+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
77559 pud = pud_offset(pgd, start);
77560 pgd_clear(pgd);
77561 pud_free_tlb(tlb, pud, start);
77562+#endif
77563+
77564 }
77565
77566 /*
77567@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77568 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
77569 i = 0;
77570
77571- do {
77572+ while (nr_pages) {
77573 struct vm_area_struct *vma;
77574
77575- vma = find_extend_vma(mm, start);
77576+ vma = find_vma(mm, start);
77577 if (!vma && in_gate_area(tsk, start)) {
77578 unsigned long pg = start & PAGE_MASK;
77579 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
77580@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77581 continue;
77582 }
77583
77584- if (!vma ||
77585+ if (!vma || start < vma->vm_start ||
77586 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
77587 !(vm_flags & vma->vm_flags))
77588 return i ? : -EFAULT;
77589@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77590 start += PAGE_SIZE;
77591 nr_pages--;
77592 } while (nr_pages && start < vma->vm_end);
77593- } while (nr_pages);
77594+ }
77595 return i;
77596 }
77597
77598@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
77599 page_add_file_rmap(page);
77600 set_pte_at(mm, addr, pte, mk_pte(page, prot));
77601
77602+#ifdef CONFIG_PAX_SEGMEXEC
77603+ pax_mirror_file_pte(vma, addr, page, ptl);
77604+#endif
77605+
77606 retval = 0;
77607 pte_unmap_unlock(pte, ptl);
77608 return retval;
77609@@ -1560,10 +1571,22 @@ out:
77610 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
77611 struct page *page)
77612 {
77613+
77614+#ifdef CONFIG_PAX_SEGMEXEC
77615+ struct vm_area_struct *vma_m;
77616+#endif
77617+
77618 if (addr < vma->vm_start || addr >= vma->vm_end)
77619 return -EFAULT;
77620 if (!page_count(page))
77621 return -EINVAL;
77622+
77623+#ifdef CONFIG_PAX_SEGMEXEC
77624+ vma_m = pax_find_mirror_vma(vma);
77625+ if (vma_m)
77626+ vma_m->vm_flags |= VM_INSERTPAGE;
77627+#endif
77628+
77629 vma->vm_flags |= VM_INSERTPAGE;
77630 return insert_page(vma, addr, page, vma->vm_page_prot);
77631 }
77632@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
77633 unsigned long pfn)
77634 {
77635 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
77636+ BUG_ON(vma->vm_mirror);
77637
77638 if (addr < vma->vm_start || addr >= vma->vm_end)
77639 return -EFAULT;
77640@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
77641 copy_user_highpage(dst, src, va, vma);
77642 }
77643
77644+#ifdef CONFIG_PAX_SEGMEXEC
77645+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
77646+{
77647+ struct mm_struct *mm = vma->vm_mm;
77648+ spinlock_t *ptl;
77649+ pte_t *pte, entry;
77650+
77651+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
77652+ entry = *pte;
77653+ if (!pte_present(entry)) {
77654+ if (!pte_none(entry)) {
77655+ BUG_ON(pte_file(entry));
77656+ free_swap_and_cache(pte_to_swp_entry(entry));
77657+ pte_clear_not_present_full(mm, address, pte, 0);
77658+ }
77659+ } else {
77660+ struct page *page;
77661+
77662+ flush_cache_page(vma, address, pte_pfn(entry));
77663+ entry = ptep_clear_flush(vma, address, pte);
77664+ BUG_ON(pte_dirty(entry));
77665+ page = vm_normal_page(vma, address, entry);
77666+ if (page) {
77667+ update_hiwater_rss(mm);
77668+ if (PageAnon(page))
77669+ dec_mm_counter(mm, anon_rss);
77670+ else
77671+ dec_mm_counter(mm, file_rss);
77672+ page_remove_rmap(page);
77673+ page_cache_release(page);
77674+ }
77675+ }
77676+ pte_unmap_unlock(pte, ptl);
77677+}
77678+
77679+/* PaX: if vma is mirrored, synchronize the mirror's PTE
77680+ *
77681+ * the ptl of the lower mapped page is held on entry and is not released on exit
77682+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
77683+ */
77684+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77685+{
77686+ struct mm_struct *mm = vma->vm_mm;
77687+ unsigned long address_m;
77688+ spinlock_t *ptl_m;
77689+ struct vm_area_struct *vma_m;
77690+ pmd_t *pmd_m;
77691+ pte_t *pte_m, entry_m;
77692+
77693+ BUG_ON(!page_m || !PageAnon(page_m));
77694+
77695+ vma_m = pax_find_mirror_vma(vma);
77696+ if (!vma_m)
77697+ return;
77698+
77699+ BUG_ON(!PageLocked(page_m));
77700+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77701+ address_m = address + SEGMEXEC_TASK_SIZE;
77702+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77703+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77704+ ptl_m = pte_lockptr(mm, pmd_m);
77705+ if (ptl != ptl_m) {
77706+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77707+ if (!pte_none(*pte_m))
77708+ goto out;
77709+ }
77710+
77711+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77712+ page_cache_get(page_m);
77713+ page_add_anon_rmap(page_m, vma_m, address_m);
77714+ inc_mm_counter(mm, anon_rss);
77715+ set_pte_at(mm, address_m, pte_m, entry_m);
77716+ update_mmu_cache(vma_m, address_m, entry_m);
77717+out:
77718+ if (ptl != ptl_m)
77719+ spin_unlock(ptl_m);
77720+ pte_unmap_nested(pte_m);
77721+ unlock_page(page_m);
77722+}
77723+
77724+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77725+{
77726+ struct mm_struct *mm = vma->vm_mm;
77727+ unsigned long address_m;
77728+ spinlock_t *ptl_m;
77729+ struct vm_area_struct *vma_m;
77730+ pmd_t *pmd_m;
77731+ pte_t *pte_m, entry_m;
77732+
77733+ BUG_ON(!page_m || PageAnon(page_m));
77734+
77735+ vma_m = pax_find_mirror_vma(vma);
77736+ if (!vma_m)
77737+ return;
77738+
77739+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77740+ address_m = address + SEGMEXEC_TASK_SIZE;
77741+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77742+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77743+ ptl_m = pte_lockptr(mm, pmd_m);
77744+ if (ptl != ptl_m) {
77745+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77746+ if (!pte_none(*pte_m))
77747+ goto out;
77748+ }
77749+
77750+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77751+ page_cache_get(page_m);
77752+ page_add_file_rmap(page_m);
77753+ inc_mm_counter(mm, file_rss);
77754+ set_pte_at(mm, address_m, pte_m, entry_m);
77755+ update_mmu_cache(vma_m, address_m, entry_m);
77756+out:
77757+ if (ptl != ptl_m)
77758+ spin_unlock(ptl_m);
77759+ pte_unmap_nested(pte_m);
77760+}
77761+
77762+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77763+{
77764+ struct mm_struct *mm = vma->vm_mm;
77765+ unsigned long address_m;
77766+ spinlock_t *ptl_m;
77767+ struct vm_area_struct *vma_m;
77768+ pmd_t *pmd_m;
77769+ pte_t *pte_m, entry_m;
77770+
77771+ vma_m = pax_find_mirror_vma(vma);
77772+ if (!vma_m)
77773+ return;
77774+
77775+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77776+ address_m = address + SEGMEXEC_TASK_SIZE;
77777+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77778+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77779+ ptl_m = pte_lockptr(mm, pmd_m);
77780+ if (ptl != ptl_m) {
77781+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77782+ if (!pte_none(*pte_m))
77783+ goto out;
77784+ }
77785+
77786+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77787+ set_pte_at(mm, address_m, pte_m, entry_m);
77788+out:
77789+ if (ptl != ptl_m)
77790+ spin_unlock(ptl_m);
77791+ pte_unmap_nested(pte_m);
77792+}
77793+
77794+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77795+{
77796+ struct page *page_m;
77797+ pte_t entry;
77798+
77799+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77800+ goto out;
77801+
77802+ entry = *pte;
77803+ page_m = vm_normal_page(vma, address, entry);
77804+ if (!page_m)
77805+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77806+ else if (PageAnon(page_m)) {
77807+ if (pax_find_mirror_vma(vma)) {
77808+ pte_unmap_unlock(pte, ptl);
77809+ lock_page(page_m);
77810+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77811+ if (pte_same(entry, *pte))
77812+ pax_mirror_anon_pte(vma, address, page_m, ptl);
77813+ else
77814+ unlock_page(page_m);
77815+ }
77816+ } else
77817+ pax_mirror_file_pte(vma, address, page_m, ptl);
77818+
77819+out:
77820+ pte_unmap_unlock(pte, ptl);
77821+}
77822+#endif
77823+
77824 /*
77825 * This routine handles present pages, when users try to write
77826 * to a shared page. It is done by copying the page to a new address
77827@@ -2156,6 +2360,12 @@ gotten:
77828 */
77829 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77830 if (likely(pte_same(*page_table, orig_pte))) {
77831+
77832+#ifdef CONFIG_PAX_SEGMEXEC
77833+ if (pax_find_mirror_vma(vma))
77834+ BUG_ON(!trylock_page(new_page));
77835+#endif
77836+
77837 if (old_page) {
77838 if (!PageAnon(old_page)) {
77839 dec_mm_counter(mm, file_rss);
77840@@ -2207,6 +2417,10 @@ gotten:
77841 page_remove_rmap(old_page);
77842 }
77843
77844+#ifdef CONFIG_PAX_SEGMEXEC
77845+ pax_mirror_anon_pte(vma, address, new_page, ptl);
77846+#endif
77847+
77848 /* Free the old page.. */
77849 new_page = old_page;
77850 ret |= VM_FAULT_WRITE;
77851@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77852 swap_free(entry);
77853 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77854 try_to_free_swap(page);
77855+
77856+#ifdef CONFIG_PAX_SEGMEXEC
77857+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77858+#endif
77859+
77860 unlock_page(page);
77861
77862 if (flags & FAULT_FLAG_WRITE) {
77863@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77864
77865 /* No need to invalidate - it was non-present before */
77866 update_mmu_cache(vma, address, pte);
77867+
77868+#ifdef CONFIG_PAX_SEGMEXEC
77869+ pax_mirror_anon_pte(vma, address, page, ptl);
77870+#endif
77871+
77872 unlock:
77873 pte_unmap_unlock(page_table, ptl);
77874 out:
77875@@ -2632,40 +2856,6 @@ out_release:
77876 }
77877
77878 /*
77879- * This is like a special single-page "expand_{down|up}wards()",
77880- * except we must first make sure that 'address{-|+}PAGE_SIZE'
77881- * doesn't hit another vma.
77882- */
77883-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77884-{
77885- address &= PAGE_MASK;
77886- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77887- struct vm_area_struct *prev = vma->vm_prev;
77888-
77889- /*
77890- * Is there a mapping abutting this one below?
77891- *
77892- * That's only ok if it's the same stack mapping
77893- * that has gotten split..
77894- */
77895- if (prev && prev->vm_end == address)
77896- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77897-
77898- expand_stack(vma, address - PAGE_SIZE);
77899- }
77900- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77901- struct vm_area_struct *next = vma->vm_next;
77902-
77903- /* As VM_GROWSDOWN but s/below/above/ */
77904- if (next && next->vm_start == address + PAGE_SIZE)
77905- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77906-
77907- expand_upwards(vma, address + PAGE_SIZE);
77908- }
77909- return 0;
77910-}
77911-
77912-/*
77913 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77914 * but allow concurrent faults), and pte mapped but not yet locked.
77915 * We return with mmap_sem still held, but pte unmapped and unlocked.
77916@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77917 unsigned long address, pte_t *page_table, pmd_t *pmd,
77918 unsigned int flags)
77919 {
77920- struct page *page;
77921+ struct page *page = NULL;
77922 spinlock_t *ptl;
77923 pte_t entry;
77924
77925- pte_unmap(page_table);
77926-
77927- /* Check if we need to add a guard page to the stack */
77928- if (check_stack_guard_page(vma, address) < 0)
77929- return VM_FAULT_SIGBUS;
77930-
77931- /* Use the zero-page for reads */
77932 if (!(flags & FAULT_FLAG_WRITE)) {
77933 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77934 vma->vm_page_prot));
77935- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77936+ ptl = pte_lockptr(mm, pmd);
77937+ spin_lock(ptl);
77938 if (!pte_none(*page_table))
77939 goto unlock;
77940 goto setpte;
77941 }
77942
77943 /* Allocate our own private page. */
77944+ pte_unmap(page_table);
77945+
77946 if (unlikely(anon_vma_prepare(vma)))
77947 goto oom;
77948 page = alloc_zeroed_user_highpage_movable(vma, address);
77949@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77950 if (!pte_none(*page_table))
77951 goto release;
77952
77953+#ifdef CONFIG_PAX_SEGMEXEC
77954+ if (pax_find_mirror_vma(vma))
77955+ BUG_ON(!trylock_page(page));
77956+#endif
77957+
77958 inc_mm_counter(mm, anon_rss);
77959 page_add_new_anon_rmap(page, vma, address);
77960 setpte:
77961@@ -2720,6 +2911,12 @@ setpte:
77962
77963 /* No need to invalidate - it was non-present before */
77964 update_mmu_cache(vma, address, entry);
77965+
77966+#ifdef CONFIG_PAX_SEGMEXEC
77967+ if (page)
77968+ pax_mirror_anon_pte(vma, address, page, ptl);
77969+#endif
77970+
77971 unlock:
77972 pte_unmap_unlock(page_table, ptl);
77973 return 0;
77974@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77975 */
77976 /* Only go through if we didn't race with anybody else... */
77977 if (likely(pte_same(*page_table, orig_pte))) {
77978+
77979+#ifdef CONFIG_PAX_SEGMEXEC
77980+ if (anon && pax_find_mirror_vma(vma))
77981+ BUG_ON(!trylock_page(page));
77982+#endif
77983+
77984 flush_icache_page(vma, page);
77985 entry = mk_pte(page, vma->vm_page_prot);
77986 if (flags & FAULT_FLAG_WRITE)
77987@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77988
77989 /* no need to invalidate: a not-present page won't be cached */
77990 update_mmu_cache(vma, address, entry);
77991+
77992+#ifdef CONFIG_PAX_SEGMEXEC
77993+ if (anon)
77994+ pax_mirror_anon_pte(vma, address, page, ptl);
77995+ else
77996+ pax_mirror_file_pte(vma, address, page, ptl);
77997+#endif
77998+
77999 } else {
78000 if (charged)
78001 mem_cgroup_uncharge_page(page);
78002@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
78003 if (flags & FAULT_FLAG_WRITE)
78004 flush_tlb_page(vma, address);
78005 }
78006+
78007+#ifdef CONFIG_PAX_SEGMEXEC
78008+ pax_mirror_pte(vma, address, pte, pmd, ptl);
78009+ return 0;
78010+#endif
78011+
78012 unlock:
78013 pte_unmap_unlock(pte, ptl);
78014 return 0;
78015@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
78016 pmd_t *pmd;
78017 pte_t *pte;
78018
78019+#ifdef CONFIG_PAX_SEGMEXEC
78020+ struct vm_area_struct *vma_m;
78021+#endif
78022+
78023 __set_current_state(TASK_RUNNING);
78024
78025 count_vm_event(PGFAULT);
78026@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
78027 if (unlikely(is_vm_hugetlb_page(vma)))
78028 return hugetlb_fault(mm, vma, address, flags);
78029
78030+#ifdef CONFIG_PAX_SEGMEXEC
78031+ vma_m = pax_find_mirror_vma(vma);
78032+ if (vma_m) {
78033+ unsigned long address_m;
78034+ pgd_t *pgd_m;
78035+ pud_t *pud_m;
78036+ pmd_t *pmd_m;
78037+
78038+ if (vma->vm_start > vma_m->vm_start) {
78039+ address_m = address;
78040+ address -= SEGMEXEC_TASK_SIZE;
78041+ vma = vma_m;
78042+ } else
78043+ address_m = address + SEGMEXEC_TASK_SIZE;
78044+
78045+ pgd_m = pgd_offset(mm, address_m);
78046+ pud_m = pud_alloc(mm, pgd_m, address_m);
78047+ if (!pud_m)
78048+ return VM_FAULT_OOM;
78049+ pmd_m = pmd_alloc(mm, pud_m, address_m);
78050+ if (!pmd_m)
78051+ return VM_FAULT_OOM;
78052+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
78053+ return VM_FAULT_OOM;
78054+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
78055+ }
78056+#endif
78057+
78058 pgd = pgd_offset(mm, address);
78059 pud = pud_alloc(mm, pgd, address);
78060 if (!pud)
78061@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
78062 gate_vma.vm_start = FIXADDR_USER_START;
78063 gate_vma.vm_end = FIXADDR_USER_END;
78064 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
78065- gate_vma.vm_page_prot = __P101;
78066+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
78067 /*
78068 * Make sure the vDSO gets into every core dump.
78069 * Dumping its contents makes post-mortem fully interpretable later
78070diff --git a/mm/mempolicy.c b/mm/mempolicy.c
78071index 3c6e3e2..b1ddbb8 100644
78072--- a/mm/mempolicy.c
78073+++ b/mm/mempolicy.c
78074@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
78075 struct vm_area_struct *next;
78076 int err;
78077
78078+#ifdef CONFIG_PAX_SEGMEXEC
78079+ struct vm_area_struct *vma_m;
78080+#endif
78081+
78082 err = 0;
78083 for (; vma && vma->vm_start < end; vma = next) {
78084 next = vma->vm_next;
78085@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
78086 err = policy_vma(vma, new);
78087 if (err)
78088 break;
78089+
78090+#ifdef CONFIG_PAX_SEGMEXEC
78091+ vma_m = pax_find_mirror_vma(vma);
78092+ if (vma_m) {
78093+ err = policy_vma(vma_m, new);
78094+ if (err)
78095+ break;
78096+ }
78097+#endif
78098+
78099 }
78100 return err;
78101 }
78102@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
78103
78104 if (end < start)
78105 return -EINVAL;
78106+
78107+#ifdef CONFIG_PAX_SEGMEXEC
78108+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78109+ if (end > SEGMEXEC_TASK_SIZE)
78110+ return -EINVAL;
78111+ } else
78112+#endif
78113+
78114+ if (end > TASK_SIZE)
78115+ return -EINVAL;
78116+
78117 if (end == start)
78118 return 0;
78119
78120@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
78121 if (!mm)
78122 return -EINVAL;
78123
78124+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78125+ if (mm != current->mm &&
78126+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
78127+ err = -EPERM;
78128+ goto out;
78129+ }
78130+#endif
78131+
78132 /*
78133 * Check if this process has the right to modify the specified
78134 * process. The right exists if the process has administrative
78135@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
78136 rcu_read_lock();
78137 tcred = __task_cred(task);
78138 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
78139- cred->uid != tcred->suid && cred->uid != tcred->uid &&
78140- !capable(CAP_SYS_NICE)) {
78141+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
78142 rcu_read_unlock();
78143 err = -EPERM;
78144 goto out;
78145@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
78146 }
78147 #endif
78148
78149+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78150+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
78151+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
78152+ _mm->pax_flags & MF_PAX_SEGMEXEC))
78153+#endif
78154+
78155 /*
78156 * Display pages allocated per node and memory policy via /proc.
78157 */
78158@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
78159 int n;
78160 char buffer[50];
78161
78162+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78163+ if (current->exec_id != m->exec_id) {
78164+ gr_log_badprocpid("numa_maps");
78165+ return 0;
78166+ }
78167+#endif
78168+
78169 if (!mm)
78170 return 0;
78171
78172@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
78173 mpol_to_str(buffer, sizeof(buffer), pol, 0);
78174 mpol_cond_put(pol);
78175
78176+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78177+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
78178+#else
78179 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
78180+#endif
78181
78182 if (file) {
78183 seq_printf(m, " file=");
78184- seq_path(m, &file->f_path, "\n\t= ");
78185+ seq_path(m, &file->f_path, "\n\t\\= ");
78186 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
78187 seq_printf(m, " heap");
78188 } else if (vma->vm_start <= mm->start_stack &&
78189diff --git a/mm/migrate.c b/mm/migrate.c
78190index aaca868..2ebecdc 100644
78191--- a/mm/migrate.c
78192+++ b/mm/migrate.c
78193@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
78194 unsigned long chunk_start;
78195 int err;
78196
78197+ pax_track_stack();
78198+
78199 task_nodes = cpuset_mems_allowed(task);
78200
78201 err = -ENOMEM;
78202@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
78203 if (!mm)
78204 return -EINVAL;
78205
78206+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78207+ if (mm != current->mm &&
78208+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
78209+ err = -EPERM;
78210+ goto out;
78211+ }
78212+#endif
78213+
78214 /*
78215 * Check if this process has the right to modify the specified
78216 * process. The right exists if the process has administrative
78217@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
78218 rcu_read_lock();
78219 tcred = __task_cred(task);
78220 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
78221- cred->uid != tcred->suid && cred->uid != tcred->uid &&
78222- !capable(CAP_SYS_NICE)) {
78223+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
78224 rcu_read_unlock();
78225 err = -EPERM;
78226 goto out;
78227diff --git a/mm/mlock.c b/mm/mlock.c
78228index 2d846cf..98134d2 100644
78229--- a/mm/mlock.c
78230+++ b/mm/mlock.c
78231@@ -13,6 +13,7 @@
78232 #include <linux/pagemap.h>
78233 #include <linux/mempolicy.h>
78234 #include <linux/syscalls.h>
78235+#include <linux/security.h>
78236 #include <linux/sched.h>
78237 #include <linux/module.h>
78238 #include <linux/rmap.h>
78239@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
78240 }
78241 }
78242
78243-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
78244-{
78245- return (vma->vm_flags & VM_GROWSDOWN) &&
78246- (vma->vm_start == addr) &&
78247- !vma_stack_continue(vma->vm_prev, addr);
78248-}
78249-
78250 /**
78251 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
78252 * @vma: target vma
78253@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
78254 if (vma->vm_flags & VM_WRITE)
78255 gup_flags |= FOLL_WRITE;
78256
78257- /* We don't try to access the guard page of a stack vma */
78258- if (stack_guard_page(vma, start)) {
78259- addr += PAGE_SIZE;
78260- nr_pages--;
78261- }
78262-
78263 while (nr_pages > 0) {
78264 int i;
78265
78266@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
78267 {
78268 unsigned long nstart, end, tmp;
78269 struct vm_area_struct * vma, * prev;
78270- int error;
78271+ int error = -EINVAL;
78272
78273 len = PAGE_ALIGN(len);
78274 end = start + len;
78275@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
78276 return -EINVAL;
78277 if (end == start)
78278 return 0;
78279+ if (end > TASK_SIZE)
78280+ return -EINVAL;
78281+
78282 vma = find_vma_prev(current->mm, start, &prev);
78283 if (!vma || vma->vm_start > start)
78284 return -ENOMEM;
78285@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
78286 for (nstart = start ; ; ) {
78287 unsigned int newflags;
78288
78289+#ifdef CONFIG_PAX_SEGMEXEC
78290+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78291+ break;
78292+#endif
78293+
78294 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
78295
78296 newflags = vma->vm_flags | VM_LOCKED;
78297@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
78298 lock_limit >>= PAGE_SHIFT;
78299
78300 /* check against resource limits */
78301+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
78302 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
78303 error = do_mlock(start, len, 1);
78304 up_write(&current->mm->mmap_sem);
78305@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
78306 static int do_mlockall(int flags)
78307 {
78308 struct vm_area_struct * vma, * prev = NULL;
78309- unsigned int def_flags = 0;
78310
78311 if (flags & MCL_FUTURE)
78312- def_flags = VM_LOCKED;
78313- current->mm->def_flags = def_flags;
78314+ current->mm->def_flags |= VM_LOCKED;
78315+ else
78316+ current->mm->def_flags &= ~VM_LOCKED;
78317 if (flags == MCL_FUTURE)
78318 goto out;
78319
78320 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
78321- unsigned int newflags;
78322+ unsigned long newflags;
78323
78324+#ifdef CONFIG_PAX_SEGMEXEC
78325+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
78326+ break;
78327+#endif
78328+
78329+ BUG_ON(vma->vm_end > TASK_SIZE);
78330 newflags = vma->vm_flags | VM_LOCKED;
78331 if (!(flags & MCL_CURRENT))
78332 newflags &= ~VM_LOCKED;
78333@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
78334 lock_limit >>= PAGE_SHIFT;
78335
78336 ret = -ENOMEM;
78337+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
78338 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
78339 capable(CAP_IPC_LOCK))
78340 ret = do_mlockall(flags);
78341diff --git a/mm/mmap.c b/mm/mmap.c
78342index 4b80cbf..cd3731c 100644
78343--- a/mm/mmap.c
78344+++ b/mm/mmap.c
78345@@ -45,6 +45,16 @@
78346 #define arch_rebalance_pgtables(addr, len) (addr)
78347 #endif
78348
78349+static inline void verify_mm_writelocked(struct mm_struct *mm)
78350+{
78351+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
78352+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78353+ up_read(&mm->mmap_sem);
78354+ BUG();
78355+ }
78356+#endif
78357+}
78358+
78359 static void unmap_region(struct mm_struct *mm,
78360 struct vm_area_struct *vma, struct vm_area_struct *prev,
78361 unsigned long start, unsigned long end);
78362@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
78363 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
78364 *
78365 */
78366-pgprot_t protection_map[16] = {
78367+pgprot_t protection_map[16] __read_only = {
78368 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
78369 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
78370 };
78371
78372 pgprot_t vm_get_page_prot(unsigned long vm_flags)
78373 {
78374- return __pgprot(pgprot_val(protection_map[vm_flags &
78375+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
78376 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
78377 pgprot_val(arch_vm_get_page_prot(vm_flags)));
78378+
78379+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78380+ if (!nx_enabled &&
78381+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
78382+ (vm_flags & (VM_READ | VM_WRITE)))
78383+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
78384+#endif
78385+
78386+ return prot;
78387 }
78388 EXPORT_SYMBOL(vm_get_page_prot);
78389
78390 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
78391 int sysctl_overcommit_ratio = 50; /* default is 50% */
78392 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
78393+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
78394 struct percpu_counter vm_committed_as;
78395
78396 /*
78397@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
78398 struct vm_area_struct *next = vma->vm_next;
78399
78400 might_sleep();
78401+ BUG_ON(vma->vm_mirror);
78402 if (vma->vm_ops && vma->vm_ops->close)
78403 vma->vm_ops->close(vma);
78404 if (vma->vm_file) {
78405@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
78406 * not page aligned -Ram Gupta
78407 */
78408 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
78409+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
78410 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
78411 (mm->end_data - mm->start_data) > rlim)
78412 goto out;
78413@@ -704,6 +726,12 @@ static int
78414 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
78415 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78416 {
78417+
78418+#ifdef CONFIG_PAX_SEGMEXEC
78419+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
78420+ return 0;
78421+#endif
78422+
78423 if (is_mergeable_vma(vma, file, vm_flags) &&
78424 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78425 if (vma->vm_pgoff == vm_pgoff)
78426@@ -723,6 +751,12 @@ static int
78427 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78428 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
78429 {
78430+
78431+#ifdef CONFIG_PAX_SEGMEXEC
78432+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
78433+ return 0;
78434+#endif
78435+
78436 if (is_mergeable_vma(vma, file, vm_flags) &&
78437 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
78438 pgoff_t vm_pglen;
78439@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
78440 struct vm_area_struct *vma_merge(struct mm_struct *mm,
78441 struct vm_area_struct *prev, unsigned long addr,
78442 unsigned long end, unsigned long vm_flags,
78443- struct anon_vma *anon_vma, struct file *file,
78444+ struct anon_vma *anon_vma, struct file *file,
78445 pgoff_t pgoff, struct mempolicy *policy)
78446 {
78447 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
78448 struct vm_area_struct *area, *next;
78449
78450+#ifdef CONFIG_PAX_SEGMEXEC
78451+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
78452+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
78453+
78454+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
78455+#endif
78456+
78457 /*
78458 * We later require that vma->vm_flags == vm_flags,
78459 * so this tests vma->vm_flags & VM_SPECIAL, too.
78460@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78461 if (next && next->vm_end == end) /* cases 6, 7, 8 */
78462 next = next->vm_next;
78463
78464+#ifdef CONFIG_PAX_SEGMEXEC
78465+ if (prev)
78466+ prev_m = pax_find_mirror_vma(prev);
78467+ if (area)
78468+ area_m = pax_find_mirror_vma(area);
78469+ if (next)
78470+ next_m = pax_find_mirror_vma(next);
78471+#endif
78472+
78473 /*
78474 * Can it merge with the predecessor?
78475 */
78476@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78477 /* cases 1, 6 */
78478 vma_adjust(prev, prev->vm_start,
78479 next->vm_end, prev->vm_pgoff, NULL);
78480- } else /* cases 2, 5, 7 */
78481+
78482+#ifdef CONFIG_PAX_SEGMEXEC
78483+ if (prev_m)
78484+ vma_adjust(prev_m, prev_m->vm_start,
78485+ next_m->vm_end, prev_m->vm_pgoff, NULL);
78486+#endif
78487+
78488+ } else { /* cases 2, 5, 7 */
78489 vma_adjust(prev, prev->vm_start,
78490 end, prev->vm_pgoff, NULL);
78491+
78492+#ifdef CONFIG_PAX_SEGMEXEC
78493+ if (prev_m)
78494+ vma_adjust(prev_m, prev_m->vm_start,
78495+ end_m, prev_m->vm_pgoff, NULL);
78496+#endif
78497+
78498+ }
78499 return prev;
78500 }
78501
78502@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
78503 mpol_equal(policy, vma_policy(next)) &&
78504 can_vma_merge_before(next, vm_flags,
78505 anon_vma, file, pgoff+pglen)) {
78506- if (prev && addr < prev->vm_end) /* case 4 */
78507+ if (prev && addr < prev->vm_end) { /* case 4 */
78508 vma_adjust(prev, prev->vm_start,
78509 addr, prev->vm_pgoff, NULL);
78510- else /* cases 3, 8 */
78511+
78512+#ifdef CONFIG_PAX_SEGMEXEC
78513+ if (prev_m)
78514+ vma_adjust(prev_m, prev_m->vm_start,
78515+ addr_m, prev_m->vm_pgoff, NULL);
78516+#endif
78517+
78518+ } else { /* cases 3, 8 */
78519 vma_adjust(area, addr, next->vm_end,
78520 next->vm_pgoff - pglen, NULL);
78521+
78522+#ifdef CONFIG_PAX_SEGMEXEC
78523+ if (area_m)
78524+ vma_adjust(area_m, addr_m, next_m->vm_end,
78525+ next_m->vm_pgoff - pglen, NULL);
78526+#endif
78527+
78528+ }
78529 return area;
78530 }
78531
78532@@ -898,14 +978,11 @@ none:
78533 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
78534 struct file *file, long pages)
78535 {
78536- const unsigned long stack_flags
78537- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
78538-
78539 if (file) {
78540 mm->shared_vm += pages;
78541 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
78542 mm->exec_vm += pages;
78543- } else if (flags & stack_flags)
78544+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
78545 mm->stack_vm += pages;
78546 if (flags & (VM_RESERVED|VM_IO))
78547 mm->reserved_vm += pages;
78548@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78549 * (the exception is when the underlying filesystem is noexec
78550 * mounted, in which case we dont add PROT_EXEC.)
78551 */
78552- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78553+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78554 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
78555 prot |= PROT_EXEC;
78556
78557@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78558 /* Obtain the address to map to. we verify (or select) it and ensure
78559 * that it represents a valid section of the address space.
78560 */
78561- addr = get_unmapped_area(file, addr, len, pgoff, flags);
78562+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
78563 if (addr & ~PAGE_MASK)
78564 return addr;
78565
78566@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78567 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
78568 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
78569
78570+#ifdef CONFIG_PAX_MPROTECT
78571+ if (mm->pax_flags & MF_PAX_MPROTECT) {
78572+#ifndef CONFIG_PAX_MPROTECT_COMPAT
78573+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
78574+ gr_log_rwxmmap(file);
78575+
78576+#ifdef CONFIG_PAX_EMUPLT
78577+ vm_flags &= ~VM_EXEC;
78578+#else
78579+ return -EPERM;
78580+#endif
78581+
78582+ }
78583+
78584+ if (!(vm_flags & VM_EXEC))
78585+ vm_flags &= ~VM_MAYEXEC;
78586+#else
78587+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78588+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78589+#endif
78590+ else
78591+ vm_flags &= ~VM_MAYWRITE;
78592+ }
78593+#endif
78594+
78595+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78596+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
78597+ vm_flags &= ~VM_PAGEEXEC;
78598+#endif
78599+
78600 if (flags & MAP_LOCKED)
78601 if (!can_do_mlock())
78602 return -EPERM;
78603@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78604 locked += mm->locked_vm;
78605 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78606 lock_limit >>= PAGE_SHIFT;
78607+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78608 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
78609 return -EAGAIN;
78610 }
78611@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
78612 if (error)
78613 return error;
78614
78615+ if (!gr_acl_handle_mmap(file, prot))
78616+ return -EACCES;
78617+
78618 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
78619 }
78620 EXPORT_SYMBOL(do_mmap_pgoff);
78621@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
78622 */
78623 int vma_wants_writenotify(struct vm_area_struct *vma)
78624 {
78625- unsigned int vm_flags = vma->vm_flags;
78626+ unsigned long vm_flags = vma->vm_flags;
78627
78628 /* If it was private or non-writable, the write bit is already clear */
78629- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
78630+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
78631 return 0;
78632
78633 /* The backer wishes to know when pages are first written to? */
78634@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
78635 unsigned long charged = 0;
78636 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
78637
78638+#ifdef CONFIG_PAX_SEGMEXEC
78639+ struct vm_area_struct *vma_m = NULL;
78640+#endif
78641+
78642+ /*
78643+ * mm->mmap_sem is required to protect against another thread
78644+ * changing the mappings in case we sleep.
78645+ */
78646+ verify_mm_writelocked(mm);
78647+
78648 /* Clear old maps */
78649 error = -ENOMEM;
78650-munmap_back:
78651 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78652 if (vma && vma->vm_start < addr + len) {
78653 if (do_munmap(mm, addr, len))
78654 return -ENOMEM;
78655- goto munmap_back;
78656+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78657+ BUG_ON(vma && vma->vm_start < addr + len);
78658 }
78659
78660 /* Check against address space limit. */
78661@@ -1173,6 +1294,16 @@ munmap_back:
78662 goto unacct_error;
78663 }
78664
78665+#ifdef CONFIG_PAX_SEGMEXEC
78666+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
78667+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78668+ if (!vma_m) {
78669+ error = -ENOMEM;
78670+ goto free_vma;
78671+ }
78672+ }
78673+#endif
78674+
78675 vma->vm_mm = mm;
78676 vma->vm_start = addr;
78677 vma->vm_end = addr + len;
78678@@ -1195,6 +1326,19 @@ munmap_back:
78679 error = file->f_op->mmap(file, vma);
78680 if (error)
78681 goto unmap_and_free_vma;
78682+
78683+#ifdef CONFIG_PAX_SEGMEXEC
78684+ if (vma_m && (vm_flags & VM_EXECUTABLE))
78685+ added_exe_file_vma(mm);
78686+#endif
78687+
78688+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78689+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
78690+ vma->vm_flags |= VM_PAGEEXEC;
78691+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78692+ }
78693+#endif
78694+
78695 if (vm_flags & VM_EXECUTABLE)
78696 added_exe_file_vma(mm);
78697
78698@@ -1218,6 +1362,11 @@ munmap_back:
78699 vma_link(mm, vma, prev, rb_link, rb_parent);
78700 file = vma->vm_file;
78701
78702+#ifdef CONFIG_PAX_SEGMEXEC
78703+ if (vma_m)
78704+ pax_mirror_vma(vma_m, vma);
78705+#endif
78706+
78707 /* Once vma denies write, undo our temporary denial count */
78708 if (correct_wcount)
78709 atomic_inc(&inode->i_writecount);
78710@@ -1226,6 +1375,7 @@ out:
78711
78712 mm->total_vm += len >> PAGE_SHIFT;
78713 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78714+ track_exec_limit(mm, addr, addr + len, vm_flags);
78715 if (vm_flags & VM_LOCKED) {
78716 /*
78717 * makes pages present; downgrades, drops, reacquires mmap_sem
78718@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
78719 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78720 charged = 0;
78721 free_vma:
78722+
78723+#ifdef CONFIG_PAX_SEGMEXEC
78724+ if (vma_m)
78725+ kmem_cache_free(vm_area_cachep, vma_m);
78726+#endif
78727+
78728 kmem_cache_free(vm_area_cachep, vma);
78729 unacct_error:
78730 if (charged)
78731@@ -1255,6 +1411,44 @@ unacct_error:
78732 return error;
78733 }
78734
78735+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78736+{
78737+ if (!vma) {
78738+#ifdef CONFIG_STACK_GROWSUP
78739+ if (addr > sysctl_heap_stack_gap)
78740+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78741+ else
78742+ vma = find_vma(current->mm, 0);
78743+ if (vma && (vma->vm_flags & VM_GROWSUP))
78744+ return false;
78745+#endif
78746+ return true;
78747+ }
78748+
78749+ if (addr + len > vma->vm_start)
78750+ return false;
78751+
78752+ if (vma->vm_flags & VM_GROWSDOWN)
78753+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78754+#ifdef CONFIG_STACK_GROWSUP
78755+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78756+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78757+#endif
78758+
78759+ return true;
78760+}
78761+
78762+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78763+{
78764+ if (vma->vm_start < len)
78765+ return -ENOMEM;
78766+ if (!(vma->vm_flags & VM_GROWSDOWN))
78767+ return vma->vm_start - len;
78768+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
78769+ return vma->vm_start - len - sysctl_heap_stack_gap;
78770+ return -ENOMEM;
78771+}
78772+
78773 /* Get an address range which is currently unmapped.
78774 * For shmat() with addr=0.
78775 *
78776@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78777 if (flags & MAP_FIXED)
78778 return addr;
78779
78780+#ifdef CONFIG_PAX_RANDMMAP
78781+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78782+#endif
78783+
78784 if (addr) {
78785 addr = PAGE_ALIGN(addr);
78786- vma = find_vma(mm, addr);
78787- if (TASK_SIZE - len >= addr &&
78788- (!vma || addr + len <= vma->vm_start))
78789- return addr;
78790+ if (TASK_SIZE - len >= addr) {
78791+ vma = find_vma(mm, addr);
78792+ if (check_heap_stack_gap(vma, addr, len))
78793+ return addr;
78794+ }
78795 }
78796 if (len > mm->cached_hole_size) {
78797- start_addr = addr = mm->free_area_cache;
78798+ start_addr = addr = mm->free_area_cache;
78799 } else {
78800- start_addr = addr = TASK_UNMAPPED_BASE;
78801- mm->cached_hole_size = 0;
78802+ start_addr = addr = mm->mmap_base;
78803+ mm->cached_hole_size = 0;
78804 }
78805
78806 full_search:
78807@@ -1303,34 +1502,40 @@ full_search:
78808 * Start a new search - just in case we missed
78809 * some holes.
78810 */
78811- if (start_addr != TASK_UNMAPPED_BASE) {
78812- addr = TASK_UNMAPPED_BASE;
78813- start_addr = addr;
78814+ if (start_addr != mm->mmap_base) {
78815+ start_addr = addr = mm->mmap_base;
78816 mm->cached_hole_size = 0;
78817 goto full_search;
78818 }
78819 return -ENOMEM;
78820 }
78821- if (!vma || addr + len <= vma->vm_start) {
78822- /*
78823- * Remember the place where we stopped the search:
78824- */
78825- mm->free_area_cache = addr + len;
78826- return addr;
78827- }
78828+ if (check_heap_stack_gap(vma, addr, len))
78829+ break;
78830 if (addr + mm->cached_hole_size < vma->vm_start)
78831 mm->cached_hole_size = vma->vm_start - addr;
78832 addr = vma->vm_end;
78833 }
78834+
78835+ /*
78836+ * Remember the place where we stopped the search:
78837+ */
78838+ mm->free_area_cache = addr + len;
78839+ return addr;
78840 }
78841 #endif
78842
78843 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78844 {
78845+
78846+#ifdef CONFIG_PAX_SEGMEXEC
78847+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78848+ return;
78849+#endif
78850+
78851 /*
78852 * Is this a new hole at the lowest possible address?
78853 */
78854- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78855+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78856 mm->free_area_cache = addr;
78857 mm->cached_hole_size = ~0UL;
78858 }
78859@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78860 {
78861 struct vm_area_struct *vma;
78862 struct mm_struct *mm = current->mm;
78863- unsigned long addr = addr0;
78864+ unsigned long base = mm->mmap_base, addr = addr0;
78865
78866 /* requested length too big for entire address space */
78867 if (len > TASK_SIZE)
78868@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78869 if (flags & MAP_FIXED)
78870 return addr;
78871
78872+#ifdef CONFIG_PAX_RANDMMAP
78873+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78874+#endif
78875+
78876 /* requesting a specific address */
78877 if (addr) {
78878 addr = PAGE_ALIGN(addr);
78879- vma = find_vma(mm, addr);
78880- if (TASK_SIZE - len >= addr &&
78881- (!vma || addr + len <= vma->vm_start))
78882- return addr;
78883+ if (TASK_SIZE - len >= addr) {
78884+ vma = find_vma(mm, addr);
78885+ if (check_heap_stack_gap(vma, addr, len))
78886+ return addr;
78887+ }
78888 }
78889
78890 /* check if free_area_cache is useful for us */
78891@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78892 /* make sure it can fit in the remaining address space */
78893 if (addr > len) {
78894 vma = find_vma(mm, addr-len);
78895- if (!vma || addr <= vma->vm_start)
78896+ if (check_heap_stack_gap(vma, addr - len, len))
78897 /* remember the address as a hint for next time */
78898 return (mm->free_area_cache = addr-len);
78899 }
78900@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78901 * return with success:
78902 */
78903 vma = find_vma(mm, addr);
78904- if (!vma || addr+len <= vma->vm_start)
78905+ if (check_heap_stack_gap(vma, addr, len))
78906 /* remember the address as a hint for next time */
78907 return (mm->free_area_cache = addr);
78908
78909@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78910 mm->cached_hole_size = vma->vm_start - addr;
78911
78912 /* try just below the current vma->vm_start */
78913- addr = vma->vm_start-len;
78914- } while (len < vma->vm_start);
78915+ addr = skip_heap_stack_gap(vma, len);
78916+ } while (!IS_ERR_VALUE(addr));
78917
78918 bottomup:
78919 /*
78920@@ -1414,13 +1624,21 @@ bottomup:
78921 * can happen with large stack limits and large mmap()
78922 * allocations.
78923 */
78924+ mm->mmap_base = TASK_UNMAPPED_BASE;
78925+
78926+#ifdef CONFIG_PAX_RANDMMAP
78927+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78928+ mm->mmap_base += mm->delta_mmap;
78929+#endif
78930+
78931+ mm->free_area_cache = mm->mmap_base;
78932 mm->cached_hole_size = ~0UL;
78933- mm->free_area_cache = TASK_UNMAPPED_BASE;
78934 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78935 /*
78936 * Restore the topdown base:
78937 */
78938- mm->free_area_cache = mm->mmap_base;
78939+ mm->mmap_base = base;
78940+ mm->free_area_cache = base;
78941 mm->cached_hole_size = ~0UL;
78942
78943 return addr;
78944@@ -1429,6 +1647,12 @@ bottomup:
78945
78946 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78947 {
78948+
78949+#ifdef CONFIG_PAX_SEGMEXEC
78950+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78951+ return;
78952+#endif
78953+
78954 /*
78955 * Is this a new hole at the highest possible address?
78956 */
78957@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78958 mm->free_area_cache = addr;
78959
78960 /* dont allow allocations above current base */
78961- if (mm->free_area_cache > mm->mmap_base)
78962+ if (mm->free_area_cache > mm->mmap_base) {
78963 mm->free_area_cache = mm->mmap_base;
78964+ mm->cached_hole_size = ~0UL;
78965+ }
78966 }
78967
78968 unsigned long
78969@@ -1510,40 +1736,41 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
78970
78971 EXPORT_SYMBOL(find_vma);
78972
78973-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
78974+/*
78975+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
78976+ * Note: pprev is set to NULL when return value is NULL.
78977+ */
78978 struct vm_area_struct *
78979 find_vma_prev(struct mm_struct *mm, unsigned long addr,
78980 struct vm_area_struct **pprev)
78981 {
78982- struct vm_area_struct *vma = NULL, *prev = NULL;
78983- struct rb_node *rb_node;
78984- if (!mm)
78985- goto out;
78986+ struct vm_area_struct *vma;
78987
78988- /* Guard against addr being lower than the first VMA */
78989- vma = mm->mmap;
78990+ vma = find_vma(mm, addr);
78991+ *pprev = vma ? vma->vm_prev : NULL;
78992+ return vma;
78993+}
78994
78995- /* Go through the RB tree quickly. */
78996- rb_node = mm->mm_rb.rb_node;
78997+#ifdef CONFIG_PAX_SEGMEXEC
78998+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78999+{
79000+ struct vm_area_struct *vma_m;
79001
79002- while (rb_node) {
79003- struct vm_area_struct *vma_tmp;
79004- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
79005-
79006- if (addr < vma_tmp->vm_end) {
79007- rb_node = rb_node->rb_left;
79008- } else {
79009- prev = vma_tmp;
79010- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
79011- break;
79012- rb_node = rb_node->rb_right;
79013- }
79014+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
79015+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
79016+ BUG_ON(vma->vm_mirror);
79017+ return NULL;
79018 }
79019-
79020-out:
79021- *pprev = prev;
79022- return prev ? prev->vm_next : vma;
79023+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
79024+ vma_m = vma->vm_mirror;
79025+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
79026+ BUG_ON(vma->vm_file != vma_m->vm_file);
79027+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
79028+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
79029+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
79030+ return vma_m;
79031 }
79032+#endif
79033
79034 /*
79035 * Verify that the stack growth is acceptable and
79036@@ -1561,6 +1788,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79037 return -ENOMEM;
79038
79039 /* Stack limit test */
79040+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
79041 if (size > rlim[RLIMIT_STACK].rlim_cur)
79042 return -ENOMEM;
79043
79044@@ -1570,6 +1798,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79045 unsigned long limit;
79046 locked = mm->locked_vm + grow;
79047 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
79048+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
79049 if (locked > limit && !capable(CAP_IPC_LOCK))
79050 return -ENOMEM;
79051 }
79052@@ -1600,37 +1829,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
79053 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
79054 * vma is the last one with address > vma->vm_end. Have to extend vma.
79055 */
79056+#ifndef CONFIG_IA64
79057+static
79058+#endif
79059 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
79060 {
79061 int error;
79062+ bool locknext;
79063
79064 if (!(vma->vm_flags & VM_GROWSUP))
79065 return -EFAULT;
79066
79067+ /* Also guard against wrapping around to address 0. */
79068+ if (address < PAGE_ALIGN(address+1))
79069+ address = PAGE_ALIGN(address+1);
79070+ else
79071+ return -ENOMEM;
79072+
79073 /*
79074 * We must make sure the anon_vma is allocated
79075 * so that the anon_vma locking is not a noop.
79076 */
79077 if (unlikely(anon_vma_prepare(vma)))
79078 return -ENOMEM;
79079+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
79080+ if (locknext && anon_vma_prepare(vma->vm_next))
79081+ return -ENOMEM;
79082 anon_vma_lock(vma);
79083+ if (locknext)
79084+ anon_vma_lock(vma->vm_next);
79085
79086 /*
79087 * vma->vm_start/vm_end cannot change under us because the caller
79088 * is required to hold the mmap_sem in read mode. We need the
79089- * anon_vma lock to serialize against concurrent expand_stacks.
79090- * Also guard against wrapping around to address 0.
79091+ * anon_vma locks to serialize against concurrent expand_stacks
79092+ * and expand_upwards.
79093 */
79094- if (address < PAGE_ALIGN(address+4))
79095- address = PAGE_ALIGN(address+4);
79096- else {
79097- anon_vma_unlock(vma);
79098- return -ENOMEM;
79099- }
79100 error = 0;
79101
79102 /* Somebody else might have raced and expanded it already */
79103- if (address > vma->vm_end) {
79104+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
79105+ error = -ENOMEM;
79106+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
79107 unsigned long size, grow;
79108
79109 size = address - vma->vm_start;
79110@@ -1643,6 +1883,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
79111 vma->vm_end = address;
79112 }
79113 }
79114+ if (locknext)
79115+ anon_vma_unlock(vma->vm_next);
79116 anon_vma_unlock(vma);
79117 return error;
79118 }
79119@@ -1655,6 +1897,8 @@ static int expand_downwards(struct vm_area_struct *vma,
79120 unsigned long address)
79121 {
79122 int error;
79123+ bool lockprev = false;
79124+ struct vm_area_struct *prev;
79125
79126 /*
79127 * We must make sure the anon_vma is allocated
79128@@ -1668,6 +1912,15 @@ static int expand_downwards(struct vm_area_struct *vma,
79129 if (error)
79130 return error;
79131
79132+ prev = vma->vm_prev;
79133+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
79134+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
79135+#endif
79136+ if (lockprev && anon_vma_prepare(prev))
79137+ return -ENOMEM;
79138+ if (lockprev)
79139+ anon_vma_lock(prev);
79140+
79141 anon_vma_lock(vma);
79142
79143 /*
79144@@ -1677,9 +1930,17 @@ static int expand_downwards(struct vm_area_struct *vma,
79145 */
79146
79147 /* Somebody else might have raced and expanded it already */
79148- if (address < vma->vm_start) {
79149+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
79150+ error = -ENOMEM;
79151+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
79152 unsigned long size, grow;
79153
79154+#ifdef CONFIG_PAX_SEGMEXEC
79155+ struct vm_area_struct *vma_m;
79156+
79157+ vma_m = pax_find_mirror_vma(vma);
79158+#endif
79159+
79160 size = vma->vm_end - address;
79161 grow = (vma->vm_start - address) >> PAGE_SHIFT;
79162
79163@@ -1689,10 +1950,22 @@ static int expand_downwards(struct vm_area_struct *vma,
79164 if (!error) {
79165 vma->vm_start = address;
79166 vma->vm_pgoff -= grow;
79167+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
79168+
79169+#ifdef CONFIG_PAX_SEGMEXEC
79170+ if (vma_m) {
79171+ vma_m->vm_start -= grow << PAGE_SHIFT;
79172+ vma_m->vm_pgoff -= grow;
79173+ }
79174+#endif
79175+
79176+
79177 }
79178 }
79179 }
79180 anon_vma_unlock(vma);
79181+ if (lockprev)
79182+ anon_vma_unlock(prev);
79183 return error;
79184 }
79185
79186@@ -1768,6 +2041,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
79187 do {
79188 long nrpages = vma_pages(vma);
79189
79190+#ifdef CONFIG_PAX_SEGMEXEC
79191+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
79192+ vma = remove_vma(vma);
79193+ continue;
79194+ }
79195+#endif
79196+
79197 mm->total_vm -= nrpages;
79198 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
79199 vma = remove_vma(vma);
79200@@ -1813,6 +2093,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
79201 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
79202 vma->vm_prev = NULL;
79203 do {
79204+
79205+#ifdef CONFIG_PAX_SEGMEXEC
79206+ if (vma->vm_mirror) {
79207+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
79208+ vma->vm_mirror->vm_mirror = NULL;
79209+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
79210+ vma->vm_mirror = NULL;
79211+ }
79212+#endif
79213+
79214 rb_erase(&vma->vm_rb, &mm->mm_rb);
79215 mm->map_count--;
79216 tail_vma = vma;
79217@@ -1840,10 +2130,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79218 struct mempolicy *pol;
79219 struct vm_area_struct *new;
79220
79221+#ifdef CONFIG_PAX_SEGMEXEC
79222+ struct vm_area_struct *vma_m, *new_m = NULL;
79223+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
79224+#endif
79225+
79226 if (is_vm_hugetlb_page(vma) && (addr &
79227 ~(huge_page_mask(hstate_vma(vma)))))
79228 return -EINVAL;
79229
79230+#ifdef CONFIG_PAX_SEGMEXEC
79231+ vma_m = pax_find_mirror_vma(vma);
79232+
79233+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
79234+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
79235+ if (mm->map_count >= sysctl_max_map_count-1)
79236+ return -ENOMEM;
79237+ } else
79238+#endif
79239+
79240 if (mm->map_count >= sysctl_max_map_count)
79241 return -ENOMEM;
79242
79243@@ -1851,6 +2156,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79244 if (!new)
79245 return -ENOMEM;
79246
79247+#ifdef CONFIG_PAX_SEGMEXEC
79248+ if (vma_m) {
79249+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
79250+ if (!new_m) {
79251+ kmem_cache_free(vm_area_cachep, new);
79252+ return -ENOMEM;
79253+ }
79254+ }
79255+#endif
79256+
79257 /* most fields are the same, copy all, and then fixup */
79258 *new = *vma;
79259
79260@@ -1861,8 +2176,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79261 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
79262 }
79263
79264+#ifdef CONFIG_PAX_SEGMEXEC
79265+ if (vma_m) {
79266+ *new_m = *vma_m;
79267+ new_m->vm_mirror = new;
79268+ new->vm_mirror = new_m;
79269+
79270+ if (new_below)
79271+ new_m->vm_end = addr_m;
79272+ else {
79273+ new_m->vm_start = addr_m;
79274+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
79275+ }
79276+ }
79277+#endif
79278+
79279 pol = mpol_dup(vma_policy(vma));
79280 if (IS_ERR(pol)) {
79281+
79282+#ifdef CONFIG_PAX_SEGMEXEC
79283+ if (new_m)
79284+ kmem_cache_free(vm_area_cachep, new_m);
79285+#endif
79286+
79287 kmem_cache_free(vm_area_cachep, new);
79288 return PTR_ERR(pol);
79289 }
79290@@ -1883,6 +2219,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79291 else
79292 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
79293
79294+#ifdef CONFIG_PAX_SEGMEXEC
79295+ if (vma_m) {
79296+ mpol_get(pol);
79297+ vma_set_policy(new_m, pol);
79298+
79299+ if (new_m->vm_file) {
79300+ get_file(new_m->vm_file);
79301+ if (vma_m->vm_flags & VM_EXECUTABLE)
79302+ added_exe_file_vma(mm);
79303+ }
79304+
79305+ if (new_m->vm_ops && new_m->vm_ops->open)
79306+ new_m->vm_ops->open(new_m);
79307+
79308+ if (new_below)
79309+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
79310+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
79311+ else
79312+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
79313+ }
79314+#endif
79315+
79316 return 0;
79317 }
79318
79319@@ -1891,11 +2249,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
79320 * work. This now handles partial unmappings.
79321 * Jeremy Fitzhardinge <jeremy@goop.org>
79322 */
79323+#ifdef CONFIG_PAX_SEGMEXEC
79324 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79325 {
79326+ int ret = __do_munmap(mm, start, len);
79327+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
79328+ return ret;
79329+
79330+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
79331+}
79332+
79333+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79334+#else
79335+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79336+#endif
79337+{
79338 unsigned long end;
79339 struct vm_area_struct *vma, *prev, *last;
79340
79341+ /*
79342+ * mm->mmap_sem is required to protect against another thread
79343+ * changing the mappings in case we sleep.
79344+ */
79345+ verify_mm_writelocked(mm);
79346+
79347 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
79348 return -EINVAL;
79349
79350@@ -1959,6 +2336,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
79351 /* Fix up all other VM information */
79352 remove_vma_list(mm, vma);
79353
79354+ track_exec_limit(mm, start, end, 0UL);
79355+
79356 return 0;
79357 }
79358
79359@@ -1971,22 +2350,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
79360
79361 profile_munmap(addr);
79362
79363+#ifdef CONFIG_PAX_SEGMEXEC
79364+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
79365+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
79366+ return -EINVAL;
79367+#endif
79368+
79369 down_write(&mm->mmap_sem);
79370 ret = do_munmap(mm, addr, len);
79371 up_write(&mm->mmap_sem);
79372 return ret;
79373 }
79374
79375-static inline void verify_mm_writelocked(struct mm_struct *mm)
79376-{
79377-#ifdef CONFIG_DEBUG_VM
79378- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
79379- WARN_ON(1);
79380- up_read(&mm->mmap_sem);
79381- }
79382-#endif
79383-}
79384-
79385 /*
79386 * this is really a simplified "do_mmap". it only handles
79387 * anonymous maps. eventually we may be able to do some
79388@@ -2000,6 +2375,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79389 struct rb_node ** rb_link, * rb_parent;
79390 pgoff_t pgoff = addr >> PAGE_SHIFT;
79391 int error;
79392+ unsigned long charged;
79393
79394 len = PAGE_ALIGN(len);
79395 if (!len)
79396@@ -2011,16 +2387,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79397
79398 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
79399
79400+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
79401+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
79402+ flags &= ~VM_EXEC;
79403+
79404+#ifdef CONFIG_PAX_MPROTECT
79405+ if (mm->pax_flags & MF_PAX_MPROTECT)
79406+ flags &= ~VM_MAYEXEC;
79407+#endif
79408+
79409+ }
79410+#endif
79411+
79412 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
79413 if (error & ~PAGE_MASK)
79414 return error;
79415
79416+ charged = len >> PAGE_SHIFT;
79417+
79418 /*
79419 * mlock MCL_FUTURE?
79420 */
79421 if (mm->def_flags & VM_LOCKED) {
79422 unsigned long locked, lock_limit;
79423- locked = len >> PAGE_SHIFT;
79424+ locked = charged;
79425 locked += mm->locked_vm;
79426 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
79427 lock_limit >>= PAGE_SHIFT;
79428@@ -2037,22 +2427,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79429 /*
79430 * Clear old maps. this also does some error checking for us
79431 */
79432- munmap_back:
79433 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79434 if (vma && vma->vm_start < addr + len) {
79435 if (do_munmap(mm, addr, len))
79436 return -ENOMEM;
79437- goto munmap_back;
79438+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
79439+ BUG_ON(vma && vma->vm_start < addr + len);
79440 }
79441
79442 /* Check against address space limits *after* clearing old maps... */
79443- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
79444+ if (!may_expand_vm(mm, charged))
79445 return -ENOMEM;
79446
79447 if (mm->map_count > sysctl_max_map_count)
79448 return -ENOMEM;
79449
79450- if (security_vm_enough_memory(len >> PAGE_SHIFT))
79451+ if (security_vm_enough_memory(charged))
79452 return -ENOMEM;
79453
79454 /* Can we just expand an old private anonymous mapping? */
79455@@ -2066,7 +2456,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79456 */
79457 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79458 if (!vma) {
79459- vm_unacct_memory(len >> PAGE_SHIFT);
79460+ vm_unacct_memory(charged);
79461 return -ENOMEM;
79462 }
79463
79464@@ -2078,11 +2468,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
79465 vma->vm_page_prot = vm_get_page_prot(flags);
79466 vma_link(mm, vma, prev, rb_link, rb_parent);
79467 out:
79468- mm->total_vm += len >> PAGE_SHIFT;
79469+ mm->total_vm += charged;
79470 if (flags & VM_LOCKED) {
79471 if (!mlock_vma_pages_range(vma, addr, addr + len))
79472- mm->locked_vm += (len >> PAGE_SHIFT);
79473+ mm->locked_vm += charged;
79474 }
79475+ track_exec_limit(mm, addr, addr + len, flags);
79476 return addr;
79477 }
79478
79479@@ -2129,8 +2520,10 @@ void exit_mmap(struct mm_struct *mm)
79480 * Walk the list again, actually closing and freeing it,
79481 * with preemption enabled, without holding any MM locks.
79482 */
79483- while (vma)
79484+ while (vma) {
79485+ vma->vm_mirror = NULL;
79486 vma = remove_vma(vma);
79487+ }
79488
79489 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
79490 }
79491@@ -2144,6 +2537,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79492 struct vm_area_struct * __vma, * prev;
79493 struct rb_node ** rb_link, * rb_parent;
79494
79495+#ifdef CONFIG_PAX_SEGMEXEC
79496+ struct vm_area_struct *vma_m = NULL;
79497+#endif
79498+
79499 /*
79500 * The vm_pgoff of a purely anonymous vma should be irrelevant
79501 * until its first write fault, when page's anon_vma and index
79502@@ -2166,7 +2563,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
79503 if ((vma->vm_flags & VM_ACCOUNT) &&
79504 security_vm_enough_memory_mm(mm, vma_pages(vma)))
79505 return -ENOMEM;
79506+
79507+#ifdef CONFIG_PAX_SEGMEXEC
79508+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
79509+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79510+ if (!vma_m)
79511+ return -ENOMEM;
79512+ }
79513+#endif
79514+
79515 vma_link(mm, vma, prev, rb_link, rb_parent);
79516+
79517+#ifdef CONFIG_PAX_SEGMEXEC
79518+ if (vma_m)
79519+ pax_mirror_vma(vma_m, vma);
79520+#endif
79521+
79522 return 0;
79523 }
79524
79525@@ -2184,6 +2596,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79526 struct rb_node **rb_link, *rb_parent;
79527 struct mempolicy *pol;
79528
79529+ BUG_ON(vma->vm_mirror);
79530+
79531 /*
79532 * If anonymous vma has not yet been faulted, update new pgoff
79533 * to match new location, to increase its chance of merging.
79534@@ -2227,6 +2641,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
79535 return new_vma;
79536 }
79537
79538+#ifdef CONFIG_PAX_SEGMEXEC
79539+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
79540+{
79541+ struct vm_area_struct *prev_m;
79542+ struct rb_node **rb_link_m, *rb_parent_m;
79543+ struct mempolicy *pol_m;
79544+
79545+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
79546+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
79547+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
79548+ *vma_m = *vma;
79549+ pol_m = vma_policy(vma_m);
79550+ mpol_get(pol_m);
79551+ vma_set_policy(vma_m, pol_m);
79552+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
79553+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
79554+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
79555+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
79556+ if (vma_m->vm_file)
79557+ get_file(vma_m->vm_file);
79558+ if (vma_m->vm_ops && vma_m->vm_ops->open)
79559+ vma_m->vm_ops->open(vma_m);
79560+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
79561+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
79562+ vma_m->vm_mirror = vma;
79563+ vma->vm_mirror = vma_m;
79564+}
79565+#endif
79566+
79567 /*
79568 * Return true if the calling process may expand its vm space by the passed
79569 * number of pages
79570@@ -2237,7 +2680,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
79571 unsigned long lim;
79572
79573 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
79574-
79575+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
79576 if (cur + npages > lim)
79577 return 0;
79578 return 1;
79579@@ -2307,6 +2750,22 @@ int install_special_mapping(struct mm_struct *mm,
79580 vma->vm_start = addr;
79581 vma->vm_end = addr + len;
79582
79583+#ifdef CONFIG_PAX_MPROTECT
79584+ if (mm->pax_flags & MF_PAX_MPROTECT) {
79585+#ifndef CONFIG_PAX_MPROTECT_COMPAT
79586+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
79587+ return -EPERM;
79588+ if (!(vm_flags & VM_EXEC))
79589+ vm_flags &= ~VM_MAYEXEC;
79590+#else
79591+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
79592+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
79593+#endif
79594+ else
79595+ vm_flags &= ~VM_MAYWRITE;
79596+ }
79597+#endif
79598+
79599 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
79600 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
79601
79602diff --git a/mm/mprotect.c b/mm/mprotect.c
79603index 1737c7e..c7faeb4 100644
79604--- a/mm/mprotect.c
79605+++ b/mm/mprotect.c
79606@@ -24,10 +24,16 @@
79607 #include <linux/mmu_notifier.h>
79608 #include <linux/migrate.h>
79609 #include <linux/perf_event.h>
79610+
79611+#ifdef CONFIG_PAX_MPROTECT
79612+#include <linux/elf.h>
79613+#endif
79614+
79615 #include <asm/uaccess.h>
79616 #include <asm/pgtable.h>
79617 #include <asm/cacheflush.h>
79618 #include <asm/tlbflush.h>
79619+#include <asm/mmu_context.h>
79620
79621 #ifndef pgprot_modify
79622 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
79623@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
79624 flush_tlb_range(vma, start, end);
79625 }
79626
79627+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79628+/* called while holding the mmap semaphor for writing except stack expansion */
79629+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
79630+{
79631+ unsigned long oldlimit, newlimit = 0UL;
79632+
79633+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
79634+ return;
79635+
79636+ spin_lock(&mm->page_table_lock);
79637+ oldlimit = mm->context.user_cs_limit;
79638+ if ((prot & VM_EXEC) && oldlimit < end)
79639+ /* USER_CS limit moved up */
79640+ newlimit = end;
79641+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
79642+ /* USER_CS limit moved down */
79643+ newlimit = start;
79644+
79645+ if (newlimit) {
79646+ mm->context.user_cs_limit = newlimit;
79647+
79648+#ifdef CONFIG_SMP
79649+ wmb();
79650+ cpus_clear(mm->context.cpu_user_cs_mask);
79651+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
79652+#endif
79653+
79654+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
79655+ }
79656+ spin_unlock(&mm->page_table_lock);
79657+ if (newlimit == end) {
79658+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
79659+
79660+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
79661+ if (is_vm_hugetlb_page(vma))
79662+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
79663+ else
79664+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
79665+ }
79666+}
79667+#endif
79668+
79669 int
79670 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79671 unsigned long start, unsigned long end, unsigned long newflags)
79672@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79673 int error;
79674 int dirty_accountable = 0;
79675
79676+#ifdef CONFIG_PAX_SEGMEXEC
79677+ struct vm_area_struct *vma_m = NULL;
79678+ unsigned long start_m, end_m;
79679+
79680+ start_m = start + SEGMEXEC_TASK_SIZE;
79681+ end_m = end + SEGMEXEC_TASK_SIZE;
79682+#endif
79683+
79684 if (newflags == oldflags) {
79685 *pprev = vma;
79686 return 0;
79687 }
79688
79689+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
79690+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
79691+
79692+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
79693+ return -ENOMEM;
79694+
79695+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
79696+ return -ENOMEM;
79697+ }
79698+
79699 /*
79700 * If we make a private mapping writable we increase our commit;
79701 * but (without finer accounting) cannot reduce our commit if we
79702@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79703 }
79704 }
79705
79706+#ifdef CONFIG_PAX_SEGMEXEC
79707+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
79708+ if (start != vma->vm_start) {
79709+ error = split_vma(mm, vma, start, 1);
79710+ if (error)
79711+ goto fail;
79712+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
79713+ *pprev = (*pprev)->vm_next;
79714+ }
79715+
79716+ if (end != vma->vm_end) {
79717+ error = split_vma(mm, vma, end, 0);
79718+ if (error)
79719+ goto fail;
79720+ }
79721+
79722+ if (pax_find_mirror_vma(vma)) {
79723+ error = __do_munmap(mm, start_m, end_m - start_m);
79724+ if (error)
79725+ goto fail;
79726+ } else {
79727+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79728+ if (!vma_m) {
79729+ error = -ENOMEM;
79730+ goto fail;
79731+ }
79732+ vma->vm_flags = newflags;
79733+ pax_mirror_vma(vma_m, vma);
79734+ }
79735+ }
79736+#endif
79737+
79738 /*
79739 * First try to merge with previous and/or next vma.
79740 */
79741@@ -195,9 +293,21 @@ success:
79742 * vm_flags and vm_page_prot are protected by the mmap_sem
79743 * held in write mode.
79744 */
79745+
79746+#ifdef CONFIG_PAX_SEGMEXEC
79747+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79748+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79749+#endif
79750+
79751 vma->vm_flags = newflags;
79752+
79753+#ifdef CONFIG_PAX_MPROTECT
79754+ if (mm->binfmt && mm->binfmt->handle_mprotect)
79755+ mm->binfmt->handle_mprotect(vma, newflags);
79756+#endif
79757+
79758 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79759- vm_get_page_prot(newflags));
79760+ vm_get_page_prot(vma->vm_flags));
79761
79762 if (vma_wants_writenotify(vma)) {
79763 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79764@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79765 end = start + len;
79766 if (end <= start)
79767 return -ENOMEM;
79768+
79769+#ifdef CONFIG_PAX_SEGMEXEC
79770+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79771+ if (end > SEGMEXEC_TASK_SIZE)
79772+ return -EINVAL;
79773+ } else
79774+#endif
79775+
79776+ if (end > TASK_SIZE)
79777+ return -EINVAL;
79778+
79779 if (!arch_validate_prot(prot))
79780 return -EINVAL;
79781
79782@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79783 /*
79784 * Does the application expect PROT_READ to imply PROT_EXEC:
79785 */
79786- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79787+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79788 prot |= PROT_EXEC;
79789
79790 vm_flags = calc_vm_prot_bits(prot);
79791@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79792 if (start > vma->vm_start)
79793 prev = vma;
79794
79795+#ifdef CONFIG_PAX_MPROTECT
79796+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79797+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
79798+#endif
79799+
79800 for (nstart = start ; ; ) {
79801 unsigned long newflags;
79802
79803@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79804
79805 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79806 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79807+ if (prot & (PROT_WRITE | PROT_EXEC))
79808+ gr_log_rwxmprotect(vma->vm_file);
79809+
79810+ error = -EACCES;
79811+ goto out;
79812+ }
79813+
79814+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79815 error = -EACCES;
79816 goto out;
79817 }
79818@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79819 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79820 if (error)
79821 goto out;
79822+
79823+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
79824+
79825 nstart = tmp;
79826
79827 if (nstart < prev->vm_end)
79828diff --git a/mm/mremap.c b/mm/mremap.c
79829index 3e98d79..1706cec 100644
79830--- a/mm/mremap.c
79831+++ b/mm/mremap.c
79832@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79833 continue;
79834 pte = ptep_clear_flush(vma, old_addr, old_pte);
79835 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79836+
79837+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79838+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79839+ pte = pte_exprotect(pte);
79840+#endif
79841+
79842 set_pte_at(mm, new_addr, new_pte, pte);
79843 }
79844
79845@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79846 if (is_vm_hugetlb_page(vma))
79847 goto Einval;
79848
79849+#ifdef CONFIG_PAX_SEGMEXEC
79850+ if (pax_find_mirror_vma(vma))
79851+ goto Einval;
79852+#endif
79853+
79854 /* We can't remap across vm area boundaries */
79855 if (old_len > vma->vm_end - addr)
79856 goto Efault;
79857@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79858 unsigned long ret = -EINVAL;
79859 unsigned long charged = 0;
79860 unsigned long map_flags;
79861+ unsigned long pax_task_size = TASK_SIZE;
79862
79863 if (new_addr & ~PAGE_MASK)
79864 goto out;
79865
79866- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79867+#ifdef CONFIG_PAX_SEGMEXEC
79868+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79869+ pax_task_size = SEGMEXEC_TASK_SIZE;
79870+#endif
79871+
79872+ pax_task_size -= PAGE_SIZE;
79873+
79874+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79875 goto out;
79876
79877 /* Check if the location we're moving into overlaps the
79878 * old location at all, and fail if it does.
79879 */
79880- if ((new_addr <= addr) && (new_addr+new_len) > addr)
79881- goto out;
79882-
79883- if ((addr <= new_addr) && (addr+old_len) > new_addr)
79884+ if (addr + old_len > new_addr && new_addr + new_len > addr)
79885 goto out;
79886
79887 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79888@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79889 struct vm_area_struct *vma;
79890 unsigned long ret = -EINVAL;
79891 unsigned long charged = 0;
79892+ unsigned long pax_task_size = TASK_SIZE;
79893
79894 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79895 goto out;
79896@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79897 if (!new_len)
79898 goto out;
79899
79900+#ifdef CONFIG_PAX_SEGMEXEC
79901+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79902+ pax_task_size = SEGMEXEC_TASK_SIZE;
79903+#endif
79904+
79905+ pax_task_size -= PAGE_SIZE;
79906+
79907+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79908+ old_len > pax_task_size || addr > pax_task_size-old_len)
79909+ goto out;
79910+
79911 if (flags & MREMAP_FIXED) {
79912 if (flags & MREMAP_MAYMOVE)
79913 ret = mremap_to(addr, old_len, new_addr, new_len);
79914@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79915 addr + new_len);
79916 }
79917 ret = addr;
79918+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79919 goto out;
79920 }
79921 }
79922@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79923 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79924 if (ret)
79925 goto out;
79926+
79927+ map_flags = vma->vm_flags;
79928 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79929+ if (!(ret & ~PAGE_MASK)) {
79930+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79931+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79932+ }
79933 }
79934 out:
79935 if (ret & ~PAGE_MASK)
79936diff --git a/mm/nommu.c b/mm/nommu.c
79937index 406e8d4..53970d3 100644
79938--- a/mm/nommu.c
79939+++ b/mm/nommu.c
79940@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79941 int sysctl_overcommit_ratio = 50; /* default is 50% */
79942 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79943 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79944-int heap_stack_gap = 0;
79945
79946 atomic_long_t mmap_pages_allocated;
79947
79948@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79949 EXPORT_SYMBOL(find_vma);
79950
79951 /*
79952- * find a VMA
79953- * - we don't extend stack VMAs under NOMMU conditions
79954- */
79955-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79956-{
79957- return find_vma(mm, addr);
79958-}
79959-
79960-/*
79961 * expand a stack to a given address
79962 * - not supported under NOMMU conditions
79963 */
79964diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79965index 3ecab7e..594a471 100644
79966--- a/mm/page_alloc.c
79967+++ b/mm/page_alloc.c
79968@@ -289,7 +289,7 @@ out:
79969 * This usage means that zero-order pages may not be compound.
79970 */
79971
79972-static void free_compound_page(struct page *page)
79973+void free_compound_page(struct page *page)
79974 {
79975 __free_pages_ok(page, compound_order(page));
79976 }
79977@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79978 int bad = 0;
79979 int wasMlocked = __TestClearPageMlocked(page);
79980
79981+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79982+ unsigned long index = 1UL << order;
79983+#endif
79984+
79985 kmemcheck_free_shadow(page, order);
79986
79987 for (i = 0 ; i < (1 << order) ; ++i)
79988@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79989 debug_check_no_obj_freed(page_address(page),
79990 PAGE_SIZE << order);
79991 }
79992+
79993+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79994+ for (; index; --index)
79995+ sanitize_highpage(page + index - 1);
79996+#endif
79997+
79998 arch_free_page(page, order);
79999 kernel_map_pages(page, 1 << order, 0);
80000
80001@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
80002 arch_alloc_page(page, order);
80003 kernel_map_pages(page, 1 << order, 1);
80004
80005+#ifndef CONFIG_PAX_MEMORY_SANITIZE
80006 if (gfp_flags & __GFP_ZERO)
80007 prep_zero_page(page, order, gfp_flags);
80008+#endif
80009
80010 if (order && (gfp_flags & __GFP_COMP))
80011 prep_compound_page(page, order);
80012@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
80013 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
80014 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
80015 }
80016+
80017+#ifdef CONFIG_PAX_MEMORY_SANITIZE
80018+ sanitize_highpage(page);
80019+#endif
80020+
80021 arch_free_page(page, 0);
80022 kernel_map_pages(page, 1, 0);
80023
80024@@ -2179,6 +2196,8 @@ void show_free_areas(void)
80025 int cpu;
80026 struct zone *zone;
80027
80028+ pax_track_stack();
80029+
80030 for_each_populated_zone(zone) {
80031 show_node(zone);
80032 printk("%s per-cpu:\n", zone->name);
80033@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
80034 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
80035 }
80036 #else
80037-static void inline setup_usemap(struct pglist_data *pgdat,
80038+static inline void setup_usemap(struct pglist_data *pgdat,
80039 struct zone *zone, unsigned long zonesize) {}
80040 #endif /* CONFIG_SPARSEMEM */
80041
80042diff --git a/mm/percpu.c b/mm/percpu.c
80043index c90614a..5f7b7b8 100644
80044--- a/mm/percpu.c
80045+++ b/mm/percpu.c
80046@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
80047 static unsigned int pcpu_high_unit_cpu __read_mostly;
80048
80049 /* the address of the first chunk which starts with the kernel static area */
80050-void *pcpu_base_addr __read_mostly;
80051+void *pcpu_base_addr __read_only;
80052 EXPORT_SYMBOL_GPL(pcpu_base_addr);
80053
80054 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
80055diff --git a/mm/rmap.c b/mm/rmap.c
80056index dd43373..d848cd7 100644
80057--- a/mm/rmap.c
80058+++ b/mm/rmap.c
80059@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
80060 /* page_table_lock to protect against threads */
80061 spin_lock(&mm->page_table_lock);
80062 if (likely(!vma->anon_vma)) {
80063+
80064+#ifdef CONFIG_PAX_SEGMEXEC
80065+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
80066+
80067+ if (vma_m) {
80068+ BUG_ON(vma_m->anon_vma);
80069+ vma_m->anon_vma = anon_vma;
80070+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
80071+ }
80072+#endif
80073+
80074 vma->anon_vma = anon_vma;
80075 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
80076 allocated = NULL;
80077diff --git a/mm/shmem.c b/mm/shmem.c
80078index 3e0005b..1d659a8 100644
80079--- a/mm/shmem.c
80080+++ b/mm/shmem.c
80081@@ -31,7 +31,7 @@
80082 #include <linux/swap.h>
80083 #include <linux/ima.h>
80084
80085-static struct vfsmount *shm_mnt;
80086+struct vfsmount *shm_mnt;
80087
80088 #ifdef CONFIG_SHMEM
80089 /*
80090@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
80091 goto unlock;
80092 }
80093 entry = shmem_swp_entry(info, index, NULL);
80094+ if (!entry)
80095+ goto unlock;
80096 if (entry->val) {
80097 /*
80098 * The more uptodate page coming down from a stacked
80099@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
80100 struct vm_area_struct pvma;
80101 struct page *page;
80102
80103+ pax_track_stack();
80104+
80105 spol = mpol_cond_copy(&mpol,
80106 mpol_shared_policy_lookup(&info->policy, idx));
80107
80108@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
80109
80110 info = SHMEM_I(inode);
80111 inode->i_size = len-1;
80112- if (len <= (char *)inode - (char *)info) {
80113+ if (len <= (char *)inode - (char *)info && len <= 64) {
80114 /* do it inline */
80115 memcpy(info, symname, len);
80116 inode->i_op = &shmem_symlink_inline_operations;
80117@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
80118 int err = -ENOMEM;
80119
80120 /* Round up to L1_CACHE_BYTES to resist false sharing */
80121- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
80122- L1_CACHE_BYTES), GFP_KERNEL);
80123+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
80124 if (!sbinfo)
80125 return -ENOMEM;
80126
80127diff --git a/mm/slab.c b/mm/slab.c
80128index c8d466a..909e01e 100644
80129--- a/mm/slab.c
80130+++ b/mm/slab.c
80131@@ -174,7 +174,7 @@
80132
80133 /* Legal flag mask for kmem_cache_create(). */
80134 #if DEBUG
80135-# define CREATE_MASK (SLAB_RED_ZONE | \
80136+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
80137 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
80138 SLAB_CACHE_DMA | \
80139 SLAB_STORE_USER | \
80140@@ -182,7 +182,7 @@
80141 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
80142 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
80143 #else
80144-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
80145+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
80146 SLAB_CACHE_DMA | \
80147 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
80148 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
80149@@ -308,7 +308,7 @@ struct kmem_list3 {
80150 * Need this for bootstrapping a per node allocator.
80151 */
80152 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
80153-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
80154+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
80155 #define CACHE_CACHE 0
80156 #define SIZE_AC MAX_NUMNODES
80157 #define SIZE_L3 (2 * MAX_NUMNODES)
80158@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
80159 if ((x)->max_freeable < i) \
80160 (x)->max_freeable = i; \
80161 } while (0)
80162-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
80163-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
80164-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
80165-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
80166+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
80167+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
80168+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
80169+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
80170 #else
80171 #define STATS_INC_ACTIVE(x) do { } while (0)
80172 #define STATS_DEC_ACTIVE(x) do { } while (0)
80173@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
80174 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
80175 */
80176 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
80177- const struct slab *slab, void *obj)
80178+ const struct slab *slab, const void *obj)
80179 {
80180 u32 offset = (obj - slab->s_mem);
80181 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
80182@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
80183 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
80184 sizes[INDEX_AC].cs_size,
80185 ARCH_KMALLOC_MINALIGN,
80186- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80187+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80188 NULL);
80189
80190 if (INDEX_AC != INDEX_L3) {
80191@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
80192 kmem_cache_create(names[INDEX_L3].name,
80193 sizes[INDEX_L3].cs_size,
80194 ARCH_KMALLOC_MINALIGN,
80195- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80196+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80197 NULL);
80198 }
80199
80200@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
80201 sizes->cs_cachep = kmem_cache_create(names->name,
80202 sizes->cs_size,
80203 ARCH_KMALLOC_MINALIGN,
80204- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
80205+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
80206 NULL);
80207 }
80208 #ifdef CONFIG_ZONE_DMA
80209@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
80210 }
80211 /* cpu stats */
80212 {
80213- unsigned long allochit = atomic_read(&cachep->allochit);
80214- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
80215- unsigned long freehit = atomic_read(&cachep->freehit);
80216- unsigned long freemiss = atomic_read(&cachep->freemiss);
80217+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
80218+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
80219+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
80220+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
80221
80222 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
80223 allochit, allocmiss, freehit, freemiss);
80224@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
80225
80226 static int __init slab_proc_init(void)
80227 {
80228- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
80229+ mode_t gr_mode = S_IRUGO;
80230+
80231+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80232+ gr_mode = S_IRUSR;
80233+#endif
80234+
80235+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
80236 #ifdef CONFIG_DEBUG_SLAB_LEAK
80237- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
80238+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
80239 #endif
80240 return 0;
80241 }
80242 module_init(slab_proc_init);
80243 #endif
80244
80245+void check_object_size(const void *ptr, unsigned long n, bool to)
80246+{
80247+
80248+#ifdef CONFIG_PAX_USERCOPY
80249+ struct page *page;
80250+ struct kmem_cache *cachep = NULL;
80251+ struct slab *slabp;
80252+ unsigned int objnr;
80253+ unsigned long offset;
80254+ const char *type;
80255+
80256+ if (!n)
80257+ return;
80258+
80259+ type = "<null>";
80260+ if (ZERO_OR_NULL_PTR(ptr))
80261+ goto report;
80262+
80263+ if (!virt_addr_valid(ptr))
80264+ return;
80265+
80266+ page = virt_to_head_page(ptr);
80267+
80268+ type = "<process stack>";
80269+ if (!PageSlab(page)) {
80270+ if (object_is_on_stack(ptr, n) == -1)
80271+ goto report;
80272+ return;
80273+ }
80274+
80275+ cachep = page_get_cache(page);
80276+ type = cachep->name;
80277+ if (!(cachep->flags & SLAB_USERCOPY))
80278+ goto report;
80279+
80280+ slabp = page_get_slab(page);
80281+ objnr = obj_to_index(cachep, slabp, ptr);
80282+ BUG_ON(objnr >= cachep->num);
80283+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
80284+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
80285+ return;
80286+
80287+report:
80288+ pax_report_usercopy(ptr, n, to, type);
80289+#endif
80290+
80291+}
80292+EXPORT_SYMBOL(check_object_size);
80293+
80294 /**
80295 * ksize - get the actual amount of memory allocated for a given object
80296 * @objp: Pointer to the object
80297diff --git a/mm/slob.c b/mm/slob.c
80298index 837ebd6..0bd23bc 100644
80299--- a/mm/slob.c
80300+++ b/mm/slob.c
80301@@ -29,7 +29,7 @@
80302 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
80303 * alloc_pages() directly, allocating compound pages so the page order
80304 * does not have to be separately tracked, and also stores the exact
80305- * allocation size in page->private so that it can be used to accurately
80306+ * allocation size in slob_page->size so that it can be used to accurately
80307 * provide ksize(). These objects are detected in kfree() because slob_page()
80308 * is false for them.
80309 *
80310@@ -58,6 +58,7 @@
80311 */
80312
80313 #include <linux/kernel.h>
80314+#include <linux/sched.h>
80315 #include <linux/slab.h>
80316 #include <linux/mm.h>
80317 #include <linux/swap.h> /* struct reclaim_state */
80318@@ -100,7 +101,8 @@ struct slob_page {
80319 unsigned long flags; /* mandatory */
80320 atomic_t _count; /* mandatory */
80321 slobidx_t units; /* free units left in page */
80322- unsigned long pad[2];
80323+ unsigned long pad[1];
80324+ unsigned long size; /* size when >=PAGE_SIZE */
80325 slob_t *free; /* first free slob_t in page */
80326 struct list_head list; /* linked list of free pages */
80327 };
80328@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
80329 */
80330 static inline int is_slob_page(struct slob_page *sp)
80331 {
80332- return PageSlab((struct page *)sp);
80333+ return PageSlab((struct page *)sp) && !sp->size;
80334 }
80335
80336 static inline void set_slob_page(struct slob_page *sp)
80337@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
80338
80339 static inline struct slob_page *slob_page(const void *addr)
80340 {
80341- return (struct slob_page *)virt_to_page(addr);
80342+ return (struct slob_page *)virt_to_head_page(addr);
80343 }
80344
80345 /*
80346@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
80347 /*
80348 * Return the size of a slob block.
80349 */
80350-static slobidx_t slob_units(slob_t *s)
80351+static slobidx_t slob_units(const slob_t *s)
80352 {
80353 if (s->units > 0)
80354 return s->units;
80355@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
80356 /*
80357 * Return the next free slob block pointer after this one.
80358 */
80359-static slob_t *slob_next(slob_t *s)
80360+static slob_t *slob_next(const slob_t *s)
80361 {
80362 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
80363 slobidx_t next;
80364@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
80365 /*
80366 * Returns true if s is the last free block in its page.
80367 */
80368-static int slob_last(slob_t *s)
80369+static int slob_last(const slob_t *s)
80370 {
80371 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
80372 }
80373@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
80374 if (!page)
80375 return NULL;
80376
80377+ set_slob_page(page);
80378 return page_address(page);
80379 }
80380
80381@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
80382 if (!b)
80383 return NULL;
80384 sp = slob_page(b);
80385- set_slob_page(sp);
80386
80387 spin_lock_irqsave(&slob_lock, flags);
80388 sp->units = SLOB_UNITS(PAGE_SIZE);
80389 sp->free = b;
80390+ sp->size = 0;
80391 INIT_LIST_HEAD(&sp->list);
80392 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
80393 set_slob_page_free(sp, slob_list);
80394@@ -475,10 +478,9 @@ out:
80395 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
80396 #endif
80397
80398-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80399+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
80400 {
80401- unsigned int *m;
80402- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80403+ slob_t *m;
80404 void *ret;
80405
80406 lockdep_trace_alloc(gfp);
80407@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80408
80409 if (!m)
80410 return NULL;
80411- *m = size;
80412+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
80413+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
80414+ m[0].units = size;
80415+ m[1].units = align;
80416 ret = (void *)m + align;
80417
80418 trace_kmalloc_node(_RET_IP_, ret,
80419@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80420
80421 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
80422 if (ret) {
80423- struct page *page;
80424- page = virt_to_page(ret);
80425- page->private = size;
80426+ struct slob_page *sp;
80427+ sp = slob_page(ret);
80428+ sp->size = size;
80429 }
80430
80431 trace_kmalloc_node(_RET_IP_, ret,
80432 size, PAGE_SIZE << order, gfp, node);
80433 }
80434
80435- kmemleak_alloc(ret, size, 1, gfp);
80436+ return ret;
80437+}
80438+
80439+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
80440+{
80441+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80442+ void *ret = __kmalloc_node_align(size, gfp, node, align);
80443+
80444+ if (!ZERO_OR_NULL_PTR(ret))
80445+ kmemleak_alloc(ret, size, 1, gfp);
80446 return ret;
80447 }
80448 EXPORT_SYMBOL(__kmalloc_node);
80449@@ -528,13 +542,92 @@ void kfree(const void *block)
80450 sp = slob_page(block);
80451 if (is_slob_page(sp)) {
80452 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80453- unsigned int *m = (unsigned int *)(block - align);
80454- slob_free(m, *m + align);
80455- } else
80456+ slob_t *m = (slob_t *)(block - align);
80457+ slob_free(m, m[0].units + align);
80458+ } else {
80459+ clear_slob_page(sp);
80460+ free_slob_page(sp);
80461+ sp->size = 0;
80462 put_page(&sp->page);
80463+ }
80464 }
80465 EXPORT_SYMBOL(kfree);
80466
80467+void check_object_size(const void *ptr, unsigned long n, bool to)
80468+{
80469+
80470+#ifdef CONFIG_PAX_USERCOPY
80471+ struct slob_page *sp;
80472+ const slob_t *free;
80473+ const void *base;
80474+ unsigned long flags;
80475+ const char *type;
80476+
80477+ if (!n)
80478+ return;
80479+
80480+ type = "<null>";
80481+ if (ZERO_OR_NULL_PTR(ptr))
80482+ goto report;
80483+
80484+ if (!virt_addr_valid(ptr))
80485+ return;
80486+
80487+ type = "<process stack>";
80488+ sp = slob_page(ptr);
80489+ if (!PageSlab((struct page *)sp)) {
80490+ if (object_is_on_stack(ptr, n) == -1)
80491+ goto report;
80492+ return;
80493+ }
80494+
80495+ type = "<slob>";
80496+ if (sp->size) {
80497+ base = page_address(&sp->page);
80498+ if (base <= ptr && n <= sp->size - (ptr - base))
80499+ return;
80500+ goto report;
80501+ }
80502+
80503+ /* some tricky double walking to find the chunk */
80504+ spin_lock_irqsave(&slob_lock, flags);
80505+ base = (void *)((unsigned long)ptr & PAGE_MASK);
80506+ free = sp->free;
80507+
80508+ while (!slob_last(free) && (void *)free <= ptr) {
80509+ base = free + slob_units(free);
80510+ free = slob_next(free);
80511+ }
80512+
80513+ while (base < (void *)free) {
80514+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
80515+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
80516+ int offset;
80517+
80518+ if (ptr < base + align)
80519+ break;
80520+
80521+ offset = ptr - base - align;
80522+ if (offset >= m) {
80523+ base += size;
80524+ continue;
80525+ }
80526+
80527+ if (n > m - offset)
80528+ break;
80529+
80530+ spin_unlock_irqrestore(&slob_lock, flags);
80531+ return;
80532+ }
80533+
80534+ spin_unlock_irqrestore(&slob_lock, flags);
80535+report:
80536+ pax_report_usercopy(ptr, n, to, type);
80537+#endif
80538+
80539+}
80540+EXPORT_SYMBOL(check_object_size);
80541+
80542 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
80543 size_t ksize(const void *block)
80544 {
80545@@ -547,10 +640,10 @@ size_t ksize(const void *block)
80546 sp = slob_page(block);
80547 if (is_slob_page(sp)) {
80548 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
80549- unsigned int *m = (unsigned int *)(block - align);
80550- return SLOB_UNITS(*m) * SLOB_UNIT;
80551+ slob_t *m = (slob_t *)(block - align);
80552+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
80553 } else
80554- return sp->page.private;
80555+ return sp->size;
80556 }
80557 EXPORT_SYMBOL(ksize);
80558
80559@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80560 {
80561 struct kmem_cache *c;
80562
80563+#ifdef CONFIG_PAX_USERCOPY
80564+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
80565+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
80566+#else
80567 c = slob_alloc(sizeof(struct kmem_cache),
80568 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
80569+#endif
80570
80571 if (c) {
80572 c->name = name;
80573@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
80574 {
80575 void *b;
80576
80577+#ifdef CONFIG_PAX_USERCOPY
80578+ b = __kmalloc_node_align(c->size, flags, node, c->align);
80579+#else
80580 if (c->size < PAGE_SIZE) {
80581 b = slob_alloc(c->size, flags, c->align, node);
80582 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80583 SLOB_UNITS(c->size) * SLOB_UNIT,
80584 flags, node);
80585 } else {
80586+ struct slob_page *sp;
80587+
80588 b = slob_new_pages(flags, get_order(c->size), node);
80589+ sp = slob_page(b);
80590+ sp->size = c->size;
80591 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
80592 PAGE_SIZE << get_order(c->size),
80593 flags, node);
80594 }
80595+#endif
80596
80597 if (c->ctor)
80598 c->ctor(b);
80599@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
80600
80601 static void __kmem_cache_free(void *b, int size)
80602 {
80603- if (size < PAGE_SIZE)
80604+ struct slob_page *sp = slob_page(b);
80605+
80606+ if (is_slob_page(sp))
80607 slob_free(b, size);
80608- else
80609+ else {
80610+ clear_slob_page(sp);
80611+ free_slob_page(sp);
80612+ sp->size = 0;
80613 slob_free_pages(b, get_order(size));
80614+ }
80615 }
80616
80617 static void kmem_rcu_free(struct rcu_head *head)
80618@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
80619
80620 void kmem_cache_free(struct kmem_cache *c, void *b)
80621 {
80622+ int size = c->size;
80623+
80624+#ifdef CONFIG_PAX_USERCOPY
80625+ if (size + c->align < PAGE_SIZE) {
80626+ size += c->align;
80627+ b -= c->align;
80628+ }
80629+#endif
80630+
80631 kmemleak_free_recursive(b, c->flags);
80632 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
80633 struct slob_rcu *slob_rcu;
80634- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
80635+ slob_rcu = b + (size - sizeof(struct slob_rcu));
80636 INIT_RCU_HEAD(&slob_rcu->head);
80637- slob_rcu->size = c->size;
80638+ slob_rcu->size = size;
80639 call_rcu(&slob_rcu->head, kmem_rcu_free);
80640 } else {
80641- __kmem_cache_free(b, c->size);
80642+ __kmem_cache_free(b, size);
80643 }
80644
80645+#ifdef CONFIG_PAX_USERCOPY
80646+ trace_kfree(_RET_IP_, b);
80647+#else
80648 trace_kmem_cache_free(_RET_IP_, b);
80649+#endif
80650+
80651 }
80652 EXPORT_SYMBOL(kmem_cache_free);
80653
80654diff --git a/mm/slub.c b/mm/slub.c
80655index 4996fc7..87e01d0 100644
80656--- a/mm/slub.c
80657+++ b/mm/slub.c
80658@@ -201,7 +201,7 @@ struct track {
80659
80660 enum track_item { TRACK_ALLOC, TRACK_FREE };
80661
80662-#ifdef CONFIG_SLUB_DEBUG
80663+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80664 static int sysfs_slab_add(struct kmem_cache *);
80665 static int sysfs_slab_alias(struct kmem_cache *, const char *);
80666 static void sysfs_slab_remove(struct kmem_cache *);
80667@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
80668 if (!t->addr)
80669 return;
80670
80671- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
80672+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
80673 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
80674 }
80675
80676@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
80677
80678 page = virt_to_head_page(x);
80679
80680+ BUG_ON(!PageSlab(page));
80681+
80682 slab_free(s, page, x, _RET_IP_);
80683
80684 trace_kmem_cache_free(_RET_IP_, x);
80685@@ -1937,7 +1939,7 @@ static int slub_min_objects;
80686 * Merge control. If this is set then no merging of slab caches will occur.
80687 * (Could be removed. This was introduced to pacify the merge skeptics.)
80688 */
80689-static int slub_nomerge;
80690+static int slub_nomerge = 1;
80691
80692 /*
80693 * Calculate the order of allocation given an slab object size.
80694@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
80695 * list to avoid pounding the page allocator excessively.
80696 */
80697 set_min_partial(s, ilog2(s->size));
80698- s->refcount = 1;
80699+ atomic_set(&s->refcount, 1);
80700 #ifdef CONFIG_NUMA
80701 s->remote_node_defrag_ratio = 1000;
80702 #endif
80703@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
80704 void kmem_cache_destroy(struct kmem_cache *s)
80705 {
80706 down_write(&slub_lock);
80707- s->refcount--;
80708- if (!s->refcount) {
80709+ if (atomic_dec_and_test(&s->refcount)) {
80710 list_del(&s->list);
80711 up_write(&slub_lock);
80712 if (kmem_cache_close(s)) {
80713@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
80714 __setup("slub_nomerge", setup_slub_nomerge);
80715
80716 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
80717- const char *name, int size, gfp_t gfp_flags)
80718+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
80719 {
80720- unsigned int flags = 0;
80721-
80722 if (gfp_flags & SLUB_DMA)
80723- flags = SLAB_CACHE_DMA;
80724+ flags |= SLAB_CACHE_DMA;
80725
80726 /*
80727 * This function is called with IRQs disabled during early-boot on
80728@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
80729 EXPORT_SYMBOL(__kmalloc_node);
80730 #endif
80731
80732+void check_object_size(const void *ptr, unsigned long n, bool to)
80733+{
80734+
80735+#ifdef CONFIG_PAX_USERCOPY
80736+ struct page *page;
80737+ struct kmem_cache *s = NULL;
80738+ unsigned long offset;
80739+ const char *type;
80740+
80741+ if (!n)
80742+ return;
80743+
80744+ type = "<null>";
80745+ if (ZERO_OR_NULL_PTR(ptr))
80746+ goto report;
80747+
80748+ if (!virt_addr_valid(ptr))
80749+ return;
80750+
80751+ page = get_object_page(ptr);
80752+
80753+ type = "<process stack>";
80754+ if (!page) {
80755+ if (object_is_on_stack(ptr, n) == -1)
80756+ goto report;
80757+ return;
80758+ }
80759+
80760+ s = page->slab;
80761+ type = s->name;
80762+ if (!(s->flags & SLAB_USERCOPY))
80763+ goto report;
80764+
80765+ offset = (ptr - page_address(page)) % s->size;
80766+ if (offset <= s->objsize && n <= s->objsize - offset)
80767+ return;
80768+
80769+report:
80770+ pax_report_usercopy(ptr, n, to, type);
80771+#endif
80772+
80773+}
80774+EXPORT_SYMBOL(check_object_size);
80775+
80776 size_t ksize(const void *object)
80777 {
80778 struct page *page;
80779@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80780 * kmem_cache_open for slab_state == DOWN.
80781 */
80782 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80783- sizeof(struct kmem_cache_node), GFP_NOWAIT);
80784- kmalloc_caches[0].refcount = -1;
80785+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80786+ atomic_set(&kmalloc_caches[0].refcount, -1);
80787 caches++;
80788
80789 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80790@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80791 /* Caches that are not of the two-to-the-power-of size */
80792 if (KMALLOC_MIN_SIZE <= 32) {
80793 create_kmalloc_cache(&kmalloc_caches[1],
80794- "kmalloc-96", 96, GFP_NOWAIT);
80795+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80796 caches++;
80797 }
80798 if (KMALLOC_MIN_SIZE <= 64) {
80799 create_kmalloc_cache(&kmalloc_caches[2],
80800- "kmalloc-192", 192, GFP_NOWAIT);
80801+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80802 caches++;
80803 }
80804
80805 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80806 create_kmalloc_cache(&kmalloc_caches[i],
80807- "kmalloc", 1 << i, GFP_NOWAIT);
80808+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80809 caches++;
80810 }
80811
80812@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80813 /*
80814 * We may have set a slab to be unmergeable during bootstrap.
80815 */
80816- if (s->refcount < 0)
80817+ if (atomic_read(&s->refcount) < 0)
80818 return 1;
80819
80820 return 0;
80821@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80822 if (s) {
80823 int cpu;
80824
80825- s->refcount++;
80826+ atomic_inc(&s->refcount);
80827 /*
80828 * Adjust the object sizes so that we clear
80829 * the complete object on kzalloc.
80830@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80831
80832 if (sysfs_slab_alias(s, name)) {
80833 down_write(&slub_lock);
80834- s->refcount--;
80835+ atomic_dec(&s->refcount);
80836 up_write(&slub_lock);
80837 goto err;
80838 }
80839@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80840
80841 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80842 {
80843- return sprintf(buf, "%d\n", s->refcount - 1);
80844+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80845 }
80846 SLAB_ATTR_RO(aliases);
80847
80848@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80849 kfree(s);
80850 }
80851
80852-static struct sysfs_ops slab_sysfs_ops = {
80853+static const struct sysfs_ops slab_sysfs_ops = {
80854 .show = slab_attr_show,
80855 .store = slab_attr_store,
80856 };
80857@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80858 return 0;
80859 }
80860
80861-static struct kset_uevent_ops slab_uevent_ops = {
80862+static const struct kset_uevent_ops slab_uevent_ops = {
80863 .filter = uevent_filter,
80864 };
80865
80866@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80867 return name;
80868 }
80869
80870+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80871 static int sysfs_slab_add(struct kmem_cache *s)
80872 {
80873 int err;
80874@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80875 kobject_del(&s->kobj);
80876 kobject_put(&s->kobj);
80877 }
80878+#endif
80879
80880 /*
80881 * Need to buffer aliases during bootup until sysfs becomes
80882@@ -4632,6 +4677,7 @@ struct saved_alias {
80883
80884 static struct saved_alias *alias_list;
80885
80886+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80887 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80888 {
80889 struct saved_alias *al;
80890@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80891 alias_list = al;
80892 return 0;
80893 }
80894+#endif
80895
80896 static int __init slab_sysfs_init(void)
80897 {
80898@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80899
80900 static int __init slab_proc_init(void)
80901 {
80902- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80903+ mode_t gr_mode = S_IRUGO;
80904+
80905+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80906+ gr_mode = S_IRUSR;
80907+#endif
80908+
80909+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80910 return 0;
80911 }
80912 module_init(slab_proc_init);
80913diff --git a/mm/swap.c b/mm/swap.c
80914index 308e57d..5de19c0 100644
80915--- a/mm/swap.c
80916+++ b/mm/swap.c
80917@@ -30,6 +30,7 @@
80918 #include <linux/notifier.h>
80919 #include <linux/backing-dev.h>
80920 #include <linux/memcontrol.h>
80921+#include <linux/hugetlb.h>
80922
80923 #include "internal.h"
80924
80925@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80926 compound_page_dtor *dtor;
80927
80928 dtor = get_compound_page_dtor(page);
80929+ if (!PageHuge(page))
80930+ BUG_ON(dtor != free_compound_page);
80931 (*dtor)(page);
80932 }
80933 }
80934diff --git a/mm/util.c b/mm/util.c
80935index e48b493..24a601d 100644
80936--- a/mm/util.c
80937+++ b/mm/util.c
80938@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80939 void arch_pick_mmap_layout(struct mm_struct *mm)
80940 {
80941 mm->mmap_base = TASK_UNMAPPED_BASE;
80942+
80943+#ifdef CONFIG_PAX_RANDMMAP
80944+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80945+ mm->mmap_base += mm->delta_mmap;
80946+#endif
80947+
80948 mm->get_unmapped_area = arch_get_unmapped_area;
80949 mm->unmap_area = arch_unmap_area;
80950 }
80951diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80952index f34ffd0..e60c44f 100644
80953--- a/mm/vmalloc.c
80954+++ b/mm/vmalloc.c
80955@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80956
80957 pte = pte_offset_kernel(pmd, addr);
80958 do {
80959- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80960- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80961+
80962+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80963+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80964+ BUG_ON(!pte_exec(*pte));
80965+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80966+ continue;
80967+ }
80968+#endif
80969+
80970+ {
80971+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80972+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80973+ }
80974 } while (pte++, addr += PAGE_SIZE, addr != end);
80975 }
80976
80977@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80978 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80979 {
80980 pte_t *pte;
80981+ int ret = -ENOMEM;
80982
80983 /*
80984 * nr is a running index into the array which helps higher level
80985@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80986 pte = pte_alloc_kernel(pmd, addr);
80987 if (!pte)
80988 return -ENOMEM;
80989+
80990+ pax_open_kernel();
80991 do {
80992 struct page *page = pages[*nr];
80993
80994- if (WARN_ON(!pte_none(*pte)))
80995- return -EBUSY;
80996- if (WARN_ON(!page))
80997- return -ENOMEM;
80998+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80999+ if (!(pgprot_val(prot) & _PAGE_NX))
81000+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
81001+ else
81002+#endif
81003+
81004+ if (WARN_ON(!pte_none(*pte))) {
81005+ ret = -EBUSY;
81006+ goto out;
81007+ }
81008+ if (WARN_ON(!page)) {
81009+ ret = -ENOMEM;
81010+ goto out;
81011+ }
81012 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
81013 (*nr)++;
81014 } while (pte++, addr += PAGE_SIZE, addr != end);
81015- return 0;
81016+ ret = 0;
81017+out:
81018+ pax_close_kernel();
81019+ return ret;
81020 }
81021
81022 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
81023@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
81024 * and fall back on vmalloc() if that fails. Others
81025 * just put it in the vmalloc space.
81026 */
81027-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
81028+#ifdef CONFIG_MODULES
81029+#ifdef MODULES_VADDR
81030 unsigned long addr = (unsigned long)x;
81031 if (addr >= MODULES_VADDR && addr < MODULES_END)
81032 return 1;
81033 #endif
81034+
81035+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
81036+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
81037+ return 1;
81038+#endif
81039+
81040+#endif
81041+
81042 return is_vmalloc_addr(x);
81043 }
81044
81045@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
81046
81047 if (!pgd_none(*pgd)) {
81048 pud_t *pud = pud_offset(pgd, addr);
81049+#ifdef CONFIG_X86
81050+ if (!pud_large(*pud))
81051+#endif
81052 if (!pud_none(*pud)) {
81053 pmd_t *pmd = pmd_offset(pud, addr);
81054+#ifdef CONFIG_X86
81055+ if (!pmd_large(*pmd))
81056+#endif
81057 if (!pmd_none(*pmd)) {
81058 pte_t *ptep, pte;
81059
81060@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
81061 struct rb_node *tmp;
81062
81063 while (*p) {
81064- struct vmap_area *tmp;
81065+ struct vmap_area *varea;
81066
81067 parent = *p;
81068- tmp = rb_entry(parent, struct vmap_area, rb_node);
81069- if (va->va_start < tmp->va_end)
81070+ varea = rb_entry(parent, struct vmap_area, rb_node);
81071+ if (va->va_start < varea->va_end)
81072 p = &(*p)->rb_left;
81073- else if (va->va_end > tmp->va_start)
81074+ else if (va->va_end > varea->va_start)
81075 p = &(*p)->rb_right;
81076 else
81077 BUG();
81078@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
81079 struct vm_struct *area;
81080
81081 BUG_ON(in_interrupt());
81082+
81083+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81084+ if (flags & VM_KERNEXEC) {
81085+ if (start != VMALLOC_START || end != VMALLOC_END)
81086+ return NULL;
81087+ start = (unsigned long)MODULES_EXEC_VADDR;
81088+ end = (unsigned long)MODULES_EXEC_END;
81089+ }
81090+#endif
81091+
81092 if (flags & VM_IOREMAP) {
81093 int bit = fls(size);
81094
81095@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
81096 if (count > totalram_pages)
81097 return NULL;
81098
81099+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81100+ if (!(pgprot_val(prot) & _PAGE_NX))
81101+ flags |= VM_KERNEXEC;
81102+#endif
81103+
81104 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
81105 __builtin_return_address(0));
81106 if (!area)
81107@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
81108 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
81109 return NULL;
81110
81111+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
81112+ if (!(pgprot_val(prot) & _PAGE_NX))
81113+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
81114+ VMALLOC_START, VMALLOC_END, node,
81115+ gfp_mask, caller);
81116+ else
81117+#endif
81118+
81119 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
81120 VMALLOC_START, VMALLOC_END, node,
81121 gfp_mask, caller);
81122@@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
81123 return addr;
81124 }
81125
81126+#undef __vmalloc
81127 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
81128 {
81129 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
81130@@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
81131 * For tight control over page level allocator and protection flags
81132 * use __vmalloc() instead.
81133 */
81134+#undef vmalloc
81135 void *vmalloc(unsigned long size)
81136 {
81137 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
81138@@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
81139 * The resulting memory area is zeroed so it can be mapped to userspace
81140 * without leaking data.
81141 */
81142+#undef vmalloc_user
81143 void *vmalloc_user(unsigned long size)
81144 {
81145 struct vm_struct *area;
81146@@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
81147 * For tight control over page level allocator and protection flags
81148 * use __vmalloc() instead.
81149 */
81150+#undef vmalloc_node
81151 void *vmalloc_node(unsigned long size, int node)
81152 {
81153 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
81154@@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
81155 * For tight control over page level allocator and protection flags
81156 * use __vmalloc() instead.
81157 */
81158-
81159+#undef vmalloc_exec
81160 void *vmalloc_exec(unsigned long size)
81161 {
81162- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
81163+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
81164 -1, __builtin_return_address(0));
81165 }
81166
81167@@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
81168 * Allocate enough 32bit PA addressable pages to cover @size from the
81169 * page level allocator and map them into contiguous kernel virtual space.
81170 */
81171+#undef vmalloc_32
81172 void *vmalloc_32(unsigned long size)
81173 {
81174 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
81175@@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
81176 * The resulting memory area is 32bit addressable and zeroed so it can be
81177 * mapped to userspace without leaking data.
81178 */
81179+#undef vmalloc_32_user
81180 void *vmalloc_32_user(unsigned long size)
81181 {
81182 struct vm_struct *area;
81183@@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
81184 unsigned long uaddr = vma->vm_start;
81185 unsigned long usize = vma->vm_end - vma->vm_start;
81186
81187+ BUG_ON(vma->vm_mirror);
81188+
81189 if ((PAGE_SIZE-1) & (unsigned long)addr)
81190 return -EINVAL;
81191
81192diff --git a/mm/vmstat.c b/mm/vmstat.c
81193index 42d76c6..5643dc4 100644
81194--- a/mm/vmstat.c
81195+++ b/mm/vmstat.c
81196@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
81197 *
81198 * vm_stat contains the global counters
81199 */
81200-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81201+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
81202 EXPORT_SYMBOL(vm_stat);
81203
81204 #ifdef CONFIG_SMP
81205@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
81206 v = p->vm_stat_diff[i];
81207 p->vm_stat_diff[i] = 0;
81208 local_irq_restore(flags);
81209- atomic_long_add(v, &zone->vm_stat[i]);
81210+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
81211 global_diff[i] += v;
81212 #ifdef CONFIG_NUMA
81213 /* 3 seconds idle till flush */
81214@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
81215
81216 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
81217 if (global_diff[i])
81218- atomic_long_add(global_diff[i], &vm_stat[i]);
81219+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
81220 }
81221
81222 #endif
81223@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
81224 start_cpu_timer(cpu);
81225 #endif
81226 #ifdef CONFIG_PROC_FS
81227- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
81228- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
81229- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
81230- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
81231+ {
81232+ mode_t gr_mode = S_IRUGO;
81233+#ifdef CONFIG_GRKERNSEC_PROC_ADD
81234+ gr_mode = S_IRUSR;
81235+#endif
81236+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
81237+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
81238+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
81239+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
81240+#else
81241+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
81242+#endif
81243+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
81244+ }
81245 #endif
81246 return 0;
81247 }
81248diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
81249index a29c5ab..6143f20 100644
81250--- a/net/8021q/vlan.c
81251+++ b/net/8021q/vlan.c
81252@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
81253 err = -EPERM;
81254 if (!capable(CAP_NET_ADMIN))
81255 break;
81256- if ((args.u.name_type >= 0) &&
81257- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
81258+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
81259 struct vlan_net *vn;
81260
81261 vn = net_generic(net, vlan_net_id);
81262diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
81263index a2d2984..f9eb711 100644
81264--- a/net/9p/trans_fd.c
81265+++ b/net/9p/trans_fd.c
81266@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
81267 oldfs = get_fs();
81268 set_fs(get_ds());
81269 /* The cast to a user pointer is valid due to the set_fs() */
81270- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
81271+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
81272 set_fs(oldfs);
81273
81274 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
81275diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
81276index 02cc7e7..4514f1b 100644
81277--- a/net/atm/atm_misc.c
81278+++ b/net/atm/atm_misc.c
81279@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
81280 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
81281 return 1;
81282 atm_return(vcc,truesize);
81283- atomic_inc(&vcc->stats->rx_drop);
81284+ atomic_inc_unchecked(&vcc->stats->rx_drop);
81285 return 0;
81286 }
81287
81288@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
81289 }
81290 }
81291 atm_return(vcc,guess);
81292- atomic_inc(&vcc->stats->rx_drop);
81293+ atomic_inc_unchecked(&vcc->stats->rx_drop);
81294 return NULL;
81295 }
81296
81297@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
81298
81299 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81300 {
81301-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81302+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81303 __SONET_ITEMS
81304 #undef __HANDLE_ITEM
81305 }
81306@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81307
81308 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
81309 {
81310-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
81311+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
81312 __SONET_ITEMS
81313 #undef __HANDLE_ITEM
81314 }
81315diff --git a/net/atm/lec.h b/net/atm/lec.h
81316index 9d14d19..5c145f3 100644
81317--- a/net/atm/lec.h
81318+++ b/net/atm/lec.h
81319@@ -48,7 +48,7 @@ struct lane2_ops {
81320 const u8 *tlvs, u32 sizeoftlvs);
81321 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
81322 const u8 *tlvs, u32 sizeoftlvs);
81323-};
81324+} __no_const;
81325
81326 /*
81327 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
81328diff --git a/net/atm/mpc.h b/net/atm/mpc.h
81329index 0919a88..a23d54e 100644
81330--- a/net/atm/mpc.h
81331+++ b/net/atm/mpc.h
81332@@ -33,7 +33,7 @@ struct mpoa_client {
81333 struct mpc_parameters parameters; /* parameters for this client */
81334
81335 const struct net_device_ops *old_ops;
81336- struct net_device_ops new_ops;
81337+ net_device_ops_no_const new_ops;
81338 };
81339
81340
81341diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
81342index 4504a4b..1733f1e 100644
81343--- a/net/atm/mpoa_caches.c
81344+++ b/net/atm/mpoa_caches.c
81345@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
81346 struct timeval now;
81347 struct k_message msg;
81348
81349+ pax_track_stack();
81350+
81351 do_gettimeofday(&now);
81352
81353 write_lock_irq(&client->egress_lock);
81354diff --git a/net/atm/proc.c b/net/atm/proc.c
81355index ab8419a..aa91497 100644
81356--- a/net/atm/proc.c
81357+++ b/net/atm/proc.c
81358@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
81359 const struct k_atm_aal_stats *stats)
81360 {
81361 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
81362- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
81363- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
81364- atomic_read(&stats->rx_drop));
81365+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
81366+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
81367+ atomic_read_unchecked(&stats->rx_drop));
81368 }
81369
81370 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
81371@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
81372 {
81373 struct sock *sk = sk_atm(vcc);
81374
81375+#ifdef CONFIG_GRKERNSEC_HIDESYM
81376+ seq_printf(seq, "%p ", NULL);
81377+#else
81378 seq_printf(seq, "%p ", vcc);
81379+#endif
81380+
81381 if (!vcc->dev)
81382 seq_printf(seq, "Unassigned ");
81383 else
81384@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
81385 {
81386 if (!vcc->dev)
81387 seq_printf(seq, sizeof(void *) == 4 ?
81388+#ifdef CONFIG_GRKERNSEC_HIDESYM
81389+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
81390+#else
81391 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
81392+#endif
81393 else
81394 seq_printf(seq, "%3d %3d %5d ",
81395 vcc->dev->number, vcc->vpi, vcc->vci);
81396diff --git a/net/atm/resources.c b/net/atm/resources.c
81397index 56b7322..c48b84e 100644
81398--- a/net/atm/resources.c
81399+++ b/net/atm/resources.c
81400@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
81401 static void copy_aal_stats(struct k_atm_aal_stats *from,
81402 struct atm_aal_stats *to)
81403 {
81404-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
81405+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
81406 __AAL_STAT_ITEMS
81407 #undef __HANDLE_ITEM
81408 }
81409@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
81410 static void subtract_aal_stats(struct k_atm_aal_stats *from,
81411 struct atm_aal_stats *to)
81412 {
81413-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
81414+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
81415 __AAL_STAT_ITEMS
81416 #undef __HANDLE_ITEM
81417 }
81418diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
81419index 8567d47..bba2292 100644
81420--- a/net/bridge/br_private.h
81421+++ b/net/bridge/br_private.h
81422@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
81423
81424 #ifdef CONFIG_SYSFS
81425 /* br_sysfs_if.c */
81426-extern struct sysfs_ops brport_sysfs_ops;
81427+extern const struct sysfs_ops brport_sysfs_ops;
81428 extern int br_sysfs_addif(struct net_bridge_port *p);
81429
81430 /* br_sysfs_br.c */
81431diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
81432index 9a52ac5..c97538e 100644
81433--- a/net/bridge/br_stp_if.c
81434+++ b/net/bridge/br_stp_if.c
81435@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
81436 char *envp[] = { NULL };
81437
81438 if (br->stp_enabled == BR_USER_STP) {
81439- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
81440+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
81441 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
81442 br->dev->name, r);
81443
81444diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
81445index 820643a..ce77fb3 100644
81446--- a/net/bridge/br_sysfs_if.c
81447+++ b/net/bridge/br_sysfs_if.c
81448@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
81449 return ret;
81450 }
81451
81452-struct sysfs_ops brport_sysfs_ops = {
81453+const struct sysfs_ops brport_sysfs_ops = {
81454 .show = brport_show,
81455 .store = brport_store,
81456 };
81457diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
81458index d73d47f..72df42a 100644
81459--- a/net/bridge/netfilter/ebtables.c
81460+++ b/net/bridge/netfilter/ebtables.c
81461@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
81462 unsigned int entries_size, nentries;
81463 char *entries;
81464
81465+ pax_track_stack();
81466+
81467 if (cmd == EBT_SO_GET_ENTRIES) {
81468 entries_size = t->private->entries_size;
81469 nentries = t->private->nentries;
81470diff --git a/net/can/bcm.c b/net/can/bcm.c
81471index 2ffd2e0..72a7486 100644
81472--- a/net/can/bcm.c
81473+++ b/net/can/bcm.c
81474@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
81475 struct bcm_sock *bo = bcm_sk(sk);
81476 struct bcm_op *op;
81477
81478+#ifdef CONFIG_GRKERNSEC_HIDESYM
81479+ seq_printf(m, ">>> socket %p", NULL);
81480+ seq_printf(m, " / sk %p", NULL);
81481+ seq_printf(m, " / bo %p", NULL);
81482+#else
81483 seq_printf(m, ">>> socket %p", sk->sk_socket);
81484 seq_printf(m, " / sk %p", sk);
81485 seq_printf(m, " / bo %p", bo);
81486+#endif
81487 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
81488 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
81489 seq_printf(m, " <<<\n");
81490diff --git a/net/compat.c b/net/compat.c
81491index 9559afc..ccd74e1 100644
81492--- a/net/compat.c
81493+++ b/net/compat.c
81494@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
81495 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
81496 __get_user(kmsg->msg_flags, &umsg->msg_flags))
81497 return -EFAULT;
81498- kmsg->msg_name = compat_ptr(tmp1);
81499- kmsg->msg_iov = compat_ptr(tmp2);
81500- kmsg->msg_control = compat_ptr(tmp3);
81501+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
81502+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
81503+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
81504 return 0;
81505 }
81506
81507@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81508 kern_msg->msg_name = NULL;
81509
81510 tot_len = iov_from_user_compat_to_kern(kern_iov,
81511- (struct compat_iovec __user *)kern_msg->msg_iov,
81512+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
81513 kern_msg->msg_iovlen);
81514 if (tot_len >= 0)
81515 kern_msg->msg_iov = kern_iov;
81516@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81517
81518 #define CMSG_COMPAT_FIRSTHDR(msg) \
81519 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
81520- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
81521+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
81522 (struct compat_cmsghdr __user *)NULL)
81523
81524 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
81525 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
81526 (ucmlen) <= (unsigned long) \
81527 ((mhdr)->msg_controllen - \
81528- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
81529+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
81530
81531 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
81532 struct compat_cmsghdr __user *cmsg, int cmsg_len)
81533 {
81534 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
81535- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
81536+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
81537 msg->msg_controllen)
81538 return NULL;
81539 return (struct compat_cmsghdr __user *)ptr;
81540@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81541 {
81542 struct compat_timeval ctv;
81543 struct compat_timespec cts[3];
81544- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81545+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81546 struct compat_cmsghdr cmhdr;
81547 int cmlen;
81548
81549@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
81550
81551 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
81552 {
81553- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
81554+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
81555 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
81556 int fdnum = scm->fp->count;
81557 struct file **fp = scm->fp->fp;
81558@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
81559 len = sizeof(ktime);
81560 old_fs = get_fs();
81561 set_fs(KERNEL_DS);
81562- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
81563+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
81564 set_fs(old_fs);
81565
81566 if (!err) {
81567@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81568 case MCAST_JOIN_GROUP:
81569 case MCAST_LEAVE_GROUP:
81570 {
81571- struct compat_group_req __user *gr32 = (void *)optval;
81572+ struct compat_group_req __user *gr32 = (void __user *)optval;
81573 struct group_req __user *kgr =
81574 compat_alloc_user_space(sizeof(struct group_req));
81575 u32 interface;
81576@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81577 case MCAST_BLOCK_SOURCE:
81578 case MCAST_UNBLOCK_SOURCE:
81579 {
81580- struct compat_group_source_req __user *gsr32 = (void *)optval;
81581+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
81582 struct group_source_req __user *kgsr = compat_alloc_user_space(
81583 sizeof(struct group_source_req));
81584 u32 interface;
81585@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
81586 }
81587 case MCAST_MSFILTER:
81588 {
81589- struct compat_group_filter __user *gf32 = (void *)optval;
81590+ struct compat_group_filter __user *gf32 = (void __user *)optval;
81591 struct group_filter __user *kgf;
81592 u32 interface, fmode, numsrc;
81593
81594diff --git a/net/core/dev.c b/net/core/dev.c
81595index 84a0705..575db4c 100644
81596--- a/net/core/dev.c
81597+++ b/net/core/dev.c
81598@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
81599 if (no_module && capable(CAP_NET_ADMIN))
81600 no_module = request_module("netdev-%s", name);
81601 if (no_module && capable(CAP_SYS_MODULE)) {
81602+#ifdef CONFIG_GRKERNSEC_MODHARDEN
81603+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
81604+#else
81605 if (!request_module("%s", name))
81606 pr_err("Loading kernel module for a network device "
81607 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
81608 "instead\n", name);
81609+#endif
81610 }
81611 }
81612 EXPORT_SYMBOL(dev_load);
81613@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
81614
81615 struct dev_gso_cb {
81616 void (*destructor)(struct sk_buff *skb);
81617-};
81618+} __no_const;
81619
81620 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
81621
81622@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
81623 }
81624 EXPORT_SYMBOL(netif_rx_ni);
81625
81626-static void net_tx_action(struct softirq_action *h)
81627+static void net_tx_action(void)
81628 {
81629 struct softnet_data *sd = &__get_cpu_var(softnet_data);
81630
81631@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
81632 EXPORT_SYMBOL(netif_napi_del);
81633
81634
81635-static void net_rx_action(struct softirq_action *h)
81636+static void net_rx_action(void)
81637 {
81638 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
81639 unsigned long time_limit = jiffies + 2;
81640diff --git a/net/core/flow.c b/net/core/flow.c
81641index 9601587..8c4824e 100644
81642--- a/net/core/flow.c
81643+++ b/net/core/flow.c
81644@@ -35,11 +35,11 @@ struct flow_cache_entry {
81645 atomic_t *object_ref;
81646 };
81647
81648-atomic_t flow_cache_genid = ATOMIC_INIT(0);
81649+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
81650
81651 static u32 flow_hash_shift;
81652 #define flow_hash_size (1 << flow_hash_shift)
81653-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
81654+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
81655
81656 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
81657
81658@@ -52,7 +52,7 @@ struct flow_percpu_info {
81659 u32 hash_rnd;
81660 int count;
81661 };
81662-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
81663+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
81664
81665 #define flow_hash_rnd_recalc(cpu) \
81666 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
81667@@ -69,7 +69,7 @@ struct flow_flush_info {
81668 atomic_t cpuleft;
81669 struct completion completion;
81670 };
81671-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
81672+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
81673
81674 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
81675
81676@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
81677 if (fle->family == family &&
81678 fle->dir == dir &&
81679 flow_key_compare(key, &fle->key) == 0) {
81680- if (fle->genid == atomic_read(&flow_cache_genid)) {
81681+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
81682 void *ret = fle->object;
81683
81684 if (ret)
81685@@ -228,7 +228,7 @@ nocache:
81686 err = resolver(net, key, family, dir, &obj, &obj_ref);
81687
81688 if (fle && !err) {
81689- fle->genid = atomic_read(&flow_cache_genid);
81690+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
81691
81692 if (fle->object)
81693 atomic_dec(fle->object_ref);
81694@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
81695
81696 fle = flow_table(cpu)[i];
81697 for (; fle; fle = fle->next) {
81698- unsigned genid = atomic_read(&flow_cache_genid);
81699+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
81700
81701 if (!fle->object || fle->genid == genid)
81702 continue;
81703diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
81704index d4fd895..ac9b1e6 100644
81705--- a/net/core/rtnetlink.c
81706+++ b/net/core/rtnetlink.c
81707@@ -57,7 +57,7 @@ struct rtnl_link
81708 {
81709 rtnl_doit_func doit;
81710 rtnl_dumpit_func dumpit;
81711-};
81712+} __no_const;
81713
81714 static DEFINE_MUTEX(rtnl_mutex);
81715
81716diff --git a/net/core/scm.c b/net/core/scm.c
81717index d98eafc..1a190a9 100644
81718--- a/net/core/scm.c
81719+++ b/net/core/scm.c
81720@@ -191,7 +191,7 @@ error:
81721 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81722 {
81723 struct cmsghdr __user *cm
81724- = (__force struct cmsghdr __user *)msg->msg_control;
81725+ = (struct cmsghdr __force_user *)msg->msg_control;
81726 struct cmsghdr cmhdr;
81727 int cmlen = CMSG_LEN(len);
81728 int err;
81729@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81730 err = -EFAULT;
81731 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
81732 goto out;
81733- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
81734+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
81735 goto out;
81736 cmlen = CMSG_SPACE(len);
81737 if (msg->msg_controllen < cmlen)
81738@@ -229,7 +229,7 @@ out:
81739 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81740 {
81741 struct cmsghdr __user *cm
81742- = (__force struct cmsghdr __user*)msg->msg_control;
81743+ = (struct cmsghdr __force_user *)msg->msg_control;
81744
81745 int fdmax = 0;
81746 int fdnum = scm->fp->count;
81747@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81748 if (fdnum < fdmax)
81749 fdmax = fdnum;
81750
81751- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81752+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81753 i++, cmfptr++)
81754 {
81755 int new_fd;
81756diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81757index 45329d7..626aaa6 100644
81758--- a/net/core/secure_seq.c
81759+++ b/net/core/secure_seq.c
81760@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81761 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81762
81763 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81764- __be16 dport)
81765+ __be16 dport)
81766 {
81767 u32 secret[MD5_MESSAGE_BYTES / 4];
81768 u32 hash[MD5_DIGEST_WORDS];
81769@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81770 secret[i] = net_secret[i];
81771
81772 md5_transform(hash, secret);
81773-
81774 return hash[0];
81775 }
81776 #endif
81777diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81778index 025f924..70a71c4 100644
81779--- a/net/core/skbuff.c
81780+++ b/net/core/skbuff.c
81781@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81782 struct sk_buff *frag_iter;
81783 struct sock *sk = skb->sk;
81784
81785+ pax_track_stack();
81786+
81787 /*
81788 * __skb_splice_bits() only fails if the output has no room left,
81789 * so no point in going over the frag_list for the error case.
81790diff --git a/net/core/sock.c b/net/core/sock.c
81791index 6605e75..3acebda 100644
81792--- a/net/core/sock.c
81793+++ b/net/core/sock.c
81794@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81795 break;
81796
81797 case SO_PEERCRED:
81798+ {
81799+ struct ucred peercred;
81800 if (len > sizeof(sk->sk_peercred))
81801 len = sizeof(sk->sk_peercred);
81802- if (copy_to_user(optval, &sk->sk_peercred, len))
81803+ peercred = sk->sk_peercred;
81804+ if (copy_to_user(optval, &peercred, len))
81805 return -EFAULT;
81806 goto lenout;
81807+ }
81808
81809 case SO_PEERNAME:
81810 {
81811@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81812 */
81813 smp_wmb();
81814 atomic_set(&sk->sk_refcnt, 1);
81815- atomic_set(&sk->sk_drops, 0);
81816+ atomic_set_unchecked(&sk->sk_drops, 0);
81817 }
81818 EXPORT_SYMBOL(sock_init_data);
81819
81820diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81821index 2036568..c55883d 100644
81822--- a/net/decnet/sysctl_net_decnet.c
81823+++ b/net/decnet/sysctl_net_decnet.c
81824@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81825
81826 if (len > *lenp) len = *lenp;
81827
81828- if (copy_to_user(buffer, addr, len))
81829+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
81830 return -EFAULT;
81831
81832 *lenp = len;
81833@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81834
81835 if (len > *lenp) len = *lenp;
81836
81837- if (copy_to_user(buffer, devname, len))
81838+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
81839 return -EFAULT;
81840
81841 *lenp = len;
81842diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81843index 39a2d29..f39c0fe 100644
81844--- a/net/econet/Kconfig
81845+++ b/net/econet/Kconfig
81846@@ -4,7 +4,7 @@
81847
81848 config ECONET
81849 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81850- depends on EXPERIMENTAL && INET
81851+ depends on EXPERIMENTAL && INET && BROKEN
81852 ---help---
81853 Econet is a fairly old and slow networking protocol mainly used by
81854 Acorn computers to access file and print servers. It uses native
81855diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81856index a413b1b..380849c 100644
81857--- a/net/ieee802154/dgram.c
81858+++ b/net/ieee802154/dgram.c
81859@@ -318,7 +318,7 @@ out:
81860 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81861 {
81862 if (sock_queue_rcv_skb(sk, skb) < 0) {
81863- atomic_inc(&sk->sk_drops);
81864+ atomic_inc_unchecked(&sk->sk_drops);
81865 kfree_skb(skb);
81866 return NET_RX_DROP;
81867 }
81868diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81869index 30e74ee..bfc6ee0 100644
81870--- a/net/ieee802154/raw.c
81871+++ b/net/ieee802154/raw.c
81872@@ -206,7 +206,7 @@ out:
81873 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81874 {
81875 if (sock_queue_rcv_skb(sk, skb) < 0) {
81876- atomic_inc(&sk->sk_drops);
81877+ atomic_inc_unchecked(&sk->sk_drops);
81878 kfree_skb(skb);
81879 return NET_RX_DROP;
81880 }
81881diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81882index dba56d2..acee5d6 100644
81883--- a/net/ipv4/inet_diag.c
81884+++ b/net/ipv4/inet_diag.c
81885@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81886 r->idiag_retrans = 0;
81887
81888 r->id.idiag_if = sk->sk_bound_dev_if;
81889+#ifdef CONFIG_GRKERNSEC_HIDESYM
81890+ r->id.idiag_cookie[0] = 0;
81891+ r->id.idiag_cookie[1] = 0;
81892+#else
81893 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81894 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81895+#endif
81896
81897 r->id.idiag_sport = inet->sport;
81898 r->id.idiag_dport = inet->dport;
81899@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81900 r->idiag_family = tw->tw_family;
81901 r->idiag_retrans = 0;
81902 r->id.idiag_if = tw->tw_bound_dev_if;
81903+
81904+#ifdef CONFIG_GRKERNSEC_HIDESYM
81905+ r->id.idiag_cookie[0] = 0;
81906+ r->id.idiag_cookie[1] = 0;
81907+#else
81908 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81909 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81910+#endif
81911+
81912 r->id.idiag_sport = tw->tw_sport;
81913 r->id.idiag_dport = tw->tw_dport;
81914 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81915@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81916 if (sk == NULL)
81917 goto unlock;
81918
81919+#ifndef CONFIG_GRKERNSEC_HIDESYM
81920 err = -ESTALE;
81921 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81922 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81923 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81924 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81925 goto out;
81926+#endif
81927
81928 err = -ENOMEM;
81929 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81930@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81931 r->idiag_retrans = req->retrans;
81932
81933 r->id.idiag_if = sk->sk_bound_dev_if;
81934+
81935+#ifdef CONFIG_GRKERNSEC_HIDESYM
81936+ r->id.idiag_cookie[0] = 0;
81937+ r->id.idiag_cookie[1] = 0;
81938+#else
81939 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81940 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81941+#endif
81942
81943 tmo = req->expires - jiffies;
81944 if (tmo < 0)
81945diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81946index d717267..56de7e7 100644
81947--- a/net/ipv4/inet_hashtables.c
81948+++ b/net/ipv4/inet_hashtables.c
81949@@ -18,12 +18,15 @@
81950 #include <linux/sched.h>
81951 #include <linux/slab.h>
81952 #include <linux/wait.h>
81953+#include <linux/security.h>
81954
81955 #include <net/inet_connection_sock.h>
81956 #include <net/inet_hashtables.h>
81957 #include <net/secure_seq.h>
81958 #include <net/ip.h>
81959
81960+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81961+
81962 /*
81963 * Allocate and initialize a new local port bind bucket.
81964 * The bindhash mutex for snum's hash chain must be held here.
81965@@ -491,6 +494,8 @@ ok:
81966 }
81967 spin_unlock(&head->lock);
81968
81969+ gr_update_task_in_ip_table(current, inet_sk(sk));
81970+
81971 if (tw) {
81972 inet_twsk_deschedule(tw, death_row);
81973 inet_twsk_put(tw);
81974diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81975index 13b229f..6956484 100644
81976--- a/net/ipv4/inetpeer.c
81977+++ b/net/ipv4/inetpeer.c
81978@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81979 struct inet_peer *p, *n;
81980 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81981
81982+ pax_track_stack();
81983+
81984 /* Look up for the address quickly. */
81985 read_lock_bh(&peer_pool_lock);
81986 p = lookup(daddr, NULL);
81987@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81988 return NULL;
81989 n->v4daddr = daddr;
81990 atomic_set(&n->refcnt, 1);
81991- atomic_set(&n->rid, 0);
81992+ atomic_set_unchecked(&n->rid, 0);
81993 n->ip_id_count = secure_ip_id(daddr);
81994 n->tcp_ts_stamp = 0;
81995
81996diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81997index d3fe10b..feeafc9 100644
81998--- a/net/ipv4/ip_fragment.c
81999+++ b/net/ipv4/ip_fragment.c
82000@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
82001 return 0;
82002
82003 start = qp->rid;
82004- end = atomic_inc_return(&peer->rid);
82005+ end = atomic_inc_return_unchecked(&peer->rid);
82006 qp->rid = end;
82007
82008 rc = qp->q.fragments && (end - start) > max;
82009diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
82010index e982b5c..f079d75 100644
82011--- a/net/ipv4/ip_sockglue.c
82012+++ b/net/ipv4/ip_sockglue.c
82013@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
82014 int val;
82015 int len;
82016
82017+ pax_track_stack();
82018+
82019 if (level != SOL_IP)
82020 return -EOPNOTSUPP;
82021
82022@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
82023 if (sk->sk_type != SOCK_STREAM)
82024 return -ENOPROTOOPT;
82025
82026- msg.msg_control = optval;
82027+ msg.msg_control = (void __force_kernel *)optval;
82028 msg.msg_controllen = len;
82029 msg.msg_flags = 0;
82030
82031diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
82032index f8d04c2..c1188f2 100644
82033--- a/net/ipv4/ipconfig.c
82034+++ b/net/ipv4/ipconfig.c
82035@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
82036
82037 mm_segment_t oldfs = get_fs();
82038 set_fs(get_ds());
82039- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
82040+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
82041 set_fs(oldfs);
82042 return res;
82043 }
82044@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
82045
82046 mm_segment_t oldfs = get_fs();
82047 set_fs(get_ds());
82048- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
82049+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
82050 set_fs(oldfs);
82051 return res;
82052 }
82053@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
82054
82055 mm_segment_t oldfs = get_fs();
82056 set_fs(get_ds());
82057- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
82058+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
82059 set_fs(oldfs);
82060 return res;
82061 }
82062diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
82063index c8b0cc3..4da5ae2 100644
82064--- a/net/ipv4/netfilter/arp_tables.c
82065+++ b/net/ipv4/netfilter/arp_tables.c
82066@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82067 private = &tmp;
82068 }
82069 #endif
82070+ memset(&info, 0, sizeof(info));
82071 info.valid_hooks = t->valid_hooks;
82072 memcpy(info.hook_entry, private->hook_entry,
82073 sizeof(info.hook_entry));
82074diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
82075index c156db2..e772975 100644
82076--- a/net/ipv4/netfilter/ip_queue.c
82077+++ b/net/ipv4/netfilter/ip_queue.c
82078@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82079
82080 if (v->data_len < sizeof(*user_iph))
82081 return 0;
82082+ if (v->data_len > 65535)
82083+ return -EMSGSIZE;
82084+
82085 diff = v->data_len - e->skb->len;
82086 if (diff < 0) {
82087 if (pskb_trim(e->skb, v->data_len))
82088@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
82089 static inline void
82090 __ipq_rcv_skb(struct sk_buff *skb)
82091 {
82092- int status, type, pid, flags, nlmsglen, skblen;
82093+ int status, type, pid, flags;
82094+ unsigned int nlmsglen, skblen;
82095 struct nlmsghdr *nlh;
82096
82097 skblen = skb->len;
82098diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
82099index 0606db1..02e7e4c 100644
82100--- a/net/ipv4/netfilter/ip_tables.c
82101+++ b/net/ipv4/netfilter/ip_tables.c
82102@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82103 private = &tmp;
82104 }
82105 #endif
82106+ memset(&info, 0, sizeof(info));
82107 info.valid_hooks = t->valid_hooks;
82108 memcpy(info.hook_entry, private->hook_entry,
82109 sizeof(info.hook_entry));
82110diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
82111index d9521f6..3c3eb25 100644
82112--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
82113+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
82114@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
82115
82116 *len = 0;
82117
82118- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
82119+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
82120 if (*octets == NULL) {
82121 if (net_ratelimit())
82122 printk("OOM in bsalg (%d)\n", __LINE__);
82123diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
82124index ab996f9..3da5f96 100644
82125--- a/net/ipv4/raw.c
82126+++ b/net/ipv4/raw.c
82127@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
82128 /* Charge it to the socket. */
82129
82130 if (sock_queue_rcv_skb(sk, skb) < 0) {
82131- atomic_inc(&sk->sk_drops);
82132+ atomic_inc_unchecked(&sk->sk_drops);
82133 kfree_skb(skb);
82134 return NET_RX_DROP;
82135 }
82136@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
82137 int raw_rcv(struct sock *sk, struct sk_buff *skb)
82138 {
82139 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
82140- atomic_inc(&sk->sk_drops);
82141+ atomic_inc_unchecked(&sk->sk_drops);
82142 kfree_skb(skb);
82143 return NET_RX_DROP;
82144 }
82145@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
82146
82147 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
82148 {
82149+ struct icmp_filter filter;
82150+
82151+ if (optlen < 0)
82152+ return -EINVAL;
82153 if (optlen > sizeof(struct icmp_filter))
82154 optlen = sizeof(struct icmp_filter);
82155- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
82156+ if (copy_from_user(&filter, optval, optlen))
82157 return -EFAULT;
82158+ raw_sk(sk)->filter = filter;
82159+
82160 return 0;
82161 }
82162
82163 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
82164 {
82165 int len, ret = -EFAULT;
82166+ struct icmp_filter filter;
82167
82168 if (get_user(len, optlen))
82169 goto out;
82170@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
82171 if (len > sizeof(struct icmp_filter))
82172 len = sizeof(struct icmp_filter);
82173 ret = -EFAULT;
82174- if (put_user(len, optlen) ||
82175- copy_to_user(optval, &raw_sk(sk)->filter, len))
82176+ filter = raw_sk(sk)->filter;
82177+ if (put_user(len, optlen) || len > sizeof filter ||
82178+ copy_to_user(optval, &filter, len))
82179 goto out;
82180 ret = 0;
82181 out: return ret;
82182@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82183 sk_wmem_alloc_get(sp),
82184 sk_rmem_alloc_get(sp),
82185 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82186- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82187+ atomic_read(&sp->sk_refcnt),
82188+#ifdef CONFIG_GRKERNSEC_HIDESYM
82189+ NULL,
82190+#else
82191+ sp,
82192+#endif
82193+ atomic_read_unchecked(&sp->sk_drops));
82194 }
82195
82196 static int raw_seq_show(struct seq_file *seq, void *v)
82197diff --git a/net/ipv4/route.c b/net/ipv4/route.c
82198index 58f141b..b759702 100644
82199--- a/net/ipv4/route.c
82200+++ b/net/ipv4/route.c
82201@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
82202
82203 static inline int rt_genid(struct net *net)
82204 {
82205- return atomic_read(&net->ipv4.rt_genid);
82206+ return atomic_read_unchecked(&net->ipv4.rt_genid);
82207 }
82208
82209 #ifdef CONFIG_PROC_FS
82210@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
82211 unsigned char shuffle;
82212
82213 get_random_bytes(&shuffle, sizeof(shuffle));
82214- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
82215+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
82216 }
82217
82218 /*
82219@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
82220
82221 static __net_init int rt_secret_timer_init(struct net *net)
82222 {
82223- atomic_set(&net->ipv4.rt_genid,
82224+ atomic_set_unchecked(&net->ipv4.rt_genid,
82225 (int) ((num_physpages ^ (num_physpages>>8)) ^
82226 (jiffies ^ (jiffies >> 7))));
82227
82228diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
82229index f095659..adc892a 100644
82230--- a/net/ipv4/tcp.c
82231+++ b/net/ipv4/tcp.c
82232@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
82233 int val;
82234 int err = 0;
82235
82236+ pax_track_stack();
82237+
82238 /* This is a string value all the others are int's */
82239 if (optname == TCP_CONGESTION) {
82240 char name[TCP_CA_NAME_MAX];
82241@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
82242 struct tcp_sock *tp = tcp_sk(sk);
82243 int val, len;
82244
82245+ pax_track_stack();
82246+
82247 if (get_user(len, optlen))
82248 return -EFAULT;
82249
82250diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
82251index 6fc7961..33bad4a 100644
82252--- a/net/ipv4/tcp_ipv4.c
82253+++ b/net/ipv4/tcp_ipv4.c
82254@@ -85,6 +85,9 @@
82255 int sysctl_tcp_tw_reuse __read_mostly;
82256 int sysctl_tcp_low_latency __read_mostly;
82257
82258+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82259+extern int grsec_enable_blackhole;
82260+#endif
82261
82262 #ifdef CONFIG_TCP_MD5SIG
82263 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
82264@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
82265 return 0;
82266
82267 reset:
82268+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82269+ if (!grsec_enable_blackhole)
82270+#endif
82271 tcp_v4_send_reset(rsk, skb);
82272 discard:
82273 kfree_skb(skb);
82274@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
82275 TCP_SKB_CB(skb)->sacked = 0;
82276
82277 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82278- if (!sk)
82279+ if (!sk) {
82280+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82281+ ret = 1;
82282+#endif
82283 goto no_tcp_socket;
82284+ }
82285
82286 process:
82287- if (sk->sk_state == TCP_TIME_WAIT)
82288+ if (sk->sk_state == TCP_TIME_WAIT) {
82289+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82290+ ret = 2;
82291+#endif
82292 goto do_time_wait;
82293+ }
82294
82295 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
82296 goto discard_and_relse;
82297@@ -1651,6 +1665,10 @@ no_tcp_socket:
82298 bad_packet:
82299 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82300 } else {
82301+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82302+ if (!grsec_enable_blackhole || (ret == 1 &&
82303+ (skb->dev->flags & IFF_LOOPBACK)))
82304+#endif
82305 tcp_v4_send_reset(NULL, skb);
82306 }
82307
82308@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
82309 0, /* non standard timer */
82310 0, /* open_requests have no inode */
82311 atomic_read(&sk->sk_refcnt),
82312+#ifdef CONFIG_GRKERNSEC_HIDESYM
82313+ NULL,
82314+#else
82315 req,
82316+#endif
82317 len);
82318 }
82319
82320@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
82321 sock_i_uid(sk),
82322 icsk->icsk_probes_out,
82323 sock_i_ino(sk),
82324- atomic_read(&sk->sk_refcnt), sk,
82325+ atomic_read(&sk->sk_refcnt),
82326+#ifdef CONFIG_GRKERNSEC_HIDESYM
82327+ NULL,
82328+#else
82329+ sk,
82330+#endif
82331 jiffies_to_clock_t(icsk->icsk_rto),
82332 jiffies_to_clock_t(icsk->icsk_ack.ato),
82333 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
82334@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
82335 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
82336 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
82337 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82338- atomic_read(&tw->tw_refcnt), tw, len);
82339+ atomic_read(&tw->tw_refcnt),
82340+#ifdef CONFIG_GRKERNSEC_HIDESYM
82341+ NULL,
82342+#else
82343+ tw,
82344+#endif
82345+ len);
82346 }
82347
82348 #define TMPSZ 150
82349diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
82350index 4c03598..e09a8e8 100644
82351--- a/net/ipv4/tcp_minisocks.c
82352+++ b/net/ipv4/tcp_minisocks.c
82353@@ -26,6 +26,10 @@
82354 #include <net/inet_common.h>
82355 #include <net/xfrm.h>
82356
82357+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82358+extern int grsec_enable_blackhole;
82359+#endif
82360+
82361 #ifdef CONFIG_SYSCTL
82362 #define SYNC_INIT 0 /* let the user enable it */
82363 #else
82364@@ -672,6 +676,10 @@ listen_overflow:
82365
82366 embryonic_reset:
82367 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
82368+
82369+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82370+ if (!grsec_enable_blackhole)
82371+#endif
82372 if (!(flg & TCP_FLAG_RST))
82373 req->rsk_ops->send_reset(sk, skb);
82374
82375diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
82376index af83bdf..ec91cb2 100644
82377--- a/net/ipv4/tcp_output.c
82378+++ b/net/ipv4/tcp_output.c
82379@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
82380 __u8 *md5_hash_location;
82381 int mss;
82382
82383+ pax_track_stack();
82384+
82385 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
82386 if (skb == NULL)
82387 return NULL;
82388diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
82389index 59f5b5e..193860f 100644
82390--- a/net/ipv4/tcp_probe.c
82391+++ b/net/ipv4/tcp_probe.c
82392@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
82393 if (cnt + width >= len)
82394 break;
82395
82396- if (copy_to_user(buf + cnt, tbuf, width))
82397+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
82398 return -EFAULT;
82399 cnt += width;
82400 }
82401diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
82402index 57d5501..a9ed13a 100644
82403--- a/net/ipv4/tcp_timer.c
82404+++ b/net/ipv4/tcp_timer.c
82405@@ -21,6 +21,10 @@
82406 #include <linux/module.h>
82407 #include <net/tcp.h>
82408
82409+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82410+extern int grsec_lastack_retries;
82411+#endif
82412+
82413 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
82414 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
82415 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
82416@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
82417 }
82418 }
82419
82420+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82421+ if ((sk->sk_state == TCP_LAST_ACK) &&
82422+ (grsec_lastack_retries > 0) &&
82423+ (grsec_lastack_retries < retry_until))
82424+ retry_until = grsec_lastack_retries;
82425+#endif
82426+
82427 if (retransmits_timed_out(sk, retry_until)) {
82428 /* Has it gone just too far? */
82429 tcp_write_err(sk);
82430diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
82431index 8e28770..72105c8 100644
82432--- a/net/ipv4/udp.c
82433+++ b/net/ipv4/udp.c
82434@@ -86,6 +86,7 @@
82435 #include <linux/types.h>
82436 #include <linux/fcntl.h>
82437 #include <linux/module.h>
82438+#include <linux/security.h>
82439 #include <linux/socket.h>
82440 #include <linux/sockios.h>
82441 #include <linux/igmp.h>
82442@@ -106,6 +107,10 @@
82443 #include <net/xfrm.h>
82444 #include "udp_impl.h"
82445
82446+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82447+extern int grsec_enable_blackhole;
82448+#endif
82449+
82450 struct udp_table udp_table;
82451 EXPORT_SYMBOL(udp_table);
82452
82453@@ -371,6 +376,9 @@ found:
82454 return s;
82455 }
82456
82457+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
82458+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
82459+
82460 /*
82461 * This routine is called by the ICMP module when it gets some
82462 * sort of error condition. If err < 0 then the socket should
82463@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
82464 dport = usin->sin_port;
82465 if (dport == 0)
82466 return -EINVAL;
82467+
82468+ err = gr_search_udp_sendmsg(sk, usin);
82469+ if (err)
82470+ return err;
82471 } else {
82472 if (sk->sk_state != TCP_ESTABLISHED)
82473 return -EDESTADDRREQ;
82474+
82475+ err = gr_search_udp_sendmsg(sk, NULL);
82476+ if (err)
82477+ return err;
82478+
82479 daddr = inet->daddr;
82480 dport = inet->dport;
82481 /* Open fast path for connected socket.
82482@@ -945,6 +962,10 @@ try_again:
82483 if (!skb)
82484 goto out;
82485
82486+ err = gr_search_udp_recvmsg(sk, skb);
82487+ if (err)
82488+ goto out_free;
82489+
82490 ulen = skb->len - sizeof(struct udphdr);
82491 copied = len;
82492 if (copied > ulen)
82493@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
82494 if (rc == -ENOMEM) {
82495 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
82496 is_udplite);
82497- atomic_inc(&sk->sk_drops);
82498+ atomic_inc_unchecked(&sk->sk_drops);
82499 }
82500 goto drop;
82501 }
82502@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82503 goto csum_error;
82504
82505 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
82506+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82507+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82508+#endif
82509 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
82510
82511 /*
82512@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
82513 sk_wmem_alloc_get(sp),
82514 sk_rmem_alloc_get(sp),
82515 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
82516- atomic_read(&sp->sk_refcnt), sp,
82517- atomic_read(&sp->sk_drops), len);
82518+ atomic_read(&sp->sk_refcnt),
82519+#ifdef CONFIG_GRKERNSEC_HIDESYM
82520+ NULL,
82521+#else
82522+ sp,
82523+#endif
82524+ atomic_read_unchecked(&sp->sk_drops), len);
82525 }
82526
82527 int udp4_seq_show(struct seq_file *seq, void *v)
82528diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
82529index 8ac3d09..fc58c5f 100644
82530--- a/net/ipv6/addrconf.c
82531+++ b/net/ipv6/addrconf.c
82532@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
82533 p.iph.ihl = 5;
82534 p.iph.protocol = IPPROTO_IPV6;
82535 p.iph.ttl = 64;
82536- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
82537+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
82538
82539 if (ops->ndo_do_ioctl) {
82540 mm_segment_t oldfs = get_fs();
82541diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
82542index cc4797d..7cfdfcc 100644
82543--- a/net/ipv6/inet6_connection_sock.c
82544+++ b/net/ipv6/inet6_connection_sock.c
82545@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
82546 #ifdef CONFIG_XFRM
82547 {
82548 struct rt6_info *rt = (struct rt6_info *)dst;
82549- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
82550+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
82551 }
82552 #endif
82553 }
82554@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
82555 #ifdef CONFIG_XFRM
82556 if (dst) {
82557 struct rt6_info *rt = (struct rt6_info *)dst;
82558- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
82559+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
82560 sk->sk_dst_cache = NULL;
82561 dst_release(dst);
82562 dst = NULL;
82563diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
82564index 093e9b2..f72cddb 100644
82565--- a/net/ipv6/inet6_hashtables.c
82566+++ b/net/ipv6/inet6_hashtables.c
82567@@ -119,7 +119,7 @@ out:
82568 }
82569 EXPORT_SYMBOL(__inet6_lookup_established);
82570
82571-static int inline compute_score(struct sock *sk, struct net *net,
82572+static inline int compute_score(struct sock *sk, struct net *net,
82573 const unsigned short hnum,
82574 const struct in6_addr *daddr,
82575 const int dif)
82576diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
82577index 4f7aaf6..f7acf45 100644
82578--- a/net/ipv6/ipv6_sockglue.c
82579+++ b/net/ipv6/ipv6_sockglue.c
82580@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
82581 int val, valbool;
82582 int retv = -ENOPROTOOPT;
82583
82584+ pax_track_stack();
82585+
82586 if (optval == NULL)
82587 val=0;
82588 else {
82589@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82590 int len;
82591 int val;
82592
82593+ pax_track_stack();
82594+
82595 if (ip6_mroute_opt(optname))
82596 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
82597
82598@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
82599 if (sk->sk_type != SOCK_STREAM)
82600 return -ENOPROTOOPT;
82601
82602- msg.msg_control = optval;
82603+ msg.msg_control = (void __force_kernel *)optval;
82604 msg.msg_controllen = len;
82605 msg.msg_flags = 0;
82606
82607diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
82608index 1cf3f0c..1d4376f 100644
82609--- a/net/ipv6/netfilter/ip6_queue.c
82610+++ b/net/ipv6/netfilter/ip6_queue.c
82611@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
82612
82613 if (v->data_len < sizeof(*user_iph))
82614 return 0;
82615+ if (v->data_len > 65535)
82616+ return -EMSGSIZE;
82617+
82618 diff = v->data_len - e->skb->len;
82619 if (diff < 0) {
82620 if (pskb_trim(e->skb, v->data_len))
82621@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
82622 static inline void
82623 __ipq_rcv_skb(struct sk_buff *skb)
82624 {
82625- int status, type, pid, flags, nlmsglen, skblen;
82626+ int status, type, pid, flags;
82627+ unsigned int nlmsglen, skblen;
82628 struct nlmsghdr *nlh;
82629
82630 skblen = skb->len;
82631diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
82632index 78b5a36..7f37433 100644
82633--- a/net/ipv6/netfilter/ip6_tables.c
82634+++ b/net/ipv6/netfilter/ip6_tables.c
82635@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
82636 private = &tmp;
82637 }
82638 #endif
82639+ memset(&info, 0, sizeof(info));
82640 info.valid_hooks = t->valid_hooks;
82641 memcpy(info.hook_entry, private->hook_entry,
82642 sizeof(info.hook_entry));
82643diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
82644index 4f24570..b813b34 100644
82645--- a/net/ipv6/raw.c
82646+++ b/net/ipv6/raw.c
82647@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
82648 {
82649 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
82650 skb_checksum_complete(skb)) {
82651- atomic_inc(&sk->sk_drops);
82652+ atomic_inc_unchecked(&sk->sk_drops);
82653 kfree_skb(skb);
82654 return NET_RX_DROP;
82655 }
82656
82657 /* Charge it to the socket. */
82658 if (sock_queue_rcv_skb(sk,skb)<0) {
82659- atomic_inc(&sk->sk_drops);
82660+ atomic_inc_unchecked(&sk->sk_drops);
82661 kfree_skb(skb);
82662 return NET_RX_DROP;
82663 }
82664@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82665 struct raw6_sock *rp = raw6_sk(sk);
82666
82667 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
82668- atomic_inc(&sk->sk_drops);
82669+ atomic_inc_unchecked(&sk->sk_drops);
82670 kfree_skb(skb);
82671 return NET_RX_DROP;
82672 }
82673@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82674
82675 if (inet->hdrincl) {
82676 if (skb_checksum_complete(skb)) {
82677- atomic_inc(&sk->sk_drops);
82678+ atomic_inc_unchecked(&sk->sk_drops);
82679 kfree_skb(skb);
82680 return NET_RX_DROP;
82681 }
82682@@ -518,7 +518,7 @@ csum_copy_err:
82683 as some normal condition.
82684 */
82685 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
82686- atomic_inc(&sk->sk_drops);
82687+ atomic_inc_unchecked(&sk->sk_drops);
82688 goto out;
82689 }
82690
82691@@ -600,7 +600,7 @@ out:
82692 return err;
82693 }
82694
82695-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
82696+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
82697 struct flowi *fl, struct rt6_info *rt,
82698 unsigned int flags)
82699 {
82700@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
82701 u16 proto;
82702 int err;
82703
82704+ pax_track_stack();
82705+
82706 /* Rough check on arithmetic overflow,
82707 better check is made in ip6_append_data().
82708 */
82709@@ -916,12 +918,17 @@ do_confirm:
82710 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
82711 char __user *optval, int optlen)
82712 {
82713+ struct icmp6_filter filter;
82714+
82715 switch (optname) {
82716 case ICMPV6_FILTER:
82717+ if (optlen < 0)
82718+ return -EINVAL;
82719 if (optlen > sizeof(struct icmp6_filter))
82720 optlen = sizeof(struct icmp6_filter);
82721- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
82722+ if (copy_from_user(&filter, optval, optlen))
82723 return -EFAULT;
82724+ raw6_sk(sk)->filter = filter;
82725 return 0;
82726 default:
82727 return -ENOPROTOOPT;
82728@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82729 char __user *optval, int __user *optlen)
82730 {
82731 int len;
82732+ struct icmp6_filter filter;
82733
82734 switch (optname) {
82735 case ICMPV6_FILTER:
82736@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82737 len = sizeof(struct icmp6_filter);
82738 if (put_user(len, optlen))
82739 return -EFAULT;
82740- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
82741+ filter = raw6_sk(sk)->filter;
82742+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
82743 return -EFAULT;
82744 return 0;
82745 default:
82746@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82747 0, 0L, 0,
82748 sock_i_uid(sp), 0,
82749 sock_i_ino(sp),
82750- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82751+ atomic_read(&sp->sk_refcnt),
82752+#ifdef CONFIG_GRKERNSEC_HIDESYM
82753+ NULL,
82754+#else
82755+ sp,
82756+#endif
82757+ atomic_read_unchecked(&sp->sk_drops));
82758 }
82759
82760 static int raw6_seq_show(struct seq_file *seq, void *v)
82761diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82762index faae6df..d4430c1 100644
82763--- a/net/ipv6/tcp_ipv6.c
82764+++ b/net/ipv6/tcp_ipv6.c
82765@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82766 }
82767 #endif
82768
82769+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82770+extern int grsec_enable_blackhole;
82771+#endif
82772+
82773 static void tcp_v6_hash(struct sock *sk)
82774 {
82775 if (sk->sk_state != TCP_CLOSE) {
82776@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82777 return 0;
82778
82779 reset:
82780+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82781+ if (!grsec_enable_blackhole)
82782+#endif
82783 tcp_v6_send_reset(sk, skb);
82784 discard:
82785 if (opt_skb)
82786@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82787 TCP_SKB_CB(skb)->sacked = 0;
82788
82789 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82790- if (!sk)
82791+ if (!sk) {
82792+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82793+ ret = 1;
82794+#endif
82795 goto no_tcp_socket;
82796+ }
82797
82798 process:
82799- if (sk->sk_state == TCP_TIME_WAIT)
82800+ if (sk->sk_state == TCP_TIME_WAIT) {
82801+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82802+ ret = 2;
82803+#endif
82804 goto do_time_wait;
82805+ }
82806
82807 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82808 goto discard_and_relse;
82809@@ -1701,6 +1716,10 @@ no_tcp_socket:
82810 bad_packet:
82811 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82812 } else {
82813+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82814+ if (!grsec_enable_blackhole || (ret == 1 &&
82815+ (skb->dev->flags & IFF_LOOPBACK)))
82816+#endif
82817 tcp_v6_send_reset(NULL, skb);
82818 }
82819
82820@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82821 uid,
82822 0, /* non standard timer */
82823 0, /* open_requests have no inode */
82824- 0, req);
82825+ 0,
82826+#ifdef CONFIG_GRKERNSEC_HIDESYM
82827+ NULL
82828+#else
82829+ req
82830+#endif
82831+ );
82832 }
82833
82834 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82835@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82836 sock_i_uid(sp),
82837 icsk->icsk_probes_out,
82838 sock_i_ino(sp),
82839- atomic_read(&sp->sk_refcnt), sp,
82840+ atomic_read(&sp->sk_refcnt),
82841+#ifdef CONFIG_GRKERNSEC_HIDESYM
82842+ NULL,
82843+#else
82844+ sp,
82845+#endif
82846 jiffies_to_clock_t(icsk->icsk_rto),
82847 jiffies_to_clock_t(icsk->icsk_ack.ato),
82848 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82849@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82850 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82851 tw->tw_substate, 0, 0,
82852 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82853- atomic_read(&tw->tw_refcnt), tw);
82854+ atomic_read(&tw->tw_refcnt),
82855+#ifdef CONFIG_GRKERNSEC_HIDESYM
82856+ NULL
82857+#else
82858+ tw
82859+#endif
82860+ );
82861 }
82862
82863 static int tcp6_seq_show(struct seq_file *seq, void *v)
82864diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82865index 9cc6289..052c521 100644
82866--- a/net/ipv6/udp.c
82867+++ b/net/ipv6/udp.c
82868@@ -49,6 +49,10 @@
82869 #include <linux/seq_file.h>
82870 #include "udp_impl.h"
82871
82872+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82873+extern int grsec_enable_blackhole;
82874+#endif
82875+
82876 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82877 {
82878 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82879@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82880 if (rc == -ENOMEM) {
82881 UDP6_INC_STATS_BH(sock_net(sk),
82882 UDP_MIB_RCVBUFERRORS, is_udplite);
82883- atomic_inc(&sk->sk_drops);
82884+ atomic_inc_unchecked(&sk->sk_drops);
82885 }
82886 goto drop;
82887 }
82888@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82889 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82890 proto == IPPROTO_UDPLITE);
82891
82892+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82893+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82894+#endif
82895 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82896
82897 kfree_skb(skb);
82898@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82899 0, 0L, 0,
82900 sock_i_uid(sp), 0,
82901 sock_i_ino(sp),
82902- atomic_read(&sp->sk_refcnt), sp,
82903- atomic_read(&sp->sk_drops));
82904+ atomic_read(&sp->sk_refcnt),
82905+#ifdef CONFIG_GRKERNSEC_HIDESYM
82906+ NULL,
82907+#else
82908+ sp,
82909+#endif
82910+ atomic_read_unchecked(&sp->sk_drops));
82911 }
82912
82913 int udp6_seq_show(struct seq_file *seq, void *v)
82914diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82915index 811984d..11f59b7 100644
82916--- a/net/irda/ircomm/ircomm_tty.c
82917+++ b/net/irda/ircomm/ircomm_tty.c
82918@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82919 add_wait_queue(&self->open_wait, &wait);
82920
82921 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82922- __FILE__,__LINE__, tty->driver->name, self->open_count );
82923+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82924
82925 /* As far as I can see, we protect open_count - Jean II */
82926 spin_lock_irqsave(&self->spinlock, flags);
82927 if (!tty_hung_up_p(filp)) {
82928 extra_count = 1;
82929- self->open_count--;
82930+ local_dec(&self->open_count);
82931 }
82932 spin_unlock_irqrestore(&self->spinlock, flags);
82933- self->blocked_open++;
82934+ local_inc(&self->blocked_open);
82935
82936 while (1) {
82937 if (tty->termios->c_cflag & CBAUD) {
82938@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82939 }
82940
82941 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82942- __FILE__,__LINE__, tty->driver->name, self->open_count );
82943+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82944
82945 schedule();
82946 }
82947@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82948 if (extra_count) {
82949 /* ++ is not atomic, so this should be protected - Jean II */
82950 spin_lock_irqsave(&self->spinlock, flags);
82951- self->open_count++;
82952+ local_inc(&self->open_count);
82953 spin_unlock_irqrestore(&self->spinlock, flags);
82954 }
82955- self->blocked_open--;
82956+ local_dec(&self->blocked_open);
82957
82958 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82959- __FILE__,__LINE__, tty->driver->name, self->open_count);
82960+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82961
82962 if (!retval)
82963 self->flags |= ASYNC_NORMAL_ACTIVE;
82964@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82965 }
82966 /* ++ is not atomic, so this should be protected - Jean II */
82967 spin_lock_irqsave(&self->spinlock, flags);
82968- self->open_count++;
82969+ local_inc(&self->open_count);
82970
82971 tty->driver_data = self;
82972 self->tty = tty;
82973 spin_unlock_irqrestore(&self->spinlock, flags);
82974
82975 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82976- self->line, self->open_count);
82977+ self->line, local_read(&self->open_count));
82978
82979 /* Not really used by us, but lets do it anyway */
82980 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82981@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82982 return;
82983 }
82984
82985- if ((tty->count == 1) && (self->open_count != 1)) {
82986+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82987 /*
82988 * Uh, oh. tty->count is 1, which means that the tty
82989 * structure will be freed. state->count should always
82990@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82991 */
82992 IRDA_DEBUG(0, "%s(), bad serial port count; "
82993 "tty->count is 1, state->count is %d\n", __func__ ,
82994- self->open_count);
82995- self->open_count = 1;
82996+ local_read(&self->open_count));
82997+ local_set(&self->open_count, 1);
82998 }
82999
83000- if (--self->open_count < 0) {
83001+ if (local_dec_return(&self->open_count) < 0) {
83002 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
83003- __func__, self->line, self->open_count);
83004- self->open_count = 0;
83005+ __func__, self->line, local_read(&self->open_count));
83006+ local_set(&self->open_count, 0);
83007 }
83008- if (self->open_count) {
83009+ if (local_read(&self->open_count)) {
83010 spin_unlock_irqrestore(&self->spinlock, flags);
83011
83012 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
83013@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
83014 tty->closing = 0;
83015 self->tty = NULL;
83016
83017- if (self->blocked_open) {
83018+ if (local_read(&self->blocked_open)) {
83019 if (self->close_delay)
83020 schedule_timeout_interruptible(self->close_delay);
83021 wake_up_interruptible(&self->open_wait);
83022@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
83023 spin_lock_irqsave(&self->spinlock, flags);
83024 self->flags &= ~ASYNC_NORMAL_ACTIVE;
83025 self->tty = NULL;
83026- self->open_count = 0;
83027+ local_set(&self->open_count, 0);
83028 spin_unlock_irqrestore(&self->spinlock, flags);
83029
83030 wake_up_interruptible(&self->open_wait);
83031@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
83032 seq_putc(m, '\n');
83033
83034 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
83035- seq_printf(m, "Open count: %d\n", self->open_count);
83036+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
83037 seq_printf(m, "Max data size: %d\n", self->max_data_size);
83038 seq_printf(m, "Max header size: %d\n", self->max_header_size);
83039
83040diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
83041index bada1b9..f325943 100644
83042--- a/net/iucv/af_iucv.c
83043+++ b/net/iucv/af_iucv.c
83044@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
83045
83046 write_lock_bh(&iucv_sk_list.lock);
83047
83048- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
83049+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
83050 while (__iucv_get_sock_by_name(name)) {
83051 sprintf(name, "%08x",
83052- atomic_inc_return(&iucv_sk_list.autobind_name));
83053+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
83054 }
83055
83056 write_unlock_bh(&iucv_sk_list.lock);
83057diff --git a/net/key/af_key.c b/net/key/af_key.c
83058index 4e98193..439b449 100644
83059--- a/net/key/af_key.c
83060+++ b/net/key/af_key.c
83061@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
83062 struct xfrm_migrate m[XFRM_MAX_DEPTH];
83063 struct xfrm_kmaddress k;
83064
83065+ pax_track_stack();
83066+
83067 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
83068 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
83069 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
83070@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
83071 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
83072 else
83073 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
83074+#ifdef CONFIG_GRKERNSEC_HIDESYM
83075+ NULL,
83076+#else
83077 s,
83078+#endif
83079 atomic_read(&s->sk_refcnt),
83080 sk_rmem_alloc_get(s),
83081 sk_wmem_alloc_get(s),
83082diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
83083index bda96d1..c038b72 100644
83084--- a/net/lapb/lapb_iface.c
83085+++ b/net/lapb/lapb_iface.c
83086@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
83087 goto out;
83088
83089 lapb->dev = dev;
83090- lapb->callbacks = *callbacks;
83091+ lapb->callbacks = callbacks;
83092
83093 __lapb_insert_cb(lapb);
83094
83095@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
83096
83097 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
83098 {
83099- if (lapb->callbacks.connect_confirmation)
83100- lapb->callbacks.connect_confirmation(lapb->dev, reason);
83101+ if (lapb->callbacks->connect_confirmation)
83102+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
83103 }
83104
83105 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
83106 {
83107- if (lapb->callbacks.connect_indication)
83108- lapb->callbacks.connect_indication(lapb->dev, reason);
83109+ if (lapb->callbacks->connect_indication)
83110+ lapb->callbacks->connect_indication(lapb->dev, reason);
83111 }
83112
83113 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
83114 {
83115- if (lapb->callbacks.disconnect_confirmation)
83116- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
83117+ if (lapb->callbacks->disconnect_confirmation)
83118+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
83119 }
83120
83121 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
83122 {
83123- if (lapb->callbacks.disconnect_indication)
83124- lapb->callbacks.disconnect_indication(lapb->dev, reason);
83125+ if (lapb->callbacks->disconnect_indication)
83126+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
83127 }
83128
83129 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
83130 {
83131- if (lapb->callbacks.data_indication)
83132- return lapb->callbacks.data_indication(lapb->dev, skb);
83133+ if (lapb->callbacks->data_indication)
83134+ return lapb->callbacks->data_indication(lapb->dev, skb);
83135
83136 kfree_skb(skb);
83137 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
83138@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
83139 {
83140 int used = 0;
83141
83142- if (lapb->callbacks.data_transmit) {
83143- lapb->callbacks.data_transmit(lapb->dev, skb);
83144+ if (lapb->callbacks->data_transmit) {
83145+ lapb->callbacks->data_transmit(lapb->dev, skb);
83146 used = 1;
83147 }
83148
83149diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
83150index fe2d3f8..e57f683 100644
83151--- a/net/mac80211/cfg.c
83152+++ b/net/mac80211/cfg.c
83153@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
83154 return err;
83155 }
83156
83157-struct cfg80211_ops mac80211_config_ops = {
83158+const struct cfg80211_ops mac80211_config_ops = {
83159 .add_virtual_intf = ieee80211_add_iface,
83160 .del_virtual_intf = ieee80211_del_iface,
83161 .change_virtual_intf = ieee80211_change_iface,
83162diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
83163index 7d7879f..2d51f62 100644
83164--- a/net/mac80211/cfg.h
83165+++ b/net/mac80211/cfg.h
83166@@ -4,6 +4,6 @@
83167 #ifndef __CFG_H
83168 #define __CFG_H
83169
83170-extern struct cfg80211_ops mac80211_config_ops;
83171+extern const struct cfg80211_ops mac80211_config_ops;
83172
83173 #endif /* __CFG_H */
83174diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
83175index 99c7525..9cb4937 100644
83176--- a/net/mac80211/debugfs_key.c
83177+++ b/net/mac80211/debugfs_key.c
83178@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
83179 size_t count, loff_t *ppos)
83180 {
83181 struct ieee80211_key *key = file->private_data;
83182- int i, res, bufsize = 2 * key->conf.keylen + 2;
83183+ int i, bufsize = 2 * key->conf.keylen + 2;
83184 char *buf = kmalloc(bufsize, GFP_KERNEL);
83185 char *p = buf;
83186+ ssize_t res;
83187+
83188+ if (buf == NULL)
83189+ return -ENOMEM;
83190
83191 for (i = 0; i < key->conf.keylen; i++)
83192 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
83193diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
83194index 33a2e89..08650c8 100644
83195--- a/net/mac80211/debugfs_sta.c
83196+++ b/net/mac80211/debugfs_sta.c
83197@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
83198 int i;
83199 struct sta_info *sta = file->private_data;
83200
83201+ pax_track_stack();
83202+
83203 spin_lock_bh(&sta->lock);
83204 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
83205 sta->ampdu_mlme.dialog_token_allocator + 1);
83206diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
83207index ca62bfe..6657a03 100644
83208--- a/net/mac80211/ieee80211_i.h
83209+++ b/net/mac80211/ieee80211_i.h
83210@@ -25,6 +25,7 @@
83211 #include <linux/etherdevice.h>
83212 #include <net/cfg80211.h>
83213 #include <net/mac80211.h>
83214+#include <asm/local.h>
83215 #include "key.h"
83216 #include "sta_info.h"
83217
83218@@ -635,7 +636,7 @@ struct ieee80211_local {
83219 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
83220 spinlock_t queue_stop_reason_lock;
83221
83222- int open_count;
83223+ local_t open_count;
83224 int monitors, cooked_mntrs;
83225 /* number of interfaces with corresponding FIF_ flags */
83226 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
83227diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
83228index 079c500..eb3c6d4 100644
83229--- a/net/mac80211/iface.c
83230+++ b/net/mac80211/iface.c
83231@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
83232 break;
83233 }
83234
83235- if (local->open_count == 0) {
83236+ if (local_read(&local->open_count) == 0) {
83237 res = drv_start(local);
83238 if (res)
83239 goto err_del_bss;
83240@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
83241 * Validate the MAC address for this device.
83242 */
83243 if (!is_valid_ether_addr(dev->dev_addr)) {
83244- if (!local->open_count)
83245+ if (!local_read(&local->open_count))
83246 drv_stop(local);
83247 return -EADDRNOTAVAIL;
83248 }
83249@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
83250
83251 hw_reconf_flags |= __ieee80211_recalc_idle(local);
83252
83253- local->open_count++;
83254+ local_inc(&local->open_count);
83255 if (hw_reconf_flags) {
83256 ieee80211_hw_config(local, hw_reconf_flags);
83257 /*
83258@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
83259 err_del_interface:
83260 drv_remove_interface(local, &conf);
83261 err_stop:
83262- if (!local->open_count)
83263+ if (!local_read(&local->open_count))
83264 drv_stop(local);
83265 err_del_bss:
83266 sdata->bss = NULL;
83267@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
83268 WARN_ON(!list_empty(&sdata->u.ap.vlans));
83269 }
83270
83271- local->open_count--;
83272+ local_dec(&local->open_count);
83273
83274 switch (sdata->vif.type) {
83275 case NL80211_IFTYPE_AP_VLAN:
83276@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
83277
83278 ieee80211_recalc_ps(local, -1);
83279
83280- if (local->open_count == 0) {
83281+ if (local_read(&local->open_count) == 0) {
83282 ieee80211_clear_tx_pending(local);
83283 ieee80211_stop_device(local);
83284
83285diff --git a/net/mac80211/main.c b/net/mac80211/main.c
83286index 2dfe176..74e4388 100644
83287--- a/net/mac80211/main.c
83288+++ b/net/mac80211/main.c
83289@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
83290 local->hw.conf.power_level = power;
83291 }
83292
83293- if (changed && local->open_count) {
83294+ if (changed && local_read(&local->open_count)) {
83295 ret = drv_config(local, changed);
83296 /*
83297 * Goal:
83298diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
83299index e67eea7..fcc227e 100644
83300--- a/net/mac80211/mlme.c
83301+++ b/net/mac80211/mlme.c
83302@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
83303 bool have_higher_than_11mbit = false, newsta = false;
83304 u16 ap_ht_cap_flags;
83305
83306+ pax_track_stack();
83307+
83308 /*
83309 * AssocResp and ReassocResp have identical structure, so process both
83310 * of them in this function.
83311diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
83312index e535f1c..4d733d1 100644
83313--- a/net/mac80211/pm.c
83314+++ b/net/mac80211/pm.c
83315@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
83316 }
83317
83318 /* stop hardware - this must stop RX */
83319- if (local->open_count)
83320+ if (local_read(&local->open_count))
83321 ieee80211_stop_device(local);
83322
83323 local->suspended = true;
83324diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
83325index b33efc4..0a2efb6 100644
83326--- a/net/mac80211/rate.c
83327+++ b/net/mac80211/rate.c
83328@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
83329 struct rate_control_ref *ref, *old;
83330
83331 ASSERT_RTNL();
83332- if (local->open_count)
83333+ if (local_read(&local->open_count))
83334 return -EBUSY;
83335
83336 ref = rate_control_alloc(name, local);
83337diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
83338index b1d7904..57e4da7 100644
83339--- a/net/mac80211/tx.c
83340+++ b/net/mac80211/tx.c
83341@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
83342 return cpu_to_le16(dur);
83343 }
83344
83345-static int inline is_ieee80211_device(struct ieee80211_local *local,
83346+static inline int is_ieee80211_device(struct ieee80211_local *local,
83347 struct net_device *dev)
83348 {
83349 return local == wdev_priv(dev->ieee80211_ptr);
83350diff --git a/net/mac80211/util.c b/net/mac80211/util.c
83351index 31b1085..48fb26d 100644
83352--- a/net/mac80211/util.c
83353+++ b/net/mac80211/util.c
83354@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
83355 local->resuming = true;
83356
83357 /* restart hardware */
83358- if (local->open_count) {
83359+ if (local_read(&local->open_count)) {
83360 /*
83361 * Upon resume hardware can sometimes be goofy due to
83362 * various platform / driver / bus issues, so restarting
83363diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
83364index 634d14a..b35a608 100644
83365--- a/net/netfilter/Kconfig
83366+++ b/net/netfilter/Kconfig
83367@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
83368
83369 To compile it as a module, choose M here. If unsure, say N.
83370
83371+config NETFILTER_XT_MATCH_GRADM
83372+ tristate '"gradm" match support'
83373+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
83374+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
83375+ ---help---
83376+ The gradm match allows to match on grsecurity RBAC being enabled.
83377+ It is useful when iptables rules are applied early on bootup to
83378+ prevent connections to the machine (except from a trusted host)
83379+ while the RBAC system is disabled.
83380+
83381 config NETFILTER_XT_MATCH_HASHLIMIT
83382 tristate '"hashlimit" match support'
83383 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
83384diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
83385index 49f62ee..a17b2c6 100644
83386--- a/net/netfilter/Makefile
83387+++ b/net/netfilter/Makefile
83388@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
83389 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
83390 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
83391 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
83392+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
83393 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
83394 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
83395 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
83396diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
83397index 3c7e427..724043c 100644
83398--- a/net/netfilter/ipvs/ip_vs_app.c
83399+++ b/net/netfilter/ipvs/ip_vs_app.c
83400@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
83401 .open = ip_vs_app_open,
83402 .read = seq_read,
83403 .llseek = seq_lseek,
83404- .release = seq_release,
83405+ .release = seq_release_net,
83406 };
83407 #endif
83408
83409diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
83410index 95682e5..457dbac 100644
83411--- a/net/netfilter/ipvs/ip_vs_conn.c
83412+++ b/net/netfilter/ipvs/ip_vs_conn.c
83413@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
83414 /* if the connection is not template and is created
83415 * by sync, preserve the activity flag.
83416 */
83417- cp->flags |= atomic_read(&dest->conn_flags) &
83418+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
83419 (~IP_VS_CONN_F_INACTIVE);
83420 else
83421- cp->flags |= atomic_read(&dest->conn_flags);
83422+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
83423 cp->dest = dest;
83424
83425 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
83426@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
83427 atomic_set(&cp->refcnt, 1);
83428
83429 atomic_set(&cp->n_control, 0);
83430- atomic_set(&cp->in_pkts, 0);
83431+ atomic_set_unchecked(&cp->in_pkts, 0);
83432
83433 atomic_inc(&ip_vs_conn_count);
83434 if (flags & IP_VS_CONN_F_NO_CPORT)
83435@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
83436 .open = ip_vs_conn_open,
83437 .read = seq_read,
83438 .llseek = seq_lseek,
83439- .release = seq_release,
83440+ .release = seq_release_net,
83441 };
83442
83443 static const char *ip_vs_origin_name(unsigned flags)
83444@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
83445 .open = ip_vs_conn_sync_open,
83446 .read = seq_read,
83447 .llseek = seq_lseek,
83448- .release = seq_release,
83449+ .release = seq_release_net,
83450 };
83451
83452 #endif
83453@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
83454
83455 /* Don't drop the entry if its number of incoming packets is not
83456 located in [0, 8] */
83457- i = atomic_read(&cp->in_pkts);
83458+ i = atomic_read_unchecked(&cp->in_pkts);
83459 if (i > 8 || i < 0) return 0;
83460
83461 if (!todrop_rate[i]) return 0;
83462diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
83463index b95699f..5fee919 100644
83464--- a/net/netfilter/ipvs/ip_vs_core.c
83465+++ b/net/netfilter/ipvs/ip_vs_core.c
83466@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
83467 ret = cp->packet_xmit(skb, cp, pp);
83468 /* do not touch skb anymore */
83469
83470- atomic_inc(&cp->in_pkts);
83471+ atomic_inc_unchecked(&cp->in_pkts);
83472 ip_vs_conn_put(cp);
83473 return ret;
83474 }
83475@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
83476 * Sync connection if it is about to close to
83477 * encorage the standby servers to update the connections timeout
83478 */
83479- pkts = atomic_add_return(1, &cp->in_pkts);
83480+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
83481 if (af == AF_INET &&
83482 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
83483 (((cp->protocol != IPPROTO_TCP ||
83484diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
83485index 02b2610..2d89424 100644
83486--- a/net/netfilter/ipvs/ip_vs_ctl.c
83487+++ b/net/netfilter/ipvs/ip_vs_ctl.c
83488@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
83489 ip_vs_rs_hash(dest);
83490 write_unlock_bh(&__ip_vs_rs_lock);
83491 }
83492- atomic_set(&dest->conn_flags, conn_flags);
83493+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
83494
83495 /* bind the service */
83496 if (!dest->svc) {
83497@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83498 " %-7s %-6d %-10d %-10d\n",
83499 &dest->addr.in6,
83500 ntohs(dest->port),
83501- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83502+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83503 atomic_read(&dest->weight),
83504 atomic_read(&dest->activeconns),
83505 atomic_read(&dest->inactconns));
83506@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
83507 "%-7s %-6d %-10d %-10d\n",
83508 ntohl(dest->addr.ip),
83509 ntohs(dest->port),
83510- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
83511+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
83512 atomic_read(&dest->weight),
83513 atomic_read(&dest->activeconns),
83514 atomic_read(&dest->inactconns));
83515@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
83516 .open = ip_vs_info_open,
83517 .read = seq_read,
83518 .llseek = seq_lseek,
83519- .release = seq_release_private,
83520+ .release = seq_release_net,
83521 };
83522
83523 #endif
83524@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
83525 .open = ip_vs_stats_seq_open,
83526 .read = seq_read,
83527 .llseek = seq_lseek,
83528- .release = single_release,
83529+ .release = single_release_net,
83530 };
83531
83532 #endif
83533@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
83534
83535 entry.addr = dest->addr.ip;
83536 entry.port = dest->port;
83537- entry.conn_flags = atomic_read(&dest->conn_flags);
83538+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
83539 entry.weight = atomic_read(&dest->weight);
83540 entry.u_threshold = dest->u_threshold;
83541 entry.l_threshold = dest->l_threshold;
83542@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
83543 unsigned char arg[128];
83544 int ret = 0;
83545
83546+ pax_track_stack();
83547+
83548 if (!capable(CAP_NET_ADMIN))
83549 return -EPERM;
83550
83551@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
83552 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
83553
83554 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
83555- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83556+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
83557 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
83558 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
83559 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
83560diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
83561index e177f0d..55e8581 100644
83562--- a/net/netfilter/ipvs/ip_vs_sync.c
83563+++ b/net/netfilter/ipvs/ip_vs_sync.c
83564@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
83565
83566 if (opt)
83567 memcpy(&cp->in_seq, opt, sizeof(*opt));
83568- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83569+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
83570 cp->state = state;
83571 cp->old_state = cp->state;
83572 /*
83573diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
83574index 30b3189..e2e4b55 100644
83575--- a/net/netfilter/ipvs/ip_vs_xmit.c
83576+++ b/net/netfilter/ipvs/ip_vs_xmit.c
83577@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
83578 else
83579 rc = NF_ACCEPT;
83580 /* do not touch skb anymore */
83581- atomic_inc(&cp->in_pkts);
83582+ atomic_inc_unchecked(&cp->in_pkts);
83583 goto out;
83584 }
83585
83586@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
83587 else
83588 rc = NF_ACCEPT;
83589 /* do not touch skb anymore */
83590- atomic_inc(&cp->in_pkts);
83591+ atomic_inc_unchecked(&cp->in_pkts);
83592 goto out;
83593 }
83594
83595diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
83596index d521718..d0fd7a1 100644
83597--- a/net/netfilter/nf_conntrack_netlink.c
83598+++ b/net/netfilter/nf_conntrack_netlink.c
83599@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
83600 static int
83601 ctnetlink_parse_tuple(const struct nlattr * const cda[],
83602 struct nf_conntrack_tuple *tuple,
83603- enum ctattr_tuple type, u_int8_t l3num)
83604+ enum ctattr_type type, u_int8_t l3num)
83605 {
83606 struct nlattr *tb[CTA_TUPLE_MAX+1];
83607 int err;
83608diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
83609index f900dc3..5e45346 100644
83610--- a/net/netfilter/nfnetlink_log.c
83611+++ b/net/netfilter/nfnetlink_log.c
83612@@ -68,7 +68,7 @@ struct nfulnl_instance {
83613 };
83614
83615 static DEFINE_RWLOCK(instances_lock);
83616-static atomic_t global_seq;
83617+static atomic_unchecked_t global_seq;
83618
83619 #define INSTANCE_BUCKETS 16
83620 static struct hlist_head instance_table[INSTANCE_BUCKETS];
83621@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
83622 /* global sequence number */
83623 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
83624 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
83625- htonl(atomic_inc_return(&global_seq)));
83626+ htonl(atomic_inc_return_unchecked(&global_seq)));
83627
83628 if (data_len) {
83629 struct nlattr *nla;
83630diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
83631new file mode 100644
83632index 0000000..b1bac76
83633--- /dev/null
83634+++ b/net/netfilter/xt_gradm.c
83635@@ -0,0 +1,51 @@
83636+/*
83637+ * gradm match for netfilter
83638