]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9-2.6.32.58-201203131839.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-2.6.32.58-201203131839.patch
CommitLineData
dc66cfc9
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..76e689e 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -117,6 +138,7 @@ kxgettext
100 lkc_defs.h
101 lex.c
102 lex.*.c
103+lib1funcs.S
104 logo_*.c
105 logo_*_clut224.c
106 logo_*_mono.c
107@@ -127,13 +149,16 @@ machtypes.h
108 map
109 maui_boot.h
110 mconf
111+mdp
112 miboot*
113 mk_elfconfig
114 mkboot
115 mkbugboot
116 mkcpustr
117 mkdep
118+mkpiggy
119 mkprep
120+mkregtable
121 mktables
122 mktree
123 modpost
124@@ -149,6 +174,7 @@ patches*
125 pca200e.bin
126 pca200e_ecd.bin2
127 piggy.gz
128+piggy.S
129 piggyback
130 pnmtologo
131 ppc_defs.h*
132@@ -157,12 +183,15 @@ qconf
133 raid6altivec*.c
134 raid6int*.c
135 raid6tables.c
136+regdb.c
137 relocs
138+rlim_names.h
139 series
140 setup
141 setup.bin
142 setup.elf
143 sImage
144+slabinfo
145 sm_tbl*
146 split-include
147 syscalltab.h
148@@ -171,6 +200,7 @@ tftpboot.img
149 timeconst.h
150 times.h*
151 trix_boot.h
152+user_constants.h
153 utsrelease.h*
154 vdso-syms.lds
155 vdso.lds
156@@ -186,14 +216,20 @@ version.h*
157 vmlinux
158 vmlinux-*
159 vmlinux.aout
160+vmlinux.bin.all
161+vmlinux.bin.bz2
162 vmlinux.lds
163+vmlinux.relocs
164+voffset.h
165 vsyscall.lds
166 vsyscall_32.lds
167 wanxlfw.inc
168 uImage
169 unifdef
170+utsrelease.h
171 wakeup.bin
172 wakeup.elf
173 wakeup.lds
174 zImage*
175 zconf.hash.c
176+zoffset.h
177diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
178index c840e7d..f4c451c 100644
179--- a/Documentation/kernel-parameters.txt
180+++ b/Documentation/kernel-parameters.txt
181@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
182 the specified number of seconds. This is to be used if
183 your oopses keep scrolling off the screen.
184
185+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
186+ virtualization environments that don't cope well with the
187+ expand down segment used by UDEREF on X86-32 or the frequent
188+ page table updates on X86-64.
189+
190+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
191+
192 pcbit= [HW,ISDN]
193
194 pcd. [PARIDE]
195diff --git a/MAINTAINERS b/MAINTAINERS
196index 613da5d..4fe3eda 100644
197--- a/MAINTAINERS
198+++ b/MAINTAINERS
199@@ -5725,6 +5725,14 @@ L: netdev@vger.kernel.org
200 S: Maintained
201 F: drivers/net/vmxnet3/
202
203+VMware PVSCSI driver
204+M: Alok Kataria <akataria@vmware.com>
205+M: VMware PV-Drivers <pv-drivers@vmware.com>
206+L: linux-scsi@vger.kernel.org
207+S: Maintained
208+F: drivers/scsi/vmw_pvscsi.c
209+F: drivers/scsi/vmw_pvscsi.h
210+
211 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
212 M: Liam Girdwood <lrg@slimlogic.co.uk>
213 M: Mark Brown <broonie@opensource.wolfsonmicro.com>
214diff --git a/Makefile b/Makefile
215index ed78982..cb8fa66 100644
216--- a/Makefile
217+++ b/Makefile
218@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
219
220 HOSTCC = gcc
221 HOSTCXX = g++
222-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
223-HOSTCXXFLAGS = -O2
224+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
225+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
226+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
227
228 # Decide whether to build built-in, modular, or both.
229 # Normally, just do built-in.
230@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
231 # Rules shared between *config targets and build targets
232
233 # Basic helpers built in scripts/
234-PHONY += scripts_basic
235-scripts_basic:
236+PHONY += scripts_basic gcc-plugins
237+scripts_basic: gcc-plugins
238 $(Q)$(MAKE) $(build)=scripts/basic
239
240 # To avoid any implicit rule to kick in, define an empty command.
241@@ -403,7 +404,7 @@ endif
242 # of make so .config is not included in this case either (for *config).
243
244 no-dot-config-targets := clean mrproper distclean \
245- cscope TAGS tags help %docs check% \
246+ cscope gtags TAGS tags help %docs check% \
247 include/linux/version.h headers_% \
248 kernelrelease kernelversion
249
250@@ -526,6 +527,50 @@ else
251 KBUILD_CFLAGS += -O2
252 endif
253
254+ifndef DISABLE_PAX_PLUGINS
255+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
256+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
257+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
258+endif
259+ifdef CONFIG_PAX_MEMORY_STACKLEAK
260+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
261+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
262+endif
263+ifdef CONFIG_KALLOCSTAT_PLUGIN
264+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
265+endif
266+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
267+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
268+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
269+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
270+endif
271+ifdef CONFIG_CHECKER_PLUGIN
272+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
273+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
274+endif
275+endif
276+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
277+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
278+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
279+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
280+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
281+ifeq ($(KBUILD_EXTMOD),)
282+gcc-plugins:
283+ $(Q)$(MAKE) $(build)=tools/gcc
284+else
285+gcc-plugins: ;
286+endif
287+else
288+gcc-plugins:
289+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
290+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
291+else
292+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
293+endif
294+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
295+endif
296+endif
297+
298 include $(srctree)/arch/$(SRCARCH)/Makefile
299
300 ifneq ($(CONFIG_FRAME_WARN),0)
301@@ -647,7 +692,7 @@ export mod_strip_cmd
302
303
304 ifeq ($(KBUILD_EXTMOD),)
305-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
306+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
307
308 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
309 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
310@@ -868,6 +913,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
311
312 # The actual objects are generated when descending,
313 # make sure no implicit rule kicks in
314+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
315+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
316 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
317
318 # Handle descending into subdirectories listed in $(vmlinux-dirs)
319@@ -877,7 +924,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
320 # Error messages still appears in the original language
321
322 PHONY += $(vmlinux-dirs)
323-$(vmlinux-dirs): prepare scripts
324+$(vmlinux-dirs): gcc-plugins prepare scripts
325 $(Q)$(MAKE) $(build)=$@
326
327 # Build the kernel release string
328@@ -986,6 +1033,7 @@ prepare0: archprepare FORCE
329 $(Q)$(MAKE) $(build)=. missing-syscalls
330
331 # All the preparing..
332+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
333 prepare: prepare0
334
335 # The asm symlink changes when $(ARCH) changes.
336@@ -1127,6 +1175,8 @@ all: modules
337 # using awk while concatenating to the final file.
338
339 PHONY += modules
340+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
341+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
342 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
343 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
344 @$(kecho) ' Building modules, stage 2.';
345@@ -1136,7 +1186,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
346
347 # Target to prepare building external modules
348 PHONY += modules_prepare
349-modules_prepare: prepare scripts
350+modules_prepare: gcc-plugins prepare scripts
351
352 # Target to install modules
353 PHONY += modules_install
354@@ -1201,7 +1251,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
355 include/linux/autoconf.h include/linux/version.h \
356 include/linux/utsrelease.h \
357 include/linux/bounds.h include/asm*/asm-offsets.h \
358- Module.symvers Module.markers tags TAGS cscope*
359+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
360
361 # clean - Delete most, but leave enough to build external modules
362 #
363@@ -1245,7 +1295,7 @@ distclean: mrproper
364 @find $(srctree) $(RCS_FIND_IGNORE) \
365 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
366 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
367- -o -name '.*.rej' -o -size 0 \
368+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
369 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
370 -type f -print | xargs rm -f
371
372@@ -1292,6 +1342,7 @@ help:
373 @echo ' modules_prepare - Set up for building external modules'
374 @echo ' tags/TAGS - Generate tags file for editors'
375 @echo ' cscope - Generate cscope index'
376+ @echo ' gtags - Generate GNU GLOBAL index'
377 @echo ' kernelrelease - Output the release version string'
378 @echo ' kernelversion - Output the version stored in Makefile'
379 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
380@@ -1393,6 +1444,8 @@ PHONY += $(module-dirs) modules
381 $(module-dirs): crmodverdir $(objtree)/Module.symvers
382 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
383
384+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
385+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
386 modules: $(module-dirs)
387 @$(kecho) ' Building modules, stage 2.';
388 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
389@@ -1448,7 +1501,7 @@ endif # KBUILD_EXTMOD
390 quiet_cmd_tags = GEN $@
391 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
392
393-tags TAGS cscope: FORCE
394+tags TAGS cscope gtags: FORCE
395 $(call cmd,tags)
396
397 # Scripts to check various things for consistency
398@@ -1513,17 +1566,21 @@ else
399 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
400 endif
401
402-%.s: %.c prepare scripts FORCE
403+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
404+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
405+%.s: %.c gcc-plugins prepare scripts FORCE
406 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
407 %.i: %.c prepare scripts FORCE
408 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
409-%.o: %.c prepare scripts FORCE
410+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
411+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
412+%.o: %.c gcc-plugins prepare scripts FORCE
413 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
414 %.lst: %.c prepare scripts FORCE
415 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
416-%.s: %.S prepare scripts FORCE
417+%.s: %.S gcc-plugins prepare scripts FORCE
418 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
419-%.o: %.S prepare scripts FORCE
420+%.o: %.S gcc-plugins prepare scripts FORCE
421 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
422 %.symtypes: %.c prepare scripts FORCE
423 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
424@@ -1533,11 +1590,15 @@ endif
425 $(cmd_crmodverdir)
426 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
427 $(build)=$(build-dir)
428-%/: prepare scripts FORCE
429+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
430+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
431+%/: gcc-plugins prepare scripts FORCE
432 $(cmd_crmodverdir)
433 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
434 $(build)=$(build-dir)
435-%.ko: prepare scripts FORCE
436+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
437+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
438+%.ko: gcc-plugins prepare scripts FORCE
439 $(cmd_crmodverdir)
440 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
441 $(build)=$(build-dir) $(@:.ko=.o)
442diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
443index 610dff4..f396854 100644
444--- a/arch/alpha/include/asm/atomic.h
445+++ b/arch/alpha/include/asm/atomic.h
446@@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
447 #define atomic_dec(v) atomic_sub(1,(v))
448 #define atomic64_dec(v) atomic64_sub(1,(v))
449
450+#define atomic64_read_unchecked(v) atomic64_read(v)
451+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
452+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
453+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
454+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
455+#define atomic64_inc_unchecked(v) atomic64_inc(v)
456+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
457+#define atomic64_dec_unchecked(v) atomic64_dec(v)
458+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
459+
460 #define smp_mb__before_atomic_dec() smp_mb()
461 #define smp_mb__after_atomic_dec() smp_mb()
462 #define smp_mb__before_atomic_inc() smp_mb()
463diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
464index f199e69..af005f5 100644
465--- a/arch/alpha/include/asm/cache.h
466+++ b/arch/alpha/include/asm/cache.h
467@@ -4,19 +4,20 @@
468 #ifndef __ARCH_ALPHA_CACHE_H
469 #define __ARCH_ALPHA_CACHE_H
470
471+#include <linux/const.h>
472
473 /* Bytes per L1 (data) cache line. */
474 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
475-# define L1_CACHE_BYTES 64
476 # define L1_CACHE_SHIFT 6
477 #else
478 /* Both EV4 and EV5 are write-through, read-allocate,
479 direct-mapped, physical.
480 */
481-# define L1_CACHE_BYTES 32
482 # define L1_CACHE_SHIFT 5
483 #endif
484
485+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
486+
487 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
488 #define SMP_CACHE_BYTES L1_CACHE_BYTES
489
490diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
491index 5c75c1b..c82f878 100644
492--- a/arch/alpha/include/asm/elf.h
493+++ b/arch/alpha/include/asm/elf.h
494@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
495
496 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
497
498+#ifdef CONFIG_PAX_ASLR
499+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
500+
501+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
502+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
503+#endif
504+
505 /* $0 is set by ld.so to a pointer to a function which might be
506 registered using atexit. This provides a mean for the dynamic
507 linker to call DT_FINI functions for shared libraries that have
508diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
509index 3f0c59f..cf1e100 100644
510--- a/arch/alpha/include/asm/pgtable.h
511+++ b/arch/alpha/include/asm/pgtable.h
512@@ -101,6 +101,17 @@ struct vm_area_struct;
513 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
514 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
515 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
516+
517+#ifdef CONFIG_PAX_PAGEEXEC
518+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
519+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
520+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
521+#else
522+# define PAGE_SHARED_NOEXEC PAGE_SHARED
523+# define PAGE_COPY_NOEXEC PAGE_COPY
524+# define PAGE_READONLY_NOEXEC PAGE_READONLY
525+#endif
526+
527 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
528
529 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
530diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
531index ebc3c89..20cfa63 100644
532--- a/arch/alpha/kernel/module.c
533+++ b/arch/alpha/kernel/module.c
534@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
535
536 /* The small sections were sorted to the end of the segment.
537 The following should definitely cover them. */
538- gp = (u64)me->module_core + me->core_size - 0x8000;
539+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
540 got = sechdrs[me->arch.gotsecindex].sh_addr;
541
542 for (i = 0; i < n; i++) {
543diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
544index a94e49c..d71dd44 100644
545--- a/arch/alpha/kernel/osf_sys.c
546+++ b/arch/alpha/kernel/osf_sys.c
547@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
548 /* At this point: (!vma || addr < vma->vm_end). */
549 if (limit - len < addr)
550 return -ENOMEM;
551- if (!vma || addr + len <= vma->vm_start)
552+ if (check_heap_stack_gap(vma, addr, len))
553 return addr;
554 addr = vma->vm_end;
555 vma = vma->vm_next;
556@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
557 merely specific addresses, but regions of memory -- perhaps
558 this feature should be incorporated into all ports? */
559
560+#ifdef CONFIG_PAX_RANDMMAP
561+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
562+#endif
563+
564 if (addr) {
565 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
566 if (addr != (unsigned long) -ENOMEM)
567@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
568 }
569
570 /* Next, try allocating at TASK_UNMAPPED_BASE. */
571- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
572- len, limit);
573+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
574+
575 if (addr != (unsigned long) -ENOMEM)
576 return addr;
577
578diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
579index 00a31de..2ded0f2 100644
580--- a/arch/alpha/mm/fault.c
581+++ b/arch/alpha/mm/fault.c
582@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
583 __reload_thread(pcb);
584 }
585
586+#ifdef CONFIG_PAX_PAGEEXEC
587+/*
588+ * PaX: decide what to do with offenders (regs->pc = fault address)
589+ *
590+ * returns 1 when task should be killed
591+ * 2 when patched PLT trampoline was detected
592+ * 3 when unpatched PLT trampoline was detected
593+ */
594+static int pax_handle_fetch_fault(struct pt_regs *regs)
595+{
596+
597+#ifdef CONFIG_PAX_EMUPLT
598+ int err;
599+
600+ do { /* PaX: patched PLT emulation #1 */
601+ unsigned int ldah, ldq, jmp;
602+
603+ err = get_user(ldah, (unsigned int *)regs->pc);
604+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
605+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
606+
607+ if (err)
608+ break;
609+
610+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
611+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
612+ jmp == 0x6BFB0000U)
613+ {
614+ unsigned long r27, addr;
615+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
616+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
617+
618+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
619+ err = get_user(r27, (unsigned long *)addr);
620+ if (err)
621+ break;
622+
623+ regs->r27 = r27;
624+ regs->pc = r27;
625+ return 2;
626+ }
627+ } while (0);
628+
629+ do { /* PaX: patched PLT emulation #2 */
630+ unsigned int ldah, lda, br;
631+
632+ err = get_user(ldah, (unsigned int *)regs->pc);
633+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
634+ err |= get_user(br, (unsigned int *)(regs->pc+8));
635+
636+ if (err)
637+ break;
638+
639+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
640+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
641+ (br & 0xFFE00000U) == 0xC3E00000U)
642+ {
643+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
644+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
645+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
646+
647+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
648+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
649+ return 2;
650+ }
651+ } while (0);
652+
653+ do { /* PaX: unpatched PLT emulation */
654+ unsigned int br;
655+
656+ err = get_user(br, (unsigned int *)regs->pc);
657+
658+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
659+ unsigned int br2, ldq, nop, jmp;
660+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
661+
662+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
663+ err = get_user(br2, (unsigned int *)addr);
664+ err |= get_user(ldq, (unsigned int *)(addr+4));
665+ err |= get_user(nop, (unsigned int *)(addr+8));
666+ err |= get_user(jmp, (unsigned int *)(addr+12));
667+ err |= get_user(resolver, (unsigned long *)(addr+16));
668+
669+ if (err)
670+ break;
671+
672+ if (br2 == 0xC3600000U &&
673+ ldq == 0xA77B000CU &&
674+ nop == 0x47FF041FU &&
675+ jmp == 0x6B7B0000U)
676+ {
677+ regs->r28 = regs->pc+4;
678+ regs->r27 = addr+16;
679+ regs->pc = resolver;
680+ return 3;
681+ }
682+ }
683+ } while (0);
684+#endif
685+
686+ return 1;
687+}
688+
689+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
690+{
691+ unsigned long i;
692+
693+ printk(KERN_ERR "PAX: bytes at PC: ");
694+ for (i = 0; i < 5; i++) {
695+ unsigned int c;
696+ if (get_user(c, (unsigned int *)pc+i))
697+ printk(KERN_CONT "???????? ");
698+ else
699+ printk(KERN_CONT "%08x ", c);
700+ }
701+ printk("\n");
702+}
703+#endif
704
705 /*
706 * This routine handles page faults. It determines the address,
707@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
708 good_area:
709 si_code = SEGV_ACCERR;
710 if (cause < 0) {
711- if (!(vma->vm_flags & VM_EXEC))
712+ if (!(vma->vm_flags & VM_EXEC)) {
713+
714+#ifdef CONFIG_PAX_PAGEEXEC
715+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
716+ goto bad_area;
717+
718+ up_read(&mm->mmap_sem);
719+ switch (pax_handle_fetch_fault(regs)) {
720+
721+#ifdef CONFIG_PAX_EMUPLT
722+ case 2:
723+ case 3:
724+ return;
725+#endif
726+
727+ }
728+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
729+ do_group_exit(SIGKILL);
730+#else
731 goto bad_area;
732+#endif
733+
734+ }
735 } else if (!cause) {
736 /* Allow reads even for write-only mappings */
737 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
738diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
739index b68faef..6dd1496 100644
740--- a/arch/arm/Kconfig
741+++ b/arch/arm/Kconfig
742@@ -14,6 +14,7 @@ config ARM
743 select SYS_SUPPORTS_APM_EMULATION
744 select HAVE_OPROFILE
745 select HAVE_ARCH_KGDB
746+ select GENERIC_ATOMIC64
747 select HAVE_KPROBES if (!XIP_KERNEL)
748 select HAVE_KRETPROBES if (HAVE_KPROBES)
749 select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
750diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
751index d0daeab..ca7e10e 100644
752--- a/arch/arm/include/asm/atomic.h
753+++ b/arch/arm/include/asm/atomic.h
754@@ -15,6 +15,10 @@
755 #include <linux/types.h>
756 #include <asm/system.h>
757
758+#ifdef CONFIG_GENERIC_ATOMIC64
759+#include <asm-generic/atomic64.h>
760+#endif
761+
762 #define ATOMIC_INIT(i) { (i) }
763
764 #ifdef __KERNEL__
765@@ -24,8 +28,16 @@
766 * strex/ldrex monitor on some implementations. The reason we can use it for
767 * atomic_set() is the clrex or dummy strex done on every exception return.
768 */
769-#define atomic_read(v) ((v)->counter)
770+#define atomic_read(v) (*(volatile int *)&(v)->counter)
771+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
772+{
773+ return v->counter;
774+}
775 #define atomic_set(v,i) (((v)->counter) = (i))
776+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
777+{
778+ v->counter = i;
779+}
780
781 #if __LINUX_ARM_ARCH__ >= 6
782
783@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
784 int result;
785
786 __asm__ __volatile__("@ atomic_add\n"
787+"1: ldrex %1, [%2]\n"
788+" add %0, %1, %3\n"
789+
790+#ifdef CONFIG_PAX_REFCOUNT
791+" bvc 3f\n"
792+"2: bkpt 0xf103\n"
793+"3:\n"
794+#endif
795+
796+" strex %1, %0, [%2]\n"
797+" teq %1, #0\n"
798+" bne 1b"
799+
800+#ifdef CONFIG_PAX_REFCOUNT
801+"\n4:\n"
802+ _ASM_EXTABLE(2b, 4b)
803+#endif
804+
805+ : "=&r" (result), "=&r" (tmp)
806+ : "r" (&v->counter), "Ir" (i)
807+ : "cc");
808+}
809+
810+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
811+{
812+ unsigned long tmp;
813+ int result;
814+
815+ __asm__ __volatile__("@ atomic_add_unchecked\n"
816 "1: ldrex %0, [%2]\n"
817 " add %0, %0, %3\n"
818 " strex %1, %0, [%2]\n"
819@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
820 smp_mb();
821
822 __asm__ __volatile__("@ atomic_add_return\n"
823+"1: ldrex %1, [%2]\n"
824+" add %0, %1, %3\n"
825+
826+#ifdef CONFIG_PAX_REFCOUNT
827+" bvc 3f\n"
828+" mov %0, %1\n"
829+"2: bkpt 0xf103\n"
830+"3:\n"
831+#endif
832+
833+" strex %1, %0, [%2]\n"
834+" teq %1, #0\n"
835+" bne 1b"
836+
837+#ifdef CONFIG_PAX_REFCOUNT
838+"\n4:\n"
839+ _ASM_EXTABLE(2b, 4b)
840+#endif
841+
842+ : "=&r" (result), "=&r" (tmp)
843+ : "r" (&v->counter), "Ir" (i)
844+ : "cc");
845+
846+ smp_mb();
847+
848+ return result;
849+}
850+
851+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
852+{
853+ unsigned long tmp;
854+ int result;
855+
856+ smp_mb();
857+
858+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
859 "1: ldrex %0, [%2]\n"
860 " add %0, %0, %3\n"
861 " strex %1, %0, [%2]\n"
862@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
863 int result;
864
865 __asm__ __volatile__("@ atomic_sub\n"
866+"1: ldrex %1, [%2]\n"
867+" sub %0, %1, %3\n"
868+
869+#ifdef CONFIG_PAX_REFCOUNT
870+" bvc 3f\n"
871+"2: bkpt 0xf103\n"
872+"3:\n"
873+#endif
874+
875+" strex %1, %0, [%2]\n"
876+" teq %1, #0\n"
877+" bne 1b"
878+
879+#ifdef CONFIG_PAX_REFCOUNT
880+"\n4:\n"
881+ _ASM_EXTABLE(2b, 4b)
882+#endif
883+
884+ : "=&r" (result), "=&r" (tmp)
885+ : "r" (&v->counter), "Ir" (i)
886+ : "cc");
887+}
888+
889+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
890+{
891+ unsigned long tmp;
892+ int result;
893+
894+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
895 "1: ldrex %0, [%2]\n"
896 " sub %0, %0, %3\n"
897 " strex %1, %0, [%2]\n"
898@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
899 smp_mb();
900
901 __asm__ __volatile__("@ atomic_sub_return\n"
902-"1: ldrex %0, [%2]\n"
903-" sub %0, %0, %3\n"
904+"1: ldrex %1, [%2]\n"
905+" sub %0, %1, %3\n"
906+
907+#ifdef CONFIG_PAX_REFCOUNT
908+" bvc 3f\n"
909+" mov %0, %1\n"
910+"2: bkpt 0xf103\n"
911+"3:\n"
912+#endif
913+
914 " strex %1, %0, [%2]\n"
915 " teq %1, #0\n"
916 " bne 1b"
917+
918+#ifdef CONFIG_PAX_REFCOUNT
919+"\n4:\n"
920+ _ASM_EXTABLE(2b, 4b)
921+#endif
922+
923 : "=&r" (result), "=&r" (tmp)
924 : "r" (&v->counter), "Ir" (i)
925 : "cc");
926@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
927 return oldval;
928 }
929
930+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
931+{
932+ unsigned long oldval, res;
933+
934+ smp_mb();
935+
936+ do {
937+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
938+ "ldrex %1, [%2]\n"
939+ "mov %0, #0\n"
940+ "teq %1, %3\n"
941+ "strexeq %0, %4, [%2]\n"
942+ : "=&r" (res), "=&r" (oldval)
943+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
944+ : "cc");
945+ } while (res);
946+
947+ smp_mb();
948+
949+ return oldval;
950+}
951+
952 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
953 {
954 unsigned long tmp, tmp2;
955@@ -207,6 +349,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
956 #endif /* __LINUX_ARM_ARCH__ */
957
958 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
959+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
960+{
961+ return xchg(&v->counter, new);
962+}
963
964 static inline int atomic_add_unless(atomic_t *v, int a, int u)
965 {
966@@ -220,11 +366,27 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
967 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
968
969 #define atomic_inc(v) atomic_add(1, v)
970+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
971+{
972+ atomic_add_unchecked(1, v);
973+}
974 #define atomic_dec(v) atomic_sub(1, v)
975+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
976+{
977+ atomic_sub_unchecked(1, v);
978+}
979
980 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
981+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
982+{
983+ return atomic_add_return_unchecked(1, v) == 0;
984+}
985 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
986 #define atomic_inc_return(v) (atomic_add_return(1, v))
987+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
988+{
989+ return atomic_add_return_unchecked(1, v);
990+}
991 #define atomic_dec_return(v) (atomic_sub_return(1, v))
992 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
993
994diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
995index 66c160b..bca1449 100644
996--- a/arch/arm/include/asm/cache.h
997+++ b/arch/arm/include/asm/cache.h
998@@ -4,8 +4,10 @@
999 #ifndef __ASMARM_CACHE_H
1000 #define __ASMARM_CACHE_H
1001
1002+#include <linux/const.h>
1003+
1004 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1005-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1006+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1007
1008 /*
1009 * Memory returned by kmalloc() may be used for DMA, so we must make
1010diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1011index 3d0cdd2..19957c5 100644
1012--- a/arch/arm/include/asm/cacheflush.h
1013+++ b/arch/arm/include/asm/cacheflush.h
1014@@ -216,13 +216,13 @@ struct cpu_cache_fns {
1015 void (*dma_inv_range)(const void *, const void *);
1016 void (*dma_clean_range)(const void *, const void *);
1017 void (*dma_flush_range)(const void *, const void *);
1018-};
1019+} __no_const;
1020
1021 struct outer_cache_fns {
1022 void (*inv_range)(unsigned long, unsigned long);
1023 void (*clean_range)(unsigned long, unsigned long);
1024 void (*flush_range)(unsigned long, unsigned long);
1025-};
1026+} __no_const;
1027
1028 /*
1029 * Select the calling method
1030diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1031index 6aac3f5..265536b 100644
1032--- a/arch/arm/include/asm/elf.h
1033+++ b/arch/arm/include/asm/elf.h
1034@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1035 the loader. We need to make sure that it is out of the way of the program
1036 that it will "exec", and that there is sufficient room for the brk. */
1037
1038-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1039+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1040+
1041+#ifdef CONFIG_PAX_ASLR
1042+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1043+
1044+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1045+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1046+#endif
1047
1048 /* When the program starts, a1 contains a pointer to a function to be
1049 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1050diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1051index c019949..388fdd1 100644
1052--- a/arch/arm/include/asm/kmap_types.h
1053+++ b/arch/arm/include/asm/kmap_types.h
1054@@ -19,6 +19,7 @@ enum km_type {
1055 KM_SOFTIRQ0,
1056 KM_SOFTIRQ1,
1057 KM_L2_CACHE,
1058+ KM_CLEARPAGE,
1059 KM_TYPE_NR
1060 };
1061
1062diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1063index 3a32af4..c8def8a 100644
1064--- a/arch/arm/include/asm/page.h
1065+++ b/arch/arm/include/asm/page.h
1066@@ -122,7 +122,7 @@ struct cpu_user_fns {
1067 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1068 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1069 unsigned long vaddr);
1070-};
1071+} __no_const;
1072
1073 #ifdef MULTI_USER
1074 extern struct cpu_user_fns cpu_user;
1075diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1076index d65b2f5..9d87555 100644
1077--- a/arch/arm/include/asm/system.h
1078+++ b/arch/arm/include/asm/system.h
1079@@ -86,6 +86,8 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
1080
1081 #define xchg(ptr,x) \
1082 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1083+#define xchg_unchecked(ptr,x) \
1084+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1085
1086 extern asmlinkage void __backtrace(void);
1087 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1088@@ -98,7 +100,7 @@ extern int cpu_architecture(void);
1089 extern void cpu_init(void);
1090
1091 void arm_machine_restart(char mode, const char *cmd);
1092-extern void (*arm_pm_restart)(char str, const char *cmd);
1093+extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
1094
1095 #define UDBG_UNDEFINED (1 << 0)
1096 #define UDBG_SYSCALL (1 << 1)
1097@@ -505,6 +507,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1098
1099 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1100
1101+#define _ASM_EXTABLE(from, to) \
1102+" .pushsection __ex_table,\"a\"\n"\
1103+" .align 3\n" \
1104+" .long " #from ", " #to"\n" \
1105+" .popsection"
1106+
1107+
1108 #endif /* __ASSEMBLY__ */
1109
1110 #define arch_align_stack(x) (x)
1111diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1112index 1d6bd40..fba0cb9 100644
1113--- a/arch/arm/include/asm/uaccess.h
1114+++ b/arch/arm/include/asm/uaccess.h
1115@@ -22,6 +22,8 @@
1116 #define VERIFY_READ 0
1117 #define VERIFY_WRITE 1
1118
1119+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1120+
1121 /*
1122 * The exception table consists of pairs of addresses: the first is the
1123 * address of an instruction that is allowed to fault, and the second is
1124@@ -387,8 +389,23 @@ do { \
1125
1126
1127 #ifdef CONFIG_MMU
1128-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1129-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1130+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1131+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1132+
1133+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1134+{
1135+ if (!__builtin_constant_p(n))
1136+ check_object_size(to, n, false);
1137+ return ___copy_from_user(to, from, n);
1138+}
1139+
1140+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1141+{
1142+ if (!__builtin_constant_p(n))
1143+ check_object_size(from, n, true);
1144+ return ___copy_to_user(to, from, n);
1145+}
1146+
1147 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1148 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1149 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1150@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1151
1152 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1153 {
1154+ if ((long)n < 0)
1155+ return n;
1156+
1157 if (access_ok(VERIFY_READ, from, n))
1158 n = __copy_from_user(to, from, n);
1159 else /* security hole - plug it */
1160@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1161
1162 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1163 {
1164+ if ((long)n < 0)
1165+ return n;
1166+
1167 if (access_ok(VERIFY_WRITE, to, n))
1168 n = __copy_to_user(to, from, n);
1169 return n;
1170diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1171index 0e62770..e2c2cd6 100644
1172--- a/arch/arm/kernel/armksyms.c
1173+++ b/arch/arm/kernel/armksyms.c
1174@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1175 #ifdef CONFIG_MMU
1176 EXPORT_SYMBOL(copy_page);
1177
1178-EXPORT_SYMBOL(__copy_from_user);
1179-EXPORT_SYMBOL(__copy_to_user);
1180+EXPORT_SYMBOL(___copy_from_user);
1181+EXPORT_SYMBOL(___copy_to_user);
1182 EXPORT_SYMBOL(__clear_user);
1183
1184 EXPORT_SYMBOL(__get_user_1);
1185diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
1186index ba8ccfe..2dc34dc 100644
1187--- a/arch/arm/kernel/kgdb.c
1188+++ b/arch/arm/kernel/kgdb.c
1189@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
1190 * and we handle the normal undef case within the do_undefinstr
1191 * handler.
1192 */
1193-struct kgdb_arch arch_kgdb_ops = {
1194+const struct kgdb_arch arch_kgdb_ops = {
1195 #ifndef __ARMEB__
1196 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
1197 #else /* ! __ARMEB__ */
1198diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1199index 61f90d3..771ab27 100644
1200--- a/arch/arm/kernel/process.c
1201+++ b/arch/arm/kernel/process.c
1202@@ -83,7 +83,7 @@ static int __init hlt_setup(char *__unused)
1203 __setup("nohlt", nohlt_setup);
1204 __setup("hlt", hlt_setup);
1205
1206-void arm_machine_restart(char mode, const char *cmd)
1207+__noreturn void arm_machine_restart(char mode, const char *cmd)
1208 {
1209 /*
1210 * Clean and disable cache, and turn off interrupts
1211@@ -117,7 +117,7 @@ void arm_machine_restart(char mode, const char *cmd)
1212 void (*pm_power_off)(void);
1213 EXPORT_SYMBOL(pm_power_off);
1214
1215-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
1216+void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
1217 EXPORT_SYMBOL_GPL(arm_pm_restart);
1218
1219
1220@@ -195,6 +195,7 @@ __setup("reboot=", reboot_setup);
1221
1222 void machine_halt(void)
1223 {
1224+ BUG();
1225 }
1226
1227
1228@@ -202,6 +203,7 @@ void machine_power_off(void)
1229 {
1230 if (pm_power_off)
1231 pm_power_off();
1232+ BUG();
1233 }
1234
1235 void machine_restart(char *cmd)
1236diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1237index c6c57b6..0c3b29e 100644
1238--- a/arch/arm/kernel/setup.c
1239+++ b/arch/arm/kernel/setup.c
1240@@ -92,16 +92,16 @@ EXPORT_SYMBOL(elf_hwcap);
1241 struct processor processor;
1242 #endif
1243 #ifdef MULTI_TLB
1244-struct cpu_tlb_fns cpu_tlb;
1245+struct cpu_tlb_fns cpu_tlb __read_only;
1246 #endif
1247 #ifdef MULTI_USER
1248-struct cpu_user_fns cpu_user;
1249+struct cpu_user_fns cpu_user __read_only;
1250 #endif
1251 #ifdef MULTI_CACHE
1252-struct cpu_cache_fns cpu_cache;
1253+struct cpu_cache_fns cpu_cache __read_only;
1254 #endif
1255 #ifdef CONFIG_OUTER_CACHE
1256-struct outer_cache_fns outer_cache;
1257+struct outer_cache_fns outer_cache __read_only;
1258 #endif
1259
1260 struct stack {
1261diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1262index 3f361a7..6e806e1 100644
1263--- a/arch/arm/kernel/traps.c
1264+++ b/arch/arm/kernel/traps.c
1265@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
1266
1267 DEFINE_SPINLOCK(die_lock);
1268
1269+extern void gr_handle_kernel_exploit(void);
1270+
1271 /*
1272 * This function is protected against re-entrancy.
1273 */
1274@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
1275 if (panic_on_oops)
1276 panic("Fatal exception");
1277
1278+ gr_handle_kernel_exploit();
1279+
1280 do_exit(SIGSEGV);
1281 }
1282
1283diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
1284index aecf87df..bed731b 100644
1285--- a/arch/arm/kernel/vmlinux.lds.S
1286+++ b/arch/arm/kernel/vmlinux.lds.S
1287@@ -74,14 +74,18 @@ SECTIONS
1288 #ifndef CONFIG_XIP_KERNEL
1289 __init_begin = _stext;
1290 INIT_DATA
1291+ EXIT_TEXT
1292+ EXIT_DATA
1293 . = ALIGN(PAGE_SIZE);
1294 __init_end = .;
1295 #endif
1296 }
1297
1298 /DISCARD/ : { /* Exit code and data */
1299+#ifdef CONFIG_XIP_KERNEL
1300 EXIT_TEXT
1301 EXIT_DATA
1302+#endif
1303 *(.exitcall.exit)
1304 *(.discard)
1305 *(.ARM.exidx.exit.text)
1306diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1307index e4fe124..0fc246b 100644
1308--- a/arch/arm/lib/copy_from_user.S
1309+++ b/arch/arm/lib/copy_from_user.S
1310@@ -16,7 +16,7 @@
1311 /*
1312 * Prototype:
1313 *
1314- * size_t __copy_from_user(void *to, const void *from, size_t n)
1315+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1316 *
1317 * Purpose:
1318 *
1319@@ -84,11 +84,11 @@
1320
1321 .text
1322
1323-ENTRY(__copy_from_user)
1324+ENTRY(___copy_from_user)
1325
1326 #include "copy_template.S"
1327
1328-ENDPROC(__copy_from_user)
1329+ENDPROC(___copy_from_user)
1330
1331 .section .fixup,"ax"
1332 .align 0
1333diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1334index 6ee2f67..d1cce76 100644
1335--- a/arch/arm/lib/copy_page.S
1336+++ b/arch/arm/lib/copy_page.S
1337@@ -10,6 +10,7 @@
1338 * ASM optimised string functions
1339 */
1340 #include <linux/linkage.h>
1341+#include <linux/const.h>
1342 #include <asm/assembler.h>
1343 #include <asm/asm-offsets.h>
1344 #include <asm/cache.h>
1345diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1346index 1a71e15..ac7b258 100644
1347--- a/arch/arm/lib/copy_to_user.S
1348+++ b/arch/arm/lib/copy_to_user.S
1349@@ -16,7 +16,7 @@
1350 /*
1351 * Prototype:
1352 *
1353- * size_t __copy_to_user(void *to, const void *from, size_t n)
1354+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1355 *
1356 * Purpose:
1357 *
1358@@ -88,11 +88,11 @@
1359 .text
1360
1361 ENTRY(__copy_to_user_std)
1362-WEAK(__copy_to_user)
1363+WEAK(___copy_to_user)
1364
1365 #include "copy_template.S"
1366
1367-ENDPROC(__copy_to_user)
1368+ENDPROC(___copy_to_user)
1369
1370 .section .fixup,"ax"
1371 .align 0
1372diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1373index ffdd274..91017b6 100644
1374--- a/arch/arm/lib/uaccess.S
1375+++ b/arch/arm/lib/uaccess.S
1376@@ -19,7 +19,7 @@
1377
1378 #define PAGE_SHIFT 12
1379
1380-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1381+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1382 * Purpose : copy a block to user memory from kernel memory
1383 * Params : to - user memory
1384 * : from - kernel memory
1385@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
1386 sub r2, r2, ip
1387 b .Lc2u_dest_aligned
1388
1389-ENTRY(__copy_to_user)
1390+ENTRY(___copy_to_user)
1391 stmfd sp!, {r2, r4 - r7, lr}
1392 cmp r2, #4
1393 blt .Lc2u_not_enough
1394@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
1395 ldrgtb r3, [r1], #0
1396 USER( strgtbt r3, [r0], #1) @ May fault
1397 b .Lc2u_finished
1398-ENDPROC(__copy_to_user)
1399+ENDPROC(___copy_to_user)
1400
1401 .section .fixup,"ax"
1402 .align 0
1403 9001: ldmfd sp!, {r0, r4 - r7, pc}
1404 .previous
1405
1406-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1407+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1408 * Purpose : copy a block from user memory to kernel memory
1409 * Params : to - kernel memory
1410 * : from - user memory
1411@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
1412 sub r2, r2, ip
1413 b .Lcfu_dest_aligned
1414
1415-ENTRY(__copy_from_user)
1416+ENTRY(___copy_from_user)
1417 stmfd sp!, {r0, r2, r4 - r7, lr}
1418 cmp r2, #4
1419 blt .Lcfu_not_enough
1420@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
1421 USER( ldrgtbt r3, [r1], #1) @ May fault
1422 strgtb r3, [r0], #1
1423 b .Lcfu_finished
1424-ENDPROC(__copy_from_user)
1425+ENDPROC(___copy_from_user)
1426
1427 .section .fixup,"ax"
1428 .align 0
1429diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1430index 6b967ff..67d5b2b 100644
1431--- a/arch/arm/lib/uaccess_with_memcpy.c
1432+++ b/arch/arm/lib/uaccess_with_memcpy.c
1433@@ -97,7 +97,7 @@ out:
1434 }
1435
1436 unsigned long
1437-__copy_to_user(void __user *to, const void *from, unsigned long n)
1438+___copy_to_user(void __user *to, const void *from, unsigned long n)
1439 {
1440 /*
1441 * This test is stubbed out of the main function above to keep
1442diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
1443index 4028724..beec230 100644
1444--- a/arch/arm/mach-at91/pm.c
1445+++ b/arch/arm/mach-at91/pm.c
1446@@ -348,7 +348,7 @@ static void at91_pm_end(void)
1447 }
1448
1449
1450-static struct platform_suspend_ops at91_pm_ops ={
1451+static const struct platform_suspend_ops at91_pm_ops ={
1452 .valid = at91_pm_valid_state,
1453 .begin = at91_pm_begin,
1454 .enter = at91_pm_enter,
1455diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
1456index 5218943..0a34552 100644
1457--- a/arch/arm/mach-omap1/pm.c
1458+++ b/arch/arm/mach-omap1/pm.c
1459@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
1460
1461
1462
1463-static struct platform_suspend_ops omap_pm_ops ={
1464+static const struct platform_suspend_ops omap_pm_ops ={
1465 .prepare = omap_pm_prepare,
1466 .enter = omap_pm_enter,
1467 .finish = omap_pm_finish,
1468diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
1469index bff5c4e..d4c649b 100644
1470--- a/arch/arm/mach-omap2/pm24xx.c
1471+++ b/arch/arm/mach-omap2/pm24xx.c
1472@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
1473 enable_hlt();
1474 }
1475
1476-static struct platform_suspend_ops omap_pm_ops = {
1477+static const struct platform_suspend_ops omap_pm_ops = {
1478 .prepare = omap2_pm_prepare,
1479 .enter = omap2_pm_enter,
1480 .finish = omap2_pm_finish,
1481diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
1482index 8946319..7d3e661 100644
1483--- a/arch/arm/mach-omap2/pm34xx.c
1484+++ b/arch/arm/mach-omap2/pm34xx.c
1485@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
1486 return;
1487 }
1488
1489-static struct platform_suspend_ops omap_pm_ops = {
1490+static const struct platform_suspend_ops omap_pm_ops = {
1491 .begin = omap3_pm_begin,
1492 .end = omap3_pm_end,
1493 .prepare = omap3_pm_prepare,
1494diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
1495index b3d8d53..6e68ebc 100644
1496--- a/arch/arm/mach-pnx4008/pm.c
1497+++ b/arch/arm/mach-pnx4008/pm.c
1498@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
1499 (state == PM_SUSPEND_MEM);
1500 }
1501
1502-static struct platform_suspend_ops pnx4008_pm_ops = {
1503+static const struct platform_suspend_ops pnx4008_pm_ops = {
1504 .enter = pnx4008_pm_enter,
1505 .valid = pnx4008_pm_valid,
1506 };
1507diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
1508index 7693355..9beb00a 100644
1509--- a/arch/arm/mach-pxa/pm.c
1510+++ b/arch/arm/mach-pxa/pm.c
1511@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
1512 pxa_cpu_pm_fns->finish();
1513 }
1514
1515-static struct platform_suspend_ops pxa_pm_ops = {
1516+static const struct platform_suspend_ops pxa_pm_ops = {
1517 .valid = pxa_pm_valid,
1518 .enter = pxa_pm_enter,
1519 .prepare = pxa_pm_prepare,
1520diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
1521index 629e05d..06be589 100644
1522--- a/arch/arm/mach-pxa/sharpsl_pm.c
1523+++ b/arch/arm/mach-pxa/sharpsl_pm.c
1524@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1525 }
1526
1527 #ifdef CONFIG_PM
1528-static struct platform_suspend_ops sharpsl_pm_ops = {
1529+static const struct platform_suspend_ops sharpsl_pm_ops = {
1530 .prepare = pxa_pm_prepare,
1531 .finish = pxa_pm_finish,
1532 .enter = corgi_pxa_pm_enter,
1533diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1534index c83fdc8..ab9fc44 100644
1535--- a/arch/arm/mach-sa1100/pm.c
1536+++ b/arch/arm/mach-sa1100/pm.c
1537@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1538 return virt_to_phys(sp);
1539 }
1540
1541-static struct platform_suspend_ops sa11x0_pm_ops = {
1542+static const struct platform_suspend_ops sa11x0_pm_ops = {
1543 .enter = sa11x0_pm_enter,
1544 .valid = suspend_valid_only_mem,
1545 };
1546diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1547index 3191cd6..c322981 100644
1548--- a/arch/arm/mm/fault.c
1549+++ b/arch/arm/mm/fault.c
1550@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1551 }
1552 #endif
1553
1554+#ifdef CONFIG_PAX_PAGEEXEC
1555+ if (fsr & FSR_LNX_PF) {
1556+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1557+ do_group_exit(SIGKILL);
1558+ }
1559+#endif
1560+
1561 tsk->thread.address = addr;
1562 tsk->thread.error_code = fsr;
1563 tsk->thread.trap_no = 14;
1564@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1565 }
1566 #endif /* CONFIG_MMU */
1567
1568+#ifdef CONFIG_PAX_PAGEEXEC
1569+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1570+{
1571+ long i;
1572+
1573+ printk(KERN_ERR "PAX: bytes at PC: ");
1574+ for (i = 0; i < 20; i++) {
1575+ unsigned char c;
1576+ if (get_user(c, (__force unsigned char __user *)pc+i))
1577+ printk(KERN_CONT "?? ");
1578+ else
1579+ printk(KERN_CONT "%02x ", c);
1580+ }
1581+ printk("\n");
1582+
1583+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1584+ for (i = -1; i < 20; i++) {
1585+ unsigned long c;
1586+ if (get_user(c, (__force unsigned long __user *)sp+i))
1587+ printk(KERN_CONT "???????? ");
1588+ else
1589+ printk(KERN_CONT "%08lx ", c);
1590+ }
1591+ printk("\n");
1592+}
1593+#endif
1594+
1595 /*
1596 * First Level Translation Fault Handler
1597 *
1598@@ -569,6 +603,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1599 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1600 struct siginfo info;
1601
1602+#ifdef CONFIG_PAX_REFCOUNT
1603+ if (fsr_fs(ifsr) == 2) {
1604+ unsigned int bkpt;
1605+
1606+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1607+ current->thread.error_code = ifsr;
1608+ current->thread.trap_no = 0;
1609+ pax_report_refcount_overflow(regs);
1610+ fixup_exception(regs);
1611+ return;
1612+ }
1613+ }
1614+#endif
1615+
1616 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1617 return;
1618
1619diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1620index f5abc51..7ec524c 100644
1621--- a/arch/arm/mm/mmap.c
1622+++ b/arch/arm/mm/mmap.c
1623@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1624 if (len > TASK_SIZE)
1625 return -ENOMEM;
1626
1627+#ifdef CONFIG_PAX_RANDMMAP
1628+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1629+#endif
1630+
1631 if (addr) {
1632 if (do_align)
1633 addr = COLOUR_ALIGN(addr, pgoff);
1634@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1635 addr = PAGE_ALIGN(addr);
1636
1637 vma = find_vma(mm, addr);
1638- if (TASK_SIZE - len >= addr &&
1639- (!vma || addr + len <= vma->vm_start))
1640+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1641 return addr;
1642 }
1643 if (len > mm->cached_hole_size) {
1644- start_addr = addr = mm->free_area_cache;
1645+ start_addr = addr = mm->free_area_cache;
1646 } else {
1647- start_addr = addr = TASK_UNMAPPED_BASE;
1648- mm->cached_hole_size = 0;
1649+ start_addr = addr = mm->mmap_base;
1650+ mm->cached_hole_size = 0;
1651 }
1652
1653 full_search:
1654@@ -94,14 +97,14 @@ full_search:
1655 * Start a new search - just in case we missed
1656 * some holes.
1657 */
1658- if (start_addr != TASK_UNMAPPED_BASE) {
1659- start_addr = addr = TASK_UNMAPPED_BASE;
1660+ if (start_addr != mm->mmap_base) {
1661+ start_addr = addr = mm->mmap_base;
1662 mm->cached_hole_size = 0;
1663 goto full_search;
1664 }
1665 return -ENOMEM;
1666 }
1667- if (!vma || addr + len <= vma->vm_start) {
1668+ if (check_heap_stack_gap(vma, addr, len)) {
1669 /*
1670 * Remember the place where we stopped the search:
1671 */
1672diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1673index 8d97db2..b66cfa5 100644
1674--- a/arch/arm/plat-s3c/pm.c
1675+++ b/arch/arm/plat-s3c/pm.c
1676@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1677 s3c_pm_check_cleanup();
1678 }
1679
1680-static struct platform_suspend_ops s3c_pm_ops = {
1681+static const struct platform_suspend_ops s3c_pm_ops = {
1682 .enter = s3c_pm_enter,
1683 .prepare = s3c_pm_prepare,
1684 .finish = s3c_pm_finish,
1685diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1686index d3cf35a..0ba6053 100644
1687--- a/arch/avr32/include/asm/cache.h
1688+++ b/arch/avr32/include/asm/cache.h
1689@@ -1,8 +1,10 @@
1690 #ifndef __ASM_AVR32_CACHE_H
1691 #define __ASM_AVR32_CACHE_H
1692
1693+#include <linux/const.h>
1694+
1695 #define L1_CACHE_SHIFT 5
1696-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1697+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1698
1699 /*
1700 * Memory returned by kmalloc() may be used for DMA, so we must make
1701diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1702index d5d1d41..856e2ed 100644
1703--- a/arch/avr32/include/asm/elf.h
1704+++ b/arch/avr32/include/asm/elf.h
1705@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1706 the loader. We need to make sure that it is out of the way of the program
1707 that it will "exec", and that there is sufficient room for the brk. */
1708
1709-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1710+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1711
1712+#ifdef CONFIG_PAX_ASLR
1713+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1714+
1715+#define PAX_DELTA_MMAP_LEN 15
1716+#define PAX_DELTA_STACK_LEN 15
1717+#endif
1718
1719 /* This yields a mask that user programs can use to figure out what
1720 instruction set this CPU supports. This could be done in user space,
1721diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1722index b7f5c68..556135c 100644
1723--- a/arch/avr32/include/asm/kmap_types.h
1724+++ b/arch/avr32/include/asm/kmap_types.h
1725@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1726 D(11) KM_IRQ1,
1727 D(12) KM_SOFTIRQ0,
1728 D(13) KM_SOFTIRQ1,
1729-D(14) KM_TYPE_NR
1730+D(14) KM_CLEARPAGE,
1731+D(15) KM_TYPE_NR
1732 };
1733
1734 #undef D
1735diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1736index f021edf..32d680e 100644
1737--- a/arch/avr32/mach-at32ap/pm.c
1738+++ b/arch/avr32/mach-at32ap/pm.c
1739@@ -176,7 +176,7 @@ out:
1740 return 0;
1741 }
1742
1743-static struct platform_suspend_ops avr32_pm_ops = {
1744+static const struct platform_suspend_ops avr32_pm_ops = {
1745 .valid = avr32_pm_valid_state,
1746 .enter = avr32_pm_enter,
1747 };
1748diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1749index b61d86d..e292c7f 100644
1750--- a/arch/avr32/mm/fault.c
1751+++ b/arch/avr32/mm/fault.c
1752@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1753
1754 int exception_trace = 1;
1755
1756+#ifdef CONFIG_PAX_PAGEEXEC
1757+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1758+{
1759+ unsigned long i;
1760+
1761+ printk(KERN_ERR "PAX: bytes at PC: ");
1762+ for (i = 0; i < 20; i++) {
1763+ unsigned char c;
1764+ if (get_user(c, (unsigned char *)pc+i))
1765+ printk(KERN_CONT "???????? ");
1766+ else
1767+ printk(KERN_CONT "%02x ", c);
1768+ }
1769+ printk("\n");
1770+}
1771+#endif
1772+
1773 /*
1774 * This routine handles page faults. It determines the address and the
1775 * problem, and then passes it off to one of the appropriate routines.
1776@@ -157,6 +174,16 @@ bad_area:
1777 up_read(&mm->mmap_sem);
1778
1779 if (user_mode(regs)) {
1780+
1781+#ifdef CONFIG_PAX_PAGEEXEC
1782+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1783+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1784+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1785+ do_group_exit(SIGKILL);
1786+ }
1787+ }
1788+#endif
1789+
1790 if (exception_trace && printk_ratelimit())
1791 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1792 "sp %08lx ecr %lu\n",
1793diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
1794index 93f6c63..d144953 100644
1795--- a/arch/blackfin/include/asm/cache.h
1796+++ b/arch/blackfin/include/asm/cache.h
1797@@ -7,12 +7,14 @@
1798 #ifndef __ARCH_BLACKFIN_CACHE_H
1799 #define __ARCH_BLACKFIN_CACHE_H
1800
1801+#include <linux/const.h>
1802+
1803 /*
1804 * Bytes per L1 cache line
1805 * Blackfin loads 32 bytes for cache
1806 */
1807 #define L1_CACHE_SHIFT 5
1808-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1809+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1810 #define SMP_CACHE_BYTES L1_CACHE_BYTES
1811
1812 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1813diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1814index cce79d0..c406c85 100644
1815--- a/arch/blackfin/kernel/kgdb.c
1816+++ b/arch/blackfin/kernel/kgdb.c
1817@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1818 return -1; /* this means that we do not want to exit from the handler */
1819 }
1820
1821-struct kgdb_arch arch_kgdb_ops = {
1822+const struct kgdb_arch arch_kgdb_ops = {
1823 .gdb_bpt_instr = {0xa1},
1824 #ifdef CONFIG_SMP
1825 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1826diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1827index 8837be4..b2fb413 100644
1828--- a/arch/blackfin/mach-common/pm.c
1829+++ b/arch/blackfin/mach-common/pm.c
1830@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1831 return 0;
1832 }
1833
1834-struct platform_suspend_ops bfin_pm_ops = {
1835+const struct platform_suspend_ops bfin_pm_ops = {
1836 .enter = bfin_pm_enter,
1837 .valid = bfin_pm_valid,
1838 };
1839diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
1840index aea2718..3639a60 100644
1841--- a/arch/cris/include/arch-v10/arch/cache.h
1842+++ b/arch/cris/include/arch-v10/arch/cache.h
1843@@ -1,8 +1,9 @@
1844 #ifndef _ASM_ARCH_CACHE_H
1845 #define _ASM_ARCH_CACHE_H
1846
1847+#include <linux/const.h>
1848 /* Etrax 100LX have 32-byte cache-lines. */
1849-#define L1_CACHE_BYTES 32
1850 #define L1_CACHE_SHIFT 5
1851+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1852
1853 #endif /* _ASM_ARCH_CACHE_H */
1854diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
1855index dfc7305..417f5b3 100644
1856--- a/arch/cris/include/arch-v32/arch/cache.h
1857+++ b/arch/cris/include/arch-v32/arch/cache.h
1858@@ -1,11 +1,12 @@
1859 #ifndef _ASM_CRIS_ARCH_CACHE_H
1860 #define _ASM_CRIS_ARCH_CACHE_H
1861
1862+#include <linux/const.h>
1863 #include <arch/hwregs/dma.h>
1864
1865 /* A cache-line is 32 bytes. */
1866-#define L1_CACHE_BYTES 32
1867 #define L1_CACHE_SHIFT 5
1868+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1869
1870 void flush_dma_list(dma_descr_data *descr);
1871 void flush_dma_descr(dma_descr_data *descr, int flush_buf);
1872diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
1873index 00a57af..c3ef0cd 100644
1874--- a/arch/frv/include/asm/atomic.h
1875+++ b/arch/frv/include/asm/atomic.h
1876@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
1877 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
1878 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
1879
1880+#define atomic64_read_unchecked(v) atomic64_read(v)
1881+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
1882+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
1883+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
1884+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
1885+#define atomic64_inc_unchecked(v) atomic64_inc(v)
1886+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
1887+#define atomic64_dec_unchecked(v) atomic64_dec(v)
1888+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
1889+
1890 static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
1891 {
1892 int c, old;
1893diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
1894index 7dc0f0f..1e6a620 100644
1895--- a/arch/frv/include/asm/cache.h
1896+++ b/arch/frv/include/asm/cache.h
1897@@ -12,10 +12,11 @@
1898 #ifndef __ASM_CACHE_H
1899 #define __ASM_CACHE_H
1900
1901+#include <linux/const.h>
1902
1903 /* bytes per L1 cache line */
1904 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
1905-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1906+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1907
1908 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
1909
1910diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1911index f8e16b2..c73ff79 100644
1912--- a/arch/frv/include/asm/kmap_types.h
1913+++ b/arch/frv/include/asm/kmap_types.h
1914@@ -23,6 +23,7 @@ enum km_type {
1915 KM_IRQ1,
1916 KM_SOFTIRQ0,
1917 KM_SOFTIRQ1,
1918+ KM_CLEARPAGE,
1919 KM_TYPE_NR
1920 };
1921
1922diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1923index 385fd30..6c3d97e 100644
1924--- a/arch/frv/mm/elf-fdpic.c
1925+++ b/arch/frv/mm/elf-fdpic.c
1926@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1927 if (addr) {
1928 addr = PAGE_ALIGN(addr);
1929 vma = find_vma(current->mm, addr);
1930- if (TASK_SIZE - len >= addr &&
1931- (!vma || addr + len <= vma->vm_start))
1932+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1933 goto success;
1934 }
1935
1936@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1937 for (; vma; vma = vma->vm_next) {
1938 if (addr > limit)
1939 break;
1940- if (addr + len <= vma->vm_start)
1941+ if (check_heap_stack_gap(vma, addr, len))
1942 goto success;
1943 addr = vma->vm_end;
1944 }
1945@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1946 for (; vma; vma = vma->vm_next) {
1947 if (addr > limit)
1948 break;
1949- if (addr + len <= vma->vm_start)
1950+ if (check_heap_stack_gap(vma, addr, len))
1951 goto success;
1952 addr = vma->vm_end;
1953 }
1954diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
1955index c635028..6d9445a 100644
1956--- a/arch/h8300/include/asm/cache.h
1957+++ b/arch/h8300/include/asm/cache.h
1958@@ -1,8 +1,10 @@
1959 #ifndef __ARCH_H8300_CACHE_H
1960 #define __ARCH_H8300_CACHE_H
1961
1962+#include <linux/const.h>
1963+
1964 /* bytes per L1 cache line */
1965-#define L1_CACHE_BYTES 4
1966+#define L1_CACHE_BYTES _AC(4,UL)
1967
1968 /* m68k-elf-gcc 2.95.2 doesn't like these */
1969
1970diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1971index e4a80d8..11a7ea1 100644
1972--- a/arch/ia64/hp/common/hwsw_iommu.c
1973+++ b/arch/ia64/hp/common/hwsw_iommu.c
1974@@ -17,7 +17,7 @@
1975 #include <linux/swiotlb.h>
1976 #include <asm/machvec.h>
1977
1978-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1979+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1980
1981 /* swiotlb declarations & definitions: */
1982 extern int swiotlb_late_init_with_default_size (size_t size);
1983@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1984 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1985 }
1986
1987-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1988+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1989 {
1990 if (use_swiotlb(dev))
1991 return &swiotlb_dma_ops;
1992diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1993index 01ae69b..35752fd 100644
1994--- a/arch/ia64/hp/common/sba_iommu.c
1995+++ b/arch/ia64/hp/common/sba_iommu.c
1996@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1997 },
1998 };
1999
2000-extern struct dma_map_ops swiotlb_dma_ops;
2001+extern const struct dma_map_ops swiotlb_dma_ops;
2002
2003 static int __init
2004 sba_init(void)
2005@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
2006
2007 __setup("sbapagesize=",sba_page_override);
2008
2009-struct dma_map_ops sba_dma_ops = {
2010+const struct dma_map_ops sba_dma_ops = {
2011 .alloc_coherent = sba_alloc_coherent,
2012 .free_coherent = sba_free_coherent,
2013 .map_page = sba_map_page,
2014diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
2015index c69552b..c7122f4 100644
2016--- a/arch/ia64/ia32/binfmt_elf32.c
2017+++ b/arch/ia64/ia32/binfmt_elf32.c
2018@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
2019
2020 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
2021
2022+#ifdef CONFIG_PAX_ASLR
2023+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2024+
2025+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2026+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2027+#endif
2028+
2029 /* Ugly but avoids duplication */
2030 #include "../../../fs/binfmt_elf.c"
2031
2032diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
2033index 0f15349..26b3429 100644
2034--- a/arch/ia64/ia32/ia32priv.h
2035+++ b/arch/ia64/ia32/ia32priv.h
2036@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
2037 #define ELF_DATA ELFDATA2LSB
2038 #define ELF_ARCH EM_386
2039
2040-#define IA32_STACK_TOP IA32_PAGE_OFFSET
2041+#ifdef CONFIG_PAX_RANDUSTACK
2042+#define __IA32_DELTA_STACK (current->mm->delta_stack)
2043+#else
2044+#define __IA32_DELTA_STACK 0UL
2045+#endif
2046+
2047+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
2048+
2049 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
2050 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
2051
2052diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2053index 88405cb..de5ca5d 100644
2054--- a/arch/ia64/include/asm/atomic.h
2055+++ b/arch/ia64/include/asm/atomic.h
2056@@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2057 #define atomic64_inc(v) atomic64_add(1, (v))
2058 #define atomic64_dec(v) atomic64_sub(1, (v))
2059
2060+#define atomic64_read_unchecked(v) atomic64_read(v)
2061+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2062+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2063+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2064+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2065+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2066+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2067+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2068+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2069+
2070 /* Atomic operations are already serializing */
2071 #define smp_mb__before_atomic_dec() barrier()
2072 #define smp_mb__after_atomic_dec() barrier()
2073diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2074index e7482bd..d1c9b8e 100644
2075--- a/arch/ia64/include/asm/cache.h
2076+++ b/arch/ia64/include/asm/cache.h
2077@@ -1,6 +1,7 @@
2078 #ifndef _ASM_IA64_CACHE_H
2079 #define _ASM_IA64_CACHE_H
2080
2081+#include <linux/const.h>
2082
2083 /*
2084 * Copyright (C) 1998-2000 Hewlett-Packard Co
2085@@ -9,7 +10,7 @@
2086
2087 /* Bytes per L1 (data) cache line. */
2088 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2089-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2090+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2091
2092 #ifdef CONFIG_SMP
2093 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2094diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
2095index 8d3c79c..71b3af6 100644
2096--- a/arch/ia64/include/asm/dma-mapping.h
2097+++ b/arch/ia64/include/asm/dma-mapping.h
2098@@ -12,7 +12,7 @@
2099
2100 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
2101
2102-extern struct dma_map_ops *dma_ops;
2103+extern const struct dma_map_ops *dma_ops;
2104 extern struct ia64_machine_vector ia64_mv;
2105 extern void set_iommu_machvec(void);
2106
2107@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
2108 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2109 dma_addr_t *daddr, gfp_t gfp)
2110 {
2111- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2112+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2113 void *caddr;
2114
2115 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
2116@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2117 static inline void dma_free_coherent(struct device *dev, size_t size,
2118 void *caddr, dma_addr_t daddr)
2119 {
2120- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2121+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2122 debug_dma_free_coherent(dev, size, caddr, daddr);
2123 ops->free_coherent(dev, size, caddr, daddr);
2124 }
2125@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2126
2127 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
2128 {
2129- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2130+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2131 return ops->mapping_error(dev, daddr);
2132 }
2133
2134 static inline int dma_supported(struct device *dev, u64 mask)
2135 {
2136- struct dma_map_ops *ops = platform_dma_get_ops(dev);
2137+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
2138 return ops->dma_supported(dev, mask);
2139 }
2140
2141diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2142index 86eddee..b116bb4 100644
2143--- a/arch/ia64/include/asm/elf.h
2144+++ b/arch/ia64/include/asm/elf.h
2145@@ -43,6 +43,13 @@
2146 */
2147 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2148
2149+#ifdef CONFIG_PAX_ASLR
2150+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2151+
2152+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2153+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2154+#endif
2155+
2156 #define PT_IA_64_UNWIND 0x70000001
2157
2158 /* IA-64 relocations: */
2159diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
2160index 367d299..9ad4279 100644
2161--- a/arch/ia64/include/asm/machvec.h
2162+++ b/arch/ia64/include/asm/machvec.h
2163@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
2164 /* DMA-mapping interface: */
2165 typedef void ia64_mv_dma_init (void);
2166 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
2167-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2168+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
2169
2170 /*
2171 * WARNING: The legacy I/O space is _architected_. Platforms are
2172@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
2173 # endif /* CONFIG_IA64_GENERIC */
2174
2175 extern void swiotlb_dma_init(void);
2176-extern struct dma_map_ops *dma_get_ops(struct device *);
2177+extern const struct dma_map_ops *dma_get_ops(struct device *);
2178
2179 /*
2180 * Define default versions so we can extend machvec for new platforms without having
2181diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2182index 8840a69..cdb63d9 100644
2183--- a/arch/ia64/include/asm/pgtable.h
2184+++ b/arch/ia64/include/asm/pgtable.h
2185@@ -12,7 +12,7 @@
2186 * David Mosberger-Tang <davidm@hpl.hp.com>
2187 */
2188
2189-
2190+#include <linux/const.h>
2191 #include <asm/mman.h>
2192 #include <asm/page.h>
2193 #include <asm/processor.h>
2194@@ -143,6 +143,17 @@
2195 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2196 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2197 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2198+
2199+#ifdef CONFIG_PAX_PAGEEXEC
2200+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2201+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2202+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2203+#else
2204+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2205+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2206+# define PAGE_COPY_NOEXEC PAGE_COPY
2207+#endif
2208+
2209 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2210 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2211 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2212diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2213index 239ecdc..f94170e 100644
2214--- a/arch/ia64/include/asm/spinlock.h
2215+++ b/arch/ia64/include/asm/spinlock.h
2216@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
2217 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2218
2219 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2220- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2221+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2222 }
2223
2224 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
2225diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2226index 449c8c0..432a3d2 100644
2227--- a/arch/ia64/include/asm/uaccess.h
2228+++ b/arch/ia64/include/asm/uaccess.h
2229@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2230 const void *__cu_from = (from); \
2231 long __cu_len = (n); \
2232 \
2233- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2234+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2235 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2236 __cu_len; \
2237 })
2238@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2239 long __cu_len = (n); \
2240 \
2241 __chk_user_ptr(__cu_from); \
2242- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2243+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2244 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2245 __cu_len; \
2246 })
2247diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
2248index f2c1600..969398a 100644
2249--- a/arch/ia64/kernel/dma-mapping.c
2250+++ b/arch/ia64/kernel/dma-mapping.c
2251@@ -3,7 +3,7 @@
2252 /* Set this to 1 if there is a HW IOMMU in the system */
2253 int iommu_detected __read_mostly;
2254
2255-struct dma_map_ops *dma_ops;
2256+const struct dma_map_ops *dma_ops;
2257 EXPORT_SYMBOL(dma_ops);
2258
2259 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
2260@@ -16,7 +16,7 @@ static int __init dma_init(void)
2261 }
2262 fs_initcall(dma_init);
2263
2264-struct dma_map_ops *dma_get_ops(struct device *dev)
2265+const struct dma_map_ops *dma_get_ops(struct device *dev)
2266 {
2267 return dma_ops;
2268 }
2269diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2270index 1481b0a..e7d38ff 100644
2271--- a/arch/ia64/kernel/module.c
2272+++ b/arch/ia64/kernel/module.c
2273@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
2274 void
2275 module_free (struct module *mod, void *module_region)
2276 {
2277- if (mod && mod->arch.init_unw_table &&
2278- module_region == mod->module_init) {
2279+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2280 unw_remove_unwind_table(mod->arch.init_unw_table);
2281 mod->arch.init_unw_table = NULL;
2282 }
2283@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2284 }
2285
2286 static inline int
2287+in_init_rx (const struct module *mod, uint64_t addr)
2288+{
2289+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2290+}
2291+
2292+static inline int
2293+in_init_rw (const struct module *mod, uint64_t addr)
2294+{
2295+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2296+}
2297+
2298+static inline int
2299 in_init (const struct module *mod, uint64_t addr)
2300 {
2301- return addr - (uint64_t) mod->module_init < mod->init_size;
2302+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2303+}
2304+
2305+static inline int
2306+in_core_rx (const struct module *mod, uint64_t addr)
2307+{
2308+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2309+}
2310+
2311+static inline int
2312+in_core_rw (const struct module *mod, uint64_t addr)
2313+{
2314+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2315 }
2316
2317 static inline int
2318 in_core (const struct module *mod, uint64_t addr)
2319 {
2320- return addr - (uint64_t) mod->module_core < mod->core_size;
2321+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2322 }
2323
2324 static inline int
2325@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2326 break;
2327
2328 case RV_BDREL:
2329- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2330+ if (in_init_rx(mod, val))
2331+ val -= (uint64_t) mod->module_init_rx;
2332+ else if (in_init_rw(mod, val))
2333+ val -= (uint64_t) mod->module_init_rw;
2334+ else if (in_core_rx(mod, val))
2335+ val -= (uint64_t) mod->module_core_rx;
2336+ else if (in_core_rw(mod, val))
2337+ val -= (uint64_t) mod->module_core_rw;
2338 break;
2339
2340 case RV_LTV:
2341@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2342 * addresses have been selected...
2343 */
2344 uint64_t gp;
2345- if (mod->core_size > MAX_LTOFF)
2346+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2347 /*
2348 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2349 * at the end of the module.
2350 */
2351- gp = mod->core_size - MAX_LTOFF / 2;
2352+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2353 else
2354- gp = mod->core_size / 2;
2355- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2356+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2357+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2358 mod->arch.gp = gp;
2359 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2360 }
2361diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
2362index f6b1ff0..de773fb 100644
2363--- a/arch/ia64/kernel/pci-dma.c
2364+++ b/arch/ia64/kernel/pci-dma.c
2365@@ -43,7 +43,7 @@ struct device fallback_dev = {
2366 .dma_mask = &fallback_dev.coherent_dma_mask,
2367 };
2368
2369-extern struct dma_map_ops intel_dma_ops;
2370+extern const struct dma_map_ops intel_dma_ops;
2371
2372 static int __init pci_iommu_init(void)
2373 {
2374@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
2375 }
2376 EXPORT_SYMBOL(iommu_dma_supported);
2377
2378+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
2379+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
2380+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2381+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
2382+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2383+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
2384+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
2385+
2386+static const struct dma_map_ops intel_iommu_dma_ops = {
2387+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
2388+ .alloc_coherent = intel_alloc_coherent,
2389+ .free_coherent = intel_free_coherent,
2390+ .map_sg = intel_map_sg,
2391+ .unmap_sg = intel_unmap_sg,
2392+ .map_page = intel_map_page,
2393+ .unmap_page = intel_unmap_page,
2394+ .mapping_error = intel_mapping_error,
2395+
2396+ .sync_single_for_cpu = machvec_dma_sync_single,
2397+ .sync_sg_for_cpu = machvec_dma_sync_sg,
2398+ .sync_single_for_device = machvec_dma_sync_single,
2399+ .sync_sg_for_device = machvec_dma_sync_sg,
2400+ .dma_supported = iommu_dma_supported,
2401+};
2402+
2403 void __init pci_iommu_alloc(void)
2404 {
2405- dma_ops = &intel_dma_ops;
2406-
2407- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
2408- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
2409- dma_ops->sync_single_for_device = machvec_dma_sync_single;
2410- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
2411- dma_ops->dma_supported = iommu_dma_supported;
2412+ dma_ops = &intel_iommu_dma_ops;
2413
2414 /*
2415 * The order of these functions is important for
2416diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
2417index 285aae8..61dbab6 100644
2418--- a/arch/ia64/kernel/pci-swiotlb.c
2419+++ b/arch/ia64/kernel/pci-swiotlb.c
2420@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
2421 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
2422 }
2423
2424-struct dma_map_ops swiotlb_dma_ops = {
2425+const struct dma_map_ops swiotlb_dma_ops = {
2426 .alloc_coherent = ia64_swiotlb_alloc_coherent,
2427 .free_coherent = swiotlb_free_coherent,
2428 .map_page = swiotlb_map_page,
2429diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2430index 609d500..7dde2a8 100644
2431--- a/arch/ia64/kernel/sys_ia64.c
2432+++ b/arch/ia64/kernel/sys_ia64.c
2433@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2434 if (REGION_NUMBER(addr) == RGN_HPAGE)
2435 addr = 0;
2436 #endif
2437+
2438+#ifdef CONFIG_PAX_RANDMMAP
2439+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2440+ addr = mm->free_area_cache;
2441+ else
2442+#endif
2443+
2444 if (!addr)
2445 addr = mm->free_area_cache;
2446
2447@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2448 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2449 /* At this point: (!vma || addr < vma->vm_end). */
2450 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2451- if (start_addr != TASK_UNMAPPED_BASE) {
2452+ if (start_addr != mm->mmap_base) {
2453 /* Start a new search --- just in case we missed some holes. */
2454- addr = TASK_UNMAPPED_BASE;
2455+ addr = mm->mmap_base;
2456 goto full_search;
2457 }
2458 return -ENOMEM;
2459 }
2460- if (!vma || addr + len <= vma->vm_start) {
2461+ if (check_heap_stack_gap(vma, addr, len)) {
2462 /* Remember the address where we stopped this search: */
2463 mm->free_area_cache = addr + len;
2464 return addr;
2465diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
2466index 8f06035..b3a5818 100644
2467--- a/arch/ia64/kernel/topology.c
2468+++ b/arch/ia64/kernel/topology.c
2469@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
2470 return ret;
2471 }
2472
2473-static struct sysfs_ops cache_sysfs_ops = {
2474+static const struct sysfs_ops cache_sysfs_ops = {
2475 .show = cache_show
2476 };
2477
2478diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2479index 0a0c77b..8e55a81 100644
2480--- a/arch/ia64/kernel/vmlinux.lds.S
2481+++ b/arch/ia64/kernel/vmlinux.lds.S
2482@@ -190,7 +190,7 @@ SECTIONS
2483 /* Per-cpu data: */
2484 . = ALIGN(PERCPU_PAGE_SIZE);
2485 PERCPU_VADDR(PERCPU_ADDR, :percpu)
2486- __phys_per_cpu_start = __per_cpu_load;
2487+ __phys_per_cpu_start = per_cpu_load;
2488 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
2489 * into percpu page size
2490 */
2491diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2492index 19261a9..1611b7a 100644
2493--- a/arch/ia64/mm/fault.c
2494+++ b/arch/ia64/mm/fault.c
2495@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2496 return pte_present(pte);
2497 }
2498
2499+#ifdef CONFIG_PAX_PAGEEXEC
2500+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2501+{
2502+ unsigned long i;
2503+
2504+ printk(KERN_ERR "PAX: bytes at PC: ");
2505+ for (i = 0; i < 8; i++) {
2506+ unsigned int c;
2507+ if (get_user(c, (unsigned int *)pc+i))
2508+ printk(KERN_CONT "???????? ");
2509+ else
2510+ printk(KERN_CONT "%08x ", c);
2511+ }
2512+ printk("\n");
2513+}
2514+#endif
2515+
2516 void __kprobes
2517 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2518 {
2519@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2520 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2521 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2522
2523- if ((vma->vm_flags & mask) != mask)
2524+ if ((vma->vm_flags & mask) != mask) {
2525+
2526+#ifdef CONFIG_PAX_PAGEEXEC
2527+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2528+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2529+ goto bad_area;
2530+
2531+ up_read(&mm->mmap_sem);
2532+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2533+ do_group_exit(SIGKILL);
2534+ }
2535+#endif
2536+
2537 goto bad_area;
2538
2539+ }
2540+
2541 survive:
2542 /*
2543 * If for any reason at all we couldn't handle the fault, make
2544diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2545index b0f6157..a082bbc 100644
2546--- a/arch/ia64/mm/hugetlbpage.c
2547+++ b/arch/ia64/mm/hugetlbpage.c
2548@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2549 /* At this point: (!vmm || addr < vmm->vm_end). */
2550 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2551 return -ENOMEM;
2552- if (!vmm || (addr + len) <= vmm->vm_start)
2553+ if (check_heap_stack_gap(vmm, addr, len))
2554 return addr;
2555 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2556 }
2557diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2558index 1857766..05cc6a3 100644
2559--- a/arch/ia64/mm/init.c
2560+++ b/arch/ia64/mm/init.c
2561@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
2562 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2563 vma->vm_end = vma->vm_start + PAGE_SIZE;
2564 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2565+
2566+#ifdef CONFIG_PAX_PAGEEXEC
2567+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2568+ vma->vm_flags &= ~VM_EXEC;
2569+
2570+#ifdef CONFIG_PAX_MPROTECT
2571+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2572+ vma->vm_flags &= ~VM_MAYEXEC;
2573+#endif
2574+
2575+ }
2576+#endif
2577+
2578 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2579 down_write(&current->mm->mmap_sem);
2580 if (insert_vm_struct(current->mm, vma)) {
2581diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
2582index 98b6849..8046766 100644
2583--- a/arch/ia64/sn/pci/pci_dma.c
2584+++ b/arch/ia64/sn/pci/pci_dma.c
2585@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
2586 return ret;
2587 }
2588
2589-static struct dma_map_ops sn_dma_ops = {
2590+static const struct dma_map_ops sn_dma_ops = {
2591 .alloc_coherent = sn_dma_alloc_coherent,
2592 .free_coherent = sn_dma_free_coherent,
2593 .map_page = sn_dma_map_page,
2594diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2595index 40b3ee9..8c2c112 100644
2596--- a/arch/m32r/include/asm/cache.h
2597+++ b/arch/m32r/include/asm/cache.h
2598@@ -1,8 +1,10 @@
2599 #ifndef _ASM_M32R_CACHE_H
2600 #define _ASM_M32R_CACHE_H
2601
2602+#include <linux/const.h>
2603+
2604 /* L1 cache line size */
2605 #define L1_CACHE_SHIFT 4
2606-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2607+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2608
2609 #endif /* _ASM_M32R_CACHE_H */
2610diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2611index 82abd15..d95ae5d 100644
2612--- a/arch/m32r/lib/usercopy.c
2613+++ b/arch/m32r/lib/usercopy.c
2614@@ -14,6 +14,9 @@
2615 unsigned long
2616 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2617 {
2618+ if ((long)n < 0)
2619+ return n;
2620+
2621 prefetch(from);
2622 if (access_ok(VERIFY_WRITE, to, n))
2623 __copy_user(to,from,n);
2624@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2625 unsigned long
2626 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2627 {
2628+ if ((long)n < 0)
2629+ return n;
2630+
2631 prefetchw(to);
2632 if (access_ok(VERIFY_READ, from, n))
2633 __copy_user_zeroing(to,from,n);
2634diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2635index ecafbe1..432c3e4 100644
2636--- a/arch/m68k/include/asm/cache.h
2637+++ b/arch/m68k/include/asm/cache.h
2638@@ -4,9 +4,11 @@
2639 #ifndef __ARCH_M68K_CACHE_H
2640 #define __ARCH_M68K_CACHE_H
2641
2642+#include <linux/const.h>
2643+
2644 /* bytes per L1 cache line */
2645 #define L1_CACHE_SHIFT 4
2646-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2647+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2648
2649 #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
2650
2651diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2652index c209c47..2ba96e2 100644
2653--- a/arch/microblaze/include/asm/cache.h
2654+++ b/arch/microblaze/include/asm/cache.h
2655@@ -13,11 +13,12 @@
2656 #ifndef _ASM_MICROBLAZE_CACHE_H
2657 #define _ASM_MICROBLAZE_CACHE_H
2658
2659+#include <linux/const.h>
2660 #include <asm/registers.h>
2661
2662 #define L1_CACHE_SHIFT 2
2663 /* word-granular cache in microblaze */
2664-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2665+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2666
2667 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2668
2669diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
2670index fd7620f..63d73a6 100644
2671--- a/arch/mips/Kconfig
2672+++ b/arch/mips/Kconfig
2673@@ -5,6 +5,7 @@ config MIPS
2674 select HAVE_IDE
2675 select HAVE_OPROFILE
2676 select HAVE_ARCH_KGDB
2677+ select GENERIC_ATOMIC64 if !64BIT
2678 # Horrible source of confusion. Die, die, die ...
2679 select EMBEDDED
2680 select RTC_LIB if !LEMOTE_FULOONG2E
2681diff --git a/arch/mips/Makefile b/arch/mips/Makefile
2682index 77f5021..2b1db8a 100644
2683--- a/arch/mips/Makefile
2684+++ b/arch/mips/Makefile
2685@@ -51,6 +51,8 @@ endif
2686 cflags-y := -ffunction-sections
2687 cflags-y += $(call cc-option, -mno-check-zero-division)
2688
2689+cflags-y += -Wno-sign-compare -Wno-extra
2690+
2691 ifdef CONFIG_32BIT
2692 ld-emul = $(32bit-emul)
2693 vmlinux-32 = vmlinux
2694diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
2695index 632f986..fd0378d 100644
2696--- a/arch/mips/alchemy/devboards/pm.c
2697+++ b/arch/mips/alchemy/devboards/pm.c
2698@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
2699
2700 }
2701
2702-static struct platform_suspend_ops db1x_pm_ops = {
2703+static const struct platform_suspend_ops db1x_pm_ops = {
2704 .valid = suspend_valid_only_mem,
2705 .begin = db1x_pm_begin,
2706 .enter = db1x_pm_enter,
2707diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2708index 09e7128..111035b 100644
2709--- a/arch/mips/include/asm/atomic.h
2710+++ b/arch/mips/include/asm/atomic.h
2711@@ -21,6 +21,10 @@
2712 #include <asm/war.h>
2713 #include <asm/system.h>
2714
2715+#ifdef CONFIG_GENERIC_ATOMIC64
2716+#include <asm-generic/atomic64.h>
2717+#endif
2718+
2719 #define ATOMIC_INIT(i) { (i) }
2720
2721 /*
2722@@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2723 */
2724 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2725
2726+#define atomic64_read_unchecked(v) atomic64_read(v)
2727+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2728+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2729+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2730+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2731+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2732+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2733+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2734+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2735+
2736 #endif /* CONFIG_64BIT */
2737
2738 /*
2739diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2740index 37f175c..c7a3065 100644
2741--- a/arch/mips/include/asm/cache.h
2742+++ b/arch/mips/include/asm/cache.h
2743@@ -9,10 +9,11 @@
2744 #ifndef _ASM_CACHE_H
2745 #define _ASM_CACHE_H
2746
2747+#include <linux/const.h>
2748 #include <kmalloc.h>
2749
2750 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2751-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2752+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2753
2754 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2755 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2756diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2757index 7990694..4e93acf 100644
2758--- a/arch/mips/include/asm/elf.h
2759+++ b/arch/mips/include/asm/elf.h
2760@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
2761 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2762 #endif
2763
2764+#ifdef CONFIG_PAX_ASLR
2765+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2766+
2767+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2768+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2769+#endif
2770+
2771 #endif /* _ASM_ELF_H */
2772diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2773index f266295..627cfff 100644
2774--- a/arch/mips/include/asm/page.h
2775+++ b/arch/mips/include/asm/page.h
2776@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2777 #ifdef CONFIG_CPU_MIPS32
2778 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2779 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2780- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2781+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2782 #else
2783 typedef struct { unsigned long long pte; } pte_t;
2784 #define pte_val(x) ((x).pte)
2785diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
2786index e48c0bf..f3acf65 100644
2787--- a/arch/mips/include/asm/reboot.h
2788+++ b/arch/mips/include/asm/reboot.h
2789@@ -9,7 +9,7 @@
2790 #ifndef _ASM_REBOOT_H
2791 #define _ASM_REBOOT_H
2792
2793-extern void (*_machine_restart)(char *command);
2794-extern void (*_machine_halt)(void);
2795+extern void (*__noreturn _machine_restart)(char *command);
2796+extern void (*__noreturn _machine_halt)(void);
2797
2798 #endif /* _ASM_REBOOT_H */
2799diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2800index 83b5509..9fa24a23 100644
2801--- a/arch/mips/include/asm/system.h
2802+++ b/arch/mips/include/asm/system.h
2803@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2804 */
2805 #define __ARCH_WANT_UNLOCKED_CTXSW
2806
2807-extern unsigned long arch_align_stack(unsigned long sp);
2808+#define arch_align_stack(x) ((x) & ~0xfUL)
2809
2810 #endif /* _ASM_SYSTEM_H */
2811diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2812index 9fdd8bc..fcf9d68 100644
2813--- a/arch/mips/kernel/binfmt_elfn32.c
2814+++ b/arch/mips/kernel/binfmt_elfn32.c
2815@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2816 #undef ELF_ET_DYN_BASE
2817 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2818
2819+#ifdef CONFIG_PAX_ASLR
2820+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2821+
2822+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2823+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2824+#endif
2825+
2826 #include <asm/processor.h>
2827 #include <linux/module.h>
2828 #include <linux/elfcore.h>
2829diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2830index ff44823..cf0b48a 100644
2831--- a/arch/mips/kernel/binfmt_elfo32.c
2832+++ b/arch/mips/kernel/binfmt_elfo32.c
2833@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2834 #undef ELF_ET_DYN_BASE
2835 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2836
2837+#ifdef CONFIG_PAX_ASLR
2838+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2839+
2840+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2841+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2842+#endif
2843+
2844 #include <asm/processor.h>
2845
2846 /*
2847diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2848index 50c9bb8..efdd5f8 100644
2849--- a/arch/mips/kernel/kgdb.c
2850+++ b/arch/mips/kernel/kgdb.c
2851@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2852 return -1;
2853 }
2854
2855+/* cannot be const */
2856 struct kgdb_arch arch_kgdb_ops;
2857
2858 /*
2859diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2860index f3d73e1..bb3f57a 100644
2861--- a/arch/mips/kernel/process.c
2862+++ b/arch/mips/kernel/process.c
2863@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2864 out:
2865 return pc;
2866 }
2867-
2868-/*
2869- * Don't forget that the stack pointer must be aligned on a 8 bytes
2870- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2871- */
2872-unsigned long arch_align_stack(unsigned long sp)
2873-{
2874- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2875- sp -= get_random_int() & ~PAGE_MASK;
2876-
2877- return sp & ALMASK;
2878-}
2879diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2880index 060563a..7fbf310 100644
2881--- a/arch/mips/kernel/reset.c
2882+++ b/arch/mips/kernel/reset.c
2883@@ -19,8 +19,8 @@
2884 * So handle all using function pointers to machine specific
2885 * functions.
2886 */
2887-void (*_machine_restart)(char *command);
2888-void (*_machine_halt)(void);
2889+void (*__noreturn _machine_restart)(char *command);
2890+void (*__noreturn _machine_halt)(void);
2891 void (*pm_power_off)(void);
2892
2893 EXPORT_SYMBOL(pm_power_off);
2894@@ -29,16 +29,19 @@ void machine_restart(char *command)
2895 {
2896 if (_machine_restart)
2897 _machine_restart(command);
2898+ BUG();
2899 }
2900
2901 void machine_halt(void)
2902 {
2903 if (_machine_halt)
2904 _machine_halt();
2905+ BUG();
2906 }
2907
2908 void machine_power_off(void)
2909 {
2910 if (pm_power_off)
2911 pm_power_off();
2912+ BUG();
2913 }
2914diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2915index 3f7f466..3abe0b5 100644
2916--- a/arch/mips/kernel/syscall.c
2917+++ b/arch/mips/kernel/syscall.c
2918@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2919 do_color_align = 0;
2920 if (filp || (flags & MAP_SHARED))
2921 do_color_align = 1;
2922+
2923+#ifdef CONFIG_PAX_RANDMMAP
2924+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2925+#endif
2926+
2927 if (addr) {
2928 if (do_color_align)
2929 addr = COLOUR_ALIGN(addr, pgoff);
2930 else
2931 addr = PAGE_ALIGN(addr);
2932 vmm = find_vma(current->mm, addr);
2933- if (task_size - len >= addr &&
2934- (!vmm || addr + len <= vmm->vm_start))
2935+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2936 return addr;
2937 }
2938- addr = TASK_UNMAPPED_BASE;
2939+ addr = current->mm->mmap_base;
2940 if (do_color_align)
2941 addr = COLOUR_ALIGN(addr, pgoff);
2942 else
2943@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2944 /* At this point: (!vmm || addr < vmm->vm_end). */
2945 if (task_size - len < addr)
2946 return -ENOMEM;
2947- if (!vmm || addr + len <= vmm->vm_start)
2948+ if (check_heap_stack_gap(vmm, addr, len))
2949 return addr;
2950 addr = vmm->vm_end;
2951 if (do_color_align)
2952diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2953index e97a7a2..f18f5b0 100644
2954--- a/arch/mips/mm/fault.c
2955+++ b/arch/mips/mm/fault.c
2956@@ -26,6 +26,23 @@
2957 #include <asm/ptrace.h>
2958 #include <asm/highmem.h> /* For VMALLOC_END */
2959
2960+#ifdef CONFIG_PAX_PAGEEXEC
2961+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2962+{
2963+ unsigned long i;
2964+
2965+ printk(KERN_ERR "PAX: bytes at PC: ");
2966+ for (i = 0; i < 5; i++) {
2967+ unsigned int c;
2968+ if (get_user(c, (unsigned int *)pc+i))
2969+ printk(KERN_CONT "???????? ");
2970+ else
2971+ printk(KERN_CONT "%08x ", c);
2972+ }
2973+ printk("\n");
2974+}
2975+#endif
2976+
2977 /*
2978 * This routine handles page faults. It determines the address,
2979 * and the problem, and then passes it off to one of the appropriate
2980diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2981index bdc1f9a..e8de5c5 100644
2982--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2983+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2984@@ -11,12 +11,14 @@
2985 #ifndef _ASM_PROC_CACHE_H
2986 #define _ASM_PROC_CACHE_H
2987
2988+#include <linux/const.h>
2989+
2990 /* L1 cache */
2991
2992 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2993 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2994-#define L1_CACHE_BYTES 16 /* bytes per entry */
2995 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2996+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2997 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2998
2999 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3000diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3001index 8bc9e96..26554f8 100644
3002--- a/arch/parisc/include/asm/atomic.h
3003+++ b/arch/parisc/include/asm/atomic.h
3004@@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3005
3006 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3007
3008+#define atomic64_read_unchecked(v) atomic64_read(v)
3009+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3010+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3011+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3012+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3013+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3014+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3015+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3016+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3017+
3018 #else /* CONFIG_64BIT */
3019
3020 #include <asm-generic/atomic64.h>
3021diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3022index 32c2cca..a7b3a64 100644
3023--- a/arch/parisc/include/asm/cache.h
3024+++ b/arch/parisc/include/asm/cache.h
3025@@ -5,6 +5,7 @@
3026 #ifndef __ARCH_PARISC_CACHE_H
3027 #define __ARCH_PARISC_CACHE_H
3028
3029+#include <linux/const.h>
3030
3031 /*
3032 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3033@@ -15,13 +16,13 @@
3034 * just ruin performance.
3035 */
3036 #ifdef CONFIG_PA20
3037-#define L1_CACHE_BYTES 64
3038 #define L1_CACHE_SHIFT 6
3039 #else
3040-#define L1_CACHE_BYTES 32
3041 #define L1_CACHE_SHIFT 5
3042 #endif
3043
3044+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3045+
3046 #ifndef __ASSEMBLY__
3047
3048 #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
3049diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3050index 9c802eb..0592e41 100644
3051--- a/arch/parisc/include/asm/elf.h
3052+++ b/arch/parisc/include/asm/elf.h
3053@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
3054
3055 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3056
3057+#ifdef CONFIG_PAX_ASLR
3058+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3059+
3060+#define PAX_DELTA_MMAP_LEN 16
3061+#define PAX_DELTA_STACK_LEN 16
3062+#endif
3063+
3064 /* This yields a mask that user programs can use to figure out what
3065 instruction set this CPU supports. This could be done in user space,
3066 but it's not easy, and we've already done it here. */
3067diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3068index a27d2e2..18fd845 100644
3069--- a/arch/parisc/include/asm/pgtable.h
3070+++ b/arch/parisc/include/asm/pgtable.h
3071@@ -207,6 +207,17 @@
3072 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3073 #define PAGE_COPY PAGE_EXECREAD
3074 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3075+
3076+#ifdef CONFIG_PAX_PAGEEXEC
3077+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3078+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3079+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3080+#else
3081+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3082+# define PAGE_COPY_NOEXEC PAGE_COPY
3083+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3084+#endif
3085+
3086 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3087 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
3088 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
3089diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3090index 2120746..8d70a5e 100644
3091--- a/arch/parisc/kernel/module.c
3092+++ b/arch/parisc/kernel/module.c
3093@@ -95,16 +95,38 @@
3094
3095 /* three functions to determine where in the module core
3096 * or init pieces the location is */
3097+static inline int in_init_rx(struct module *me, void *loc)
3098+{
3099+ return (loc >= me->module_init_rx &&
3100+ loc < (me->module_init_rx + me->init_size_rx));
3101+}
3102+
3103+static inline int in_init_rw(struct module *me, void *loc)
3104+{
3105+ return (loc >= me->module_init_rw &&
3106+ loc < (me->module_init_rw + me->init_size_rw));
3107+}
3108+
3109 static inline int in_init(struct module *me, void *loc)
3110 {
3111- return (loc >= me->module_init &&
3112- loc <= (me->module_init + me->init_size));
3113+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3114+}
3115+
3116+static inline int in_core_rx(struct module *me, void *loc)
3117+{
3118+ return (loc >= me->module_core_rx &&
3119+ loc < (me->module_core_rx + me->core_size_rx));
3120+}
3121+
3122+static inline int in_core_rw(struct module *me, void *loc)
3123+{
3124+ return (loc >= me->module_core_rw &&
3125+ loc < (me->module_core_rw + me->core_size_rw));
3126 }
3127
3128 static inline int in_core(struct module *me, void *loc)
3129 {
3130- return (loc >= me->module_core &&
3131- loc <= (me->module_core + me->core_size));
3132+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3133 }
3134
3135 static inline int in_local(struct module *me, void *loc)
3136@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3137 }
3138
3139 /* align things a bit */
3140- me->core_size = ALIGN(me->core_size, 16);
3141- me->arch.got_offset = me->core_size;
3142- me->core_size += gots * sizeof(struct got_entry);
3143+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3144+ me->arch.got_offset = me->core_size_rw;
3145+ me->core_size_rw += gots * sizeof(struct got_entry);
3146
3147- me->core_size = ALIGN(me->core_size, 16);
3148- me->arch.fdesc_offset = me->core_size;
3149- me->core_size += fdescs * sizeof(Elf_Fdesc);
3150+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3151+ me->arch.fdesc_offset = me->core_size_rw;
3152+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3153
3154 me->arch.got_max = gots;
3155 me->arch.fdesc_max = fdescs;
3156@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3157
3158 BUG_ON(value == 0);
3159
3160- got = me->module_core + me->arch.got_offset;
3161+ got = me->module_core_rw + me->arch.got_offset;
3162 for (i = 0; got[i].addr; i++)
3163 if (got[i].addr == value)
3164 goto out;
3165@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3166 #ifdef CONFIG_64BIT
3167 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3168 {
3169- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3170+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3171
3172 if (!value) {
3173 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3174@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3175
3176 /* Create new one */
3177 fdesc->addr = value;
3178- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3179+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3180 return (Elf_Addr)fdesc;
3181 }
3182 #endif /* CONFIG_64BIT */
3183@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
3184
3185 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3186 end = table + sechdrs[me->arch.unwind_section].sh_size;
3187- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3188+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3189
3190 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3191 me->arch.unwind_section, table, end, gp);
3192diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3193index 9147391..f3d949a 100644
3194--- a/arch/parisc/kernel/sys_parisc.c
3195+++ b/arch/parisc/kernel/sys_parisc.c
3196@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3197 /* At this point: (!vma || addr < vma->vm_end). */
3198 if (TASK_SIZE - len < addr)
3199 return -ENOMEM;
3200- if (!vma || addr + len <= vma->vm_start)
3201+ if (check_heap_stack_gap(vma, addr, len))
3202 return addr;
3203 addr = vma->vm_end;
3204 }
3205@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3206 /* At this point: (!vma || addr < vma->vm_end). */
3207 if (TASK_SIZE - len < addr)
3208 return -ENOMEM;
3209- if (!vma || addr + len <= vma->vm_start)
3210+ if (check_heap_stack_gap(vma, addr, len))
3211 return addr;
3212 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3213 if (addr < vma->vm_end) /* handle wraparound */
3214@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3215 if (flags & MAP_FIXED)
3216 return addr;
3217 if (!addr)
3218- addr = TASK_UNMAPPED_BASE;
3219+ addr = current->mm->mmap_base;
3220
3221 if (filp) {
3222 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3223diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3224index 8b58bf0..7afff03 100644
3225--- a/arch/parisc/kernel/traps.c
3226+++ b/arch/parisc/kernel/traps.c
3227@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3228
3229 down_read(&current->mm->mmap_sem);
3230 vma = find_vma(current->mm,regs->iaoq[0]);
3231- if (vma && (regs->iaoq[0] >= vma->vm_start)
3232- && (vma->vm_flags & VM_EXEC)) {
3233-
3234+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3235 fault_address = regs->iaoq[0];
3236 fault_space = regs->iasq[0];
3237
3238diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3239index c6afbfc..c5839f6 100644
3240--- a/arch/parisc/mm/fault.c
3241+++ b/arch/parisc/mm/fault.c
3242@@ -15,6 +15,7 @@
3243 #include <linux/sched.h>
3244 #include <linux/interrupt.h>
3245 #include <linux/module.h>
3246+#include <linux/unistd.h>
3247
3248 #include <asm/uaccess.h>
3249 #include <asm/traps.h>
3250@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3251 static unsigned long
3252 parisc_acctyp(unsigned long code, unsigned int inst)
3253 {
3254- if (code == 6 || code == 16)
3255+ if (code == 6 || code == 7 || code == 16)
3256 return VM_EXEC;
3257
3258 switch (inst & 0xf0000000) {
3259@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3260 }
3261 #endif
3262
3263+#ifdef CONFIG_PAX_PAGEEXEC
3264+/*
3265+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3266+ *
3267+ * returns 1 when task should be killed
3268+ * 2 when rt_sigreturn trampoline was detected
3269+ * 3 when unpatched PLT trampoline was detected
3270+ */
3271+static int pax_handle_fetch_fault(struct pt_regs *regs)
3272+{
3273+
3274+#ifdef CONFIG_PAX_EMUPLT
3275+ int err;
3276+
3277+ do { /* PaX: unpatched PLT emulation */
3278+ unsigned int bl, depwi;
3279+
3280+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3281+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3282+
3283+ if (err)
3284+ break;
3285+
3286+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3287+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3288+
3289+ err = get_user(ldw, (unsigned int *)addr);
3290+ err |= get_user(bv, (unsigned int *)(addr+4));
3291+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3292+
3293+ if (err)
3294+ break;
3295+
3296+ if (ldw == 0x0E801096U &&
3297+ bv == 0xEAC0C000U &&
3298+ ldw2 == 0x0E881095U)
3299+ {
3300+ unsigned int resolver, map;
3301+
3302+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3303+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3304+ if (err)
3305+ break;
3306+
3307+ regs->gr[20] = instruction_pointer(regs)+8;
3308+ regs->gr[21] = map;
3309+ regs->gr[22] = resolver;
3310+ regs->iaoq[0] = resolver | 3UL;
3311+ regs->iaoq[1] = regs->iaoq[0] + 4;
3312+ return 3;
3313+ }
3314+ }
3315+ } while (0);
3316+#endif
3317+
3318+#ifdef CONFIG_PAX_EMUTRAMP
3319+
3320+#ifndef CONFIG_PAX_EMUSIGRT
3321+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3322+ return 1;
3323+#endif
3324+
3325+ do { /* PaX: rt_sigreturn emulation */
3326+ unsigned int ldi1, ldi2, bel, nop;
3327+
3328+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3329+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3330+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3331+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3332+
3333+ if (err)
3334+ break;
3335+
3336+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3337+ ldi2 == 0x3414015AU &&
3338+ bel == 0xE4008200U &&
3339+ nop == 0x08000240U)
3340+ {
3341+ regs->gr[25] = (ldi1 & 2) >> 1;
3342+ regs->gr[20] = __NR_rt_sigreturn;
3343+ regs->gr[31] = regs->iaoq[1] + 16;
3344+ regs->sr[0] = regs->iasq[1];
3345+ regs->iaoq[0] = 0x100UL;
3346+ regs->iaoq[1] = regs->iaoq[0] + 4;
3347+ regs->iasq[0] = regs->sr[2];
3348+ regs->iasq[1] = regs->sr[2];
3349+ return 2;
3350+ }
3351+ } while (0);
3352+#endif
3353+
3354+ return 1;
3355+}
3356+
3357+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3358+{
3359+ unsigned long i;
3360+
3361+ printk(KERN_ERR "PAX: bytes at PC: ");
3362+ for (i = 0; i < 5; i++) {
3363+ unsigned int c;
3364+ if (get_user(c, (unsigned int *)pc+i))
3365+ printk(KERN_CONT "???????? ");
3366+ else
3367+ printk(KERN_CONT "%08x ", c);
3368+ }
3369+ printk("\n");
3370+}
3371+#endif
3372+
3373 int fixup_exception(struct pt_regs *regs)
3374 {
3375 const struct exception_table_entry *fix;
3376@@ -192,8 +303,33 @@ good_area:
3377
3378 acc_type = parisc_acctyp(code,regs->iir);
3379
3380- if ((vma->vm_flags & acc_type) != acc_type)
3381+ if ((vma->vm_flags & acc_type) != acc_type) {
3382+
3383+#ifdef CONFIG_PAX_PAGEEXEC
3384+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3385+ (address & ~3UL) == instruction_pointer(regs))
3386+ {
3387+ up_read(&mm->mmap_sem);
3388+ switch (pax_handle_fetch_fault(regs)) {
3389+
3390+#ifdef CONFIG_PAX_EMUPLT
3391+ case 3:
3392+ return;
3393+#endif
3394+
3395+#ifdef CONFIG_PAX_EMUTRAMP
3396+ case 2:
3397+ return;
3398+#endif
3399+
3400+ }
3401+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3402+ do_group_exit(SIGKILL);
3403+ }
3404+#endif
3405+
3406 goto bad_area;
3407+ }
3408
3409 /*
3410 * If for any reason at all we couldn't handle the fault, make
3411diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
3412index c107b74..409dc0f 100644
3413--- a/arch/powerpc/Makefile
3414+++ b/arch/powerpc/Makefile
3415@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
3416 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
3417 CPP = $(CC) -E $(KBUILD_CFLAGS)
3418
3419+cflags-y += -Wno-sign-compare -Wno-extra
3420+
3421 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
3422
3423 ifeq ($(CONFIG_PPC64),y)
3424diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3425index 81de6eb..d5d0e24 100644
3426--- a/arch/powerpc/include/asm/cache.h
3427+++ b/arch/powerpc/include/asm/cache.h
3428@@ -3,6 +3,7 @@
3429
3430 #ifdef __KERNEL__
3431
3432+#include <linux/const.h>
3433
3434 /* bytes per L1 cache line */
3435 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3436@@ -18,7 +19,7 @@
3437 #define L1_CACHE_SHIFT 7
3438 #endif
3439
3440-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3441+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3442
3443 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3444
3445diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
3446index 6d94d27..50d4cad 100644
3447--- a/arch/powerpc/include/asm/device.h
3448+++ b/arch/powerpc/include/asm/device.h
3449@@ -14,7 +14,7 @@ struct dev_archdata {
3450 struct device_node *of_node;
3451
3452 /* DMA operations on that device */
3453- struct dma_map_ops *dma_ops;
3454+ const struct dma_map_ops *dma_ops;
3455
3456 /*
3457 * When an iommu is in use, dma_data is used as a ptr to the base of the
3458diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
3459index e281dae..2b8a784 100644
3460--- a/arch/powerpc/include/asm/dma-mapping.h
3461+++ b/arch/powerpc/include/asm/dma-mapping.h
3462@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
3463 #ifdef CONFIG_PPC64
3464 extern struct dma_map_ops dma_iommu_ops;
3465 #endif
3466-extern struct dma_map_ops dma_direct_ops;
3467+extern const struct dma_map_ops dma_direct_ops;
3468
3469-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3470+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3471 {
3472 /* We don't handle the NULL dev case for ISA for now. We could
3473 * do it via an out of line call but it is not needed for now. The
3474@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3475 return dev->archdata.dma_ops;
3476 }
3477
3478-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
3479+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
3480 {
3481 dev->archdata.dma_ops = ops;
3482 }
3483@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
3484
3485 static inline int dma_supported(struct device *dev, u64 mask)
3486 {
3487- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3488+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3489
3490 if (unlikely(dma_ops == NULL))
3491 return 0;
3492@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
3493
3494 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3495 {
3496- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3497+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3498
3499 if (unlikely(dma_ops == NULL))
3500 return -EIO;
3501@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
3502 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3503 dma_addr_t *dma_handle, gfp_t flag)
3504 {
3505- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3506+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3507 void *cpu_addr;
3508
3509 BUG_ON(!dma_ops);
3510@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3511 static inline void dma_free_coherent(struct device *dev, size_t size,
3512 void *cpu_addr, dma_addr_t dma_handle)
3513 {
3514- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3515+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3516
3517 BUG_ON(!dma_ops);
3518
3519@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
3520
3521 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
3522 {
3523- struct dma_map_ops *dma_ops = get_dma_ops(dev);
3524+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
3525
3526 if (dma_ops->mapping_error)
3527 return dma_ops->mapping_error(dev, dma_addr);
3528diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3529index 5698502..5db093c 100644
3530--- a/arch/powerpc/include/asm/elf.h
3531+++ b/arch/powerpc/include/asm/elf.h
3532@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3533 the loader. We need to make sure that it is out of the way of the program
3534 that it will "exec", and that there is sufficient room for the brk. */
3535
3536-extern unsigned long randomize_et_dyn(unsigned long base);
3537-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3538+#define ELF_ET_DYN_BASE (0x20000000)
3539+
3540+#ifdef CONFIG_PAX_ASLR
3541+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3542+
3543+#ifdef __powerpc64__
3544+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3545+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
3546+#else
3547+#define PAX_DELTA_MMAP_LEN 15
3548+#define PAX_DELTA_STACK_LEN 15
3549+#endif
3550+#endif
3551
3552 /*
3553 * Our registers are always unsigned longs, whether we're a 32 bit
3554@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3555 (0x7ff >> (PAGE_SHIFT - 12)) : \
3556 (0x3ffff >> (PAGE_SHIFT - 12)))
3557
3558-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3559-#define arch_randomize_brk arch_randomize_brk
3560-
3561 #endif /* __KERNEL__ */
3562
3563 /*
3564diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
3565index edfc980..1766f59 100644
3566--- a/arch/powerpc/include/asm/iommu.h
3567+++ b/arch/powerpc/include/asm/iommu.h
3568@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
3569 extern void iommu_init_early_dart(void);
3570 extern void iommu_init_early_pasemi(void);
3571
3572+/* dma-iommu.c */
3573+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
3574+
3575 #ifdef CONFIG_PCI
3576 extern void pci_iommu_init(void);
3577 extern void pci_direct_iommu_init(void);
3578diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3579index 9163695..5a00112 100644
3580--- a/arch/powerpc/include/asm/kmap_types.h
3581+++ b/arch/powerpc/include/asm/kmap_types.h
3582@@ -26,6 +26,7 @@ enum km_type {
3583 KM_SOFTIRQ1,
3584 KM_PPC_SYNC_PAGE,
3585 KM_PPC_SYNC_ICACHE,
3586+ KM_CLEARPAGE,
3587 KM_TYPE_NR
3588 };
3589
3590diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3591index ff24254..fe45b21 100644
3592--- a/arch/powerpc/include/asm/page.h
3593+++ b/arch/powerpc/include/asm/page.h
3594@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
3595 * and needs to be executable. This means the whole heap ends
3596 * up being executable.
3597 */
3598-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3599- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3600+#define VM_DATA_DEFAULT_FLAGS32 \
3601+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3602+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3603
3604 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3605 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3606@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
3607 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3608 #endif
3609
3610+#define ktla_ktva(addr) (addr)
3611+#define ktva_ktla(addr) (addr)
3612+
3613 #ifndef __ASSEMBLY__
3614
3615 #undef STRICT_MM_TYPECHECKS
3616diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3617index 3f17b83..1f9e766 100644
3618--- a/arch/powerpc/include/asm/page_64.h
3619+++ b/arch/powerpc/include/asm/page_64.h
3620@@ -180,15 +180,18 @@ do { \
3621 * stack by default, so in the absense of a PT_GNU_STACK program header
3622 * we turn execute permission off.
3623 */
3624-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3625- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3626+#define VM_STACK_DEFAULT_FLAGS32 \
3627+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3628+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3629
3630 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3631 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3632
3633+#ifndef CONFIG_PAX_PAGEEXEC
3634 #define VM_STACK_DEFAULT_FLAGS \
3635 (test_thread_flag(TIF_32BIT) ? \
3636 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3637+#endif
3638
3639 #include <asm-generic/getorder.h>
3640
3641diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
3642index b5ea626..40308222 100644
3643--- a/arch/powerpc/include/asm/pci.h
3644+++ b/arch/powerpc/include/asm/pci.h
3645@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
3646 }
3647
3648 #ifdef CONFIG_PCI
3649-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
3650-extern struct dma_map_ops *get_pci_dma_ops(void);
3651+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
3652+extern const struct dma_map_ops *get_pci_dma_ops(void);
3653 #else /* CONFIG_PCI */
3654 #define set_pci_dma_ops(d)
3655 #define get_pci_dma_ops() NULL
3656diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3657index 2a5da06..d65bea2 100644
3658--- a/arch/powerpc/include/asm/pgtable.h
3659+++ b/arch/powerpc/include/asm/pgtable.h
3660@@ -2,6 +2,7 @@
3661 #define _ASM_POWERPC_PGTABLE_H
3662 #ifdef __KERNEL__
3663
3664+#include <linux/const.h>
3665 #ifndef __ASSEMBLY__
3666 #include <asm/processor.h> /* For TASK_SIZE */
3667 #include <asm/mmu.h>
3668diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3669index 4aad413..85d86bf 100644
3670--- a/arch/powerpc/include/asm/pte-hash32.h
3671+++ b/arch/powerpc/include/asm/pte-hash32.h
3672@@ -21,6 +21,7 @@
3673 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3674 #define _PAGE_USER 0x004 /* usermode access allowed */
3675 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3676+#define _PAGE_EXEC _PAGE_GUARDED
3677 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3678 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3679 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3680diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
3681index 8c34149..78f425a 100644
3682--- a/arch/powerpc/include/asm/ptrace.h
3683+++ b/arch/powerpc/include/asm/ptrace.h
3684@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
3685 } while(0)
3686
3687 struct task_struct;
3688-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
3689+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
3690 extern int ptrace_put_reg(struct task_struct *task, int regno,
3691 unsigned long data);
3692
3693diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3694index 32a7c30..be3a8bb 100644
3695--- a/arch/powerpc/include/asm/reg.h
3696+++ b/arch/powerpc/include/asm/reg.h
3697@@ -191,6 +191,7 @@
3698 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3699 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3700 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3701+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3702 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3703 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3704 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3705diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
3706index 8979d4c..d2fd0d3 100644
3707--- a/arch/powerpc/include/asm/swiotlb.h
3708+++ b/arch/powerpc/include/asm/swiotlb.h
3709@@ -13,7 +13,7 @@
3710
3711 #include <linux/swiotlb.h>
3712
3713-extern struct dma_map_ops swiotlb_dma_ops;
3714+extern const struct dma_map_ops swiotlb_dma_ops;
3715
3716 static inline void dma_mark_clean(void *addr, size_t size) {}
3717
3718diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3719index 094a12a..877a60a 100644
3720--- a/arch/powerpc/include/asm/system.h
3721+++ b/arch/powerpc/include/asm/system.h
3722@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3723 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3724 #endif
3725
3726-extern unsigned long arch_align_stack(unsigned long sp);
3727+#define arch_align_stack(x) ((x) & ~0xfUL)
3728
3729 /* Used in very early kernel initialization. */
3730 extern unsigned long reloc_offset(void);
3731diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3732index bd0fb84..a42a14b 100644
3733--- a/arch/powerpc/include/asm/uaccess.h
3734+++ b/arch/powerpc/include/asm/uaccess.h
3735@@ -13,6 +13,8 @@
3736 #define VERIFY_READ 0
3737 #define VERIFY_WRITE 1
3738
3739+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3740+
3741 /*
3742 * The fs value determines whether argument validity checking should be
3743 * performed or not. If get_fs() == USER_DS, checking is performed, with
3744@@ -327,52 +329,6 @@ do { \
3745 extern unsigned long __copy_tofrom_user(void __user *to,
3746 const void __user *from, unsigned long size);
3747
3748-#ifndef __powerpc64__
3749-
3750-static inline unsigned long copy_from_user(void *to,
3751- const void __user *from, unsigned long n)
3752-{
3753- unsigned long over;
3754-
3755- if (access_ok(VERIFY_READ, from, n))
3756- return __copy_tofrom_user((__force void __user *)to, from, n);
3757- if ((unsigned long)from < TASK_SIZE) {
3758- over = (unsigned long)from + n - TASK_SIZE;
3759- return __copy_tofrom_user((__force void __user *)to, from,
3760- n - over) + over;
3761- }
3762- return n;
3763-}
3764-
3765-static inline unsigned long copy_to_user(void __user *to,
3766- const void *from, unsigned long n)
3767-{
3768- unsigned long over;
3769-
3770- if (access_ok(VERIFY_WRITE, to, n))
3771- return __copy_tofrom_user(to, (__force void __user *)from, n);
3772- if ((unsigned long)to < TASK_SIZE) {
3773- over = (unsigned long)to + n - TASK_SIZE;
3774- return __copy_tofrom_user(to, (__force void __user *)from,
3775- n - over) + over;
3776- }
3777- return n;
3778-}
3779-
3780-#else /* __powerpc64__ */
3781-
3782-#define __copy_in_user(to, from, size) \
3783- __copy_tofrom_user((to), (from), (size))
3784-
3785-extern unsigned long copy_from_user(void *to, const void __user *from,
3786- unsigned long n);
3787-extern unsigned long copy_to_user(void __user *to, const void *from,
3788- unsigned long n);
3789-extern unsigned long copy_in_user(void __user *to, const void __user *from,
3790- unsigned long n);
3791-
3792-#endif /* __powerpc64__ */
3793-
3794 static inline unsigned long __copy_from_user_inatomic(void *to,
3795 const void __user *from, unsigned long n)
3796 {
3797@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3798 if (ret == 0)
3799 return 0;
3800 }
3801+
3802+ if (!__builtin_constant_p(n))
3803+ check_object_size(to, n, false);
3804+
3805 return __copy_tofrom_user((__force void __user *)to, from, n);
3806 }
3807
3808@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3809 if (ret == 0)
3810 return 0;
3811 }
3812+
3813+ if (!__builtin_constant_p(n))
3814+ check_object_size(from, n, true);
3815+
3816 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3817 }
3818
3819@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3820 return __copy_to_user_inatomic(to, from, size);
3821 }
3822
3823+#ifndef __powerpc64__
3824+
3825+static inline unsigned long __must_check copy_from_user(void *to,
3826+ const void __user *from, unsigned long n)
3827+{
3828+ unsigned long over;
3829+
3830+ if ((long)n < 0)
3831+ return n;
3832+
3833+ if (access_ok(VERIFY_READ, from, n)) {
3834+ if (!__builtin_constant_p(n))
3835+ check_object_size(to, n, false);
3836+ return __copy_tofrom_user((__force void __user *)to, from, n);
3837+ }
3838+ if ((unsigned long)from < TASK_SIZE) {
3839+ over = (unsigned long)from + n - TASK_SIZE;
3840+ if (!__builtin_constant_p(n - over))
3841+ check_object_size(to, n - over, false);
3842+ return __copy_tofrom_user((__force void __user *)to, from,
3843+ n - over) + over;
3844+ }
3845+ return n;
3846+}
3847+
3848+static inline unsigned long __must_check copy_to_user(void __user *to,
3849+ const void *from, unsigned long n)
3850+{
3851+ unsigned long over;
3852+
3853+ if ((long)n < 0)
3854+ return n;
3855+
3856+ if (access_ok(VERIFY_WRITE, to, n)) {
3857+ if (!__builtin_constant_p(n))
3858+ check_object_size(from, n, true);
3859+ return __copy_tofrom_user(to, (__force void __user *)from, n);
3860+ }
3861+ if ((unsigned long)to < TASK_SIZE) {
3862+ over = (unsigned long)to + n - TASK_SIZE;
3863+ if (!__builtin_constant_p(n))
3864+ check_object_size(from, n - over, true);
3865+ return __copy_tofrom_user(to, (__force void __user *)from,
3866+ n - over) + over;
3867+ }
3868+ return n;
3869+}
3870+
3871+#else /* __powerpc64__ */
3872+
3873+#define __copy_in_user(to, from, size) \
3874+ __copy_tofrom_user((to), (from), (size))
3875+
3876+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3877+{
3878+ if ((long)n < 0 || n > INT_MAX)
3879+ return n;
3880+
3881+ if (!__builtin_constant_p(n))
3882+ check_object_size(to, n, false);
3883+
3884+ if (likely(access_ok(VERIFY_READ, from, n)))
3885+ n = __copy_from_user(to, from, n);
3886+ else
3887+ memset(to, 0, n);
3888+ return n;
3889+}
3890+
3891+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3892+{
3893+ if ((long)n < 0 || n > INT_MAX)
3894+ return n;
3895+
3896+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
3897+ if (!__builtin_constant_p(n))
3898+ check_object_size(from, n, true);
3899+ n = __copy_to_user(to, from, n);
3900+ }
3901+ return n;
3902+}
3903+
3904+extern unsigned long copy_in_user(void __user *to, const void __user *from,
3905+ unsigned long n);
3906+
3907+#endif /* __powerpc64__ */
3908+
3909 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3910
3911 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3912diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3913index bb37b1d..01fe9ce 100644
3914--- a/arch/powerpc/kernel/cacheinfo.c
3915+++ b/arch/powerpc/kernel/cacheinfo.c
3916@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3917 &cache_assoc_attr,
3918 };
3919
3920-static struct sysfs_ops cache_index_ops = {
3921+static const struct sysfs_ops cache_index_ops = {
3922 .show = cache_index_show,
3923 };
3924
3925diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3926index 37771a5..648530c 100644
3927--- a/arch/powerpc/kernel/dma-iommu.c
3928+++ b/arch/powerpc/kernel/dma-iommu.c
3929@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3930 }
3931
3932 /* We support DMA to/from any memory page via the iommu */
3933-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3934+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3935 {
3936 struct iommu_table *tbl = get_iommu_table_base(dev);
3937
3938diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3939index e96cbbd..bdd6d41 100644
3940--- a/arch/powerpc/kernel/dma-swiotlb.c
3941+++ b/arch/powerpc/kernel/dma-swiotlb.c
3942@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3943 * map_page, and unmap_page on highmem, use normal dma_ops
3944 * for everything else.
3945 */
3946-struct dma_map_ops swiotlb_dma_ops = {
3947+const struct dma_map_ops swiotlb_dma_ops = {
3948 .alloc_coherent = dma_direct_alloc_coherent,
3949 .free_coherent = dma_direct_free_coherent,
3950 .map_sg = swiotlb_map_sg_attrs,
3951diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3952index 6215062..ebea59c 100644
3953--- a/arch/powerpc/kernel/dma.c
3954+++ b/arch/powerpc/kernel/dma.c
3955@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3956 }
3957 #endif
3958
3959-struct dma_map_ops dma_direct_ops = {
3960+const struct dma_map_ops dma_direct_ops = {
3961 .alloc_coherent = dma_direct_alloc_coherent,
3962 .free_coherent = dma_direct_free_coherent,
3963 .map_sg = dma_direct_map_sg,
3964diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3965index 24dcc0e..a300455 100644
3966--- a/arch/powerpc/kernel/exceptions-64e.S
3967+++ b/arch/powerpc/kernel/exceptions-64e.S
3968@@ -455,6 +455,7 @@ storage_fault_common:
3969 std r14,_DAR(r1)
3970 std r15,_DSISR(r1)
3971 addi r3,r1,STACK_FRAME_OVERHEAD
3972+ bl .save_nvgprs
3973 mr r4,r14
3974 mr r5,r15
3975 ld r14,PACA_EXGEN+EX_R14(r13)
3976@@ -464,8 +465,7 @@ storage_fault_common:
3977 cmpdi r3,0
3978 bne- 1f
3979 b .ret_from_except_lite
3980-1: bl .save_nvgprs
3981- mr r5,r3
3982+1: mr r5,r3
3983 addi r3,r1,STACK_FRAME_OVERHEAD
3984 ld r4,_DAR(r1)
3985 bl .bad_page_fault
3986diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3987index 1808876..9fd206a 100644
3988--- a/arch/powerpc/kernel/exceptions-64s.S
3989+++ b/arch/powerpc/kernel/exceptions-64s.S
3990@@ -818,10 +818,10 @@ handle_page_fault:
3991 11: ld r4,_DAR(r1)
3992 ld r5,_DSISR(r1)
3993 addi r3,r1,STACK_FRAME_OVERHEAD
3994+ bl .save_nvgprs
3995 bl .do_page_fault
3996 cmpdi r3,0
3997 beq+ 13f
3998- bl .save_nvgprs
3999 mr r5,r3
4000 addi r3,r1,STACK_FRAME_OVERHEAD
4001 lwz r4,_DAR(r1)
4002diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
4003index a4c8b38..1b09ad9 100644
4004--- a/arch/powerpc/kernel/ibmebus.c
4005+++ b/arch/powerpc/kernel/ibmebus.c
4006@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
4007 return 1;
4008 }
4009
4010-static struct dma_map_ops ibmebus_dma_ops = {
4011+static const struct dma_map_ops ibmebus_dma_ops = {
4012 .alloc_coherent = ibmebus_alloc_coherent,
4013 .free_coherent = ibmebus_free_coherent,
4014 .map_sg = ibmebus_map_sg,
4015diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
4016index 8564a41..67f3471 100644
4017--- a/arch/powerpc/kernel/irq.c
4018+++ b/arch/powerpc/kernel/irq.c
4019@@ -490,9 +490,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
4020 host->ops = ops;
4021 host->of_node = of_node_get(of_node);
4022
4023- if (host->ops->match == NULL)
4024- host->ops->match = default_irq_host_match;
4025-
4026 spin_lock_irqsave(&irq_big_lock, flags);
4027
4028 /* If it's a legacy controller, check for duplicates and
4029@@ -567,7 +564,12 @@ struct irq_host *irq_find_host(struct device_node *node)
4030 */
4031 spin_lock_irqsave(&irq_big_lock, flags);
4032 list_for_each_entry(h, &irq_hosts, link)
4033- if (h->ops->match(h, node)) {
4034+ if (h->ops->match) {
4035+ if (h->ops->match(h, node)) {
4036+ found = h;
4037+ break;
4038+ }
4039+ } else if (default_irq_host_match(h, node)) {
4040 found = h;
4041 break;
4042 }
4043diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
4044index 641c74b..8339ad7 100644
4045--- a/arch/powerpc/kernel/kgdb.c
4046+++ b/arch/powerpc/kernel/kgdb.c
4047@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
4048 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
4049 return 0;
4050
4051- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4052+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
4053 regs->nip += 4;
4054
4055 return 1;
4056@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
4057 /*
4058 * Global data
4059 */
4060-struct kgdb_arch arch_kgdb_ops = {
4061+const struct kgdb_arch arch_kgdb_ops = {
4062 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
4063 };
4064
4065diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
4066index 477c663..4f50234 100644
4067--- a/arch/powerpc/kernel/module.c
4068+++ b/arch/powerpc/kernel/module.c
4069@@ -31,11 +31,24 @@
4070
4071 LIST_HEAD(module_bug_list);
4072
4073+#ifdef CONFIG_PAX_KERNEXEC
4074 void *module_alloc(unsigned long size)
4075 {
4076 if (size == 0)
4077 return NULL;
4078
4079+ return vmalloc(size);
4080+}
4081+
4082+void *module_alloc_exec(unsigned long size)
4083+#else
4084+void *module_alloc(unsigned long size)
4085+#endif
4086+
4087+{
4088+ if (size == 0)
4089+ return NULL;
4090+
4091 return vmalloc_exec(size);
4092 }
4093
4094@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
4095 vfree(module_region);
4096 }
4097
4098+#ifdef CONFIG_PAX_KERNEXEC
4099+void module_free_exec(struct module *mod, void *module_region)
4100+{
4101+ module_free(mod, module_region);
4102+}
4103+#endif
4104+
4105 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
4106 const Elf_Shdr *sechdrs,
4107 const char *name)
4108diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4109index f832773..0507238 100644
4110--- a/arch/powerpc/kernel/module_32.c
4111+++ b/arch/powerpc/kernel/module_32.c
4112@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4113 me->arch.core_plt_section = i;
4114 }
4115 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4116- printk("Module doesn't contain .plt or .init.plt sections.\n");
4117+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4118 return -ENOEXEC;
4119 }
4120
4121@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
4122
4123 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4124 /* Init, or core PLT? */
4125- if (location >= mod->module_core
4126- && location < mod->module_core + mod->core_size)
4127+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4128+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4129 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4130- else
4131+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4132+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4133 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4134+ else {
4135+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4136+ return ~0UL;
4137+ }
4138
4139 /* Find this entry, or if that fails, the next avail. entry */
4140 while (entry->jump[0]) {
4141diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
4142index cadbed6..b9bbb00 100644
4143--- a/arch/powerpc/kernel/pci-common.c
4144+++ b/arch/powerpc/kernel/pci-common.c
4145@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
4146 unsigned int ppc_pci_flags = 0;
4147
4148
4149-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4150+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
4151
4152-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
4153+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
4154 {
4155 pci_dma_ops = dma_ops;
4156 }
4157
4158-struct dma_map_ops *get_pci_dma_ops(void)
4159+const struct dma_map_ops *get_pci_dma_ops(void)
4160 {
4161 return pci_dma_ops;
4162 }
4163diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4164index 7b816da..8d5c277 100644
4165--- a/arch/powerpc/kernel/process.c
4166+++ b/arch/powerpc/kernel/process.c
4167@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
4168 * Lookup NIP late so we have the best change of getting the
4169 * above info out without failing
4170 */
4171- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4172- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4173+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4174+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4175 #endif
4176 show_stack(current, (unsigned long *) regs->gpr[1]);
4177 if (!user_mode(regs))
4178@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4179 newsp = stack[0];
4180 ip = stack[STACK_FRAME_LR_SAVE];
4181 if (!firstframe || ip != lr) {
4182- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4183+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4184 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4185 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4186- printk(" (%pS)",
4187+ printk(" (%pA)",
4188 (void *)current->ret_stack[curr_frame].ret);
4189 curr_frame--;
4190 }
4191@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4192 struct pt_regs *regs = (struct pt_regs *)
4193 (sp + STACK_FRAME_OVERHEAD);
4194 lr = regs->link;
4195- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4196+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4197 regs->trap, (void *)regs->nip, (void *)lr);
4198 firstframe = 1;
4199 }
4200@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
4201 }
4202
4203 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4204-
4205-unsigned long arch_align_stack(unsigned long sp)
4206-{
4207- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4208- sp -= get_random_int() & ~PAGE_MASK;
4209- return sp & ~0xf;
4210-}
4211-
4212-static inline unsigned long brk_rnd(void)
4213-{
4214- unsigned long rnd = 0;
4215-
4216- /* 8MB for 32bit, 1GB for 64bit */
4217- if (is_32bit_task())
4218- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4219- else
4220- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4221-
4222- return rnd << PAGE_SHIFT;
4223-}
4224-
4225-unsigned long arch_randomize_brk(struct mm_struct *mm)
4226-{
4227- unsigned long base = mm->brk;
4228- unsigned long ret;
4229-
4230-#ifdef CONFIG_PPC_STD_MMU_64
4231- /*
4232- * If we are using 1TB segments and we are allowed to randomise
4233- * the heap, we can put it above 1TB so it is backed by a 1TB
4234- * segment. Otherwise the heap will be in the bottom 1TB
4235- * which always uses 256MB segments and this may result in a
4236- * performance penalty.
4237- */
4238- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4239- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4240-#endif
4241-
4242- ret = PAGE_ALIGN(base + brk_rnd());
4243-
4244- if (ret < mm->brk)
4245- return mm->brk;
4246-
4247- return ret;
4248-}
4249-
4250-unsigned long randomize_et_dyn(unsigned long base)
4251-{
4252- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4253-
4254- if (ret < base)
4255- return base;
4256-
4257- return ret;
4258-}
4259diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4260index ef14988..856c4bc 100644
4261--- a/arch/powerpc/kernel/ptrace.c
4262+++ b/arch/powerpc/kernel/ptrace.c
4263@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
4264 /*
4265 * Get contents of register REGNO in task TASK.
4266 */
4267-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
4268+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
4269 {
4270 if (task->thread.regs == NULL)
4271 return -EIO;
4272@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
4273
4274 CHECK_FULL_REGS(child->thread.regs);
4275 if (index < PT_FPR0) {
4276- tmp = ptrace_get_reg(child, (int) index);
4277+ tmp = ptrace_get_reg(child, index);
4278 } else {
4279 flush_fp_to_thread(child);
4280 tmp = ((unsigned long *)child->thread.fpr)
4281diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4282index d670429..2bc59b2 100644
4283--- a/arch/powerpc/kernel/signal_32.c
4284+++ b/arch/powerpc/kernel/signal_32.c
4285@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4286 /* Save user registers on the stack */
4287 frame = &rt_sf->uc.uc_mcontext;
4288 addr = frame;
4289- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4290+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4291 if (save_user_regs(regs, frame, 0, 1))
4292 goto badframe;
4293 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4294diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4295index 2fe6fc6..ada0d96 100644
4296--- a/arch/powerpc/kernel/signal_64.c
4297+++ b/arch/powerpc/kernel/signal_64.c
4298@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4299 current->thread.fpscr.val = 0;
4300
4301 /* Set up to return from userspace. */
4302- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4303+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4304 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4305 } else {
4306 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4307diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
4308index b97c2d6..dd01a6a 100644
4309--- a/arch/powerpc/kernel/sys_ppc32.c
4310+++ b/arch/powerpc/kernel/sys_ppc32.c
4311@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
4312 if (oldlenp) {
4313 if (!error) {
4314 if (get_user(oldlen, oldlenp) ||
4315- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
4316+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
4317+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
4318 error = -EFAULT;
4319 }
4320- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
4321 }
4322 return error;
4323 }
4324diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4325index 6f0ae1a..e4b6a56 100644
4326--- a/arch/powerpc/kernel/traps.c
4327+++ b/arch/powerpc/kernel/traps.c
4328@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
4329 static inline void pmac_backlight_unblank(void) { }
4330 #endif
4331
4332+extern void gr_handle_kernel_exploit(void);
4333+
4334 int die(const char *str, struct pt_regs *regs, long err)
4335 {
4336 static struct {
4337@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
4338 if (panic_on_oops)
4339 panic("Fatal exception");
4340
4341+ gr_handle_kernel_exploit();
4342+
4343 oops_exit();
4344 do_exit(err);
4345
4346diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4347index 137dc22..fe57a79 100644
4348--- a/arch/powerpc/kernel/vdso.c
4349+++ b/arch/powerpc/kernel/vdso.c
4350@@ -36,6 +36,7 @@
4351 #include <asm/firmware.h>
4352 #include <asm/vdso.h>
4353 #include <asm/vdso_datapage.h>
4354+#include <asm/mman.h>
4355
4356 #include "setup.h"
4357
4358@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4359 vdso_base = VDSO32_MBASE;
4360 #endif
4361
4362- current->mm->context.vdso_base = 0;
4363+ current->mm->context.vdso_base = ~0UL;
4364
4365 /* vDSO has a problem and was disabled, just don't "enable" it for the
4366 * process
4367@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4368 vdso_base = get_unmapped_area(NULL, vdso_base,
4369 (vdso_pages << PAGE_SHIFT) +
4370 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4371- 0, 0);
4372+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
4373 if (IS_ERR_VALUE(vdso_base)) {
4374 rc = vdso_base;
4375 goto fail_mmapsem;
4376diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
4377index 77f6421..829564a 100644
4378--- a/arch/powerpc/kernel/vio.c
4379+++ b/arch/powerpc/kernel/vio.c
4380@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
4381 vio_cmo_dealloc(viodev, alloc_size);
4382 }
4383
4384-struct dma_map_ops vio_dma_mapping_ops = {
4385+static const struct dma_map_ops vio_dma_mapping_ops = {
4386 .alloc_coherent = vio_dma_iommu_alloc_coherent,
4387 .free_coherent = vio_dma_iommu_free_coherent,
4388 .map_sg = vio_dma_iommu_map_sg,
4389 .unmap_sg = vio_dma_iommu_unmap_sg,
4390+ .dma_supported = dma_iommu_dma_supported,
4391 .map_page = vio_dma_iommu_map_page,
4392 .unmap_page = vio_dma_iommu_unmap_page,
4393
4394@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
4395
4396 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
4397 {
4398- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
4399 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
4400 }
4401
4402diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4403index 5eea6f3..5d10396 100644
4404--- a/arch/powerpc/lib/usercopy_64.c
4405+++ b/arch/powerpc/lib/usercopy_64.c
4406@@ -9,22 +9,6 @@
4407 #include <linux/module.h>
4408 #include <asm/uaccess.h>
4409
4410-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4411-{
4412- if (likely(access_ok(VERIFY_READ, from, n)))
4413- n = __copy_from_user(to, from, n);
4414- else
4415- memset(to, 0, n);
4416- return n;
4417-}
4418-
4419-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4420-{
4421- if (likely(access_ok(VERIFY_WRITE, to, n)))
4422- n = __copy_to_user(to, from, n);
4423- return n;
4424-}
4425-
4426 unsigned long copy_in_user(void __user *to, const void __user *from,
4427 unsigned long n)
4428 {
4429@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4430 return n;
4431 }
4432
4433-EXPORT_SYMBOL(copy_from_user);
4434-EXPORT_SYMBOL(copy_to_user);
4435 EXPORT_SYMBOL(copy_in_user);
4436
4437diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4438index e7dae82..877ce0d 100644
4439--- a/arch/powerpc/mm/fault.c
4440+++ b/arch/powerpc/mm/fault.c
4441@@ -30,6 +30,10 @@
4442 #include <linux/kprobes.h>
4443 #include <linux/kdebug.h>
4444 #include <linux/perf_event.h>
4445+#include <linux/slab.h>
4446+#include <linux/pagemap.h>
4447+#include <linux/compiler.h>
4448+#include <linux/unistd.h>
4449
4450 #include <asm/firmware.h>
4451 #include <asm/page.h>
4452@@ -40,6 +44,7 @@
4453 #include <asm/uaccess.h>
4454 #include <asm/tlbflush.h>
4455 #include <asm/siginfo.h>
4456+#include <asm/ptrace.h>
4457
4458
4459 #ifdef CONFIG_KPROBES
4460@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4461 }
4462 #endif
4463
4464+#ifdef CONFIG_PAX_PAGEEXEC
4465+/*
4466+ * PaX: decide what to do with offenders (regs->nip = fault address)
4467+ *
4468+ * returns 1 when task should be killed
4469+ */
4470+static int pax_handle_fetch_fault(struct pt_regs *regs)
4471+{
4472+ return 1;
4473+}
4474+
4475+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4476+{
4477+ unsigned long i;
4478+
4479+ printk(KERN_ERR "PAX: bytes at PC: ");
4480+ for (i = 0; i < 5; i++) {
4481+ unsigned int c;
4482+ if (get_user(c, (unsigned int __user *)pc+i))
4483+ printk(KERN_CONT "???????? ");
4484+ else
4485+ printk(KERN_CONT "%08x ", c);
4486+ }
4487+ printk("\n");
4488+}
4489+#endif
4490+
4491 /*
4492 * Check whether the instruction at regs->nip is a store using
4493 * an update addressing form which will update r1.
4494@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4495 * indicate errors in DSISR but can validly be set in SRR1.
4496 */
4497 if (trap == 0x400)
4498- error_code &= 0x48200000;
4499+ error_code &= 0x58200000;
4500 else
4501 is_write = error_code & DSISR_ISSTORE;
4502 #else
4503@@ -250,7 +282,7 @@ good_area:
4504 * "undefined". Of those that can be set, this is the only
4505 * one which seems bad.
4506 */
4507- if (error_code & 0x10000000)
4508+ if (error_code & DSISR_GUARDED)
4509 /* Guarded storage error. */
4510 goto bad_area;
4511 #endif /* CONFIG_8xx */
4512@@ -265,7 +297,7 @@ good_area:
4513 * processors use the same I/D cache coherency mechanism
4514 * as embedded.
4515 */
4516- if (error_code & DSISR_PROTFAULT)
4517+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4518 goto bad_area;
4519 #endif /* CONFIG_PPC_STD_MMU */
4520
4521@@ -335,6 +367,23 @@ bad_area:
4522 bad_area_nosemaphore:
4523 /* User mode accesses cause a SIGSEGV */
4524 if (user_mode(regs)) {
4525+
4526+#ifdef CONFIG_PAX_PAGEEXEC
4527+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4528+#ifdef CONFIG_PPC_STD_MMU
4529+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4530+#else
4531+ if (is_exec && regs->nip == address) {
4532+#endif
4533+ switch (pax_handle_fetch_fault(regs)) {
4534+ }
4535+
4536+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4537+ do_group_exit(SIGKILL);
4538+ }
4539+ }
4540+#endif
4541+
4542 _exception(SIGSEGV, regs, code, address);
4543 return 0;
4544 }
4545diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
4546index 5973631..ad617af 100644
4547--- a/arch/powerpc/mm/mem.c
4548+++ b/arch/powerpc/mm/mem.c
4549@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
4550 {
4551 unsigned long lmb_next_region_start_pfn,
4552 lmb_region_max_pfn;
4553- int i;
4554+ unsigned int i;
4555
4556 for (i = 0; i < lmb.memory.cnt - 1; i++) {
4557 lmb_region_max_pfn =
4558diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4559index 0d957a4..26d968f 100644
4560--- a/arch/powerpc/mm/mmap_64.c
4561+++ b/arch/powerpc/mm/mmap_64.c
4562@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4563 */
4564 if (mmap_is_legacy()) {
4565 mm->mmap_base = TASK_UNMAPPED_BASE;
4566+
4567+#ifdef CONFIG_PAX_RANDMMAP
4568+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4569+ mm->mmap_base += mm->delta_mmap;
4570+#endif
4571+
4572 mm->get_unmapped_area = arch_get_unmapped_area;
4573 mm->unmap_area = arch_unmap_area;
4574 } else {
4575 mm->mmap_base = mmap_base();
4576+
4577+#ifdef CONFIG_PAX_RANDMMAP
4578+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4579+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4580+#endif
4581+
4582 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4583 mm->unmap_area = arch_unmap_area_topdown;
4584 }
4585diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4586index ba51948..23009d9 100644
4587--- a/arch/powerpc/mm/slice.c
4588+++ b/arch/powerpc/mm/slice.c
4589@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4590 if ((mm->task_size - len) < addr)
4591 return 0;
4592 vma = find_vma(mm, addr);
4593- return (!vma || (addr + len) <= vma->vm_start);
4594+ return check_heap_stack_gap(vma, addr, len);
4595 }
4596
4597 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4598@@ -256,7 +256,7 @@ full_search:
4599 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4600 continue;
4601 }
4602- if (!vma || addr + len <= vma->vm_start) {
4603+ if (check_heap_stack_gap(vma, addr, len)) {
4604 /*
4605 * Remember the place where we stopped the search:
4606 */
4607@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4608 }
4609 }
4610
4611- addr = mm->mmap_base;
4612- while (addr > len) {
4613+ if (mm->mmap_base < len)
4614+ addr = -ENOMEM;
4615+ else
4616+ addr = mm->mmap_base - len;
4617+
4618+ while (!IS_ERR_VALUE(addr)) {
4619 /* Go down by chunk size */
4620- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4621+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4622
4623 /* Check for hit with different page size */
4624 mask = slice_range_to_mask(addr, len);
4625@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4626 * return with success:
4627 */
4628 vma = find_vma(mm, addr);
4629- if (!vma || (addr + len) <= vma->vm_start) {
4630+ if (check_heap_stack_gap(vma, addr, len)) {
4631 /* remember the address as a hint for next time */
4632 if (use_cache)
4633 mm->free_area_cache = addr;
4634@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4635 mm->cached_hole_size = vma->vm_start - addr;
4636
4637 /* try just below the current vma->vm_start */
4638- addr = vma->vm_start;
4639+ addr = skip_heap_stack_gap(vma, len);
4640 }
4641
4642 /*
4643@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4644 if (fixed && addr > (mm->task_size - len))
4645 return -EINVAL;
4646
4647+#ifdef CONFIG_PAX_RANDMMAP
4648+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4649+ addr = 0;
4650+#endif
4651+
4652 /* If hint, make sure it matches our alignment restrictions */
4653 if (!fixed && addr) {
4654 addr = _ALIGN_UP(addr, 1ul << pshift);
4655diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
4656index b5c753d..8f01abe 100644
4657--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
4658+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
4659@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
4660 lite5200_pm_target_state = PM_SUSPEND_ON;
4661 }
4662
4663-static struct platform_suspend_ops lite5200_pm_ops = {
4664+static const struct platform_suspend_ops lite5200_pm_ops = {
4665 .valid = lite5200_pm_valid,
4666 .begin = lite5200_pm_begin,
4667 .prepare = lite5200_pm_prepare,
4668diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4669index a55b0b6..478c18e 100644
4670--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4671+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
4672@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
4673 iounmap(mbar);
4674 }
4675
4676-static struct platform_suspend_ops mpc52xx_pm_ops = {
4677+static const struct platform_suspend_ops mpc52xx_pm_ops = {
4678 .valid = mpc52xx_pm_valid,
4679 .prepare = mpc52xx_pm_prepare,
4680 .enter = mpc52xx_pm_enter,
4681diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
4682index 08e65fc..643d3ac 100644
4683--- a/arch/powerpc/platforms/83xx/suspend.c
4684+++ b/arch/powerpc/platforms/83xx/suspend.c
4685@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
4686 return ret;
4687 }
4688
4689-static struct platform_suspend_ops mpc83xx_suspend_ops = {
4690+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
4691 .valid = mpc83xx_suspend_valid,
4692 .begin = mpc83xx_suspend_begin,
4693 .enter = mpc83xx_suspend_enter,
4694diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
4695index ca5bfdf..1602e09 100644
4696--- a/arch/powerpc/platforms/cell/iommu.c
4697+++ b/arch/powerpc/platforms/cell/iommu.c
4698@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
4699
4700 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
4701
4702-struct dma_map_ops dma_iommu_fixed_ops = {
4703+const struct dma_map_ops dma_iommu_fixed_ops = {
4704 .alloc_coherent = dma_fixed_alloc_coherent,
4705 .free_coherent = dma_fixed_free_coherent,
4706 .map_sg = dma_fixed_map_sg,
4707diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
4708index e34b305..20e48ec 100644
4709--- a/arch/powerpc/platforms/ps3/system-bus.c
4710+++ b/arch/powerpc/platforms/ps3/system-bus.c
4711@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
4712 return mask >= DMA_BIT_MASK(32);
4713 }
4714
4715-static struct dma_map_ops ps3_sb_dma_ops = {
4716+static const struct dma_map_ops ps3_sb_dma_ops = {
4717 .alloc_coherent = ps3_alloc_coherent,
4718 .free_coherent = ps3_free_coherent,
4719 .map_sg = ps3_sb_map_sg,
4720@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
4721 .unmap_page = ps3_unmap_page,
4722 };
4723
4724-static struct dma_map_ops ps3_ioc0_dma_ops = {
4725+static const struct dma_map_ops ps3_ioc0_dma_ops = {
4726 .alloc_coherent = ps3_alloc_coherent,
4727 .free_coherent = ps3_free_coherent,
4728 .map_sg = ps3_ioc0_map_sg,
4729diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
4730index f0e6f28..60d53ed 100644
4731--- a/arch/powerpc/platforms/pseries/Kconfig
4732+++ b/arch/powerpc/platforms/pseries/Kconfig
4733@@ -2,6 +2,8 @@ config PPC_PSERIES
4734 depends on PPC64 && PPC_BOOK3S
4735 bool "IBM pSeries & new (POWER5-based) iSeries"
4736 select MPIC
4737+ select PCI_MSI
4738+ select XICS
4739 select PPC_I8259
4740 select PPC_RTAS
4741 select RTAS_ERROR_LOGGING
4742diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
4743index 43c0aca..42c045b 100644
4744--- a/arch/s390/Kconfig
4745+++ b/arch/s390/Kconfig
4746@@ -194,28 +194,26 @@ config AUDIT_ARCH
4747
4748 config S390_SWITCH_AMODE
4749 bool "Switch kernel/user addressing modes"
4750+ default y
4751 help
4752 This option allows to switch the addressing modes of kernel and user
4753- space. The kernel parameter switch_amode=on will enable this feature,
4754- default is disabled. Enabling this (via kernel parameter) on machines
4755- earlier than IBM System z9-109 EC/BC will reduce system performance.
4756+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
4757+ will reduce system performance.
4758
4759 Note that this option will also be selected by selecting the execute
4760- protection option below. Enabling the execute protection via the
4761- noexec kernel parameter will also switch the addressing modes,
4762- independent of the switch_amode kernel parameter.
4763+ protection option below. Enabling the execute protection will also
4764+ switch the addressing modes, independent of this option.
4765
4766
4767 config S390_EXEC_PROTECT
4768 bool "Data execute protection"
4769+ default y
4770 select S390_SWITCH_AMODE
4771 help
4772 This option allows to enable a buffer overflow protection for user
4773 space programs and it also selects the addressing mode option above.
4774- The kernel parameter noexec=on will enable this feature and also
4775- switch the addressing modes, default is disabled. Enabling this (via
4776- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
4777- will reduce system performance.
4778+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
4779+ reduce system performance.
4780
4781 comment "Code generation options"
4782
4783diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4784index ae7c8f9..3f01a0c 100644
4785--- a/arch/s390/include/asm/atomic.h
4786+++ b/arch/s390/include/asm/atomic.h
4787@@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
4788 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4789 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4790
4791+#define atomic64_read_unchecked(v) atomic64_read(v)
4792+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4793+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4794+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4795+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4796+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4797+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4798+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4799+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4800+
4801 #define smp_mb__before_atomic_dec() smp_mb()
4802 #define smp_mb__after_atomic_dec() smp_mb()
4803 #define smp_mb__before_atomic_inc() smp_mb()
4804diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4805index 9b86681..c5140db 100644
4806--- a/arch/s390/include/asm/cache.h
4807+++ b/arch/s390/include/asm/cache.h
4808@@ -11,8 +11,10 @@
4809 #ifndef __ARCH_S390_CACHE_H
4810 #define __ARCH_S390_CACHE_H
4811
4812-#define L1_CACHE_BYTES 256
4813+#include <linux/const.h>
4814+
4815 #define L1_CACHE_SHIFT 8
4816+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4817
4818 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
4819
4820diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4821index e885442..e3a2817 100644
4822--- a/arch/s390/include/asm/elf.h
4823+++ b/arch/s390/include/asm/elf.h
4824@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
4825 that it will "exec", and that there is sufficient room for the brk. */
4826 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4827
4828+#ifdef CONFIG_PAX_ASLR
4829+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4830+
4831+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4832+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4833+#endif
4834+
4835 /* This yields a mask that user programs can use to figure out what
4836 instruction set this CPU supports. */
4837
4838diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
4839index e37478e..9ce0e9f 100644
4840--- a/arch/s390/include/asm/setup.h
4841+++ b/arch/s390/include/asm/setup.h
4842@@ -50,13 +50,13 @@ extern unsigned long memory_end;
4843 void detect_memory_layout(struct mem_chunk chunk[]);
4844
4845 #ifdef CONFIG_S390_SWITCH_AMODE
4846-extern unsigned int switch_amode;
4847+#define switch_amode (1)
4848 #else
4849 #define switch_amode (0)
4850 #endif
4851
4852 #ifdef CONFIG_S390_EXEC_PROTECT
4853-extern unsigned int s390_noexec;
4854+#define s390_noexec (1)
4855 #else
4856 #define s390_noexec (0)
4857 #endif
4858diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4859index 8377e91..e28e6f1 100644
4860--- a/arch/s390/include/asm/uaccess.h
4861+++ b/arch/s390/include/asm/uaccess.h
4862@@ -232,6 +232,10 @@ static inline unsigned long __must_check
4863 copy_to_user(void __user *to, const void *from, unsigned long n)
4864 {
4865 might_fault();
4866+
4867+ if ((long)n < 0)
4868+ return n;
4869+
4870 if (access_ok(VERIFY_WRITE, to, n))
4871 n = __copy_to_user(to, from, n);
4872 return n;
4873@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4874 static inline unsigned long __must_check
4875 __copy_from_user(void *to, const void __user *from, unsigned long n)
4876 {
4877+ if ((long)n < 0)
4878+ return n;
4879+
4880 if (__builtin_constant_p(n) && (n <= 256))
4881 return uaccess.copy_from_user_small(n, from, to);
4882 else
4883@@ -283,6 +290,10 @@ static inline unsigned long __must_check
4884 copy_from_user(void *to, const void __user *from, unsigned long n)
4885 {
4886 might_fault();
4887+
4888+ if ((long)n < 0)
4889+ return n;
4890+
4891 if (access_ok(VERIFY_READ, from, n))
4892 n = __copy_from_user(to, from, n);
4893 else
4894diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4895index 639380a..72e3c02 100644
4896--- a/arch/s390/kernel/module.c
4897+++ b/arch/s390/kernel/module.c
4898@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4899
4900 /* Increase core size by size of got & plt and set start
4901 offsets for got and plt. */
4902- me->core_size = ALIGN(me->core_size, 4);
4903- me->arch.got_offset = me->core_size;
4904- me->core_size += me->arch.got_size;
4905- me->arch.plt_offset = me->core_size;
4906- me->core_size += me->arch.plt_size;
4907+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4908+ me->arch.got_offset = me->core_size_rw;
4909+ me->core_size_rw += me->arch.got_size;
4910+ me->arch.plt_offset = me->core_size_rx;
4911+ me->core_size_rx += me->arch.plt_size;
4912 return 0;
4913 }
4914
4915@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4916 if (info->got_initialized == 0) {
4917 Elf_Addr *gotent;
4918
4919- gotent = me->module_core + me->arch.got_offset +
4920+ gotent = me->module_core_rw + me->arch.got_offset +
4921 info->got_offset;
4922 *gotent = val;
4923 info->got_initialized = 1;
4924@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4925 else if (r_type == R_390_GOTENT ||
4926 r_type == R_390_GOTPLTENT)
4927 *(unsigned int *) loc =
4928- (val + (Elf_Addr) me->module_core - loc) >> 1;
4929+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4930 else if (r_type == R_390_GOT64 ||
4931 r_type == R_390_GOTPLT64)
4932 *(unsigned long *) loc = val;
4933@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4934 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4935 if (info->plt_initialized == 0) {
4936 unsigned int *ip;
4937- ip = me->module_core + me->arch.plt_offset +
4938+ ip = me->module_core_rx + me->arch.plt_offset +
4939 info->plt_offset;
4940 #ifndef CONFIG_64BIT
4941 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4942@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4943 val - loc + 0xffffUL < 0x1ffffeUL) ||
4944 (r_type == R_390_PLT32DBL &&
4945 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4946- val = (Elf_Addr) me->module_core +
4947+ val = (Elf_Addr) me->module_core_rx +
4948 me->arch.plt_offset +
4949 info->plt_offset;
4950 val += rela->r_addend - loc;
4951@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4952 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4953 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4954 val = val + rela->r_addend -
4955- ((Elf_Addr) me->module_core + me->arch.got_offset);
4956+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4957 if (r_type == R_390_GOTOFF16)
4958 *(unsigned short *) loc = val;
4959 else if (r_type == R_390_GOTOFF32)
4960@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4961 break;
4962 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4963 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4964- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4965+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4966 rela->r_addend - loc;
4967 if (r_type == R_390_GOTPC)
4968 *(unsigned int *) loc = val;
4969diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
4970index 0b2573a..71a22ec 100644
4971--- a/arch/s390/kernel/setup.c
4972+++ b/arch/s390/kernel/setup.c
4973@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
4974 early_param("mem", early_parse_mem);
4975
4976 #ifdef CONFIG_S390_SWITCH_AMODE
4977-unsigned int switch_amode = 0;
4978-EXPORT_SYMBOL_GPL(switch_amode);
4979-
4980 static int set_amode_and_uaccess(unsigned long user_amode,
4981 unsigned long user32_amode)
4982 {
4983@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4984 return 0;
4985 }
4986 }
4987-
4988-/*
4989- * Switch kernel/user addressing modes?
4990- */
4991-static int __init early_parse_switch_amode(char *p)
4992-{
4993- switch_amode = 1;
4994- return 0;
4995-}
4996-early_param("switch_amode", early_parse_switch_amode);
4997-
4998 #else /* CONFIG_S390_SWITCH_AMODE */
4999 static inline int set_amode_and_uaccess(unsigned long user_amode,
5000 unsigned long user32_amode)
5001@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
5002 }
5003 #endif /* CONFIG_S390_SWITCH_AMODE */
5004
5005-#ifdef CONFIG_S390_EXEC_PROTECT
5006-unsigned int s390_noexec = 0;
5007-EXPORT_SYMBOL_GPL(s390_noexec);
5008-
5009-/*
5010- * Enable execute protection?
5011- */
5012-static int __init early_parse_noexec(char *p)
5013-{
5014- if (!strncmp(p, "off", 3))
5015- return 0;
5016- switch_amode = 1;
5017- s390_noexec = 1;
5018- return 0;
5019-}
5020-early_param("noexec", early_parse_noexec);
5021-#endif /* CONFIG_S390_EXEC_PROTECT */
5022-
5023 static void setup_addressing_mode(void)
5024 {
5025 if (s390_noexec) {
5026diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
5027index 0ab74ae..c8b68f9 100644
5028--- a/arch/s390/mm/mmap.c
5029+++ b/arch/s390/mm/mmap.c
5030@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5031 */
5032 if (mmap_is_legacy()) {
5033 mm->mmap_base = TASK_UNMAPPED_BASE;
5034+
5035+#ifdef CONFIG_PAX_RANDMMAP
5036+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5037+ mm->mmap_base += mm->delta_mmap;
5038+#endif
5039+
5040 mm->get_unmapped_area = arch_get_unmapped_area;
5041 mm->unmap_area = arch_unmap_area;
5042 } else {
5043 mm->mmap_base = mmap_base();
5044+
5045+#ifdef CONFIG_PAX_RANDMMAP
5046+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5047+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5048+#endif
5049+
5050 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5051 mm->unmap_area = arch_unmap_area_topdown;
5052 }
5053@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5054 */
5055 if (mmap_is_legacy()) {
5056 mm->mmap_base = TASK_UNMAPPED_BASE;
5057+
5058+#ifdef CONFIG_PAX_RANDMMAP
5059+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5060+ mm->mmap_base += mm->delta_mmap;
5061+#endif
5062+
5063 mm->get_unmapped_area = s390_get_unmapped_area;
5064 mm->unmap_area = arch_unmap_area;
5065 } else {
5066 mm->mmap_base = mmap_base();
5067+
5068+#ifdef CONFIG_PAX_RANDMMAP
5069+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5070+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5071+#endif
5072+
5073 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
5074 mm->unmap_area = arch_unmap_area_topdown;
5075 }
5076diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
5077index ae3d59f..f65f075 100644
5078--- a/arch/score/include/asm/cache.h
5079+++ b/arch/score/include/asm/cache.h
5080@@ -1,7 +1,9 @@
5081 #ifndef _ASM_SCORE_CACHE_H
5082 #define _ASM_SCORE_CACHE_H
5083
5084+#include <linux/const.h>
5085+
5086 #define L1_CACHE_SHIFT 4
5087-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5088+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5089
5090 #endif /* _ASM_SCORE_CACHE_H */
5091diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
5092index 589d5c7..669e274 100644
5093--- a/arch/score/include/asm/system.h
5094+++ b/arch/score/include/asm/system.h
5095@@ -17,7 +17,7 @@ do { \
5096 #define finish_arch_switch(prev) do {} while (0)
5097
5098 typedef void (*vi_handler_t)(void);
5099-extern unsigned long arch_align_stack(unsigned long sp);
5100+#define arch_align_stack(x) (x)
5101
5102 #define mb() barrier()
5103 #define rmb() barrier()
5104diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
5105index 25d0803..d6c8e36 100644
5106--- a/arch/score/kernel/process.c
5107+++ b/arch/score/kernel/process.c
5108@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
5109
5110 return task_pt_regs(task)->cp0_epc;
5111 }
5112-
5113-unsigned long arch_align_stack(unsigned long sp)
5114-{
5115- return sp;
5116-}
5117diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
5118index d936c1a..304a252 100644
5119--- a/arch/sh/boards/mach-hp6xx/pm.c
5120+++ b/arch/sh/boards/mach-hp6xx/pm.c
5121@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
5122 return 0;
5123 }
5124
5125-static struct platform_suspend_ops hp6x0_pm_ops = {
5126+static const struct platform_suspend_ops hp6x0_pm_ops = {
5127 .enter = hp6x0_pm_enter,
5128 .valid = suspend_valid_only_mem,
5129 };
5130diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
5131index 02df18e..ae3a793 100644
5132--- a/arch/sh/include/asm/cache.h
5133+++ b/arch/sh/include/asm/cache.h
5134@@ -9,10 +9,11 @@
5135 #define __ASM_SH_CACHE_H
5136 #ifdef __KERNEL__
5137
5138+#include <linux/const.h>
5139 #include <linux/init.h>
5140 #include <cpu/cache.h>
5141
5142-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5143+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5144
5145 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
5146
5147diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
5148index 8a8a993..7b3079b 100644
5149--- a/arch/sh/kernel/cpu/sh4/sq.c
5150+++ b/arch/sh/kernel/cpu/sh4/sq.c
5151@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
5152 NULL,
5153 };
5154
5155-static struct sysfs_ops sq_sysfs_ops = {
5156+static const struct sysfs_ops sq_sysfs_ops = {
5157 .show = sq_sysfs_show,
5158 .store = sq_sysfs_store,
5159 };
5160diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
5161index ee3c2aa..c49cee6 100644
5162--- a/arch/sh/kernel/cpu/shmobile/pm.c
5163+++ b/arch/sh/kernel/cpu/shmobile/pm.c
5164@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
5165 return 0;
5166 }
5167
5168-static struct platform_suspend_ops sh_pm_ops = {
5169+static const struct platform_suspend_ops sh_pm_ops = {
5170 .enter = sh_pm_enter,
5171 .valid = suspend_valid_only_mem,
5172 };
5173diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
5174index 3e532d0..9faa306 100644
5175--- a/arch/sh/kernel/kgdb.c
5176+++ b/arch/sh/kernel/kgdb.c
5177@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
5178 {
5179 }
5180
5181-struct kgdb_arch arch_kgdb_ops = {
5182+const struct kgdb_arch arch_kgdb_ops = {
5183 /* Breakpoint instruction: trapa #0x3c */
5184 #ifdef CONFIG_CPU_LITTLE_ENDIAN
5185 .gdb_bpt_instr = { 0x3c, 0xc3 },
5186diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5187index afeb710..d1d1289 100644
5188--- a/arch/sh/mm/mmap.c
5189+++ b/arch/sh/mm/mmap.c
5190@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5191 addr = PAGE_ALIGN(addr);
5192
5193 vma = find_vma(mm, addr);
5194- if (TASK_SIZE - len >= addr &&
5195- (!vma || addr + len <= vma->vm_start))
5196+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5197 return addr;
5198 }
5199
5200@@ -106,7 +105,7 @@ full_search:
5201 }
5202 return -ENOMEM;
5203 }
5204- if (likely(!vma || addr + len <= vma->vm_start)) {
5205+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5206 /*
5207 * Remember the place where we stopped the search:
5208 */
5209@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5210 addr = PAGE_ALIGN(addr);
5211
5212 vma = find_vma(mm, addr);
5213- if (TASK_SIZE - len >= addr &&
5214- (!vma || addr + len <= vma->vm_start))
5215+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5216 return addr;
5217 }
5218
5219@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5220 /* make sure it can fit in the remaining address space */
5221 if (likely(addr > len)) {
5222 vma = find_vma(mm, addr-len);
5223- if (!vma || addr <= vma->vm_start) {
5224+ if (check_heap_stack_gap(vma, addr - len, len)) {
5225 /* remember the address as a hint for next time */
5226 return (mm->free_area_cache = addr-len);
5227 }
5228@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5229 if (unlikely(mm->mmap_base < len))
5230 goto bottomup;
5231
5232- addr = mm->mmap_base-len;
5233- if (do_colour_align)
5234- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5235+ addr = mm->mmap_base - len;
5236
5237 do {
5238+ if (do_colour_align)
5239+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5240 /*
5241 * Lookup failure means no vma is above this address,
5242 * else if new region fits below vma->vm_start,
5243 * return with success:
5244 */
5245 vma = find_vma(mm, addr);
5246- if (likely(!vma || addr+len <= vma->vm_start)) {
5247+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5248 /* remember the address as a hint for next time */
5249 return (mm->free_area_cache = addr);
5250 }
5251@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5252 mm->cached_hole_size = vma->vm_start - addr;
5253
5254 /* try just below the current vma->vm_start */
5255- addr = vma->vm_start-len;
5256- if (do_colour_align)
5257- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5258- } while (likely(len < vma->vm_start));
5259+ addr = skip_heap_stack_gap(vma, len);
5260+ } while (!IS_ERR_VALUE(addr));
5261
5262 bottomup:
5263 /*
5264diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
5265index 05ef538..dc9c857 100644
5266--- a/arch/sparc/Kconfig
5267+++ b/arch/sparc/Kconfig
5268@@ -32,6 +32,7 @@ config SPARC
5269
5270 config SPARC32
5271 def_bool !64BIT
5272+ select GENERIC_ATOMIC64
5273
5274 config SPARC64
5275 def_bool 64BIT
5276diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5277index 113225b..7fd04e7 100644
5278--- a/arch/sparc/Makefile
5279+++ b/arch/sparc/Makefile
5280@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5281 # Export what is needed by arch/sparc/boot/Makefile
5282 export VMLINUX_INIT VMLINUX_MAIN
5283 VMLINUX_INIT := $(head-y) $(init-y)
5284-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5285+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5286 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5287 VMLINUX_MAIN += $(drivers-y) $(net-y)
5288
5289diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
5290index f0d343c..cf36e68 100644
5291--- a/arch/sparc/include/asm/atomic_32.h
5292+++ b/arch/sparc/include/asm/atomic_32.h
5293@@ -13,6 +13,8 @@
5294
5295 #include <linux/types.h>
5296
5297+#include <asm-generic/atomic64.h>
5298+
5299 #ifdef __KERNEL__
5300
5301 #include <asm/system.h>
5302diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5303index f5cc06f..f858d47 100644
5304--- a/arch/sparc/include/asm/atomic_64.h
5305+++ b/arch/sparc/include/asm/atomic_64.h
5306@@ -14,18 +14,40 @@
5307 #define ATOMIC64_INIT(i) { (i) }
5308
5309 #define atomic_read(v) ((v)->counter)
5310+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5311+{
5312+ return v->counter;
5313+}
5314 #define atomic64_read(v) ((v)->counter)
5315+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5316+{
5317+ return v->counter;
5318+}
5319
5320 #define atomic_set(v, i) (((v)->counter) = i)
5321+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5322+{
5323+ v->counter = i;
5324+}
5325 #define atomic64_set(v, i) (((v)->counter) = i)
5326+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5327+{
5328+ v->counter = i;
5329+}
5330
5331 extern void atomic_add(int, atomic_t *);
5332+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5333 extern void atomic64_add(long, atomic64_t *);
5334+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5335 extern void atomic_sub(int, atomic_t *);
5336+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5337 extern void atomic64_sub(long, atomic64_t *);
5338+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5339
5340 extern int atomic_add_ret(int, atomic_t *);
5341+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5342 extern long atomic64_add_ret(long, atomic64_t *);
5343+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5344 extern int atomic_sub_ret(int, atomic_t *);
5345 extern long atomic64_sub_ret(long, atomic64_t *);
5346
5347@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5348 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5349
5350 #define atomic_inc_return(v) atomic_add_ret(1, v)
5351+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5352+{
5353+ return atomic_add_ret_unchecked(1, v);
5354+}
5355 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5356+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5357+{
5358+ return atomic64_add_ret_unchecked(1, v);
5359+}
5360
5361 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5362 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5363
5364 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5365+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5366+{
5367+ return atomic_add_ret_unchecked(i, v);
5368+}
5369 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5370+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5371+{
5372+ return atomic64_add_ret_unchecked(i, v);
5373+}
5374
5375 /*
5376 * atomic_inc_and_test - increment and test
5377@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5378 * other cases.
5379 */
5380 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5381+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5382+{
5383+ return atomic_inc_return_unchecked(v) == 0;
5384+}
5385 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5386
5387 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5388@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5389 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5390
5391 #define atomic_inc(v) atomic_add(1, v)
5392+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5393+{
5394+ atomic_add_unchecked(1, v);
5395+}
5396 #define atomic64_inc(v) atomic64_add(1, v)
5397+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5398+{
5399+ atomic64_add_unchecked(1, v);
5400+}
5401
5402 #define atomic_dec(v) atomic_sub(1, v)
5403+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5404+{
5405+ atomic_sub_unchecked(1, v);
5406+}
5407 #define atomic64_dec(v) atomic64_sub(1, v)
5408+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5409+{
5410+ atomic64_sub_unchecked(1, v);
5411+}
5412
5413 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5414 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5415
5416 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5417+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5418+{
5419+ return cmpxchg(&v->counter, old, new);
5420+}
5421 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5422+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5423+{
5424+ return xchg(&v->counter, new);
5425+}
5426
5427 static inline int atomic_add_unless(atomic_t *v, int a, int u)
5428 {
5429- int c, old;
5430+ int c, old, new;
5431 c = atomic_read(v);
5432 for (;;) {
5433- if (unlikely(c == (u)))
5434+ if (unlikely(c == u))
5435 break;
5436- old = atomic_cmpxchg((v), c, c + (a));
5437+
5438+ asm volatile("addcc %2, %0, %0\n"
5439+
5440+#ifdef CONFIG_PAX_REFCOUNT
5441+ "tvs %%icc, 6\n"
5442+#endif
5443+
5444+ : "=r" (new)
5445+ : "0" (c), "ir" (a)
5446+ : "cc");
5447+
5448+ old = atomic_cmpxchg(v, c, new);
5449 if (likely(old == c))
5450 break;
5451 c = old;
5452 }
5453- return c != (u);
5454+ return c != u;
5455 }
5456
5457 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
5458@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
5459 #define atomic64_cmpxchg(v, o, n) \
5460 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5461 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5462+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5463+{
5464+ return xchg(&v->counter, new);
5465+}
5466
5467 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5468 {
5469- long c, old;
5470+ long c, old, new;
5471 c = atomic64_read(v);
5472 for (;;) {
5473- if (unlikely(c == (u)))
5474+ if (unlikely(c == u))
5475 break;
5476- old = atomic64_cmpxchg((v), c, c + (a));
5477+
5478+ asm volatile("addcc %2, %0, %0\n"
5479+
5480+#ifdef CONFIG_PAX_REFCOUNT
5481+ "tvs %%xcc, 6\n"
5482+#endif
5483+
5484+ : "=r" (new)
5485+ : "0" (c), "ir" (a)
5486+ : "cc");
5487+
5488+ old = atomic64_cmpxchg(v, c, new);
5489 if (likely(old == c))
5490 break;
5491 c = old;
5492 }
5493- return c != (u);
5494+ return c != u;
5495 }
5496
5497 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5498diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5499index 41f85ae..73b80b5 100644
5500--- a/arch/sparc/include/asm/cache.h
5501+++ b/arch/sparc/include/asm/cache.h
5502@@ -7,8 +7,10 @@
5503 #ifndef _SPARC_CACHE_H
5504 #define _SPARC_CACHE_H
5505
5506+#include <linux/const.h>
5507+
5508 #define L1_CACHE_SHIFT 5
5509-#define L1_CACHE_BYTES 32
5510+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5511 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
5512
5513 #ifdef CONFIG_SPARC32
5514diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
5515index 5a8c308..38def92 100644
5516--- a/arch/sparc/include/asm/dma-mapping.h
5517+++ b/arch/sparc/include/asm/dma-mapping.h
5518@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
5519 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
5520 #define dma_is_consistent(d, h) (1)
5521
5522-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
5523+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
5524 extern struct bus_type pci_bus_type;
5525
5526-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5527+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
5528 {
5529 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
5530 if (dev->bus == &pci_bus_type)
5531@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
5532 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5533 dma_addr_t *dma_handle, gfp_t flag)
5534 {
5535- struct dma_map_ops *ops = get_dma_ops(dev);
5536+ const struct dma_map_ops *ops = get_dma_ops(dev);
5537 void *cpu_addr;
5538
5539 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
5540@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
5541 static inline void dma_free_coherent(struct device *dev, size_t size,
5542 void *cpu_addr, dma_addr_t dma_handle)
5543 {
5544- struct dma_map_ops *ops = get_dma_ops(dev);
5545+ const struct dma_map_ops *ops = get_dma_ops(dev);
5546
5547 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
5548 ops->free_coherent(dev, size, cpu_addr, dma_handle);
5549diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5550index 381a1b5..b97e3ff 100644
5551--- a/arch/sparc/include/asm/elf_32.h
5552+++ b/arch/sparc/include/asm/elf_32.h
5553@@ -116,6 +116,13 @@ typedef struct {
5554
5555 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5556
5557+#ifdef CONFIG_PAX_ASLR
5558+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5559+
5560+#define PAX_DELTA_MMAP_LEN 16
5561+#define PAX_DELTA_STACK_LEN 16
5562+#endif
5563+
5564 /* This yields a mask that user programs can use to figure out what
5565 instruction set this cpu supports. This can NOT be done in userspace
5566 on Sparc. */
5567diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5568index 9968085..c2106ef 100644
5569--- a/arch/sparc/include/asm/elf_64.h
5570+++ b/arch/sparc/include/asm/elf_64.h
5571@@ -163,6 +163,12 @@ typedef struct {
5572 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5573 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5574
5575+#ifdef CONFIG_PAX_ASLR
5576+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5577+
5578+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5579+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5580+#endif
5581
5582 /* This yields a mask that user programs can use to figure out what
5583 instruction set this cpu supports. */
5584diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
5585index 156707b..aefa786 100644
5586--- a/arch/sparc/include/asm/page_32.h
5587+++ b/arch/sparc/include/asm/page_32.h
5588@@ -8,6 +8,8 @@
5589 #ifndef _SPARC_PAGE_H
5590 #define _SPARC_PAGE_H
5591
5592+#include <linux/const.h>
5593+
5594 #define PAGE_SHIFT 12
5595
5596 #ifndef __ASSEMBLY__
5597diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5598index e0cabe7..efd60f1 100644
5599--- a/arch/sparc/include/asm/pgtable_32.h
5600+++ b/arch/sparc/include/asm/pgtable_32.h
5601@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5602 BTFIXUPDEF_INT(page_none)
5603 BTFIXUPDEF_INT(page_copy)
5604 BTFIXUPDEF_INT(page_readonly)
5605+
5606+#ifdef CONFIG_PAX_PAGEEXEC
5607+BTFIXUPDEF_INT(page_shared_noexec)
5608+BTFIXUPDEF_INT(page_copy_noexec)
5609+BTFIXUPDEF_INT(page_readonly_noexec)
5610+#endif
5611+
5612 BTFIXUPDEF_INT(page_kernel)
5613
5614 #define PMD_SHIFT SUN4C_PMD_SHIFT
5615@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
5616 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5617 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5618
5619+#ifdef CONFIG_PAX_PAGEEXEC
5620+extern pgprot_t PAGE_SHARED_NOEXEC;
5621+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5622+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5623+#else
5624+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5625+# define PAGE_COPY_NOEXEC PAGE_COPY
5626+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5627+#endif
5628+
5629 extern unsigned long page_kernel;
5630
5631 #ifdef MODULE
5632diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5633index 1407c07..7e10231 100644
5634--- a/arch/sparc/include/asm/pgtsrmmu.h
5635+++ b/arch/sparc/include/asm/pgtsrmmu.h
5636@@ -115,6 +115,13 @@
5637 SRMMU_EXEC | SRMMU_REF)
5638 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5639 SRMMU_EXEC | SRMMU_REF)
5640+
5641+#ifdef CONFIG_PAX_PAGEEXEC
5642+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5643+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5644+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5645+#endif
5646+
5647 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5648 SRMMU_DIRTY | SRMMU_REF)
5649
5650diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5651index 43e5147..47622a1 100644
5652--- a/arch/sparc/include/asm/spinlock_64.h
5653+++ b/arch/sparc/include/asm/spinlock_64.h
5654@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
5655
5656 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5657
5658-static void inline arch_read_lock(raw_rwlock_t *lock)
5659+static inline void arch_read_lock(raw_rwlock_t *lock)
5660 {
5661 unsigned long tmp1, tmp2;
5662
5663 __asm__ __volatile__ (
5664 "1: ldsw [%2], %0\n"
5665 " brlz,pn %0, 2f\n"
5666-"4: add %0, 1, %1\n"
5667+"4: addcc %0, 1, %1\n"
5668+
5669+#ifdef CONFIG_PAX_REFCOUNT
5670+" tvs %%icc, 6\n"
5671+#endif
5672+
5673 " cas [%2], %0, %1\n"
5674 " cmp %0, %1\n"
5675 " bne,pn %%icc, 1b\n"
5676@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
5677 " .previous"
5678 : "=&r" (tmp1), "=&r" (tmp2)
5679 : "r" (lock)
5680- : "memory");
5681+ : "memory", "cc");
5682 }
5683
5684-static int inline arch_read_trylock(raw_rwlock_t *lock)
5685+static inline int arch_read_trylock(raw_rwlock_t *lock)
5686 {
5687 int tmp1, tmp2;
5688
5689@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5690 "1: ldsw [%2], %0\n"
5691 " brlz,a,pn %0, 2f\n"
5692 " mov 0, %0\n"
5693-" add %0, 1, %1\n"
5694+" addcc %0, 1, %1\n"
5695+
5696+#ifdef CONFIG_PAX_REFCOUNT
5697+" tvs %%icc, 6\n"
5698+#endif
5699+
5700 " cas [%2], %0, %1\n"
5701 " cmp %0, %1\n"
5702 " bne,pn %%icc, 1b\n"
5703@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
5704 return tmp1;
5705 }
5706
5707-static void inline arch_read_unlock(raw_rwlock_t *lock)
5708+static inline void arch_read_unlock(raw_rwlock_t *lock)
5709 {
5710 unsigned long tmp1, tmp2;
5711
5712 __asm__ __volatile__(
5713 "1: lduw [%2], %0\n"
5714-" sub %0, 1, %1\n"
5715+" subcc %0, 1, %1\n"
5716+
5717+#ifdef CONFIG_PAX_REFCOUNT
5718+" tvs %%icc, 6\n"
5719+#endif
5720+
5721 " cas [%2], %0, %1\n"
5722 " cmp %0, %1\n"
5723 " bne,pn %%xcc, 1b\n"
5724@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
5725 : "memory");
5726 }
5727
5728-static void inline arch_write_lock(raw_rwlock_t *lock)
5729+static inline void arch_write_lock(raw_rwlock_t *lock)
5730 {
5731 unsigned long mask, tmp1, tmp2;
5732
5733@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
5734 : "memory");
5735 }
5736
5737-static void inline arch_write_unlock(raw_rwlock_t *lock)
5738+static inline void arch_write_unlock(raw_rwlock_t *lock)
5739 {
5740 __asm__ __volatile__(
5741 " stw %%g0, [%0]"
5742@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
5743 : "memory");
5744 }
5745
5746-static int inline arch_write_trylock(raw_rwlock_t *lock)
5747+static inline int arch_write_trylock(raw_rwlock_t *lock)
5748 {
5749 unsigned long mask, tmp1, tmp2, result;
5750
5751diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5752index 844d73a..f787fb9 100644
5753--- a/arch/sparc/include/asm/thread_info_32.h
5754+++ b/arch/sparc/include/asm/thread_info_32.h
5755@@ -50,6 +50,8 @@ struct thread_info {
5756 unsigned long w_saved;
5757
5758 struct restart_block restart_block;
5759+
5760+ unsigned long lowest_stack;
5761 };
5762
5763 /*
5764diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5765index f78ad9a..9f55fc7 100644
5766--- a/arch/sparc/include/asm/thread_info_64.h
5767+++ b/arch/sparc/include/asm/thread_info_64.h
5768@@ -68,6 +68,8 @@ struct thread_info {
5769 struct pt_regs *kern_una_regs;
5770 unsigned int kern_una_insn;
5771
5772+ unsigned long lowest_stack;
5773+
5774 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5775 };
5776
5777diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5778index e88fbe5..96b0ce5 100644
5779--- a/arch/sparc/include/asm/uaccess.h
5780+++ b/arch/sparc/include/asm/uaccess.h
5781@@ -1,5 +1,13 @@
5782 #ifndef ___ASM_SPARC_UACCESS_H
5783 #define ___ASM_SPARC_UACCESS_H
5784+
5785+#ifdef __KERNEL__
5786+#ifndef __ASSEMBLY__
5787+#include <linux/types.h>
5788+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5789+#endif
5790+#endif
5791+
5792 #if defined(__sparc__) && defined(__arch64__)
5793 #include <asm/uaccess_64.h>
5794 #else
5795diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5796index 8303ac4..07f333d 100644
5797--- a/arch/sparc/include/asm/uaccess_32.h
5798+++ b/arch/sparc/include/asm/uaccess_32.h
5799@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5800
5801 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5802 {
5803- if (n && __access_ok((unsigned long) to, n))
5804+ if ((long)n < 0)
5805+ return n;
5806+
5807+ if (n && __access_ok((unsigned long) to, n)) {
5808+ if (!__builtin_constant_p(n))
5809+ check_object_size(from, n, true);
5810 return __copy_user(to, (__force void __user *) from, n);
5811- else
5812+ } else
5813 return n;
5814 }
5815
5816 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5817 {
5818+ if ((long)n < 0)
5819+ return n;
5820+
5821+ if (!__builtin_constant_p(n))
5822+ check_object_size(from, n, true);
5823+
5824 return __copy_user(to, (__force void __user *) from, n);
5825 }
5826
5827 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5828 {
5829- if (n && __access_ok((unsigned long) from, n))
5830+ if ((long)n < 0)
5831+ return n;
5832+
5833+ if (n && __access_ok((unsigned long) from, n)) {
5834+ if (!__builtin_constant_p(n))
5835+ check_object_size(to, n, false);
5836 return __copy_user((__force void __user *) to, from, n);
5837- else
5838+ } else
5839 return n;
5840 }
5841
5842 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5843 {
5844+ if ((long)n < 0)
5845+ return n;
5846+
5847 return __copy_user((__force void __user *) to, from, n);
5848 }
5849
5850diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5851index 9ea271e..7b8a271 100644
5852--- a/arch/sparc/include/asm/uaccess_64.h
5853+++ b/arch/sparc/include/asm/uaccess_64.h
5854@@ -9,6 +9,7 @@
5855 #include <linux/compiler.h>
5856 #include <linux/string.h>
5857 #include <linux/thread_info.h>
5858+#include <linux/kernel.h>
5859 #include <asm/asi.h>
5860 #include <asm/system.h>
5861 #include <asm/spitfire.h>
5862@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5863 static inline unsigned long __must_check
5864 copy_from_user(void *to, const void __user *from, unsigned long size)
5865 {
5866- unsigned long ret = ___copy_from_user(to, from, size);
5867+ unsigned long ret;
5868
5869+ if ((long)size < 0 || size > INT_MAX)
5870+ return size;
5871+
5872+ if (!__builtin_constant_p(size))
5873+ check_object_size(to, size, false);
5874+
5875+ ret = ___copy_from_user(to, from, size);
5876 if (unlikely(ret))
5877 ret = copy_from_user_fixup(to, from, size);
5878 return ret;
5879@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5880 static inline unsigned long __must_check
5881 copy_to_user(void __user *to, const void *from, unsigned long size)
5882 {
5883- unsigned long ret = ___copy_to_user(to, from, size);
5884+ unsigned long ret;
5885
5886+ if ((long)size < 0 || size > INT_MAX)
5887+ return size;
5888+
5889+ if (!__builtin_constant_p(size))
5890+ check_object_size(from, size, true);
5891+
5892+ ret = ___copy_to_user(to, from, size);
5893 if (unlikely(ret))
5894 ret = copy_to_user_fixup(to, from, size);
5895 return ret;
5896diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5897index 2782681..77ded84 100644
5898--- a/arch/sparc/kernel/Makefile
5899+++ b/arch/sparc/kernel/Makefile
5900@@ -3,7 +3,7 @@
5901 #
5902
5903 asflags-y := -ansi
5904-ccflags-y := -Werror
5905+#ccflags-y := -Werror
5906
5907 extra-y := head_$(BITS).o
5908 extra-y += init_task.o
5909diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
5910index 7690cc2..ece64c9 100644
5911--- a/arch/sparc/kernel/iommu.c
5912+++ b/arch/sparc/kernel/iommu.c
5913@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
5914 spin_unlock_irqrestore(&iommu->lock, flags);
5915 }
5916
5917-static struct dma_map_ops sun4u_dma_ops = {
5918+static const struct dma_map_ops sun4u_dma_ops = {
5919 .alloc_coherent = dma_4u_alloc_coherent,
5920 .free_coherent = dma_4u_free_coherent,
5921 .map_page = dma_4u_map_page,
5922@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
5923 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
5924 };
5925
5926-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5927+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
5928 EXPORT_SYMBOL(dma_ops);
5929
5930 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
5931diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
5932index 9f61fd8..bd048db 100644
5933--- a/arch/sparc/kernel/ioport.c
5934+++ b/arch/sparc/kernel/ioport.c
5935@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
5936 BUG();
5937 }
5938
5939-struct dma_map_ops sbus_dma_ops = {
5940+const struct dma_map_ops sbus_dma_ops = {
5941 .alloc_coherent = sbus_alloc_coherent,
5942 .free_coherent = sbus_free_coherent,
5943 .map_page = sbus_map_page,
5944@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
5945 .sync_sg_for_device = sbus_sync_sg_for_device,
5946 };
5947
5948-struct dma_map_ops *dma_ops = &sbus_dma_ops;
5949+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
5950 EXPORT_SYMBOL(dma_ops);
5951
5952 static int __init sparc_register_ioport(void)
5953@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
5954 }
5955 }
5956
5957-struct dma_map_ops pci32_dma_ops = {
5958+const struct dma_map_ops pci32_dma_ops = {
5959 .alloc_coherent = pci32_alloc_coherent,
5960 .free_coherent = pci32_free_coherent,
5961 .map_page = pci32_map_page,
5962diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
5963index 04df4ed..55c4b6e 100644
5964--- a/arch/sparc/kernel/kgdb_32.c
5965+++ b/arch/sparc/kernel/kgdb_32.c
5966@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
5967 {
5968 }
5969
5970-struct kgdb_arch arch_kgdb_ops = {
5971+const struct kgdb_arch arch_kgdb_ops = {
5972 /* Breakpoint instruction: ta 0x7d */
5973 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
5974 };
5975diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
5976index f5a0fd4..d886f71 100644
5977--- a/arch/sparc/kernel/kgdb_64.c
5978+++ b/arch/sparc/kernel/kgdb_64.c
5979@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
5980 {
5981 }
5982
5983-struct kgdb_arch arch_kgdb_ops = {
5984+const struct kgdb_arch arch_kgdb_ops = {
5985 /* Breakpoint instruction: ta 0x72 */
5986 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
5987 };
5988diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
5989index 23c33ff..d137fbd 100644
5990--- a/arch/sparc/kernel/pci_sun4v.c
5991+++ b/arch/sparc/kernel/pci_sun4v.c
5992@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
5993 spin_unlock_irqrestore(&iommu->lock, flags);
5994 }
5995
5996-static struct dma_map_ops sun4v_dma_ops = {
5997+static const struct dma_map_ops sun4v_dma_ops = {
5998 .alloc_coherent = dma_4v_alloc_coherent,
5999 .free_coherent = dma_4v_free_coherent,
6000 .map_page = dma_4v_map_page,
6001diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
6002index c49865b..b41a81b 100644
6003--- a/arch/sparc/kernel/process_32.c
6004+++ b/arch/sparc/kernel/process_32.c
6005@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
6006 rw->ins[4], rw->ins[5],
6007 rw->ins[6],
6008 rw->ins[7]);
6009- printk("%pS\n", (void *) rw->ins[7]);
6010+ printk("%pA\n", (void *) rw->ins[7]);
6011 rw = (struct reg_window32 *) rw->ins[6];
6012 }
6013 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
6014@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
6015
6016 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
6017 r->psr, r->pc, r->npc, r->y, print_tainted());
6018- printk("PC: <%pS>\n", (void *) r->pc);
6019+ printk("PC: <%pA>\n", (void *) r->pc);
6020 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6021 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
6022 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
6023 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6024 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
6025 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
6026- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
6027+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
6028
6029 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
6030 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
6031@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6032 rw = (struct reg_window32 *) fp;
6033 pc = rw->ins[7];
6034 printk("[%08lx : ", pc);
6035- printk("%pS ] ", (void *) pc);
6036+ printk("%pA ] ", (void *) pc);
6037 fp = rw->ins[6];
6038 } while (++count < 16);
6039 printk("\n");
6040diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
6041index cb70476..3d0c191 100644
6042--- a/arch/sparc/kernel/process_64.c
6043+++ b/arch/sparc/kernel/process_64.c
6044@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
6045 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
6046 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
6047 if (regs->tstate & TSTATE_PRIV)
6048- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
6049+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
6050 }
6051
6052 void show_regs(struct pt_regs *regs)
6053 {
6054 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
6055 regs->tpc, regs->tnpc, regs->y, print_tainted());
6056- printk("TPC: <%pS>\n", (void *) regs->tpc);
6057+ printk("TPC: <%pA>\n", (void *) regs->tpc);
6058 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
6059 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
6060 regs->u_regs[3]);
6061@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
6062 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
6063 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
6064 regs->u_regs[15]);
6065- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
6066+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
6067 show_regwindow(regs);
6068 }
6069
6070@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
6071 ((tp && tp->task) ? tp->task->pid : -1));
6072
6073 if (gp->tstate & TSTATE_PRIV) {
6074- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
6075+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
6076 (void *) gp->tpc,
6077 (void *) gp->o7,
6078 (void *) gp->i7,
6079diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
6080index 6edc4e5..06a69b4 100644
6081--- a/arch/sparc/kernel/sigutil_64.c
6082+++ b/arch/sparc/kernel/sigutil_64.c
6083@@ -2,6 +2,7 @@
6084 #include <linux/types.h>
6085 #include <linux/thread_info.h>
6086 #include <linux/uaccess.h>
6087+#include <linux/errno.h>
6088
6089 #include <asm/sigcontext.h>
6090 #include <asm/fpumacro.h>
6091diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
6092index 3a82e65..ce0a53a 100644
6093--- a/arch/sparc/kernel/sys_sparc_32.c
6094+++ b/arch/sparc/kernel/sys_sparc_32.c
6095@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6096 if (ARCH_SUN4C && len > 0x20000000)
6097 return -ENOMEM;
6098 if (!addr)
6099- addr = TASK_UNMAPPED_BASE;
6100+ addr = current->mm->mmap_base;
6101
6102 if (flags & MAP_SHARED)
6103 addr = COLOUR_ALIGN(addr);
6104@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6105 }
6106 if (TASK_SIZE - PAGE_SIZE - len < addr)
6107 return -ENOMEM;
6108- if (!vmm || addr + len <= vmm->vm_start)
6109+ if (check_heap_stack_gap(vmm, addr, len))
6110 return addr;
6111 addr = vmm->vm_end;
6112 if (flags & MAP_SHARED)
6113diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
6114index cfa0e19..98972ac 100644
6115--- a/arch/sparc/kernel/sys_sparc_64.c
6116+++ b/arch/sparc/kernel/sys_sparc_64.c
6117@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6118 /* We do not accept a shared mapping if it would violate
6119 * cache aliasing constraints.
6120 */
6121- if ((flags & MAP_SHARED) &&
6122+ if ((filp || (flags & MAP_SHARED)) &&
6123 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6124 return -EINVAL;
6125 return addr;
6126@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6127 if (filp || (flags & MAP_SHARED))
6128 do_color_align = 1;
6129
6130+#ifdef CONFIG_PAX_RANDMMAP
6131+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6132+#endif
6133+
6134 if (addr) {
6135 if (do_color_align)
6136 addr = COLOUR_ALIGN(addr, pgoff);
6137@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
6138 addr = PAGE_ALIGN(addr);
6139
6140 vma = find_vma(mm, addr);
6141- if (task_size - len >= addr &&
6142- (!vma || addr + len <= vma->vm_start))
6143+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6144 return addr;
6145 }
6146
6147 if (len > mm->cached_hole_size) {
6148- start_addr = addr = mm->free_area_cache;
6149+ start_addr = addr = mm->free_area_cache;
6150 } else {
6151- start_addr = addr = TASK_UNMAPPED_BASE;
6152+ start_addr = addr = mm->mmap_base;
6153 mm->cached_hole_size = 0;
6154 }
6155
6156@@ -175,14 +178,14 @@ full_search:
6157 vma = find_vma(mm, VA_EXCLUDE_END);
6158 }
6159 if (unlikely(task_size < addr)) {
6160- if (start_addr != TASK_UNMAPPED_BASE) {
6161- start_addr = addr = TASK_UNMAPPED_BASE;
6162+ if (start_addr != mm->mmap_base) {
6163+ start_addr = addr = mm->mmap_base;
6164 mm->cached_hole_size = 0;
6165 goto full_search;
6166 }
6167 return -ENOMEM;
6168 }
6169- if (likely(!vma || addr + len <= vma->vm_start)) {
6170+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6171 /*
6172 * Remember the place where we stopped the search:
6173 */
6174@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6175 /* We do not accept a shared mapping if it would violate
6176 * cache aliasing constraints.
6177 */
6178- if ((flags & MAP_SHARED) &&
6179+ if ((filp || (flags & MAP_SHARED)) &&
6180 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
6181 return -EINVAL;
6182 return addr;
6183@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6184 addr = PAGE_ALIGN(addr);
6185
6186 vma = find_vma(mm, addr);
6187- if (task_size - len >= addr &&
6188- (!vma || addr + len <= vma->vm_start))
6189+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6190 return addr;
6191 }
6192
6193@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6194 /* make sure it can fit in the remaining address space */
6195 if (likely(addr > len)) {
6196 vma = find_vma(mm, addr-len);
6197- if (!vma || addr <= vma->vm_start) {
6198+ if (check_heap_stack_gap(vma, addr - len, len)) {
6199 /* remember the address as a hint for next time */
6200 return (mm->free_area_cache = addr-len);
6201 }
6202@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6203 if (unlikely(mm->mmap_base < len))
6204 goto bottomup;
6205
6206- addr = mm->mmap_base-len;
6207- if (do_color_align)
6208- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6209+ addr = mm->mmap_base - len;
6210
6211 do {
6212+ if (do_color_align)
6213+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6214 /*
6215 * Lookup failure means no vma is above this address,
6216 * else if new region fits below vma->vm_start,
6217 * return with success:
6218 */
6219 vma = find_vma(mm, addr);
6220- if (likely(!vma || addr+len <= vma->vm_start)) {
6221+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6222 /* remember the address as a hint for next time */
6223 return (mm->free_area_cache = addr);
6224 }
6225@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6226 mm->cached_hole_size = vma->vm_start - addr;
6227
6228 /* try just below the current vma->vm_start */
6229- addr = vma->vm_start-len;
6230- if (do_color_align)
6231- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
6232- } while (likely(len < vma->vm_start));
6233+ addr = skip_heap_stack_gap(vma, len);
6234+ } while (!IS_ERR_VALUE(addr));
6235
6236 bottomup:
6237 /*
6238@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6239 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
6240 sysctl_legacy_va_layout) {
6241 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6242+
6243+#ifdef CONFIG_PAX_RANDMMAP
6244+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6245+ mm->mmap_base += mm->delta_mmap;
6246+#endif
6247+
6248 mm->get_unmapped_area = arch_get_unmapped_area;
6249 mm->unmap_area = arch_unmap_area;
6250 } else {
6251@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6252 gap = (task_size / 6 * 5);
6253
6254 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
6255+
6256+#ifdef CONFIG_PAX_RANDMMAP
6257+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6258+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6259+#endif
6260+
6261 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6262 mm->unmap_area = arch_unmap_area_topdown;
6263 }
6264diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6265index c0490c7..84959d1 100644
6266--- a/arch/sparc/kernel/traps_32.c
6267+++ b/arch/sparc/kernel/traps_32.c
6268@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6269 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6270 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6271
6272+extern void gr_handle_kernel_exploit(void);
6273+
6274 void die_if_kernel(char *str, struct pt_regs *regs)
6275 {
6276 static int die_counter;
6277@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6278 count++ < 30 &&
6279 (((unsigned long) rw) >= PAGE_OFFSET) &&
6280 !(((unsigned long) rw) & 0x7)) {
6281- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6282+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6283 (void *) rw->ins[7]);
6284 rw = (struct reg_window32 *)rw->ins[6];
6285 }
6286 }
6287 printk("Instruction DUMP:");
6288 instruction_dump ((unsigned long *) regs->pc);
6289- if(regs->psr & PSR_PS)
6290+ if(regs->psr & PSR_PS) {
6291+ gr_handle_kernel_exploit();
6292 do_exit(SIGKILL);
6293+ }
6294 do_exit(SIGSEGV);
6295 }
6296
6297diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6298index 10f7bb9..cdb6793 100644
6299--- a/arch/sparc/kernel/traps_64.c
6300+++ b/arch/sparc/kernel/traps_64.c
6301@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6302 i + 1,
6303 p->trapstack[i].tstate, p->trapstack[i].tpc,
6304 p->trapstack[i].tnpc, p->trapstack[i].tt);
6305- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6306+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6307 }
6308 }
6309
6310@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6311
6312 lvl -= 0x100;
6313 if (regs->tstate & TSTATE_PRIV) {
6314+
6315+#ifdef CONFIG_PAX_REFCOUNT
6316+ if (lvl == 6)
6317+ pax_report_refcount_overflow(regs);
6318+#endif
6319+
6320 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6321 die_if_kernel(buffer, regs);
6322 }
6323@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6324 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6325 {
6326 char buffer[32];
6327-
6328+
6329 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6330 0, lvl, SIGTRAP) == NOTIFY_STOP)
6331 return;
6332
6333+#ifdef CONFIG_PAX_REFCOUNT
6334+ if (lvl == 6)
6335+ pax_report_refcount_overflow(regs);
6336+#endif
6337+
6338 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6339
6340 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6341@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6342 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6343 printk("%s" "ERROR(%d): ",
6344 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6345- printk("TPC<%pS>\n", (void *) regs->tpc);
6346+ printk("TPC<%pA>\n", (void *) regs->tpc);
6347 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6348 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6349 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6350@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6351 smp_processor_id(),
6352 (type & 0x1) ? 'I' : 'D',
6353 regs->tpc);
6354- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6355+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6356 panic("Irrecoverable Cheetah+ parity error.");
6357 }
6358
6359@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6360 smp_processor_id(),
6361 (type & 0x1) ? 'I' : 'D',
6362 regs->tpc);
6363- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6364+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6365 }
6366
6367 struct sun4v_error_entry {
6368@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6369
6370 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6371 regs->tpc, tl);
6372- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6373+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6374 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6375- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6376+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6377 (void *) regs->u_regs[UREG_I7]);
6378 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6379 "pte[%lx] error[%lx]\n",
6380@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6381
6382 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6383 regs->tpc, tl);
6384- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6385+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6386 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6387- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6388+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6389 (void *) regs->u_regs[UREG_I7]);
6390 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6391 "pte[%lx] error[%lx]\n",
6392@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6393 fp = (unsigned long)sf->fp + STACK_BIAS;
6394 }
6395
6396- printk(" [%016lx] %pS\n", pc, (void *) pc);
6397+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6398 } while (++count < 16);
6399 }
6400
6401@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6402 return (struct reg_window *) (fp + STACK_BIAS);
6403 }
6404
6405+extern void gr_handle_kernel_exploit(void);
6406+
6407 void die_if_kernel(char *str, struct pt_regs *regs)
6408 {
6409 static int die_counter;
6410@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6411 while (rw &&
6412 count++ < 30&&
6413 is_kernel_stack(current, rw)) {
6414- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6415+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6416 (void *) rw->ins[7]);
6417
6418 rw = kernel_stack_up(rw);
6419@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6420 }
6421 user_instruction_dump ((unsigned int __user *) regs->tpc);
6422 }
6423- if (regs->tstate & TSTATE_PRIV)
6424+ if (regs->tstate & TSTATE_PRIV) {
6425+ gr_handle_kernel_exploit();
6426 do_exit(SIGKILL);
6427+ }
6428+
6429 do_exit(SIGSEGV);
6430 }
6431 EXPORT_SYMBOL(die_if_kernel);
6432diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
6433index be183fe..1c8d332 100644
6434--- a/arch/sparc/kernel/una_asm_64.S
6435+++ b/arch/sparc/kernel/una_asm_64.S
6436@@ -127,7 +127,7 @@ do_int_load:
6437 wr %o5, 0x0, %asi
6438 retl
6439 mov 0, %o0
6440- .size __do_int_load, .-__do_int_load
6441+ .size do_int_load, .-do_int_load
6442
6443 .section __ex_table,"a"
6444 .word 4b, __retl_efault
6445diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6446index 3792099..2af17d8 100644
6447--- a/arch/sparc/kernel/unaligned_64.c
6448+++ b/arch/sparc/kernel/unaligned_64.c
6449@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
6450 if (count < 5) {
6451 last_time = jiffies;
6452 count++;
6453- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6454+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6455 regs->tpc, (void *) regs->tpc);
6456 }
6457 }
6458diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6459index e75faf0..24f12f9 100644
6460--- a/arch/sparc/lib/Makefile
6461+++ b/arch/sparc/lib/Makefile
6462@@ -2,7 +2,7 @@
6463 #
6464
6465 asflags-y := -ansi -DST_DIV0=0x02
6466-ccflags-y := -Werror
6467+#ccflags-y := -Werror
6468
6469 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6470 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6471diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6472index 0268210..f0291ca 100644
6473--- a/arch/sparc/lib/atomic_64.S
6474+++ b/arch/sparc/lib/atomic_64.S
6475@@ -18,7 +18,12 @@
6476 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6477 BACKOFF_SETUP(%o2)
6478 1: lduw [%o1], %g1
6479- add %g1, %o0, %g7
6480+ addcc %g1, %o0, %g7
6481+
6482+#ifdef CONFIG_PAX_REFCOUNT
6483+ tvs %icc, 6
6484+#endif
6485+
6486 cas [%o1], %g1, %g7
6487 cmp %g1, %g7
6488 bne,pn %icc, 2f
6489@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6490 2: BACKOFF_SPIN(%o2, %o3, 1b)
6491 .size atomic_add, .-atomic_add
6492
6493+ .globl atomic_add_unchecked
6494+ .type atomic_add_unchecked,#function
6495+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6496+ BACKOFF_SETUP(%o2)
6497+1: lduw [%o1], %g1
6498+ add %g1, %o0, %g7
6499+ cas [%o1], %g1, %g7
6500+ cmp %g1, %g7
6501+ bne,pn %icc, 2f
6502+ nop
6503+ retl
6504+ nop
6505+2: BACKOFF_SPIN(%o2, %o3, 1b)
6506+ .size atomic_add_unchecked, .-atomic_add_unchecked
6507+
6508 .globl atomic_sub
6509 .type atomic_sub,#function
6510 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6511 BACKOFF_SETUP(%o2)
6512 1: lduw [%o1], %g1
6513- sub %g1, %o0, %g7
6514+ subcc %g1, %o0, %g7
6515+
6516+#ifdef CONFIG_PAX_REFCOUNT
6517+ tvs %icc, 6
6518+#endif
6519+
6520 cas [%o1], %g1, %g7
6521 cmp %g1, %g7
6522 bne,pn %icc, 2f
6523@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6524 2: BACKOFF_SPIN(%o2, %o3, 1b)
6525 .size atomic_sub, .-atomic_sub
6526
6527+ .globl atomic_sub_unchecked
6528+ .type atomic_sub_unchecked,#function
6529+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6530+ BACKOFF_SETUP(%o2)
6531+1: lduw [%o1], %g1
6532+ sub %g1, %o0, %g7
6533+ cas [%o1], %g1, %g7
6534+ cmp %g1, %g7
6535+ bne,pn %icc, 2f
6536+ nop
6537+ retl
6538+ nop
6539+2: BACKOFF_SPIN(%o2, %o3, 1b)
6540+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6541+
6542 .globl atomic_add_ret
6543 .type atomic_add_ret,#function
6544 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6545 BACKOFF_SETUP(%o2)
6546 1: lduw [%o1], %g1
6547- add %g1, %o0, %g7
6548+ addcc %g1, %o0, %g7
6549+
6550+#ifdef CONFIG_PAX_REFCOUNT
6551+ tvs %icc, 6
6552+#endif
6553+
6554 cas [%o1], %g1, %g7
6555 cmp %g1, %g7
6556 bne,pn %icc, 2f
6557@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6558 2: BACKOFF_SPIN(%o2, %o3, 1b)
6559 .size atomic_add_ret, .-atomic_add_ret
6560
6561+ .globl atomic_add_ret_unchecked
6562+ .type atomic_add_ret_unchecked,#function
6563+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6564+ BACKOFF_SETUP(%o2)
6565+1: lduw [%o1], %g1
6566+ addcc %g1, %o0, %g7
6567+ cas [%o1], %g1, %g7
6568+ cmp %g1, %g7
6569+ bne,pn %icc, 2f
6570+ add %g7, %o0, %g7
6571+ sra %g7, 0, %o0
6572+ retl
6573+ nop
6574+2: BACKOFF_SPIN(%o2, %o3, 1b)
6575+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6576+
6577 .globl atomic_sub_ret
6578 .type atomic_sub_ret,#function
6579 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6580 BACKOFF_SETUP(%o2)
6581 1: lduw [%o1], %g1
6582- sub %g1, %o0, %g7
6583+ subcc %g1, %o0, %g7
6584+
6585+#ifdef CONFIG_PAX_REFCOUNT
6586+ tvs %icc, 6
6587+#endif
6588+
6589 cas [%o1], %g1, %g7
6590 cmp %g1, %g7
6591 bne,pn %icc, 2f
6592@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6593 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6594 BACKOFF_SETUP(%o2)
6595 1: ldx [%o1], %g1
6596- add %g1, %o0, %g7
6597+ addcc %g1, %o0, %g7
6598+
6599+#ifdef CONFIG_PAX_REFCOUNT
6600+ tvs %xcc, 6
6601+#endif
6602+
6603 casx [%o1], %g1, %g7
6604 cmp %g1, %g7
6605 bne,pn %xcc, 2f
6606@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6607 2: BACKOFF_SPIN(%o2, %o3, 1b)
6608 .size atomic64_add, .-atomic64_add
6609
6610+ .globl atomic64_add_unchecked
6611+ .type atomic64_add_unchecked,#function
6612+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6613+ BACKOFF_SETUP(%o2)
6614+1: ldx [%o1], %g1
6615+ addcc %g1, %o0, %g7
6616+ casx [%o1], %g1, %g7
6617+ cmp %g1, %g7
6618+ bne,pn %xcc, 2f
6619+ nop
6620+ retl
6621+ nop
6622+2: BACKOFF_SPIN(%o2, %o3, 1b)
6623+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6624+
6625 .globl atomic64_sub
6626 .type atomic64_sub,#function
6627 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6628 BACKOFF_SETUP(%o2)
6629 1: ldx [%o1], %g1
6630- sub %g1, %o0, %g7
6631+ subcc %g1, %o0, %g7
6632+
6633+#ifdef CONFIG_PAX_REFCOUNT
6634+ tvs %xcc, 6
6635+#endif
6636+
6637 casx [%o1], %g1, %g7
6638 cmp %g1, %g7
6639 bne,pn %xcc, 2f
6640@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6641 2: BACKOFF_SPIN(%o2, %o3, 1b)
6642 .size atomic64_sub, .-atomic64_sub
6643
6644+ .globl atomic64_sub_unchecked
6645+ .type atomic64_sub_unchecked,#function
6646+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6647+ BACKOFF_SETUP(%o2)
6648+1: ldx [%o1], %g1
6649+ subcc %g1, %o0, %g7
6650+ casx [%o1], %g1, %g7
6651+ cmp %g1, %g7
6652+ bne,pn %xcc, 2f
6653+ nop
6654+ retl
6655+ nop
6656+2: BACKOFF_SPIN(%o2, %o3, 1b)
6657+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6658+
6659 .globl atomic64_add_ret
6660 .type atomic64_add_ret,#function
6661 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6662 BACKOFF_SETUP(%o2)
6663 1: ldx [%o1], %g1
6664- add %g1, %o0, %g7
6665+ addcc %g1, %o0, %g7
6666+
6667+#ifdef CONFIG_PAX_REFCOUNT
6668+ tvs %xcc, 6
6669+#endif
6670+
6671 casx [%o1], %g1, %g7
6672 cmp %g1, %g7
6673 bne,pn %xcc, 2f
6674@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6675 2: BACKOFF_SPIN(%o2, %o3, 1b)
6676 .size atomic64_add_ret, .-atomic64_add_ret
6677
6678+ .globl atomic64_add_ret_unchecked
6679+ .type atomic64_add_ret_unchecked,#function
6680+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6681+ BACKOFF_SETUP(%o2)
6682+1: ldx [%o1], %g1
6683+ addcc %g1, %o0, %g7
6684+ casx [%o1], %g1, %g7
6685+ cmp %g1, %g7
6686+ bne,pn %xcc, 2f
6687+ add %g7, %o0, %g7
6688+ mov %g7, %o0
6689+ retl
6690+ nop
6691+2: BACKOFF_SPIN(%o2, %o3, 1b)
6692+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6693+
6694 .globl atomic64_sub_ret
6695 .type atomic64_sub_ret,#function
6696 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6697 BACKOFF_SETUP(%o2)
6698 1: ldx [%o1], %g1
6699- sub %g1, %o0, %g7
6700+ subcc %g1, %o0, %g7
6701+
6702+#ifdef CONFIG_PAX_REFCOUNT
6703+ tvs %xcc, 6
6704+#endif
6705+
6706 casx [%o1], %g1, %g7
6707 cmp %g1, %g7
6708 bne,pn %xcc, 2f
6709diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6710index 704b126..2e79d76 100644
6711--- a/arch/sparc/lib/ksyms.c
6712+++ b/arch/sparc/lib/ksyms.c
6713@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
6714
6715 /* Atomic counter implementation. */
6716 EXPORT_SYMBOL(atomic_add);
6717+EXPORT_SYMBOL(atomic_add_unchecked);
6718 EXPORT_SYMBOL(atomic_add_ret);
6719+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6720 EXPORT_SYMBOL(atomic_sub);
6721+EXPORT_SYMBOL(atomic_sub_unchecked);
6722 EXPORT_SYMBOL(atomic_sub_ret);
6723 EXPORT_SYMBOL(atomic64_add);
6724+EXPORT_SYMBOL(atomic64_add_unchecked);
6725 EXPORT_SYMBOL(atomic64_add_ret);
6726+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6727 EXPORT_SYMBOL(atomic64_sub);
6728+EXPORT_SYMBOL(atomic64_sub_unchecked);
6729 EXPORT_SYMBOL(atomic64_sub_ret);
6730
6731 /* Atomic bit operations. */
6732diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
6733index 91a7d29..ce75c29 100644
6734--- a/arch/sparc/lib/rwsem_64.S
6735+++ b/arch/sparc/lib/rwsem_64.S
6736@@ -11,7 +11,12 @@
6737 .globl __down_read
6738 __down_read:
6739 1: lduw [%o0], %g1
6740- add %g1, 1, %g7
6741+ addcc %g1, 1, %g7
6742+
6743+#ifdef CONFIG_PAX_REFCOUNT
6744+ tvs %icc, 6
6745+#endif
6746+
6747 cas [%o0], %g1, %g7
6748 cmp %g1, %g7
6749 bne,pn %icc, 1b
6750@@ -33,7 +38,12 @@ __down_read:
6751 .globl __down_read_trylock
6752 __down_read_trylock:
6753 1: lduw [%o0], %g1
6754- add %g1, 1, %g7
6755+ addcc %g1, 1, %g7
6756+
6757+#ifdef CONFIG_PAX_REFCOUNT
6758+ tvs %icc, 6
6759+#endif
6760+
6761 cmp %g7, 0
6762 bl,pn %icc, 2f
6763 mov 0, %o1
6764@@ -51,7 +61,12 @@ __down_write:
6765 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6766 1:
6767 lduw [%o0], %g3
6768- add %g3, %g1, %g7
6769+ addcc %g3, %g1, %g7
6770+
6771+#ifdef CONFIG_PAX_REFCOUNT
6772+ tvs %icc, 6
6773+#endif
6774+
6775 cas [%o0], %g3, %g7
6776 cmp %g3, %g7
6777 bne,pn %icc, 1b
6778@@ -77,7 +92,12 @@ __down_write_trylock:
6779 cmp %g3, 0
6780 bne,pn %icc, 2f
6781 mov 0, %o1
6782- add %g3, %g1, %g7
6783+ addcc %g3, %g1, %g7
6784+
6785+#ifdef CONFIG_PAX_REFCOUNT
6786+ tvs %icc, 6
6787+#endif
6788+
6789 cas [%o0], %g3, %g7
6790 cmp %g3, %g7
6791 bne,pn %icc, 1b
6792@@ -90,7 +110,12 @@ __down_write_trylock:
6793 __up_read:
6794 1:
6795 lduw [%o0], %g1
6796- sub %g1, 1, %g7
6797+ subcc %g1, 1, %g7
6798+
6799+#ifdef CONFIG_PAX_REFCOUNT
6800+ tvs %icc, 6
6801+#endif
6802+
6803 cas [%o0], %g1, %g7
6804 cmp %g1, %g7
6805 bne,pn %icc, 1b
6806@@ -118,7 +143,12 @@ __up_write:
6807 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
6808 1:
6809 lduw [%o0], %g3
6810- sub %g3, %g1, %g7
6811+ subcc %g3, %g1, %g7
6812+
6813+#ifdef CONFIG_PAX_REFCOUNT
6814+ tvs %icc, 6
6815+#endif
6816+
6817 cas [%o0], %g3, %g7
6818 cmp %g3, %g7
6819 bne,pn %icc, 1b
6820@@ -143,7 +173,12 @@ __downgrade_write:
6821 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
6822 1:
6823 lduw [%o0], %g3
6824- sub %g3, %g1, %g7
6825+ subcc %g3, %g1, %g7
6826+
6827+#ifdef CONFIG_PAX_REFCOUNT
6828+ tvs %icc, 6
6829+#endif
6830+
6831 cas [%o0], %g3, %g7
6832 cmp %g3, %g7
6833 bne,pn %icc, 1b
6834diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6835index 79836a7..62f47a2 100644
6836--- a/arch/sparc/mm/Makefile
6837+++ b/arch/sparc/mm/Makefile
6838@@ -2,7 +2,7 @@
6839 #
6840
6841 asflags-y := -ansi
6842-ccflags-y := -Werror
6843+#ccflags-y := -Werror
6844
6845 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6846 obj-y += fault_$(BITS).o
6847diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6848index b99f81c..3453e93 100644
6849--- a/arch/sparc/mm/fault_32.c
6850+++ b/arch/sparc/mm/fault_32.c
6851@@ -21,6 +21,9 @@
6852 #include <linux/interrupt.h>
6853 #include <linux/module.h>
6854 #include <linux/kdebug.h>
6855+#include <linux/slab.h>
6856+#include <linux/pagemap.h>
6857+#include <linux/compiler.h>
6858
6859 #include <asm/system.h>
6860 #include <asm/page.h>
6861@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6862 return safe_compute_effective_address(regs, insn);
6863 }
6864
6865+#ifdef CONFIG_PAX_PAGEEXEC
6866+#ifdef CONFIG_PAX_DLRESOLVE
6867+static void pax_emuplt_close(struct vm_area_struct *vma)
6868+{
6869+ vma->vm_mm->call_dl_resolve = 0UL;
6870+}
6871+
6872+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6873+{
6874+ unsigned int *kaddr;
6875+
6876+ vmf->page = alloc_page(GFP_HIGHUSER);
6877+ if (!vmf->page)
6878+ return VM_FAULT_OOM;
6879+
6880+ kaddr = kmap(vmf->page);
6881+ memset(kaddr, 0, PAGE_SIZE);
6882+ kaddr[0] = 0x9DE3BFA8U; /* save */
6883+ flush_dcache_page(vmf->page);
6884+ kunmap(vmf->page);
6885+ return VM_FAULT_MAJOR;
6886+}
6887+
6888+static const struct vm_operations_struct pax_vm_ops = {
6889+ .close = pax_emuplt_close,
6890+ .fault = pax_emuplt_fault
6891+};
6892+
6893+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6894+{
6895+ int ret;
6896+
6897+ vma->vm_mm = current->mm;
6898+ vma->vm_start = addr;
6899+ vma->vm_end = addr + PAGE_SIZE;
6900+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6901+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6902+ vma->vm_ops = &pax_vm_ops;
6903+
6904+ ret = insert_vm_struct(current->mm, vma);
6905+ if (ret)
6906+ return ret;
6907+
6908+ ++current->mm->total_vm;
6909+ return 0;
6910+}
6911+#endif
6912+
6913+/*
6914+ * PaX: decide what to do with offenders (regs->pc = fault address)
6915+ *
6916+ * returns 1 when task should be killed
6917+ * 2 when patched PLT trampoline was detected
6918+ * 3 when unpatched PLT trampoline was detected
6919+ */
6920+static int pax_handle_fetch_fault(struct pt_regs *regs)
6921+{
6922+
6923+#ifdef CONFIG_PAX_EMUPLT
6924+ int err;
6925+
6926+ do { /* PaX: patched PLT emulation #1 */
6927+ unsigned int sethi1, sethi2, jmpl;
6928+
6929+ err = get_user(sethi1, (unsigned int *)regs->pc);
6930+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6931+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6932+
6933+ if (err)
6934+ break;
6935+
6936+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6937+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6938+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6939+ {
6940+ unsigned int addr;
6941+
6942+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6943+ addr = regs->u_regs[UREG_G1];
6944+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6945+ regs->pc = addr;
6946+ regs->npc = addr+4;
6947+ return 2;
6948+ }
6949+ } while (0);
6950+
6951+ { /* PaX: patched PLT emulation #2 */
6952+ unsigned int ba;
6953+
6954+ err = get_user(ba, (unsigned int *)regs->pc);
6955+
6956+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6957+ unsigned int addr;
6958+
6959+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6960+ regs->pc = addr;
6961+ regs->npc = addr+4;
6962+ return 2;
6963+ }
6964+ }
6965+
6966+ do { /* PaX: patched PLT emulation #3 */
6967+ unsigned int sethi, jmpl, nop;
6968+
6969+ err = get_user(sethi, (unsigned int *)regs->pc);
6970+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6971+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6972+
6973+ if (err)
6974+ break;
6975+
6976+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6977+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6978+ nop == 0x01000000U)
6979+ {
6980+ unsigned int addr;
6981+
6982+ addr = (sethi & 0x003FFFFFU) << 10;
6983+ regs->u_regs[UREG_G1] = addr;
6984+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6985+ regs->pc = addr;
6986+ regs->npc = addr+4;
6987+ return 2;
6988+ }
6989+ } while (0);
6990+
6991+ do { /* PaX: unpatched PLT emulation step 1 */
6992+ unsigned int sethi, ba, nop;
6993+
6994+ err = get_user(sethi, (unsigned int *)regs->pc);
6995+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6996+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6997+
6998+ if (err)
6999+ break;
7000+
7001+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7002+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7003+ nop == 0x01000000U)
7004+ {
7005+ unsigned int addr, save, call;
7006+
7007+ if ((ba & 0xFFC00000U) == 0x30800000U)
7008+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
7009+ else
7010+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
7011+
7012+ err = get_user(save, (unsigned int *)addr);
7013+ err |= get_user(call, (unsigned int *)(addr+4));
7014+ err |= get_user(nop, (unsigned int *)(addr+8));
7015+ if (err)
7016+ break;
7017+
7018+#ifdef CONFIG_PAX_DLRESOLVE
7019+ if (save == 0x9DE3BFA8U &&
7020+ (call & 0xC0000000U) == 0x40000000U &&
7021+ nop == 0x01000000U)
7022+ {
7023+ struct vm_area_struct *vma;
7024+ unsigned long call_dl_resolve;
7025+
7026+ down_read(&current->mm->mmap_sem);
7027+ call_dl_resolve = current->mm->call_dl_resolve;
7028+ up_read(&current->mm->mmap_sem);
7029+ if (likely(call_dl_resolve))
7030+ goto emulate;
7031+
7032+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7033+
7034+ down_write(&current->mm->mmap_sem);
7035+ if (current->mm->call_dl_resolve) {
7036+ call_dl_resolve = current->mm->call_dl_resolve;
7037+ up_write(&current->mm->mmap_sem);
7038+ if (vma)
7039+ kmem_cache_free(vm_area_cachep, vma);
7040+ goto emulate;
7041+ }
7042+
7043+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7044+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7045+ up_write(&current->mm->mmap_sem);
7046+ if (vma)
7047+ kmem_cache_free(vm_area_cachep, vma);
7048+ return 1;
7049+ }
7050+
7051+ if (pax_insert_vma(vma, call_dl_resolve)) {
7052+ up_write(&current->mm->mmap_sem);
7053+ kmem_cache_free(vm_area_cachep, vma);
7054+ return 1;
7055+ }
7056+
7057+ current->mm->call_dl_resolve = call_dl_resolve;
7058+ up_write(&current->mm->mmap_sem);
7059+
7060+emulate:
7061+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7062+ regs->pc = call_dl_resolve;
7063+ regs->npc = addr+4;
7064+ return 3;
7065+ }
7066+#endif
7067+
7068+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7069+ if ((save & 0xFFC00000U) == 0x05000000U &&
7070+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7071+ nop == 0x01000000U)
7072+ {
7073+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7074+ regs->u_regs[UREG_G2] = addr + 4;
7075+ addr = (save & 0x003FFFFFU) << 10;
7076+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
7077+ regs->pc = addr;
7078+ regs->npc = addr+4;
7079+ return 3;
7080+ }
7081+ }
7082+ } while (0);
7083+
7084+ do { /* PaX: unpatched PLT emulation step 2 */
7085+ unsigned int save, call, nop;
7086+
7087+ err = get_user(save, (unsigned int *)(regs->pc-4));
7088+ err |= get_user(call, (unsigned int *)regs->pc);
7089+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
7090+ if (err)
7091+ break;
7092+
7093+ if (save == 0x9DE3BFA8U &&
7094+ (call & 0xC0000000U) == 0x40000000U &&
7095+ nop == 0x01000000U)
7096+ {
7097+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
7098+
7099+ regs->u_regs[UREG_RETPC] = regs->pc;
7100+ regs->pc = dl_resolve;
7101+ regs->npc = dl_resolve+4;
7102+ return 3;
7103+ }
7104+ } while (0);
7105+#endif
7106+
7107+ return 1;
7108+}
7109+
7110+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7111+{
7112+ unsigned long i;
7113+
7114+ printk(KERN_ERR "PAX: bytes at PC: ");
7115+ for (i = 0; i < 8; i++) {
7116+ unsigned int c;
7117+ if (get_user(c, (unsigned int *)pc+i))
7118+ printk(KERN_CONT "???????? ");
7119+ else
7120+ printk(KERN_CONT "%08x ", c);
7121+ }
7122+ printk("\n");
7123+}
7124+#endif
7125+
7126 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
7127 unsigned long address)
7128 {
7129@@ -231,6 +495,24 @@ good_area:
7130 if(!(vma->vm_flags & VM_WRITE))
7131 goto bad_area;
7132 } else {
7133+
7134+#ifdef CONFIG_PAX_PAGEEXEC
7135+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
7136+ up_read(&mm->mmap_sem);
7137+ switch (pax_handle_fetch_fault(regs)) {
7138+
7139+#ifdef CONFIG_PAX_EMUPLT
7140+ case 2:
7141+ case 3:
7142+ return;
7143+#endif
7144+
7145+ }
7146+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
7147+ do_group_exit(SIGKILL);
7148+ }
7149+#endif
7150+
7151 /* Allow reads even for write-only mappings */
7152 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
7153 goto bad_area;
7154diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
7155index 43b0da9..a0b78f9 100644
7156--- a/arch/sparc/mm/fault_64.c
7157+++ b/arch/sparc/mm/fault_64.c
7158@@ -20,6 +20,9 @@
7159 #include <linux/kprobes.h>
7160 #include <linux/kdebug.h>
7161 #include <linux/percpu.h>
7162+#include <linux/slab.h>
7163+#include <linux/pagemap.h>
7164+#include <linux/compiler.h>
7165
7166 #include <asm/page.h>
7167 #include <asm/pgtable.h>
7168@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
7169 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
7170 regs->tpc);
7171 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
7172- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
7173+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
7174 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
7175 dump_stack();
7176 unhandled_fault(regs->tpc, current, regs);
7177@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
7178 show_regs(regs);
7179 }
7180
7181+#ifdef CONFIG_PAX_PAGEEXEC
7182+#ifdef CONFIG_PAX_DLRESOLVE
7183+static void pax_emuplt_close(struct vm_area_struct *vma)
7184+{
7185+ vma->vm_mm->call_dl_resolve = 0UL;
7186+}
7187+
7188+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7189+{
7190+ unsigned int *kaddr;
7191+
7192+ vmf->page = alloc_page(GFP_HIGHUSER);
7193+ if (!vmf->page)
7194+ return VM_FAULT_OOM;
7195+
7196+ kaddr = kmap(vmf->page);
7197+ memset(kaddr, 0, PAGE_SIZE);
7198+ kaddr[0] = 0x9DE3BFA8U; /* save */
7199+ flush_dcache_page(vmf->page);
7200+ kunmap(vmf->page);
7201+ return VM_FAULT_MAJOR;
7202+}
7203+
7204+static const struct vm_operations_struct pax_vm_ops = {
7205+ .close = pax_emuplt_close,
7206+ .fault = pax_emuplt_fault
7207+};
7208+
7209+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
7210+{
7211+ int ret;
7212+
7213+ vma->vm_mm = current->mm;
7214+ vma->vm_start = addr;
7215+ vma->vm_end = addr + PAGE_SIZE;
7216+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
7217+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
7218+ vma->vm_ops = &pax_vm_ops;
7219+
7220+ ret = insert_vm_struct(current->mm, vma);
7221+ if (ret)
7222+ return ret;
7223+
7224+ ++current->mm->total_vm;
7225+ return 0;
7226+}
7227+#endif
7228+
7229+/*
7230+ * PaX: decide what to do with offenders (regs->tpc = fault address)
7231+ *
7232+ * returns 1 when task should be killed
7233+ * 2 when patched PLT trampoline was detected
7234+ * 3 when unpatched PLT trampoline was detected
7235+ */
7236+static int pax_handle_fetch_fault(struct pt_regs *regs)
7237+{
7238+
7239+#ifdef CONFIG_PAX_EMUPLT
7240+ int err;
7241+
7242+ do { /* PaX: patched PLT emulation #1 */
7243+ unsigned int sethi1, sethi2, jmpl;
7244+
7245+ err = get_user(sethi1, (unsigned int *)regs->tpc);
7246+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
7247+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
7248+
7249+ if (err)
7250+ break;
7251+
7252+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
7253+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
7254+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
7255+ {
7256+ unsigned long addr;
7257+
7258+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
7259+ addr = regs->u_regs[UREG_G1];
7260+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7261+
7262+ if (test_thread_flag(TIF_32BIT))
7263+ addr &= 0xFFFFFFFFUL;
7264+
7265+ regs->tpc = addr;
7266+ regs->tnpc = addr+4;
7267+ return 2;
7268+ }
7269+ } while (0);
7270+
7271+ { /* PaX: patched PLT emulation #2 */
7272+ unsigned int ba;
7273+
7274+ err = get_user(ba, (unsigned int *)regs->tpc);
7275+
7276+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
7277+ unsigned long addr;
7278+
7279+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7280+
7281+ if (test_thread_flag(TIF_32BIT))
7282+ addr &= 0xFFFFFFFFUL;
7283+
7284+ regs->tpc = addr;
7285+ regs->tnpc = addr+4;
7286+ return 2;
7287+ }
7288+ }
7289+
7290+ do { /* PaX: patched PLT emulation #3 */
7291+ unsigned int sethi, jmpl, nop;
7292+
7293+ err = get_user(sethi, (unsigned int *)regs->tpc);
7294+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
7295+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7296+
7297+ if (err)
7298+ break;
7299+
7300+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7301+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
7302+ nop == 0x01000000U)
7303+ {
7304+ unsigned long addr;
7305+
7306+ addr = (sethi & 0x003FFFFFU) << 10;
7307+ regs->u_regs[UREG_G1] = addr;
7308+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7309+
7310+ if (test_thread_flag(TIF_32BIT))
7311+ addr &= 0xFFFFFFFFUL;
7312+
7313+ regs->tpc = addr;
7314+ regs->tnpc = addr+4;
7315+ return 2;
7316+ }
7317+ } while (0);
7318+
7319+ do { /* PaX: patched PLT emulation #4 */
7320+ unsigned int sethi, mov1, call, mov2;
7321+
7322+ err = get_user(sethi, (unsigned int *)regs->tpc);
7323+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7324+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7325+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7326+
7327+ if (err)
7328+ break;
7329+
7330+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7331+ mov1 == 0x8210000FU &&
7332+ (call & 0xC0000000U) == 0x40000000U &&
7333+ mov2 == 0x9E100001U)
7334+ {
7335+ unsigned long addr;
7336+
7337+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7338+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7339+
7340+ if (test_thread_flag(TIF_32BIT))
7341+ addr &= 0xFFFFFFFFUL;
7342+
7343+ regs->tpc = addr;
7344+ regs->tnpc = addr+4;
7345+ return 2;
7346+ }
7347+ } while (0);
7348+
7349+ do { /* PaX: patched PLT emulation #5 */
7350+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7351+
7352+ err = get_user(sethi, (unsigned int *)regs->tpc);
7353+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7354+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7355+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7356+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7357+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7358+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7359+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7360+
7361+ if (err)
7362+ break;
7363+
7364+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7365+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7366+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7367+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7368+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7369+ sllx == 0x83287020U &&
7370+ jmpl == 0x81C04005U &&
7371+ nop == 0x01000000U)
7372+ {
7373+ unsigned long addr;
7374+
7375+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7376+ regs->u_regs[UREG_G1] <<= 32;
7377+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7378+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7379+ regs->tpc = addr;
7380+ regs->tnpc = addr+4;
7381+ return 2;
7382+ }
7383+ } while (0);
7384+
7385+ do { /* PaX: patched PLT emulation #6 */
7386+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7387+
7388+ err = get_user(sethi, (unsigned int *)regs->tpc);
7389+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7390+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7391+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7392+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7393+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7394+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7395+
7396+ if (err)
7397+ break;
7398+
7399+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7400+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7401+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7402+ sllx == 0x83287020U &&
7403+ (or & 0xFFFFE000U) == 0x8A116000U &&
7404+ jmpl == 0x81C04005U &&
7405+ nop == 0x01000000U)
7406+ {
7407+ unsigned long addr;
7408+
7409+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7410+ regs->u_regs[UREG_G1] <<= 32;
7411+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7412+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7413+ regs->tpc = addr;
7414+ regs->tnpc = addr+4;
7415+ return 2;
7416+ }
7417+ } while (0);
7418+
7419+ do { /* PaX: unpatched PLT emulation step 1 */
7420+ unsigned int sethi, ba, nop;
7421+
7422+ err = get_user(sethi, (unsigned int *)regs->tpc);
7423+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7424+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7425+
7426+ if (err)
7427+ break;
7428+
7429+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7430+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7431+ nop == 0x01000000U)
7432+ {
7433+ unsigned long addr;
7434+ unsigned int save, call;
7435+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7436+
7437+ if ((ba & 0xFFC00000U) == 0x30800000U)
7438+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7439+ else
7440+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7441+
7442+ if (test_thread_flag(TIF_32BIT))
7443+ addr &= 0xFFFFFFFFUL;
7444+
7445+ err = get_user(save, (unsigned int *)addr);
7446+ err |= get_user(call, (unsigned int *)(addr+4));
7447+ err |= get_user(nop, (unsigned int *)(addr+8));
7448+ if (err)
7449+ break;
7450+
7451+#ifdef CONFIG_PAX_DLRESOLVE
7452+ if (save == 0x9DE3BFA8U &&
7453+ (call & 0xC0000000U) == 0x40000000U &&
7454+ nop == 0x01000000U)
7455+ {
7456+ struct vm_area_struct *vma;
7457+ unsigned long call_dl_resolve;
7458+
7459+ down_read(&current->mm->mmap_sem);
7460+ call_dl_resolve = current->mm->call_dl_resolve;
7461+ up_read(&current->mm->mmap_sem);
7462+ if (likely(call_dl_resolve))
7463+ goto emulate;
7464+
7465+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7466+
7467+ down_write(&current->mm->mmap_sem);
7468+ if (current->mm->call_dl_resolve) {
7469+ call_dl_resolve = current->mm->call_dl_resolve;
7470+ up_write(&current->mm->mmap_sem);
7471+ if (vma)
7472+ kmem_cache_free(vm_area_cachep, vma);
7473+ goto emulate;
7474+ }
7475+
7476+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7477+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7478+ up_write(&current->mm->mmap_sem);
7479+ if (vma)
7480+ kmem_cache_free(vm_area_cachep, vma);
7481+ return 1;
7482+ }
7483+
7484+ if (pax_insert_vma(vma, call_dl_resolve)) {
7485+ up_write(&current->mm->mmap_sem);
7486+ kmem_cache_free(vm_area_cachep, vma);
7487+ return 1;
7488+ }
7489+
7490+ current->mm->call_dl_resolve = call_dl_resolve;
7491+ up_write(&current->mm->mmap_sem);
7492+
7493+emulate:
7494+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7495+ regs->tpc = call_dl_resolve;
7496+ regs->tnpc = addr+4;
7497+ return 3;
7498+ }
7499+#endif
7500+
7501+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7502+ if ((save & 0xFFC00000U) == 0x05000000U &&
7503+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7504+ nop == 0x01000000U)
7505+ {
7506+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7507+ regs->u_regs[UREG_G2] = addr + 4;
7508+ addr = (save & 0x003FFFFFU) << 10;
7509+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7510+
7511+ if (test_thread_flag(TIF_32BIT))
7512+ addr &= 0xFFFFFFFFUL;
7513+
7514+ regs->tpc = addr;
7515+ regs->tnpc = addr+4;
7516+ return 3;
7517+ }
7518+
7519+ /* PaX: 64-bit PLT stub */
7520+ err = get_user(sethi1, (unsigned int *)addr);
7521+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7522+ err |= get_user(or1, (unsigned int *)(addr+8));
7523+ err |= get_user(or2, (unsigned int *)(addr+12));
7524+ err |= get_user(sllx, (unsigned int *)(addr+16));
7525+ err |= get_user(add, (unsigned int *)(addr+20));
7526+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7527+ err |= get_user(nop, (unsigned int *)(addr+28));
7528+ if (err)
7529+ break;
7530+
7531+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7532+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7533+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7534+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7535+ sllx == 0x89293020U &&
7536+ add == 0x8A010005U &&
7537+ jmpl == 0x89C14000U &&
7538+ nop == 0x01000000U)
7539+ {
7540+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7541+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7542+ regs->u_regs[UREG_G4] <<= 32;
7543+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7544+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7545+ regs->u_regs[UREG_G4] = addr + 24;
7546+ addr = regs->u_regs[UREG_G5];
7547+ regs->tpc = addr;
7548+ regs->tnpc = addr+4;
7549+ return 3;
7550+ }
7551+ }
7552+ } while (0);
7553+
7554+#ifdef CONFIG_PAX_DLRESOLVE
7555+ do { /* PaX: unpatched PLT emulation step 2 */
7556+ unsigned int save, call, nop;
7557+
7558+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7559+ err |= get_user(call, (unsigned int *)regs->tpc);
7560+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7561+ if (err)
7562+ break;
7563+
7564+ if (save == 0x9DE3BFA8U &&
7565+ (call & 0xC0000000U) == 0x40000000U &&
7566+ nop == 0x01000000U)
7567+ {
7568+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7569+
7570+ if (test_thread_flag(TIF_32BIT))
7571+ dl_resolve &= 0xFFFFFFFFUL;
7572+
7573+ regs->u_regs[UREG_RETPC] = regs->tpc;
7574+ regs->tpc = dl_resolve;
7575+ regs->tnpc = dl_resolve+4;
7576+ return 3;
7577+ }
7578+ } while (0);
7579+#endif
7580+
7581+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7582+ unsigned int sethi, ba, nop;
7583+
7584+ err = get_user(sethi, (unsigned int *)regs->tpc);
7585+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7586+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7587+
7588+ if (err)
7589+ break;
7590+
7591+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7592+ (ba & 0xFFF00000U) == 0x30600000U &&
7593+ nop == 0x01000000U)
7594+ {
7595+ unsigned long addr;
7596+
7597+ addr = (sethi & 0x003FFFFFU) << 10;
7598+ regs->u_regs[UREG_G1] = addr;
7599+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7600+
7601+ if (test_thread_flag(TIF_32BIT))
7602+ addr &= 0xFFFFFFFFUL;
7603+
7604+ regs->tpc = addr;
7605+ regs->tnpc = addr+4;
7606+ return 2;
7607+ }
7608+ } while (0);
7609+
7610+#endif
7611+
7612+ return 1;
7613+}
7614+
7615+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7616+{
7617+ unsigned long i;
7618+
7619+ printk(KERN_ERR "PAX: bytes at PC: ");
7620+ for (i = 0; i < 8; i++) {
7621+ unsigned int c;
7622+ if (get_user(c, (unsigned int *)pc+i))
7623+ printk(KERN_CONT "???????? ");
7624+ else
7625+ printk(KERN_CONT "%08x ", c);
7626+ }
7627+ printk("\n");
7628+}
7629+#endif
7630+
7631 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7632 {
7633 struct mm_struct *mm = current->mm;
7634@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7635 if (!vma)
7636 goto bad_area;
7637
7638+#ifdef CONFIG_PAX_PAGEEXEC
7639+ /* PaX: detect ITLB misses on non-exec pages */
7640+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7641+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7642+ {
7643+ if (address != regs->tpc)
7644+ goto good_area;
7645+
7646+ up_read(&mm->mmap_sem);
7647+ switch (pax_handle_fetch_fault(regs)) {
7648+
7649+#ifdef CONFIG_PAX_EMUPLT
7650+ case 2:
7651+ case 3:
7652+ return;
7653+#endif
7654+
7655+ }
7656+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7657+ do_group_exit(SIGKILL);
7658+ }
7659+#endif
7660+
7661 /* Pure DTLB misses do not tell us whether the fault causing
7662 * load/store/atomic was a write or not, it only says that there
7663 * was no match. So in such a case we (carefully) read the
7664diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7665index f27d103..1b06377 100644
7666--- a/arch/sparc/mm/hugetlbpage.c
7667+++ b/arch/sparc/mm/hugetlbpage.c
7668@@ -69,7 +69,7 @@ full_search:
7669 }
7670 return -ENOMEM;
7671 }
7672- if (likely(!vma || addr + len <= vma->vm_start)) {
7673+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7674 /*
7675 * Remember the place where we stopped the search:
7676 */
7677@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7678 /* make sure it can fit in the remaining address space */
7679 if (likely(addr > len)) {
7680 vma = find_vma(mm, addr-len);
7681- if (!vma || addr <= vma->vm_start) {
7682+ if (check_heap_stack_gap(vma, addr - len, len)) {
7683 /* remember the address as a hint for next time */
7684 return (mm->free_area_cache = addr-len);
7685 }
7686@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7687 if (unlikely(mm->mmap_base < len))
7688 goto bottomup;
7689
7690- addr = (mm->mmap_base-len) & HPAGE_MASK;
7691+ addr = mm->mmap_base - len;
7692
7693 do {
7694+ addr &= HPAGE_MASK;
7695 /*
7696 * Lookup failure means no vma is above this address,
7697 * else if new region fits below vma->vm_start,
7698 * return with success:
7699 */
7700 vma = find_vma(mm, addr);
7701- if (likely(!vma || addr+len <= vma->vm_start)) {
7702+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7703 /* remember the address as a hint for next time */
7704 return (mm->free_area_cache = addr);
7705 }
7706@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7707 mm->cached_hole_size = vma->vm_start - addr;
7708
7709 /* try just below the current vma->vm_start */
7710- addr = (vma->vm_start-len) & HPAGE_MASK;
7711- } while (likely(len < vma->vm_start));
7712+ addr = skip_heap_stack_gap(vma, len);
7713+ } while (!IS_ERR_VALUE(addr));
7714
7715 bottomup:
7716 /*
7717@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7718 if (addr) {
7719 addr = ALIGN(addr, HPAGE_SIZE);
7720 vma = find_vma(mm, addr);
7721- if (task_size - len >= addr &&
7722- (!vma || addr + len <= vma->vm_start))
7723+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7724 return addr;
7725 }
7726 if (mm->get_unmapped_area == arch_get_unmapped_area)
7727diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7728index dc7c3b1..34c0070 100644
7729--- a/arch/sparc/mm/init_32.c
7730+++ b/arch/sparc/mm/init_32.c
7731@@ -317,6 +317,9 @@ extern void device_scan(void);
7732 pgprot_t PAGE_SHARED __read_mostly;
7733 EXPORT_SYMBOL(PAGE_SHARED);
7734
7735+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7736+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7737+
7738 void __init paging_init(void)
7739 {
7740 switch(sparc_cpu_model) {
7741@@ -345,17 +348,17 @@ void __init paging_init(void)
7742
7743 /* Initialize the protection map with non-constant, MMU dependent values. */
7744 protection_map[0] = PAGE_NONE;
7745- protection_map[1] = PAGE_READONLY;
7746- protection_map[2] = PAGE_COPY;
7747- protection_map[3] = PAGE_COPY;
7748+ protection_map[1] = PAGE_READONLY_NOEXEC;
7749+ protection_map[2] = PAGE_COPY_NOEXEC;
7750+ protection_map[3] = PAGE_COPY_NOEXEC;
7751 protection_map[4] = PAGE_READONLY;
7752 protection_map[5] = PAGE_READONLY;
7753 protection_map[6] = PAGE_COPY;
7754 protection_map[7] = PAGE_COPY;
7755 protection_map[8] = PAGE_NONE;
7756- protection_map[9] = PAGE_READONLY;
7757- protection_map[10] = PAGE_SHARED;
7758- protection_map[11] = PAGE_SHARED;
7759+ protection_map[9] = PAGE_READONLY_NOEXEC;
7760+ protection_map[10] = PAGE_SHARED_NOEXEC;
7761+ protection_map[11] = PAGE_SHARED_NOEXEC;
7762 protection_map[12] = PAGE_READONLY;
7763 protection_map[13] = PAGE_READONLY;
7764 protection_map[14] = PAGE_SHARED;
7765diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7766index 509b1ff..bfd7118 100644
7767--- a/arch/sparc/mm/srmmu.c
7768+++ b/arch/sparc/mm/srmmu.c
7769@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7770 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7771 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7772 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7773+
7774+#ifdef CONFIG_PAX_PAGEEXEC
7775+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7776+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7777+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7778+#endif
7779+
7780 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7781 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7782
7783diff --git a/arch/um/Makefile b/arch/um/Makefile
7784index fc633db..5e1a1c2 100644
7785--- a/arch/um/Makefile
7786+++ b/arch/um/Makefile
7787@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7788 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7789 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
7790
7791+ifdef CONSTIFY_PLUGIN
7792+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7793+endif
7794+
7795 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
7796
7797 #This will adjust *FLAGS accordingly to the platform.
7798diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7799index 19e1bdd..3665b77 100644
7800--- a/arch/um/include/asm/cache.h
7801+++ b/arch/um/include/asm/cache.h
7802@@ -1,6 +1,7 @@
7803 #ifndef __UM_CACHE_H
7804 #define __UM_CACHE_H
7805
7806+#include <linux/const.h>
7807
7808 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7809 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7810@@ -12,6 +13,6 @@
7811 # define L1_CACHE_SHIFT 5
7812 #endif
7813
7814-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7815+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7816
7817 #endif
7818diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7819index 6c03acd..a5e0215 100644
7820--- a/arch/um/include/asm/kmap_types.h
7821+++ b/arch/um/include/asm/kmap_types.h
7822@@ -23,6 +23,7 @@ enum km_type {
7823 KM_IRQ1,
7824 KM_SOFTIRQ0,
7825 KM_SOFTIRQ1,
7826+ KM_CLEARPAGE,
7827 KM_TYPE_NR
7828 };
7829
7830diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7831index 4cc9b6c..02e5029 100644
7832--- a/arch/um/include/asm/page.h
7833+++ b/arch/um/include/asm/page.h
7834@@ -14,6 +14,9 @@
7835 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7836 #define PAGE_MASK (~(PAGE_SIZE-1))
7837
7838+#define ktla_ktva(addr) (addr)
7839+#define ktva_ktla(addr) (addr)
7840+
7841 #ifndef __ASSEMBLY__
7842
7843 struct page;
7844diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7845index 4a28a15..654dc2a 100644
7846--- a/arch/um/kernel/process.c
7847+++ b/arch/um/kernel/process.c
7848@@ -393,22 +393,6 @@ int singlestepping(void * t)
7849 return 2;
7850 }
7851
7852-/*
7853- * Only x86 and x86_64 have an arch_align_stack().
7854- * All other arches have "#define arch_align_stack(x) (x)"
7855- * in their asm/system.h
7856- * As this is included in UML from asm-um/system-generic.h,
7857- * we can use it to behave as the subarch does.
7858- */
7859-#ifndef arch_align_stack
7860-unsigned long arch_align_stack(unsigned long sp)
7861-{
7862- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7863- sp -= get_random_int() % 8192;
7864- return sp & ~0xf;
7865-}
7866-#endif
7867-
7868 unsigned long get_wchan(struct task_struct *p)
7869 {
7870 unsigned long stack_page, sp, ip;
7871diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
7872index d1b93c4..ae1b7fd 100644
7873--- a/arch/um/sys-i386/shared/sysdep/system.h
7874+++ b/arch/um/sys-i386/shared/sysdep/system.h
7875@@ -17,7 +17,7 @@
7876 # define AT_VECTOR_SIZE_ARCH 1
7877 #endif
7878
7879-extern unsigned long arch_align_stack(unsigned long sp);
7880+#define arch_align_stack(x) ((x) & ~0xfUL)
7881
7882 void default_idle(void);
7883
7884diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
7885index 857ca0b..9a2669d 100644
7886--- a/arch/um/sys-i386/syscalls.c
7887+++ b/arch/um/sys-i386/syscalls.c
7888@@ -11,6 +11,21 @@
7889 #include "asm/uaccess.h"
7890 #include "asm/unistd.h"
7891
7892+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
7893+{
7894+ unsigned long pax_task_size = TASK_SIZE;
7895+
7896+#ifdef CONFIG_PAX_SEGMEXEC
7897+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
7898+ pax_task_size = SEGMEXEC_TASK_SIZE;
7899+#endif
7900+
7901+ if (len > pax_task_size || addr > pax_task_size - len)
7902+ return -EINVAL;
7903+
7904+ return 0;
7905+}
7906+
7907 /*
7908 * Perform the select(nd, in, out, ex, tv) and mmap() system
7909 * calls. Linux/i386 didn't use to be able to handle more than
7910diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
7911index d1b93c4..ae1b7fd 100644
7912--- a/arch/um/sys-x86_64/shared/sysdep/system.h
7913+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
7914@@ -17,7 +17,7 @@
7915 # define AT_VECTOR_SIZE_ARCH 1
7916 #endif
7917
7918-extern unsigned long arch_align_stack(unsigned long sp);
7919+#define arch_align_stack(x) ((x) & ~0xfUL)
7920
7921 void default_idle(void);
7922
7923diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7924index 73ae02a..f932de5 100644
7925--- a/arch/x86/Kconfig
7926+++ b/arch/x86/Kconfig
7927@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
7928
7929 config X86_32_LAZY_GS
7930 def_bool y
7931- depends on X86_32 && !CC_STACKPROTECTOR
7932+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7933
7934 config KTIME_SCALAR
7935 def_bool X86_32
7936@@ -1008,7 +1008,7 @@ choice
7937
7938 config NOHIGHMEM
7939 bool "off"
7940- depends on !X86_NUMAQ
7941+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7942 ---help---
7943 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7944 However, the address space of 32-bit x86 processors is only 4
7945@@ -1045,7 +1045,7 @@ config NOHIGHMEM
7946
7947 config HIGHMEM4G
7948 bool "4GB"
7949- depends on !X86_NUMAQ
7950+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7951 ---help---
7952 Select this if you have a 32-bit processor and between 1 and 4
7953 gigabytes of physical RAM.
7954@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
7955 hex
7956 default 0xB0000000 if VMSPLIT_3G_OPT
7957 default 0x80000000 if VMSPLIT_2G
7958- default 0x78000000 if VMSPLIT_2G_OPT
7959+ default 0x70000000 if VMSPLIT_2G_OPT
7960 default 0x40000000 if VMSPLIT_1G
7961 default 0xC0000000
7962 depends on X86_32
7963@@ -1460,6 +1460,7 @@ config SECCOMP
7964
7965 config CC_STACKPROTECTOR
7966 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7967+ depends on X86_64 || !PAX_MEMORY_UDEREF
7968 ---help---
7969 This option turns on the -fstack-protector GCC feature. This
7970 feature puts, at the beginning of functions, a canary value on
7971@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
7972 config PHYSICAL_START
7973 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
7974 default "0x1000000"
7975+ range 0x400000 0x40000000
7976 ---help---
7977 This gives the physical address where the kernel is loaded.
7978
7979@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
7980 hex
7981 prompt "Alignment value to which kernel should be aligned" if X86_32
7982 default "0x1000000"
7983+ range 0x400000 0x1000000 if PAX_KERNEXEC
7984 range 0x2000 0x1000000
7985 ---help---
7986 This value puts the alignment restrictions on physical address
7987@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
7988 Say N if you want to disable CPU hotplug.
7989
7990 config COMPAT_VDSO
7991- def_bool y
7992+ def_bool n
7993 prompt "Compat VDSO support"
7994 depends on X86_32 || IA32_EMULATION
7995+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7996 ---help---
7997 Map the 32-bit VDSO to the predictable old-style address too.
7998 ---help---
7999diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
8000index 0e566103..1a6b57e 100644
8001--- a/arch/x86/Kconfig.cpu
8002+++ b/arch/x86/Kconfig.cpu
8003@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
8004
8005 config X86_F00F_BUG
8006 def_bool y
8007- depends on M586MMX || M586TSC || M586 || M486 || M386
8008+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
8009
8010 config X86_WP_WORKS_OK
8011 def_bool y
8012@@ -360,7 +360,7 @@ config X86_POPAD_OK
8013
8014 config X86_ALIGNMENT_16
8015 def_bool y
8016- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8017+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
8018
8019 config X86_INTEL_USERCOPY
8020 def_bool y
8021@@ -406,7 +406,7 @@ config X86_CMPXCHG64
8022 # generates cmov.
8023 config X86_CMOV
8024 def_bool y
8025- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8026+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
8027
8028 config X86_MINIMUM_CPU_FAMILY
8029 int
8030diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
8031index d105f29..c928727 100644
8032--- a/arch/x86/Kconfig.debug
8033+++ b/arch/x86/Kconfig.debug
8034@@ -99,7 +99,7 @@ config X86_PTDUMP
8035 config DEBUG_RODATA
8036 bool "Write protect kernel read-only data structures"
8037 default y
8038- depends on DEBUG_KERNEL
8039+ depends on DEBUG_KERNEL && BROKEN
8040 ---help---
8041 Mark the kernel read-only data as write-protected in the pagetables,
8042 in order to catch accidental (and incorrect) writes to such const
8043diff --git a/arch/x86/Makefile b/arch/x86/Makefile
8044index d2d24c9..0f21f8d 100644
8045--- a/arch/x86/Makefile
8046+++ b/arch/x86/Makefile
8047@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
8048 else
8049 BITS := 64
8050 UTS_MACHINE := x86_64
8051+ biarch := $(call cc-option,-m64)
8052 CHECKFLAGS += -D__x86_64__ -m64
8053
8054 KBUILD_AFLAGS += -m64
8055@@ -189,3 +190,12 @@ define archhelp
8056 echo ' FDARGS="..." arguments for the booted kernel'
8057 echo ' FDINITRD=file initrd for the booted kernel'
8058 endef
8059+
8060+define OLD_LD
8061+
8062+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
8063+*** Please upgrade your binutils to 2.18 or newer
8064+endef
8065+
8066+archprepare:
8067+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
8068diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
8069index ec749c2..bbb5319 100644
8070--- a/arch/x86/boot/Makefile
8071+++ b/arch/x86/boot/Makefile
8072@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
8073 $(call cc-option, -fno-stack-protector) \
8074 $(call cc-option, -mpreferred-stack-boundary=2)
8075 KBUILD_CFLAGS += $(call cc-option, -m32)
8076+ifdef CONSTIFY_PLUGIN
8077+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8078+endif
8079 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8080 GCOV_PROFILE := n
8081
8082diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
8083index 878e4b9..20537ab 100644
8084--- a/arch/x86/boot/bitops.h
8085+++ b/arch/x86/boot/bitops.h
8086@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8087 u8 v;
8088 const u32 *p = (const u32 *)addr;
8089
8090- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8091+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
8092 return v;
8093 }
8094
8095@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
8096
8097 static inline void set_bit(int nr, void *addr)
8098 {
8099- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8100+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
8101 }
8102
8103 #endif /* BOOT_BITOPS_H */
8104diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
8105index 98239d2..f40214c 100644
8106--- a/arch/x86/boot/boot.h
8107+++ b/arch/x86/boot/boot.h
8108@@ -82,7 +82,7 @@ static inline void io_delay(void)
8109 static inline u16 ds(void)
8110 {
8111 u16 seg;
8112- asm("movw %%ds,%0" : "=rm" (seg));
8113+ asm volatile("movw %%ds,%0" : "=rm" (seg));
8114 return seg;
8115 }
8116
8117@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
8118 static inline int memcmp(const void *s1, const void *s2, size_t len)
8119 {
8120 u8 diff;
8121- asm("repe; cmpsb; setnz %0"
8122+ asm volatile("repe; cmpsb; setnz %0"
8123 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
8124 return diff;
8125 }
8126diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
8127index f8ed065..5bf5ff3 100644
8128--- a/arch/x86/boot/compressed/Makefile
8129+++ b/arch/x86/boot/compressed/Makefile
8130@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
8131 KBUILD_CFLAGS += $(cflags-y)
8132 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
8133 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
8134+ifdef CONSTIFY_PLUGIN
8135+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
8136+endif
8137
8138 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
8139 GCOV_PROFILE := n
8140diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
8141index f543b70..b60fba8 100644
8142--- a/arch/x86/boot/compressed/head_32.S
8143+++ b/arch/x86/boot/compressed/head_32.S
8144@@ -76,7 +76,7 @@ ENTRY(startup_32)
8145 notl %eax
8146 andl %eax, %ebx
8147 #else
8148- movl $LOAD_PHYSICAL_ADDR, %ebx
8149+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8150 #endif
8151
8152 /* Target address to relocate to for decompression */
8153@@ -149,7 +149,7 @@ relocated:
8154 * and where it was actually loaded.
8155 */
8156 movl %ebp, %ebx
8157- subl $LOAD_PHYSICAL_ADDR, %ebx
8158+ subl $____LOAD_PHYSICAL_ADDR, %ebx
8159 jz 2f /* Nothing to be done if loaded at compiled addr. */
8160 /*
8161 * Process relocations.
8162@@ -157,8 +157,7 @@ relocated:
8163
8164 1: subl $4, %edi
8165 movl (%edi), %ecx
8166- testl %ecx, %ecx
8167- jz 2f
8168+ jecxz 2f
8169 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
8170 jmp 1b
8171 2:
8172diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
8173index 077e1b6..2c6b13b 100644
8174--- a/arch/x86/boot/compressed/head_64.S
8175+++ b/arch/x86/boot/compressed/head_64.S
8176@@ -91,7 +91,7 @@ ENTRY(startup_32)
8177 notl %eax
8178 andl %eax, %ebx
8179 #else
8180- movl $LOAD_PHYSICAL_ADDR, %ebx
8181+ movl $____LOAD_PHYSICAL_ADDR, %ebx
8182 #endif
8183
8184 /* Target address to relocate to for decompression */
8185@@ -183,7 +183,7 @@ no_longmode:
8186 hlt
8187 jmp 1b
8188
8189-#include "../../kernel/verify_cpu_64.S"
8190+#include "../../kernel/verify_cpu.S"
8191
8192 /*
8193 * Be careful here startup_64 needs to be at a predictable
8194@@ -234,7 +234,7 @@ ENTRY(startup_64)
8195 notq %rax
8196 andq %rax, %rbp
8197 #else
8198- movq $LOAD_PHYSICAL_ADDR, %rbp
8199+ movq $____LOAD_PHYSICAL_ADDR, %rbp
8200 #endif
8201
8202 /* Target address to relocate to for decompression */
8203diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
8204index 842b2a3..f00178b 100644
8205--- a/arch/x86/boot/compressed/misc.c
8206+++ b/arch/x86/boot/compressed/misc.c
8207@@ -288,7 +288,7 @@ static void parse_elf(void *output)
8208 case PT_LOAD:
8209 #ifdef CONFIG_RELOCATABLE
8210 dest = output;
8211- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
8212+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
8213 #else
8214 dest = (void *)(phdr->p_paddr);
8215 #endif
8216@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
8217 error("Destination address too large");
8218 #endif
8219 #ifndef CONFIG_RELOCATABLE
8220- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
8221+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
8222 error("Wrong destination address");
8223 #endif
8224
8225diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
8226index bcbd36c..b1754af 100644
8227--- a/arch/x86/boot/compressed/mkpiggy.c
8228+++ b/arch/x86/boot/compressed/mkpiggy.c
8229@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
8230
8231 offs = (olen > ilen) ? olen - ilen : 0;
8232 offs += olen >> 12; /* Add 8 bytes for each 32K block */
8233- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
8234+ offs += 64*1024; /* Add 64K bytes slack */
8235 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
8236
8237 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
8238diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
8239index bbeb0c3..f5167ab 100644
8240--- a/arch/x86/boot/compressed/relocs.c
8241+++ b/arch/x86/boot/compressed/relocs.c
8242@@ -10,8 +10,11 @@
8243 #define USE_BSD
8244 #include <endian.h>
8245
8246+#include "../../../../include/linux/autoconf.h"
8247+
8248 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
8249 static Elf32_Ehdr ehdr;
8250+static Elf32_Phdr *phdr;
8251 static unsigned long reloc_count, reloc_idx;
8252 static unsigned long *relocs;
8253
8254@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
8255
8256 static int is_safe_abs_reloc(const char* sym_name)
8257 {
8258- int i;
8259+ unsigned int i;
8260
8261 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
8262 if (!strcmp(sym_name, safe_abs_relocs[i]))
8263@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
8264 }
8265 }
8266
8267+static void read_phdrs(FILE *fp)
8268+{
8269+ unsigned int i;
8270+
8271+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
8272+ if (!phdr) {
8273+ die("Unable to allocate %d program headers\n",
8274+ ehdr.e_phnum);
8275+ }
8276+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
8277+ die("Seek to %d failed: %s\n",
8278+ ehdr.e_phoff, strerror(errno));
8279+ }
8280+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
8281+ die("Cannot read ELF program headers: %s\n",
8282+ strerror(errno));
8283+ }
8284+ for(i = 0; i < ehdr.e_phnum; i++) {
8285+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
8286+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
8287+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
8288+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
8289+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
8290+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
8291+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
8292+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
8293+ }
8294+
8295+}
8296+
8297 static void read_shdrs(FILE *fp)
8298 {
8299- int i;
8300+ unsigned int i;
8301 Elf32_Shdr shdr;
8302
8303 secs = calloc(ehdr.e_shnum, sizeof(struct section));
8304@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
8305
8306 static void read_strtabs(FILE *fp)
8307 {
8308- int i;
8309+ unsigned int i;
8310 for (i = 0; i < ehdr.e_shnum; i++) {
8311 struct section *sec = &secs[i];
8312 if (sec->shdr.sh_type != SHT_STRTAB) {
8313@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
8314
8315 static void read_symtabs(FILE *fp)
8316 {
8317- int i,j;
8318+ unsigned int i,j;
8319 for (i = 0; i < ehdr.e_shnum; i++) {
8320 struct section *sec = &secs[i];
8321 if (sec->shdr.sh_type != SHT_SYMTAB) {
8322@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
8323
8324 static void read_relocs(FILE *fp)
8325 {
8326- int i,j;
8327+ unsigned int i,j;
8328+ uint32_t base;
8329+
8330 for (i = 0; i < ehdr.e_shnum; i++) {
8331 struct section *sec = &secs[i];
8332 if (sec->shdr.sh_type != SHT_REL) {
8333@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
8334 die("Cannot read symbol table: %s\n",
8335 strerror(errno));
8336 }
8337+ base = 0;
8338+ for (j = 0; j < ehdr.e_phnum; j++) {
8339+ if (phdr[j].p_type != PT_LOAD )
8340+ continue;
8341+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
8342+ continue;
8343+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
8344+ break;
8345+ }
8346 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
8347 Elf32_Rel *rel = &sec->reltab[j];
8348- rel->r_offset = elf32_to_cpu(rel->r_offset);
8349+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
8350 rel->r_info = elf32_to_cpu(rel->r_info);
8351 }
8352 }
8353@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
8354
8355 static void print_absolute_symbols(void)
8356 {
8357- int i;
8358+ unsigned int i;
8359 printf("Absolute symbols\n");
8360 printf(" Num: Value Size Type Bind Visibility Name\n");
8361 for (i = 0; i < ehdr.e_shnum; i++) {
8362 struct section *sec = &secs[i];
8363 char *sym_strtab;
8364 Elf32_Sym *sh_symtab;
8365- int j;
8366+ unsigned int j;
8367
8368 if (sec->shdr.sh_type != SHT_SYMTAB) {
8369 continue;
8370@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
8371
8372 static void print_absolute_relocs(void)
8373 {
8374- int i, printed = 0;
8375+ unsigned int i, printed = 0;
8376
8377 for (i = 0; i < ehdr.e_shnum; i++) {
8378 struct section *sec = &secs[i];
8379 struct section *sec_applies, *sec_symtab;
8380 char *sym_strtab;
8381 Elf32_Sym *sh_symtab;
8382- int j;
8383+ unsigned int j;
8384 if (sec->shdr.sh_type != SHT_REL) {
8385 continue;
8386 }
8387@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
8388
8389 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8390 {
8391- int i;
8392+ unsigned int i;
8393 /* Walk through the relocations */
8394 for (i = 0; i < ehdr.e_shnum; i++) {
8395 char *sym_strtab;
8396 Elf32_Sym *sh_symtab;
8397 struct section *sec_applies, *sec_symtab;
8398- int j;
8399+ unsigned int j;
8400 struct section *sec = &secs[i];
8401
8402 if (sec->shdr.sh_type != SHT_REL) {
8403@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
8404 if (sym->st_shndx == SHN_ABS) {
8405 continue;
8406 }
8407+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
8408+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
8409+ continue;
8410+
8411+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
8412+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
8413+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
8414+ continue;
8415+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
8416+ continue;
8417+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
8418+ continue;
8419+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
8420+ continue;
8421+#endif
8422 if (r_type == R_386_NONE || r_type == R_386_PC32) {
8423 /*
8424 * NONE can be ignored and and PC relative
8425@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
8426
8427 static void emit_relocs(int as_text)
8428 {
8429- int i;
8430+ unsigned int i;
8431 /* Count how many relocations I have and allocate space for them. */
8432 reloc_count = 0;
8433 walk_relocs(count_reloc);
8434@@ -634,6 +693,7 @@ int main(int argc, char **argv)
8435 fname, strerror(errno));
8436 }
8437 read_ehdr(fp);
8438+ read_phdrs(fp);
8439 read_shdrs(fp);
8440 read_strtabs(fp);
8441 read_symtabs(fp);
8442diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
8443index 4d3ff03..e4972ff 100644
8444--- a/arch/x86/boot/cpucheck.c
8445+++ b/arch/x86/boot/cpucheck.c
8446@@ -74,7 +74,7 @@ static int has_fpu(void)
8447 u16 fcw = -1, fsw = -1;
8448 u32 cr0;
8449
8450- asm("movl %%cr0,%0" : "=r" (cr0));
8451+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
8452 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
8453 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8454 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8455@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8456 {
8457 u32 f0, f1;
8458
8459- asm("pushfl ; "
8460+ asm volatile("pushfl ; "
8461 "pushfl ; "
8462 "popl %0 ; "
8463 "movl %0,%1 ; "
8464@@ -115,7 +115,7 @@ static void get_flags(void)
8465 set_bit(X86_FEATURE_FPU, cpu.flags);
8466
8467 if (has_eflag(X86_EFLAGS_ID)) {
8468- asm("cpuid"
8469+ asm volatile("cpuid"
8470 : "=a" (max_intel_level),
8471 "=b" (cpu_vendor[0]),
8472 "=d" (cpu_vendor[1]),
8473@@ -124,7 +124,7 @@ static void get_flags(void)
8474
8475 if (max_intel_level >= 0x00000001 &&
8476 max_intel_level <= 0x0000ffff) {
8477- asm("cpuid"
8478+ asm volatile("cpuid"
8479 : "=a" (tfms),
8480 "=c" (cpu.flags[4]),
8481 "=d" (cpu.flags[0])
8482@@ -136,7 +136,7 @@ static void get_flags(void)
8483 cpu.model += ((tfms >> 16) & 0xf) << 4;
8484 }
8485
8486- asm("cpuid"
8487+ asm volatile("cpuid"
8488 : "=a" (max_amd_level)
8489 : "a" (0x80000000)
8490 : "ebx", "ecx", "edx");
8491@@ -144,7 +144,7 @@ static void get_flags(void)
8492 if (max_amd_level >= 0x80000001 &&
8493 max_amd_level <= 0x8000ffff) {
8494 u32 eax = 0x80000001;
8495- asm("cpuid"
8496+ asm volatile("cpuid"
8497 : "+a" (eax),
8498 "=c" (cpu.flags[6]),
8499 "=d" (cpu.flags[1])
8500@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8501 u32 ecx = MSR_K7_HWCR;
8502 u32 eax, edx;
8503
8504- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8505+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8506 eax &= ~(1 << 15);
8507- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8508+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8509
8510 get_flags(); /* Make sure it really did something */
8511 err = check_flags();
8512@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8513 u32 ecx = MSR_VIA_FCR;
8514 u32 eax, edx;
8515
8516- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8517+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8518 eax |= (1<<1)|(1<<7);
8519- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8520+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8521
8522 set_bit(X86_FEATURE_CX8, cpu.flags);
8523 err = check_flags();
8524@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8525 u32 eax, edx;
8526 u32 level = 1;
8527
8528- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8529- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8530- asm("cpuid"
8531+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8532+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8533+ asm volatile("cpuid"
8534 : "+a" (level), "=d" (cpu.flags[0])
8535 : : "ecx", "ebx");
8536- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8537+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8538
8539 err = check_flags();
8540 }
8541diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8542index b31cc54..8d69237 100644
8543--- a/arch/x86/boot/header.S
8544+++ b/arch/x86/boot/header.S
8545@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8546 # single linked list of
8547 # struct setup_data
8548
8549-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8550+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8551
8552 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8553 #define VO_INIT_SIZE (VO__end - VO__text)
8554diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8555index cae3feb..ff8ff2a 100644
8556--- a/arch/x86/boot/memory.c
8557+++ b/arch/x86/boot/memory.c
8558@@ -19,7 +19,7 @@
8559
8560 static int detect_memory_e820(void)
8561 {
8562- int count = 0;
8563+ unsigned int count = 0;
8564 struct biosregs ireg, oreg;
8565 struct e820entry *desc = boot_params.e820_map;
8566 static struct e820entry buf; /* static so it is zeroed */
8567diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8568index 11e8c6e..fdbb1ed 100644
8569--- a/arch/x86/boot/video-vesa.c
8570+++ b/arch/x86/boot/video-vesa.c
8571@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8572
8573 boot_params.screen_info.vesapm_seg = oreg.es;
8574 boot_params.screen_info.vesapm_off = oreg.di;
8575+ boot_params.screen_info.vesapm_size = oreg.cx;
8576 }
8577
8578 /*
8579diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8580index d42da38..787cdf3 100644
8581--- a/arch/x86/boot/video.c
8582+++ b/arch/x86/boot/video.c
8583@@ -90,7 +90,7 @@ static void store_mode_params(void)
8584 static unsigned int get_entry(void)
8585 {
8586 char entry_buf[4];
8587- int i, len = 0;
8588+ unsigned int i, len = 0;
8589 int key;
8590 unsigned int v;
8591
8592diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8593index 5b577d5..3c1fed4 100644
8594--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8595+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8596@@ -8,6 +8,8 @@
8597 * including this sentence is retained in full.
8598 */
8599
8600+#include <asm/alternative-asm.h>
8601+
8602 .extern crypto_ft_tab
8603 .extern crypto_it_tab
8604 .extern crypto_fl_tab
8605@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8606 je B192; \
8607 leaq 32(r9),r9;
8608
8609+#define ret pax_force_retaddr 0, 1; ret
8610+
8611 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8612 movq r1,r2; \
8613 movq r3,r4; \
8614diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8615index eb0566e..e3ebad8 100644
8616--- a/arch/x86/crypto/aesni-intel_asm.S
8617+++ b/arch/x86/crypto/aesni-intel_asm.S
8618@@ -16,6 +16,7 @@
8619 */
8620
8621 #include <linux/linkage.h>
8622+#include <asm/alternative-asm.h>
8623
8624 .text
8625
8626@@ -52,6 +53,7 @@ _key_expansion_256a:
8627 pxor %xmm1, %xmm0
8628 movaps %xmm0, (%rcx)
8629 add $0x10, %rcx
8630+ pax_force_retaddr_bts
8631 ret
8632
8633 _key_expansion_192a:
8634@@ -75,6 +77,7 @@ _key_expansion_192a:
8635 shufps $0b01001110, %xmm2, %xmm1
8636 movaps %xmm1, 16(%rcx)
8637 add $0x20, %rcx
8638+ pax_force_retaddr_bts
8639 ret
8640
8641 _key_expansion_192b:
8642@@ -93,6 +96,7 @@ _key_expansion_192b:
8643
8644 movaps %xmm0, (%rcx)
8645 add $0x10, %rcx
8646+ pax_force_retaddr_bts
8647 ret
8648
8649 _key_expansion_256b:
8650@@ -104,6 +108,7 @@ _key_expansion_256b:
8651 pxor %xmm1, %xmm2
8652 movaps %xmm2, (%rcx)
8653 add $0x10, %rcx
8654+ pax_force_retaddr_bts
8655 ret
8656
8657 /*
8658@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
8659 cmp %rcx, %rdi
8660 jb .Ldec_key_loop
8661 xor %rax, %rax
8662+ pax_force_retaddr 0, 1
8663 ret
8664+ENDPROC(aesni_set_key)
8665
8666 /*
8667 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8668@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
8669 movups (INP), STATE # input
8670 call _aesni_enc1
8671 movups STATE, (OUTP) # output
8672+ pax_force_retaddr 0, 1
8673 ret
8674+ENDPROC(aesni_enc)
8675
8676 /*
8677 * _aesni_enc1: internal ABI
8678@@ -319,6 +328,7 @@ _aesni_enc1:
8679 movaps 0x70(TKEYP), KEY
8680 # aesenclast KEY, STATE # last round
8681 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
8682+ pax_force_retaddr_bts
8683 ret
8684
8685 /*
8686@@ -482,6 +492,7 @@ _aesni_enc4:
8687 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
8688 # aesenclast KEY, STATE4
8689 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
8690+ pax_force_retaddr_bts
8691 ret
8692
8693 /*
8694@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
8695 movups (INP), STATE # input
8696 call _aesni_dec1
8697 movups STATE, (OUTP) #output
8698+ pax_force_retaddr 0, 1
8699 ret
8700+ENDPROC(aesni_dec)
8701
8702 /*
8703 * _aesni_dec1: internal ABI
8704@@ -563,6 +576,7 @@ _aesni_dec1:
8705 movaps 0x70(TKEYP), KEY
8706 # aesdeclast KEY, STATE # last round
8707 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
8708+ pax_force_retaddr_bts
8709 ret
8710
8711 /*
8712@@ -726,6 +740,7 @@ _aesni_dec4:
8713 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
8714 # aesdeclast KEY, STATE4
8715 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
8716+ pax_force_retaddr_bts
8717 ret
8718
8719 /*
8720@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
8721 cmp $16, LEN
8722 jge .Lecb_enc_loop1
8723 .Lecb_enc_ret:
8724+ pax_force_retaddr 0, 1
8725 ret
8726+ENDPROC(aesni_ecb_enc)
8727
8728 /*
8729 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8730@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
8731 cmp $16, LEN
8732 jge .Lecb_dec_loop1
8733 .Lecb_dec_ret:
8734+ pax_force_retaddr 0, 1
8735 ret
8736+ENDPROC(aesni_ecb_dec)
8737
8738 /*
8739 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8740@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
8741 jge .Lcbc_enc_loop
8742 movups STATE, (IVP)
8743 .Lcbc_enc_ret:
8744+ pax_force_retaddr 0, 1
8745 ret
8746+ENDPROC(aesni_cbc_enc)
8747
8748 /*
8749 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8750@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
8751 .Lcbc_dec_ret:
8752 movups IV, (IVP)
8753 .Lcbc_dec_just_ret:
8754+ pax_force_retaddr 0, 1
8755 ret
8756+ENDPROC(aesni_cbc_dec)
8757diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8758index 6214a9b..1f4fc9a 100644
8759--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8760+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8761@@ -1,3 +1,5 @@
8762+#include <asm/alternative-asm.h>
8763+
8764 # enter ECRYPT_encrypt_bytes
8765 .text
8766 .p2align 5
8767@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8768 add %r11,%rsp
8769 mov %rdi,%rax
8770 mov %rsi,%rdx
8771+ pax_force_retaddr 0, 1
8772 ret
8773 # bytesatleast65:
8774 ._bytesatleast65:
8775@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8776 add %r11,%rsp
8777 mov %rdi,%rax
8778 mov %rsi,%rdx
8779+ pax_force_retaddr
8780 ret
8781 # enter ECRYPT_ivsetup
8782 .text
8783@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8784 add %r11,%rsp
8785 mov %rdi,%rax
8786 mov %rsi,%rdx
8787+ pax_force_retaddr
8788 ret
8789diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8790index 35974a5..5662ae2 100644
8791--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8792+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8793@@ -21,6 +21,7 @@
8794 .text
8795
8796 #include <asm/asm-offsets.h>
8797+#include <asm/alternative-asm.h>
8798
8799 #define a_offset 0
8800 #define b_offset 4
8801@@ -269,6 +270,7 @@ twofish_enc_blk:
8802
8803 popq R1
8804 movq $1,%rax
8805+ pax_force_retaddr 0, 1
8806 ret
8807
8808 twofish_dec_blk:
8809@@ -321,4 +323,5 @@ twofish_dec_blk:
8810
8811 popq R1
8812 movq $1,%rax
8813+ pax_force_retaddr 0, 1
8814 ret
8815diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8816index 14531ab..bc68a7b 100644
8817--- a/arch/x86/ia32/ia32_aout.c
8818+++ b/arch/x86/ia32/ia32_aout.c
8819@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8820 unsigned long dump_start, dump_size;
8821 struct user32 dump;
8822
8823+ memset(&dump, 0, sizeof(dump));
8824+
8825 fs = get_fs();
8826 set_fs(KERNEL_DS);
8827 has_dumped = 1;
8828@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8829 dump_size = dump.u_ssize << PAGE_SHIFT;
8830 DUMP_WRITE(dump_start, dump_size);
8831 }
8832- /*
8833- * Finally dump the task struct. Not be used by gdb, but
8834- * could be useful
8835- */
8836- set_fs(KERNEL_DS);
8837- DUMP_WRITE(current, sizeof(*current));
8838 end_coredump:
8839 set_fs(fs);
8840 return has_dumped;
8841@@ -327,6 +323,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
8842 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
8843 current->mm->cached_hole_size = 0;
8844
8845+ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8846+ if (retval < 0) {
8847+ /* Someone check-me: is this error path enough? */
8848+ send_sig(SIGKILL, current, 0);
8849+ return retval;
8850+ }
8851+
8852 install_exec_creds(bprm);
8853 current->flags &= ~PF_FORKNOEXEC;
8854
8855@@ -422,13 +425,6 @@ beyond_if:
8856
8857 set_brk(current->mm->start_brk, current->mm->brk);
8858
8859- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
8860- if (retval < 0) {
8861- /* Someone check-me: is this error path enough? */
8862- send_sig(SIGKILL, current, 0);
8863- return retval;
8864- }
8865-
8866 current->mm->start_stack =
8867 (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
8868 /* start thread */
8869diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8870index 588a7aa..a3468b0 100644
8871--- a/arch/x86/ia32/ia32_signal.c
8872+++ b/arch/x86/ia32/ia32_signal.c
8873@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8874 }
8875 seg = get_fs();
8876 set_fs(KERNEL_DS);
8877- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8878+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8879 set_fs(seg);
8880 if (ret >= 0 && uoss_ptr) {
8881 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8882@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8883 */
8884 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8885 size_t frame_size,
8886- void **fpstate)
8887+ void __user **fpstate)
8888 {
8889 unsigned long sp;
8890
8891@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8892
8893 if (used_math()) {
8894 sp = sp - sig_xstate_ia32_size;
8895- *fpstate = (struct _fpstate_ia32 *) sp;
8896+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8897 if (save_i387_xstate_ia32(*fpstate) < 0)
8898 return (void __user *) -1L;
8899 }
8900@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8901 sp -= frame_size;
8902 /* Align the stack pointer according to the i386 ABI,
8903 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8904- sp = ((sp + 4) & -16ul) - 4;
8905+ sp = ((sp - 12) & -16ul) - 4;
8906 return (void __user *) sp;
8907 }
8908
8909@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8910 * These are actually not used anymore, but left because some
8911 * gdb versions depend on them as a marker.
8912 */
8913- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8914+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8915 } put_user_catch(err);
8916
8917 if (err)
8918@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8919 0xb8,
8920 __NR_ia32_rt_sigreturn,
8921 0x80cd,
8922- 0,
8923+ 0
8924 };
8925
8926 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8927@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8928
8929 if (ka->sa.sa_flags & SA_RESTORER)
8930 restorer = ka->sa.sa_restorer;
8931+ else if (current->mm->context.vdso)
8932+ /* Return stub is in 32bit vsyscall page */
8933+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8934 else
8935- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8936- rt_sigreturn);
8937+ restorer = &frame->retcode;
8938 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8939
8940 /*
8941 * Not actually used anymore, but left because some gdb
8942 * versions need it.
8943 */
8944- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8945+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8946 } put_user_catch(err);
8947
8948 if (err)
8949diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8950index 4edd8eb..29124b4 100644
8951--- a/arch/x86/ia32/ia32entry.S
8952+++ b/arch/x86/ia32/ia32entry.S
8953@@ -13,7 +13,9 @@
8954 #include <asm/thread_info.h>
8955 #include <asm/segment.h>
8956 #include <asm/irqflags.h>
8957+#include <asm/pgtable.h>
8958 #include <linux/linkage.h>
8959+#include <asm/alternative-asm.h>
8960
8961 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8962 #include <linux/elf-em.h>
8963@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
8964 ENDPROC(native_irq_enable_sysexit)
8965 #endif
8966
8967+ .macro pax_enter_kernel_user
8968+ pax_set_fptr_mask
8969+#ifdef CONFIG_PAX_MEMORY_UDEREF
8970+ call pax_enter_kernel_user
8971+#endif
8972+ .endm
8973+
8974+ .macro pax_exit_kernel_user
8975+#ifdef CONFIG_PAX_MEMORY_UDEREF
8976+ call pax_exit_kernel_user
8977+#endif
8978+#ifdef CONFIG_PAX_RANDKSTACK
8979+ pushq %rax
8980+ pushq %r11
8981+ call pax_randomize_kstack
8982+ popq %r11
8983+ popq %rax
8984+#endif
8985+ .endm
8986+
8987+.macro pax_erase_kstack
8988+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8989+ call pax_erase_kstack
8990+#endif
8991+.endm
8992+
8993 /*
8994 * 32bit SYSENTER instruction entry.
8995 *
8996@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
8997 CFI_REGISTER rsp,rbp
8998 SWAPGS_UNSAFE_STACK
8999 movq PER_CPU_VAR(kernel_stack), %rsp
9000- addq $(KERNEL_STACK_OFFSET),%rsp
9001- /*
9002- * No need to follow this irqs on/off section: the syscall
9003- * disabled irqs, here we enable it straight after entry:
9004- */
9005- ENABLE_INTERRUPTS(CLBR_NONE)
9006 movl %ebp,%ebp /* zero extension */
9007 pushq $__USER32_DS
9008 CFI_ADJUST_CFA_OFFSET 8
9009@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
9010 pushfq
9011 CFI_ADJUST_CFA_OFFSET 8
9012 /*CFI_REL_OFFSET rflags,0*/
9013- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
9014- CFI_REGISTER rip,r10
9015+ orl $X86_EFLAGS_IF,(%rsp)
9016+ GET_THREAD_INFO(%r11)
9017+ movl TI_sysenter_return(%r11), %r11d
9018+ CFI_REGISTER rip,r11
9019 pushq $__USER32_CS
9020 CFI_ADJUST_CFA_OFFSET 8
9021 /*CFI_REL_OFFSET cs,0*/
9022 movl %eax, %eax
9023- pushq %r10
9024+ pushq %r11
9025 CFI_ADJUST_CFA_OFFSET 8
9026 CFI_REL_OFFSET rip,0
9027 pushq %rax
9028 CFI_ADJUST_CFA_OFFSET 8
9029 cld
9030 SAVE_ARGS 0,0,1
9031+ pax_enter_kernel_user
9032+ /*
9033+ * No need to follow this irqs on/off section: the syscall
9034+ * disabled irqs, here we enable it straight after entry:
9035+ */
9036+ ENABLE_INTERRUPTS(CLBR_NONE)
9037 /* no need to do an access_ok check here because rbp has been
9038 32bit zero extended */
9039+
9040+#ifdef CONFIG_PAX_MEMORY_UDEREF
9041+ mov $PAX_USER_SHADOW_BASE,%r11
9042+ add %r11,%rbp
9043+#endif
9044+
9045 1: movl (%rbp),%ebp
9046 .section __ex_table,"a"
9047 .quad 1b,ia32_badarg
9048 .previous
9049- GET_THREAD_INFO(%r10)
9050- orl $TS_COMPAT,TI_status(%r10)
9051- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9052+ GET_THREAD_INFO(%r11)
9053+ orl $TS_COMPAT,TI_status(%r11)
9054+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9055 CFI_REMEMBER_STATE
9056 jnz sysenter_tracesys
9057 cmpq $(IA32_NR_syscalls-1),%rax
9058@@ -166,13 +202,15 @@ sysenter_do_call:
9059 sysenter_dispatch:
9060 call *ia32_sys_call_table(,%rax,8)
9061 movq %rax,RAX-ARGOFFSET(%rsp)
9062- GET_THREAD_INFO(%r10)
9063+ GET_THREAD_INFO(%r11)
9064 DISABLE_INTERRUPTS(CLBR_NONE)
9065 TRACE_IRQS_OFF
9066- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9067+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9068 jnz sysexit_audit
9069 sysexit_from_sys_call:
9070- andl $~TS_COMPAT,TI_status(%r10)
9071+ pax_exit_kernel_user
9072+ pax_erase_kstack
9073+ andl $~TS_COMPAT,TI_status(%r11)
9074 /* clear IF, that popfq doesn't enable interrupts early */
9075 andl $~0x200,EFLAGS-R11(%rsp)
9076 movl RIP-R11(%rsp),%edx /* User %eip */
9077@@ -200,6 +238,9 @@ sysexit_from_sys_call:
9078 movl %eax,%esi /* 2nd arg: syscall number */
9079 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
9080 call audit_syscall_entry
9081+
9082+ pax_erase_kstack
9083+
9084 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
9085 cmpq $(IA32_NR_syscalls-1),%rax
9086 ja ia32_badsys
9087@@ -211,7 +252,7 @@ sysexit_from_sys_call:
9088 .endm
9089
9090 .macro auditsys_exit exit
9091- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9092+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9093 jnz ia32_ret_from_sys_call
9094 TRACE_IRQS_ON
9095 sti
9096@@ -221,12 +262,12 @@ sysexit_from_sys_call:
9097 movzbl %al,%edi /* zero-extend that into %edi */
9098 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
9099 call audit_syscall_exit
9100- GET_THREAD_INFO(%r10)
9101+ GET_THREAD_INFO(%r11)
9102 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
9103 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
9104 cli
9105 TRACE_IRQS_OFF
9106- testl %edi,TI_flags(%r10)
9107+ testl %edi,TI_flags(%r11)
9108 jz \exit
9109 CLEAR_RREGS -ARGOFFSET
9110 jmp int_with_check
9111@@ -244,7 +285,7 @@ sysexit_audit:
9112
9113 sysenter_tracesys:
9114 #ifdef CONFIG_AUDITSYSCALL
9115- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9116+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9117 jz sysenter_auditsys
9118 #endif
9119 SAVE_REST
9120@@ -252,6 +293,9 @@ sysenter_tracesys:
9121 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
9122 movq %rsp,%rdi /* &pt_regs -> arg1 */
9123 call syscall_trace_enter
9124+
9125+ pax_erase_kstack
9126+
9127 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9128 RESTORE_REST
9129 cmpq $(IA32_NR_syscalls-1),%rax
9130@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
9131 ENTRY(ia32_cstar_target)
9132 CFI_STARTPROC32 simple
9133 CFI_SIGNAL_FRAME
9134- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
9135+ CFI_DEF_CFA rsp,0
9136 CFI_REGISTER rip,rcx
9137 /*CFI_REGISTER rflags,r11*/
9138 SWAPGS_UNSAFE_STACK
9139 movl %esp,%r8d
9140 CFI_REGISTER rsp,r8
9141 movq PER_CPU_VAR(kernel_stack),%rsp
9142+ SAVE_ARGS 8*6,1,1
9143+ pax_enter_kernel_user
9144 /*
9145 * No need to follow this irqs on/off section: the syscall
9146 * disabled irqs and here we enable it straight after entry:
9147 */
9148 ENABLE_INTERRUPTS(CLBR_NONE)
9149- SAVE_ARGS 8,1,1
9150 movl %eax,%eax /* zero extension */
9151 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
9152 movq %rcx,RIP-ARGOFFSET(%rsp)
9153@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
9154 /* no need to do an access_ok check here because r8 has been
9155 32bit zero extended */
9156 /* hardware stack frame is complete now */
9157+
9158+#ifdef CONFIG_PAX_MEMORY_UDEREF
9159+ mov $PAX_USER_SHADOW_BASE,%r11
9160+ add %r11,%r8
9161+#endif
9162+
9163 1: movl (%r8),%r9d
9164 .section __ex_table,"a"
9165 .quad 1b,ia32_badarg
9166 .previous
9167- GET_THREAD_INFO(%r10)
9168- orl $TS_COMPAT,TI_status(%r10)
9169- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9170+ GET_THREAD_INFO(%r11)
9171+ orl $TS_COMPAT,TI_status(%r11)
9172+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9173 CFI_REMEMBER_STATE
9174 jnz cstar_tracesys
9175 cmpq $IA32_NR_syscalls-1,%rax
9176@@ -327,13 +378,15 @@ cstar_do_call:
9177 cstar_dispatch:
9178 call *ia32_sys_call_table(,%rax,8)
9179 movq %rax,RAX-ARGOFFSET(%rsp)
9180- GET_THREAD_INFO(%r10)
9181+ GET_THREAD_INFO(%r11)
9182 DISABLE_INTERRUPTS(CLBR_NONE)
9183 TRACE_IRQS_OFF
9184- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
9185+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
9186 jnz sysretl_audit
9187 sysretl_from_sys_call:
9188- andl $~TS_COMPAT,TI_status(%r10)
9189+ pax_exit_kernel_user
9190+ pax_erase_kstack
9191+ andl $~TS_COMPAT,TI_status(%r11)
9192 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
9193 movl RIP-ARGOFFSET(%rsp),%ecx
9194 CFI_REGISTER rip,rcx
9195@@ -361,7 +414,7 @@ sysretl_audit:
9196
9197 cstar_tracesys:
9198 #ifdef CONFIG_AUDITSYSCALL
9199- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
9200+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
9201 jz cstar_auditsys
9202 #endif
9203 xchgl %r9d,%ebp
9204@@ -370,6 +423,9 @@ cstar_tracesys:
9205 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9206 movq %rsp,%rdi /* &pt_regs -> arg1 */
9207 call syscall_trace_enter
9208+
9209+ pax_erase_kstack
9210+
9211 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
9212 RESTORE_REST
9213 xchgl %ebp,%r9d
9214@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
9215 CFI_REL_OFFSET rip,RIP-RIP
9216 PARAVIRT_ADJUST_EXCEPTION_FRAME
9217 SWAPGS
9218- /*
9219- * No need to follow this irqs on/off section: the syscall
9220- * disabled irqs and here we enable it straight after entry:
9221- */
9222- ENABLE_INTERRUPTS(CLBR_NONE)
9223 movl %eax,%eax
9224 pushq %rax
9225 CFI_ADJUST_CFA_OFFSET 8
9226@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
9227 /* note the registers are not zero extended to the sf.
9228 this could be a problem. */
9229 SAVE_ARGS 0,0,1
9230- GET_THREAD_INFO(%r10)
9231- orl $TS_COMPAT,TI_status(%r10)
9232- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
9233+ pax_enter_kernel_user
9234+ /*
9235+ * No need to follow this irqs on/off section: the syscall
9236+ * disabled irqs and here we enable it straight after entry:
9237+ */
9238+ ENABLE_INTERRUPTS(CLBR_NONE)
9239+ GET_THREAD_INFO(%r11)
9240+ orl $TS_COMPAT,TI_status(%r11)
9241+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
9242 jnz ia32_tracesys
9243 cmpq $(IA32_NR_syscalls-1),%rax
9244 ja ia32_badsys
9245@@ -448,6 +505,9 @@ ia32_tracesys:
9246 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
9247 movq %rsp,%rdi /* &pt_regs -> arg1 */
9248 call syscall_trace_enter
9249+
9250+ pax_erase_kstack
9251+
9252 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9253 RESTORE_REST
9254 cmpq $(IA32_NR_syscalls-1),%rax
9255@@ -462,6 +522,7 @@ ia32_badsys:
9256
9257 quiet_ni_syscall:
9258 movq $-ENOSYS,%rax
9259+ pax_force_retaddr
9260 ret
9261 CFI_ENDPROC
9262
9263diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
9264index 016218c..47ccbdd 100644
9265--- a/arch/x86/ia32/sys_ia32.c
9266+++ b/arch/x86/ia32/sys_ia32.c
9267@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9268 */
9269 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9270 {
9271- typeof(ubuf->st_uid) uid = 0;
9272- typeof(ubuf->st_gid) gid = 0;
9273+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
9274+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
9275 SET_UID(uid, stat->uid);
9276 SET_GID(gid, stat->gid);
9277 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9278@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
9279 }
9280 set_fs(KERNEL_DS);
9281 ret = sys_rt_sigprocmask(how,
9282- set ? (sigset_t __user *)&s : NULL,
9283- oset ? (sigset_t __user *)&s : NULL,
9284+ set ? (sigset_t __force_user *)&s : NULL,
9285+ oset ? (sigset_t __force_user *)&s : NULL,
9286 sigsetsize);
9287 set_fs(old_fs);
9288 if (ret)
9289@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9290 mm_segment_t old_fs = get_fs();
9291
9292 set_fs(KERNEL_DS);
9293- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9294+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9295 set_fs(old_fs);
9296 if (put_compat_timespec(&t, interval))
9297 return -EFAULT;
9298@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9299 mm_segment_t old_fs = get_fs();
9300
9301 set_fs(KERNEL_DS);
9302- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9303+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9304 set_fs(old_fs);
9305 if (!ret) {
9306 switch (_NSIG_WORDS) {
9307@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9308 if (copy_siginfo_from_user32(&info, uinfo))
9309 return -EFAULT;
9310 set_fs(KERNEL_DS);
9311- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9312+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9313 set_fs(old_fs);
9314 return ret;
9315 }
9316@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9317 return -EFAULT;
9318
9319 set_fs(KERNEL_DS);
9320- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9321+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9322 count);
9323 set_fs(old_fs);
9324
9325diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9326index e2077d3..17d07ad 100644
9327--- a/arch/x86/include/asm/alternative-asm.h
9328+++ b/arch/x86/include/asm/alternative-asm.h
9329@@ -8,10 +8,10 @@
9330
9331 #ifdef CONFIG_SMP
9332 .macro LOCK_PREFIX
9333-1: lock
9334+672: lock
9335 .section .smp_locks,"a"
9336 .align 4
9337- X86_ALIGN 1b
9338+ X86_ALIGN 672b
9339 .previous
9340 .endm
9341 #else
9342@@ -19,4 +19,43 @@
9343 .endm
9344 #endif
9345
9346+#ifdef KERNEXEC_PLUGIN
9347+ .macro pax_force_retaddr_bts rip=0
9348+ btsq $63,\rip(%rsp)
9349+ .endm
9350+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9351+ .macro pax_force_retaddr rip=0, reload=0
9352+ btsq $63,\rip(%rsp)
9353+ .endm
9354+ .macro pax_force_fptr ptr
9355+ btsq $63,\ptr
9356+ .endm
9357+ .macro pax_set_fptr_mask
9358+ .endm
9359+#endif
9360+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9361+ .macro pax_force_retaddr rip=0, reload=0
9362+ .if \reload
9363+ pax_set_fptr_mask
9364+ .endif
9365+ orq %r10,\rip(%rsp)
9366+ .endm
9367+ .macro pax_force_fptr ptr
9368+ orq %r10,\ptr
9369+ .endm
9370+ .macro pax_set_fptr_mask
9371+ movabs $0x8000000000000000,%r10
9372+ .endm
9373+#endif
9374+#else
9375+ .macro pax_force_retaddr rip=0, reload=0
9376+ .endm
9377+ .macro pax_force_fptr ptr
9378+ .endm
9379+ .macro pax_force_retaddr_bts rip=0
9380+ .endm
9381+ .macro pax_set_fptr_mask
9382+ .endm
9383+#endif
9384+
9385 #endif /* __ASSEMBLY__ */
9386diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9387index c240efc..fdfadf3 100644
9388--- a/arch/x86/include/asm/alternative.h
9389+++ b/arch/x86/include/asm/alternative.h
9390@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
9391 " .byte 662b-661b\n" /* sourcelen */ \
9392 " .byte 664f-663f\n" /* replacementlen */ \
9393 ".previous\n" \
9394- ".section .altinstr_replacement, \"ax\"\n" \
9395+ ".section .altinstr_replacement, \"a\"\n" \
9396 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9397 ".previous"
9398
9399diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9400index 474d80d..1f97d58 100644
9401--- a/arch/x86/include/asm/apic.h
9402+++ b/arch/x86/include/asm/apic.h
9403@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
9404
9405 #ifdef CONFIG_X86_LOCAL_APIC
9406
9407-extern unsigned int apic_verbosity;
9408+extern int apic_verbosity;
9409 extern int local_apic_timer_c2_ok;
9410
9411 extern int disable_apic;
9412diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9413index 20370c6..a2eb9b0 100644
9414--- a/arch/x86/include/asm/apm.h
9415+++ b/arch/x86/include/asm/apm.h
9416@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9417 __asm__ __volatile__(APM_DO_ZERO_SEGS
9418 "pushl %%edi\n\t"
9419 "pushl %%ebp\n\t"
9420- "lcall *%%cs:apm_bios_entry\n\t"
9421+ "lcall *%%ss:apm_bios_entry\n\t"
9422 "setc %%al\n\t"
9423 "popl %%ebp\n\t"
9424 "popl %%edi\n\t"
9425@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9426 __asm__ __volatile__(APM_DO_ZERO_SEGS
9427 "pushl %%edi\n\t"
9428 "pushl %%ebp\n\t"
9429- "lcall *%%cs:apm_bios_entry\n\t"
9430+ "lcall *%%ss:apm_bios_entry\n\t"
9431 "setc %%bl\n\t"
9432 "popl %%ebp\n\t"
9433 "popl %%edi\n\t"
9434diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
9435index dc5a667..939040c 100644
9436--- a/arch/x86/include/asm/atomic_32.h
9437+++ b/arch/x86/include/asm/atomic_32.h
9438@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
9439 }
9440
9441 /**
9442+ * atomic_read_unchecked - read atomic variable
9443+ * @v: pointer of type atomic_unchecked_t
9444+ *
9445+ * Atomically reads the value of @v.
9446+ */
9447+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9448+{
9449+ return v->counter;
9450+}
9451+
9452+/**
9453 * atomic_set - set atomic variable
9454 * @v: pointer of type atomic_t
9455 * @i: required value
9456@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
9457 }
9458
9459 /**
9460+ * atomic_set_unchecked - set atomic variable
9461+ * @v: pointer of type atomic_unchecked_t
9462+ * @i: required value
9463+ *
9464+ * Atomically sets the value of @v to @i.
9465+ */
9466+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9467+{
9468+ v->counter = i;
9469+}
9470+
9471+/**
9472 * atomic_add - add integer to atomic variable
9473 * @i: integer value to add
9474 * @v: pointer of type atomic_t
9475@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
9476 */
9477 static inline void atomic_add(int i, atomic_t *v)
9478 {
9479- asm volatile(LOCK_PREFIX "addl %1,%0"
9480+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9481+
9482+#ifdef CONFIG_PAX_REFCOUNT
9483+ "jno 0f\n"
9484+ LOCK_PREFIX "subl %1,%0\n"
9485+ "int $4\n0:\n"
9486+ _ASM_EXTABLE(0b, 0b)
9487+#endif
9488+
9489+ : "+m" (v->counter)
9490+ : "ir" (i));
9491+}
9492+
9493+/**
9494+ * atomic_add_unchecked - add integer to atomic variable
9495+ * @i: integer value to add
9496+ * @v: pointer of type atomic_unchecked_t
9497+ *
9498+ * Atomically adds @i to @v.
9499+ */
9500+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9501+{
9502+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9503 : "+m" (v->counter)
9504 : "ir" (i));
9505 }
9506@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
9507 */
9508 static inline void atomic_sub(int i, atomic_t *v)
9509 {
9510- asm volatile(LOCK_PREFIX "subl %1,%0"
9511+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9512+
9513+#ifdef CONFIG_PAX_REFCOUNT
9514+ "jno 0f\n"
9515+ LOCK_PREFIX "addl %1,%0\n"
9516+ "int $4\n0:\n"
9517+ _ASM_EXTABLE(0b, 0b)
9518+#endif
9519+
9520+ : "+m" (v->counter)
9521+ : "ir" (i));
9522+}
9523+
9524+/**
9525+ * atomic_sub_unchecked - subtract integer from atomic variable
9526+ * @i: integer value to subtract
9527+ * @v: pointer of type atomic_unchecked_t
9528+ *
9529+ * Atomically subtracts @i from @v.
9530+ */
9531+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9532+{
9533+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9534 : "+m" (v->counter)
9535 : "ir" (i));
9536 }
9537@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9538 {
9539 unsigned char c;
9540
9541- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9542+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9543+
9544+#ifdef CONFIG_PAX_REFCOUNT
9545+ "jno 0f\n"
9546+ LOCK_PREFIX "addl %2,%0\n"
9547+ "int $4\n0:\n"
9548+ _ASM_EXTABLE(0b, 0b)
9549+#endif
9550+
9551+ "sete %1\n"
9552 : "+m" (v->counter), "=qm" (c)
9553 : "ir" (i) : "memory");
9554 return c;
9555@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9556 */
9557 static inline void atomic_inc(atomic_t *v)
9558 {
9559- asm volatile(LOCK_PREFIX "incl %0"
9560+ asm volatile(LOCK_PREFIX "incl %0\n"
9561+
9562+#ifdef CONFIG_PAX_REFCOUNT
9563+ "jno 0f\n"
9564+ LOCK_PREFIX "decl %0\n"
9565+ "int $4\n0:\n"
9566+ _ASM_EXTABLE(0b, 0b)
9567+#endif
9568+
9569+ : "+m" (v->counter));
9570+}
9571+
9572+/**
9573+ * atomic_inc_unchecked - increment atomic variable
9574+ * @v: pointer of type atomic_unchecked_t
9575+ *
9576+ * Atomically increments @v by 1.
9577+ */
9578+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9579+{
9580+ asm volatile(LOCK_PREFIX "incl %0\n"
9581 : "+m" (v->counter));
9582 }
9583
9584@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
9585 */
9586 static inline void atomic_dec(atomic_t *v)
9587 {
9588- asm volatile(LOCK_PREFIX "decl %0"
9589+ asm volatile(LOCK_PREFIX "decl %0\n"
9590+
9591+#ifdef CONFIG_PAX_REFCOUNT
9592+ "jno 0f\n"
9593+ LOCK_PREFIX "incl %0\n"
9594+ "int $4\n0:\n"
9595+ _ASM_EXTABLE(0b, 0b)
9596+#endif
9597+
9598+ : "+m" (v->counter));
9599+}
9600+
9601+/**
9602+ * atomic_dec_unchecked - decrement atomic variable
9603+ * @v: pointer of type atomic_unchecked_t
9604+ *
9605+ * Atomically decrements @v by 1.
9606+ */
9607+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9608+{
9609+ asm volatile(LOCK_PREFIX "decl %0\n"
9610 : "+m" (v->counter));
9611 }
9612
9613@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9614 {
9615 unsigned char c;
9616
9617- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9618+ asm volatile(LOCK_PREFIX "decl %0\n"
9619+
9620+#ifdef CONFIG_PAX_REFCOUNT
9621+ "jno 0f\n"
9622+ LOCK_PREFIX "incl %0\n"
9623+ "int $4\n0:\n"
9624+ _ASM_EXTABLE(0b, 0b)
9625+#endif
9626+
9627+ "sete %1\n"
9628 : "+m" (v->counter), "=qm" (c)
9629 : : "memory");
9630 return c != 0;
9631@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9632 {
9633 unsigned char c;
9634
9635- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9636+ asm volatile(LOCK_PREFIX "incl %0\n"
9637+
9638+#ifdef CONFIG_PAX_REFCOUNT
9639+ "jno 0f\n"
9640+ LOCK_PREFIX "decl %0\n"
9641+ "into\n0:\n"
9642+ _ASM_EXTABLE(0b, 0b)
9643+#endif
9644+
9645+ "sete %1\n"
9646+ : "+m" (v->counter), "=qm" (c)
9647+ : : "memory");
9648+ return c != 0;
9649+}
9650+
9651+/**
9652+ * atomic_inc_and_test_unchecked - increment and test
9653+ * @v: pointer of type atomic_unchecked_t
9654+ *
9655+ * Atomically increments @v by 1
9656+ * and returns true if the result is zero, or false for all
9657+ * other cases.
9658+ */
9659+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9660+{
9661+ unsigned char c;
9662+
9663+ asm volatile(LOCK_PREFIX "incl %0\n"
9664+ "sete %1\n"
9665 : "+m" (v->counter), "=qm" (c)
9666 : : "memory");
9667 return c != 0;
9668@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9669 {
9670 unsigned char c;
9671
9672- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9673+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9674+
9675+#ifdef CONFIG_PAX_REFCOUNT
9676+ "jno 0f\n"
9677+ LOCK_PREFIX "subl %2,%0\n"
9678+ "int $4\n0:\n"
9679+ _ASM_EXTABLE(0b, 0b)
9680+#endif
9681+
9682+ "sets %1\n"
9683 : "+m" (v->counter), "=qm" (c)
9684 : "ir" (i) : "memory");
9685 return c;
9686@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
9687 #endif
9688 /* Modern 486+ processor */
9689 __i = i;
9690- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9691+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9692+
9693+#ifdef CONFIG_PAX_REFCOUNT
9694+ "jno 0f\n"
9695+ "movl %0, %1\n"
9696+ "int $4\n0:\n"
9697+ _ASM_EXTABLE(0b, 0b)
9698+#endif
9699+
9700 : "+r" (i), "+m" (v->counter)
9701 : : "memory");
9702 return i + __i;
9703@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
9704 }
9705
9706 /**
9707+ * atomic_add_return_unchecked - add integer and return
9708+ * @v: pointer of type atomic_unchecked_t
9709+ * @i: integer value to add
9710+ *
9711+ * Atomically adds @i to @v and returns @i + @v
9712+ */
9713+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9714+{
9715+ int __i;
9716+#ifdef CONFIG_M386
9717+ unsigned long flags;
9718+ if (unlikely(boot_cpu_data.x86 <= 3))
9719+ goto no_xadd;
9720+#endif
9721+ /* Modern 486+ processor */
9722+ __i = i;
9723+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
9724+ : "+r" (i), "+m" (v->counter)
9725+ : : "memory");
9726+ return i + __i;
9727+
9728+#ifdef CONFIG_M386
9729+no_xadd: /* Legacy 386 processor */
9730+ local_irq_save(flags);
9731+ __i = atomic_read_unchecked(v);
9732+ atomic_set_unchecked(v, i + __i);
9733+ local_irq_restore(flags);
9734+ return i + __i;
9735+#endif
9736+}
9737+
9738+/**
9739 * atomic_sub_return - subtract integer and return
9740 * @v: pointer of type atomic_t
9741 * @i: integer value to subtract
9742@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9743 return cmpxchg(&v->counter, old, new);
9744 }
9745
9746+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9747+{
9748+ return cmpxchg(&v->counter, old, new);
9749+}
9750+
9751 static inline int atomic_xchg(atomic_t *v, int new)
9752 {
9753 return xchg(&v->counter, new);
9754 }
9755
9756+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9757+{
9758+ return xchg(&v->counter, new);
9759+}
9760+
9761 /**
9762 * atomic_add_unless - add unless the number is already a given value
9763 * @v: pointer of type atomic_t
9764@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
9765 */
9766 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9767 {
9768- int c, old;
9769+ int c, old, new;
9770 c = atomic_read(v);
9771 for (;;) {
9772- if (unlikely(c == (u)))
9773+ if (unlikely(c == u))
9774 break;
9775- old = atomic_cmpxchg((v), c, c + (a));
9776+
9777+ asm volatile("addl %2,%0\n"
9778+
9779+#ifdef CONFIG_PAX_REFCOUNT
9780+ "jno 0f\n"
9781+ "subl %2,%0\n"
9782+ "int $4\n0:\n"
9783+ _ASM_EXTABLE(0b, 0b)
9784+#endif
9785+
9786+ : "=r" (new)
9787+ : "0" (c), "ir" (a));
9788+
9789+ old = atomic_cmpxchg(v, c, new);
9790 if (likely(old == c))
9791 break;
9792 c = old;
9793 }
9794- return c != (u);
9795+ return c != u;
9796 }
9797
9798 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9799
9800 #define atomic_inc_return(v) (atomic_add_return(1, v))
9801+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9802+{
9803+ return atomic_add_return_unchecked(1, v);
9804+}
9805 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9806
9807 /* These are x86-specific, used by some header files */
9808@@ -266,9 +495,18 @@ typedef struct {
9809 u64 __aligned(8) counter;
9810 } atomic64_t;
9811
9812+#ifdef CONFIG_PAX_REFCOUNT
9813+typedef struct {
9814+ u64 __aligned(8) counter;
9815+} atomic64_unchecked_t;
9816+#else
9817+typedef atomic64_t atomic64_unchecked_t;
9818+#endif
9819+
9820 #define ATOMIC64_INIT(val) { (val) }
9821
9822 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9823+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
9824
9825 /**
9826 * atomic64_xchg - xchg atomic64 variable
9827@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
9828 * the old value.
9829 */
9830 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9831+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9832
9833 /**
9834 * atomic64_set - set atomic64 variable
9835@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
9836 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
9837
9838 /**
9839+ * atomic64_unchecked_set - set atomic64 variable
9840+ * @ptr: pointer to type atomic64_unchecked_t
9841+ * @new_val: value to assign
9842+ *
9843+ * Atomically sets the value of @ptr to @new_val.
9844+ */
9845+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
9846+
9847+/**
9848 * atomic64_read - read atomic64 variable
9849 * @ptr: pointer to type atomic64_t
9850 *
9851@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
9852 return res;
9853 }
9854
9855-extern u64 atomic64_read(atomic64_t *ptr);
9856+/**
9857+ * atomic64_read_unchecked - read atomic64 variable
9858+ * @ptr: pointer to type atomic64_unchecked_t
9859+ *
9860+ * Atomically reads the value of @ptr and returns it.
9861+ */
9862+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
9863+{
9864+ u64 res;
9865+
9866+ /*
9867+ * Note, we inline this atomic64_unchecked_t primitive because
9868+ * it only clobbers EAX/EDX and leaves the others
9869+ * untouched. We also (somewhat subtly) rely on the
9870+ * fact that cmpxchg8b returns the current 64-bit value
9871+ * of the memory location we are touching:
9872+ */
9873+ asm volatile(
9874+ "mov %%ebx, %%eax\n\t"
9875+ "mov %%ecx, %%edx\n\t"
9876+ LOCK_PREFIX "cmpxchg8b %1\n"
9877+ : "=&A" (res)
9878+ : "m" (*ptr)
9879+ );
9880+
9881+ return res;
9882+}
9883
9884 /**
9885 * atomic64_add_return - add and return
9886@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
9887 * Other variants with different arithmetic operators:
9888 */
9889 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
9890+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9891 extern u64 atomic64_inc_return(atomic64_t *ptr);
9892+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
9893 extern u64 atomic64_dec_return(atomic64_t *ptr);
9894+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
9895
9896 /**
9897 * atomic64_add - add integer to atomic64 variable
9898@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
9899 extern void atomic64_add(u64 delta, atomic64_t *ptr);
9900
9901 /**
9902+ * atomic64_add_unchecked - add integer to atomic64 variable
9903+ * @delta: integer value to add
9904+ * @ptr: pointer to type atomic64_unchecked_t
9905+ *
9906+ * Atomically adds @delta to @ptr.
9907+ */
9908+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9909+
9910+/**
9911 * atomic64_sub - subtract the atomic64 variable
9912 * @delta: integer value to subtract
9913 * @ptr: pointer to type atomic64_t
9914@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
9915 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
9916
9917 /**
9918+ * atomic64_sub_unchecked - subtract the atomic64 variable
9919+ * @delta: integer value to subtract
9920+ * @ptr: pointer to type atomic64_unchecked_t
9921+ *
9922+ * Atomically subtracts @delta from @ptr.
9923+ */
9924+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
9925+
9926+/**
9927 * atomic64_sub_and_test - subtract value from variable and test result
9928 * @delta: integer value to subtract
9929 * @ptr: pointer to type atomic64_t
9930@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
9931 extern void atomic64_inc(atomic64_t *ptr);
9932
9933 /**
9934+ * atomic64_inc_unchecked - increment atomic64 variable
9935+ * @ptr: pointer to type atomic64_unchecked_t
9936+ *
9937+ * Atomically increments @ptr by 1.
9938+ */
9939+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
9940+
9941+/**
9942 * atomic64_dec - decrement atomic64 variable
9943 * @ptr: pointer to type atomic64_t
9944 *
9945@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
9946 extern void atomic64_dec(atomic64_t *ptr);
9947
9948 /**
9949+ * atomic64_dec_unchecked - decrement atomic64 variable
9950+ * @ptr: pointer to type atomic64_unchecked_t
9951+ *
9952+ * Atomically decrements @ptr by 1.
9953+ */
9954+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
9955+
9956+/**
9957 * atomic64_dec_and_test - decrement and test
9958 * @ptr: pointer to type atomic64_t
9959 *
9960diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
9961index d605dc2..fafd7bd 100644
9962--- a/arch/x86/include/asm/atomic_64.h
9963+++ b/arch/x86/include/asm/atomic_64.h
9964@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
9965 }
9966
9967 /**
9968+ * atomic_read_unchecked - read atomic variable
9969+ * @v: pointer of type atomic_unchecked_t
9970+ *
9971+ * Atomically reads the value of @v.
9972+ */
9973+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9974+{
9975+ return v->counter;
9976+}
9977+
9978+/**
9979 * atomic_set - set atomic variable
9980 * @v: pointer of type atomic_t
9981 * @i: required value
9982@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
9983 }
9984
9985 /**
9986+ * atomic_set_unchecked - set atomic variable
9987+ * @v: pointer of type atomic_unchecked_t
9988+ * @i: required value
9989+ *
9990+ * Atomically sets the value of @v to @i.
9991+ */
9992+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9993+{
9994+ v->counter = i;
9995+}
9996+
9997+/**
9998 * atomic_add - add integer to atomic variable
9999 * @i: integer value to add
10000 * @v: pointer of type atomic_t
10001@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
10002 */
10003 static inline void atomic_add(int i, atomic_t *v)
10004 {
10005- asm volatile(LOCK_PREFIX "addl %1,%0"
10006+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10007+
10008+#ifdef CONFIG_PAX_REFCOUNT
10009+ "jno 0f\n"
10010+ LOCK_PREFIX "subl %1,%0\n"
10011+ "int $4\n0:\n"
10012+ _ASM_EXTABLE(0b, 0b)
10013+#endif
10014+
10015+ : "=m" (v->counter)
10016+ : "ir" (i), "m" (v->counter));
10017+}
10018+
10019+/**
10020+ * atomic_add_unchecked - add integer to atomic variable
10021+ * @i: integer value to add
10022+ * @v: pointer of type atomic_unchecked_t
10023+ *
10024+ * Atomically adds @i to @v.
10025+ */
10026+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
10027+{
10028+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
10029 : "=m" (v->counter)
10030 : "ir" (i), "m" (v->counter));
10031 }
10032@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
10033 */
10034 static inline void atomic_sub(int i, atomic_t *v)
10035 {
10036- asm volatile(LOCK_PREFIX "subl %1,%0"
10037+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10038+
10039+#ifdef CONFIG_PAX_REFCOUNT
10040+ "jno 0f\n"
10041+ LOCK_PREFIX "addl %1,%0\n"
10042+ "int $4\n0:\n"
10043+ _ASM_EXTABLE(0b, 0b)
10044+#endif
10045+
10046+ : "=m" (v->counter)
10047+ : "ir" (i), "m" (v->counter));
10048+}
10049+
10050+/**
10051+ * atomic_sub_unchecked - subtract the atomic variable
10052+ * @i: integer value to subtract
10053+ * @v: pointer of type atomic_unchecked_t
10054+ *
10055+ * Atomically subtracts @i from @v.
10056+ */
10057+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
10058+{
10059+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
10060 : "=m" (v->counter)
10061 : "ir" (i), "m" (v->counter));
10062 }
10063@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10064 {
10065 unsigned char c;
10066
10067- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
10068+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
10069+
10070+#ifdef CONFIG_PAX_REFCOUNT
10071+ "jno 0f\n"
10072+ LOCK_PREFIX "addl %2,%0\n"
10073+ "int $4\n0:\n"
10074+ _ASM_EXTABLE(0b, 0b)
10075+#endif
10076+
10077+ "sete %1\n"
10078 : "=m" (v->counter), "=qm" (c)
10079 : "ir" (i), "m" (v->counter) : "memory");
10080 return c;
10081@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
10082 */
10083 static inline void atomic_inc(atomic_t *v)
10084 {
10085- asm volatile(LOCK_PREFIX "incl %0"
10086+ asm volatile(LOCK_PREFIX "incl %0\n"
10087+
10088+#ifdef CONFIG_PAX_REFCOUNT
10089+ "jno 0f\n"
10090+ LOCK_PREFIX "decl %0\n"
10091+ "int $4\n0:\n"
10092+ _ASM_EXTABLE(0b, 0b)
10093+#endif
10094+
10095+ : "=m" (v->counter)
10096+ : "m" (v->counter));
10097+}
10098+
10099+/**
10100+ * atomic_inc_unchecked - increment atomic variable
10101+ * @v: pointer of type atomic_unchecked_t
10102+ *
10103+ * Atomically increments @v by 1.
10104+ */
10105+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
10106+{
10107+ asm volatile(LOCK_PREFIX "incl %0\n"
10108 : "=m" (v->counter)
10109 : "m" (v->counter));
10110 }
10111@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
10112 */
10113 static inline void atomic_dec(atomic_t *v)
10114 {
10115- asm volatile(LOCK_PREFIX "decl %0"
10116+ asm volatile(LOCK_PREFIX "decl %0\n"
10117+
10118+#ifdef CONFIG_PAX_REFCOUNT
10119+ "jno 0f\n"
10120+ LOCK_PREFIX "incl %0\n"
10121+ "int $4\n0:\n"
10122+ _ASM_EXTABLE(0b, 0b)
10123+#endif
10124+
10125+ : "=m" (v->counter)
10126+ : "m" (v->counter));
10127+}
10128+
10129+/**
10130+ * atomic_dec_unchecked - decrement atomic variable
10131+ * @v: pointer of type atomic_unchecked_t
10132+ *
10133+ * Atomically decrements @v by 1.
10134+ */
10135+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
10136+{
10137+ asm volatile(LOCK_PREFIX "decl %0\n"
10138 : "=m" (v->counter)
10139 : "m" (v->counter));
10140 }
10141@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
10142 {
10143 unsigned char c;
10144
10145- asm volatile(LOCK_PREFIX "decl %0; sete %1"
10146+ asm volatile(LOCK_PREFIX "decl %0\n"
10147+
10148+#ifdef CONFIG_PAX_REFCOUNT
10149+ "jno 0f\n"
10150+ LOCK_PREFIX "incl %0\n"
10151+ "int $4\n0:\n"
10152+ _ASM_EXTABLE(0b, 0b)
10153+#endif
10154+
10155+ "sete %1\n"
10156 : "=m" (v->counter), "=qm" (c)
10157 : "m" (v->counter) : "memory");
10158 return c != 0;
10159@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
10160 {
10161 unsigned char c;
10162
10163- asm volatile(LOCK_PREFIX "incl %0; sete %1"
10164+ asm volatile(LOCK_PREFIX "incl %0\n"
10165+
10166+#ifdef CONFIG_PAX_REFCOUNT
10167+ "jno 0f\n"
10168+ LOCK_PREFIX "decl %0\n"
10169+ "int $4\n0:\n"
10170+ _ASM_EXTABLE(0b, 0b)
10171+#endif
10172+
10173+ "sete %1\n"
10174+ : "=m" (v->counter), "=qm" (c)
10175+ : "m" (v->counter) : "memory");
10176+ return c != 0;
10177+}
10178+
10179+/**
10180+ * atomic_inc_and_test_unchecked - increment and test
10181+ * @v: pointer of type atomic_unchecked_t
10182+ *
10183+ * Atomically increments @v by 1
10184+ * and returns true if the result is zero, or false for all
10185+ * other cases.
10186+ */
10187+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
10188+{
10189+ unsigned char c;
10190+
10191+ asm volatile(LOCK_PREFIX "incl %0\n"
10192+ "sete %1\n"
10193 : "=m" (v->counter), "=qm" (c)
10194 : "m" (v->counter) : "memory");
10195 return c != 0;
10196@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10197 {
10198 unsigned char c;
10199
10200- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
10201+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
10202+
10203+#ifdef CONFIG_PAX_REFCOUNT
10204+ "jno 0f\n"
10205+ LOCK_PREFIX "subl %2,%0\n"
10206+ "int $4\n0:\n"
10207+ _ASM_EXTABLE(0b, 0b)
10208+#endif
10209+
10210+ "sets %1\n"
10211 : "=m" (v->counter), "=qm" (c)
10212 : "ir" (i), "m" (v->counter) : "memory");
10213 return c;
10214@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
10215 static inline int atomic_add_return(int i, atomic_t *v)
10216 {
10217 int __i = i;
10218- asm volatile(LOCK_PREFIX "xaddl %0, %1"
10219+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10220+
10221+#ifdef CONFIG_PAX_REFCOUNT
10222+ "jno 0f\n"
10223+ "movl %0, %1\n"
10224+ "int $4\n0:\n"
10225+ _ASM_EXTABLE(0b, 0b)
10226+#endif
10227+
10228+ : "+r" (i), "+m" (v->counter)
10229+ : : "memory");
10230+ return i + __i;
10231+}
10232+
10233+/**
10234+ * atomic_add_return_unchecked - add and return
10235+ * @i: integer value to add
10236+ * @v: pointer of type atomic_unchecked_t
10237+ *
10238+ * Atomically adds @i to @v and returns @i + @v
10239+ */
10240+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
10241+{
10242+ int __i = i;
10243+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
10244 : "+r" (i), "+m" (v->counter)
10245 : : "memory");
10246 return i + __i;
10247@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
10248 }
10249
10250 #define atomic_inc_return(v) (atomic_add_return(1, v))
10251+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
10252+{
10253+ return atomic_add_return_unchecked(1, v);
10254+}
10255 #define atomic_dec_return(v) (atomic_sub_return(1, v))
10256
10257 /* The 64-bit atomic type */
10258@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
10259 }
10260
10261 /**
10262+ * atomic64_read_unchecked - read atomic64 variable
10263+ * @v: pointer of type atomic64_unchecked_t
10264+ *
10265+ * Atomically reads the value of @v.
10266+ * Doesn't imply a read memory barrier.
10267+ */
10268+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
10269+{
10270+ return v->counter;
10271+}
10272+
10273+/**
10274 * atomic64_set - set atomic64 variable
10275 * @v: pointer to type atomic64_t
10276 * @i: required value
10277@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
10278 }
10279
10280 /**
10281+ * atomic64_set_unchecked - set atomic64 variable
10282+ * @v: pointer to type atomic64_unchecked_t
10283+ * @i: required value
10284+ *
10285+ * Atomically sets the value of @v to @i.
10286+ */
10287+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
10288+{
10289+ v->counter = i;
10290+}
10291+
10292+/**
10293 * atomic64_add - add integer to atomic64 variable
10294 * @i: integer value to add
10295 * @v: pointer to type atomic64_t
10296@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
10297 */
10298 static inline void atomic64_add(long i, atomic64_t *v)
10299 {
10300+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
10301+
10302+#ifdef CONFIG_PAX_REFCOUNT
10303+ "jno 0f\n"
10304+ LOCK_PREFIX "subq %1,%0\n"
10305+ "int $4\n0:\n"
10306+ _ASM_EXTABLE(0b, 0b)
10307+#endif
10308+
10309+ : "=m" (v->counter)
10310+ : "er" (i), "m" (v->counter));
10311+}
10312+
10313+/**
10314+ * atomic64_add_unchecked - add integer to atomic64 variable
10315+ * @i: integer value to add
10316+ * @v: pointer to type atomic64_unchecked_t
10317+ *
10318+ * Atomically adds @i to @v.
10319+ */
10320+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
10321+{
10322 asm volatile(LOCK_PREFIX "addq %1,%0"
10323 : "=m" (v->counter)
10324 : "er" (i), "m" (v->counter));
10325@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
10326 */
10327 static inline void atomic64_sub(long i, atomic64_t *v)
10328 {
10329- asm volatile(LOCK_PREFIX "subq %1,%0"
10330+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
10331+
10332+#ifdef CONFIG_PAX_REFCOUNT
10333+ "jno 0f\n"
10334+ LOCK_PREFIX "addq %1,%0\n"
10335+ "int $4\n0:\n"
10336+ _ASM_EXTABLE(0b, 0b)
10337+#endif
10338+
10339 : "=m" (v->counter)
10340 : "er" (i), "m" (v->counter));
10341 }
10342@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10343 {
10344 unsigned char c;
10345
10346- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
10347+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
10348+
10349+#ifdef CONFIG_PAX_REFCOUNT
10350+ "jno 0f\n"
10351+ LOCK_PREFIX "addq %2,%0\n"
10352+ "int $4\n0:\n"
10353+ _ASM_EXTABLE(0b, 0b)
10354+#endif
10355+
10356+ "sete %1\n"
10357 : "=m" (v->counter), "=qm" (c)
10358 : "er" (i), "m" (v->counter) : "memory");
10359 return c;
10360@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
10361 */
10362 static inline void atomic64_inc(atomic64_t *v)
10363 {
10364+ asm volatile(LOCK_PREFIX "incq %0\n"
10365+
10366+#ifdef CONFIG_PAX_REFCOUNT
10367+ "jno 0f\n"
10368+ LOCK_PREFIX "decq %0\n"
10369+ "int $4\n0:\n"
10370+ _ASM_EXTABLE(0b, 0b)
10371+#endif
10372+
10373+ : "=m" (v->counter)
10374+ : "m" (v->counter));
10375+}
10376+
10377+/**
10378+ * atomic64_inc_unchecked - increment atomic64 variable
10379+ * @v: pointer to type atomic64_unchecked_t
10380+ *
10381+ * Atomically increments @v by 1.
10382+ */
10383+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
10384+{
10385 asm volatile(LOCK_PREFIX "incq %0"
10386 : "=m" (v->counter)
10387 : "m" (v->counter));
10388@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
10389 */
10390 static inline void atomic64_dec(atomic64_t *v)
10391 {
10392- asm volatile(LOCK_PREFIX "decq %0"
10393+ asm volatile(LOCK_PREFIX "decq %0\n"
10394+
10395+#ifdef CONFIG_PAX_REFCOUNT
10396+ "jno 0f\n"
10397+ LOCK_PREFIX "incq %0\n"
10398+ "int $4\n0:\n"
10399+ _ASM_EXTABLE(0b, 0b)
10400+#endif
10401+
10402+ : "=m" (v->counter)
10403+ : "m" (v->counter));
10404+}
10405+
10406+/**
10407+ * atomic64_dec_unchecked - decrement atomic64 variable
10408+ * @v: pointer to type atomic64_t
10409+ *
10410+ * Atomically decrements @v by 1.
10411+ */
10412+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
10413+{
10414+ asm volatile(LOCK_PREFIX "decq %0\n"
10415 : "=m" (v->counter)
10416 : "m" (v->counter));
10417 }
10418@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
10419 {
10420 unsigned char c;
10421
10422- asm volatile(LOCK_PREFIX "decq %0; sete %1"
10423+ asm volatile(LOCK_PREFIX "decq %0\n"
10424+
10425+#ifdef CONFIG_PAX_REFCOUNT
10426+ "jno 0f\n"
10427+ LOCK_PREFIX "incq %0\n"
10428+ "int $4\n0:\n"
10429+ _ASM_EXTABLE(0b, 0b)
10430+#endif
10431+
10432+ "sete %1\n"
10433 : "=m" (v->counter), "=qm" (c)
10434 : "m" (v->counter) : "memory");
10435 return c != 0;
10436@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
10437 {
10438 unsigned char c;
10439
10440- asm volatile(LOCK_PREFIX "incq %0; sete %1"
10441+ asm volatile(LOCK_PREFIX "incq %0\n"
10442+
10443+#ifdef CONFIG_PAX_REFCOUNT
10444+ "jno 0f\n"
10445+ LOCK_PREFIX "decq %0\n"
10446+ "int $4\n0:\n"
10447+ _ASM_EXTABLE(0b, 0b)
10448+#endif
10449+
10450+ "sete %1\n"
10451 : "=m" (v->counter), "=qm" (c)
10452 : "m" (v->counter) : "memory");
10453 return c != 0;
10454@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10455 {
10456 unsigned char c;
10457
10458- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10459+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
10460+
10461+#ifdef CONFIG_PAX_REFCOUNT
10462+ "jno 0f\n"
10463+ LOCK_PREFIX "subq %2,%0\n"
10464+ "int $4\n0:\n"
10465+ _ASM_EXTABLE(0b, 0b)
10466+#endif
10467+
10468+ "sets %1\n"
10469 : "=m" (v->counter), "=qm" (c)
10470 : "er" (i), "m" (v->counter) : "memory");
10471 return c;
10472@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10473 static inline long atomic64_add_return(long i, atomic64_t *v)
10474 {
10475 long __i = i;
10476- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
10477+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
10478+
10479+#ifdef CONFIG_PAX_REFCOUNT
10480+ "jno 0f\n"
10481+ "movq %0, %1\n"
10482+ "int $4\n0:\n"
10483+ _ASM_EXTABLE(0b, 0b)
10484+#endif
10485+
10486+ : "+r" (i), "+m" (v->counter)
10487+ : : "memory");
10488+ return i + __i;
10489+}
10490+
10491+/**
10492+ * atomic64_add_return_unchecked - add and return
10493+ * @i: integer value to add
10494+ * @v: pointer to type atomic64_unchecked_t
10495+ *
10496+ * Atomically adds @i to @v and returns @i + @v
10497+ */
10498+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10499+{
10500+ long __i = i;
10501+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
10502 : "+r" (i), "+m" (v->counter)
10503 : : "memory");
10504 return i + __i;
10505@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10506 }
10507
10508 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10509+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10510+{
10511+ return atomic64_add_return_unchecked(1, v);
10512+}
10513 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10514
10515 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10516@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10517 return cmpxchg(&v->counter, old, new);
10518 }
10519
10520+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10521+{
10522+ return cmpxchg(&v->counter, old, new);
10523+}
10524+
10525 static inline long atomic64_xchg(atomic64_t *v, long new)
10526 {
10527 return xchg(&v->counter, new);
10528 }
10529
10530+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
10531+{
10532+ return xchg(&v->counter, new);
10533+}
10534+
10535 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
10536 {
10537 return cmpxchg(&v->counter, old, new);
10538 }
10539
10540+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
10541+{
10542+ return cmpxchg(&v->counter, old, new);
10543+}
10544+
10545 static inline long atomic_xchg(atomic_t *v, int new)
10546 {
10547 return xchg(&v->counter, new);
10548 }
10549
10550+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
10551+{
10552+ return xchg(&v->counter, new);
10553+}
10554+
10555 /**
10556 * atomic_add_unless - add unless the number is a given value
10557 * @v: pointer of type atomic_t
10558@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
10559 */
10560 static inline int atomic_add_unless(atomic_t *v, int a, int u)
10561 {
10562- int c, old;
10563+ int c, old, new;
10564 c = atomic_read(v);
10565 for (;;) {
10566- if (unlikely(c == (u)))
10567+ if (unlikely(c == u))
10568 break;
10569- old = atomic_cmpxchg((v), c, c + (a));
10570+
10571+ asm volatile("addl %2,%0\n"
10572+
10573+#ifdef CONFIG_PAX_REFCOUNT
10574+ "jno 0f\n"
10575+ "subl %2,%0\n"
10576+ "int $4\n0:\n"
10577+ _ASM_EXTABLE(0b, 0b)
10578+#endif
10579+
10580+ : "=r" (new)
10581+ : "0" (c), "ir" (a));
10582+
10583+ old = atomic_cmpxchg(v, c, new);
10584 if (likely(old == c))
10585 break;
10586 c = old;
10587 }
10588- return c != (u);
10589+ return c != u;
10590 }
10591
10592 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
10593@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
10594 */
10595 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10596 {
10597- long c, old;
10598+ long c, old, new;
10599 c = atomic64_read(v);
10600 for (;;) {
10601- if (unlikely(c == (u)))
10602+ if (unlikely(c == u))
10603 break;
10604- old = atomic64_cmpxchg((v), c, c + (a));
10605+
10606+ asm volatile("addq %2,%0\n"
10607+
10608+#ifdef CONFIG_PAX_REFCOUNT
10609+ "jno 0f\n"
10610+ "subq %2,%0\n"
10611+ "int $4\n0:\n"
10612+ _ASM_EXTABLE(0b, 0b)
10613+#endif
10614+
10615+ : "=r" (new)
10616+ : "0" (c), "er" (a));
10617+
10618+ old = atomic64_cmpxchg(v, c, new);
10619 if (likely(old == c))
10620 break;
10621 c = old;
10622 }
10623- return c != (u);
10624+ return c != u;
10625 }
10626
10627 /**
10628diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10629index 02b47a6..d5c4b15 100644
10630--- a/arch/x86/include/asm/bitops.h
10631+++ b/arch/x86/include/asm/bitops.h
10632@@ -38,7 +38,7 @@
10633 * a mask operation on a byte.
10634 */
10635 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10636-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10637+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10638 #define CONST_MASK(nr) (1 << ((nr) & 7))
10639
10640 /**
10641diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10642index 7a10659..8bbf355 100644
10643--- a/arch/x86/include/asm/boot.h
10644+++ b/arch/x86/include/asm/boot.h
10645@@ -11,10 +11,15 @@
10646 #include <asm/pgtable_types.h>
10647
10648 /* Physical address where kernel should be loaded. */
10649-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10650+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10651 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10652 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10653
10654+#ifndef __ASSEMBLY__
10655+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10656+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10657+#endif
10658+
10659 /* Minimum kernel alignment, as a power of two */
10660 #ifdef CONFIG_X86_64
10661 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10662diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10663index 549860d..7d45f68 100644
10664--- a/arch/x86/include/asm/cache.h
10665+++ b/arch/x86/include/asm/cache.h
10666@@ -5,9 +5,10 @@
10667
10668 /* L1 cache line size */
10669 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10670-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10671+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10672
10673 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
10674+#define __read_only __attribute__((__section__(".data.read_only")))
10675
10676 #ifdef CONFIG_X86_VSMP
10677 /* vSMP Internode cacheline shift */
10678diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10679index b54f6af..5b376a6 100644
10680--- a/arch/x86/include/asm/cacheflush.h
10681+++ b/arch/x86/include/asm/cacheflush.h
10682@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
10683 static inline unsigned long get_page_memtype(struct page *pg)
10684 {
10685 if (!PageUncached(pg) && !PageWC(pg))
10686- return -1;
10687+ return ~0UL;
10688 else if (!PageUncached(pg) && PageWC(pg))
10689 return _PAGE_CACHE_WC;
10690 else if (PageUncached(pg) && !PageWC(pg))
10691@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
10692 SetPageWC(pg);
10693 break;
10694 default:
10695- case -1:
10696+ case ~0UL:
10697 ClearPageUncached(pg);
10698 ClearPageWC(pg);
10699 break;
10700diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
10701index 0e63c9a..ab8d972 100644
10702--- a/arch/x86/include/asm/calling.h
10703+++ b/arch/x86/include/asm/calling.h
10704@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
10705 * for assembly code:
10706 */
10707
10708-#define R15 0
10709-#define R14 8
10710-#define R13 16
10711-#define R12 24
10712-#define RBP 32
10713-#define RBX 40
10714+#define R15 (0)
10715+#define R14 (8)
10716+#define R13 (16)
10717+#define R12 (24)
10718+#define RBP (32)
10719+#define RBX (40)
10720
10721 /* arguments: interrupts/non tracing syscalls only save up to here: */
10722-#define R11 48
10723-#define R10 56
10724-#define R9 64
10725-#define R8 72
10726-#define RAX 80
10727-#define RCX 88
10728-#define RDX 96
10729-#define RSI 104
10730-#define RDI 112
10731-#define ORIG_RAX 120 /* + error_code */
10732+#define R11 (48)
10733+#define R10 (56)
10734+#define R9 (64)
10735+#define R8 (72)
10736+#define RAX (80)
10737+#define RCX (88)
10738+#define RDX (96)
10739+#define RSI (104)
10740+#define RDI (112)
10741+#define ORIG_RAX (120) /* + error_code */
10742 /* end of arguments */
10743
10744 /* cpu exception frame or undefined in case of fast syscall: */
10745-#define RIP 128
10746-#define CS 136
10747-#define EFLAGS 144
10748-#define RSP 152
10749-#define SS 160
10750+#define RIP (128)
10751+#define CS (136)
10752+#define EFLAGS (144)
10753+#define RSP (152)
10754+#define SS (160)
10755
10756 #define ARGOFFSET R11
10757 #define SWFRAME ORIG_RAX
10758diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10759index 46fc474..b02b0f9 100644
10760--- a/arch/x86/include/asm/checksum_32.h
10761+++ b/arch/x86/include/asm/checksum_32.h
10762@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10763 int len, __wsum sum,
10764 int *src_err_ptr, int *dst_err_ptr);
10765
10766+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10767+ int len, __wsum sum,
10768+ int *src_err_ptr, int *dst_err_ptr);
10769+
10770+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10771+ int len, __wsum sum,
10772+ int *src_err_ptr, int *dst_err_ptr);
10773+
10774 /*
10775 * Note: when you get a NULL pointer exception here this means someone
10776 * passed in an incorrect kernel address to one of these functions.
10777@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10778 int *err_ptr)
10779 {
10780 might_sleep();
10781- return csum_partial_copy_generic((__force void *)src, dst,
10782+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10783 len, sum, err_ptr, NULL);
10784 }
10785
10786@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10787 {
10788 might_sleep();
10789 if (access_ok(VERIFY_WRITE, dst, len))
10790- return csum_partial_copy_generic(src, (__force void *)dst,
10791+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10792 len, sum, NULL, err_ptr);
10793
10794 if (len)
10795diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10796index 617bd56..7b047a1 100644
10797--- a/arch/x86/include/asm/desc.h
10798+++ b/arch/x86/include/asm/desc.h
10799@@ -4,6 +4,7 @@
10800 #include <asm/desc_defs.h>
10801 #include <asm/ldt.h>
10802 #include <asm/mmu.h>
10803+#include <asm/pgtable.h>
10804 #include <linux/smp.h>
10805
10806 static inline void fill_ldt(struct desc_struct *desc,
10807@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
10808 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
10809 desc->type = (info->read_exec_only ^ 1) << 1;
10810 desc->type |= info->contents << 2;
10811+ desc->type |= info->seg_not_present ^ 1;
10812 desc->s = 1;
10813 desc->dpl = 0x3;
10814 desc->p = info->seg_not_present ^ 1;
10815@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
10816 }
10817
10818 extern struct desc_ptr idt_descr;
10819-extern gate_desc idt_table[];
10820-
10821-struct gdt_page {
10822- struct desc_struct gdt[GDT_ENTRIES];
10823-} __attribute__((aligned(PAGE_SIZE)));
10824-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10825+extern gate_desc idt_table[256];
10826
10827+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10828 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10829 {
10830- return per_cpu(gdt_page, cpu).gdt;
10831+ return cpu_gdt_table[cpu];
10832 }
10833
10834 #ifdef CONFIG_X86_64
10835@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10836 unsigned long base, unsigned dpl, unsigned flags,
10837 unsigned short seg)
10838 {
10839- gate->a = (seg << 16) | (base & 0xffff);
10840- gate->b = (base & 0xffff0000) |
10841- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10842+ gate->gate.offset_low = base;
10843+ gate->gate.seg = seg;
10844+ gate->gate.reserved = 0;
10845+ gate->gate.type = type;
10846+ gate->gate.s = 0;
10847+ gate->gate.dpl = dpl;
10848+ gate->gate.p = 1;
10849+ gate->gate.offset_high = base >> 16;
10850 }
10851
10852 #endif
10853@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10854 static inline void native_write_idt_entry(gate_desc *idt, int entry,
10855 const gate_desc *gate)
10856 {
10857+ pax_open_kernel();
10858 memcpy(&idt[entry], gate, sizeof(*gate));
10859+ pax_close_kernel();
10860 }
10861
10862 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
10863 const void *desc)
10864 {
10865+ pax_open_kernel();
10866 memcpy(&ldt[entry], desc, 8);
10867+ pax_close_kernel();
10868 }
10869
10870 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10871@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
10872 size = sizeof(struct desc_struct);
10873 break;
10874 }
10875+
10876+ pax_open_kernel();
10877 memcpy(&gdt[entry], desc, size);
10878+ pax_close_kernel();
10879 }
10880
10881 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10882@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10883
10884 static inline void native_load_tr_desc(void)
10885 {
10886+ pax_open_kernel();
10887 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10888+ pax_close_kernel();
10889 }
10890
10891 static inline void native_load_gdt(const struct desc_ptr *dtr)
10892@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10893 unsigned int i;
10894 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10895
10896+ pax_open_kernel();
10897 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10898 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10899+ pax_close_kernel();
10900 }
10901
10902 #define _LDT_empty(info) \
10903@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10904 desc->limit = (limit >> 16) & 0xf;
10905 }
10906
10907-static inline void _set_gate(int gate, unsigned type, void *addr,
10908+static inline void _set_gate(int gate, unsigned type, const void *addr,
10909 unsigned dpl, unsigned ist, unsigned seg)
10910 {
10911 gate_desc s;
10912@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10913 * Pentium F0 0F bugfix can have resulted in the mapped
10914 * IDT being write-protected.
10915 */
10916-static inline void set_intr_gate(unsigned int n, void *addr)
10917+static inline void set_intr_gate(unsigned int n, const void *addr)
10918 {
10919 BUG_ON((unsigned)n > 0xFF);
10920 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10921@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10922 /*
10923 * This routine sets up an interrupt gate at directory privilege level 3.
10924 */
10925-static inline void set_system_intr_gate(unsigned int n, void *addr)
10926+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10927 {
10928 BUG_ON((unsigned)n > 0xFF);
10929 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10930 }
10931
10932-static inline void set_system_trap_gate(unsigned int n, void *addr)
10933+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10934 {
10935 BUG_ON((unsigned)n > 0xFF);
10936 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10937 }
10938
10939-static inline void set_trap_gate(unsigned int n, void *addr)
10940+static inline void set_trap_gate(unsigned int n, const void *addr)
10941 {
10942 BUG_ON((unsigned)n > 0xFF);
10943 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10944@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10945 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10946 {
10947 BUG_ON((unsigned)n > 0xFF);
10948- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10949+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10950 }
10951
10952-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10953+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10954 {
10955 BUG_ON((unsigned)n > 0xFF);
10956 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10957 }
10958
10959-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10960+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10961 {
10962 BUG_ON((unsigned)n > 0xFF);
10963 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10964 }
10965
10966+#ifdef CONFIG_X86_32
10967+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10968+{
10969+ struct desc_struct d;
10970+
10971+ if (likely(limit))
10972+ limit = (limit - 1UL) >> PAGE_SHIFT;
10973+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10974+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10975+}
10976+#endif
10977+
10978 #endif /* _ASM_X86_DESC_H */
10979diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10980index 9d66848..6b4a691 100644
10981--- a/arch/x86/include/asm/desc_defs.h
10982+++ b/arch/x86/include/asm/desc_defs.h
10983@@ -31,6 +31,12 @@ struct desc_struct {
10984 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10985 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10986 };
10987+ struct {
10988+ u16 offset_low;
10989+ u16 seg;
10990+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10991+ unsigned offset_high: 16;
10992+ } gate;
10993 };
10994 } __attribute__((packed));
10995
10996diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
10997index cee34e9..a7c3fa2 100644
10998--- a/arch/x86/include/asm/device.h
10999+++ b/arch/x86/include/asm/device.h
11000@@ -6,7 +6,7 @@ struct dev_archdata {
11001 void *acpi_handle;
11002 #endif
11003 #ifdef CONFIG_X86_64
11004-struct dma_map_ops *dma_ops;
11005+ const struct dma_map_ops *dma_ops;
11006 #endif
11007 #ifdef CONFIG_DMAR
11008 void *iommu; /* hook for IOMMU specific extension */
11009diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
11010index 6a25d5d..786b202 100644
11011--- a/arch/x86/include/asm/dma-mapping.h
11012+++ b/arch/x86/include/asm/dma-mapping.h
11013@@ -25,9 +25,9 @@ extern int iommu_merge;
11014 extern struct device x86_dma_fallback_dev;
11015 extern int panic_on_overflow;
11016
11017-extern struct dma_map_ops *dma_ops;
11018+extern const struct dma_map_ops *dma_ops;
11019
11020-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11021+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
11022 {
11023 #ifdef CONFIG_X86_32
11024 return dma_ops;
11025@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
11026 /* Make sure we keep the same behaviour */
11027 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
11028 {
11029- struct dma_map_ops *ops = get_dma_ops(dev);
11030+ const struct dma_map_ops *ops = get_dma_ops(dev);
11031 if (ops->mapping_error)
11032 return ops->mapping_error(dev, dma_addr);
11033
11034@@ -122,7 +122,7 @@ static inline void *
11035 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11036 gfp_t gfp)
11037 {
11038- struct dma_map_ops *ops = get_dma_ops(dev);
11039+ const struct dma_map_ops *ops = get_dma_ops(dev);
11040 void *memory;
11041
11042 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
11043@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
11044 static inline void dma_free_coherent(struct device *dev, size_t size,
11045 void *vaddr, dma_addr_t bus)
11046 {
11047- struct dma_map_ops *ops = get_dma_ops(dev);
11048+ const struct dma_map_ops *ops = get_dma_ops(dev);
11049
11050 WARN_ON(irqs_disabled()); /* for portability */
11051
11052diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
11053index 40b4e61..40d8133 100644
11054--- a/arch/x86/include/asm/e820.h
11055+++ b/arch/x86/include/asm/e820.h
11056@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
11057 #define ISA_END_ADDRESS 0x100000
11058 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
11059
11060-#define BIOS_BEGIN 0x000a0000
11061+#define BIOS_BEGIN 0x000c0000
11062 #define BIOS_END 0x00100000
11063
11064 #ifdef __KERNEL__
11065diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
11066index 8ac9d9a..0a6c96e 100644
11067--- a/arch/x86/include/asm/elf.h
11068+++ b/arch/x86/include/asm/elf.h
11069@@ -257,7 +257,25 @@ extern int force_personality32;
11070 the loader. We need to make sure that it is out of the way of the program
11071 that it will "exec", and that there is sufficient room for the brk. */
11072
11073+#ifdef CONFIG_PAX_SEGMEXEC
11074+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
11075+#else
11076 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
11077+#endif
11078+
11079+#ifdef CONFIG_PAX_ASLR
11080+#ifdef CONFIG_X86_32
11081+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
11082+
11083+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11084+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
11085+#else
11086+#define PAX_ELF_ET_DYN_BASE 0x400000UL
11087+
11088+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11089+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
11090+#endif
11091+#endif
11092
11093 /* This yields a mask that user programs can use to figure out what
11094 instruction set this CPU supports. This could be done in user space,
11095@@ -310,9 +328,7 @@ do { \
11096
11097 #define ARCH_DLINFO \
11098 do { \
11099- if (vdso_enabled) \
11100- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
11101- (unsigned long)current->mm->context.vdso); \
11102+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
11103 } while (0)
11104
11105 #define AT_SYSINFO 32
11106@@ -323,7 +339,7 @@ do { \
11107
11108 #endif /* !CONFIG_X86_32 */
11109
11110-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
11111+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
11112
11113 #define VDSO_ENTRY \
11114 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
11115@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
11116 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
11117 #define compat_arch_setup_additional_pages syscall32_setup_pages
11118
11119-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
11120-#define arch_randomize_brk arch_randomize_brk
11121-
11122 #endif /* _ASM_X86_ELF_H */
11123diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
11124index cc70c1c..d96d011 100644
11125--- a/arch/x86/include/asm/emergency-restart.h
11126+++ b/arch/x86/include/asm/emergency-restart.h
11127@@ -15,6 +15,6 @@ enum reboot_type {
11128
11129 extern enum reboot_type reboot_type;
11130
11131-extern void machine_emergency_restart(void);
11132+extern void machine_emergency_restart(void) __noreturn;
11133
11134 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
11135diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
11136index 1f11ce4..7caabd1 100644
11137--- a/arch/x86/include/asm/futex.h
11138+++ b/arch/x86/include/asm/futex.h
11139@@ -12,16 +12,18 @@
11140 #include <asm/system.h>
11141
11142 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
11143+ typecheck(u32 __user *, uaddr); \
11144 asm volatile("1:\t" insn "\n" \
11145 "2:\t.section .fixup,\"ax\"\n" \
11146 "3:\tmov\t%3, %1\n" \
11147 "\tjmp\t2b\n" \
11148 "\t.previous\n" \
11149 _ASM_EXTABLE(1b, 3b) \
11150- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
11151+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
11152 : "i" (-EFAULT), "0" (oparg), "1" (0))
11153
11154 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
11155+ typecheck(u32 __user *, uaddr); \
11156 asm volatile("1:\tmovl %2, %0\n" \
11157 "\tmovl\t%0, %3\n" \
11158 "\t" insn "\n" \
11159@@ -34,10 +36,10 @@
11160 _ASM_EXTABLE(1b, 4b) \
11161 _ASM_EXTABLE(2b, 4b) \
11162 : "=&a" (oldval), "=&r" (ret), \
11163- "+m" (*uaddr), "=&r" (tem) \
11164+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
11165 : "r" (oparg), "i" (-EFAULT), "1" (0))
11166
11167-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11168+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
11169 {
11170 int op = (encoded_op >> 28) & 7;
11171 int cmp = (encoded_op >> 24) & 15;
11172@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11173
11174 switch (op) {
11175 case FUTEX_OP_SET:
11176- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
11177+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
11178 break;
11179 case FUTEX_OP_ADD:
11180- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
11181+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
11182 uaddr, oparg);
11183 break;
11184 case FUTEX_OP_OR:
11185@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
11186 return ret;
11187 }
11188
11189-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11190+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
11191 int newval)
11192 {
11193
11194@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
11195 return -ENOSYS;
11196 #endif
11197
11198- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
11199+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
11200 return -EFAULT;
11201
11202- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
11203+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
11204 "2:\t.section .fixup, \"ax\"\n"
11205 "3:\tmov %2, %0\n"
11206 "\tjmp 2b\n"
11207 "\t.previous\n"
11208 _ASM_EXTABLE(1b, 3b)
11209- : "=a" (oldval), "+m" (*uaddr)
11210+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
11211 : "i" (-EFAULT), "r" (newval), "0" (oldval)
11212 : "memory"
11213 );
11214diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
11215index ba180d9..3bad351 100644
11216--- a/arch/x86/include/asm/hw_irq.h
11217+++ b/arch/x86/include/asm/hw_irq.h
11218@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
11219 extern void enable_IO_APIC(void);
11220
11221 /* Statistics */
11222-extern atomic_t irq_err_count;
11223-extern atomic_t irq_mis_count;
11224+extern atomic_unchecked_t irq_err_count;
11225+extern atomic_unchecked_t irq_mis_count;
11226
11227 /* EISA */
11228 extern void eisa_set_level_irq(unsigned int irq);
11229diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
11230index 0b20bbb..4cb1396 100644
11231--- a/arch/x86/include/asm/i387.h
11232+++ b/arch/x86/include/asm/i387.h
11233@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11234 {
11235 int err;
11236
11237+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11238+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11239+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
11240+#endif
11241+
11242 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
11243 "2:\n"
11244 ".section .fixup,\"ax\"\n"
11245@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
11246 {
11247 int err;
11248
11249+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11250+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
11251+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
11252+#endif
11253+
11254 asm volatile("1: rex64/fxsave (%[fx])\n\t"
11255 "2:\n"
11256 ".section .fixup,\"ax\"\n"
11257@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
11258 }
11259
11260 /* We need a safe address that is cheap to find and that is already
11261- in L1 during context switch. The best choices are unfortunately
11262- different for UP and SMP */
11263-#ifdef CONFIG_SMP
11264-#define safe_address (__per_cpu_offset[0])
11265-#else
11266-#define safe_address (kstat_cpu(0).cpustat.user)
11267-#endif
11268+ in L1 during context switch. */
11269+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
11270
11271 /*
11272 * These must be called with preempt disabled
11273@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
11274 struct thread_info *me = current_thread_info();
11275 preempt_disable();
11276 if (me->status & TS_USEDFPU)
11277- __save_init_fpu(me->task);
11278+ __save_init_fpu(current);
11279 else
11280 clts();
11281 }
11282diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
11283index a299900..15c5410 100644
11284--- a/arch/x86/include/asm/io_32.h
11285+++ b/arch/x86/include/asm/io_32.h
11286@@ -3,6 +3,7 @@
11287
11288 #include <linux/string.h>
11289 #include <linux/compiler.h>
11290+#include <asm/processor.h>
11291
11292 /*
11293 * This file contains the definitions for the x86 IO instructions
11294@@ -42,6 +43,17 @@
11295
11296 #ifdef __KERNEL__
11297
11298+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11299+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11300+{
11301+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11302+}
11303+
11304+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11305+{
11306+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11307+}
11308+
11309 #include <asm-generic/iomap.h>
11310
11311 #include <linux/vmalloc.h>
11312diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
11313index 2440678..c158b88 100644
11314--- a/arch/x86/include/asm/io_64.h
11315+++ b/arch/x86/include/asm/io_64.h
11316@@ -140,6 +140,17 @@ __OUTS(l)
11317
11318 #include <linux/vmalloc.h>
11319
11320+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
11321+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
11322+{
11323+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11324+}
11325+
11326+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
11327+{
11328+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
11329+}
11330+
11331 #include <asm-generic/iomap.h>
11332
11333 void __memcpy_fromio(void *, unsigned long, unsigned);
11334diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
11335index fd6d21b..8b13915 100644
11336--- a/arch/x86/include/asm/iommu.h
11337+++ b/arch/x86/include/asm/iommu.h
11338@@ -3,7 +3,7 @@
11339
11340 extern void pci_iommu_shutdown(void);
11341 extern void no_iommu_init(void);
11342-extern struct dma_map_ops nommu_dma_ops;
11343+extern const struct dma_map_ops nommu_dma_ops;
11344 extern int force_iommu, no_iommu;
11345 extern int iommu_detected;
11346 extern int iommu_pass_through;
11347diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
11348index 9e2b952..557206e 100644
11349--- a/arch/x86/include/asm/irqflags.h
11350+++ b/arch/x86/include/asm/irqflags.h
11351@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
11352 sti; \
11353 sysexit
11354
11355+#define GET_CR0_INTO_RDI mov %cr0, %rdi
11356+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
11357+#define GET_CR3_INTO_RDI mov %cr3, %rdi
11358+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
11359+
11360 #else
11361 #define INTERRUPT_RETURN iret
11362 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
11363diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
11364index 4fe681d..bb6d40c 100644
11365--- a/arch/x86/include/asm/kprobes.h
11366+++ b/arch/x86/include/asm/kprobes.h
11367@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
11368 #define BREAKPOINT_INSTRUCTION 0xcc
11369 #define RELATIVEJUMP_INSTRUCTION 0xe9
11370 #define MAX_INSN_SIZE 16
11371-#define MAX_STACK_SIZE 64
11372-#define MIN_STACK_SIZE(ADDR) \
11373- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
11374- THREAD_SIZE - (unsigned long)(ADDR))) \
11375- ? (MAX_STACK_SIZE) \
11376- : (((unsigned long)current_thread_info()) + \
11377- THREAD_SIZE - (unsigned long)(ADDR)))
11378+#define MAX_STACK_SIZE 64UL
11379+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
11380
11381 #define flush_insn_slot(p) do { } while (0)
11382
11383diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
11384index 08bc2ff..2e88d1f 100644
11385--- a/arch/x86/include/asm/kvm_host.h
11386+++ b/arch/x86/include/asm/kvm_host.h
11387@@ -534,9 +534,9 @@ struct kvm_x86_ops {
11388 bool (*gb_page_enable)(void);
11389
11390 const struct trace_print_flags *exit_reasons_str;
11391-};
11392+} __do_const;
11393
11394-extern struct kvm_x86_ops *kvm_x86_ops;
11395+extern const struct kvm_x86_ops *kvm_x86_ops;
11396
11397 int kvm_mmu_module_init(void);
11398 void kvm_mmu_module_exit(void);
11399diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
11400index 47b9b6f..815aaa1 100644
11401--- a/arch/x86/include/asm/local.h
11402+++ b/arch/x86/include/asm/local.h
11403@@ -18,26 +18,58 @@ typedef struct {
11404
11405 static inline void local_inc(local_t *l)
11406 {
11407- asm volatile(_ASM_INC "%0"
11408+ asm volatile(_ASM_INC "%0\n"
11409+
11410+#ifdef CONFIG_PAX_REFCOUNT
11411+ "jno 0f\n"
11412+ _ASM_DEC "%0\n"
11413+ "int $4\n0:\n"
11414+ _ASM_EXTABLE(0b, 0b)
11415+#endif
11416+
11417 : "+m" (l->a.counter));
11418 }
11419
11420 static inline void local_dec(local_t *l)
11421 {
11422- asm volatile(_ASM_DEC "%0"
11423+ asm volatile(_ASM_DEC "%0\n"
11424+
11425+#ifdef CONFIG_PAX_REFCOUNT
11426+ "jno 0f\n"
11427+ _ASM_INC "%0\n"
11428+ "int $4\n0:\n"
11429+ _ASM_EXTABLE(0b, 0b)
11430+#endif
11431+
11432 : "+m" (l->a.counter));
11433 }
11434
11435 static inline void local_add(long i, local_t *l)
11436 {
11437- asm volatile(_ASM_ADD "%1,%0"
11438+ asm volatile(_ASM_ADD "%1,%0\n"
11439+
11440+#ifdef CONFIG_PAX_REFCOUNT
11441+ "jno 0f\n"
11442+ _ASM_SUB "%1,%0\n"
11443+ "int $4\n0:\n"
11444+ _ASM_EXTABLE(0b, 0b)
11445+#endif
11446+
11447 : "+m" (l->a.counter)
11448 : "ir" (i));
11449 }
11450
11451 static inline void local_sub(long i, local_t *l)
11452 {
11453- asm volatile(_ASM_SUB "%1,%0"
11454+ asm volatile(_ASM_SUB "%1,%0\n"
11455+
11456+#ifdef CONFIG_PAX_REFCOUNT
11457+ "jno 0f\n"
11458+ _ASM_ADD "%1,%0\n"
11459+ "int $4\n0:\n"
11460+ _ASM_EXTABLE(0b, 0b)
11461+#endif
11462+
11463 : "+m" (l->a.counter)
11464 : "ir" (i));
11465 }
11466@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
11467 {
11468 unsigned char c;
11469
11470- asm volatile(_ASM_SUB "%2,%0; sete %1"
11471+ asm volatile(_ASM_SUB "%2,%0\n"
11472+
11473+#ifdef CONFIG_PAX_REFCOUNT
11474+ "jno 0f\n"
11475+ _ASM_ADD "%2,%0\n"
11476+ "int $4\n0:\n"
11477+ _ASM_EXTABLE(0b, 0b)
11478+#endif
11479+
11480+ "sete %1\n"
11481 : "+m" (l->a.counter), "=qm" (c)
11482 : "ir" (i) : "memory");
11483 return c;
11484@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
11485 {
11486 unsigned char c;
11487
11488- asm volatile(_ASM_DEC "%0; sete %1"
11489+ asm volatile(_ASM_DEC "%0\n"
11490+
11491+#ifdef CONFIG_PAX_REFCOUNT
11492+ "jno 0f\n"
11493+ _ASM_INC "%0\n"
11494+ "int $4\n0:\n"
11495+ _ASM_EXTABLE(0b, 0b)
11496+#endif
11497+
11498+ "sete %1\n"
11499 : "+m" (l->a.counter), "=qm" (c)
11500 : : "memory");
11501 return c != 0;
11502@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
11503 {
11504 unsigned char c;
11505
11506- asm volatile(_ASM_INC "%0; sete %1"
11507+ asm volatile(_ASM_INC "%0\n"
11508+
11509+#ifdef CONFIG_PAX_REFCOUNT
11510+ "jno 0f\n"
11511+ _ASM_DEC "%0\n"
11512+ "int $4\n0:\n"
11513+ _ASM_EXTABLE(0b, 0b)
11514+#endif
11515+
11516+ "sete %1\n"
11517 : "+m" (l->a.counter), "=qm" (c)
11518 : : "memory");
11519 return c != 0;
11520@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
11521 {
11522 unsigned char c;
11523
11524- asm volatile(_ASM_ADD "%2,%0; sets %1"
11525+ asm volatile(_ASM_ADD "%2,%0\n"
11526+
11527+#ifdef CONFIG_PAX_REFCOUNT
11528+ "jno 0f\n"
11529+ _ASM_SUB "%2,%0\n"
11530+ "int $4\n0:\n"
11531+ _ASM_EXTABLE(0b, 0b)
11532+#endif
11533+
11534+ "sets %1\n"
11535 : "+m" (l->a.counter), "=qm" (c)
11536 : "ir" (i) : "memory");
11537 return c;
11538@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
11539 #endif
11540 /* Modern 486+ processor */
11541 __i = i;
11542- asm volatile(_ASM_XADD "%0, %1;"
11543+ asm volatile(_ASM_XADD "%0, %1\n"
11544+
11545+#ifdef CONFIG_PAX_REFCOUNT
11546+ "jno 0f\n"
11547+ _ASM_MOV "%0,%1\n"
11548+ "int $4\n0:\n"
11549+ _ASM_EXTABLE(0b, 0b)
11550+#endif
11551+
11552 : "+r" (i), "+m" (l->a.counter)
11553 : : "memory");
11554 return i + __i;
11555diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
11556index ef51b50..514ba37 100644
11557--- a/arch/x86/include/asm/microcode.h
11558+++ b/arch/x86/include/asm/microcode.h
11559@@ -12,13 +12,13 @@ struct device;
11560 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
11561
11562 struct microcode_ops {
11563- enum ucode_state (*request_microcode_user) (int cpu,
11564+ enum ucode_state (* const request_microcode_user) (int cpu,
11565 const void __user *buf, size_t size);
11566
11567- enum ucode_state (*request_microcode_fw) (int cpu,
11568+ enum ucode_state (* const request_microcode_fw) (int cpu,
11569 struct device *device);
11570
11571- void (*microcode_fini_cpu) (int cpu);
11572+ void (* const microcode_fini_cpu) (int cpu);
11573
11574 /*
11575 * The generic 'microcode_core' part guarantees that
11576@@ -38,18 +38,18 @@ struct ucode_cpu_info {
11577 extern struct ucode_cpu_info ucode_cpu_info[];
11578
11579 #ifdef CONFIG_MICROCODE_INTEL
11580-extern struct microcode_ops * __init init_intel_microcode(void);
11581+extern const struct microcode_ops * __init init_intel_microcode(void);
11582 #else
11583-static inline struct microcode_ops * __init init_intel_microcode(void)
11584+static inline const struct microcode_ops * __init init_intel_microcode(void)
11585 {
11586 return NULL;
11587 }
11588 #endif /* CONFIG_MICROCODE_INTEL */
11589
11590 #ifdef CONFIG_MICROCODE_AMD
11591-extern struct microcode_ops * __init init_amd_microcode(void);
11592+extern const struct microcode_ops * __init init_amd_microcode(void);
11593 #else
11594-static inline struct microcode_ops * __init init_amd_microcode(void)
11595+static inline const struct microcode_ops * __init init_amd_microcode(void)
11596 {
11597 return NULL;
11598 }
11599diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
11600index 593e51d..fa69c9a 100644
11601--- a/arch/x86/include/asm/mman.h
11602+++ b/arch/x86/include/asm/mman.h
11603@@ -5,4 +5,14 @@
11604
11605 #include <asm-generic/mman.h>
11606
11607+#ifdef __KERNEL__
11608+#ifndef __ASSEMBLY__
11609+#ifdef CONFIG_X86_32
11610+#define arch_mmap_check i386_mmap_check
11611+int i386_mmap_check(unsigned long addr, unsigned long len,
11612+ unsigned long flags);
11613+#endif
11614+#endif
11615+#endif
11616+
11617 #endif /* _ASM_X86_MMAN_H */
11618diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
11619index 80a1dee..239c67d 100644
11620--- a/arch/x86/include/asm/mmu.h
11621+++ b/arch/x86/include/asm/mmu.h
11622@@ -9,10 +9,23 @@
11623 * we put the segment information here.
11624 */
11625 typedef struct {
11626- void *ldt;
11627+ struct desc_struct *ldt;
11628 int size;
11629 struct mutex lock;
11630- void *vdso;
11631+ unsigned long vdso;
11632+
11633+#ifdef CONFIG_X86_32
11634+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
11635+ unsigned long user_cs_base;
11636+ unsigned long user_cs_limit;
11637+
11638+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11639+ cpumask_t cpu_user_cs_mask;
11640+#endif
11641+
11642+#endif
11643+#endif
11644+
11645 } mm_context_t;
11646
11647 #ifdef CONFIG_SMP
11648diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
11649index 8b5393e..8143173 100644
11650--- a/arch/x86/include/asm/mmu_context.h
11651+++ b/arch/x86/include/asm/mmu_context.h
11652@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
11653
11654 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
11655 {
11656+
11657+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11658+ unsigned int i;
11659+ pgd_t *pgd;
11660+
11661+ pax_open_kernel();
11662+ pgd = get_cpu_pgd(smp_processor_id());
11663+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
11664+ set_pgd_batched(pgd+i, native_make_pgd(0));
11665+ pax_close_kernel();
11666+#endif
11667+
11668 #ifdef CONFIG_SMP
11669 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11670 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
11671@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11672 struct task_struct *tsk)
11673 {
11674 unsigned cpu = smp_processor_id();
11675+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
11676+ int tlbstate = TLBSTATE_OK;
11677+#endif
11678
11679 if (likely(prev != next)) {
11680 #ifdef CONFIG_SMP
11681+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11682+ tlbstate = percpu_read(cpu_tlbstate.state);
11683+#endif
11684 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11685 percpu_write(cpu_tlbstate.active_mm, next);
11686 #endif
11687 cpumask_set_cpu(cpu, mm_cpumask(next));
11688
11689 /* Re-load page tables */
11690+#ifdef CONFIG_PAX_PER_CPU_PGD
11691+ pax_open_kernel();
11692+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11693+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11694+ pax_close_kernel();
11695+ load_cr3(get_cpu_pgd(cpu));
11696+#else
11697 load_cr3(next->pgd);
11698+#endif
11699
11700 /* stop flush ipis for the previous mm */
11701 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11702@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11703 */
11704 if (unlikely(prev->context.ldt != next->context.ldt))
11705 load_LDT_nolock(&next->context);
11706- }
11707+
11708+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11709+ if (!nx_enabled) {
11710+ smp_mb__before_clear_bit();
11711+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11712+ smp_mb__after_clear_bit();
11713+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11714+ }
11715+#endif
11716+
11717+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11718+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11719+ prev->context.user_cs_limit != next->context.user_cs_limit))
11720+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11721 #ifdef CONFIG_SMP
11722+ else if (unlikely(tlbstate != TLBSTATE_OK))
11723+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11724+#endif
11725+#endif
11726+
11727+ }
11728 else {
11729+
11730+#ifdef CONFIG_PAX_PER_CPU_PGD
11731+ pax_open_kernel();
11732+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
11733+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
11734+ pax_close_kernel();
11735+ load_cr3(get_cpu_pgd(cpu));
11736+#endif
11737+
11738+#ifdef CONFIG_SMP
11739 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11740 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11741
11742@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11743 * tlb flush IPI delivery. We must reload CR3
11744 * to make sure to use no freed page tables.
11745 */
11746+
11747+#ifndef CONFIG_PAX_PER_CPU_PGD
11748 load_cr3(next->pgd);
11749+#endif
11750+
11751 load_LDT_nolock(&next->context);
11752+
11753+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11754+ if (!nx_enabled)
11755+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11756+#endif
11757+
11758+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11759+#ifdef CONFIG_PAX_PAGEEXEC
11760+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
11761+#endif
11762+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11763+#endif
11764+
11765 }
11766+#endif
11767 }
11768-#endif
11769 }
11770
11771 #define activate_mm(prev, next) \
11772diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11773index 3e2ce58..caaf478 100644
11774--- a/arch/x86/include/asm/module.h
11775+++ b/arch/x86/include/asm/module.h
11776@@ -5,6 +5,7 @@
11777
11778 #ifdef CONFIG_X86_64
11779 /* X86_64 does not define MODULE_PROC_FAMILY */
11780+#define MODULE_PROC_FAMILY ""
11781 #elif defined CONFIG_M386
11782 #define MODULE_PROC_FAMILY "386 "
11783 #elif defined CONFIG_M486
11784@@ -59,13 +60,26 @@
11785 #error unknown processor family
11786 #endif
11787
11788-#ifdef CONFIG_X86_32
11789-# ifdef CONFIG_4KSTACKS
11790-# define MODULE_STACKSIZE "4KSTACKS "
11791-# else
11792-# define MODULE_STACKSIZE ""
11793-# endif
11794-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
11795+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
11796+#define MODULE_STACKSIZE "4KSTACKS "
11797+#else
11798+#define MODULE_STACKSIZE ""
11799 #endif
11800
11801+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11802+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11803+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11804+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11805+#else
11806+#define MODULE_PAX_KERNEXEC ""
11807+#endif
11808+
11809+#ifdef CONFIG_PAX_MEMORY_UDEREF
11810+#define MODULE_PAX_UDEREF "UDEREF "
11811+#else
11812+#define MODULE_PAX_UDEREF ""
11813+#endif
11814+
11815+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11816+
11817 #endif /* _ASM_X86_MODULE_H */
11818diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11819index 7639dbf..e08a58c 100644
11820--- a/arch/x86/include/asm/page_64_types.h
11821+++ b/arch/x86/include/asm/page_64_types.h
11822@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11823
11824 /* duplicated to the one in bootmem.h */
11825 extern unsigned long max_pfn;
11826-extern unsigned long phys_base;
11827+extern const unsigned long phys_base;
11828
11829 extern unsigned long __phys_addr(unsigned long);
11830 #define __phys_reloc_hide(x) (x)
11831diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11832index efb3899..ef30687 100644
11833--- a/arch/x86/include/asm/paravirt.h
11834+++ b/arch/x86/include/asm/paravirt.h
11835@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11836 val);
11837 }
11838
11839+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11840+{
11841+ pgdval_t val = native_pgd_val(pgd);
11842+
11843+ if (sizeof(pgdval_t) > sizeof(long))
11844+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11845+ val, (u64)val >> 32);
11846+ else
11847+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11848+ val);
11849+}
11850+
11851 static inline void pgd_clear(pgd_t *pgdp)
11852 {
11853 set_pgd(pgdp, __pgd(0));
11854@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11855 pv_mmu_ops.set_fixmap(idx, phys, flags);
11856 }
11857
11858+#ifdef CONFIG_PAX_KERNEXEC
11859+static inline unsigned long pax_open_kernel(void)
11860+{
11861+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11862+}
11863+
11864+static inline unsigned long pax_close_kernel(void)
11865+{
11866+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11867+}
11868+#else
11869+static inline unsigned long pax_open_kernel(void) { return 0; }
11870+static inline unsigned long pax_close_kernel(void) { return 0; }
11871+#endif
11872+
11873 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11874
11875 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
11876@@ -945,7 +972,7 @@ extern void default_banner(void);
11877
11878 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11879 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11880-#define PARA_INDIRECT(addr) *%cs:addr
11881+#define PARA_INDIRECT(addr) *%ss:addr
11882 #endif
11883
11884 #define INTERRUPT_RETURN \
11885@@ -1022,6 +1049,21 @@ extern void default_banner(void);
11886 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11887 CLBR_NONE, \
11888 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11889+
11890+#define GET_CR0_INTO_RDI \
11891+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11892+ mov %rax,%rdi
11893+
11894+#define SET_RDI_INTO_CR0 \
11895+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11896+
11897+#define GET_CR3_INTO_RDI \
11898+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11899+ mov %rax,%rdi
11900+
11901+#define SET_RDI_INTO_CR3 \
11902+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11903+
11904 #endif /* CONFIG_X86_32 */
11905
11906 #endif /* __ASSEMBLY__ */
11907diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11908index 9357473..aeb2de5 100644
11909--- a/arch/x86/include/asm/paravirt_types.h
11910+++ b/arch/x86/include/asm/paravirt_types.h
11911@@ -78,19 +78,19 @@ struct pv_init_ops {
11912 */
11913 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11914 unsigned long addr, unsigned len);
11915-};
11916+} __no_const;
11917
11918
11919 struct pv_lazy_ops {
11920 /* Set deferred update mode, used for batching operations. */
11921 void (*enter)(void);
11922 void (*leave)(void);
11923-};
11924+} __no_const;
11925
11926 struct pv_time_ops {
11927 unsigned long long (*sched_clock)(void);
11928 unsigned long (*get_tsc_khz)(void);
11929-};
11930+} __no_const;
11931
11932 struct pv_cpu_ops {
11933 /* hooks for various privileged instructions */
11934@@ -186,7 +186,7 @@ struct pv_cpu_ops {
11935
11936 void (*start_context_switch)(struct task_struct *prev);
11937 void (*end_context_switch)(struct task_struct *next);
11938-};
11939+} __no_const;
11940
11941 struct pv_irq_ops {
11942 /*
11943@@ -217,7 +217,7 @@ struct pv_apic_ops {
11944 unsigned long start_eip,
11945 unsigned long start_esp);
11946 #endif
11947-};
11948+} __no_const;
11949
11950 struct pv_mmu_ops {
11951 unsigned long (*read_cr2)(void);
11952@@ -301,6 +301,7 @@ struct pv_mmu_ops {
11953 struct paravirt_callee_save make_pud;
11954
11955 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11956+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11957 #endif /* PAGETABLE_LEVELS == 4 */
11958 #endif /* PAGETABLE_LEVELS >= 3 */
11959
11960@@ -316,6 +317,12 @@ struct pv_mmu_ops {
11961 an mfn. We can tell which is which from the index. */
11962 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11963 phys_addr_t phys, pgprot_t flags);
11964+
11965+#ifdef CONFIG_PAX_KERNEXEC
11966+ unsigned long (*pax_open_kernel)(void);
11967+ unsigned long (*pax_close_kernel)(void);
11968+#endif
11969+
11970 };
11971
11972 struct raw_spinlock;
11973@@ -326,7 +333,7 @@ struct pv_lock_ops {
11974 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
11975 int (*spin_trylock)(struct raw_spinlock *lock);
11976 void (*spin_unlock)(struct raw_spinlock *lock);
11977-};
11978+} __no_const;
11979
11980 /* This contains all the paravirt structures: we get a convenient
11981 * number for each function using the offset which we use to indicate
11982diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
11983index b399988..3f47c38 100644
11984--- a/arch/x86/include/asm/pci_x86.h
11985+++ b/arch/x86/include/asm/pci_x86.h
11986@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
11987 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
11988
11989 struct pci_raw_ops {
11990- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11991+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
11992 int reg, int len, u32 *val);
11993- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11994+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
11995 int reg, int len, u32 val);
11996 };
11997
11998-extern struct pci_raw_ops *raw_pci_ops;
11999-extern struct pci_raw_ops *raw_pci_ext_ops;
12000+extern const struct pci_raw_ops *raw_pci_ops;
12001+extern const struct pci_raw_ops *raw_pci_ext_ops;
12002
12003-extern struct pci_raw_ops pci_direct_conf1;
12004+extern const struct pci_raw_ops pci_direct_conf1;
12005 extern bool port_cf9_safe;
12006
12007 /* arch_initcall level */
12008diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
12009index b65a36d..50345a4 100644
12010--- a/arch/x86/include/asm/percpu.h
12011+++ b/arch/x86/include/asm/percpu.h
12012@@ -78,6 +78,7 @@ do { \
12013 if (0) { \
12014 T__ tmp__; \
12015 tmp__ = (val); \
12016+ (void)tmp__; \
12017 } \
12018 switch (sizeof(var)) { \
12019 case 1: \
12020diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
12021index 271de94..ef944d6 100644
12022--- a/arch/x86/include/asm/pgalloc.h
12023+++ b/arch/x86/include/asm/pgalloc.h
12024@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
12025 pmd_t *pmd, pte_t *pte)
12026 {
12027 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12028+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
12029+}
12030+
12031+static inline void pmd_populate_user(struct mm_struct *mm,
12032+ pmd_t *pmd, pte_t *pte)
12033+{
12034+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
12035 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
12036 }
12037
12038diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
12039index 2334982..70bc412 100644
12040--- a/arch/x86/include/asm/pgtable-2level.h
12041+++ b/arch/x86/include/asm/pgtable-2level.h
12042@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
12043
12044 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12045 {
12046+ pax_open_kernel();
12047 *pmdp = pmd;
12048+ pax_close_kernel();
12049 }
12050
12051 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12052diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
12053index 33927d2..ccde329 100644
12054--- a/arch/x86/include/asm/pgtable-3level.h
12055+++ b/arch/x86/include/asm/pgtable-3level.h
12056@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
12057
12058 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12059 {
12060+ pax_open_kernel();
12061 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
12062+ pax_close_kernel();
12063 }
12064
12065 static inline void native_set_pud(pud_t *pudp, pud_t pud)
12066 {
12067+ pax_open_kernel();
12068 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
12069+ pax_close_kernel();
12070 }
12071
12072 /*
12073diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
12074index af6fd36..867ff74 100644
12075--- a/arch/x86/include/asm/pgtable.h
12076+++ b/arch/x86/include/asm/pgtable.h
12077@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
12078
12079 #ifndef __PAGETABLE_PUD_FOLDED
12080 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
12081+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
12082 #define pgd_clear(pgd) native_pgd_clear(pgd)
12083 #endif
12084
12085@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
12086
12087 #define arch_end_context_switch(prev) do {} while(0)
12088
12089+#define pax_open_kernel() native_pax_open_kernel()
12090+#define pax_close_kernel() native_pax_close_kernel()
12091 #endif /* CONFIG_PARAVIRT */
12092
12093+#define __HAVE_ARCH_PAX_OPEN_KERNEL
12094+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
12095+
12096+#ifdef CONFIG_PAX_KERNEXEC
12097+static inline unsigned long native_pax_open_kernel(void)
12098+{
12099+ unsigned long cr0;
12100+
12101+ preempt_disable();
12102+ barrier();
12103+ cr0 = read_cr0() ^ X86_CR0_WP;
12104+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
12105+ write_cr0(cr0);
12106+ return cr0 ^ X86_CR0_WP;
12107+}
12108+
12109+static inline unsigned long native_pax_close_kernel(void)
12110+{
12111+ unsigned long cr0;
12112+
12113+ cr0 = read_cr0() ^ X86_CR0_WP;
12114+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
12115+ write_cr0(cr0);
12116+ barrier();
12117+ preempt_enable_no_resched();
12118+ return cr0 ^ X86_CR0_WP;
12119+}
12120+#else
12121+static inline unsigned long native_pax_open_kernel(void) { return 0; }
12122+static inline unsigned long native_pax_close_kernel(void) { return 0; }
12123+#endif
12124+
12125 /*
12126 * The following only work if pte_present() is true.
12127 * Undefined behaviour if not..
12128 */
12129+static inline int pte_user(pte_t pte)
12130+{
12131+ return pte_val(pte) & _PAGE_USER;
12132+}
12133+
12134 static inline int pte_dirty(pte_t pte)
12135 {
12136 return pte_flags(pte) & _PAGE_DIRTY;
12137@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
12138 return pte_clear_flags(pte, _PAGE_RW);
12139 }
12140
12141+static inline pte_t pte_mkread(pte_t pte)
12142+{
12143+ return __pte(pte_val(pte) | _PAGE_USER);
12144+}
12145+
12146 static inline pte_t pte_mkexec(pte_t pte)
12147 {
12148- return pte_clear_flags(pte, _PAGE_NX);
12149+#ifdef CONFIG_X86_PAE
12150+ if (__supported_pte_mask & _PAGE_NX)
12151+ return pte_clear_flags(pte, _PAGE_NX);
12152+ else
12153+#endif
12154+ return pte_set_flags(pte, _PAGE_USER);
12155+}
12156+
12157+static inline pte_t pte_exprotect(pte_t pte)
12158+{
12159+#ifdef CONFIG_X86_PAE
12160+ if (__supported_pte_mask & _PAGE_NX)
12161+ return pte_set_flags(pte, _PAGE_NX);
12162+ else
12163+#endif
12164+ return pte_clear_flags(pte, _PAGE_USER);
12165 }
12166
12167 static inline pte_t pte_mkdirty(pte_t pte)
12168@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
12169 #endif
12170
12171 #ifndef __ASSEMBLY__
12172+
12173+#ifdef CONFIG_PAX_PER_CPU_PGD
12174+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
12175+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
12176+{
12177+ return cpu_pgd[cpu];
12178+}
12179+#endif
12180+
12181 #include <linux/mm_types.h>
12182
12183 static inline int pte_none(pte_t pte)
12184@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
12185
12186 static inline int pgd_bad(pgd_t pgd)
12187 {
12188- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
12189+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
12190 }
12191
12192 static inline int pgd_none(pgd_t pgd)
12193@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
12194 * pgd_offset() returns a (pgd_t *)
12195 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
12196 */
12197-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
12198+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
12199+
12200+#ifdef CONFIG_PAX_PER_CPU_PGD
12201+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
12202+#endif
12203+
12204 /*
12205 * a shortcut which implies the use of the kernel's pgd, instead
12206 * of a process's
12207@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
12208 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
12209 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
12210
12211+#ifdef CONFIG_X86_32
12212+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
12213+#else
12214+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
12215+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
12216+
12217+#ifdef CONFIG_PAX_MEMORY_UDEREF
12218+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
12219+#else
12220+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
12221+#endif
12222+
12223+#endif
12224+
12225 #ifndef __ASSEMBLY__
12226
12227 extern int direct_gbpages;
12228@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
12229 * dst and src can be on the same page, but the range must not overlap,
12230 * and must not cross a page boundary.
12231 */
12232-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
12233+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
12234 {
12235- memcpy(dst, src, count * sizeof(pgd_t));
12236+ pax_open_kernel();
12237+ while (count--)
12238+ *dst++ = *src++;
12239+ pax_close_kernel();
12240 }
12241
12242+#ifdef CONFIG_PAX_PER_CPU_PGD
12243+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12244+#endif
12245+
12246+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12247+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
12248+#else
12249+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
12250+#endif
12251
12252 #include <asm-generic/pgtable.h>
12253 #endif /* __ASSEMBLY__ */
12254diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
12255index 750f1bf..971e839 100644
12256--- a/arch/x86/include/asm/pgtable_32.h
12257+++ b/arch/x86/include/asm/pgtable_32.h
12258@@ -26,9 +26,6 @@
12259 struct mm_struct;
12260 struct vm_area_struct;
12261
12262-extern pgd_t swapper_pg_dir[1024];
12263-extern pgd_t trampoline_pg_dir[1024];
12264-
12265 static inline void pgtable_cache_init(void) { }
12266 static inline void check_pgt_cache(void) { }
12267 void paging_init(void);
12268@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12269 # include <asm/pgtable-2level.h>
12270 #endif
12271
12272+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
12273+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
12274+#ifdef CONFIG_X86_PAE
12275+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
12276+#endif
12277+
12278 #if defined(CONFIG_HIGHPTE)
12279 #define __KM_PTE \
12280 (in_nmi() ? KM_NMI_PTE : \
12281@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
12282 /* Clear a kernel PTE and flush it from the TLB */
12283 #define kpte_clear_flush(ptep, vaddr) \
12284 do { \
12285+ pax_open_kernel(); \
12286 pte_clear(&init_mm, (vaddr), (ptep)); \
12287+ pax_close_kernel(); \
12288 __flush_tlb_one((vaddr)); \
12289 } while (0)
12290
12291@@ -85,6 +90,9 @@ do { \
12292
12293 #endif /* !__ASSEMBLY__ */
12294
12295+#define HAVE_ARCH_UNMAPPED_AREA
12296+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
12297+
12298 /*
12299 * kern_addr_valid() is (1) for FLATMEM and (0) for
12300 * SPARSEMEM and DISCONTIGMEM
12301diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
12302index 5e67c15..12d5c47 100644
12303--- a/arch/x86/include/asm/pgtable_32_types.h
12304+++ b/arch/x86/include/asm/pgtable_32_types.h
12305@@ -8,7 +8,7 @@
12306 */
12307 #ifdef CONFIG_X86_PAE
12308 # include <asm/pgtable-3level_types.h>
12309-# define PMD_SIZE (1UL << PMD_SHIFT)
12310+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
12311 # define PMD_MASK (~(PMD_SIZE - 1))
12312 #else
12313 # include <asm/pgtable-2level_types.h>
12314@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
12315 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
12316 #endif
12317
12318+#ifdef CONFIG_PAX_KERNEXEC
12319+#ifndef __ASSEMBLY__
12320+extern unsigned char MODULES_EXEC_VADDR[];
12321+extern unsigned char MODULES_EXEC_END[];
12322+#endif
12323+#include <asm/boot.h>
12324+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
12325+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
12326+#else
12327+#define ktla_ktva(addr) (addr)
12328+#define ktva_ktla(addr) (addr)
12329+#endif
12330+
12331 #define MODULES_VADDR VMALLOC_START
12332 #define MODULES_END VMALLOC_END
12333 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
12334diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
12335index c57a301..6b414ff 100644
12336--- a/arch/x86/include/asm/pgtable_64.h
12337+++ b/arch/x86/include/asm/pgtable_64.h
12338@@ -16,10 +16,14 @@
12339
12340 extern pud_t level3_kernel_pgt[512];
12341 extern pud_t level3_ident_pgt[512];
12342+extern pud_t level3_vmalloc_start_pgt[512];
12343+extern pud_t level3_vmalloc_end_pgt[512];
12344+extern pud_t level3_vmemmap_pgt[512];
12345+extern pud_t level2_vmemmap_pgt[512];
12346 extern pmd_t level2_kernel_pgt[512];
12347 extern pmd_t level2_fixmap_pgt[512];
12348-extern pmd_t level2_ident_pgt[512];
12349-extern pgd_t init_level4_pgt[];
12350+extern pmd_t level2_ident_pgt[512*2];
12351+extern pgd_t init_level4_pgt[512];
12352
12353 #define swapper_pg_dir init_level4_pgt
12354
12355@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
12356
12357 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
12358 {
12359+ pax_open_kernel();
12360 *pmdp = pmd;
12361+ pax_close_kernel();
12362 }
12363
12364 static inline void native_pmd_clear(pmd_t *pmd)
12365@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
12366
12367 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
12368 {
12369+ pax_open_kernel();
12370+ *pgdp = pgd;
12371+ pax_close_kernel();
12372+}
12373+
12374+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
12375+{
12376 *pgdp = pgd;
12377 }
12378
12379diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
12380index 766ea16..5b96cb3 100644
12381--- a/arch/x86/include/asm/pgtable_64_types.h
12382+++ b/arch/x86/include/asm/pgtable_64_types.h
12383@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
12384 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
12385 #define MODULES_END _AC(0xffffffffff000000, UL)
12386 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
12387+#define MODULES_EXEC_VADDR MODULES_VADDR
12388+#define MODULES_EXEC_END MODULES_END
12389+
12390+#define ktla_ktva(addr) (addr)
12391+#define ktva_ktla(addr) (addr)
12392
12393 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
12394diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
12395index d1f4a76..2f46ba1 100644
12396--- a/arch/x86/include/asm/pgtable_types.h
12397+++ b/arch/x86/include/asm/pgtable_types.h
12398@@ -16,12 +16,11 @@
12399 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
12400 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
12401 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
12402-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
12403+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
12404 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
12405 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
12406 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
12407-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
12408-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
12409+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
12410 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
12411
12412 /* If _PAGE_BIT_PRESENT is clear, we use these: */
12413@@ -39,7 +38,6 @@
12414 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
12415 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
12416 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
12417-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
12418 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
12419 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
12420 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
12421@@ -55,8 +53,10 @@
12422
12423 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
12424 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
12425-#else
12426+#elif defined(CONFIG_KMEMCHECK)
12427 #define _PAGE_NX (_AT(pteval_t, 0))
12428+#else
12429+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
12430 #endif
12431
12432 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
12433@@ -93,6 +93,9 @@
12434 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
12435 _PAGE_ACCESSED)
12436
12437+#define PAGE_READONLY_NOEXEC PAGE_READONLY
12438+#define PAGE_SHARED_NOEXEC PAGE_SHARED
12439+
12440 #define __PAGE_KERNEL_EXEC \
12441 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
12442 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
12443@@ -103,8 +106,8 @@
12444 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
12445 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
12446 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
12447-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
12448-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
12449+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
12450+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
12451 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
12452 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
12453 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
12454@@ -163,8 +166,8 @@
12455 * bits are combined, this will alow user to access the high address mapped
12456 * VDSO in the presence of CONFIG_COMPAT_VDSO
12457 */
12458-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
12459-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
12460+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12461+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
12462 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
12463 #endif
12464
12465@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
12466 {
12467 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
12468 }
12469+#endif
12470
12471+#if PAGETABLE_LEVELS == 3
12472+#include <asm-generic/pgtable-nopud.h>
12473+#endif
12474+
12475+#if PAGETABLE_LEVELS == 2
12476+#include <asm-generic/pgtable-nopmd.h>
12477+#endif
12478+
12479+#ifndef __ASSEMBLY__
12480 #if PAGETABLE_LEVELS > 3
12481 typedef struct { pudval_t pud; } pud_t;
12482
12483@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
12484 return pud.pud;
12485 }
12486 #else
12487-#include <asm-generic/pgtable-nopud.h>
12488-
12489 static inline pudval_t native_pud_val(pud_t pud)
12490 {
12491 return native_pgd_val(pud.pgd);
12492@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
12493 return pmd.pmd;
12494 }
12495 #else
12496-#include <asm-generic/pgtable-nopmd.h>
12497-
12498 static inline pmdval_t native_pmd_val(pmd_t pmd)
12499 {
12500 return native_pgd_val(pmd.pud.pgd);
12501@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
12502
12503 extern pteval_t __supported_pte_mask;
12504 extern void set_nx(void);
12505+
12506+#ifdef CONFIG_X86_32
12507+#ifdef CONFIG_X86_PAE
12508 extern int nx_enabled;
12509+#else
12510+#define nx_enabled (0)
12511+#endif
12512+#else
12513+#define nx_enabled (1)
12514+#endif
12515
12516 #define pgprot_writecombine pgprot_writecombine
12517 extern pgprot_t pgprot_writecombine(pgprot_t prot);
12518diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
12519index fa04dea..5f823fc 100644
12520--- a/arch/x86/include/asm/processor.h
12521+++ b/arch/x86/include/asm/processor.h
12522@@ -272,7 +272,7 @@ struct tss_struct {
12523
12524 } ____cacheline_aligned;
12525
12526-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
12527+extern struct tss_struct init_tss[NR_CPUS];
12528
12529 /*
12530 * Save the original ist values for checking stack pointers during debugging
12531@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
12532 */
12533 #define TASK_SIZE PAGE_OFFSET
12534 #define TASK_SIZE_MAX TASK_SIZE
12535+
12536+#ifdef CONFIG_PAX_SEGMEXEC
12537+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
12538+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
12539+#else
12540 #define STACK_TOP TASK_SIZE
12541-#define STACK_TOP_MAX STACK_TOP
12542+#endif
12543+
12544+#define STACK_TOP_MAX TASK_SIZE
12545
12546 #define INIT_THREAD { \
12547- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12548+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12549 .vm86_info = NULL, \
12550 .sysenter_cs = __KERNEL_CS, \
12551 .io_bitmap_ptr = NULL, \
12552@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
12553 */
12554 #define INIT_TSS { \
12555 .x86_tss = { \
12556- .sp0 = sizeof(init_stack) + (long)&init_stack, \
12557+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
12558 .ss0 = __KERNEL_DS, \
12559 .ss1 = __KERNEL_CS, \
12560 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
12561@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
12562 extern unsigned long thread_saved_pc(struct task_struct *tsk);
12563
12564 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
12565-#define KSTK_TOP(info) \
12566-({ \
12567- unsigned long *__ptr = (unsigned long *)(info); \
12568- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
12569-})
12570+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
12571
12572 /*
12573 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
12574@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12575 #define task_pt_regs(task) \
12576 ({ \
12577 struct pt_regs *__regs__; \
12578- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
12579+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
12580 __regs__ - 1; \
12581 })
12582
12583@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12584 /*
12585 * User space process size. 47bits minus one guard page.
12586 */
12587-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
12588+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
12589
12590 /* This decides where the kernel will search for a free chunk of vm
12591 * space during mmap's.
12592 */
12593 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
12594- 0xc0000000 : 0xFFFFe000)
12595+ 0xc0000000 : 0xFFFFf000)
12596
12597 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
12598 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
12599@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
12600 #define STACK_TOP_MAX TASK_SIZE_MAX
12601
12602 #define INIT_THREAD { \
12603- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12604+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12605 }
12606
12607 #define INIT_TSS { \
12608- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
12609+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
12610 }
12611
12612 /*
12613@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
12614 */
12615 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
12616
12617+#ifdef CONFIG_PAX_SEGMEXEC
12618+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
12619+#endif
12620+
12621 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
12622
12623 /* Get/set a process' ability to use the timestamp counter instruction */
12624diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
12625index 0f0d908..f2e3da2 100644
12626--- a/arch/x86/include/asm/ptrace.h
12627+++ b/arch/x86/include/asm/ptrace.h
12628@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
12629 }
12630
12631 /*
12632- * user_mode_vm(regs) determines whether a register set came from user mode.
12633+ * user_mode(regs) determines whether a register set came from user mode.
12634 * This is true if V8086 mode was enabled OR if the register set was from
12635 * protected mode with RPL-3 CS value. This tricky test checks that with
12636 * one comparison. Many places in the kernel can bypass this full check
12637- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
12638+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
12639+ * be used.
12640 */
12641-static inline int user_mode(struct pt_regs *regs)
12642+static inline int user_mode_novm(struct pt_regs *regs)
12643 {
12644 #ifdef CONFIG_X86_32
12645 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
12646 #else
12647- return !!(regs->cs & 3);
12648+ return !!(regs->cs & SEGMENT_RPL_MASK);
12649 #endif
12650 }
12651
12652-static inline int user_mode_vm(struct pt_regs *regs)
12653+static inline int user_mode(struct pt_regs *regs)
12654 {
12655 #ifdef CONFIG_X86_32
12656 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
12657 USER_RPL;
12658 #else
12659- return user_mode(regs);
12660+ return user_mode_novm(regs);
12661 #endif
12662 }
12663
12664diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12665index 562d4fd..6e39df1 100644
12666--- a/arch/x86/include/asm/reboot.h
12667+++ b/arch/x86/include/asm/reboot.h
12668@@ -6,19 +6,19 @@
12669 struct pt_regs;
12670
12671 struct machine_ops {
12672- void (*restart)(char *cmd);
12673- void (*halt)(void);
12674- void (*power_off)(void);
12675+ void (* __noreturn restart)(char *cmd);
12676+ void (* __noreturn halt)(void);
12677+ void (* __noreturn power_off)(void);
12678 void (*shutdown)(void);
12679 void (*crash_shutdown)(struct pt_regs *);
12680- void (*emergency_restart)(void);
12681-};
12682+ void (* __noreturn emergency_restart)(void);
12683+} __no_const;
12684
12685 extern struct machine_ops machine_ops;
12686
12687 void native_machine_crash_shutdown(struct pt_regs *regs);
12688 void native_machine_shutdown(void);
12689-void machine_real_restart(const unsigned char *code, int length);
12690+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
12691
12692 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
12693 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
12694diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12695index 606ede1..dbfff37 100644
12696--- a/arch/x86/include/asm/rwsem.h
12697+++ b/arch/x86/include/asm/rwsem.h
12698@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12699 {
12700 asm volatile("# beginning down_read\n\t"
12701 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12702+
12703+#ifdef CONFIG_PAX_REFCOUNT
12704+ "jno 0f\n"
12705+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
12706+ "int $4\n0:\n"
12707+ _ASM_EXTABLE(0b, 0b)
12708+#endif
12709+
12710 /* adds 0x00000001, returns the old value */
12711 " jns 1f\n"
12712 " call call_rwsem_down_read_failed\n"
12713@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12714 "1:\n\t"
12715 " mov %1,%2\n\t"
12716 " add %3,%2\n\t"
12717+
12718+#ifdef CONFIG_PAX_REFCOUNT
12719+ "jno 0f\n"
12720+ "sub %3,%2\n"
12721+ "int $4\n0:\n"
12722+ _ASM_EXTABLE(0b, 0b)
12723+#endif
12724+
12725 " jle 2f\n\t"
12726 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12727 " jnz 1b\n\t"
12728@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12729 tmp = RWSEM_ACTIVE_WRITE_BIAS;
12730 asm volatile("# beginning down_write\n\t"
12731 LOCK_PREFIX " xadd %1,(%2)\n\t"
12732+
12733+#ifdef CONFIG_PAX_REFCOUNT
12734+ "jno 0f\n"
12735+ "mov %1,(%2)\n"
12736+ "int $4\n0:\n"
12737+ _ASM_EXTABLE(0b, 0b)
12738+#endif
12739+
12740 /* subtract 0x0000ffff, returns the old value */
12741 " test %1,%1\n\t"
12742 /* was the count 0 before? */
12743@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12744 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
12745 asm volatile("# beginning __up_read\n\t"
12746 LOCK_PREFIX " xadd %1,(%2)\n\t"
12747+
12748+#ifdef CONFIG_PAX_REFCOUNT
12749+ "jno 0f\n"
12750+ "mov %1,(%2)\n"
12751+ "int $4\n0:\n"
12752+ _ASM_EXTABLE(0b, 0b)
12753+#endif
12754+
12755 /* subtracts 1, returns the old value */
12756 " jns 1f\n\t"
12757 " call call_rwsem_wake\n"
12758@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12759 rwsem_count_t tmp;
12760 asm volatile("# beginning __up_write\n\t"
12761 LOCK_PREFIX " xadd %1,(%2)\n\t"
12762+
12763+#ifdef CONFIG_PAX_REFCOUNT
12764+ "jno 0f\n"
12765+ "mov %1,(%2)\n"
12766+ "int $4\n0:\n"
12767+ _ASM_EXTABLE(0b, 0b)
12768+#endif
12769+
12770 /* tries to transition
12771 0xffff0001 -> 0x00000000 */
12772 " jz 1f\n"
12773@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12774 {
12775 asm volatile("# beginning __downgrade_write\n\t"
12776 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12777+
12778+#ifdef CONFIG_PAX_REFCOUNT
12779+ "jno 0f\n"
12780+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12781+ "int $4\n0:\n"
12782+ _ASM_EXTABLE(0b, 0b)
12783+#endif
12784+
12785 /*
12786 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12787 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12788@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12789 static inline void rwsem_atomic_add(rwsem_count_t delta,
12790 struct rw_semaphore *sem)
12791 {
12792- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12793+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12794+
12795+#ifdef CONFIG_PAX_REFCOUNT
12796+ "jno 0f\n"
12797+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
12798+ "int $4\n0:\n"
12799+ _ASM_EXTABLE(0b, 0b)
12800+#endif
12801+
12802 : "+m" (sem->count)
12803 : "er" (delta));
12804 }
12805@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
12806 {
12807 rwsem_count_t tmp = delta;
12808
12809- asm volatile(LOCK_PREFIX "xadd %0,%1"
12810+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
12811+
12812+#ifdef CONFIG_PAX_REFCOUNT
12813+ "jno 0f\n"
12814+ "mov %0,%1\n"
12815+ "int $4\n0:\n"
12816+ _ASM_EXTABLE(0b, 0b)
12817+#endif
12818+
12819 : "+r" (tmp), "+m" (sem->count)
12820 : : "memory");
12821
12822diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12823index 14e0ed8..7f7dd5e 100644
12824--- a/arch/x86/include/asm/segment.h
12825+++ b/arch/x86/include/asm/segment.h
12826@@ -62,10 +62,15 @@
12827 * 26 - ESPFIX small SS
12828 * 27 - per-cpu [ offset to per-cpu data area ]
12829 * 28 - stack_canary-20 [ for stack protector ]
12830- * 29 - unused
12831- * 30 - unused
12832+ * 29 - PCI BIOS CS
12833+ * 30 - PCI BIOS DS
12834 * 31 - TSS for double fault handler
12835 */
12836+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12837+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12838+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12839+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12840+
12841 #define GDT_ENTRY_TLS_MIN 6
12842 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12843
12844@@ -77,6 +82,8 @@
12845
12846 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
12847
12848+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12849+
12850 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
12851
12852 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
12853@@ -88,7 +95,7 @@
12854 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
12855 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
12856
12857-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12858+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
12859 #ifdef CONFIG_SMP
12860 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
12861 #else
12862@@ -102,6 +109,12 @@
12863 #define __KERNEL_STACK_CANARY 0
12864 #endif
12865
12866+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
12867+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12868+
12869+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
12870+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12871+
12872 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12873
12874 /*
12875@@ -139,7 +152,7 @@
12876 */
12877
12878 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12879-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12880+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12881
12882
12883 #else
12884@@ -163,6 +176,8 @@
12885 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
12886 #define __USER32_DS __USER_DS
12887
12888+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12889+
12890 #define GDT_ENTRY_TSS 8 /* needs two entries */
12891 #define GDT_ENTRY_LDT 10 /* needs two entries */
12892 #define GDT_ENTRY_TLS_MIN 12
12893@@ -183,6 +198,7 @@
12894 #endif
12895
12896 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
12897+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
12898 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
12899 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
12900 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
12901diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12902index 4c2f63c..5685db2 100644
12903--- a/arch/x86/include/asm/smp.h
12904+++ b/arch/x86/include/asm/smp.h
12905@@ -24,7 +24,7 @@ extern unsigned int num_processors;
12906 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
12907 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12908 DECLARE_PER_CPU(u16, cpu_llc_id);
12909-DECLARE_PER_CPU(int, cpu_number);
12910+DECLARE_PER_CPU(unsigned int, cpu_number);
12911
12912 static inline struct cpumask *cpu_sibling_mask(int cpu)
12913 {
12914@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
12915 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
12916
12917 /* Static state in head.S used to set up a CPU */
12918-extern struct {
12919- void *sp;
12920- unsigned short ss;
12921-} stack_start;
12922+extern unsigned long stack_start; /* Initial stack pointer address */
12923
12924 struct smp_ops {
12925 void (*smp_prepare_boot_cpu)(void);
12926@@ -60,7 +57,7 @@ struct smp_ops {
12927
12928 void (*send_call_func_ipi)(const struct cpumask *mask);
12929 void (*send_call_func_single_ipi)(int cpu);
12930-};
12931+} __no_const;
12932
12933 /* Globals due to paravirt */
12934 extern void set_cpu_sibling_map(int cpu);
12935@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12936 extern int safe_smp_processor_id(void);
12937
12938 #elif defined(CONFIG_X86_64_SMP)
12939-#define raw_smp_processor_id() (percpu_read(cpu_number))
12940-
12941-#define stack_smp_processor_id() \
12942-({ \
12943- struct thread_info *ti; \
12944- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12945- ti->cpu; \
12946-})
12947+#define raw_smp_processor_id() (percpu_read(cpu_number))
12948+#define stack_smp_processor_id() raw_smp_processor_id()
12949 #define safe_smp_processor_id() smp_processor_id()
12950
12951 #endif
12952diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12953index 4e77853..4359783 100644
12954--- a/arch/x86/include/asm/spinlock.h
12955+++ b/arch/x86/include/asm/spinlock.h
12956@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
12957 static inline void __raw_read_lock(raw_rwlock_t *rw)
12958 {
12959 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
12960+
12961+#ifdef CONFIG_PAX_REFCOUNT
12962+ "jno 0f\n"
12963+ LOCK_PREFIX " addl $1,(%0)\n"
12964+ "int $4\n0:\n"
12965+ _ASM_EXTABLE(0b, 0b)
12966+#endif
12967+
12968 "jns 1f\n"
12969 "call __read_lock_failed\n\t"
12970 "1:\n"
12971@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
12972 static inline void __raw_write_lock(raw_rwlock_t *rw)
12973 {
12974 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
12975+
12976+#ifdef CONFIG_PAX_REFCOUNT
12977+ "jno 0f\n"
12978+ LOCK_PREFIX " addl %1,(%0)\n"
12979+ "int $4\n0:\n"
12980+ _ASM_EXTABLE(0b, 0b)
12981+#endif
12982+
12983 "jz 1f\n"
12984 "call __write_lock_failed\n\t"
12985 "1:\n"
12986@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
12987
12988 static inline void __raw_read_unlock(raw_rwlock_t *rw)
12989 {
12990- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
12991+ asm volatile(LOCK_PREFIX "incl %0\n"
12992+
12993+#ifdef CONFIG_PAX_REFCOUNT
12994+ "jno 0f\n"
12995+ LOCK_PREFIX "decl %0\n"
12996+ "int $4\n0:\n"
12997+ _ASM_EXTABLE(0b, 0b)
12998+#endif
12999+
13000+ :"+m" (rw->lock) : : "memory");
13001 }
13002
13003 static inline void __raw_write_unlock(raw_rwlock_t *rw)
13004 {
13005- asm volatile(LOCK_PREFIX "addl %1, %0"
13006+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
13007+
13008+#ifdef CONFIG_PAX_REFCOUNT
13009+ "jno 0f\n"
13010+ LOCK_PREFIX "subl %1, %0\n"
13011+ "int $4\n0:\n"
13012+ _ASM_EXTABLE(0b, 0b)
13013+#endif
13014+
13015 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
13016 }
13017
13018diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
13019index 1575177..cb23f52 100644
13020--- a/arch/x86/include/asm/stackprotector.h
13021+++ b/arch/x86/include/asm/stackprotector.h
13022@@ -48,7 +48,7 @@
13023 * head_32 for boot CPU and setup_per_cpu_areas() for others.
13024 */
13025 #define GDT_STACK_CANARY_INIT \
13026- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
13027+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
13028
13029 /*
13030 * Initialize the stackprotector canary value.
13031@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
13032
13033 static inline void load_stack_canary_segment(void)
13034 {
13035-#ifdef CONFIG_X86_32
13036+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
13037 asm volatile ("mov %0, %%gs" : : "r" (0));
13038 #endif
13039 }
13040diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
13041index e0fbf29..858ef4a 100644
13042--- a/arch/x86/include/asm/system.h
13043+++ b/arch/x86/include/asm/system.h
13044@@ -132,7 +132,7 @@ do { \
13045 "thread_return:\n\t" \
13046 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
13047 __switch_canary \
13048- "movq %P[thread_info](%%rsi),%%r8\n\t" \
13049+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
13050 "movq %%rax,%%rdi\n\t" \
13051 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
13052 "jnz ret_from_fork\n\t" \
13053@@ -143,7 +143,7 @@ do { \
13054 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
13055 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
13056 [_tif_fork] "i" (_TIF_FORK), \
13057- [thread_info] "i" (offsetof(struct task_struct, stack)), \
13058+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
13059 [current_task] "m" (per_cpu_var(current_task)) \
13060 __switch_canary_iparam \
13061 : "memory", "cc" __EXTRA_CLOBBER)
13062@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
13063 {
13064 unsigned long __limit;
13065 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
13066- return __limit + 1;
13067+ return __limit;
13068 }
13069
13070 static inline void native_clts(void)
13071@@ -340,12 +340,12 @@ void enable_hlt(void);
13072
13073 void cpu_idle_wait(void);
13074
13075-extern unsigned long arch_align_stack(unsigned long sp);
13076+#define arch_align_stack(x) ((x) & ~0xfUL)
13077 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
13078
13079 void default_idle(void);
13080
13081-void stop_this_cpu(void *dummy);
13082+void stop_this_cpu(void *dummy) __noreturn;
13083
13084 /*
13085 * Force strict CPU ordering.
13086diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
13087index 19c3ce4..8962535 100644
13088--- a/arch/x86/include/asm/thread_info.h
13089+++ b/arch/x86/include/asm/thread_info.h
13090@@ -10,6 +10,7 @@
13091 #include <linux/compiler.h>
13092 #include <asm/page.h>
13093 #include <asm/types.h>
13094+#include <asm/percpu.h>
13095
13096 /*
13097 * low level task data that entry.S needs immediate access to
13098@@ -24,7 +25,6 @@ struct exec_domain;
13099 #include <asm/atomic.h>
13100
13101 struct thread_info {
13102- struct task_struct *task; /* main task structure */
13103 struct exec_domain *exec_domain; /* execution domain */
13104 __u32 flags; /* low level flags */
13105 __u32 status; /* thread synchronous flags */
13106@@ -34,18 +34,12 @@ struct thread_info {
13107 mm_segment_t addr_limit;
13108 struct restart_block restart_block;
13109 void __user *sysenter_return;
13110-#ifdef CONFIG_X86_32
13111- unsigned long previous_esp; /* ESP of the previous stack in
13112- case of nested (IRQ) stacks
13113- */
13114- __u8 supervisor_stack[0];
13115-#endif
13116+ unsigned long lowest_stack;
13117 int uaccess_err;
13118 };
13119
13120-#define INIT_THREAD_INFO(tsk) \
13121+#define INIT_THREAD_INFO \
13122 { \
13123- .task = &tsk, \
13124 .exec_domain = &default_exec_domain, \
13125 .flags = 0, \
13126 .cpu = 0, \
13127@@ -56,7 +50,7 @@ struct thread_info {
13128 }, \
13129 }
13130
13131-#define init_thread_info (init_thread_union.thread_info)
13132+#define init_thread_info (init_thread_union.stack)
13133 #define init_stack (init_thread_union.stack)
13134
13135 #else /* !__ASSEMBLY__ */
13136@@ -163,45 +157,40 @@ struct thread_info {
13137 #define alloc_thread_info(tsk) \
13138 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
13139
13140-#ifdef CONFIG_X86_32
13141-
13142-#define STACK_WARN (THREAD_SIZE/8)
13143-/*
13144- * macros/functions for gaining access to the thread information structure
13145- *
13146- * preempt_count needs to be 1 initially, until the scheduler is functional.
13147- */
13148-#ifndef __ASSEMBLY__
13149-
13150-
13151-/* how to get the current stack pointer from C */
13152-register unsigned long current_stack_pointer asm("esp") __used;
13153-
13154-/* how to get the thread information struct from C */
13155-static inline struct thread_info *current_thread_info(void)
13156-{
13157- return (struct thread_info *)
13158- (current_stack_pointer & ~(THREAD_SIZE - 1));
13159-}
13160-
13161-#else /* !__ASSEMBLY__ */
13162-
13163+#ifdef __ASSEMBLY__
13164 /* how to get the thread information struct from ASM */
13165 #define GET_THREAD_INFO(reg) \
13166- movl $-THREAD_SIZE, reg; \
13167- andl %esp, reg
13168+ mov PER_CPU_VAR(current_tinfo), reg
13169
13170 /* use this one if reg already contains %esp */
13171-#define GET_THREAD_INFO_WITH_ESP(reg) \
13172- andl $-THREAD_SIZE, reg
13173+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
13174+#else
13175+/* how to get the thread information struct from C */
13176+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
13177+
13178+static __always_inline struct thread_info *current_thread_info(void)
13179+{
13180+ return percpu_read_stable(current_tinfo);
13181+}
13182+#endif
13183+
13184+#ifdef CONFIG_X86_32
13185+
13186+#define STACK_WARN (THREAD_SIZE/8)
13187+/*
13188+ * macros/functions for gaining access to the thread information structure
13189+ *
13190+ * preempt_count needs to be 1 initially, until the scheduler is functional.
13191+ */
13192+#ifndef __ASSEMBLY__
13193+
13194+/* how to get the current stack pointer from C */
13195+register unsigned long current_stack_pointer asm("esp") __used;
13196
13197 #endif
13198
13199 #else /* X86_32 */
13200
13201-#include <asm/percpu.h>
13202-#define KERNEL_STACK_OFFSET (5*8)
13203-
13204 /*
13205 * macros/functions for gaining access to the thread information structure
13206 * preempt_count needs to be 1 initially, until the scheduler is functional.
13207@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
13208 #ifndef __ASSEMBLY__
13209 DECLARE_PER_CPU(unsigned long, kernel_stack);
13210
13211-static inline struct thread_info *current_thread_info(void)
13212-{
13213- struct thread_info *ti;
13214- ti = (void *)(percpu_read_stable(kernel_stack) +
13215- KERNEL_STACK_OFFSET - THREAD_SIZE);
13216- return ti;
13217-}
13218-
13219-#else /* !__ASSEMBLY__ */
13220-
13221-/* how to get the thread information struct from ASM */
13222-#define GET_THREAD_INFO(reg) \
13223- movq PER_CPU_VAR(kernel_stack),reg ; \
13224- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
13225-
13226+/* how to get the current stack pointer from C */
13227+register unsigned long current_stack_pointer asm("rsp") __used;
13228 #endif
13229
13230 #endif /* !X86_32 */
13231@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
13232 extern void free_thread_info(struct thread_info *ti);
13233 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
13234 #define arch_task_cache_init arch_task_cache_init
13235+
13236+#define __HAVE_THREAD_FUNCTIONS
13237+#define task_thread_info(task) (&(task)->tinfo)
13238+#define task_stack_page(task) ((task)->stack)
13239+#define setup_thread_stack(p, org) do {} while (0)
13240+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
13241+
13242+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
13243+extern struct task_struct *alloc_task_struct(void);
13244+extern void free_task_struct(struct task_struct *);
13245+
13246 #endif
13247 #endif /* _ASM_X86_THREAD_INFO_H */
13248diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
13249index 61c5874..8a046e9 100644
13250--- a/arch/x86/include/asm/uaccess.h
13251+++ b/arch/x86/include/asm/uaccess.h
13252@@ -8,12 +8,15 @@
13253 #include <linux/thread_info.h>
13254 #include <linux/prefetch.h>
13255 #include <linux/string.h>
13256+#include <linux/sched.h>
13257 #include <asm/asm.h>
13258 #include <asm/page.h>
13259
13260 #define VERIFY_READ 0
13261 #define VERIFY_WRITE 1
13262
13263+extern void check_object_size(const void *ptr, unsigned long n, bool to);
13264+
13265 /*
13266 * The fs value determines whether argument validity checking should be
13267 * performed or not. If get_fs() == USER_DS, checking is performed, with
13268@@ -29,7 +32,12 @@
13269
13270 #define get_ds() (KERNEL_DS)
13271 #define get_fs() (current_thread_info()->addr_limit)
13272+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13273+void __set_fs(mm_segment_t x);
13274+void set_fs(mm_segment_t x);
13275+#else
13276 #define set_fs(x) (current_thread_info()->addr_limit = (x))
13277+#endif
13278
13279 #define segment_eq(a, b) ((a).seg == (b).seg)
13280
13281@@ -77,7 +85,33 @@
13282 * checks that the pointer is in the user space range - after calling
13283 * this function, memory access functions may still return -EFAULT.
13284 */
13285-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13286+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
13287+#define access_ok(type, addr, size) \
13288+({ \
13289+ long __size = size; \
13290+ unsigned long __addr = (unsigned long)addr; \
13291+ unsigned long __addr_ao = __addr & PAGE_MASK; \
13292+ unsigned long __end_ao = __addr + __size - 1; \
13293+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
13294+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
13295+ while(__addr_ao <= __end_ao) { \
13296+ char __c_ao; \
13297+ __addr_ao += PAGE_SIZE; \
13298+ if (__size > PAGE_SIZE) \
13299+ cond_resched(); \
13300+ if (__get_user(__c_ao, (char __user *)__addr)) \
13301+ break; \
13302+ if (type != VERIFY_WRITE) { \
13303+ __addr = __addr_ao; \
13304+ continue; \
13305+ } \
13306+ if (__put_user(__c_ao, (char __user *)__addr)) \
13307+ break; \
13308+ __addr = __addr_ao; \
13309+ } \
13310+ } \
13311+ __ret_ao; \
13312+})
13313
13314 /*
13315 * The exception table consists of pairs of addresses: the first is the
13316@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
13317 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
13318 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
13319
13320-
13321+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
13322+#define __copyuser_seg "gs;"
13323+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
13324+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
13325+#else
13326+#define __copyuser_seg
13327+#define __COPYUSER_SET_ES
13328+#define __COPYUSER_RESTORE_ES
13329+#endif
13330
13331 #ifdef CONFIG_X86_32
13332 #define __put_user_asm_u64(x, addr, err, errret) \
13333- asm volatile("1: movl %%eax,0(%2)\n" \
13334- "2: movl %%edx,4(%2)\n" \
13335+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
13336+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
13337 "3:\n" \
13338 ".section .fixup,\"ax\"\n" \
13339 "4: movl %3,%0\n" \
13340@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
13341 : "A" (x), "r" (addr), "i" (errret), "0" (err))
13342
13343 #define __put_user_asm_ex_u64(x, addr) \
13344- asm volatile("1: movl %%eax,0(%1)\n" \
13345- "2: movl %%edx,4(%1)\n" \
13346+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
13347+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
13348 "3:\n" \
13349 _ASM_EXTABLE(1b, 2b - 1b) \
13350 _ASM_EXTABLE(2b, 3b - 2b) \
13351@@ -253,7 +295,7 @@ extern void __put_user_8(void);
13352 __typeof__(*(ptr)) __pu_val; \
13353 __chk_user_ptr(ptr); \
13354 might_fault(); \
13355- __pu_val = x; \
13356+ __pu_val = (x); \
13357 switch (sizeof(*(ptr))) { \
13358 case 1: \
13359 __put_user_x(1, __pu_val, ptr, __ret_pu); \
13360@@ -374,7 +416,7 @@ do { \
13361 } while (0)
13362
13363 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13364- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
13365+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
13366 "2:\n" \
13367 ".section .fixup,\"ax\"\n" \
13368 "3: mov %3,%0\n" \
13369@@ -382,7 +424,7 @@ do { \
13370 " jmp 2b\n" \
13371 ".previous\n" \
13372 _ASM_EXTABLE(1b, 3b) \
13373- : "=r" (err), ltype(x) \
13374+ : "=r" (err), ltype (x) \
13375 : "m" (__m(addr)), "i" (errret), "0" (err))
13376
13377 #define __get_user_size_ex(x, ptr, size) \
13378@@ -407,7 +449,7 @@ do { \
13379 } while (0)
13380
13381 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
13382- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
13383+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
13384 "2:\n" \
13385 _ASM_EXTABLE(1b, 2b - 1b) \
13386 : ltype(x) : "m" (__m(addr)))
13387@@ -424,13 +466,24 @@ do { \
13388 int __gu_err; \
13389 unsigned long __gu_val; \
13390 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
13391- (x) = (__force __typeof__(*(ptr)))__gu_val; \
13392+ (x) = (__typeof__(*(ptr)))__gu_val; \
13393 __gu_err; \
13394 })
13395
13396 /* FIXME: this hack is definitely wrong -AK */
13397 struct __large_struct { unsigned long buf[100]; };
13398-#define __m(x) (*(struct __large_struct __user *)(x))
13399+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13400+#define ____m(x) \
13401+({ \
13402+ unsigned long ____x = (unsigned long)(x); \
13403+ if (____x < PAX_USER_SHADOW_BASE) \
13404+ ____x += PAX_USER_SHADOW_BASE; \
13405+ (void __user *)____x; \
13406+})
13407+#else
13408+#define ____m(x) (x)
13409+#endif
13410+#define __m(x) (*(struct __large_struct __user *)____m(x))
13411
13412 /*
13413 * Tell gcc we read from memory instead of writing: this is because
13414@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
13415 * aliasing issues.
13416 */
13417 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
13418- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
13419+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
13420 "2:\n" \
13421 ".section .fixup,\"ax\"\n" \
13422 "3: mov %3,%0\n" \
13423@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
13424 ".previous\n" \
13425 _ASM_EXTABLE(1b, 3b) \
13426 : "=r"(err) \
13427- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
13428+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
13429
13430 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
13431- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
13432+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
13433 "2:\n" \
13434 _ASM_EXTABLE(1b, 2b - 1b) \
13435 : : ltype(x), "m" (__m(addr)))
13436@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
13437 * On error, the variable @x is set to zero.
13438 */
13439
13440+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13441+#define __get_user(x, ptr) get_user((x), (ptr))
13442+#else
13443 #define __get_user(x, ptr) \
13444 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
13445+#endif
13446
13447 /**
13448 * __put_user: - Write a simple value into user space, with less checking.
13449@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
13450 * Returns zero on success, or -EFAULT on error.
13451 */
13452
13453+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13454+#define __put_user(x, ptr) put_user((x), (ptr))
13455+#else
13456 #define __put_user(x, ptr) \
13457 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
13458+#endif
13459
13460 #define __get_user_unaligned __get_user
13461 #define __put_user_unaligned __put_user
13462@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
13463 #define get_user_ex(x, ptr) do { \
13464 unsigned long __gue_val; \
13465 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
13466- (x) = (__force __typeof__(*(ptr)))__gue_val; \
13467+ (x) = (__typeof__(*(ptr)))__gue_val; \
13468 } while (0)
13469
13470 #ifdef CONFIG_X86_WP_WORKS_OK
13471@@ -567,6 +628,7 @@ extern struct movsl_mask {
13472
13473 #define ARCH_HAS_NOCACHE_UACCESS 1
13474
13475+#define ARCH_HAS_SORT_EXTABLE
13476 #ifdef CONFIG_X86_32
13477 # include "uaccess_32.h"
13478 #else
13479diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
13480index 632fb44..e30e334 100644
13481--- a/arch/x86/include/asm/uaccess_32.h
13482+++ b/arch/x86/include/asm/uaccess_32.h
13483@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
13484 static __always_inline unsigned long __must_check
13485 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13486 {
13487+ pax_track_stack();
13488+
13489+ if ((long)n < 0)
13490+ return n;
13491+
13492 if (__builtin_constant_p(n)) {
13493 unsigned long ret;
13494
13495@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
13496 return ret;
13497 }
13498 }
13499+ if (!__builtin_constant_p(n))
13500+ check_object_size(from, n, true);
13501 return __copy_to_user_ll(to, from, n);
13502 }
13503
13504@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
13505 __copy_to_user(void __user *to, const void *from, unsigned long n)
13506 {
13507 might_fault();
13508+
13509 return __copy_to_user_inatomic(to, from, n);
13510 }
13511
13512 static __always_inline unsigned long
13513 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
13514 {
13515+ if ((long)n < 0)
13516+ return n;
13517+
13518 /* Avoid zeroing the tail if the copy fails..
13519 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
13520 * but as the zeroing behaviour is only significant when n is not
13521@@ -138,6 +149,12 @@ static __always_inline unsigned long
13522 __copy_from_user(void *to, const void __user *from, unsigned long n)
13523 {
13524 might_fault();
13525+
13526+ pax_track_stack();
13527+
13528+ if ((long)n < 0)
13529+ return n;
13530+
13531 if (__builtin_constant_p(n)) {
13532 unsigned long ret;
13533
13534@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
13535 return ret;
13536 }
13537 }
13538+ if (!__builtin_constant_p(n))
13539+ check_object_size(to, n, false);
13540 return __copy_from_user_ll(to, from, n);
13541 }
13542
13543@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
13544 const void __user *from, unsigned long n)
13545 {
13546 might_fault();
13547+
13548+ if ((long)n < 0)
13549+ return n;
13550+
13551 if (__builtin_constant_p(n)) {
13552 unsigned long ret;
13553
13554@@ -182,14 +205,62 @@ static __always_inline unsigned long
13555 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
13556 unsigned long n)
13557 {
13558- return __copy_from_user_ll_nocache_nozero(to, from, n);
13559+ if ((long)n < 0)
13560+ return n;
13561+
13562+ return __copy_from_user_ll_nocache_nozero(to, from, n);
13563+}
13564+
13565+/**
13566+ * copy_to_user: - Copy a block of data into user space.
13567+ * @to: Destination address, in user space.
13568+ * @from: Source address, in kernel space.
13569+ * @n: Number of bytes to copy.
13570+ *
13571+ * Context: User context only. This function may sleep.
13572+ *
13573+ * Copy data from kernel space to user space.
13574+ *
13575+ * Returns number of bytes that could not be copied.
13576+ * On success, this will be zero.
13577+ */
13578+static __always_inline unsigned long __must_check
13579+copy_to_user(void __user *to, const void *from, unsigned long n)
13580+{
13581+ if (access_ok(VERIFY_WRITE, to, n))
13582+ n = __copy_to_user(to, from, n);
13583+ return n;
13584+}
13585+
13586+/**
13587+ * copy_from_user: - Copy a block of data from user space.
13588+ * @to: Destination address, in kernel space.
13589+ * @from: Source address, in user space.
13590+ * @n: Number of bytes to copy.
13591+ *
13592+ * Context: User context only. This function may sleep.
13593+ *
13594+ * Copy data from user space to kernel space.
13595+ *
13596+ * Returns number of bytes that could not be copied.
13597+ * On success, this will be zero.
13598+ *
13599+ * If some data could not be copied, this function will pad the copied
13600+ * data to the requested size using zero bytes.
13601+ */
13602+static __always_inline unsigned long __must_check
13603+copy_from_user(void *to, const void __user *from, unsigned long n)
13604+{
13605+ if (access_ok(VERIFY_READ, from, n))
13606+ n = __copy_from_user(to, from, n);
13607+ else if ((long)n > 0) {
13608+ if (!__builtin_constant_p(n))
13609+ check_object_size(to, n, false);
13610+ memset(to, 0, n);
13611+ }
13612+ return n;
13613 }
13614
13615-unsigned long __must_check copy_to_user(void __user *to,
13616- const void *from, unsigned long n);
13617-unsigned long __must_check copy_from_user(void *to,
13618- const void __user *from,
13619- unsigned long n);
13620 long __must_check strncpy_from_user(char *dst, const char __user *src,
13621 long count);
13622 long __must_check __strncpy_from_user(char *dst,
13623diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13624index db24b21..f595ae7 100644
13625--- a/arch/x86/include/asm/uaccess_64.h
13626+++ b/arch/x86/include/asm/uaccess_64.h
13627@@ -9,6 +9,9 @@
13628 #include <linux/prefetch.h>
13629 #include <linux/lockdep.h>
13630 #include <asm/page.h>
13631+#include <asm/pgtable.h>
13632+
13633+#define set_fs(x) (current_thread_info()->addr_limit = (x))
13634
13635 /*
13636 * Copy To/From Userspace
13637@@ -16,116 +19,205 @@
13638
13639 /* Handles exceptions in both to and from, but doesn't do access_ok */
13640 __must_check unsigned long
13641-copy_user_generic(void *to, const void *from, unsigned len);
13642+copy_user_generic(void *to, const void *from, unsigned long len);
13643
13644 __must_check unsigned long
13645-copy_to_user(void __user *to, const void *from, unsigned len);
13646-__must_check unsigned long
13647-copy_from_user(void *to, const void __user *from, unsigned len);
13648-__must_check unsigned long
13649-copy_in_user(void __user *to, const void __user *from, unsigned len);
13650+copy_in_user(void __user *to, const void __user *from, unsigned long len);
13651
13652 static __always_inline __must_check
13653-int __copy_from_user(void *dst, const void __user *src, unsigned size)
13654+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13655 {
13656- int ret = 0;
13657+ unsigned ret = 0;
13658
13659 might_fault();
13660- if (!__builtin_constant_p(size))
13661- return copy_user_generic(dst, (__force void *)src, size);
13662+
13663+ if (size > INT_MAX)
13664+ return size;
13665+
13666+#ifdef CONFIG_PAX_MEMORY_UDEREF
13667+ if (!__access_ok(VERIFY_READ, src, size))
13668+ return size;
13669+#endif
13670+
13671+ if (!__builtin_constant_p(size)) {
13672+ check_object_size(dst, size, false);
13673+
13674+#ifdef CONFIG_PAX_MEMORY_UDEREF
13675+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13676+ src += PAX_USER_SHADOW_BASE;
13677+#endif
13678+
13679+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13680+ }
13681 switch (size) {
13682- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13683+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13684 ret, "b", "b", "=q", 1);
13685 return ret;
13686- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13687+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13688 ret, "w", "w", "=r", 2);
13689 return ret;
13690- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13691+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13692 ret, "l", "k", "=r", 4);
13693 return ret;
13694- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13695+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13696 ret, "q", "", "=r", 8);
13697 return ret;
13698 case 10:
13699- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13700+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13701 ret, "q", "", "=r", 10);
13702 if (unlikely(ret))
13703 return ret;
13704 __get_user_asm(*(u16 *)(8 + (char *)dst),
13705- (u16 __user *)(8 + (char __user *)src),
13706+ (const u16 __user *)(8 + (const char __user *)src),
13707 ret, "w", "w", "=r", 2);
13708 return ret;
13709 case 16:
13710- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13711+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13712 ret, "q", "", "=r", 16);
13713 if (unlikely(ret))
13714 return ret;
13715 __get_user_asm(*(u64 *)(8 + (char *)dst),
13716- (u64 __user *)(8 + (char __user *)src),
13717+ (const u64 __user *)(8 + (const char __user *)src),
13718 ret, "q", "", "=r", 8);
13719 return ret;
13720 default:
13721- return copy_user_generic(dst, (__force void *)src, size);
13722+
13723+#ifdef CONFIG_PAX_MEMORY_UDEREF
13724+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13725+ src += PAX_USER_SHADOW_BASE;
13726+#endif
13727+
13728+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13729 }
13730 }
13731
13732 static __always_inline __must_check
13733-int __copy_to_user(void __user *dst, const void *src, unsigned size)
13734+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13735 {
13736- int ret = 0;
13737+ unsigned ret = 0;
13738
13739 might_fault();
13740- if (!__builtin_constant_p(size))
13741- return copy_user_generic((__force void *)dst, src, size);
13742+
13743+ pax_track_stack();
13744+
13745+ if (size > INT_MAX)
13746+ return size;
13747+
13748+#ifdef CONFIG_PAX_MEMORY_UDEREF
13749+ if (!__access_ok(VERIFY_WRITE, dst, size))
13750+ return size;
13751+#endif
13752+
13753+ if (!__builtin_constant_p(size)) {
13754+ check_object_size(src, size, true);
13755+
13756+#ifdef CONFIG_PAX_MEMORY_UDEREF
13757+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13758+ dst += PAX_USER_SHADOW_BASE;
13759+#endif
13760+
13761+ return copy_user_generic((__force_kernel void *)dst, src, size);
13762+ }
13763 switch (size) {
13764- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13765+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13766 ret, "b", "b", "iq", 1);
13767 return ret;
13768- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13769+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13770 ret, "w", "w", "ir", 2);
13771 return ret;
13772- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13773+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13774 ret, "l", "k", "ir", 4);
13775 return ret;
13776- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13777+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13778 ret, "q", "", "er", 8);
13779 return ret;
13780 case 10:
13781- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13782+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13783 ret, "q", "", "er", 10);
13784 if (unlikely(ret))
13785 return ret;
13786 asm("":::"memory");
13787- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13788+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13789 ret, "w", "w", "ir", 2);
13790 return ret;
13791 case 16:
13792- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13793+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13794 ret, "q", "", "er", 16);
13795 if (unlikely(ret))
13796 return ret;
13797 asm("":::"memory");
13798- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13799+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13800 ret, "q", "", "er", 8);
13801 return ret;
13802 default:
13803- return copy_user_generic((__force void *)dst, src, size);
13804+
13805+#ifdef CONFIG_PAX_MEMORY_UDEREF
13806+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13807+ dst += PAX_USER_SHADOW_BASE;
13808+#endif
13809+
13810+ return copy_user_generic((__force_kernel void *)dst, src, size);
13811+ }
13812+}
13813+
13814+static __always_inline __must_check
13815+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
13816+{
13817+ if (access_ok(VERIFY_WRITE, to, len))
13818+ len = __copy_to_user(to, from, len);
13819+ return len;
13820+}
13821+
13822+static __always_inline __must_check
13823+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
13824+{
13825+ might_fault();
13826+
13827+ if (access_ok(VERIFY_READ, from, len))
13828+ len = __copy_from_user(to, from, len);
13829+ else if (len < INT_MAX) {
13830+ if (!__builtin_constant_p(len))
13831+ check_object_size(to, len, false);
13832+ memset(to, 0, len);
13833 }
13834+ return len;
13835 }
13836
13837 static __always_inline __must_check
13838-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13839+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13840 {
13841- int ret = 0;
13842+ unsigned ret = 0;
13843
13844 might_fault();
13845- if (!__builtin_constant_p(size))
13846- return copy_user_generic((__force void *)dst,
13847- (__force void *)src, size);
13848+
13849+ pax_track_stack();
13850+
13851+ if (size > INT_MAX)
13852+ return size;
13853+
13854+#ifdef CONFIG_PAX_MEMORY_UDEREF
13855+ if (!__access_ok(VERIFY_READ, src, size))
13856+ return size;
13857+ if (!__access_ok(VERIFY_WRITE, dst, size))
13858+ return size;
13859+#endif
13860+
13861+ if (!__builtin_constant_p(size)) {
13862+
13863+#ifdef CONFIG_PAX_MEMORY_UDEREF
13864+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13865+ src += PAX_USER_SHADOW_BASE;
13866+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13867+ dst += PAX_USER_SHADOW_BASE;
13868+#endif
13869+
13870+ return copy_user_generic((__force_kernel void *)dst,
13871+ (__force_kernel const void *)src, size);
13872+ }
13873 switch (size) {
13874 case 1: {
13875 u8 tmp;
13876- __get_user_asm(tmp, (u8 __user *)src,
13877+ __get_user_asm(tmp, (const u8 __user *)src,
13878 ret, "b", "b", "=q", 1);
13879 if (likely(!ret))
13880 __put_user_asm(tmp, (u8 __user *)dst,
13881@@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13882 }
13883 case 2: {
13884 u16 tmp;
13885- __get_user_asm(tmp, (u16 __user *)src,
13886+ __get_user_asm(tmp, (const u16 __user *)src,
13887 ret, "w", "w", "=r", 2);
13888 if (likely(!ret))
13889 __put_user_asm(tmp, (u16 __user *)dst,
13890@@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13891
13892 case 4: {
13893 u32 tmp;
13894- __get_user_asm(tmp, (u32 __user *)src,
13895+ __get_user_asm(tmp, (const u32 __user *)src,
13896 ret, "l", "k", "=r", 4);
13897 if (likely(!ret))
13898 __put_user_asm(tmp, (u32 __user *)dst,
13899@@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13900 }
13901 case 8: {
13902 u64 tmp;
13903- __get_user_asm(tmp, (u64 __user *)src,
13904+ __get_user_asm(tmp, (const u64 __user *)src,
13905 ret, "q", "", "=r", 8);
13906 if (likely(!ret))
13907 __put_user_asm(tmp, (u64 __user *)dst,
13908@@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13909 return ret;
13910 }
13911 default:
13912- return copy_user_generic((__force void *)dst,
13913- (__force void *)src, size);
13914+
13915+#ifdef CONFIG_PAX_MEMORY_UDEREF
13916+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13917+ src += PAX_USER_SHADOW_BASE;
13918+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13919+ dst += PAX_USER_SHADOW_BASE;
13920+#endif
13921+
13922+ return copy_user_generic((__force_kernel void *)dst,
13923+ (__force_kernel const void *)src, size);
13924 }
13925 }
13926
13927@@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
13928 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
13929 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13930
13931-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
13932- unsigned size);
13933+static __must_check __always_inline unsigned long
13934+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13935+{
13936+ pax_track_stack();
13937+
13938+ if (size > INT_MAX)
13939+ return size;
13940+
13941+#ifdef CONFIG_PAX_MEMORY_UDEREF
13942+ if (!__access_ok(VERIFY_READ, src, size))
13943+ return size;
13944
13945-static __must_check __always_inline int
13946-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13947+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13948+ src += PAX_USER_SHADOW_BASE;
13949+#endif
13950+
13951+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
13952+}
13953+
13954+static __must_check __always_inline unsigned long
13955+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13956 {
13957- return copy_user_generic((__force void *)dst, src, size);
13958+ if (size > INT_MAX)
13959+ return size;
13960+
13961+#ifdef CONFIG_PAX_MEMORY_UDEREF
13962+ if (!__access_ok(VERIFY_WRITE, dst, size))
13963+ return size;
13964+
13965+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13966+ dst += PAX_USER_SHADOW_BASE;
13967+#endif
13968+
13969+ return copy_user_generic((__force_kernel void *)dst, src, size);
13970 }
13971
13972-extern long __copy_user_nocache(void *dst, const void __user *src,
13973- unsigned size, int zerorest);
13974+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13975+ unsigned long size, int zerorest);
13976
13977-static inline int
13978-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13979+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13980 {
13981 might_sleep();
13982+
13983+ if (size > INT_MAX)
13984+ return size;
13985+
13986+#ifdef CONFIG_PAX_MEMORY_UDEREF
13987+ if (!__access_ok(VERIFY_READ, src, size))
13988+ return size;
13989+#endif
13990+
13991 return __copy_user_nocache(dst, src, size, 1);
13992 }
13993
13994-static inline int
13995-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13996- unsigned size)
13997+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13998+ unsigned long size)
13999 {
14000+ if (size > INT_MAX)
14001+ return size;
14002+
14003+#ifdef CONFIG_PAX_MEMORY_UDEREF
14004+ if (!__access_ok(VERIFY_READ, src, size))
14005+ return size;
14006+#endif
14007+
14008 return __copy_user_nocache(dst, src, size, 0);
14009 }
14010
14011-unsigned long
14012-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
14013+extern unsigned long
14014+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
14015
14016 #endif /* _ASM_X86_UACCESS_64_H */
14017diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
14018index 9064052..786cfbc 100644
14019--- a/arch/x86/include/asm/vdso.h
14020+++ b/arch/x86/include/asm/vdso.h
14021@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
14022 #define VDSO32_SYMBOL(base, name) \
14023 ({ \
14024 extern const char VDSO32_##name[]; \
14025- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14026+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
14027 })
14028 #endif
14029
14030diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
14031index 3d61e20..9507180 100644
14032--- a/arch/x86/include/asm/vgtod.h
14033+++ b/arch/x86/include/asm/vgtod.h
14034@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
14035 int sysctl_enabled;
14036 struct timezone sys_tz;
14037 struct { /* extract of a clocksource struct */
14038+ char name[8];
14039 cycle_t (*vread)(void);
14040 cycle_t cycle_last;
14041 cycle_t mask;
14042diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
14043index 61e08c0..b0da582 100644
14044--- a/arch/x86/include/asm/vmi.h
14045+++ b/arch/x86/include/asm/vmi.h
14046@@ -191,6 +191,7 @@ struct vrom_header {
14047 u8 reserved[96]; /* Reserved for headers */
14048 char vmi_init[8]; /* VMI_Init jump point */
14049 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
14050+ char rom_data[8048]; /* rest of the option ROM */
14051 } __attribute__((packed));
14052
14053 struct pnp_header {
14054diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
14055index c6e0bee..fcb9f74 100644
14056--- a/arch/x86/include/asm/vmi_time.h
14057+++ b/arch/x86/include/asm/vmi_time.h
14058@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
14059 int (*wallclock_updated)(void);
14060 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
14061 void (*cancel_alarm)(u32 flags);
14062-} vmi_timer_ops;
14063+} __no_const vmi_timer_ops;
14064
14065 /* Prototypes */
14066 extern void __init vmi_time_init(void);
14067diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
14068index d0983d2..1f7c9e9 100644
14069--- a/arch/x86/include/asm/vsyscall.h
14070+++ b/arch/x86/include/asm/vsyscall.h
14071@@ -15,9 +15,10 @@ enum vsyscall_num {
14072
14073 #ifdef __KERNEL__
14074 #include <linux/seqlock.h>
14075+#include <linux/getcpu.h>
14076+#include <linux/time.h>
14077
14078 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
14079-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
14080
14081 /* Definitions for CONFIG_GENERIC_TIME definitions */
14082 #define __section_vsyscall_gtod_data __attribute__ \
14083@@ -31,7 +32,6 @@ enum vsyscall_num {
14084 #define VGETCPU_LSL 2
14085
14086 extern int __vgetcpu_mode;
14087-extern volatile unsigned long __jiffies;
14088
14089 /* kernel space (writeable) */
14090 extern int vgetcpu_mode;
14091@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
14092
14093 extern void map_vsyscall(void);
14094
14095+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
14096+extern time_t vtime(time_t *t);
14097+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
14098 #endif /* __KERNEL__ */
14099
14100 #endif /* _ASM_X86_VSYSCALL_H */
14101diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
14102index 2c756fd..3377e37 100644
14103--- a/arch/x86/include/asm/x86_init.h
14104+++ b/arch/x86/include/asm/x86_init.h
14105@@ -28,7 +28,7 @@ struct x86_init_mpparse {
14106 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
14107 void (*find_smp_config)(unsigned int reserve);
14108 void (*get_smp_config)(unsigned int early);
14109-};
14110+} __no_const;
14111
14112 /**
14113 * struct x86_init_resources - platform specific resource related ops
14114@@ -42,7 +42,7 @@ struct x86_init_resources {
14115 void (*probe_roms)(void);
14116 void (*reserve_resources)(void);
14117 char *(*memory_setup)(void);
14118-};
14119+} __no_const;
14120
14121 /**
14122 * struct x86_init_irqs - platform specific interrupt setup
14123@@ -55,7 +55,7 @@ struct x86_init_irqs {
14124 void (*pre_vector_init)(void);
14125 void (*intr_init)(void);
14126 void (*trap_init)(void);
14127-};
14128+} __no_const;
14129
14130 /**
14131 * struct x86_init_oem - oem platform specific customizing functions
14132@@ -65,7 +65,7 @@ struct x86_init_irqs {
14133 struct x86_init_oem {
14134 void (*arch_setup)(void);
14135 void (*banner)(void);
14136-};
14137+} __no_const;
14138
14139 /**
14140 * struct x86_init_paging - platform specific paging functions
14141@@ -75,7 +75,7 @@ struct x86_init_oem {
14142 struct x86_init_paging {
14143 void (*pagetable_setup_start)(pgd_t *base);
14144 void (*pagetable_setup_done)(pgd_t *base);
14145-};
14146+} __no_const;
14147
14148 /**
14149 * struct x86_init_timers - platform specific timer setup
14150@@ -88,7 +88,7 @@ struct x86_init_timers {
14151 void (*setup_percpu_clockev)(void);
14152 void (*tsc_pre_init)(void);
14153 void (*timer_init)(void);
14154-};
14155+} __no_const;
14156
14157 /**
14158 * struct x86_init_ops - functions for platform specific setup
14159@@ -101,7 +101,7 @@ struct x86_init_ops {
14160 struct x86_init_oem oem;
14161 struct x86_init_paging paging;
14162 struct x86_init_timers timers;
14163-};
14164+} __no_const;
14165
14166 /**
14167 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
14168@@ -109,7 +109,7 @@ struct x86_init_ops {
14169 */
14170 struct x86_cpuinit_ops {
14171 void (*setup_percpu_clockev)(void);
14172-};
14173+} __no_const;
14174
14175 /**
14176 * struct x86_platform_ops - platform specific runtime functions
14177@@ -121,7 +121,7 @@ struct x86_platform_ops {
14178 unsigned long (*calibrate_tsc)(void);
14179 unsigned long (*get_wallclock)(void);
14180 int (*set_wallclock)(unsigned long nowtime);
14181-};
14182+} __no_const;
14183
14184 extern struct x86_init_ops x86_init;
14185 extern struct x86_cpuinit_ops x86_cpuinit;
14186diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
14187index 727acc1..554f3eb 100644
14188--- a/arch/x86/include/asm/xsave.h
14189+++ b/arch/x86/include/asm/xsave.h
14190@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
14191 static inline int xsave_user(struct xsave_struct __user *buf)
14192 {
14193 int err;
14194+
14195+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14196+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
14197+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
14198+#endif
14199+
14200 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
14201 "2:\n"
14202 ".section .fixup,\"ax\"\n"
14203@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
14204 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
14205 {
14206 int err;
14207- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
14208+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
14209 u32 lmask = mask;
14210 u32 hmask = mask >> 32;
14211
14212+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14213+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
14214+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
14215+#endif
14216+
14217 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
14218 "2:\n"
14219 ".section .fixup,\"ax\"\n"
14220diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
14221index 6a564ac..9b1340c 100644
14222--- a/arch/x86/kernel/acpi/realmode/Makefile
14223+++ b/arch/x86/kernel/acpi/realmode/Makefile
14224@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
14225 $(call cc-option, -fno-stack-protector) \
14226 $(call cc-option, -mpreferred-stack-boundary=2)
14227 KBUILD_CFLAGS += $(call cc-option, -m32)
14228+ifdef CONSTIFY_PLUGIN
14229+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
14230+endif
14231 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
14232 GCOV_PROFILE := n
14233
14234diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
14235index 580b4e2..d4129e4 100644
14236--- a/arch/x86/kernel/acpi/realmode/wakeup.S
14237+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
14238@@ -91,6 +91,9 @@ _start:
14239 /* Do any other stuff... */
14240
14241 #ifndef CONFIG_64BIT
14242+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
14243+ call verify_cpu
14244+
14245 /* This could also be done in C code... */
14246 movl pmode_cr3, %eax
14247 movl %eax, %cr3
14248@@ -104,7 +107,7 @@ _start:
14249 movl %eax, %ecx
14250 orl %edx, %ecx
14251 jz 1f
14252- movl $0xc0000080, %ecx
14253+ mov $MSR_EFER, %ecx
14254 wrmsr
14255 1:
14256
14257@@ -114,6 +117,7 @@ _start:
14258 movl pmode_cr0, %eax
14259 movl %eax, %cr0
14260 jmp pmode_return
14261+# include "../../verify_cpu.S"
14262 #else
14263 pushw $0
14264 pushw trampoline_segment
14265diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
14266index ca93638..7042f24 100644
14267--- a/arch/x86/kernel/acpi/sleep.c
14268+++ b/arch/x86/kernel/acpi/sleep.c
14269@@ -11,11 +11,12 @@
14270 #include <linux/cpumask.h>
14271 #include <asm/segment.h>
14272 #include <asm/desc.h>
14273+#include <asm/e820.h>
14274
14275 #include "realmode/wakeup.h"
14276 #include "sleep.h"
14277
14278-unsigned long acpi_wakeup_address;
14279+unsigned long acpi_wakeup_address = 0x2000;
14280 unsigned long acpi_realmode_flags;
14281
14282 /* address in low memory of the wakeup routine. */
14283@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
14284 #else /* CONFIG_64BIT */
14285 header->trampoline_segment = setup_trampoline() >> 4;
14286 #ifdef CONFIG_SMP
14287- stack_start.sp = temp_stack + sizeof(temp_stack);
14288+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
14289+
14290+ pax_open_kernel();
14291 early_gdt_descr.address =
14292 (unsigned long)get_cpu_gdt_table(smp_processor_id());
14293+ pax_close_kernel();
14294+
14295 initial_gs = per_cpu_offset(smp_processor_id());
14296 #endif
14297 initial_code = (unsigned long)wakeup_long64;
14298@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
14299 return;
14300 }
14301
14302- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
14303-
14304- if (!acpi_realmode) {
14305- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
14306- return;
14307- }
14308-
14309- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
14310+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
14311+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
14312 }
14313
14314
14315diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
14316index 8ded418..079961e 100644
14317--- a/arch/x86/kernel/acpi/wakeup_32.S
14318+++ b/arch/x86/kernel/acpi/wakeup_32.S
14319@@ -30,13 +30,11 @@ wakeup_pmode_return:
14320 # and restore the stack ... but you need gdt for this to work
14321 movl saved_context_esp, %esp
14322
14323- movl %cs:saved_magic, %eax
14324- cmpl $0x12345678, %eax
14325+ cmpl $0x12345678, saved_magic
14326 jne bogus_magic
14327
14328 # jump to place where we left off
14329- movl saved_eip, %eax
14330- jmp *%eax
14331+ jmp *(saved_eip)
14332
14333 bogus_magic:
14334 jmp bogus_magic
14335diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
14336index de7353c..075da5f 100644
14337--- a/arch/x86/kernel/alternative.c
14338+++ b/arch/x86/kernel/alternative.c
14339@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
14340
14341 BUG_ON(p->len > MAX_PATCH_LEN);
14342 /* prep the buffer with the original instructions */
14343- memcpy(insnbuf, p->instr, p->len);
14344+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
14345 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
14346 (unsigned long)p->instr, p->len);
14347
14348@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
14349 if (smp_alt_once)
14350 free_init_pages("SMP alternatives",
14351 (unsigned long)__smp_locks,
14352- (unsigned long)__smp_locks_end);
14353+ PAGE_ALIGN((unsigned long)__smp_locks_end));
14354
14355 restart_nmi();
14356 }
14357@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
14358 * instructions. And on the local CPU you need to be protected again NMI or MCE
14359 * handlers seeing an inconsistent instruction while you patch.
14360 */
14361-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14362+static void *__kprobes text_poke_early(void *addr, const void *opcode,
14363 size_t len)
14364 {
14365 unsigned long flags;
14366 local_irq_save(flags);
14367- memcpy(addr, opcode, len);
14368+
14369+ pax_open_kernel();
14370+ memcpy(ktla_ktva(addr), opcode, len);
14371 sync_core();
14372+ pax_close_kernel();
14373+
14374 local_irq_restore(flags);
14375 /* Could also do a CLFLUSH here to speed up CPU recovery; but
14376 that causes hangs on some VIA CPUs. */
14377@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
14378 */
14379 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
14380 {
14381- unsigned long flags;
14382- char *vaddr;
14383+ unsigned char *vaddr = ktla_ktva(addr);
14384 struct page *pages[2];
14385- int i;
14386+ size_t i;
14387
14388 if (!core_kernel_text((unsigned long)addr)) {
14389- pages[0] = vmalloc_to_page(addr);
14390- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
14391+ pages[0] = vmalloc_to_page(vaddr);
14392+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
14393 } else {
14394- pages[0] = virt_to_page(addr);
14395+ pages[0] = virt_to_page(vaddr);
14396 WARN_ON(!PageReserved(pages[0]));
14397- pages[1] = virt_to_page(addr + PAGE_SIZE);
14398+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
14399 }
14400 BUG_ON(!pages[0]);
14401- local_irq_save(flags);
14402- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
14403- if (pages[1])
14404- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
14405- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
14406- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
14407- clear_fixmap(FIX_TEXT_POKE0);
14408- if (pages[1])
14409- clear_fixmap(FIX_TEXT_POKE1);
14410- local_flush_tlb();
14411- sync_core();
14412- /* Could also do a CLFLUSH here to speed up CPU recovery; but
14413- that causes hangs on some VIA CPUs. */
14414+ text_poke_early(addr, opcode, len);
14415 for (i = 0; i < len; i++)
14416- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
14417- local_irq_restore(flags);
14418+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
14419 return addr;
14420 }
14421diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
14422index 3a44b75..1601800 100644
14423--- a/arch/x86/kernel/amd_iommu.c
14424+++ b/arch/x86/kernel/amd_iommu.c
14425@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
14426 }
14427 }
14428
14429-static struct dma_map_ops amd_iommu_dma_ops = {
14430+static const struct dma_map_ops amd_iommu_dma_ops = {
14431 .alloc_coherent = alloc_coherent,
14432 .free_coherent = free_coherent,
14433 .map_page = map_page,
14434diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
14435index 1d2d670..8e3f477 100644
14436--- a/arch/x86/kernel/apic/apic.c
14437+++ b/arch/x86/kernel/apic/apic.c
14438@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
14439 /*
14440 * Debug level, exported for io_apic.c
14441 */
14442-unsigned int apic_verbosity;
14443+int apic_verbosity;
14444
14445 int pic_mode;
14446
14447@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
14448 apic_write(APIC_ESR, 0);
14449 v1 = apic_read(APIC_ESR);
14450 ack_APIC_irq();
14451- atomic_inc(&irq_err_count);
14452+ atomic_inc_unchecked(&irq_err_count);
14453
14454 /*
14455 * Here is what the APIC error bits mean:
14456@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
14457 u16 *bios_cpu_apicid;
14458 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
14459
14460+ pax_track_stack();
14461+
14462 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
14463 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
14464
14465diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
14466index 8928d97..f799cea 100644
14467--- a/arch/x86/kernel/apic/io_apic.c
14468+++ b/arch/x86/kernel/apic/io_apic.c
14469@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
14470 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
14471 GFP_ATOMIC);
14472 if (!ioapic_entries)
14473- return 0;
14474+ return NULL;
14475
14476 for (apic = 0; apic < nr_ioapics; apic++) {
14477 ioapic_entries[apic] =
14478@@ -733,7 +733,7 @@ nomem:
14479 kfree(ioapic_entries[apic]);
14480 kfree(ioapic_entries);
14481
14482- return 0;
14483+ return NULL;
14484 }
14485
14486 /*
14487@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
14488 }
14489 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
14490
14491-void lock_vector_lock(void)
14492+void lock_vector_lock(void) __acquires(vector_lock)
14493 {
14494 /* Used to the online set of cpus does not change
14495 * during assign_irq_vector.
14496@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
14497 spin_lock(&vector_lock);
14498 }
14499
14500-void unlock_vector_lock(void)
14501+void unlock_vector_lock(void) __releases(vector_lock)
14502 {
14503 spin_unlock(&vector_lock);
14504 }
14505@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
14506 ack_APIC_irq();
14507 }
14508
14509-atomic_t irq_mis_count;
14510+atomic_unchecked_t irq_mis_count;
14511
14512 static void ack_apic_level(unsigned int irq)
14513 {
14514@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
14515
14516 /* Tail end of version 0x11 I/O APIC bug workaround */
14517 if (!(v & (1 << (i & 0x1f)))) {
14518- atomic_inc(&irq_mis_count);
14519+ atomic_inc_unchecked(&irq_mis_count);
14520 spin_lock(&ioapic_lock);
14521 __mask_and_edge_IO_APIC_irq(cfg);
14522 __unmask_and_level_IO_APIC_irq(cfg);
14523diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
14524index 151ace6..f317474 100644
14525--- a/arch/x86/kernel/apm_32.c
14526+++ b/arch/x86/kernel/apm_32.c
14527@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
14528 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14529 * even though they are called in protected mode.
14530 */
14531-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14532+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14533 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14534
14535 static const char driver_version[] = "1.16ac"; /* no spaces */
14536@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
14537 BUG_ON(cpu != 0);
14538 gdt = get_cpu_gdt_table(cpu);
14539 save_desc_40 = gdt[0x40 / 8];
14540+
14541+ pax_open_kernel();
14542 gdt[0x40 / 8] = bad_bios_desc;
14543+ pax_close_kernel();
14544
14545 apm_irq_save(flags);
14546 APM_DO_SAVE_SEGS;
14547@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14548 &call->esi);
14549 APM_DO_RESTORE_SEGS;
14550 apm_irq_restore(flags);
14551+
14552+ pax_open_kernel();
14553 gdt[0x40 / 8] = save_desc_40;
14554+ pax_close_kernel();
14555+
14556 put_cpu();
14557
14558 return call->eax & 0xff;
14559@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14560 BUG_ON(cpu != 0);
14561 gdt = get_cpu_gdt_table(cpu);
14562 save_desc_40 = gdt[0x40 / 8];
14563+
14564+ pax_open_kernel();
14565 gdt[0x40 / 8] = bad_bios_desc;
14566+ pax_close_kernel();
14567
14568 apm_irq_save(flags);
14569 APM_DO_SAVE_SEGS;
14570@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14571 &call->eax);
14572 APM_DO_RESTORE_SEGS;
14573 apm_irq_restore(flags);
14574+
14575+ pax_open_kernel();
14576 gdt[0x40 / 8] = save_desc_40;
14577+ pax_close_kernel();
14578+
14579 put_cpu();
14580 return error;
14581 }
14582@@ -975,7 +989,7 @@ recalc:
14583
14584 static void apm_power_off(void)
14585 {
14586- unsigned char po_bios_call[] = {
14587+ const unsigned char po_bios_call[] = {
14588 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
14589 0x8e, 0xd0, /* movw ax,ss */
14590 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
14591@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
14592 * code to that CPU.
14593 */
14594 gdt = get_cpu_gdt_table(0);
14595+
14596+ pax_open_kernel();
14597 set_desc_base(&gdt[APM_CS >> 3],
14598 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14599 set_desc_base(&gdt[APM_CS_16 >> 3],
14600 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14601 set_desc_base(&gdt[APM_DS >> 3],
14602 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14603+ pax_close_kernel();
14604
14605 proc_create("apm", 0, NULL, &apm_file_ops);
14606
14607diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
14608index dfdbf64..9b2b6ce 100644
14609--- a/arch/x86/kernel/asm-offsets_32.c
14610+++ b/arch/x86/kernel/asm-offsets_32.c
14611@@ -51,7 +51,6 @@ void foo(void)
14612 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
14613 BLANK();
14614
14615- OFFSET(TI_task, thread_info, task);
14616 OFFSET(TI_exec_domain, thread_info, exec_domain);
14617 OFFSET(TI_flags, thread_info, flags);
14618 OFFSET(TI_status, thread_info, status);
14619@@ -60,6 +59,8 @@ void foo(void)
14620 OFFSET(TI_restart_block, thread_info, restart_block);
14621 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
14622 OFFSET(TI_cpu, thread_info, cpu);
14623+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14624+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14625 BLANK();
14626
14627 OFFSET(GDS_size, desc_ptr, size);
14628@@ -99,6 +100,7 @@ void foo(void)
14629
14630 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14631 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14632+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14633 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
14634 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
14635 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
14636@@ -115,6 +117,11 @@ void foo(void)
14637 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
14638 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14639 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14640+
14641+#ifdef CONFIG_PAX_KERNEXEC
14642+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14643+#endif
14644+
14645 #endif
14646
14647 #ifdef CONFIG_XEN
14648diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14649index 4a6aeed..371de20 100644
14650--- a/arch/x86/kernel/asm-offsets_64.c
14651+++ b/arch/x86/kernel/asm-offsets_64.c
14652@@ -44,6 +44,8 @@ int main(void)
14653 ENTRY(addr_limit);
14654 ENTRY(preempt_count);
14655 ENTRY(status);
14656+ ENTRY(lowest_stack);
14657+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14658 #ifdef CONFIG_IA32_EMULATION
14659 ENTRY(sysenter_return);
14660 #endif
14661@@ -63,6 +65,18 @@ int main(void)
14662 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14663 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
14664 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14665+
14666+#ifdef CONFIG_PAX_KERNEXEC
14667+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14668+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14669+#endif
14670+
14671+#ifdef CONFIG_PAX_MEMORY_UDEREF
14672+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14673+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14674+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14675+#endif
14676+
14677 #endif
14678
14679
14680@@ -115,6 +129,7 @@ int main(void)
14681 ENTRY(cr8);
14682 BLANK();
14683 #undef ENTRY
14684+ DEFINE(TSS_size, sizeof(struct tss_struct));
14685 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
14686 BLANK();
14687 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
14688@@ -130,6 +145,7 @@ int main(void)
14689
14690 BLANK();
14691 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14692+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14693 #ifdef CONFIG_XEN
14694 BLANK();
14695 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14696diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14697index ff502cc..dc5133e 100644
14698--- a/arch/x86/kernel/cpu/Makefile
14699+++ b/arch/x86/kernel/cpu/Makefile
14700@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
14701 CFLAGS_REMOVE_common.o = -pg
14702 endif
14703
14704-# Make sure load_percpu_segment has no stackprotector
14705-nostackp := $(call cc-option, -fno-stack-protector)
14706-CFLAGS_common.o := $(nostackp)
14707-
14708 obj-y := intel_cacheinfo.o addon_cpuid_features.o
14709 obj-y += proc.o capflags.o powerflags.o common.o
14710 obj-y += vmware.o hypervisor.o sched.o
14711diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14712index 6e082dc..a0b5f36 100644
14713--- a/arch/x86/kernel/cpu/amd.c
14714+++ b/arch/x86/kernel/cpu/amd.c
14715@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14716 unsigned int size)
14717 {
14718 /* AMD errata T13 (order #21922) */
14719- if ((c->x86 == 6)) {
14720+ if (c->x86 == 6) {
14721 /* Duron Rev A0 */
14722 if (c->x86_model == 3 && c->x86_mask == 0)
14723 size = 64;
14724diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14725index 4e34d10..ba6bc97 100644
14726--- a/arch/x86/kernel/cpu/common.c
14727+++ b/arch/x86/kernel/cpu/common.c
14728@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14729
14730 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14731
14732-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14733-#ifdef CONFIG_X86_64
14734- /*
14735- * We need valid kernel segments for data and code in long mode too
14736- * IRET will check the segment types kkeil 2000/10/28
14737- * Also sysret mandates a special GDT layout
14738- *
14739- * TLS descriptors are currently at a different place compared to i386.
14740- * Hopefully nobody expects them at a fixed place (Wine?)
14741- */
14742- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14743- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14744- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14745- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14746- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14747- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14748-#else
14749- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14750- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14751- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14752- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14753- /*
14754- * Segments used for calling PnP BIOS have byte granularity.
14755- * They code segments and data segments have fixed 64k limits,
14756- * the transfer segment sizes are set at run time.
14757- */
14758- /* 32-bit code */
14759- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14760- /* 16-bit code */
14761- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14762- /* 16-bit data */
14763- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14764- /* 16-bit data */
14765- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14766- /* 16-bit data */
14767- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14768- /*
14769- * The APM segments have byte granularity and their bases
14770- * are set at run time. All have 64k limits.
14771- */
14772- /* 32-bit code */
14773- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14774- /* 16-bit code */
14775- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14776- /* data */
14777- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14778-
14779- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14780- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14781- GDT_STACK_CANARY_INIT
14782-#endif
14783-} };
14784-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14785-
14786 static int __init x86_xsave_setup(char *s)
14787 {
14788 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14789@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
14790 {
14791 struct desc_ptr gdt_descr;
14792
14793- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14794+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14795 gdt_descr.size = GDT_SIZE - 1;
14796 load_gdt(&gdt_descr);
14797 /* Reload the per-cpu base */
14798@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14799 /* Filter out anything that depends on CPUID levels we don't have */
14800 filter_cpuid_features(c, true);
14801
14802+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14803+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14804+#endif
14805+
14806 /* If the model name is still unset, do table lookup. */
14807 if (!c->x86_model_id[0]) {
14808 const char *p;
14809@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
14810 }
14811 __setup("clearcpuid=", setup_disablecpuid);
14812
14813+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14814+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14815+
14816 #ifdef CONFIG_X86_64
14817 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14818
14819@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14820 EXPORT_PER_CPU_SYMBOL(current_task);
14821
14822 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14823- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14824+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14825 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14826
14827 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14828@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14829 {
14830 memset(regs, 0, sizeof(struct pt_regs));
14831 regs->fs = __KERNEL_PERCPU;
14832- regs->gs = __KERNEL_STACK_CANARY;
14833+ savesegment(gs, regs->gs);
14834
14835 return regs;
14836 }
14837@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
14838 int i;
14839
14840 cpu = stack_smp_processor_id();
14841- t = &per_cpu(init_tss, cpu);
14842+ t = init_tss + cpu;
14843 orig_ist = &per_cpu(orig_ist, cpu);
14844
14845 #ifdef CONFIG_NUMA
14846@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
14847 switch_to_new_gdt(cpu);
14848 loadsegment(fs, 0);
14849
14850- load_idt((const struct desc_ptr *)&idt_descr);
14851+ load_idt(&idt_descr);
14852
14853 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14854 syscall_init();
14855@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
14856 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14857 barrier();
14858
14859- check_efer();
14860 if (cpu != 0)
14861 enable_x2apic();
14862
14863@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
14864 {
14865 int cpu = smp_processor_id();
14866 struct task_struct *curr = current;
14867- struct tss_struct *t = &per_cpu(init_tss, cpu);
14868+ struct tss_struct *t = init_tss + cpu;
14869 struct thread_struct *thread = &curr->thread;
14870
14871 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14872diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14873index 6a77cca..4f4fca0 100644
14874--- a/arch/x86/kernel/cpu/intel.c
14875+++ b/arch/x86/kernel/cpu/intel.c
14876@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14877 * Update the IDT descriptor and reload the IDT so that
14878 * it uses the read-only mapped virtual address.
14879 */
14880- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14881+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14882 load_idt(&idt_descr);
14883 }
14884 #endif
14885diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
14886index 417990f..96dc36b 100644
14887--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
14888+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
14889@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
14890 return ret;
14891 }
14892
14893-static struct sysfs_ops sysfs_ops = {
14894+static const struct sysfs_ops sysfs_ops = {
14895 .show = show,
14896 .store = store,
14897 };
14898diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14899index 472763d..9831e11 100644
14900--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14901+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14902@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14903 static int inject_init(void)
14904 {
14905 printk(KERN_INFO "Machine check injector initialized\n");
14906- mce_chrdev_ops.write = mce_write;
14907+ pax_open_kernel();
14908+ *(void **)&mce_chrdev_ops.write = mce_write;
14909+ pax_close_kernel();
14910 register_die_notifier(&mce_raise_nb);
14911 return 0;
14912 }
14913diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14914index 0f16a2b..21740f5 100644
14915--- a/arch/x86/kernel/cpu/mcheck/mce.c
14916+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14917@@ -43,6 +43,7 @@
14918 #include <asm/ipi.h>
14919 #include <asm/mce.h>
14920 #include <asm/msr.h>
14921+#include <asm/local.h>
14922
14923 #include "mce-internal.h"
14924
14925@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
14926 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14927 m->cs, m->ip);
14928
14929- if (m->cs == __KERNEL_CS)
14930+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14931 print_symbol("{%s}", m->ip);
14932 pr_cont("\n");
14933 }
14934@@ -221,10 +222,10 @@ static void print_mce_tail(void)
14935
14936 #define PANIC_TIMEOUT 5 /* 5 seconds */
14937
14938-static atomic_t mce_paniced;
14939+static atomic_unchecked_t mce_paniced;
14940
14941 static int fake_panic;
14942-static atomic_t mce_fake_paniced;
14943+static atomic_unchecked_t mce_fake_paniced;
14944
14945 /* Panic in progress. Enable interrupts and wait for final IPI */
14946 static void wait_for_panic(void)
14947@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14948 /*
14949 * Make sure only one CPU runs in machine check panic
14950 */
14951- if (atomic_inc_return(&mce_paniced) > 1)
14952+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14953 wait_for_panic();
14954 barrier();
14955
14956@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14957 console_verbose();
14958 } else {
14959 /* Don't log too much for fake panic */
14960- if (atomic_inc_return(&mce_fake_paniced) > 1)
14961+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14962 return;
14963 }
14964 print_mce_head();
14965@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
14966 * might have been modified by someone else.
14967 */
14968 rmb();
14969- if (atomic_read(&mce_paniced))
14970+ if (atomic_read_unchecked(&mce_paniced))
14971 wait_for_panic();
14972 if (!monarch_timeout)
14973 goto out;
14974@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14975 }
14976
14977 /* Call the installed machine check handler for this CPU setup. */
14978-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14979+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14980 unexpected_machine_check;
14981
14982 /*
14983@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14984 return;
14985 }
14986
14987+ pax_open_kernel();
14988 machine_check_vector = do_machine_check;
14989+ pax_close_kernel();
14990
14991 mce_init();
14992 mce_cpu_features(c);
14993@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
14994 */
14995
14996 static DEFINE_SPINLOCK(mce_state_lock);
14997-static int open_count; /* #times opened */
14998+static local_t open_count; /* #times opened */
14999 static int open_exclu; /* already open exclusive? */
15000
15001 static int mce_open(struct inode *inode, struct file *file)
15002 {
15003 spin_lock(&mce_state_lock);
15004
15005- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
15006+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
15007 spin_unlock(&mce_state_lock);
15008
15009 return -EBUSY;
15010@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
15011
15012 if (file->f_flags & O_EXCL)
15013 open_exclu = 1;
15014- open_count++;
15015+ local_inc(&open_count);
15016
15017 spin_unlock(&mce_state_lock);
15018
15019@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
15020 {
15021 spin_lock(&mce_state_lock);
15022
15023- open_count--;
15024+ local_dec(&open_count);
15025 open_exclu = 0;
15026
15027 spin_unlock(&mce_state_lock);
15028@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
15029 static void mce_reset(void)
15030 {
15031 cpu_missing = 0;
15032- atomic_set(&mce_fake_paniced, 0);
15033+ atomic_set_unchecked(&mce_fake_paniced, 0);
15034 atomic_set(&mce_executing, 0);
15035 atomic_set(&mce_callin, 0);
15036 atomic_set(&global_nwo, 0);
15037diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15038index ef3cd31..9d2f6ab 100644
15039--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
15040+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
15041@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
15042 return ret;
15043 }
15044
15045-static struct sysfs_ops threshold_ops = {
15046+static const struct sysfs_ops threshold_ops = {
15047 .show = show,
15048 .store = store,
15049 };
15050diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
15051index 5c0e653..0882b0a 100644
15052--- a/arch/x86/kernel/cpu/mcheck/p5.c
15053+++ b/arch/x86/kernel/cpu/mcheck/p5.c
15054@@ -12,6 +12,7 @@
15055 #include <asm/system.h>
15056 #include <asm/mce.h>
15057 #include <asm/msr.h>
15058+#include <asm/pgtable.h>
15059
15060 /* By default disabled */
15061 int mce_p5_enabled __read_mostly;
15062@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
15063 if (!cpu_has(c, X86_FEATURE_MCE))
15064 return;
15065
15066+ pax_open_kernel();
15067 machine_check_vector = pentium_machine_check;
15068+ pax_close_kernel();
15069 /* Make sure the vector pointer is visible before we enable MCEs: */
15070 wmb();
15071
15072diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
15073index 54060f5..c1a7577 100644
15074--- a/arch/x86/kernel/cpu/mcheck/winchip.c
15075+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
15076@@ -11,6 +11,7 @@
15077 #include <asm/system.h>
15078 #include <asm/mce.h>
15079 #include <asm/msr.h>
15080+#include <asm/pgtable.h>
15081
15082 /* Machine check handler for WinChip C6: */
15083 static void winchip_machine_check(struct pt_regs *regs, long error_code)
15084@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
15085 {
15086 u32 lo, hi;
15087
15088+ pax_open_kernel();
15089 machine_check_vector = winchip_machine_check;
15090+ pax_close_kernel();
15091 /* Make sure the vector pointer is visible before we enable MCEs: */
15092 wmb();
15093
15094diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
15095index 33af141..92ba9cd 100644
15096--- a/arch/x86/kernel/cpu/mtrr/amd.c
15097+++ b/arch/x86/kernel/cpu/mtrr/amd.c
15098@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
15099 return 0;
15100 }
15101
15102-static struct mtrr_ops amd_mtrr_ops = {
15103+static const struct mtrr_ops amd_mtrr_ops = {
15104 .vendor = X86_VENDOR_AMD,
15105 .set = amd_set_mtrr,
15106 .get = amd_get_mtrr,
15107diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
15108index de89f14..316fe3e 100644
15109--- a/arch/x86/kernel/cpu/mtrr/centaur.c
15110+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
15111@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
15112 return 0;
15113 }
15114
15115-static struct mtrr_ops centaur_mtrr_ops = {
15116+static const struct mtrr_ops centaur_mtrr_ops = {
15117 .vendor = X86_VENDOR_CENTAUR,
15118 .set = centaur_set_mcr,
15119 .get = centaur_get_mcr,
15120diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
15121index 228d982..68a3343 100644
15122--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
15123+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
15124@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
15125 post_set();
15126 }
15127
15128-static struct mtrr_ops cyrix_mtrr_ops = {
15129+static const struct mtrr_ops cyrix_mtrr_ops = {
15130 .vendor = X86_VENDOR_CYRIX,
15131 .set_all = cyrix_set_all,
15132 .set = cyrix_set_arr,
15133diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
15134index 55da0c5..4d75584 100644
15135--- a/arch/x86/kernel/cpu/mtrr/generic.c
15136+++ b/arch/x86/kernel/cpu/mtrr/generic.c
15137@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
15138 /*
15139 * Generic structure...
15140 */
15141-struct mtrr_ops generic_mtrr_ops = {
15142+const struct mtrr_ops generic_mtrr_ops = {
15143 .use_intel_if = 1,
15144 .set_all = generic_set_all,
15145 .get = generic_get_mtrr,
15146diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
15147index fd60f09..c94ef52 100644
15148--- a/arch/x86/kernel/cpu/mtrr/main.c
15149+++ b/arch/x86/kernel/cpu/mtrr/main.c
15150@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
15151 u64 size_or_mask, size_and_mask;
15152 static bool mtrr_aps_delayed_init;
15153
15154-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
15155+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
15156
15157-struct mtrr_ops *mtrr_if;
15158+const struct mtrr_ops *mtrr_if;
15159
15160 static void set_mtrr(unsigned int reg, unsigned long base,
15161 unsigned long size, mtrr_type type);
15162
15163-void set_mtrr_ops(struct mtrr_ops *ops)
15164+void set_mtrr_ops(const struct mtrr_ops *ops)
15165 {
15166 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
15167 mtrr_ops[ops->vendor] = ops;
15168diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
15169index a501dee..816c719 100644
15170--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
15171+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
15172@@ -25,14 +25,14 @@ struct mtrr_ops {
15173 int (*validate_add_page)(unsigned long base, unsigned long size,
15174 unsigned int type);
15175 int (*have_wrcomb)(void);
15176-};
15177+} __do_const;
15178
15179 extern int generic_get_free_region(unsigned long base, unsigned long size,
15180 int replace_reg);
15181 extern int generic_validate_add_page(unsigned long base, unsigned long size,
15182 unsigned int type);
15183
15184-extern struct mtrr_ops generic_mtrr_ops;
15185+extern const struct mtrr_ops generic_mtrr_ops;
15186
15187 extern int positive_have_wrcomb(void);
15188
15189@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
15190 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
15191 void get_mtrr_state(void);
15192
15193-extern void set_mtrr_ops(struct mtrr_ops *ops);
15194+extern void set_mtrr_ops(const struct mtrr_ops *ops);
15195
15196 extern u64 size_or_mask, size_and_mask;
15197-extern struct mtrr_ops *mtrr_if;
15198+extern const struct mtrr_ops *mtrr_if;
15199
15200 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
15201 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
15202diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
15203index 0ff02ca..fc49a60 100644
15204--- a/arch/x86/kernel/cpu/perf_event.c
15205+++ b/arch/x86/kernel/cpu/perf_event.c
15206@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
15207 * count to the generic event atomically:
15208 */
15209 again:
15210- prev_raw_count = atomic64_read(&hwc->prev_count);
15211+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
15212 rdmsrl(hwc->event_base + idx, new_raw_count);
15213
15214- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
15215+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
15216 new_raw_count) != prev_raw_count)
15217 goto again;
15218
15219@@ -741,7 +741,7 @@ again:
15220 delta = (new_raw_count << shift) - (prev_raw_count << shift);
15221 delta >>= shift;
15222
15223- atomic64_add(delta, &event->count);
15224+ atomic64_add_unchecked(delta, &event->count);
15225 atomic64_sub(delta, &hwc->period_left);
15226
15227 return new_raw_count;
15228@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
15229 * The hw event starts counting from this event offset,
15230 * mark it to be able to extra future deltas:
15231 */
15232- atomic64_set(&hwc->prev_count, (u64)-left);
15233+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
15234
15235 err = checking_wrmsrl(hwc->event_base + idx,
15236 (u64)(-left) & x86_pmu.event_mask);
15237@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
15238 break;
15239
15240 callchain_store(entry, frame.return_address);
15241- fp = frame.next_frame;
15242+ fp = (__force const void __user *)frame.next_frame;
15243 }
15244 }
15245
15246diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
15247index 898df97..9e82503 100644
15248--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
15249+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
15250@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
15251
15252 /* Interface defining a CPU specific perfctr watchdog */
15253 struct wd_ops {
15254- int (*reserve)(void);
15255- void (*unreserve)(void);
15256- int (*setup)(unsigned nmi_hz);
15257- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15258- void (*stop)(void);
15259+ int (* const reserve)(void);
15260+ void (* const unreserve)(void);
15261+ int (* const setup)(unsigned nmi_hz);
15262+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
15263+ void (* const stop)(void);
15264 unsigned perfctr;
15265 unsigned evntsel;
15266 u64 checkbit;
15267@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
15268 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
15269 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
15270
15271+/* cannot be const */
15272 static struct wd_ops intel_arch_wd_ops;
15273
15274 static int setup_intel_arch_watchdog(unsigned nmi_hz)
15275@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
15276 return 1;
15277 }
15278
15279+/* cannot be const */
15280 static struct wd_ops intel_arch_wd_ops __read_mostly = {
15281 .reserve = single_msr_reserve,
15282 .unreserve = single_msr_unreserve,
15283diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
15284index ff95824..2ffdcb5 100644
15285--- a/arch/x86/kernel/crash.c
15286+++ b/arch/x86/kernel/crash.c
15287@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
15288 regs = args->regs;
15289
15290 #ifdef CONFIG_X86_32
15291- if (!user_mode_vm(regs)) {
15292+ if (!user_mode(regs)) {
15293 crash_fixup_ss_esp(&fixed_regs, regs);
15294 regs = &fixed_regs;
15295 }
15296diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
15297index 37250fe..bf2ec74 100644
15298--- a/arch/x86/kernel/doublefault_32.c
15299+++ b/arch/x86/kernel/doublefault_32.c
15300@@ -11,7 +11,7 @@
15301
15302 #define DOUBLEFAULT_STACKSIZE (1024)
15303 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
15304-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
15305+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
15306
15307 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
15308
15309@@ -21,7 +21,7 @@ static void doublefault_fn(void)
15310 unsigned long gdt, tss;
15311
15312 store_gdt(&gdt_desc);
15313- gdt = gdt_desc.address;
15314+ gdt = (unsigned long)gdt_desc.address;
15315
15316 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
15317
15318@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
15319 /* 0x2 bit is always set */
15320 .flags = X86_EFLAGS_SF | 0x2,
15321 .sp = STACK_START,
15322- .es = __USER_DS,
15323+ .es = __KERNEL_DS,
15324 .cs = __KERNEL_CS,
15325 .ss = __KERNEL_DS,
15326- .ds = __USER_DS,
15327+ .ds = __KERNEL_DS,
15328 .fs = __KERNEL_PERCPU,
15329
15330 .__cr3 = __pa_nodebug(swapper_pg_dir),
15331diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
15332index 2d8a371..4fa6ae6 100644
15333--- a/arch/x86/kernel/dumpstack.c
15334+++ b/arch/x86/kernel/dumpstack.c
15335@@ -2,6 +2,9 @@
15336 * Copyright (C) 1991, 1992 Linus Torvalds
15337 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
15338 */
15339+#ifdef CONFIG_GRKERNSEC_HIDESYM
15340+#define __INCLUDED_BY_HIDESYM 1
15341+#endif
15342 #include <linux/kallsyms.h>
15343 #include <linux/kprobes.h>
15344 #include <linux/uaccess.h>
15345@@ -28,7 +31,7 @@ static int die_counter;
15346
15347 void printk_address(unsigned long address, int reliable)
15348 {
15349- printk(" [<%p>] %s%pS\n", (void *) address,
15350+ printk(" [<%p>] %s%pA\n", (void *) address,
15351 reliable ? "" : "? ", (void *) address);
15352 }
15353
15354@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
15355 static void
15356 print_ftrace_graph_addr(unsigned long addr, void *data,
15357 const struct stacktrace_ops *ops,
15358- struct thread_info *tinfo, int *graph)
15359+ struct task_struct *task, int *graph)
15360 {
15361- struct task_struct *task = tinfo->task;
15362 unsigned long ret_addr;
15363 int index = task->curr_ret_stack;
15364
15365@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15366 static inline void
15367 print_ftrace_graph_addr(unsigned long addr, void *data,
15368 const struct stacktrace_ops *ops,
15369- struct thread_info *tinfo, int *graph)
15370+ struct task_struct *task, int *graph)
15371 { }
15372 #endif
15373
15374@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
15375 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
15376 */
15377
15378-static inline int valid_stack_ptr(struct thread_info *tinfo,
15379- void *p, unsigned int size, void *end)
15380+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
15381 {
15382- void *t = tinfo;
15383 if (end) {
15384 if (p < end && p >= (end-THREAD_SIZE))
15385 return 1;
15386@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
15387 }
15388
15389 unsigned long
15390-print_context_stack(struct thread_info *tinfo,
15391+print_context_stack(struct task_struct *task, void *stack_start,
15392 unsigned long *stack, unsigned long bp,
15393 const struct stacktrace_ops *ops, void *data,
15394 unsigned long *end, int *graph)
15395 {
15396 struct stack_frame *frame = (struct stack_frame *)bp;
15397
15398- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
15399+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
15400 unsigned long addr;
15401
15402 addr = *stack;
15403@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
15404 } else {
15405 ops->address(data, addr, 0);
15406 }
15407- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
15408+ print_ftrace_graph_addr(addr, data, ops, task, graph);
15409 }
15410 stack++;
15411 }
15412@@ -180,7 +180,7 @@ void dump_stack(void)
15413 #endif
15414
15415 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
15416- current->pid, current->comm, print_tainted(),
15417+ task_pid_nr(current), current->comm, print_tainted(),
15418 init_utsname()->release,
15419 (int)strcspn(init_utsname()->version, " "),
15420 init_utsname()->version);
15421@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
15422 return flags;
15423 }
15424
15425+extern void gr_handle_kernel_exploit(void);
15426+
15427 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15428 {
15429 if (regs && kexec_should_crash(current))
15430@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
15431 panic("Fatal exception in interrupt");
15432 if (panic_on_oops)
15433 panic("Fatal exception");
15434- do_exit(signr);
15435+
15436+ gr_handle_kernel_exploit();
15437+
15438+ do_group_exit(signr);
15439 }
15440
15441 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
15442@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
15443 unsigned long flags = oops_begin();
15444 int sig = SIGSEGV;
15445
15446- if (!user_mode_vm(regs))
15447+ if (!user_mode(regs))
15448 report_bug(regs->ip, regs);
15449
15450 if (__die(str, regs, err))
15451diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
15452index 81086c2..13e8b17 100644
15453--- a/arch/x86/kernel/dumpstack.h
15454+++ b/arch/x86/kernel/dumpstack.h
15455@@ -15,7 +15,7 @@
15456 #endif
15457
15458 extern unsigned long
15459-print_context_stack(struct thread_info *tinfo,
15460+print_context_stack(struct task_struct *task, void *stack_start,
15461 unsigned long *stack, unsigned long bp,
15462 const struct stacktrace_ops *ops, void *data,
15463 unsigned long *end, int *graph);
15464diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
15465index f7dd2a7..504f53b 100644
15466--- a/arch/x86/kernel/dumpstack_32.c
15467+++ b/arch/x86/kernel/dumpstack_32.c
15468@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15469 #endif
15470
15471 for (;;) {
15472- struct thread_info *context;
15473+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15474+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15475
15476- context = (struct thread_info *)
15477- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
15478- bp = print_context_stack(context, stack, bp, ops,
15479- data, NULL, &graph);
15480-
15481- stack = (unsigned long *)context->previous_esp;
15482- if (!stack)
15483+ if (stack_start == task_stack_page(task))
15484 break;
15485+ stack = *(unsigned long **)stack_start;
15486 if (ops->stack(data, "IRQ") < 0)
15487 break;
15488 touch_nmi_watchdog();
15489@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
15490 * When in-kernel, we also print out the stack and code at the
15491 * time of the fault..
15492 */
15493- if (!user_mode_vm(regs)) {
15494+ if (!user_mode(regs)) {
15495 unsigned int code_prologue = code_bytes * 43 / 64;
15496 unsigned int code_len = code_bytes;
15497 unsigned char c;
15498 u8 *ip;
15499+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
15500
15501 printk(KERN_EMERG "Stack:\n");
15502 show_stack_log_lvl(NULL, regs, &regs->sp,
15503@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
15504
15505 printk(KERN_EMERG "Code: ");
15506
15507- ip = (u8 *)regs->ip - code_prologue;
15508+ ip = (u8 *)regs->ip - code_prologue + cs_base;
15509 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
15510 /* try starting at IP */
15511- ip = (u8 *)regs->ip;
15512+ ip = (u8 *)regs->ip + cs_base;
15513 code_len = code_len - code_prologue + 1;
15514 }
15515 for (i = 0; i < code_len; i++, ip++) {
15516@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
15517 printk(" Bad EIP value.");
15518 break;
15519 }
15520- if (ip == (u8 *)regs->ip)
15521+ if (ip == (u8 *)regs->ip + cs_base)
15522 printk("<%02x> ", c);
15523 else
15524 printk("%02x ", c);
15525@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
15526 printk("\n");
15527 }
15528
15529+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15530+void pax_check_alloca(unsigned long size)
15531+{
15532+ unsigned long sp = (unsigned long)&sp, stack_left;
15533+
15534+ /* all kernel stacks are of the same size */
15535+ stack_left = sp & (THREAD_SIZE - 1);
15536+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15537+}
15538+EXPORT_SYMBOL(pax_check_alloca);
15539+#endif
15540+
15541 int is_valid_bugaddr(unsigned long ip)
15542 {
15543 unsigned short ud2;
15544
15545+ ip = ktla_ktva(ip);
15546 if (ip < PAGE_OFFSET)
15547 return 0;
15548 if (probe_kernel_address((unsigned short *)ip, ud2))
15549diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
15550index a071e6b..36cd585 100644
15551--- a/arch/x86/kernel/dumpstack_64.c
15552+++ b/arch/x86/kernel/dumpstack_64.c
15553@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15554 unsigned long *irq_stack_end =
15555 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
15556 unsigned used = 0;
15557- struct thread_info *tinfo;
15558 int graph = 0;
15559+ void *stack_start;
15560
15561 if (!task)
15562 task = current;
15563@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15564 * current stack address. If the stacks consist of nested
15565 * exceptions
15566 */
15567- tinfo = task_thread_info(task);
15568 for (;;) {
15569 char *id;
15570 unsigned long *estack_end;
15571+
15572 estack_end = in_exception_stack(cpu, (unsigned long)stack,
15573 &used, &id);
15574
15575@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15576 if (ops->stack(data, id) < 0)
15577 break;
15578
15579- bp = print_context_stack(tinfo, stack, bp, ops,
15580+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
15581 data, estack_end, &graph);
15582 ops->stack(data, "<EOE>");
15583 /*
15584@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15585 if (stack >= irq_stack && stack < irq_stack_end) {
15586 if (ops->stack(data, "IRQ") < 0)
15587 break;
15588- bp = print_context_stack(tinfo, stack, bp,
15589+ bp = print_context_stack(task, irq_stack, stack, bp,
15590 ops, data, irq_stack_end, &graph);
15591 /*
15592 * We link to the next stack (which would be
15593@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
15594 /*
15595 * This handles the process stack:
15596 */
15597- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
15598+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
15599+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
15600 put_cpu();
15601 }
15602 EXPORT_SYMBOL(dump_trace);
15603@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
15604 return ud2 == 0x0b0f;
15605 }
15606
15607+
15608+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15609+void pax_check_alloca(unsigned long size)
15610+{
15611+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
15612+ unsigned cpu, used;
15613+ char *id;
15614+
15615+ /* check the process stack first */
15616+ stack_start = (unsigned long)task_stack_page(current);
15617+ stack_end = stack_start + THREAD_SIZE;
15618+ if (likely(stack_start <= sp && sp < stack_end)) {
15619+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
15620+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15621+ return;
15622+ }
15623+
15624+ cpu = get_cpu();
15625+
15626+ /* check the irq stacks */
15627+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
15628+ stack_start = stack_end - IRQ_STACK_SIZE;
15629+ if (stack_start <= sp && sp < stack_end) {
15630+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
15631+ put_cpu();
15632+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15633+ return;
15634+ }
15635+
15636+ /* check the exception stacks */
15637+ used = 0;
15638+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
15639+ stack_start = stack_end - EXCEPTION_STKSZ;
15640+ if (stack_end && stack_start <= sp && sp < stack_end) {
15641+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
15642+ put_cpu();
15643+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
15644+ return;
15645+ }
15646+
15647+ put_cpu();
15648+
15649+ /* unknown stack */
15650+ BUG();
15651+}
15652+EXPORT_SYMBOL(pax_check_alloca);
15653+#endif
15654diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
15655index a89739a..95e0c48 100644
15656--- a/arch/x86/kernel/e820.c
15657+++ b/arch/x86/kernel/e820.c
15658@@ -733,7 +733,7 @@ struct early_res {
15659 };
15660 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
15661 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
15662- {}
15663+ { 0, 0, {0}, 0 }
15664 };
15665
15666 static int __init find_overlapped_early(u64 start, u64 end)
15667diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
15668index b9c830c..1e41a96 100644
15669--- a/arch/x86/kernel/early_printk.c
15670+++ b/arch/x86/kernel/early_printk.c
15671@@ -7,6 +7,7 @@
15672 #include <linux/pci_regs.h>
15673 #include <linux/pci_ids.h>
15674 #include <linux/errno.h>
15675+#include <linux/sched.h>
15676 #include <asm/io.h>
15677 #include <asm/processor.h>
15678 #include <asm/fcntl.h>
15679@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
15680 int n;
15681 va_list ap;
15682
15683+ pax_track_stack();
15684+
15685 va_start(ap, fmt);
15686 n = vscnprintf(buf, sizeof(buf), fmt, ap);
15687 early_console->write(early_console, buf, n);
15688diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
15689index 5cab48e..b025f9b 100644
15690--- a/arch/x86/kernel/efi_32.c
15691+++ b/arch/x86/kernel/efi_32.c
15692@@ -38,70 +38,56 @@
15693 */
15694
15695 static unsigned long efi_rt_eflags;
15696-static pgd_t efi_bak_pg_dir_pointer[2];
15697+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
15698
15699-void efi_call_phys_prelog(void)
15700+void __init efi_call_phys_prelog(void)
15701 {
15702- unsigned long cr4;
15703- unsigned long temp;
15704 struct desc_ptr gdt_descr;
15705
15706+#ifdef CONFIG_PAX_KERNEXEC
15707+ struct desc_struct d;
15708+#endif
15709+
15710 local_irq_save(efi_rt_eflags);
15711
15712- /*
15713- * If I don't have PAE, I should just duplicate two entries in page
15714- * directory. If I have PAE, I just need to duplicate one entry in
15715- * page directory.
15716- */
15717- cr4 = read_cr4_safe();
15718-
15719- if (cr4 & X86_CR4_PAE) {
15720- efi_bak_pg_dir_pointer[0].pgd =
15721- swapper_pg_dir[pgd_index(0)].pgd;
15722- swapper_pg_dir[0].pgd =
15723- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15724- } else {
15725- efi_bak_pg_dir_pointer[0].pgd =
15726- swapper_pg_dir[pgd_index(0)].pgd;
15727- efi_bak_pg_dir_pointer[1].pgd =
15728- swapper_pg_dir[pgd_index(0x400000)].pgd;
15729- swapper_pg_dir[pgd_index(0)].pgd =
15730- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
15731- temp = PAGE_OFFSET + 0x400000;
15732- swapper_pg_dir[pgd_index(0x400000)].pgd =
15733- swapper_pg_dir[pgd_index(temp)].pgd;
15734- }
15735+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
15736+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15737+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15738
15739 /*
15740 * After the lock is released, the original page table is restored.
15741 */
15742 __flush_tlb_all();
15743
15744+#ifdef CONFIG_PAX_KERNEXEC
15745+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
15746+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15747+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
15748+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15749+#endif
15750+
15751 gdt_descr.address = __pa(get_cpu_gdt_table(0));
15752 gdt_descr.size = GDT_SIZE - 1;
15753 load_gdt(&gdt_descr);
15754 }
15755
15756-void efi_call_phys_epilog(void)
15757+void __init efi_call_phys_epilog(void)
15758 {
15759- unsigned long cr4;
15760 struct desc_ptr gdt_descr;
15761
15762+#ifdef CONFIG_PAX_KERNEXEC
15763+ struct desc_struct d;
15764+
15765+ memset(&d, 0, sizeof d);
15766+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15767+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15768+#endif
15769+
15770 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
15771 gdt_descr.size = GDT_SIZE - 1;
15772 load_gdt(&gdt_descr);
15773
15774- cr4 = read_cr4_safe();
15775-
15776- if (cr4 & X86_CR4_PAE) {
15777- swapper_pg_dir[pgd_index(0)].pgd =
15778- efi_bak_pg_dir_pointer[0].pgd;
15779- } else {
15780- swapper_pg_dir[pgd_index(0)].pgd =
15781- efi_bak_pg_dir_pointer[0].pgd;
15782- swapper_pg_dir[pgd_index(0x400000)].pgd =
15783- efi_bak_pg_dir_pointer[1].pgd;
15784- }
15785+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
15786
15787 /*
15788 * After the lock is released, the original page table is restored.
15789diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
15790index fbe66e6..c5c0dd2 100644
15791--- a/arch/x86/kernel/efi_stub_32.S
15792+++ b/arch/x86/kernel/efi_stub_32.S
15793@@ -6,7 +6,9 @@
15794 */
15795
15796 #include <linux/linkage.h>
15797+#include <linux/init.h>
15798 #include <asm/page_types.h>
15799+#include <asm/segment.h>
15800
15801 /*
15802 * efi_call_phys(void *, ...) is a function with variable parameters.
15803@@ -20,7 +22,7 @@
15804 * service functions will comply with gcc calling convention, too.
15805 */
15806
15807-.text
15808+__INIT
15809 ENTRY(efi_call_phys)
15810 /*
15811 * 0. The function can only be called in Linux kernel. So CS has been
15812@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
15813 * The mapping of lower virtual memory has been created in prelog and
15814 * epilog.
15815 */
15816- movl $1f, %edx
15817- subl $__PAGE_OFFSET, %edx
15818- jmp *%edx
15819+ movl $(__KERNEXEC_EFI_DS), %edx
15820+ mov %edx, %ds
15821+ mov %edx, %es
15822+ mov %edx, %ss
15823+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
15824 1:
15825
15826 /*
15827@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
15828 * parameter 2, ..., param n. To make things easy, we save the return
15829 * address of efi_call_phys in a global variable.
15830 */
15831- popl %edx
15832- movl %edx, saved_return_addr
15833- /* get the function pointer into ECX*/
15834- popl %ecx
15835- movl %ecx, efi_rt_function_ptr
15836- movl $2f, %edx
15837- subl $__PAGE_OFFSET, %edx
15838- pushl %edx
15839+ popl (saved_return_addr)
15840+ popl (efi_rt_function_ptr)
15841
15842 /*
15843 * 3. Clear PG bit in %CR0.
15844@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
15845 /*
15846 * 5. Call the physical function.
15847 */
15848- jmp *%ecx
15849+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
15850
15851-2:
15852 /*
15853 * 6. After EFI runtime service returns, control will return to
15854 * following instruction. We'd better readjust stack pointer first.
15855@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
15856 movl %cr0, %edx
15857 orl $0x80000000, %edx
15858 movl %edx, %cr0
15859- jmp 1f
15860-1:
15861+
15862 /*
15863 * 8. Now restore the virtual mode from flat mode by
15864 * adding EIP with PAGE_OFFSET.
15865 */
15866- movl $1f, %edx
15867- jmp *%edx
15868+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
15869 1:
15870+ movl $(__KERNEL_DS), %edx
15871+ mov %edx, %ds
15872+ mov %edx, %es
15873+ mov %edx, %ss
15874
15875 /*
15876 * 9. Balance the stack. And because EAX contain the return value,
15877 * we'd better not clobber it.
15878 */
15879- leal efi_rt_function_ptr, %edx
15880- movl (%edx), %ecx
15881- pushl %ecx
15882+ pushl (efi_rt_function_ptr)
15883
15884 /*
15885- * 10. Push the saved return address onto the stack and return.
15886+ * 10. Return to the saved return address.
15887 */
15888- leal saved_return_addr, %edx
15889- movl (%edx), %ecx
15890- pushl %ecx
15891- ret
15892+ jmpl *(saved_return_addr)
15893 ENDPROC(efi_call_phys)
15894 .previous
15895
15896-.data
15897+__INITDATA
15898 saved_return_addr:
15899 .long 0
15900 efi_rt_function_ptr:
15901diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
15902index 4c07cca..2c8427d 100644
15903--- a/arch/x86/kernel/efi_stub_64.S
15904+++ b/arch/x86/kernel/efi_stub_64.S
15905@@ -7,6 +7,7 @@
15906 */
15907
15908 #include <linux/linkage.h>
15909+#include <asm/alternative-asm.h>
15910
15911 #define SAVE_XMM \
15912 mov %rsp, %rax; \
15913@@ -40,6 +41,7 @@ ENTRY(efi_call0)
15914 call *%rdi
15915 addq $32, %rsp
15916 RESTORE_XMM
15917+ pax_force_retaddr 0, 1
15918 ret
15919 ENDPROC(efi_call0)
15920
15921@@ -50,6 +52,7 @@ ENTRY(efi_call1)
15922 call *%rdi
15923 addq $32, %rsp
15924 RESTORE_XMM
15925+ pax_force_retaddr 0, 1
15926 ret
15927 ENDPROC(efi_call1)
15928
15929@@ -60,6 +63,7 @@ ENTRY(efi_call2)
15930 call *%rdi
15931 addq $32, %rsp
15932 RESTORE_XMM
15933+ pax_force_retaddr 0, 1
15934 ret
15935 ENDPROC(efi_call2)
15936
15937@@ -71,6 +75,7 @@ ENTRY(efi_call3)
15938 call *%rdi
15939 addq $32, %rsp
15940 RESTORE_XMM
15941+ pax_force_retaddr 0, 1
15942 ret
15943 ENDPROC(efi_call3)
15944
15945@@ -83,6 +88,7 @@ ENTRY(efi_call4)
15946 call *%rdi
15947 addq $32, %rsp
15948 RESTORE_XMM
15949+ pax_force_retaddr 0, 1
15950 ret
15951 ENDPROC(efi_call4)
15952
15953@@ -96,6 +102,7 @@ ENTRY(efi_call5)
15954 call *%rdi
15955 addq $48, %rsp
15956 RESTORE_XMM
15957+ pax_force_retaddr 0, 1
15958 ret
15959 ENDPROC(efi_call5)
15960
15961@@ -112,5 +119,6 @@ ENTRY(efi_call6)
15962 call *%rdi
15963 addq $48, %rsp
15964 RESTORE_XMM
15965+ pax_force_retaddr 0, 1
15966 ret
15967 ENDPROC(efi_call6)
15968diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
15969index c097e7d..c689cf4 100644
15970--- a/arch/x86/kernel/entry_32.S
15971+++ b/arch/x86/kernel/entry_32.S
15972@@ -185,13 +185,146 @@
15973 /*CFI_REL_OFFSET gs, PT_GS*/
15974 .endm
15975 .macro SET_KERNEL_GS reg
15976+
15977+#ifdef CONFIG_CC_STACKPROTECTOR
15978 movl $(__KERNEL_STACK_CANARY), \reg
15979+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
15980+ movl $(__USER_DS), \reg
15981+#else
15982+ xorl \reg, \reg
15983+#endif
15984+
15985 movl \reg, %gs
15986 .endm
15987
15988 #endif /* CONFIG_X86_32_LAZY_GS */
15989
15990-.macro SAVE_ALL
15991+.macro pax_enter_kernel
15992+#ifdef CONFIG_PAX_KERNEXEC
15993+ call pax_enter_kernel
15994+#endif
15995+.endm
15996+
15997+.macro pax_exit_kernel
15998+#ifdef CONFIG_PAX_KERNEXEC
15999+ call pax_exit_kernel
16000+#endif
16001+.endm
16002+
16003+#ifdef CONFIG_PAX_KERNEXEC
16004+ENTRY(pax_enter_kernel)
16005+#ifdef CONFIG_PARAVIRT
16006+ pushl %eax
16007+ pushl %ecx
16008+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
16009+ mov %eax, %esi
16010+#else
16011+ mov %cr0, %esi
16012+#endif
16013+ bts $16, %esi
16014+ jnc 1f
16015+ mov %cs, %esi
16016+ cmp $__KERNEL_CS, %esi
16017+ jz 3f
16018+ ljmp $__KERNEL_CS, $3f
16019+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
16020+2:
16021+#ifdef CONFIG_PARAVIRT
16022+ mov %esi, %eax
16023+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16024+#else
16025+ mov %esi, %cr0
16026+#endif
16027+3:
16028+#ifdef CONFIG_PARAVIRT
16029+ popl %ecx
16030+ popl %eax
16031+#endif
16032+ ret
16033+ENDPROC(pax_enter_kernel)
16034+
16035+ENTRY(pax_exit_kernel)
16036+#ifdef CONFIG_PARAVIRT
16037+ pushl %eax
16038+ pushl %ecx
16039+#endif
16040+ mov %cs, %esi
16041+ cmp $__KERNEXEC_KERNEL_CS, %esi
16042+ jnz 2f
16043+#ifdef CONFIG_PARAVIRT
16044+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
16045+ mov %eax, %esi
16046+#else
16047+ mov %cr0, %esi
16048+#endif
16049+ btr $16, %esi
16050+ ljmp $__KERNEL_CS, $1f
16051+1:
16052+#ifdef CONFIG_PARAVIRT
16053+ mov %esi, %eax
16054+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
16055+#else
16056+ mov %esi, %cr0
16057+#endif
16058+2:
16059+#ifdef CONFIG_PARAVIRT
16060+ popl %ecx
16061+ popl %eax
16062+#endif
16063+ ret
16064+ENDPROC(pax_exit_kernel)
16065+#endif
16066+
16067+.macro pax_erase_kstack
16068+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16069+ call pax_erase_kstack
16070+#endif
16071+.endm
16072+
16073+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16074+/*
16075+ * ebp: thread_info
16076+ * ecx, edx: can be clobbered
16077+ */
16078+ENTRY(pax_erase_kstack)
16079+ pushl %edi
16080+ pushl %eax
16081+
16082+ mov TI_lowest_stack(%ebp), %edi
16083+ mov $-0xBEEF, %eax
16084+ std
16085+
16086+1: mov %edi, %ecx
16087+ and $THREAD_SIZE_asm - 1, %ecx
16088+ shr $2, %ecx
16089+ repne scasl
16090+ jecxz 2f
16091+
16092+ cmp $2*16, %ecx
16093+ jc 2f
16094+
16095+ mov $2*16, %ecx
16096+ repe scasl
16097+ jecxz 2f
16098+ jne 1b
16099+
16100+2: cld
16101+ mov %esp, %ecx
16102+ sub %edi, %ecx
16103+ shr $2, %ecx
16104+ rep stosl
16105+
16106+ mov TI_task_thread_sp0(%ebp), %edi
16107+ sub $128, %edi
16108+ mov %edi, TI_lowest_stack(%ebp)
16109+
16110+ popl %eax
16111+ popl %edi
16112+ ret
16113+ENDPROC(pax_erase_kstack)
16114+#endif
16115+
16116+.macro __SAVE_ALL _DS
16117 cld
16118 PUSH_GS
16119 pushl %fs
16120@@ -224,7 +357,7 @@
16121 pushl %ebx
16122 CFI_ADJUST_CFA_OFFSET 4
16123 CFI_REL_OFFSET ebx, 0
16124- movl $(__USER_DS), %edx
16125+ movl $\_DS, %edx
16126 movl %edx, %ds
16127 movl %edx, %es
16128 movl $(__KERNEL_PERCPU), %edx
16129@@ -232,6 +365,15 @@
16130 SET_KERNEL_GS %edx
16131 .endm
16132
16133+.macro SAVE_ALL
16134+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
16135+ __SAVE_ALL __KERNEL_DS
16136+ pax_enter_kernel
16137+#else
16138+ __SAVE_ALL __USER_DS
16139+#endif
16140+.endm
16141+
16142 .macro RESTORE_INT_REGS
16143 popl %ebx
16144 CFI_ADJUST_CFA_OFFSET -4
16145@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
16146 CFI_ADJUST_CFA_OFFSET -4
16147 jmp syscall_exit
16148 CFI_ENDPROC
16149-END(ret_from_fork)
16150+ENDPROC(ret_from_fork)
16151
16152 /*
16153 * Return to user mode is not as complex as all this looks,
16154@@ -352,7 +494,15 @@ check_userspace:
16155 movb PT_CS(%esp), %al
16156 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
16157 cmpl $USER_RPL, %eax
16158+
16159+#ifdef CONFIG_PAX_KERNEXEC
16160+ jae resume_userspace
16161+
16162+ PAX_EXIT_KERNEL
16163+ jmp resume_kernel
16164+#else
16165 jb resume_kernel # not returning to v8086 or userspace
16166+#endif
16167
16168 ENTRY(resume_userspace)
16169 LOCKDEP_SYS_EXIT
16170@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
16171 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
16172 # int/exception return?
16173 jne work_pending
16174- jmp restore_all
16175-END(ret_from_exception)
16176+ jmp restore_all_pax
16177+ENDPROC(ret_from_exception)
16178
16179 #ifdef CONFIG_PREEMPT
16180 ENTRY(resume_kernel)
16181@@ -380,7 +530,7 @@ need_resched:
16182 jz restore_all
16183 call preempt_schedule_irq
16184 jmp need_resched
16185-END(resume_kernel)
16186+ENDPROC(resume_kernel)
16187 #endif
16188 CFI_ENDPROC
16189
16190@@ -414,25 +564,36 @@ sysenter_past_esp:
16191 /*CFI_REL_OFFSET cs, 0*/
16192 /*
16193 * Push current_thread_info()->sysenter_return to the stack.
16194- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
16195- * pushed above; +8 corresponds to copy_thread's esp0 setting.
16196 */
16197- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
16198+ pushl $0
16199 CFI_ADJUST_CFA_OFFSET 4
16200 CFI_REL_OFFSET eip, 0
16201
16202 pushl %eax
16203 CFI_ADJUST_CFA_OFFSET 4
16204 SAVE_ALL
16205+ GET_THREAD_INFO(%ebp)
16206+ movl TI_sysenter_return(%ebp),%ebp
16207+ movl %ebp,PT_EIP(%esp)
16208 ENABLE_INTERRUPTS(CLBR_NONE)
16209
16210 /*
16211 * Load the potential sixth argument from user stack.
16212 * Careful about security.
16213 */
16214+ movl PT_OLDESP(%esp),%ebp
16215+
16216+#ifdef CONFIG_PAX_MEMORY_UDEREF
16217+ mov PT_OLDSS(%esp),%ds
16218+1: movl %ds:(%ebp),%ebp
16219+ push %ss
16220+ pop %ds
16221+#else
16222 cmpl $__PAGE_OFFSET-3,%ebp
16223 jae syscall_fault
16224 1: movl (%ebp),%ebp
16225+#endif
16226+
16227 movl %ebp,PT_EBP(%esp)
16228 .section __ex_table,"a"
16229 .align 4
16230@@ -455,12 +616,24 @@ sysenter_do_call:
16231 testl $_TIF_ALLWORK_MASK, %ecx
16232 jne sysexit_audit
16233 sysenter_exit:
16234+
16235+#ifdef CONFIG_PAX_RANDKSTACK
16236+ pushl_cfi %eax
16237+ movl %esp, %eax
16238+ call pax_randomize_kstack
16239+ popl_cfi %eax
16240+#endif
16241+
16242+ pax_erase_kstack
16243+
16244 /* if something modifies registers it must also disable sysexit */
16245 movl PT_EIP(%esp), %edx
16246 movl PT_OLDESP(%esp), %ecx
16247 xorl %ebp,%ebp
16248 TRACE_IRQS_ON
16249 1: mov PT_FS(%esp), %fs
16250+2: mov PT_DS(%esp), %ds
16251+3: mov PT_ES(%esp), %es
16252 PTGS_TO_GS
16253 ENABLE_INTERRUPTS_SYSEXIT
16254
16255@@ -477,6 +650,9 @@ sysenter_audit:
16256 movl %eax,%edx /* 2nd arg: syscall number */
16257 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
16258 call audit_syscall_entry
16259+
16260+ pax_erase_kstack
16261+
16262 pushl %ebx
16263 CFI_ADJUST_CFA_OFFSET 4
16264 movl PT_EAX(%esp),%eax /* reload syscall number */
16265@@ -504,11 +680,17 @@ sysexit_audit:
16266
16267 CFI_ENDPROC
16268 .pushsection .fixup,"ax"
16269-2: movl $0,PT_FS(%esp)
16270+4: movl $0,PT_FS(%esp)
16271+ jmp 1b
16272+5: movl $0,PT_DS(%esp)
16273+ jmp 1b
16274+6: movl $0,PT_ES(%esp)
16275 jmp 1b
16276 .section __ex_table,"a"
16277 .align 4
16278- .long 1b,2b
16279+ .long 1b,4b
16280+ .long 2b,5b
16281+ .long 3b,6b
16282 .popsection
16283 PTGS_TO_GS_EX
16284 ENDPROC(ia32_sysenter_target)
16285@@ -538,6 +720,15 @@ syscall_exit:
16286 testl $_TIF_ALLWORK_MASK, %ecx # current->work
16287 jne syscall_exit_work
16288
16289+restore_all_pax:
16290+
16291+#ifdef CONFIG_PAX_RANDKSTACK
16292+ movl %esp, %eax
16293+ call pax_randomize_kstack
16294+#endif
16295+
16296+ pax_erase_kstack
16297+
16298 restore_all:
16299 TRACE_IRQS_IRET
16300 restore_all_notrace:
16301@@ -602,10 +793,29 @@ ldt_ss:
16302 mov PT_OLDESP(%esp), %eax /* load userspace esp */
16303 mov %dx, %ax /* eax: new kernel esp */
16304 sub %eax, %edx /* offset (low word is 0) */
16305- PER_CPU(gdt_page, %ebx)
16306+#ifdef CONFIG_SMP
16307+ movl PER_CPU_VAR(cpu_number), %ebx
16308+ shll $PAGE_SHIFT_asm, %ebx
16309+ addl $cpu_gdt_table, %ebx
16310+#else
16311+ movl $cpu_gdt_table, %ebx
16312+#endif
16313 shr $16, %edx
16314+
16315+#ifdef CONFIG_PAX_KERNEXEC
16316+ mov %cr0, %esi
16317+ btr $16, %esi
16318+ mov %esi, %cr0
16319+#endif
16320+
16321 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
16322 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
16323+
16324+#ifdef CONFIG_PAX_KERNEXEC
16325+ bts $16, %esi
16326+ mov %esi, %cr0
16327+#endif
16328+
16329 pushl $__ESPFIX_SS
16330 CFI_ADJUST_CFA_OFFSET 4
16331 push %eax /* new kernel esp */
16332@@ -636,36 +846,30 @@ work_resched:
16333 movl TI_flags(%ebp), %ecx
16334 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
16335 # than syscall tracing?
16336- jz restore_all
16337+ jz restore_all_pax
16338 testb $_TIF_NEED_RESCHED, %cl
16339 jnz work_resched
16340
16341 work_notifysig: # deal with pending signals and
16342 # notify-resume requests
16343+ movl %esp, %eax
16344 #ifdef CONFIG_VM86
16345 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
16346- movl %esp, %eax
16347- jne work_notifysig_v86 # returning to kernel-space or
16348+ jz 1f # returning to kernel-space or
16349 # vm86-space
16350- xorl %edx, %edx
16351- call do_notify_resume
16352- jmp resume_userspace_sig
16353
16354- ALIGN
16355-work_notifysig_v86:
16356 pushl %ecx # save ti_flags for do_notify_resume
16357 CFI_ADJUST_CFA_OFFSET 4
16358 call save_v86_state # %eax contains pt_regs pointer
16359 popl %ecx
16360 CFI_ADJUST_CFA_OFFSET -4
16361 movl %eax, %esp
16362-#else
16363- movl %esp, %eax
16364+1:
16365 #endif
16366 xorl %edx, %edx
16367 call do_notify_resume
16368 jmp resume_userspace_sig
16369-END(work_pending)
16370+ENDPROC(work_pending)
16371
16372 # perform syscall exit tracing
16373 ALIGN
16374@@ -673,11 +877,14 @@ syscall_trace_entry:
16375 movl $-ENOSYS,PT_EAX(%esp)
16376 movl %esp, %eax
16377 call syscall_trace_enter
16378+
16379+ pax_erase_kstack
16380+
16381 /* What it returned is what we'll actually use. */
16382 cmpl $(nr_syscalls), %eax
16383 jnae syscall_call
16384 jmp syscall_exit
16385-END(syscall_trace_entry)
16386+ENDPROC(syscall_trace_entry)
16387
16388 # perform syscall exit tracing
16389 ALIGN
16390@@ -690,20 +897,24 @@ syscall_exit_work:
16391 movl %esp, %eax
16392 call syscall_trace_leave
16393 jmp resume_userspace
16394-END(syscall_exit_work)
16395+ENDPROC(syscall_exit_work)
16396 CFI_ENDPROC
16397
16398 RING0_INT_FRAME # can't unwind into user space anyway
16399 syscall_fault:
16400+#ifdef CONFIG_PAX_MEMORY_UDEREF
16401+ push %ss
16402+ pop %ds
16403+#endif
16404 GET_THREAD_INFO(%ebp)
16405 movl $-EFAULT,PT_EAX(%esp)
16406 jmp resume_userspace
16407-END(syscall_fault)
16408+ENDPROC(syscall_fault)
16409
16410 syscall_badsys:
16411 movl $-ENOSYS,PT_EAX(%esp)
16412 jmp resume_userspace
16413-END(syscall_badsys)
16414+ENDPROC(syscall_badsys)
16415 CFI_ENDPROC
16416
16417 /*
16418@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
16419 PTREGSCALL(vm86)
16420 PTREGSCALL(vm86old)
16421
16422+ ALIGN;
16423+ENTRY(kernel_execve)
16424+ push %ebp
16425+ sub $PT_OLDSS+4,%esp
16426+ push %edi
16427+ push %ecx
16428+ push %eax
16429+ lea 3*4(%esp),%edi
16430+ mov $PT_OLDSS/4+1,%ecx
16431+ xorl %eax,%eax
16432+ rep stosl
16433+ pop %eax
16434+ pop %ecx
16435+ pop %edi
16436+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
16437+ mov %eax,PT_EBX(%esp)
16438+ mov %edx,PT_ECX(%esp)
16439+ mov %ecx,PT_EDX(%esp)
16440+ mov %esp,%eax
16441+ call sys_execve
16442+ GET_THREAD_INFO(%ebp)
16443+ test %eax,%eax
16444+ jz syscall_exit
16445+ add $PT_OLDSS+4,%esp
16446+ pop %ebp
16447+ ret
16448+
16449 .macro FIXUP_ESPFIX_STACK
16450 /*
16451 * Switch back for ESPFIX stack to the normal zerobased stack
16452@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
16453 * normal stack and adjusts ESP with the matching offset.
16454 */
16455 /* fixup the stack */
16456- PER_CPU(gdt_page, %ebx)
16457+#ifdef CONFIG_SMP
16458+ movl PER_CPU_VAR(cpu_number), %ebx
16459+ shll $PAGE_SHIFT_asm, %ebx
16460+ addl $cpu_gdt_table, %ebx
16461+#else
16462+ movl $cpu_gdt_table, %ebx
16463+#endif
16464 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
16465 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
16466 shl $16, %eax
16467@@ -793,7 +1037,7 @@ vector=vector+1
16468 .endr
16469 2: jmp common_interrupt
16470 .endr
16471-END(irq_entries_start)
16472+ENDPROC(irq_entries_start)
16473
16474 .previous
16475 END(interrupt)
16476@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
16477 CFI_ADJUST_CFA_OFFSET 4
16478 jmp error_code
16479 CFI_ENDPROC
16480-END(coprocessor_error)
16481+ENDPROC(coprocessor_error)
16482
16483 ENTRY(simd_coprocessor_error)
16484 RING0_INT_FRAME
16485@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
16486 CFI_ADJUST_CFA_OFFSET 4
16487 jmp error_code
16488 CFI_ENDPROC
16489-END(simd_coprocessor_error)
16490+ENDPROC(simd_coprocessor_error)
16491
16492 ENTRY(device_not_available)
16493 RING0_INT_FRAME
16494@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
16495 CFI_ADJUST_CFA_OFFSET 4
16496 jmp error_code
16497 CFI_ENDPROC
16498-END(device_not_available)
16499+ENDPROC(device_not_available)
16500
16501 #ifdef CONFIG_PARAVIRT
16502 ENTRY(native_iret)
16503@@ -869,12 +1113,12 @@ ENTRY(native_iret)
16504 .align 4
16505 .long native_iret, iret_exc
16506 .previous
16507-END(native_iret)
16508+ENDPROC(native_iret)
16509
16510 ENTRY(native_irq_enable_sysexit)
16511 sti
16512 sysexit
16513-END(native_irq_enable_sysexit)
16514+ENDPROC(native_irq_enable_sysexit)
16515 #endif
16516
16517 ENTRY(overflow)
16518@@ -885,7 +1129,7 @@ ENTRY(overflow)
16519 CFI_ADJUST_CFA_OFFSET 4
16520 jmp error_code
16521 CFI_ENDPROC
16522-END(overflow)
16523+ENDPROC(overflow)
16524
16525 ENTRY(bounds)
16526 RING0_INT_FRAME
16527@@ -895,7 +1139,7 @@ ENTRY(bounds)
16528 CFI_ADJUST_CFA_OFFSET 4
16529 jmp error_code
16530 CFI_ENDPROC
16531-END(bounds)
16532+ENDPROC(bounds)
16533
16534 ENTRY(invalid_op)
16535 RING0_INT_FRAME
16536@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
16537 CFI_ADJUST_CFA_OFFSET 4
16538 jmp error_code
16539 CFI_ENDPROC
16540-END(invalid_op)
16541+ENDPROC(invalid_op)
16542
16543 ENTRY(coprocessor_segment_overrun)
16544 RING0_INT_FRAME
16545@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
16546 CFI_ADJUST_CFA_OFFSET 4
16547 jmp error_code
16548 CFI_ENDPROC
16549-END(coprocessor_segment_overrun)
16550+ENDPROC(coprocessor_segment_overrun)
16551
16552 ENTRY(invalid_TSS)
16553 RING0_EC_FRAME
16554@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
16555 CFI_ADJUST_CFA_OFFSET 4
16556 jmp error_code
16557 CFI_ENDPROC
16558-END(invalid_TSS)
16559+ENDPROC(invalid_TSS)
16560
16561 ENTRY(segment_not_present)
16562 RING0_EC_FRAME
16563@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
16564 CFI_ADJUST_CFA_OFFSET 4
16565 jmp error_code
16566 CFI_ENDPROC
16567-END(segment_not_present)
16568+ENDPROC(segment_not_present)
16569
16570 ENTRY(stack_segment)
16571 RING0_EC_FRAME
16572@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
16573 CFI_ADJUST_CFA_OFFSET 4
16574 jmp error_code
16575 CFI_ENDPROC
16576-END(stack_segment)
16577+ENDPROC(stack_segment)
16578
16579 ENTRY(alignment_check)
16580 RING0_EC_FRAME
16581@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
16582 CFI_ADJUST_CFA_OFFSET 4
16583 jmp error_code
16584 CFI_ENDPROC
16585-END(alignment_check)
16586+ENDPROC(alignment_check)
16587
16588 ENTRY(divide_error)
16589 RING0_INT_FRAME
16590@@ -957,7 +1201,7 @@ ENTRY(divide_error)
16591 CFI_ADJUST_CFA_OFFSET 4
16592 jmp error_code
16593 CFI_ENDPROC
16594-END(divide_error)
16595+ENDPROC(divide_error)
16596
16597 #ifdef CONFIG_X86_MCE
16598 ENTRY(machine_check)
16599@@ -968,7 +1212,7 @@ ENTRY(machine_check)
16600 CFI_ADJUST_CFA_OFFSET 4
16601 jmp error_code
16602 CFI_ENDPROC
16603-END(machine_check)
16604+ENDPROC(machine_check)
16605 #endif
16606
16607 ENTRY(spurious_interrupt_bug)
16608@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
16609 CFI_ADJUST_CFA_OFFSET 4
16610 jmp error_code
16611 CFI_ENDPROC
16612-END(spurious_interrupt_bug)
16613+ENDPROC(spurious_interrupt_bug)
16614
16615 ENTRY(kernel_thread_helper)
16616 pushl $0 # fake return address for unwinder
16617@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
16618
16619 ENTRY(mcount)
16620 ret
16621-END(mcount)
16622+ENDPROC(mcount)
16623
16624 ENTRY(ftrace_caller)
16625 cmpl $0, function_trace_stop
16626@@ -1124,7 +1368,7 @@ ftrace_graph_call:
16627 .globl ftrace_stub
16628 ftrace_stub:
16629 ret
16630-END(ftrace_caller)
16631+ENDPROC(ftrace_caller)
16632
16633 #else /* ! CONFIG_DYNAMIC_FTRACE */
16634
16635@@ -1160,7 +1404,7 @@ trace:
16636 popl %ecx
16637 popl %eax
16638 jmp ftrace_stub
16639-END(mcount)
16640+ENDPROC(mcount)
16641 #endif /* CONFIG_DYNAMIC_FTRACE */
16642 #endif /* CONFIG_FUNCTION_TRACER */
16643
16644@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
16645 popl %ecx
16646 popl %eax
16647 ret
16648-END(ftrace_graph_caller)
16649+ENDPROC(ftrace_graph_caller)
16650
16651 .globl return_to_handler
16652 return_to_handler:
16653@@ -1198,7 +1442,6 @@ return_to_handler:
16654 ret
16655 #endif
16656
16657-.section .rodata,"a"
16658 #include "syscall_table_32.S"
16659
16660 syscall_table_size=(.-sys_call_table)
16661@@ -1255,15 +1498,18 @@ error_code:
16662 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
16663 REG_TO_PTGS %ecx
16664 SET_KERNEL_GS %ecx
16665- movl $(__USER_DS), %ecx
16666+ movl $(__KERNEL_DS), %ecx
16667 movl %ecx, %ds
16668 movl %ecx, %es
16669+
16670+ pax_enter_kernel
16671+
16672 TRACE_IRQS_OFF
16673 movl %esp,%eax # pt_regs pointer
16674 call *%edi
16675 jmp ret_from_exception
16676 CFI_ENDPROC
16677-END(page_fault)
16678+ENDPROC(page_fault)
16679
16680 /*
16681 * Debug traps and NMI can happen at the one SYSENTER instruction
16682@@ -1309,7 +1555,7 @@ debug_stack_correct:
16683 call do_debug
16684 jmp ret_from_exception
16685 CFI_ENDPROC
16686-END(debug)
16687+ENDPROC(debug)
16688
16689 /*
16690 * NMI is doubly nasty. It can happen _while_ we're handling
16691@@ -1351,6 +1597,9 @@ nmi_stack_correct:
16692 xorl %edx,%edx # zero error code
16693 movl %esp,%eax # pt_regs pointer
16694 call do_nmi
16695+
16696+ pax_exit_kernel
16697+
16698 jmp restore_all_notrace
16699 CFI_ENDPROC
16700
16701@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
16702 FIXUP_ESPFIX_STACK # %eax == %esp
16703 xorl %edx,%edx # zero error code
16704 call do_nmi
16705+
16706+ pax_exit_kernel
16707+
16708 RESTORE_REGS
16709 lss 12+4(%esp), %esp # back to espfix stack
16710 CFI_ADJUST_CFA_OFFSET -24
16711 jmp irq_return
16712 CFI_ENDPROC
16713-END(nmi)
16714+ENDPROC(nmi)
16715
16716 ENTRY(int3)
16717 RING0_INT_FRAME
16718@@ -1409,7 +1661,7 @@ ENTRY(int3)
16719 call do_int3
16720 jmp ret_from_exception
16721 CFI_ENDPROC
16722-END(int3)
16723+ENDPROC(int3)
16724
16725 ENTRY(general_protection)
16726 RING0_EC_FRAME
16727@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
16728 CFI_ADJUST_CFA_OFFSET 4
16729 jmp error_code
16730 CFI_ENDPROC
16731-END(general_protection)
16732+ENDPROC(general_protection)
16733
16734 /*
16735 * End of kprobes section
16736diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
16737index 34a56a9..74613c5 100644
16738--- a/arch/x86/kernel/entry_64.S
16739+++ b/arch/x86/kernel/entry_64.S
16740@@ -53,6 +53,8 @@
16741 #include <asm/paravirt.h>
16742 #include <asm/ftrace.h>
16743 #include <asm/percpu.h>
16744+#include <asm/pgtable.h>
16745+#include <asm/alternative-asm.h>
16746
16747 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
16748 #include <linux/elf-em.h>
16749@@ -64,8 +66,9 @@
16750 #ifdef CONFIG_FUNCTION_TRACER
16751 #ifdef CONFIG_DYNAMIC_FTRACE
16752 ENTRY(mcount)
16753+ pax_force_retaddr
16754 retq
16755-END(mcount)
16756+ENDPROC(mcount)
16757
16758 ENTRY(ftrace_caller)
16759 cmpl $0, function_trace_stop
16760@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
16761 #endif
16762
16763 GLOBAL(ftrace_stub)
16764+ pax_force_retaddr
16765 retq
16766-END(ftrace_caller)
16767+ENDPROC(ftrace_caller)
16768
16769 #else /* ! CONFIG_DYNAMIC_FTRACE */
16770 ENTRY(mcount)
16771@@ -108,6 +112,7 @@ ENTRY(mcount)
16772 #endif
16773
16774 GLOBAL(ftrace_stub)
16775+ pax_force_retaddr
16776 retq
16777
16778 trace:
16779@@ -117,12 +122,13 @@ trace:
16780 movq 8(%rbp), %rsi
16781 subq $MCOUNT_INSN_SIZE, %rdi
16782
16783+ pax_force_fptr ftrace_trace_function
16784 call *ftrace_trace_function
16785
16786 MCOUNT_RESTORE_FRAME
16787
16788 jmp ftrace_stub
16789-END(mcount)
16790+ENDPROC(mcount)
16791 #endif /* CONFIG_DYNAMIC_FTRACE */
16792 #endif /* CONFIG_FUNCTION_TRACER */
16793
16794@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
16795
16796 MCOUNT_RESTORE_FRAME
16797
16798+ pax_force_retaddr
16799 retq
16800-END(ftrace_graph_caller)
16801+ENDPROC(ftrace_graph_caller)
16802
16803 GLOBAL(return_to_handler)
16804 subq $24, %rsp
16805@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
16806 movq 8(%rsp), %rdx
16807 movq (%rsp), %rax
16808 addq $16, %rsp
16809+ pax_force_retaddr
16810 retq
16811 #endif
16812
16813@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
16814 ENDPROC(native_usergs_sysret64)
16815 #endif /* CONFIG_PARAVIRT */
16816
16817+ .macro ljmpq sel, off
16818+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
16819+ .byte 0x48; ljmp *1234f(%rip)
16820+ .pushsection .rodata
16821+ .align 16
16822+ 1234: .quad \off; .word \sel
16823+ .popsection
16824+#else
16825+ pushq $\sel
16826+ pushq $\off
16827+ lretq
16828+#endif
16829+ .endm
16830+
16831+ .macro pax_enter_kernel
16832+ pax_set_fptr_mask
16833+#ifdef CONFIG_PAX_KERNEXEC
16834+ call pax_enter_kernel
16835+#endif
16836+ .endm
16837+
16838+ .macro pax_exit_kernel
16839+#ifdef CONFIG_PAX_KERNEXEC
16840+ call pax_exit_kernel
16841+#endif
16842+ .endm
16843+
16844+#ifdef CONFIG_PAX_KERNEXEC
16845+ENTRY(pax_enter_kernel)
16846+ pushq %rdi
16847+
16848+#ifdef CONFIG_PARAVIRT
16849+ PV_SAVE_REGS(CLBR_RDI)
16850+#endif
16851+
16852+ GET_CR0_INTO_RDI
16853+ bts $16,%rdi
16854+ jnc 3f
16855+ mov %cs,%edi
16856+ cmp $__KERNEL_CS,%edi
16857+ jnz 2f
16858+1:
16859+
16860+#ifdef CONFIG_PARAVIRT
16861+ PV_RESTORE_REGS(CLBR_RDI)
16862+#endif
16863+
16864+ popq %rdi
16865+ pax_force_retaddr
16866+ retq
16867+
16868+2: ljmpq __KERNEL_CS,1f
16869+3: ljmpq __KERNEXEC_KERNEL_CS,4f
16870+4: SET_RDI_INTO_CR0
16871+ jmp 1b
16872+ENDPROC(pax_enter_kernel)
16873+
16874+ENTRY(pax_exit_kernel)
16875+ pushq %rdi
16876+
16877+#ifdef CONFIG_PARAVIRT
16878+ PV_SAVE_REGS(CLBR_RDI)
16879+#endif
16880+
16881+ mov %cs,%rdi
16882+ cmp $__KERNEXEC_KERNEL_CS,%edi
16883+ jz 2f
16884+1:
16885+
16886+#ifdef CONFIG_PARAVIRT
16887+ PV_RESTORE_REGS(CLBR_RDI);
16888+#endif
16889+
16890+ popq %rdi
16891+ pax_force_retaddr
16892+ retq
16893+
16894+2: GET_CR0_INTO_RDI
16895+ btr $16,%rdi
16896+ ljmpq __KERNEL_CS,3f
16897+3: SET_RDI_INTO_CR0
16898+ jmp 1b
16899+#ifdef CONFIG_PARAVIRT
16900+ PV_RESTORE_REGS(CLBR_RDI);
16901+#endif
16902+
16903+ popq %rdi
16904+ pax_force_retaddr
16905+ retq
16906+ENDPROC(pax_exit_kernel)
16907+#endif
16908+
16909+ .macro pax_enter_kernel_user
16910+ pax_set_fptr_mask
16911+#ifdef CONFIG_PAX_MEMORY_UDEREF
16912+ call pax_enter_kernel_user
16913+#endif
16914+ .endm
16915+
16916+ .macro pax_exit_kernel_user
16917+#ifdef CONFIG_PAX_MEMORY_UDEREF
16918+ call pax_exit_kernel_user
16919+#endif
16920+#ifdef CONFIG_PAX_RANDKSTACK
16921+ pushq %rax
16922+ call pax_randomize_kstack
16923+ popq %rax
16924+#endif
16925+ .endm
16926+
16927+#ifdef CONFIG_PAX_MEMORY_UDEREF
16928+ENTRY(pax_enter_kernel_user)
16929+ pushq %rdi
16930+ pushq %rbx
16931+
16932+#ifdef CONFIG_PARAVIRT
16933+ PV_SAVE_REGS(CLBR_RDI)
16934+#endif
16935+
16936+ GET_CR3_INTO_RDI
16937+ mov %rdi,%rbx
16938+ add $__START_KERNEL_map,%rbx
16939+ sub phys_base(%rip),%rbx
16940+
16941+#ifdef CONFIG_PARAVIRT
16942+ pushq %rdi
16943+ cmpl $0, pv_info+PARAVIRT_enabled
16944+ jz 1f
16945+ i = 0
16946+ .rept USER_PGD_PTRS
16947+ mov i*8(%rbx),%rsi
16948+ mov $0,%sil
16949+ lea i*8(%rbx),%rdi
16950+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
16951+ i = i + 1
16952+ .endr
16953+ jmp 2f
16954+1:
16955+#endif
16956+
16957+ i = 0
16958+ .rept USER_PGD_PTRS
16959+ movb $0,i*8(%rbx)
16960+ i = i + 1
16961+ .endr
16962+
16963+#ifdef CONFIG_PARAVIRT
16964+2: popq %rdi
16965+#endif
16966+ SET_RDI_INTO_CR3
16967+
16968+#ifdef CONFIG_PAX_KERNEXEC
16969+ GET_CR0_INTO_RDI
16970+ bts $16,%rdi
16971+ SET_RDI_INTO_CR0
16972+#endif
16973+
16974+#ifdef CONFIG_PARAVIRT
16975+ PV_RESTORE_REGS(CLBR_RDI)
16976+#endif
16977+
16978+ popq %rbx
16979+ popq %rdi
16980+ pax_force_retaddr
16981+ retq
16982+ENDPROC(pax_enter_kernel_user)
16983+
16984+ENTRY(pax_exit_kernel_user)
16985+ push %rdi
16986+
16987+#ifdef CONFIG_PARAVIRT
16988+ pushq %rbx
16989+ PV_SAVE_REGS(CLBR_RDI)
16990+#endif
16991+
16992+#ifdef CONFIG_PAX_KERNEXEC
16993+ GET_CR0_INTO_RDI
16994+ btr $16,%rdi
16995+ SET_RDI_INTO_CR0
16996+#endif
16997+
16998+ GET_CR3_INTO_RDI
16999+ add $__START_KERNEL_map,%rdi
17000+ sub phys_base(%rip),%rdi
17001+
17002+#ifdef CONFIG_PARAVIRT
17003+ cmpl $0, pv_info+PARAVIRT_enabled
17004+ jz 1f
17005+ mov %rdi,%rbx
17006+ i = 0
17007+ .rept USER_PGD_PTRS
17008+ mov i*8(%rbx),%rsi
17009+ mov $0x67,%sil
17010+ lea i*8(%rbx),%rdi
17011+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
17012+ i = i + 1
17013+ .endr
17014+ jmp 2f
17015+1:
17016+#endif
17017+
17018+ i = 0
17019+ .rept USER_PGD_PTRS
17020+ movb $0x67,i*8(%rdi)
17021+ i = i + 1
17022+ .endr
17023+
17024+#ifdef CONFIG_PARAVIRT
17025+2: PV_RESTORE_REGS(CLBR_RDI)
17026+ popq %rbx
17027+#endif
17028+
17029+ popq %rdi
17030+ pax_force_retaddr
17031+ retq
17032+ENDPROC(pax_exit_kernel_user)
17033+#endif
17034+
17035+.macro pax_erase_kstack
17036+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17037+ call pax_erase_kstack
17038+#endif
17039+.endm
17040+
17041+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17042+/*
17043+ * r11: thread_info
17044+ * rcx, rdx: can be clobbered
17045+ */
17046+ENTRY(pax_erase_kstack)
17047+ pushq %rdi
17048+ pushq %rax
17049+ pushq %r11
17050+
17051+ GET_THREAD_INFO(%r11)
17052+ mov TI_lowest_stack(%r11), %rdi
17053+ mov $-0xBEEF, %rax
17054+ std
17055+
17056+1: mov %edi, %ecx
17057+ and $THREAD_SIZE_asm - 1, %ecx
17058+ shr $3, %ecx
17059+ repne scasq
17060+ jecxz 2f
17061+
17062+ cmp $2*8, %ecx
17063+ jc 2f
17064+
17065+ mov $2*8, %ecx
17066+ repe scasq
17067+ jecxz 2f
17068+ jne 1b
17069+
17070+2: cld
17071+ mov %esp, %ecx
17072+ sub %edi, %ecx
17073+
17074+ cmp $THREAD_SIZE_asm, %rcx
17075+ jb 3f
17076+ ud2
17077+3:
17078+
17079+ shr $3, %ecx
17080+ rep stosq
17081+
17082+ mov TI_task_thread_sp0(%r11), %rdi
17083+ sub $256, %rdi
17084+ mov %rdi, TI_lowest_stack(%r11)
17085+
17086+ popq %r11
17087+ popq %rax
17088+ popq %rdi
17089+ pax_force_retaddr
17090+ ret
17091+ENDPROC(pax_erase_kstack)
17092+#endif
17093
17094 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
17095 #ifdef CONFIG_TRACE_IRQFLAGS
17096@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
17097 .endm
17098
17099 .macro UNFAKE_STACK_FRAME
17100- addq $8*6, %rsp
17101- CFI_ADJUST_CFA_OFFSET -(6*8)
17102+ addq $8*6 + ARG_SKIP, %rsp
17103+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
17104 .endm
17105
17106 /*
17107@@ -317,7 +601,7 @@ ENTRY(save_args)
17108 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
17109 movq_cfi rbp, 8 /* push %rbp */
17110 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
17111- testl $3, CS(%rdi)
17112+ testb $3, CS(%rdi)
17113 je 1f
17114 SWAPGS
17115 /*
17116@@ -337,9 +621,10 @@ ENTRY(save_args)
17117 * We entered an interrupt context - irqs are off:
17118 */
17119 2: TRACE_IRQS_OFF
17120+ pax_force_retaddr_bts
17121 ret
17122 CFI_ENDPROC
17123-END(save_args)
17124+ENDPROC(save_args)
17125
17126 ENTRY(save_rest)
17127 PARTIAL_FRAME 1 REST_SKIP+8
17128@@ -352,9 +637,10 @@ ENTRY(save_rest)
17129 movq_cfi r15, R15+16
17130 movq %r11, 8(%rsp) /* return address */
17131 FIXUP_TOP_OF_STACK %r11, 16
17132+ pax_force_retaddr
17133 ret
17134 CFI_ENDPROC
17135-END(save_rest)
17136+ENDPROC(save_rest)
17137
17138 /* save complete stack frame */
17139 .pushsection .kprobes.text, "ax"
17140@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
17141 js 1f /* negative -> in kernel */
17142 SWAPGS
17143 xorl %ebx,%ebx
17144-1: ret
17145+1: pax_force_retaddr_bts
17146+ ret
17147 CFI_ENDPROC
17148-END(save_paranoid)
17149+ENDPROC(save_paranoid)
17150 .popsection
17151
17152 /*
17153@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
17154
17155 RESTORE_REST
17156
17157- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17158+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
17159 je int_ret_from_sys_call
17160
17161 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
17162@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
17163 jmp ret_from_sys_call # go to the SYSRET fastpath
17164
17165 CFI_ENDPROC
17166-END(ret_from_fork)
17167+ENDPROC(ret_from_fork)
17168
17169 /*
17170 * System call entry. Upto 6 arguments in registers are supported.
17171@@ -455,7 +742,7 @@ END(ret_from_fork)
17172 ENTRY(system_call)
17173 CFI_STARTPROC simple
17174 CFI_SIGNAL_FRAME
17175- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
17176+ CFI_DEF_CFA rsp,0
17177 CFI_REGISTER rip,rcx
17178 /*CFI_REGISTER rflags,r11*/
17179 SWAPGS_UNSAFE_STACK
17180@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
17181
17182 movq %rsp,PER_CPU_VAR(old_rsp)
17183 movq PER_CPU_VAR(kernel_stack),%rsp
17184+ SAVE_ARGS 8*6,1
17185+ pax_enter_kernel_user
17186 /*
17187 * No need to follow this irqs off/on section - it's straight
17188 * and short:
17189 */
17190 ENABLE_INTERRUPTS(CLBR_NONE)
17191- SAVE_ARGS 8,1
17192 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
17193 movq %rcx,RIP-ARGOFFSET(%rsp)
17194 CFI_REL_OFFSET rip,RIP-ARGOFFSET
17195@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
17196 system_call_fastpath:
17197 cmpq $__NR_syscall_max,%rax
17198 ja badsys
17199- movq %r10,%rcx
17200+ movq R10-ARGOFFSET(%rsp),%rcx
17201 call *sys_call_table(,%rax,8) # XXX: rip relative
17202 movq %rax,RAX-ARGOFFSET(%rsp)
17203 /*
17204@@ -502,6 +790,8 @@ sysret_check:
17205 andl %edi,%edx
17206 jnz sysret_careful
17207 CFI_REMEMBER_STATE
17208+ pax_exit_kernel_user
17209+ pax_erase_kstack
17210 /*
17211 * sysretq will re-enable interrupts:
17212 */
17213@@ -555,14 +845,18 @@ badsys:
17214 * jump back to the normal fast path.
17215 */
17216 auditsys:
17217- movq %r10,%r9 /* 6th arg: 4th syscall arg */
17218+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
17219 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
17220 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
17221 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
17222 movq %rax,%rsi /* 2nd arg: syscall number */
17223 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
17224 call audit_syscall_entry
17225+
17226+ pax_erase_kstack
17227+
17228 LOAD_ARGS 0 /* reload call-clobbered registers */
17229+ pax_set_fptr_mask
17230 jmp system_call_fastpath
17231
17232 /*
17233@@ -592,16 +886,20 @@ tracesys:
17234 FIXUP_TOP_OF_STACK %rdi
17235 movq %rsp,%rdi
17236 call syscall_trace_enter
17237+
17238+ pax_erase_kstack
17239+
17240 /*
17241 * Reload arg registers from stack in case ptrace changed them.
17242 * We don't reload %rax because syscall_trace_enter() returned
17243 * the value it wants us to use in the table lookup.
17244 */
17245 LOAD_ARGS ARGOFFSET, 1
17246+ pax_set_fptr_mask
17247 RESTORE_REST
17248 cmpq $__NR_syscall_max,%rax
17249 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
17250- movq %r10,%rcx /* fixup for C */
17251+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
17252 call *sys_call_table(,%rax,8)
17253 movq %rax,RAX-ARGOFFSET(%rsp)
17254 /* Use IRET because user could have changed frame */
17255@@ -613,7 +911,7 @@ tracesys:
17256 GLOBAL(int_ret_from_sys_call)
17257 DISABLE_INTERRUPTS(CLBR_NONE)
17258 TRACE_IRQS_OFF
17259- testl $3,CS-ARGOFFSET(%rsp)
17260+ testb $3,CS-ARGOFFSET(%rsp)
17261 je retint_restore_args
17262 movl $_TIF_ALLWORK_MASK,%edi
17263 /* edi: mask to check */
17264@@ -624,6 +922,7 @@ GLOBAL(int_with_check)
17265 andl %edi,%edx
17266 jnz int_careful
17267 andl $~TS_COMPAT,TI_status(%rcx)
17268+ pax_erase_kstack
17269 jmp retint_swapgs
17270
17271 /* Either reschedule or signal or syscall exit tracking needed. */
17272@@ -674,7 +973,7 @@ int_restore_rest:
17273 TRACE_IRQS_OFF
17274 jmp int_with_check
17275 CFI_ENDPROC
17276-END(system_call)
17277+ENDPROC(system_call)
17278
17279 /*
17280 * Certain special system calls that need to save a complete full stack frame.
17281@@ -690,7 +989,7 @@ ENTRY(\label)
17282 call \func
17283 jmp ptregscall_common
17284 CFI_ENDPROC
17285-END(\label)
17286+ENDPROC(\label)
17287 .endm
17288
17289 PTREGSCALL stub_clone, sys_clone, %r8
17290@@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
17291 movq_cfi_restore R12+8, r12
17292 movq_cfi_restore RBP+8, rbp
17293 movq_cfi_restore RBX+8, rbx
17294+ pax_force_retaddr
17295 ret $REST_SKIP /* pop extended registers */
17296 CFI_ENDPROC
17297-END(ptregscall_common)
17298+ENDPROC(ptregscall_common)
17299
17300 ENTRY(stub_execve)
17301 CFI_STARTPROC
17302@@ -726,7 +1026,7 @@ ENTRY(stub_execve)
17303 RESTORE_REST
17304 jmp int_ret_from_sys_call
17305 CFI_ENDPROC
17306-END(stub_execve)
17307+ENDPROC(stub_execve)
17308
17309 /*
17310 * sigreturn is special because it needs to restore all registers on return.
17311@@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
17312 RESTORE_REST
17313 jmp int_ret_from_sys_call
17314 CFI_ENDPROC
17315-END(stub_rt_sigreturn)
17316+ENDPROC(stub_rt_sigreturn)
17317
17318 /*
17319 * Build the entry stubs and pointer table with some assembler magic.
17320@@ -780,7 +1080,7 @@ vector=vector+1
17321 2: jmp common_interrupt
17322 .endr
17323 CFI_ENDPROC
17324-END(irq_entries_start)
17325+ENDPROC(irq_entries_start)
17326
17327 .previous
17328 END(interrupt)
17329@@ -800,6 +1100,16 @@ END(interrupt)
17330 CFI_ADJUST_CFA_OFFSET 10*8
17331 call save_args
17332 PARTIAL_FRAME 0
17333+#ifdef CONFIG_PAX_MEMORY_UDEREF
17334+ testb $3, CS(%rdi)
17335+ jnz 1f
17336+ pax_enter_kernel
17337+ jmp 2f
17338+1: pax_enter_kernel_user
17339+2:
17340+#else
17341+ pax_enter_kernel
17342+#endif
17343 call \func
17344 .endm
17345
17346@@ -822,7 +1132,7 @@ ret_from_intr:
17347 CFI_ADJUST_CFA_OFFSET -8
17348 exit_intr:
17349 GET_THREAD_INFO(%rcx)
17350- testl $3,CS-ARGOFFSET(%rsp)
17351+ testb $3,CS-ARGOFFSET(%rsp)
17352 je retint_kernel
17353
17354 /* Interrupt came from user space */
17355@@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
17356 * The iretq could re-enable interrupts:
17357 */
17358 DISABLE_INTERRUPTS(CLBR_ANY)
17359+ pax_exit_kernel_user
17360 TRACE_IRQS_IRETQ
17361 SWAPGS
17362 jmp restore_args
17363
17364 retint_restore_args: /* return to kernel space */
17365 DISABLE_INTERRUPTS(CLBR_ANY)
17366+ pax_exit_kernel
17367+ pax_force_retaddr RIP-ARGOFFSET
17368 /*
17369 * The iretq could re-enable interrupts:
17370 */
17371@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
17372 #endif
17373
17374 CFI_ENDPROC
17375-END(common_interrupt)
17376+ENDPROC(common_interrupt)
17377
17378 /*
17379 * APIC interrupts.
17380@@ -953,7 +1266,7 @@ ENTRY(\sym)
17381 interrupt \do_sym
17382 jmp ret_from_intr
17383 CFI_ENDPROC
17384-END(\sym)
17385+ENDPROC(\sym)
17386 .endm
17387
17388 #ifdef CONFIG_SMP
17389@@ -1032,12 +1345,22 @@ ENTRY(\sym)
17390 CFI_ADJUST_CFA_OFFSET 15*8
17391 call error_entry
17392 DEFAULT_FRAME 0
17393+#ifdef CONFIG_PAX_MEMORY_UDEREF
17394+ testb $3, CS(%rsp)
17395+ jnz 1f
17396+ pax_enter_kernel
17397+ jmp 2f
17398+1: pax_enter_kernel_user
17399+2:
17400+#else
17401+ pax_enter_kernel
17402+#endif
17403 movq %rsp,%rdi /* pt_regs pointer */
17404 xorl %esi,%esi /* no error code */
17405 call \do_sym
17406 jmp error_exit /* %ebx: no swapgs flag */
17407 CFI_ENDPROC
17408-END(\sym)
17409+ENDPROC(\sym)
17410 .endm
17411
17412 .macro paranoidzeroentry sym do_sym
17413@@ -1049,12 +1372,22 @@ ENTRY(\sym)
17414 subq $15*8, %rsp
17415 call save_paranoid
17416 TRACE_IRQS_OFF
17417+#ifdef CONFIG_PAX_MEMORY_UDEREF
17418+ testb $3, CS(%rsp)
17419+ jnz 1f
17420+ pax_enter_kernel
17421+ jmp 2f
17422+1: pax_enter_kernel_user
17423+2:
17424+#else
17425+ pax_enter_kernel
17426+#endif
17427 movq %rsp,%rdi /* pt_regs pointer */
17428 xorl %esi,%esi /* no error code */
17429 call \do_sym
17430 jmp paranoid_exit /* %ebx: no swapgs flag */
17431 CFI_ENDPROC
17432-END(\sym)
17433+ENDPROC(\sym)
17434 .endm
17435
17436 .macro paranoidzeroentry_ist sym do_sym ist
17437@@ -1066,15 +1399,30 @@ ENTRY(\sym)
17438 subq $15*8, %rsp
17439 call save_paranoid
17440 TRACE_IRQS_OFF
17441+#ifdef CONFIG_PAX_MEMORY_UDEREF
17442+ testb $3, CS(%rsp)
17443+ jnz 1f
17444+ pax_enter_kernel
17445+ jmp 2f
17446+1: pax_enter_kernel_user
17447+2:
17448+#else
17449+ pax_enter_kernel
17450+#endif
17451 movq %rsp,%rdi /* pt_regs pointer */
17452 xorl %esi,%esi /* no error code */
17453- PER_CPU(init_tss, %rbp)
17454+#ifdef CONFIG_SMP
17455+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
17456+ lea init_tss(%rbp), %rbp
17457+#else
17458+ lea init_tss(%rip), %rbp
17459+#endif
17460 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17461 call \do_sym
17462 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
17463 jmp paranoid_exit /* %ebx: no swapgs flag */
17464 CFI_ENDPROC
17465-END(\sym)
17466+ENDPROC(\sym)
17467 .endm
17468
17469 .macro errorentry sym do_sym
17470@@ -1085,13 +1433,23 @@ ENTRY(\sym)
17471 CFI_ADJUST_CFA_OFFSET 15*8
17472 call error_entry
17473 DEFAULT_FRAME 0
17474+#ifdef CONFIG_PAX_MEMORY_UDEREF
17475+ testb $3, CS(%rsp)
17476+ jnz 1f
17477+ pax_enter_kernel
17478+ jmp 2f
17479+1: pax_enter_kernel_user
17480+2:
17481+#else
17482+ pax_enter_kernel
17483+#endif
17484 movq %rsp,%rdi /* pt_regs pointer */
17485 movq ORIG_RAX(%rsp),%rsi /* get error code */
17486 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17487 call \do_sym
17488 jmp error_exit /* %ebx: no swapgs flag */
17489 CFI_ENDPROC
17490-END(\sym)
17491+ENDPROC(\sym)
17492 .endm
17493
17494 /* error code is on the stack already */
17495@@ -1104,13 +1462,23 @@ ENTRY(\sym)
17496 call save_paranoid
17497 DEFAULT_FRAME 0
17498 TRACE_IRQS_OFF
17499+#ifdef CONFIG_PAX_MEMORY_UDEREF
17500+ testb $3, CS(%rsp)
17501+ jnz 1f
17502+ pax_enter_kernel
17503+ jmp 2f
17504+1: pax_enter_kernel_user
17505+2:
17506+#else
17507+ pax_enter_kernel
17508+#endif
17509 movq %rsp,%rdi /* pt_regs pointer */
17510 movq ORIG_RAX(%rsp),%rsi /* get error code */
17511 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
17512 call \do_sym
17513 jmp paranoid_exit /* %ebx: no swapgs flag */
17514 CFI_ENDPROC
17515-END(\sym)
17516+ENDPROC(\sym)
17517 .endm
17518
17519 zeroentry divide_error do_divide_error
17520@@ -1141,9 +1509,10 @@ gs_change:
17521 SWAPGS
17522 popf
17523 CFI_ADJUST_CFA_OFFSET -8
17524+ pax_force_retaddr
17525 ret
17526 CFI_ENDPROC
17527-END(native_load_gs_index)
17528+ENDPROC(native_load_gs_index)
17529
17530 .section __ex_table,"a"
17531 .align 8
17532@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
17533 * of hacks for example to fork off the per-CPU idle tasks.
17534 * [Hopefully no generic code relies on the reschedule -AK]
17535 */
17536- RESTORE_ALL
17537+ RESTORE_REST
17538 UNFAKE_STACK_FRAME
17539+ pax_force_retaddr
17540 ret
17541 CFI_ENDPROC
17542-END(kernel_thread)
17543+ENDPROC(kernel_thread)
17544
17545 ENTRY(child_rip)
17546 pushq $0 # fake return address
17547@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
17548 */
17549 movq %rdi, %rax
17550 movq %rsi, %rdi
17551+ pax_force_fptr %rax
17552 call *%rax
17553 # exit
17554 mov %eax, %edi
17555 call do_exit
17556 ud2 # padding for call trace
17557 CFI_ENDPROC
17558-END(child_rip)
17559+ENDPROC(child_rip)
17560
17561 /*
17562 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
17563@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
17564 RESTORE_REST
17565 testq %rax,%rax
17566 je int_ret_from_sys_call
17567- RESTORE_ARGS
17568 UNFAKE_STACK_FRAME
17569+ pax_force_retaddr
17570 ret
17571 CFI_ENDPROC
17572-END(kernel_execve)
17573+ENDPROC(kernel_execve)
17574
17575 /* Call softirq on interrupt stack. Interrupts are off. */
17576 ENTRY(call_softirq)
17577@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
17578 CFI_DEF_CFA_REGISTER rsp
17579 CFI_ADJUST_CFA_OFFSET -8
17580 decl PER_CPU_VAR(irq_count)
17581+ pax_force_retaddr
17582 ret
17583 CFI_ENDPROC
17584-END(call_softirq)
17585+ENDPROC(call_softirq)
17586
17587 #ifdef CONFIG_XEN
17588 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
17589@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
17590 decl PER_CPU_VAR(irq_count)
17591 jmp error_exit
17592 CFI_ENDPROC
17593-END(xen_do_hypervisor_callback)
17594+ENDPROC(xen_do_hypervisor_callback)
17595
17596 /*
17597 * Hypervisor uses this for application faults while it executes.
17598@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
17599 SAVE_ALL
17600 jmp error_exit
17601 CFI_ENDPROC
17602-END(xen_failsafe_callback)
17603+ENDPROC(xen_failsafe_callback)
17604
17605 #endif /* CONFIG_XEN */
17606
17607@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
17608 TRACE_IRQS_OFF
17609 testl %ebx,%ebx /* swapgs needed? */
17610 jnz paranoid_restore
17611- testl $3,CS(%rsp)
17612+ testb $3,CS(%rsp)
17613 jnz paranoid_userspace
17614+#ifdef CONFIG_PAX_MEMORY_UDEREF
17615+ pax_exit_kernel
17616+ TRACE_IRQS_IRETQ 0
17617+ SWAPGS_UNSAFE_STACK
17618+ RESTORE_ALL 8
17619+ pax_force_retaddr_bts
17620+ jmp irq_return
17621+#endif
17622 paranoid_swapgs:
17623+#ifdef CONFIG_PAX_MEMORY_UDEREF
17624+ pax_exit_kernel_user
17625+#else
17626+ pax_exit_kernel
17627+#endif
17628 TRACE_IRQS_IRETQ 0
17629 SWAPGS_UNSAFE_STACK
17630 RESTORE_ALL 8
17631 jmp irq_return
17632 paranoid_restore:
17633+ pax_exit_kernel
17634 TRACE_IRQS_IRETQ 0
17635 RESTORE_ALL 8
17636+ pax_force_retaddr_bts
17637 jmp irq_return
17638 paranoid_userspace:
17639 GET_THREAD_INFO(%rcx)
17640@@ -1443,7 +1830,7 @@ paranoid_schedule:
17641 TRACE_IRQS_OFF
17642 jmp paranoid_userspace
17643 CFI_ENDPROC
17644-END(paranoid_exit)
17645+ENDPROC(paranoid_exit)
17646
17647 /*
17648 * Exception entry point. This expects an error code/orig_rax on the stack.
17649@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
17650 movq_cfi r14, R14+8
17651 movq_cfi r15, R15+8
17652 xorl %ebx,%ebx
17653- testl $3,CS+8(%rsp)
17654+ testb $3,CS+8(%rsp)
17655 je error_kernelspace
17656 error_swapgs:
17657 SWAPGS
17658 error_sti:
17659 TRACE_IRQS_OFF
17660+ pax_force_retaddr_bts
17661 ret
17662 CFI_ENDPROC
17663
17664@@ -1497,7 +1885,7 @@ error_kernelspace:
17665 cmpq $gs_change,RIP+8(%rsp)
17666 je error_swapgs
17667 jmp error_sti
17668-END(error_entry)
17669+ENDPROC(error_entry)
17670
17671
17672 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
17673@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
17674 jnz retint_careful
17675 jmp retint_swapgs
17676 CFI_ENDPROC
17677-END(error_exit)
17678+ENDPROC(error_exit)
17679
17680
17681 /* runs on exception stack */
17682@@ -1529,6 +1917,16 @@ ENTRY(nmi)
17683 CFI_ADJUST_CFA_OFFSET 15*8
17684 call save_paranoid
17685 DEFAULT_FRAME 0
17686+#ifdef CONFIG_PAX_MEMORY_UDEREF
17687+ testb $3, CS(%rsp)
17688+ jnz 1f
17689+ pax_enter_kernel
17690+ jmp 2f
17691+1: pax_enter_kernel_user
17692+2:
17693+#else
17694+ pax_enter_kernel
17695+#endif
17696 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
17697 movq %rsp,%rdi
17698 movq $-1,%rsi
17699@@ -1539,12 +1937,28 @@ ENTRY(nmi)
17700 DISABLE_INTERRUPTS(CLBR_NONE)
17701 testl %ebx,%ebx /* swapgs needed? */
17702 jnz nmi_restore
17703- testl $3,CS(%rsp)
17704+ testb $3,CS(%rsp)
17705 jnz nmi_userspace
17706+#ifdef CONFIG_PAX_MEMORY_UDEREF
17707+ pax_exit_kernel
17708+ SWAPGS_UNSAFE_STACK
17709+ RESTORE_ALL 8
17710+ pax_force_retaddr_bts
17711+ jmp irq_return
17712+#endif
17713 nmi_swapgs:
17714+#ifdef CONFIG_PAX_MEMORY_UDEREF
17715+ pax_exit_kernel_user
17716+#else
17717+ pax_exit_kernel
17718+#endif
17719 SWAPGS_UNSAFE_STACK
17720+ RESTORE_ALL 8
17721+ jmp irq_return
17722 nmi_restore:
17723+ pax_exit_kernel
17724 RESTORE_ALL 8
17725+ pax_force_retaddr_bts
17726 jmp irq_return
17727 nmi_userspace:
17728 GET_THREAD_INFO(%rcx)
17729@@ -1573,14 +1987,14 @@ nmi_schedule:
17730 jmp paranoid_exit
17731 CFI_ENDPROC
17732 #endif
17733-END(nmi)
17734+ENDPROC(nmi)
17735
17736 ENTRY(ignore_sysret)
17737 CFI_STARTPROC
17738 mov $-ENOSYS,%eax
17739 sysret
17740 CFI_ENDPROC
17741-END(ignore_sysret)
17742+ENDPROC(ignore_sysret)
17743
17744 /*
17745 * End of kprobes section
17746diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
17747index 9dbb527..7b3615a 100644
17748--- a/arch/x86/kernel/ftrace.c
17749+++ b/arch/x86/kernel/ftrace.c
17750@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
17751 static void *mod_code_newcode; /* holds the text to write to the IP */
17752
17753 static unsigned nmi_wait_count;
17754-static atomic_t nmi_update_count = ATOMIC_INIT(0);
17755+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
17756
17757 int ftrace_arch_read_dyn_info(char *buf, int size)
17758 {
17759@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
17760
17761 r = snprintf(buf, size, "%u %u",
17762 nmi_wait_count,
17763- atomic_read(&nmi_update_count));
17764+ atomic_read_unchecked(&nmi_update_count));
17765 return r;
17766 }
17767
17768@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
17769 {
17770 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
17771 smp_rmb();
17772+ pax_open_kernel();
17773 ftrace_mod_code();
17774- atomic_inc(&nmi_update_count);
17775+ pax_close_kernel();
17776+ atomic_inc_unchecked(&nmi_update_count);
17777 }
17778 /* Must have previous changes seen before executions */
17779 smp_mb();
17780@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
17781
17782
17783
17784-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
17785+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
17786
17787 static unsigned char *ftrace_nop_replace(void)
17788 {
17789@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
17790 {
17791 unsigned char replaced[MCOUNT_INSN_SIZE];
17792
17793+ ip = ktla_ktva(ip);
17794+
17795 /*
17796 * Note: Due to modules and __init, code can
17797 * disappear and change, we need to protect against faulting
17798@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
17799 unsigned char old[MCOUNT_INSN_SIZE], *new;
17800 int ret;
17801
17802- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
17803+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
17804 new = ftrace_call_replace(ip, (unsigned long)func);
17805 ret = ftrace_modify_code(ip, old, new);
17806
17807@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
17808 switch (faulted) {
17809 case 0:
17810 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
17811- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
17812+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
17813 break;
17814 case 1:
17815 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
17816- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
17817+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
17818 break;
17819 case 2:
17820 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
17821- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
17822+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
17823 break;
17824 }
17825
17826@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
17827 {
17828 unsigned char code[MCOUNT_INSN_SIZE];
17829
17830+ ip = ktla_ktva(ip);
17831+
17832 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
17833 return -EFAULT;
17834
17835diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
17836index 4f8e250..df24706 100644
17837--- a/arch/x86/kernel/head32.c
17838+++ b/arch/x86/kernel/head32.c
17839@@ -16,6 +16,7 @@
17840 #include <asm/apic.h>
17841 #include <asm/io_apic.h>
17842 #include <asm/bios_ebda.h>
17843+#include <asm/boot.h>
17844
17845 static void __init i386_default_early_setup(void)
17846 {
17847@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
17848 {
17849 reserve_trampoline_memory();
17850
17851- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
17852+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
17853
17854 #ifdef CONFIG_BLK_DEV_INITRD
17855 /* Reserve INITRD */
17856diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
17857index 34c3308..6fc4e76 100644
17858--- a/arch/x86/kernel/head_32.S
17859+++ b/arch/x86/kernel/head_32.S
17860@@ -19,10 +19,17 @@
17861 #include <asm/setup.h>
17862 #include <asm/processor-flags.h>
17863 #include <asm/percpu.h>
17864+#include <asm/msr-index.h>
17865
17866 /* Physical address */
17867 #define pa(X) ((X) - __PAGE_OFFSET)
17868
17869+#ifdef CONFIG_PAX_KERNEXEC
17870+#define ta(X) (X)
17871+#else
17872+#define ta(X) ((X) - __PAGE_OFFSET)
17873+#endif
17874+
17875 /*
17876 * References to members of the new_cpu_data structure.
17877 */
17878@@ -52,11 +59,7 @@
17879 * and small than max_low_pfn, otherwise will waste some page table entries
17880 */
17881
17882-#if PTRS_PER_PMD > 1
17883-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
17884-#else
17885-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
17886-#endif
17887+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
17888
17889 /* Enough space to fit pagetables for the low memory linear map */
17890 MAPPING_BEYOND_END = \
17891@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
17892 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17893
17894 /*
17895+ * Real beginning of normal "text" segment
17896+ */
17897+ENTRY(stext)
17898+ENTRY(_stext)
17899+
17900+/*
17901 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
17902 * %esi points to the real-mode code as a 32-bit pointer.
17903 * CS and DS must be 4 GB flat segments, but we don't depend on
17904@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
17905 * can.
17906 */
17907 __HEAD
17908+
17909+#ifdef CONFIG_PAX_KERNEXEC
17910+ jmp startup_32
17911+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
17912+.fill PAGE_SIZE-5,1,0xcc
17913+#endif
17914+
17915 ENTRY(startup_32)
17916+ movl pa(stack_start),%ecx
17917+
17918 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
17919 us to not reload segments */
17920 testb $(1<<6), BP_loadflags(%esi)
17921@@ -95,7 +113,60 @@ ENTRY(startup_32)
17922 movl %eax,%es
17923 movl %eax,%fs
17924 movl %eax,%gs
17925+ movl %eax,%ss
17926 2:
17927+ leal -__PAGE_OFFSET(%ecx),%esp
17928+
17929+#ifdef CONFIG_SMP
17930+ movl $pa(cpu_gdt_table),%edi
17931+ movl $__per_cpu_load,%eax
17932+ movw %ax,__KERNEL_PERCPU + 2(%edi)
17933+ rorl $16,%eax
17934+ movb %al,__KERNEL_PERCPU + 4(%edi)
17935+ movb %ah,__KERNEL_PERCPU + 7(%edi)
17936+ movl $__per_cpu_end - 1,%eax
17937+ subl $__per_cpu_start,%eax
17938+ movw %ax,__KERNEL_PERCPU + 0(%edi)
17939+#endif
17940+
17941+#ifdef CONFIG_PAX_MEMORY_UDEREF
17942+ movl $NR_CPUS,%ecx
17943+ movl $pa(cpu_gdt_table),%edi
17944+1:
17945+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
17946+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
17947+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
17948+ addl $PAGE_SIZE_asm,%edi
17949+ loop 1b
17950+#endif
17951+
17952+#ifdef CONFIG_PAX_KERNEXEC
17953+ movl $pa(boot_gdt),%edi
17954+ movl $__LOAD_PHYSICAL_ADDR,%eax
17955+ movw %ax,__BOOT_CS + 2(%edi)
17956+ rorl $16,%eax
17957+ movb %al,__BOOT_CS + 4(%edi)
17958+ movb %ah,__BOOT_CS + 7(%edi)
17959+ rorl $16,%eax
17960+
17961+ ljmp $(__BOOT_CS),$1f
17962+1:
17963+
17964+ movl $NR_CPUS,%ecx
17965+ movl $pa(cpu_gdt_table),%edi
17966+ addl $__PAGE_OFFSET,%eax
17967+1:
17968+ movw %ax,__KERNEL_CS + 2(%edi)
17969+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
17970+ rorl $16,%eax
17971+ movb %al,__KERNEL_CS + 4(%edi)
17972+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
17973+ movb %ah,__KERNEL_CS + 7(%edi)
17974+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
17975+ rorl $16,%eax
17976+ addl $PAGE_SIZE_asm,%edi
17977+ loop 1b
17978+#endif
17979
17980 /*
17981 * Clear BSS first so that there are no surprises...
17982@@ -140,9 +211,7 @@ ENTRY(startup_32)
17983 cmpl $num_subarch_entries, %eax
17984 jae bad_subarch
17985
17986- movl pa(subarch_entries)(,%eax,4), %eax
17987- subl $__PAGE_OFFSET, %eax
17988- jmp *%eax
17989+ jmp *pa(subarch_entries)(,%eax,4)
17990
17991 bad_subarch:
17992 WEAK(lguest_entry)
17993@@ -154,10 +223,10 @@ WEAK(xen_entry)
17994 __INITDATA
17995
17996 subarch_entries:
17997- .long default_entry /* normal x86/PC */
17998- .long lguest_entry /* lguest hypervisor */
17999- .long xen_entry /* Xen hypervisor */
18000- .long default_entry /* Moorestown MID */
18001+ .long ta(default_entry) /* normal x86/PC */
18002+ .long ta(lguest_entry) /* lguest hypervisor */
18003+ .long ta(xen_entry) /* Xen hypervisor */
18004+ .long ta(default_entry) /* Moorestown MID */
18005 num_subarch_entries = (. - subarch_entries) / 4
18006 .previous
18007 #endif /* CONFIG_PARAVIRT */
18008@@ -218,8 +287,11 @@ default_entry:
18009 movl %eax, pa(max_pfn_mapped)
18010
18011 /* Do early initialization of the fixmap area */
18012- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18013- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18014+#ifdef CONFIG_COMPAT_VDSO
18015+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18016+#else
18017+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
18018+#endif
18019 #else /* Not PAE */
18020
18021 page_pde_offset = (__PAGE_OFFSET >> 20);
18022@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
18023 movl %eax, pa(max_pfn_mapped)
18024
18025 /* Do early initialization of the fixmap area */
18026- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
18027- movl %eax,pa(swapper_pg_dir+0xffc)
18028+#ifdef CONFIG_COMPAT_VDSO
18029+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
18030+#else
18031+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
18032+#endif
18033 #endif
18034 jmp 3f
18035 /*
18036@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
18037 movl %eax,%es
18038 movl %eax,%fs
18039 movl %eax,%gs
18040+ movl pa(stack_start),%ecx
18041+ movl %eax,%ss
18042+ leal -__PAGE_OFFSET(%ecx),%esp
18043 #endif /* CONFIG_SMP */
18044 3:
18045
18046@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
18047 orl %edx,%eax
18048 movl %eax,%cr4
18049
18050+#ifdef CONFIG_X86_PAE
18051 btl $5, %eax # check if PAE is enabled
18052 jnc 6f
18053
18054@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
18055 cpuid
18056 cmpl $0x80000000, %eax
18057 jbe 6f
18058+
18059+ /* Clear bogus XD_DISABLE bits */
18060+ call verify_cpu
18061+
18062 mov $0x80000001, %eax
18063 cpuid
18064 /* Execute Disable bit supported? */
18065@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
18066 jnc 6f
18067
18068 /* Setup EFER (Extended Feature Enable Register) */
18069- movl $0xc0000080, %ecx
18070+ movl $MSR_EFER, %ecx
18071 rdmsr
18072
18073 btsl $11, %eax
18074 /* Make changes effective */
18075 wrmsr
18076
18077+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
18078+ movl $1,pa(nx_enabled)
18079+#endif
18080+
18081 6:
18082
18083 /*
18084@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
18085 movl %eax,%cr0 /* ..and set paging (PG) bit */
18086 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
18087 1:
18088- /* Set up the stack pointer */
18089- lss stack_start,%esp
18090+ /* Shift the stack pointer to a virtual address */
18091+ addl $__PAGE_OFFSET, %esp
18092
18093 /*
18094 * Initialize eflags. Some BIOS's leave bits like NT set. This would
18095@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
18096
18097 #ifdef CONFIG_SMP
18098 cmpb $0, ready
18099- jz 1f /* Initial CPU cleans BSS */
18100- jmp checkCPUtype
18101-1:
18102+ jnz checkCPUtype
18103 #endif /* CONFIG_SMP */
18104
18105 /*
18106@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
18107 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
18108 movl %eax,%ss # after changing gdt.
18109
18110- movl $(__USER_DS),%eax # DS/ES contains default USER segment
18111+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
18112 movl %eax,%ds
18113 movl %eax,%es
18114
18115@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
18116 */
18117 cmpb $0,ready
18118 jne 1f
18119- movl $per_cpu__gdt_page,%eax
18120+ movl $cpu_gdt_table,%eax
18121 movl $per_cpu__stack_canary,%ecx
18122+#ifdef CONFIG_SMP
18123+ addl $__per_cpu_load,%ecx
18124+#endif
18125 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
18126 shrl $16, %ecx
18127 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
18128 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
18129 1:
18130-#endif
18131 movl $(__KERNEL_STACK_CANARY),%eax
18132+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18133+ movl $(__USER_DS),%eax
18134+#else
18135+ xorl %eax,%eax
18136+#endif
18137 movl %eax,%gs
18138
18139 xorl %eax,%eax # Clear LDT
18140@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
18141
18142 cld # gcc2 wants the direction flag cleared at all times
18143 pushl $0 # fake return address for unwinder
18144-#ifdef CONFIG_SMP
18145- movb ready, %cl
18146 movb $1, ready
18147- cmpb $0,%cl # the first CPU calls start_kernel
18148- je 1f
18149- movl (stack_start), %esp
18150-1:
18151-#endif /* CONFIG_SMP */
18152 jmp *(initial_code)
18153
18154 /*
18155@@ -546,22 +631,22 @@ early_page_fault:
18156 jmp early_fault
18157
18158 early_fault:
18159- cld
18160 #ifdef CONFIG_PRINTK
18161+ cmpl $1,%ss:early_recursion_flag
18162+ je hlt_loop
18163+ incl %ss:early_recursion_flag
18164+ cld
18165 pusha
18166 movl $(__KERNEL_DS),%eax
18167 movl %eax,%ds
18168 movl %eax,%es
18169- cmpl $2,early_recursion_flag
18170- je hlt_loop
18171- incl early_recursion_flag
18172 movl %cr2,%eax
18173 pushl %eax
18174 pushl %edx /* trapno */
18175 pushl $fault_msg
18176 call printk
18177+; call dump_stack
18178 #endif
18179- call dump_stack
18180 hlt_loop:
18181 hlt
18182 jmp hlt_loop
18183@@ -569,8 +654,11 @@ hlt_loop:
18184 /* This is the default interrupt "handler" :-) */
18185 ALIGN
18186 ignore_int:
18187- cld
18188 #ifdef CONFIG_PRINTK
18189+ cmpl $2,%ss:early_recursion_flag
18190+ je hlt_loop
18191+ incl %ss:early_recursion_flag
18192+ cld
18193 pushl %eax
18194 pushl %ecx
18195 pushl %edx
18196@@ -579,9 +667,6 @@ ignore_int:
18197 movl $(__KERNEL_DS),%eax
18198 movl %eax,%ds
18199 movl %eax,%es
18200- cmpl $2,early_recursion_flag
18201- je hlt_loop
18202- incl early_recursion_flag
18203 pushl 16(%esp)
18204 pushl 24(%esp)
18205 pushl 32(%esp)
18206@@ -600,6 +685,8 @@ ignore_int:
18207 #endif
18208 iret
18209
18210+#include "verify_cpu.S"
18211+
18212 __REFDATA
18213 .align 4
18214 ENTRY(initial_code)
18215@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
18216 /*
18217 * BSS section
18218 */
18219-__PAGE_ALIGNED_BSS
18220- .align PAGE_SIZE_asm
18221 #ifdef CONFIG_X86_PAE
18222+.section .swapper_pg_pmd,"a",@progbits
18223 swapper_pg_pmd:
18224 .fill 1024*KPMDS,4,0
18225 #else
18226+.section .swapper_pg_dir,"a",@progbits
18227 ENTRY(swapper_pg_dir)
18228 .fill 1024,4,0
18229 #endif
18230+.section .swapper_pg_fixmap,"a",@progbits
18231 swapper_pg_fixmap:
18232 .fill 1024,4,0
18233 #ifdef CONFIG_X86_TRAMPOLINE
18234+.section .trampoline_pg_dir,"a",@progbits
18235 ENTRY(trampoline_pg_dir)
18236+#ifdef CONFIG_X86_PAE
18237+ .fill 4,8,0
18238+#else
18239 .fill 1024,4,0
18240 #endif
18241+#endif
18242+
18243+.section .empty_zero_page,"a",@progbits
18244 ENTRY(empty_zero_page)
18245 .fill 4096,1,0
18246
18247 /*
18248+ * The IDT has to be page-aligned to simplify the Pentium
18249+ * F0 0F bug workaround.. We have a special link segment
18250+ * for this.
18251+ */
18252+.section .idt,"a",@progbits
18253+ENTRY(idt_table)
18254+ .fill 256,8,0
18255+
18256+/*
18257 * This starts the data section.
18258 */
18259 #ifdef CONFIG_X86_PAE
18260-__PAGE_ALIGNED_DATA
18261- /* Page-aligned for the benefit of paravirt? */
18262- .align PAGE_SIZE_asm
18263+.section .swapper_pg_dir,"a",@progbits
18264+
18265 ENTRY(swapper_pg_dir)
18266 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
18267 # if KPMDS == 3
18268@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
18269 # error "Kernel PMDs should be 1, 2 or 3"
18270 # endif
18271 .align PAGE_SIZE_asm /* needs to be page-sized too */
18272+
18273+#ifdef CONFIG_PAX_PER_CPU_PGD
18274+ENTRY(cpu_pgd)
18275+ .rept NR_CPUS
18276+ .fill 4,8,0
18277+ .endr
18278+#endif
18279+
18280 #endif
18281
18282 .data
18283+.balign 4
18284 ENTRY(stack_start)
18285- .long init_thread_union+THREAD_SIZE
18286- .long __BOOT_DS
18287+ .long init_thread_union+THREAD_SIZE-8
18288
18289 ready: .byte 0
18290
18291+.section .rodata,"a",@progbits
18292 early_recursion_flag:
18293 .long 0
18294
18295@@ -697,7 +809,7 @@ fault_msg:
18296 .word 0 # 32 bit align gdt_desc.address
18297 boot_gdt_descr:
18298 .word __BOOT_DS+7
18299- .long boot_gdt - __PAGE_OFFSET
18300+ .long pa(boot_gdt)
18301
18302 .word 0 # 32-bit align idt_desc.address
18303 idt_descr:
18304@@ -708,7 +820,7 @@ idt_descr:
18305 .word 0 # 32 bit align gdt_desc.address
18306 ENTRY(early_gdt_descr)
18307 .word GDT_ENTRIES*8-1
18308- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
18309+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
18310
18311 /*
18312 * The boot_gdt must mirror the equivalent in setup.S and is
18313@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
18314 .align L1_CACHE_BYTES
18315 ENTRY(boot_gdt)
18316 .fill GDT_ENTRY_BOOT_CS,8,0
18317- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
18318- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
18319+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
18320+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
18321+
18322+ .align PAGE_SIZE_asm
18323+ENTRY(cpu_gdt_table)
18324+ .rept NR_CPUS
18325+ .quad 0x0000000000000000 /* NULL descriptor */
18326+ .quad 0x0000000000000000 /* 0x0b reserved */
18327+ .quad 0x0000000000000000 /* 0x13 reserved */
18328+ .quad 0x0000000000000000 /* 0x1b reserved */
18329+
18330+#ifdef CONFIG_PAX_KERNEXEC
18331+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
18332+#else
18333+ .quad 0x0000000000000000 /* 0x20 unused */
18334+#endif
18335+
18336+ .quad 0x0000000000000000 /* 0x28 unused */
18337+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
18338+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
18339+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
18340+ .quad 0x0000000000000000 /* 0x4b reserved */
18341+ .quad 0x0000000000000000 /* 0x53 reserved */
18342+ .quad 0x0000000000000000 /* 0x5b reserved */
18343+
18344+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
18345+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
18346+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
18347+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
18348+
18349+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
18350+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
18351+
18352+ /*
18353+ * Segments used for calling PnP BIOS have byte granularity.
18354+ * The code segments and data segments have fixed 64k limits,
18355+ * the transfer segment sizes are set at run time.
18356+ */
18357+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
18358+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
18359+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
18360+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
18361+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
18362+
18363+ /*
18364+ * The APM segments have byte granularity and their bases
18365+ * are set at run time. All have 64k limits.
18366+ */
18367+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
18368+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
18369+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
18370+
18371+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
18372+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
18373+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
18374+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
18375+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
18376+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
18377+
18378+ /* Be sure this is zeroed to avoid false validations in Xen */
18379+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
18380+ .endr
18381diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
18382index 780cd92..758b2a6 100644
18383--- a/arch/x86/kernel/head_64.S
18384+++ b/arch/x86/kernel/head_64.S
18385@@ -19,6 +19,8 @@
18386 #include <asm/cache.h>
18387 #include <asm/processor-flags.h>
18388 #include <asm/percpu.h>
18389+#include <asm/cpufeature.h>
18390+#include <asm/alternative-asm.h>
18391
18392 #ifdef CONFIG_PARAVIRT
18393 #include <asm/asm-offsets.h>
18394@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
18395 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
18396 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
18397 L3_START_KERNEL = pud_index(__START_KERNEL_map)
18398+L4_VMALLOC_START = pgd_index(VMALLOC_START)
18399+L3_VMALLOC_START = pud_index(VMALLOC_START)
18400+L4_VMALLOC_END = pgd_index(VMALLOC_END)
18401+L3_VMALLOC_END = pud_index(VMALLOC_END)
18402+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
18403+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
18404
18405 .text
18406 __HEAD
18407@@ -85,35 +93,23 @@ startup_64:
18408 */
18409 addq %rbp, init_level4_pgt + 0(%rip)
18410 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
18411+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
18412+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
18413+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
18414 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
18415
18416 addq %rbp, level3_ident_pgt + 0(%rip)
18417+#ifndef CONFIG_XEN
18418+ addq %rbp, level3_ident_pgt + 8(%rip)
18419+#endif
18420
18421- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
18422- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
18423+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
18424+
18425+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
18426+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
18427
18428 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
18429-
18430- /* Add an Identity mapping if I am above 1G */
18431- leaq _text(%rip), %rdi
18432- andq $PMD_PAGE_MASK, %rdi
18433-
18434- movq %rdi, %rax
18435- shrq $PUD_SHIFT, %rax
18436- andq $(PTRS_PER_PUD - 1), %rax
18437- jz ident_complete
18438-
18439- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
18440- leaq level3_ident_pgt(%rip), %rbx
18441- movq %rdx, 0(%rbx, %rax, 8)
18442-
18443- movq %rdi, %rax
18444- shrq $PMD_SHIFT, %rax
18445- andq $(PTRS_PER_PMD - 1), %rax
18446- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
18447- leaq level2_spare_pgt(%rip), %rbx
18448- movq %rdx, 0(%rbx, %rax, 8)
18449-ident_complete:
18450+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
18451
18452 /*
18453 * Fixup the kernel text+data virtual addresses. Note that
18454@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
18455 * after the boot processor executes this code.
18456 */
18457
18458- /* Enable PAE mode and PGE */
18459- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
18460+ /* Enable PAE mode and PSE/PGE */
18461+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
18462 movq %rax, %cr4
18463
18464 /* Setup early boot stage 4 level pagetables. */
18465@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
18466 movl $MSR_EFER, %ecx
18467 rdmsr
18468 btsl $_EFER_SCE, %eax /* Enable System Call */
18469- btl $20,%edi /* No Execute supported? */
18470+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
18471 jnc 1f
18472 btsl $_EFER_NX, %eax
18473+ leaq init_level4_pgt(%rip), %rdi
18474+#ifndef CONFIG_EFI
18475+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
18476+#endif
18477+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
18478+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
18479+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
18480 1: wrmsr /* Make changes effective */
18481
18482 /* Setup cr0 */
18483@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
18484 * jump. In addition we need to ensure %cs is set so we make this
18485 * a far return.
18486 */
18487+ pax_set_fptr_mask
18488 movq initial_code(%rip),%rax
18489 pushq $0 # fake return address to stop unwinder
18490 pushq $__KERNEL_CS # set correct cs
18491@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
18492 .quad x86_64_start_kernel
18493 ENTRY(initial_gs)
18494 .quad INIT_PER_CPU_VAR(irq_stack_union)
18495- __FINITDATA
18496
18497 ENTRY(stack_start)
18498 .quad init_thread_union+THREAD_SIZE-8
18499 .word 0
18500+ __FINITDATA
18501
18502 bad_address:
18503 jmp bad_address
18504
18505- .section ".init.text","ax"
18506+ __INIT
18507 #ifdef CONFIG_EARLY_PRINTK
18508 .globl early_idt_handlers
18509 early_idt_handlers:
18510@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
18511 #endif /* EARLY_PRINTK */
18512 1: hlt
18513 jmp 1b
18514+ .previous
18515
18516 #ifdef CONFIG_EARLY_PRINTK
18517+ __INITDATA
18518 early_recursion_flag:
18519 .long 0
18520+ .previous
18521
18522+ .section .rodata,"a",@progbits
18523 early_idt_msg:
18524 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
18525 early_idt_ripmsg:
18526 .asciz "RIP %s\n"
18527+ .previous
18528 #endif /* CONFIG_EARLY_PRINTK */
18529- .previous
18530
18531+ .section .rodata,"a",@progbits
18532 #define NEXT_PAGE(name) \
18533 .balign PAGE_SIZE; \
18534 ENTRY(name)
18535@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
18536 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18537 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
18538 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18539+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
18540+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
18541+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
18542+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
18543+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
18544+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18545 .org init_level4_pgt + L4_START_KERNEL*8, 0
18546 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
18547 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
18548
18549+#ifdef CONFIG_PAX_PER_CPU_PGD
18550+NEXT_PAGE(cpu_pgd)
18551+ .rept NR_CPUS
18552+ .fill 512,8,0
18553+ .endr
18554+#endif
18555+
18556 NEXT_PAGE(level3_ident_pgt)
18557 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
18558+#ifdef CONFIG_XEN
18559 .fill 511,8,0
18560+#else
18561+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
18562+ .fill 510,8,0
18563+#endif
18564+
18565+NEXT_PAGE(level3_vmalloc_start_pgt)
18566+ .fill 512,8,0
18567+
18568+NEXT_PAGE(level3_vmalloc_end_pgt)
18569+ .fill 512,8,0
18570+
18571+NEXT_PAGE(level3_vmemmap_pgt)
18572+ .fill L3_VMEMMAP_START,8,0
18573+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
18574
18575 NEXT_PAGE(level3_kernel_pgt)
18576 .fill L3_START_KERNEL,8,0
18577@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
18578 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
18579 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18580
18581+NEXT_PAGE(level2_vmemmap_pgt)
18582+ .fill 512,8,0
18583+
18584 NEXT_PAGE(level2_fixmap_pgt)
18585- .fill 506,8,0
18586- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
18587- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
18588- .fill 5,8,0
18589+ .fill 507,8,0
18590+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
18591+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
18592+ .fill 4,8,0
18593
18594-NEXT_PAGE(level1_fixmap_pgt)
18595+NEXT_PAGE(level1_vsyscall_pgt)
18596 .fill 512,8,0
18597
18598-NEXT_PAGE(level2_ident_pgt)
18599- /* Since I easily can, map the first 1G.
18600+ /* Since I easily can, map the first 2G.
18601 * Don't set NX because code runs from these pages.
18602 */
18603- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
18604+NEXT_PAGE(level2_ident_pgt)
18605+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
18606
18607 NEXT_PAGE(level2_kernel_pgt)
18608 /*
18609@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
18610 * If you want to increase this then increase MODULES_VADDR
18611 * too.)
18612 */
18613- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
18614- KERNEL_IMAGE_SIZE/PMD_SIZE)
18615-
18616-NEXT_PAGE(level2_spare_pgt)
18617- .fill 512, 8, 0
18618+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
18619
18620 #undef PMDS
18621 #undef NEXT_PAGE
18622
18623- .data
18624+ .align PAGE_SIZE
18625+ENTRY(cpu_gdt_table)
18626+ .rept NR_CPUS
18627+ .quad 0x0000000000000000 /* NULL descriptor */
18628+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
18629+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
18630+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
18631+ .quad 0x00cffb000000ffff /* __USER32_CS */
18632+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
18633+ .quad 0x00affb000000ffff /* __USER_CS */
18634+
18635+#ifdef CONFIG_PAX_KERNEXEC
18636+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
18637+#else
18638+ .quad 0x0 /* unused */
18639+#endif
18640+
18641+ .quad 0,0 /* TSS */
18642+ .quad 0,0 /* LDT */
18643+ .quad 0,0,0 /* three TLS descriptors */
18644+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
18645+ /* asm/segment.h:GDT_ENTRIES must match this */
18646+
18647+ /* zero the remaining page */
18648+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
18649+ .endr
18650+
18651 .align 16
18652 .globl early_gdt_descr
18653 early_gdt_descr:
18654 .word GDT_ENTRIES*8-1
18655 early_gdt_descr_base:
18656- .quad INIT_PER_CPU_VAR(gdt_page)
18657+ .quad cpu_gdt_table
18658
18659 ENTRY(phys_base)
18660 /* This must match the first entry in level2_kernel_pgt */
18661 .quad 0x0000000000000000
18662
18663 #include "../../x86/xen/xen-head.S"
18664-
18665- .section .bss, "aw", @nobits
18666+
18667+ .section .rodata,"a",@progbits
18668 .align L1_CACHE_BYTES
18669 ENTRY(idt_table)
18670- .skip IDT_ENTRIES * 16
18671+ .fill 512,8,0
18672
18673 __PAGE_ALIGNED_BSS
18674 .align PAGE_SIZE
18675diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
18676index 9c3bd4a..e1d9b35 100644
18677--- a/arch/x86/kernel/i386_ksyms_32.c
18678+++ b/arch/x86/kernel/i386_ksyms_32.c
18679@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
18680 EXPORT_SYMBOL(cmpxchg8b_emu);
18681 #endif
18682
18683+EXPORT_SYMBOL_GPL(cpu_gdt_table);
18684+
18685 /* Networking helper routines. */
18686 EXPORT_SYMBOL(csum_partial_copy_generic);
18687+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
18688+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
18689
18690 EXPORT_SYMBOL(__get_user_1);
18691 EXPORT_SYMBOL(__get_user_2);
18692@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
18693
18694 EXPORT_SYMBOL(csum_partial);
18695 EXPORT_SYMBOL(empty_zero_page);
18696+
18697+#ifdef CONFIG_PAX_KERNEXEC
18698+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
18699+#endif
18700diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
18701index df89102..a244320 100644
18702--- a/arch/x86/kernel/i8259.c
18703+++ b/arch/x86/kernel/i8259.c
18704@@ -208,7 +208,7 @@ spurious_8259A_irq:
18705 "spurious 8259A interrupt: IRQ%d.\n", irq);
18706 spurious_irq_mask |= irqmask;
18707 }
18708- atomic_inc(&irq_err_count);
18709+ atomic_inc_unchecked(&irq_err_count);
18710 /*
18711 * Theoretically we do not have to handle this IRQ,
18712 * but in Linux this does not cause problems and is
18713diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
18714index 3a54dcb..1c22348 100644
18715--- a/arch/x86/kernel/init_task.c
18716+++ b/arch/x86/kernel/init_task.c
18717@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
18718 * way process stacks are handled. This is done by having a special
18719 * "init_task" linker map entry..
18720 */
18721-union thread_union init_thread_union __init_task_data =
18722- { INIT_THREAD_INFO(init_task) };
18723+union thread_union init_thread_union __init_task_data;
18724
18725 /*
18726 * Initial task structure.
18727@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
18728 * section. Since TSS's are completely CPU-local, we want them
18729 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
18730 */
18731-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
18732-
18733+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
18734+EXPORT_SYMBOL(init_tss);
18735diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
18736index 99c4d30..74c84e9 100644
18737--- a/arch/x86/kernel/ioport.c
18738+++ b/arch/x86/kernel/ioport.c
18739@@ -6,6 +6,7 @@
18740 #include <linux/sched.h>
18741 #include <linux/kernel.h>
18742 #include <linux/capability.h>
18743+#include <linux/security.h>
18744 #include <linux/errno.h>
18745 #include <linux/types.h>
18746 #include <linux/ioport.h>
18747@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18748
18749 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
18750 return -EINVAL;
18751+#ifdef CONFIG_GRKERNSEC_IO
18752+ if (turn_on && grsec_disable_privio) {
18753+ gr_handle_ioperm();
18754+ return -EPERM;
18755+ }
18756+#endif
18757 if (turn_on && !capable(CAP_SYS_RAWIO))
18758 return -EPERM;
18759
18760@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
18761 * because the ->io_bitmap_max value must match the bitmap
18762 * contents:
18763 */
18764- tss = &per_cpu(init_tss, get_cpu());
18765+ tss = init_tss + get_cpu();
18766
18767 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
18768
18769@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
18770 return -EINVAL;
18771 /* Trying to gain more privileges? */
18772 if (level > old) {
18773+#ifdef CONFIG_GRKERNSEC_IO
18774+ if (grsec_disable_privio) {
18775+ gr_handle_iopl();
18776+ return -EPERM;
18777+ }
18778+#endif
18779 if (!capable(CAP_SYS_RAWIO))
18780 return -EPERM;
18781 }
18782diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
18783index 04bbd52..83a07d9 100644
18784--- a/arch/x86/kernel/irq.c
18785+++ b/arch/x86/kernel/irq.c
18786@@ -15,7 +15,7 @@
18787 #include <asm/mce.h>
18788 #include <asm/hw_irq.h>
18789
18790-atomic_t irq_err_count;
18791+atomic_unchecked_t irq_err_count;
18792
18793 /* Function pointer for generic interrupt vector handling */
18794 void (*generic_interrupt_extension)(void) = NULL;
18795@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
18796 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
18797 seq_printf(p, " Machine check polls\n");
18798 #endif
18799- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
18800+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
18801 #if defined(CONFIG_X86_IO_APIC)
18802- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
18803+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
18804 #endif
18805 return 0;
18806 }
18807@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
18808
18809 u64 arch_irq_stat(void)
18810 {
18811- u64 sum = atomic_read(&irq_err_count);
18812+ u64 sum = atomic_read_unchecked(&irq_err_count);
18813
18814 #ifdef CONFIG_X86_IO_APIC
18815- sum += atomic_read(&irq_mis_count);
18816+ sum += atomic_read_unchecked(&irq_mis_count);
18817 #endif
18818 return sum;
18819 }
18820diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
18821index 7d35d0f..03f1d52 100644
18822--- a/arch/x86/kernel/irq_32.c
18823+++ b/arch/x86/kernel/irq_32.c
18824@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
18825 __asm__ __volatile__("andl %%esp,%0" :
18826 "=r" (sp) : "0" (THREAD_SIZE - 1));
18827
18828- return sp < (sizeof(struct thread_info) + STACK_WARN);
18829+ return sp < STACK_WARN;
18830 }
18831
18832 static void print_stack_overflow(void)
18833@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
18834 * per-CPU IRQ handling contexts (thread information and stack)
18835 */
18836 union irq_ctx {
18837- struct thread_info tinfo;
18838- u32 stack[THREAD_SIZE/sizeof(u32)];
18839-} __attribute__((aligned(PAGE_SIZE)));
18840+ unsigned long previous_esp;
18841+ u32 stack[THREAD_SIZE/sizeof(u32)];
18842+} __attribute__((aligned(THREAD_SIZE)));
18843
18844 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
18845 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
18846@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
18847 static inline int
18848 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18849 {
18850- union irq_ctx *curctx, *irqctx;
18851+ union irq_ctx *irqctx;
18852 u32 *isp, arg1, arg2;
18853
18854- curctx = (union irq_ctx *) current_thread_info();
18855 irqctx = __get_cpu_var(hardirq_ctx);
18856
18857 /*
18858@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18859 * handler) we can't do that and just have to keep using the
18860 * current stack (which is the irq stack already after all)
18861 */
18862- if (unlikely(curctx == irqctx))
18863+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
18864 return 0;
18865
18866 /* build the stack frame on the IRQ stack */
18867- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18868- irqctx->tinfo.task = curctx->tinfo.task;
18869- irqctx->tinfo.previous_esp = current_stack_pointer;
18870+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18871+ irqctx->previous_esp = current_stack_pointer;
18872
18873- /*
18874- * Copy the softirq bits in preempt_count so that the
18875- * softirq checks work in the hardirq context.
18876- */
18877- irqctx->tinfo.preempt_count =
18878- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
18879- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
18880+#ifdef CONFIG_PAX_MEMORY_UDEREF
18881+ __set_fs(MAKE_MM_SEG(0));
18882+#endif
18883
18884 if (unlikely(overflow))
18885 call_on_stack(print_stack_overflow, isp);
18886@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18887 : "0" (irq), "1" (desc), "2" (isp),
18888 "D" (desc->handle_irq)
18889 : "memory", "cc", "ecx");
18890+
18891+#ifdef CONFIG_PAX_MEMORY_UDEREF
18892+ __set_fs(current_thread_info()->addr_limit);
18893+#endif
18894+
18895 return 1;
18896 }
18897
18898@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
18899 */
18900 void __cpuinit irq_ctx_init(int cpu)
18901 {
18902- union irq_ctx *irqctx;
18903-
18904 if (per_cpu(hardirq_ctx, cpu))
18905 return;
18906
18907- irqctx = &per_cpu(hardirq_stack, cpu);
18908- irqctx->tinfo.task = NULL;
18909- irqctx->tinfo.exec_domain = NULL;
18910- irqctx->tinfo.cpu = cpu;
18911- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
18912- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18913-
18914- per_cpu(hardirq_ctx, cpu) = irqctx;
18915-
18916- irqctx = &per_cpu(softirq_stack, cpu);
18917- irqctx->tinfo.task = NULL;
18918- irqctx->tinfo.exec_domain = NULL;
18919- irqctx->tinfo.cpu = cpu;
18920- irqctx->tinfo.preempt_count = 0;
18921- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
18922-
18923- per_cpu(softirq_ctx, cpu) = irqctx;
18924+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
18925+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
18926
18927 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
18928 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
18929@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
18930 asmlinkage void do_softirq(void)
18931 {
18932 unsigned long flags;
18933- struct thread_info *curctx;
18934 union irq_ctx *irqctx;
18935 u32 *isp;
18936
18937@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
18938 local_irq_save(flags);
18939
18940 if (local_softirq_pending()) {
18941- curctx = current_thread_info();
18942 irqctx = __get_cpu_var(softirq_ctx);
18943- irqctx->tinfo.task = curctx->task;
18944- irqctx->tinfo.previous_esp = current_stack_pointer;
18945+ irqctx->previous_esp = current_stack_pointer;
18946
18947 /* build the stack frame on the softirq stack */
18948- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
18949+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
18950+
18951+#ifdef CONFIG_PAX_MEMORY_UDEREF
18952+ __set_fs(MAKE_MM_SEG(0));
18953+#endif
18954
18955 call_on_stack(__do_softirq, isp);
18956+
18957+#ifdef CONFIG_PAX_MEMORY_UDEREF
18958+ __set_fs(current_thread_info()->addr_limit);
18959+#endif
18960+
18961 /*
18962 * Shouldnt happen, we returned above if in_interrupt():
18963 */
18964diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
18965index 8d82a77..0baf312 100644
18966--- a/arch/x86/kernel/kgdb.c
18967+++ b/arch/x86/kernel/kgdb.c
18968@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
18969
18970 /* clear the trace bit */
18971 linux_regs->flags &= ~X86_EFLAGS_TF;
18972- atomic_set(&kgdb_cpu_doing_single_step, -1);
18973+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
18974
18975 /* set the trace bit if we're stepping */
18976 if (remcomInBuffer[0] == 's') {
18977 linux_regs->flags |= X86_EFLAGS_TF;
18978 kgdb_single_step = 1;
18979- atomic_set(&kgdb_cpu_doing_single_step,
18980+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
18981 raw_smp_processor_id());
18982 }
18983
18984@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
18985 break;
18986
18987 case DIE_DEBUG:
18988- if (atomic_read(&kgdb_cpu_doing_single_step) ==
18989+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
18990 raw_smp_processor_id()) {
18991 if (user_mode(regs))
18992 return single_step_cont(regs, args);
18993@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
18994 return instruction_pointer(regs);
18995 }
18996
18997-struct kgdb_arch arch_kgdb_ops = {
18998+const struct kgdb_arch arch_kgdb_ops = {
18999 /* Breakpoint instruction: */
19000 .gdb_bpt_instr = { 0xcc },
19001 .flags = KGDB_HW_BREAKPOINT,
19002diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
19003index 7a67820..70ea187 100644
19004--- a/arch/x86/kernel/kprobes.c
19005+++ b/arch/x86/kernel/kprobes.c
19006@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
19007 char op;
19008 s32 raddr;
19009 } __attribute__((packed)) * jop;
19010- jop = (struct __arch_jmp_op *)from;
19011+
19012+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
19013+
19014+ pax_open_kernel();
19015 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
19016 jop->op = RELATIVEJUMP_INSTRUCTION;
19017+ pax_close_kernel();
19018 }
19019
19020 /*
19021@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
19022 kprobe_opcode_t opcode;
19023 kprobe_opcode_t *orig_opcodes = opcodes;
19024
19025- if (search_exception_tables((unsigned long)opcodes))
19026+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
19027 return 0; /* Page fault may occur on this address. */
19028
19029 retry:
19030@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
19031 disp = (u8 *) p->addr + *((s32 *) insn) -
19032 (u8 *) p->ainsn.insn;
19033 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
19034+ pax_open_kernel();
19035 *(s32 *)insn = (s32) disp;
19036+ pax_close_kernel();
19037 }
19038 }
19039 #endif
19040@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
19041
19042 static void __kprobes arch_copy_kprobe(struct kprobe *p)
19043 {
19044- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19045+ pax_open_kernel();
19046+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
19047+ pax_close_kernel();
19048
19049 fix_riprel(p);
19050
19051- if (can_boost(p->addr))
19052+ if (can_boost(ktla_ktva(p->addr)))
19053 p->ainsn.boostable = 0;
19054 else
19055 p->ainsn.boostable = -1;
19056
19057- p->opcode = *p->addr;
19058+ p->opcode = *(ktla_ktva(p->addr));
19059 }
19060
19061 int __kprobes arch_prepare_kprobe(struct kprobe *p)
19062@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
19063 if (p->opcode == BREAKPOINT_INSTRUCTION)
19064 regs->ip = (unsigned long)p->addr;
19065 else
19066- regs->ip = (unsigned long)p->ainsn.insn;
19067+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19068 }
19069
19070 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
19071@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
19072 if (p->ainsn.boostable == 1 && !p->post_handler) {
19073 /* Boost up -- we can execute copied instructions directly */
19074 reset_current_kprobe();
19075- regs->ip = (unsigned long)p->ainsn.insn;
19076+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
19077 preempt_enable_no_resched();
19078 return;
19079 }
19080@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
19081 struct kprobe_ctlblk *kcb;
19082
19083 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
19084- if (*addr != BREAKPOINT_INSTRUCTION) {
19085+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
19086 /*
19087 * The breakpoint instruction was removed right
19088 * after we hit it. Another cpu has removed
19089@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
19090 /* Skip orig_ax, ip, cs */
19091 " addq $24, %rsp\n"
19092 " popfq\n"
19093+#ifdef KERNEXEC_PLUGIN
19094+ " btsq $63,(%rsp)\n"
19095+#endif
19096 #else
19097 " pushf\n"
19098 /*
19099@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
19100 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
19101 {
19102 unsigned long *tos = stack_addr(regs);
19103- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
19104+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
19105 unsigned long orig_ip = (unsigned long)p->addr;
19106 kprobe_opcode_t *insn = p->ainsn.insn;
19107
19108@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
19109 struct die_args *args = data;
19110 int ret = NOTIFY_DONE;
19111
19112- if (args->regs && user_mode_vm(args->regs))
19113+ if (args->regs && user_mode(args->regs))
19114 return ret;
19115
19116 switch (val) {
19117diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
19118index 63b0ec8..6d92227 100644
19119--- a/arch/x86/kernel/kvm.c
19120+++ b/arch/x86/kernel/kvm.c
19121@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
19122 pv_mmu_ops.set_pud = kvm_set_pud;
19123 #if PAGETABLE_LEVELS == 4
19124 pv_mmu_ops.set_pgd = kvm_set_pgd;
19125+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
19126 #endif
19127 #endif
19128 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
19129diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
19130index ec6ef60..ab2c824 100644
19131--- a/arch/x86/kernel/ldt.c
19132+++ b/arch/x86/kernel/ldt.c
19133@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
19134 if (reload) {
19135 #ifdef CONFIG_SMP
19136 preempt_disable();
19137- load_LDT(pc);
19138+ load_LDT_nolock(pc);
19139 if (!cpumask_equal(mm_cpumask(current->mm),
19140 cpumask_of(smp_processor_id())))
19141 smp_call_function(flush_ldt, current->mm, 1);
19142 preempt_enable();
19143 #else
19144- load_LDT(pc);
19145+ load_LDT_nolock(pc);
19146 #endif
19147 }
19148 if (oldsize) {
19149@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
19150 return err;
19151
19152 for (i = 0; i < old->size; i++)
19153- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
19154+ write_ldt_entry(new->ldt, i, old->ldt + i);
19155 return 0;
19156 }
19157
19158@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
19159 retval = copy_ldt(&mm->context, &old_mm->context);
19160 mutex_unlock(&old_mm->context.lock);
19161 }
19162+
19163+ if (tsk == current) {
19164+ mm->context.vdso = 0;
19165+
19166+#ifdef CONFIG_X86_32
19167+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19168+ mm->context.user_cs_base = 0UL;
19169+ mm->context.user_cs_limit = ~0UL;
19170+
19171+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
19172+ cpus_clear(mm->context.cpu_user_cs_mask);
19173+#endif
19174+
19175+#endif
19176+#endif
19177+
19178+ }
19179+
19180 return retval;
19181 }
19182
19183@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
19184 }
19185 }
19186
19187+#ifdef CONFIG_PAX_SEGMEXEC
19188+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
19189+ error = -EINVAL;
19190+ goto out_unlock;
19191+ }
19192+#endif
19193+
19194 fill_ldt(&ldt, &ldt_info);
19195 if (oldmode)
19196 ldt.avl = 0;
19197diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
19198index c1c429d..f02eaf9 100644
19199--- a/arch/x86/kernel/machine_kexec_32.c
19200+++ b/arch/x86/kernel/machine_kexec_32.c
19201@@ -26,7 +26,7 @@
19202 #include <asm/system.h>
19203 #include <asm/cacheflush.h>
19204
19205-static void set_idt(void *newidt, __u16 limit)
19206+static void set_idt(struct desc_struct *newidt, __u16 limit)
19207 {
19208 struct desc_ptr curidt;
19209
19210@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
19211 }
19212
19213
19214-static void set_gdt(void *newgdt, __u16 limit)
19215+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
19216 {
19217 struct desc_ptr curgdt;
19218
19219@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
19220 }
19221
19222 control_page = page_address(image->control_code_page);
19223- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
19224+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
19225
19226 relocate_kernel_ptr = control_page;
19227 page_list[PA_CONTROL_PAGE] = __pa(control_page);
19228diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
19229index 1e47679..e73449d 100644
19230--- a/arch/x86/kernel/microcode_amd.c
19231+++ b/arch/x86/kernel/microcode_amd.c
19232@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
19233 uci->mc = NULL;
19234 }
19235
19236-static struct microcode_ops microcode_amd_ops = {
19237+static const struct microcode_ops microcode_amd_ops = {
19238 .request_microcode_user = request_microcode_user,
19239 .request_microcode_fw = request_microcode_fw,
19240 .collect_cpu_info = collect_cpu_info_amd,
19241@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
19242 .microcode_fini_cpu = microcode_fini_cpu_amd,
19243 };
19244
19245-struct microcode_ops * __init init_amd_microcode(void)
19246+const struct microcode_ops * __init init_amd_microcode(void)
19247 {
19248 return &microcode_amd_ops;
19249 }
19250diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
19251index 378e9a8..b5a6ea9 100644
19252--- a/arch/x86/kernel/microcode_core.c
19253+++ b/arch/x86/kernel/microcode_core.c
19254@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
19255
19256 #define MICROCODE_VERSION "2.00"
19257
19258-static struct microcode_ops *microcode_ops;
19259+static const struct microcode_ops *microcode_ops;
19260
19261 /*
19262 * Synchronization.
19263diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
19264index 0d334dd..14cedaf 100644
19265--- a/arch/x86/kernel/microcode_intel.c
19266+++ b/arch/x86/kernel/microcode_intel.c
19267@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
19268
19269 static int get_ucode_user(void *to, const void *from, size_t n)
19270 {
19271- return copy_from_user(to, from, n);
19272+ return copy_from_user(to, (const void __force_user *)from, n);
19273 }
19274
19275 static enum ucode_state
19276 request_microcode_user(int cpu, const void __user *buf, size_t size)
19277 {
19278- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
19279+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
19280 }
19281
19282 static void microcode_fini_cpu(int cpu)
19283@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
19284 uci->mc = NULL;
19285 }
19286
19287-static struct microcode_ops microcode_intel_ops = {
19288+static const struct microcode_ops microcode_intel_ops = {
19289 .request_microcode_user = request_microcode_user,
19290 .request_microcode_fw = request_microcode_fw,
19291 .collect_cpu_info = collect_cpu_info,
19292@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
19293 .microcode_fini_cpu = microcode_fini_cpu,
19294 };
19295
19296-struct microcode_ops * __init init_intel_microcode(void)
19297+const struct microcode_ops * __init init_intel_microcode(void)
19298 {
19299 return &microcode_intel_ops;
19300 }
19301diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
19302index 89f386f..9028f51 100644
19303--- a/arch/x86/kernel/module.c
19304+++ b/arch/x86/kernel/module.c
19305@@ -34,7 +34,7 @@
19306 #define DEBUGP(fmt...)
19307 #endif
19308
19309-void *module_alloc(unsigned long size)
19310+static void *__module_alloc(unsigned long size, pgprot_t prot)
19311 {
19312 struct vm_struct *area;
19313
19314@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
19315 if (!area)
19316 return NULL;
19317
19318- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
19319- PAGE_KERNEL_EXEC);
19320+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
19321+}
19322+
19323+void *module_alloc(unsigned long size)
19324+{
19325+
19326+#ifdef CONFIG_PAX_KERNEXEC
19327+ return __module_alloc(size, PAGE_KERNEL);
19328+#else
19329+ return __module_alloc(size, PAGE_KERNEL_EXEC);
19330+#endif
19331+
19332 }
19333
19334 /* Free memory returned from module_alloc */
19335@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
19336 vfree(module_region);
19337 }
19338
19339+#ifdef CONFIG_PAX_KERNEXEC
19340+#ifdef CONFIG_X86_32
19341+void *module_alloc_exec(unsigned long size)
19342+{
19343+ struct vm_struct *area;
19344+
19345+ if (size == 0)
19346+ return NULL;
19347+
19348+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
19349+ return area ? area->addr : NULL;
19350+}
19351+EXPORT_SYMBOL(module_alloc_exec);
19352+
19353+void module_free_exec(struct module *mod, void *module_region)
19354+{
19355+ vunmap(module_region);
19356+}
19357+EXPORT_SYMBOL(module_free_exec);
19358+#else
19359+void module_free_exec(struct module *mod, void *module_region)
19360+{
19361+ module_free(mod, module_region);
19362+}
19363+EXPORT_SYMBOL(module_free_exec);
19364+
19365+void *module_alloc_exec(unsigned long size)
19366+{
19367+ return __module_alloc(size, PAGE_KERNEL_RX);
19368+}
19369+EXPORT_SYMBOL(module_alloc_exec);
19370+#endif
19371+#endif
19372+
19373 /* We don't need anything special. */
19374 int module_frob_arch_sections(Elf_Ehdr *hdr,
19375 Elf_Shdr *sechdrs,
19376@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19377 unsigned int i;
19378 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
19379 Elf32_Sym *sym;
19380- uint32_t *location;
19381+ uint32_t *plocation, location;
19382
19383 DEBUGP("Applying relocate section %u to %u\n", relsec,
19384 sechdrs[relsec].sh_info);
19385 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
19386 /* This is where to make the change */
19387- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
19388- + rel[i].r_offset;
19389+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
19390+ location = (uint32_t)plocation;
19391+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
19392+ plocation = ktla_ktva((void *)plocation);
19393 /* This is the symbol it is referring to. Note that all
19394 undefined symbols have been resolved. */
19395 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
19396@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
19397 switch (ELF32_R_TYPE(rel[i].r_info)) {
19398 case R_386_32:
19399 /* We add the value into the location given */
19400- *location += sym->st_value;
19401+ pax_open_kernel();
19402+ *plocation += sym->st_value;
19403+ pax_close_kernel();
19404 break;
19405 case R_386_PC32:
19406 /* Add the value, subtract its postition */
19407- *location += sym->st_value - (uint32_t)location;
19408+ pax_open_kernel();
19409+ *plocation += sym->st_value - location;
19410+ pax_close_kernel();
19411 break;
19412 default:
19413 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
19414@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
19415 case R_X86_64_NONE:
19416 break;
19417 case R_X86_64_64:
19418+ pax_open_kernel();
19419 *(u64 *)loc = val;
19420+ pax_close_kernel();
19421 break;
19422 case R_X86_64_32:
19423+ pax_open_kernel();
19424 *(u32 *)loc = val;
19425+ pax_close_kernel();
19426 if (val != *(u32 *)loc)
19427 goto overflow;
19428 break;
19429 case R_X86_64_32S:
19430+ pax_open_kernel();
19431 *(s32 *)loc = val;
19432+ pax_close_kernel();
19433 if ((s64)val != *(s32 *)loc)
19434 goto overflow;
19435 break;
19436 case R_X86_64_PC32:
19437 val -= (u64)loc;
19438+ pax_open_kernel();
19439 *(u32 *)loc = val;
19440+ pax_close_kernel();
19441+
19442 #if 0
19443 if ((s64)val != *(s32 *)loc)
19444 goto overflow;
19445diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
19446index 3a7c5a4..9191528 100644
19447--- a/arch/x86/kernel/paravirt-spinlocks.c
19448+++ b/arch/x86/kernel/paravirt-spinlocks.c
19449@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
19450 __raw_spin_lock(lock);
19451 }
19452
19453-struct pv_lock_ops pv_lock_ops = {
19454+struct pv_lock_ops pv_lock_ops __read_only = {
19455 #ifdef CONFIG_SMP
19456 .spin_is_locked = __ticket_spin_is_locked,
19457 .spin_is_contended = __ticket_spin_is_contended,
19458diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
19459index 1b1739d..dea6077 100644
19460--- a/arch/x86/kernel/paravirt.c
19461+++ b/arch/x86/kernel/paravirt.c
19462@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
19463 {
19464 return x;
19465 }
19466+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19467+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
19468+#endif
19469
19470 void __init default_banner(void)
19471 {
19472@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
19473 * corresponding structure. */
19474 static void *get_call_destination(u8 type)
19475 {
19476- struct paravirt_patch_template tmpl = {
19477+ const struct paravirt_patch_template tmpl = {
19478 .pv_init_ops = pv_init_ops,
19479 .pv_time_ops = pv_time_ops,
19480 .pv_cpu_ops = pv_cpu_ops,
19481@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
19482 .pv_lock_ops = pv_lock_ops,
19483 #endif
19484 };
19485+
19486+ pax_track_stack();
19487 return *((void **)&tmpl + type);
19488 }
19489
19490@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
19491 if (opfunc == NULL)
19492 /* If there's no function, patch it with a ud2a (BUG) */
19493 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
19494- else if (opfunc == _paravirt_nop)
19495+ else if (opfunc == (void *)_paravirt_nop)
19496 /* If the operation is a nop, then nop the callsite */
19497 ret = paravirt_patch_nop();
19498
19499 /* identity functions just return their single argument */
19500- else if (opfunc == _paravirt_ident_32)
19501+ else if (opfunc == (void *)_paravirt_ident_32)
19502 ret = paravirt_patch_ident_32(insnbuf, len);
19503- else if (opfunc == _paravirt_ident_64)
19504+ else if (opfunc == (void *)_paravirt_ident_64)
19505 ret = paravirt_patch_ident_64(insnbuf, len);
19506+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19507+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
19508+ ret = paravirt_patch_ident_64(insnbuf, len);
19509+#endif
19510
19511 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
19512 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
19513@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
19514 if (insn_len > len || start == NULL)
19515 insn_len = len;
19516 else
19517- memcpy(insnbuf, start, insn_len);
19518+ memcpy(insnbuf, ktla_ktva(start), insn_len);
19519
19520 return insn_len;
19521 }
19522@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
19523 preempt_enable();
19524 }
19525
19526-struct pv_info pv_info = {
19527+struct pv_info pv_info __read_only = {
19528 .name = "bare hardware",
19529 .paravirt_enabled = 0,
19530 .kernel_rpl = 0,
19531 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
19532 };
19533
19534-struct pv_init_ops pv_init_ops = {
19535+struct pv_init_ops pv_init_ops __read_only = {
19536 .patch = native_patch,
19537 };
19538
19539-struct pv_time_ops pv_time_ops = {
19540+struct pv_time_ops pv_time_ops __read_only = {
19541 .sched_clock = native_sched_clock,
19542 };
19543
19544-struct pv_irq_ops pv_irq_ops = {
19545+struct pv_irq_ops pv_irq_ops __read_only = {
19546 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
19547 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
19548 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
19549@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
19550 #endif
19551 };
19552
19553-struct pv_cpu_ops pv_cpu_ops = {
19554+struct pv_cpu_ops pv_cpu_ops __read_only = {
19555 .cpuid = native_cpuid,
19556 .get_debugreg = native_get_debugreg,
19557 .set_debugreg = native_set_debugreg,
19558@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
19559 .end_context_switch = paravirt_nop,
19560 };
19561
19562-struct pv_apic_ops pv_apic_ops = {
19563+struct pv_apic_ops pv_apic_ops __read_only = {
19564 #ifdef CONFIG_X86_LOCAL_APIC
19565 .startup_ipi_hook = paravirt_nop,
19566 #endif
19567 };
19568
19569-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
19570+#ifdef CONFIG_X86_32
19571+#ifdef CONFIG_X86_PAE
19572+/* 64-bit pagetable entries */
19573+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
19574+#else
19575 /* 32-bit pagetable entries */
19576 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
19577+#endif
19578 #else
19579 /* 64-bit pagetable entries */
19580 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
19581 #endif
19582
19583-struct pv_mmu_ops pv_mmu_ops = {
19584+struct pv_mmu_ops pv_mmu_ops __read_only = {
19585
19586 .read_cr2 = native_read_cr2,
19587 .write_cr2 = native_write_cr2,
19588@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
19589 .make_pud = PTE_IDENT,
19590
19591 .set_pgd = native_set_pgd,
19592+ .set_pgd_batched = native_set_pgd_batched,
19593 #endif
19594 #endif /* PAGETABLE_LEVELS >= 3 */
19595
19596@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
19597 },
19598
19599 .set_fixmap = native_set_fixmap,
19600+
19601+#ifdef CONFIG_PAX_KERNEXEC
19602+ .pax_open_kernel = native_pax_open_kernel,
19603+ .pax_close_kernel = native_pax_close_kernel,
19604+#endif
19605+
19606 };
19607
19608 EXPORT_SYMBOL_GPL(pv_time_ops);
19609diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
19610index 1a2d4b1..6a0dd55 100644
19611--- a/arch/x86/kernel/pci-calgary_64.c
19612+++ b/arch/x86/kernel/pci-calgary_64.c
19613@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
19614 free_pages((unsigned long)vaddr, get_order(size));
19615 }
19616
19617-static struct dma_map_ops calgary_dma_ops = {
19618+static const struct dma_map_ops calgary_dma_ops = {
19619 .alloc_coherent = calgary_alloc_coherent,
19620 .free_coherent = calgary_free_coherent,
19621 .map_sg = calgary_map_sg,
19622diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
19623index 6ac3931..42b4414 100644
19624--- a/arch/x86/kernel/pci-dma.c
19625+++ b/arch/x86/kernel/pci-dma.c
19626@@ -14,7 +14,7 @@
19627
19628 static int forbid_dac __read_mostly;
19629
19630-struct dma_map_ops *dma_ops;
19631+const struct dma_map_ops *dma_ops;
19632 EXPORT_SYMBOL(dma_ops);
19633
19634 static int iommu_sac_force __read_mostly;
19635@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
19636
19637 int dma_supported(struct device *dev, u64 mask)
19638 {
19639- struct dma_map_ops *ops = get_dma_ops(dev);
19640+ const struct dma_map_ops *ops = get_dma_ops(dev);
19641
19642 #ifdef CONFIG_PCI
19643 if (mask > 0xffffffff && forbid_dac > 0) {
19644diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
19645index 1c76691..e3632db 100644
19646--- a/arch/x86/kernel/pci-gart_64.c
19647+++ b/arch/x86/kernel/pci-gart_64.c
19648@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
19649 return -1;
19650 }
19651
19652-static struct dma_map_ops gart_dma_ops = {
19653+static const struct dma_map_ops gart_dma_ops = {
19654 .map_sg = gart_map_sg,
19655 .unmap_sg = gart_unmap_sg,
19656 .map_page = gart_map_page,
19657diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
19658index a3933d4..c898869 100644
19659--- a/arch/x86/kernel/pci-nommu.c
19660+++ b/arch/x86/kernel/pci-nommu.c
19661@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
19662 flush_write_buffers();
19663 }
19664
19665-struct dma_map_ops nommu_dma_ops = {
19666+const struct dma_map_ops nommu_dma_ops = {
19667 .alloc_coherent = dma_generic_alloc_coherent,
19668 .free_coherent = nommu_free_coherent,
19669 .map_sg = nommu_map_sg,
19670diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
19671index aaa6b78..4de1881 100644
19672--- a/arch/x86/kernel/pci-swiotlb.c
19673+++ b/arch/x86/kernel/pci-swiotlb.c
19674@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
19675 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
19676 }
19677
19678-static struct dma_map_ops swiotlb_dma_ops = {
19679+static const struct dma_map_ops swiotlb_dma_ops = {
19680 .mapping_error = swiotlb_dma_mapping_error,
19681 .alloc_coherent = x86_swiotlb_alloc_coherent,
19682 .free_coherent = swiotlb_free_coherent,
19683diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
19684index fc6c84d..0312ca2 100644
19685--- a/arch/x86/kernel/process.c
19686+++ b/arch/x86/kernel/process.c
19687@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
19688
19689 void free_thread_info(struct thread_info *ti)
19690 {
19691- free_thread_xstate(ti->task);
19692 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
19693 }
19694
19695+static struct kmem_cache *task_struct_cachep;
19696+
19697 void arch_task_cache_init(void)
19698 {
19699- task_xstate_cachep =
19700- kmem_cache_create("task_xstate", xstate_size,
19701+ /* create a slab on which task_structs can be allocated */
19702+ task_struct_cachep =
19703+ kmem_cache_create("task_struct", sizeof(struct task_struct),
19704+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
19705+
19706+ task_xstate_cachep =
19707+ kmem_cache_create("task_xstate", xstate_size,
19708 __alignof__(union thread_xstate),
19709- SLAB_PANIC | SLAB_NOTRACK, NULL);
19710+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
19711+}
19712+
19713+struct task_struct *alloc_task_struct(void)
19714+{
19715+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
19716+}
19717+
19718+void free_task_struct(struct task_struct *task)
19719+{
19720+ free_thread_xstate(task);
19721+ kmem_cache_free(task_struct_cachep, task);
19722 }
19723
19724 /*
19725@@ -73,7 +90,7 @@ void exit_thread(void)
19726 unsigned long *bp = t->io_bitmap_ptr;
19727
19728 if (bp) {
19729- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
19730+ struct tss_struct *tss = init_tss + get_cpu();
19731
19732 t->io_bitmap_ptr = NULL;
19733 clear_thread_flag(TIF_IO_BITMAP);
19734@@ -93,6 +110,9 @@ void flush_thread(void)
19735
19736 clear_tsk_thread_flag(tsk, TIF_DEBUG);
19737
19738+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
19739+ loadsegment(gs, 0);
19740+#endif
19741 tsk->thread.debugreg0 = 0;
19742 tsk->thread.debugreg1 = 0;
19743 tsk->thread.debugreg2 = 0;
19744@@ -307,7 +327,7 @@ void default_idle(void)
19745 EXPORT_SYMBOL(default_idle);
19746 #endif
19747
19748-void stop_this_cpu(void *dummy)
19749+__noreturn void stop_this_cpu(void *dummy)
19750 {
19751 local_irq_disable();
19752 /*
19753@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
19754 }
19755 early_param("idle", idle_setup);
19756
19757-unsigned long arch_align_stack(unsigned long sp)
19758+#ifdef CONFIG_PAX_RANDKSTACK
19759+void pax_randomize_kstack(struct pt_regs *regs)
19760 {
19761- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
19762- sp -= get_random_int() % 8192;
19763- return sp & ~0xf;
19764-}
19765+ struct thread_struct *thread = &current->thread;
19766+ unsigned long time;
19767
19768-unsigned long arch_randomize_brk(struct mm_struct *mm)
19769-{
19770- unsigned long range_end = mm->brk + 0x02000000;
19771- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
19772+ if (!randomize_va_space)
19773+ return;
19774+
19775+ if (v8086_mode(regs))
19776+ return;
19777+
19778+ rdtscl(time);
19779+
19780+ /* P4 seems to return a 0 LSB, ignore it */
19781+#ifdef CONFIG_MPENTIUM4
19782+ time &= 0x3EUL;
19783+ time <<= 2;
19784+#elif defined(CONFIG_X86_64)
19785+ time &= 0xFUL;
19786+ time <<= 4;
19787+#else
19788+ time &= 0x1FUL;
19789+ time <<= 3;
19790+#endif
19791+
19792+ thread->sp0 ^= time;
19793+ load_sp0(init_tss + smp_processor_id(), thread);
19794+
19795+#ifdef CONFIG_X86_64
19796+ percpu_write(kernel_stack, thread->sp0);
19797+#endif
19798 }
19799+#endif
19800
19801diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
19802index c40c432..6e1df72 100644
19803--- a/arch/x86/kernel/process_32.c
19804+++ b/arch/x86/kernel/process_32.c
19805@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
19806 unsigned long thread_saved_pc(struct task_struct *tsk)
19807 {
19808 return ((unsigned long *)tsk->thread.sp)[3];
19809+//XXX return tsk->thread.eip;
19810 }
19811
19812 #ifndef CONFIG_SMP
19813@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
19814 unsigned short ss, gs;
19815 const char *board;
19816
19817- if (user_mode_vm(regs)) {
19818+ if (user_mode(regs)) {
19819 sp = regs->sp;
19820 ss = regs->ss & 0xffff;
19821- gs = get_user_gs(regs);
19822 } else {
19823 sp = (unsigned long) (&regs->sp);
19824 savesegment(ss, ss);
19825- savesegment(gs, gs);
19826 }
19827+ gs = get_user_gs(regs);
19828
19829 printk("\n");
19830
19831@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
19832 regs.bx = (unsigned long) fn;
19833 regs.dx = (unsigned long) arg;
19834
19835- regs.ds = __USER_DS;
19836- regs.es = __USER_DS;
19837+ regs.ds = __KERNEL_DS;
19838+ regs.es = __KERNEL_DS;
19839 regs.fs = __KERNEL_PERCPU;
19840- regs.gs = __KERNEL_STACK_CANARY;
19841+ savesegment(gs, regs.gs);
19842 regs.orig_ax = -1;
19843 regs.ip = (unsigned long) kernel_thread_helper;
19844 regs.cs = __KERNEL_CS | get_kernel_rpl();
19845@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19846 struct task_struct *tsk;
19847 int err;
19848
19849- childregs = task_pt_regs(p);
19850+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
19851 *childregs = *regs;
19852 childregs->ax = 0;
19853 childregs->sp = sp;
19854
19855 p->thread.sp = (unsigned long) childregs;
19856 p->thread.sp0 = (unsigned long) (childregs+1);
19857+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19858
19859 p->thread.ip = (unsigned long) ret_from_fork;
19860
19861@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19862 struct thread_struct *prev = &prev_p->thread,
19863 *next = &next_p->thread;
19864 int cpu = smp_processor_id();
19865- struct tss_struct *tss = &per_cpu(init_tss, cpu);
19866+ struct tss_struct *tss = init_tss + cpu;
19867 bool preload_fpu;
19868
19869 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
19870@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19871 */
19872 lazy_save_gs(prev->gs);
19873
19874+#ifdef CONFIG_PAX_MEMORY_UDEREF
19875+ __set_fs(task_thread_info(next_p)->addr_limit);
19876+#endif
19877+
19878 /*
19879 * Load the per-thread Thread-Local Storage descriptor.
19880 */
19881@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19882 */
19883 arch_end_context_switch(next_p);
19884
19885+ percpu_write(current_task, next_p);
19886+ percpu_write(current_tinfo, &next_p->tinfo);
19887+
19888 if (preload_fpu)
19889 __math_state_restore();
19890
19891@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19892 if (prev->gs | next->gs)
19893 lazy_load_gs(next->gs);
19894
19895- percpu_write(current_task, next_p);
19896-
19897 return prev_p;
19898 }
19899
19900@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
19901 } while (count++ < 16);
19902 return 0;
19903 }
19904-
19905diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
19906index 39493bc..196816d 100644
19907--- a/arch/x86/kernel/process_64.c
19908+++ b/arch/x86/kernel/process_64.c
19909@@ -91,7 +91,7 @@ static void __exit_idle(void)
19910 void exit_idle(void)
19911 {
19912 /* idle loop has pid 0 */
19913- if (current->pid)
19914+ if (task_pid_nr(current))
19915 return;
19916 __exit_idle();
19917 }
19918@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
19919 if (!board)
19920 board = "";
19921 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
19922- current->pid, current->comm, print_tainted(),
19923+ task_pid_nr(current), current->comm, print_tainted(),
19924 init_utsname()->release,
19925 (int)strcspn(init_utsname()->version, " "),
19926 init_utsname()->version, board);
19927@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19928 struct pt_regs *childregs;
19929 struct task_struct *me = current;
19930
19931- childregs = ((struct pt_regs *)
19932- (THREAD_SIZE + task_stack_page(p))) - 1;
19933+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
19934 *childregs = *regs;
19935
19936 childregs->ax = 0;
19937@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
19938 p->thread.sp = (unsigned long) childregs;
19939 p->thread.sp0 = (unsigned long) (childregs+1);
19940 p->thread.usersp = me->thread.usersp;
19941+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
19942
19943 set_tsk_thread_flag(p, TIF_FORK);
19944
19945@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19946 struct thread_struct *prev = &prev_p->thread;
19947 struct thread_struct *next = &next_p->thread;
19948 int cpu = smp_processor_id();
19949- struct tss_struct *tss = &per_cpu(init_tss, cpu);
19950+ struct tss_struct *tss = init_tss + cpu;
19951 unsigned fsindex, gsindex;
19952 bool preload_fpu;
19953
19954@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
19955 prev->usersp = percpu_read(old_rsp);
19956 percpu_write(old_rsp, next->usersp);
19957 percpu_write(current_task, next_p);
19958+ percpu_write(current_tinfo, &next_p->tinfo);
19959
19960- percpu_write(kernel_stack,
19961- (unsigned long)task_stack_page(next_p) +
19962- THREAD_SIZE - KERNEL_STACK_OFFSET);
19963+ percpu_write(kernel_stack, next->sp0);
19964
19965 /*
19966 * Now maybe reload the debug registers and handle I/O bitmaps
19967@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
19968 if (!p || p == current || p->state == TASK_RUNNING)
19969 return 0;
19970 stack = (unsigned long)task_stack_page(p);
19971- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
19972+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
19973 return 0;
19974 fp = *(u64 *)(p->thread.sp);
19975 do {
19976- if (fp < (unsigned long)stack ||
19977- fp >= (unsigned long)stack+THREAD_SIZE)
19978+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
19979 return 0;
19980 ip = *(u64 *)(fp+8);
19981 if (!in_sched_functions(ip))
19982diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
19983index c06acdd..3f5fff5 100644
19984--- a/arch/x86/kernel/ptrace.c
19985+++ b/arch/x86/kernel/ptrace.c
19986@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
19987 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19988 {
19989 int ret;
19990- unsigned long __user *datap = (unsigned long __user *)data;
19991+ unsigned long __user *datap = (__force unsigned long __user *)data;
19992
19993 switch (request) {
19994 /* read the word at location addr in the USER area. */
19995@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
19996 if (addr < 0)
19997 return -EIO;
19998 ret = do_get_thread_area(child, addr,
19999- (struct user_desc __user *) data);
20000+ (__force struct user_desc __user *) data);
20001 break;
20002
20003 case PTRACE_SET_THREAD_AREA:
20004 if (addr < 0)
20005 return -EIO;
20006 ret = do_set_thread_area(child, addr,
20007- (struct user_desc __user *) data, 0);
20008+ (__force struct user_desc __user *) data, 0);
20009 break;
20010 #endif
20011
20012@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20013 #ifdef CONFIG_X86_PTRACE_BTS
20014 case PTRACE_BTS_CONFIG:
20015 ret = ptrace_bts_config
20016- (child, data, (struct ptrace_bts_config __user *)addr);
20017+ (child, data, (__force struct ptrace_bts_config __user *)addr);
20018 break;
20019
20020 case PTRACE_BTS_STATUS:
20021 ret = ptrace_bts_status
20022- (child, data, (struct ptrace_bts_config __user *)addr);
20023+ (child, data, (__force struct ptrace_bts_config __user *)addr);
20024 break;
20025
20026 case PTRACE_BTS_SIZE:
20027@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20028
20029 case PTRACE_BTS_GET:
20030 ret = ptrace_bts_read_record
20031- (child, data, (struct bts_struct __user *) addr);
20032+ (child, data, (__force struct bts_struct __user *) addr);
20033 break;
20034
20035 case PTRACE_BTS_CLEAR:
20036@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
20037
20038 case PTRACE_BTS_DRAIN:
20039 ret = ptrace_bts_drain
20040- (child, data, (struct bts_struct __user *) addr);
20041+ (child, data, (__force struct bts_struct __user *) addr);
20042 break;
20043 #endif /* CONFIG_X86_PTRACE_BTS */
20044
20045@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20046 info.si_code = si_code;
20047
20048 /* User-mode ip? */
20049- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
20050+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
20051
20052 /* Send us the fake SIGTRAP */
20053 force_sig_info(SIGTRAP, &info, tsk);
20054@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
20055 * We must return the syscall number to actually look up in the table.
20056 * This can be -1L to skip running any syscall at all.
20057 */
20058-asmregparm long syscall_trace_enter(struct pt_regs *regs)
20059+long syscall_trace_enter(struct pt_regs *regs)
20060 {
20061 long ret = 0;
20062
20063@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
20064 return ret ?: regs->orig_ax;
20065 }
20066
20067-asmregparm void syscall_trace_leave(struct pt_regs *regs)
20068+void syscall_trace_leave(struct pt_regs *regs)
20069 {
20070 if (unlikely(current->audit_context))
20071 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
20072diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
20073index cf98100..e76e03d 100644
20074--- a/arch/x86/kernel/reboot.c
20075+++ b/arch/x86/kernel/reboot.c
20076@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
20077 EXPORT_SYMBOL(pm_power_off);
20078
20079 static const struct desc_ptr no_idt = {};
20080-static int reboot_mode;
20081+static unsigned short reboot_mode;
20082 enum reboot_type reboot_type = BOOT_KBD;
20083 int reboot_force;
20084
20085@@ -292,12 +292,12 @@ core_initcall(reboot_init);
20086 controller to pulse the CPU reset line, which is more thorough, but
20087 doesn't work with at least one type of 486 motherboard. It is easy
20088 to stop this code working; hence the copious comments. */
20089-static const unsigned long long
20090-real_mode_gdt_entries [3] =
20091+static struct desc_struct
20092+real_mode_gdt_entries [3] __read_only =
20093 {
20094- 0x0000000000000000ULL, /* Null descriptor */
20095- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
20096- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
20097+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
20098+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
20099+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
20100 };
20101
20102 static const struct desc_ptr
20103@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
20104 * specified by the code and length parameters.
20105 * We assume that length will aways be less that 100!
20106 */
20107-void machine_real_restart(const unsigned char *code, int length)
20108+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
20109 {
20110 local_irq_disable();
20111
20112@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
20113 /* Remap the kernel at virtual address zero, as well as offset zero
20114 from the kernel segment. This assumes the kernel segment starts at
20115 virtual address PAGE_OFFSET. */
20116- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20117- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
20118+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20119+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
20120
20121 /*
20122 * Use `swapper_pg_dir' as our page directory.
20123@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
20124 boot)". This seems like a fairly standard thing that gets set by
20125 REBOOT.COM programs, and the previous reset routine did this
20126 too. */
20127- *((unsigned short *)0x472) = reboot_mode;
20128+ *(unsigned short *)(__va(0x472)) = reboot_mode;
20129
20130 /* For the switch to real mode, copy some code to low memory. It has
20131 to be in the first 64k because it is running in 16-bit mode, and it
20132 has to have the same physical and virtual address, because it turns
20133 off paging. Copy it near the end of the first page, out of the way
20134 of BIOS variables. */
20135- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
20136- real_mode_switch, sizeof (real_mode_switch));
20137- memcpy((void *)(0x1000 - 100), code, length);
20138+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
20139+ memcpy(__va(0x1000 - 100), code, length);
20140
20141 /* Set up the IDT for real mode. */
20142 load_idt(&real_mode_idt);
20143@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
20144 __asm__ __volatile__ ("ljmp $0x0008,%0"
20145 :
20146 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
20147+ do { } while (1);
20148 }
20149 #ifdef CONFIG_APM_MODULE
20150 EXPORT_SYMBOL(machine_real_restart);
20151@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
20152 {
20153 }
20154
20155-static void native_machine_emergency_restart(void)
20156+__noreturn static void native_machine_emergency_restart(void)
20157 {
20158 int i;
20159
20160@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
20161 #endif
20162 }
20163
20164-static void __machine_emergency_restart(int emergency)
20165+static __noreturn void __machine_emergency_restart(int emergency)
20166 {
20167 reboot_emergency = emergency;
20168 machine_ops.emergency_restart();
20169 }
20170
20171-static void native_machine_restart(char *__unused)
20172+static __noreturn void native_machine_restart(char *__unused)
20173 {
20174 printk("machine restart\n");
20175
20176@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
20177 __machine_emergency_restart(0);
20178 }
20179
20180-static void native_machine_halt(void)
20181+static __noreturn void native_machine_halt(void)
20182 {
20183 /* stop other cpus and apics */
20184 machine_shutdown();
20185@@ -685,7 +685,7 @@ static void native_machine_halt(void)
20186 stop_this_cpu(NULL);
20187 }
20188
20189-static void native_machine_power_off(void)
20190+__noreturn static void native_machine_power_off(void)
20191 {
20192 if (pm_power_off) {
20193 if (!reboot_force)
20194@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
20195 }
20196 /* a fallback in case there is no PM info available */
20197 tboot_shutdown(TB_SHUTDOWN_HALT);
20198+ do { } while (1);
20199 }
20200
20201 struct machine_ops machine_ops = {
20202diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
20203index 7a6f3b3..976a959 100644
20204--- a/arch/x86/kernel/relocate_kernel_64.S
20205+++ b/arch/x86/kernel/relocate_kernel_64.S
20206@@ -11,6 +11,7 @@
20207 #include <asm/kexec.h>
20208 #include <asm/processor-flags.h>
20209 #include <asm/pgtable_types.h>
20210+#include <asm/alternative-asm.h>
20211
20212 /*
20213 * Must be relocatable PIC code callable as a C function
20214@@ -167,6 +168,7 @@ identity_mapped:
20215 xorq %r14, %r14
20216 xorq %r15, %r15
20217
20218+ pax_force_retaddr 0, 1
20219 ret
20220
20221 1:
20222diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
20223index 5449a26..0b6c759 100644
20224--- a/arch/x86/kernel/setup.c
20225+++ b/arch/x86/kernel/setup.c
20226@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
20227
20228 if (!boot_params.hdr.root_flags)
20229 root_mountflags &= ~MS_RDONLY;
20230- init_mm.start_code = (unsigned long) _text;
20231- init_mm.end_code = (unsigned long) _etext;
20232+ init_mm.start_code = ktla_ktva((unsigned long) _text);
20233+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
20234 init_mm.end_data = (unsigned long) _edata;
20235 init_mm.brk = _brk_end;
20236
20237- code_resource.start = virt_to_phys(_text);
20238- code_resource.end = virt_to_phys(_etext)-1;
20239- data_resource.start = virt_to_phys(_etext);
20240+ code_resource.start = virt_to_phys(ktla_ktva(_text));
20241+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
20242+ data_resource.start = virt_to_phys(_sdata);
20243 data_resource.end = virt_to_phys(_edata)-1;
20244 bss_resource.start = virt_to_phys(&__bss_start);
20245 bss_resource.end = virt_to_phys(&__bss_stop)-1;
20246diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
20247index d559af9..524c6ad 100644
20248--- a/arch/x86/kernel/setup_percpu.c
20249+++ b/arch/x86/kernel/setup_percpu.c
20250@@ -25,19 +25,17 @@
20251 # define DBG(x...)
20252 #endif
20253
20254-DEFINE_PER_CPU(int, cpu_number);
20255+#ifdef CONFIG_SMP
20256+DEFINE_PER_CPU(unsigned int, cpu_number);
20257 EXPORT_PER_CPU_SYMBOL(cpu_number);
20258+#endif
20259
20260-#ifdef CONFIG_X86_64
20261 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
20262-#else
20263-#define BOOT_PERCPU_OFFSET 0
20264-#endif
20265
20266 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
20267 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
20268
20269-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
20270+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
20271 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
20272 };
20273 EXPORT_SYMBOL(__per_cpu_offset);
20274@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
20275 {
20276 #ifdef CONFIG_X86_32
20277 struct desc_struct gdt;
20278+ unsigned long base = per_cpu_offset(cpu);
20279
20280- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
20281- 0x2 | DESCTYPE_S, 0x8);
20282- gdt.s = 1;
20283+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
20284+ 0x83 | DESCTYPE_S, 0xC);
20285 write_gdt_entry(get_cpu_gdt_table(cpu),
20286 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
20287 #endif
20288@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
20289 /* alrighty, percpu areas up and running */
20290 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
20291 for_each_possible_cpu(cpu) {
20292+#ifdef CONFIG_CC_STACKPROTECTOR
20293+#ifdef CONFIG_X86_32
20294+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
20295+#endif
20296+#endif
20297 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
20298 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
20299 per_cpu(cpu_number, cpu) = cpu;
20300@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
20301 early_per_cpu_map(x86_cpu_to_node_map, cpu);
20302 #endif
20303 #endif
20304+#ifdef CONFIG_CC_STACKPROTECTOR
20305+#ifdef CONFIG_X86_32
20306+ if (!cpu)
20307+ per_cpu(stack_canary.canary, cpu) = canary;
20308+#endif
20309+#endif
20310 /*
20311 * Up to this point, the boot CPU has been using .data.init
20312 * area. Reload any changed state for the boot CPU.
20313diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
20314index 6a44a76..a9287a1 100644
20315--- a/arch/x86/kernel/signal.c
20316+++ b/arch/x86/kernel/signal.c
20317@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
20318 * Align the stack pointer according to the i386 ABI,
20319 * i.e. so that on function entry ((sp + 4) & 15) == 0.
20320 */
20321- sp = ((sp + 4) & -16ul) - 4;
20322+ sp = ((sp - 12) & -16ul) - 4;
20323 #else /* !CONFIG_X86_32 */
20324 sp = round_down(sp, 16) - 8;
20325 #endif
20326@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
20327 * Return an always-bogus address instead so we will die with SIGSEGV.
20328 */
20329 if (onsigstack && !likely(on_sig_stack(sp)))
20330- return (void __user *)-1L;
20331+ return (__force void __user *)-1L;
20332
20333 /* save i387 state */
20334 if (used_math() && save_i387_xstate(*fpstate) < 0)
20335- return (void __user *)-1L;
20336+ return (__force void __user *)-1L;
20337
20338 return (void __user *)sp;
20339 }
20340@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20341 }
20342
20343 if (current->mm->context.vdso)
20344- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20345+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
20346 else
20347- restorer = &frame->retcode;
20348+ restorer = (void __user *)&frame->retcode;
20349 if (ka->sa.sa_flags & SA_RESTORER)
20350 restorer = ka->sa.sa_restorer;
20351
20352@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
20353 * reasons and because gdb uses it as a signature to notice
20354 * signal handler stack frames.
20355 */
20356- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
20357+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
20358
20359 if (err)
20360 return -EFAULT;
20361@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20362 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
20363
20364 /* Set up to return from userspace. */
20365- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20366+ if (current->mm->context.vdso)
20367+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
20368+ else
20369+ restorer = (void __user *)&frame->retcode;
20370 if (ka->sa.sa_flags & SA_RESTORER)
20371 restorer = ka->sa.sa_restorer;
20372 put_user_ex(restorer, &frame->pretcode);
20373@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
20374 * reasons and because gdb uses it as a signature to notice
20375 * signal handler stack frames.
20376 */
20377- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
20378+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
20379 } put_user_catch(err);
20380
20381 if (err)
20382@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
20383 int signr;
20384 sigset_t *oldset;
20385
20386+ pax_track_stack();
20387+
20388 /*
20389 * We want the common case to go fast, which is why we may in certain
20390 * cases get here from kernel mode. Just return without doing anything
20391@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
20392 * X86_32: vm86 regs switched out by assembly code before reaching
20393 * here, so testing against kernel CS suffices.
20394 */
20395- if (!user_mode(regs))
20396+ if (!user_mode_novm(regs))
20397 return;
20398
20399 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
20400diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
20401index 7e8e905..64d5c32 100644
20402--- a/arch/x86/kernel/smpboot.c
20403+++ b/arch/x86/kernel/smpboot.c
20404@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
20405 */
20406 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
20407
20408-void cpu_hotplug_driver_lock()
20409+void cpu_hotplug_driver_lock(void)
20410 {
20411- mutex_lock(&x86_cpu_hotplug_driver_mutex);
20412+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
20413 }
20414
20415-void cpu_hotplug_driver_unlock()
20416+void cpu_hotplug_driver_unlock(void)
20417 {
20418- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20419+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
20420 }
20421
20422 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
20423@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
20424 * target processor state.
20425 */
20426 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
20427- (unsigned long)stack_start.sp);
20428+ stack_start);
20429
20430 /*
20431 * Run STARTUP IPI loop.
20432@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
20433 set_idle_for_cpu(cpu, c_idle.idle);
20434 do_rest:
20435 per_cpu(current_task, cpu) = c_idle.idle;
20436+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
20437 #ifdef CONFIG_X86_32
20438 /* Stack for startup_32 can be just as for start_secondary onwards */
20439 irq_ctx_init(cpu);
20440@@ -750,13 +751,15 @@ do_rest:
20441 #else
20442 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
20443 initial_gs = per_cpu_offset(cpu);
20444- per_cpu(kernel_stack, cpu) =
20445- (unsigned long)task_stack_page(c_idle.idle) -
20446- KERNEL_STACK_OFFSET + THREAD_SIZE;
20447+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
20448 #endif
20449+
20450+ pax_open_kernel();
20451 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20452+ pax_close_kernel();
20453+
20454 initial_code = (unsigned long)start_secondary;
20455- stack_start.sp = (void *) c_idle.idle->thread.sp;
20456+ stack_start = c_idle.idle->thread.sp;
20457
20458 /* start_ip had better be page-aligned! */
20459 start_ip = setup_trampoline();
20460@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
20461
20462 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
20463
20464+#ifdef CONFIG_PAX_PER_CPU_PGD
20465+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
20466+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20467+ KERNEL_PGD_PTRS);
20468+#endif
20469+
20470 err = do_boot_cpu(apicid, cpu);
20471
20472 if (err) {
20473diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
20474index 3149032..14f1053 100644
20475--- a/arch/x86/kernel/step.c
20476+++ b/arch/x86/kernel/step.c
20477@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20478 struct desc_struct *desc;
20479 unsigned long base;
20480
20481- seg &= ~7UL;
20482+ seg >>= 3;
20483
20484 mutex_lock(&child->mm->context.lock);
20485- if (unlikely((seg >> 3) >= child->mm->context.size))
20486+ if (unlikely(seg >= child->mm->context.size))
20487 addr = -1L; /* bogus selector, access would fault */
20488 else {
20489 desc = child->mm->context.ldt + seg;
20490@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
20491 addr += base;
20492 }
20493 mutex_unlock(&child->mm->context.lock);
20494- }
20495+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
20496+ addr = ktla_ktva(addr);
20497
20498 return addr;
20499 }
20500@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20501 unsigned char opcode[15];
20502 unsigned long addr = convert_ip_to_linear(child, regs);
20503
20504+ if (addr == -EINVAL)
20505+ return 0;
20506+
20507 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
20508 for (i = 0; i < copied; i++) {
20509 switch (opcode[i]) {
20510@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
20511
20512 #ifdef CONFIG_X86_64
20513 case 0x40 ... 0x4f:
20514- if (regs->cs != __USER_CS)
20515+ if ((regs->cs & 0xffff) != __USER_CS)
20516 /* 32-bit mode: register increment */
20517 return 0;
20518 /* 64-bit mode: REX prefix */
20519diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
20520index dee1ff7..a397f7f 100644
20521--- a/arch/x86/kernel/sys_i386_32.c
20522+++ b/arch/x86/kernel/sys_i386_32.c
20523@@ -24,6 +24,21 @@
20524
20525 #include <asm/syscalls.h>
20526
20527+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
20528+{
20529+ unsigned long pax_task_size = TASK_SIZE;
20530+
20531+#ifdef CONFIG_PAX_SEGMEXEC
20532+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
20533+ pax_task_size = SEGMEXEC_TASK_SIZE;
20534+#endif
20535+
20536+ if (len > pax_task_size || addr > pax_task_size - len)
20537+ return -EINVAL;
20538+
20539+ return 0;
20540+}
20541+
20542 /*
20543 * Perform the select(nd, in, out, ex, tv) and mmap() system
20544 * calls. Linux/i386 didn't use to be able to handle more than
20545@@ -58,6 +73,212 @@ out:
20546 return err;
20547 }
20548
20549+unsigned long
20550+arch_get_unmapped_area(struct file *filp, unsigned long addr,
20551+ unsigned long len, unsigned long pgoff, unsigned long flags)
20552+{
20553+ struct mm_struct *mm = current->mm;
20554+ struct vm_area_struct *vma;
20555+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20556+
20557+#ifdef CONFIG_PAX_SEGMEXEC
20558+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20559+ pax_task_size = SEGMEXEC_TASK_SIZE;
20560+#endif
20561+
20562+ pax_task_size -= PAGE_SIZE;
20563+
20564+ if (len > pax_task_size)
20565+ return -ENOMEM;
20566+
20567+ if (flags & MAP_FIXED)
20568+ return addr;
20569+
20570+#ifdef CONFIG_PAX_RANDMMAP
20571+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20572+#endif
20573+
20574+ if (addr) {
20575+ addr = PAGE_ALIGN(addr);
20576+ if (pax_task_size - len >= addr) {
20577+ vma = find_vma(mm, addr);
20578+ if (check_heap_stack_gap(vma, addr, len))
20579+ return addr;
20580+ }
20581+ }
20582+ if (len > mm->cached_hole_size) {
20583+ start_addr = addr = mm->free_area_cache;
20584+ } else {
20585+ start_addr = addr = mm->mmap_base;
20586+ mm->cached_hole_size = 0;
20587+ }
20588+
20589+#ifdef CONFIG_PAX_PAGEEXEC
20590+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
20591+ start_addr = 0x00110000UL;
20592+
20593+#ifdef CONFIG_PAX_RANDMMAP
20594+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20595+ start_addr += mm->delta_mmap & 0x03FFF000UL;
20596+#endif
20597+
20598+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
20599+ start_addr = addr = mm->mmap_base;
20600+ else
20601+ addr = start_addr;
20602+ }
20603+#endif
20604+
20605+full_search:
20606+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20607+ /* At this point: (!vma || addr < vma->vm_end). */
20608+ if (pax_task_size - len < addr) {
20609+ /*
20610+ * Start a new search - just in case we missed
20611+ * some holes.
20612+ */
20613+ if (start_addr != mm->mmap_base) {
20614+ start_addr = addr = mm->mmap_base;
20615+ mm->cached_hole_size = 0;
20616+ goto full_search;
20617+ }
20618+ return -ENOMEM;
20619+ }
20620+ if (check_heap_stack_gap(vma, addr, len))
20621+ break;
20622+ if (addr + mm->cached_hole_size < vma->vm_start)
20623+ mm->cached_hole_size = vma->vm_start - addr;
20624+ addr = vma->vm_end;
20625+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
20626+ start_addr = addr = mm->mmap_base;
20627+ mm->cached_hole_size = 0;
20628+ goto full_search;
20629+ }
20630+ }
20631+
20632+ /*
20633+ * Remember the place where we stopped the search:
20634+ */
20635+ mm->free_area_cache = addr + len;
20636+ return addr;
20637+}
20638+
20639+unsigned long
20640+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20641+ const unsigned long len, const unsigned long pgoff,
20642+ const unsigned long flags)
20643+{
20644+ struct vm_area_struct *vma;
20645+ struct mm_struct *mm = current->mm;
20646+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
20647+
20648+#ifdef CONFIG_PAX_SEGMEXEC
20649+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20650+ pax_task_size = SEGMEXEC_TASK_SIZE;
20651+#endif
20652+
20653+ pax_task_size -= PAGE_SIZE;
20654+
20655+ /* requested length too big for entire address space */
20656+ if (len > pax_task_size)
20657+ return -ENOMEM;
20658+
20659+ if (flags & MAP_FIXED)
20660+ return addr;
20661+
20662+#ifdef CONFIG_PAX_PAGEEXEC
20663+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
20664+ goto bottomup;
20665+#endif
20666+
20667+#ifdef CONFIG_PAX_RANDMMAP
20668+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20669+#endif
20670+
20671+ /* requesting a specific address */
20672+ if (addr) {
20673+ addr = PAGE_ALIGN(addr);
20674+ if (pax_task_size - len >= addr) {
20675+ vma = find_vma(mm, addr);
20676+ if (check_heap_stack_gap(vma, addr, len))
20677+ return addr;
20678+ }
20679+ }
20680+
20681+ /* check if free_area_cache is useful for us */
20682+ if (len <= mm->cached_hole_size) {
20683+ mm->cached_hole_size = 0;
20684+ mm->free_area_cache = mm->mmap_base;
20685+ }
20686+
20687+ /* either no address requested or can't fit in requested address hole */
20688+ addr = mm->free_area_cache;
20689+
20690+ /* make sure it can fit in the remaining address space */
20691+ if (addr > len) {
20692+ vma = find_vma(mm, addr-len);
20693+ if (check_heap_stack_gap(vma, addr - len, len))
20694+ /* remember the address as a hint for next time */
20695+ return (mm->free_area_cache = addr-len);
20696+ }
20697+
20698+ if (mm->mmap_base < len)
20699+ goto bottomup;
20700+
20701+ addr = mm->mmap_base-len;
20702+
20703+ do {
20704+ /*
20705+ * Lookup failure means no vma is above this address,
20706+ * else if new region fits below vma->vm_start,
20707+ * return with success:
20708+ */
20709+ vma = find_vma(mm, addr);
20710+ if (check_heap_stack_gap(vma, addr, len))
20711+ /* remember the address as a hint for next time */
20712+ return (mm->free_area_cache = addr);
20713+
20714+ /* remember the largest hole we saw so far */
20715+ if (addr + mm->cached_hole_size < vma->vm_start)
20716+ mm->cached_hole_size = vma->vm_start - addr;
20717+
20718+ /* try just below the current vma->vm_start */
20719+ addr = skip_heap_stack_gap(vma, len);
20720+ } while (!IS_ERR_VALUE(addr));
20721+
20722+bottomup:
20723+ /*
20724+ * A failed mmap() very likely causes application failure,
20725+ * so fall back to the bottom-up function here. This scenario
20726+ * can happen with large stack limits and large mmap()
20727+ * allocations.
20728+ */
20729+
20730+#ifdef CONFIG_PAX_SEGMEXEC
20731+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20732+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20733+ else
20734+#endif
20735+
20736+ mm->mmap_base = TASK_UNMAPPED_BASE;
20737+
20738+#ifdef CONFIG_PAX_RANDMMAP
20739+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20740+ mm->mmap_base += mm->delta_mmap;
20741+#endif
20742+
20743+ mm->free_area_cache = mm->mmap_base;
20744+ mm->cached_hole_size = ~0UL;
20745+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20746+ /*
20747+ * Restore the topdown base:
20748+ */
20749+ mm->mmap_base = base;
20750+ mm->free_area_cache = base;
20751+ mm->cached_hole_size = ~0UL;
20752+
20753+ return addr;
20754+}
20755
20756 struct sel_arg_struct {
20757 unsigned long n;
20758@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
20759 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
20760 case SEMTIMEDOP:
20761 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
20762- (const struct timespec __user *)fifth);
20763+ (__force const struct timespec __user *)fifth);
20764
20765 case SEMGET:
20766 return sys_semget(first, second, third);
20767@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
20768 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
20769 if (ret)
20770 return ret;
20771- return put_user(raddr, (ulong __user *) third);
20772+ return put_user(raddr, (__force ulong __user *) third);
20773 }
20774 case 1: /* iBCS2 emulator entry point */
20775 if (!segment_eq(get_fs(), get_ds()))
20776@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
20777
20778 return error;
20779 }
20780-
20781-
20782-/*
20783- * Do a system call from kernel instead of calling sys_execve so we
20784- * end up with proper pt_regs.
20785- */
20786-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
20787-{
20788- long __res;
20789- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
20790- : "=a" (__res)
20791- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
20792- return __res;
20793-}
20794diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
20795index 8aa2057..b604bc1 100644
20796--- a/arch/x86/kernel/sys_x86_64.c
20797+++ b/arch/x86/kernel/sys_x86_64.c
20798@@ -32,8 +32,8 @@ out:
20799 return error;
20800 }
20801
20802-static void find_start_end(unsigned long flags, unsigned long *begin,
20803- unsigned long *end)
20804+static void find_start_end(struct mm_struct *mm, unsigned long flags,
20805+ unsigned long *begin, unsigned long *end)
20806 {
20807 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
20808 unsigned long new_begin;
20809@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
20810 *begin = new_begin;
20811 }
20812 } else {
20813- *begin = TASK_UNMAPPED_BASE;
20814+ *begin = mm->mmap_base;
20815 *end = TASK_SIZE;
20816 }
20817 }
20818@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
20819 if (flags & MAP_FIXED)
20820 return addr;
20821
20822- find_start_end(flags, &begin, &end);
20823+ find_start_end(mm, flags, &begin, &end);
20824
20825 if (len > end)
20826 return -ENOMEM;
20827
20828+#ifdef CONFIG_PAX_RANDMMAP
20829+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20830+#endif
20831+
20832 if (addr) {
20833 addr = PAGE_ALIGN(addr);
20834 vma = find_vma(mm, addr);
20835- if (end - len >= addr &&
20836- (!vma || addr + len <= vma->vm_start))
20837+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
20838 return addr;
20839 }
20840 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
20841@@ -106,7 +109,7 @@ full_search:
20842 }
20843 return -ENOMEM;
20844 }
20845- if (!vma || addr + len <= vma->vm_start) {
20846+ if (check_heap_stack_gap(vma, addr, len)) {
20847 /*
20848 * Remember the place where we stopped the search:
20849 */
20850@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20851 {
20852 struct vm_area_struct *vma;
20853 struct mm_struct *mm = current->mm;
20854- unsigned long addr = addr0;
20855+ unsigned long base = mm->mmap_base, addr = addr0;
20856
20857 /* requested length too big for entire address space */
20858 if (len > TASK_SIZE)
20859@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20860 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
20861 goto bottomup;
20862
20863+#ifdef CONFIG_PAX_RANDMMAP
20864+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
20865+#endif
20866+
20867 /* requesting a specific address */
20868 if (addr) {
20869 addr = PAGE_ALIGN(addr);
20870- vma = find_vma(mm, addr);
20871- if (TASK_SIZE - len >= addr &&
20872- (!vma || addr + len <= vma->vm_start))
20873- return addr;
20874+ if (TASK_SIZE - len >= addr) {
20875+ vma = find_vma(mm, addr);
20876+ if (check_heap_stack_gap(vma, addr, len))
20877+ return addr;
20878+ }
20879 }
20880
20881 /* check if free_area_cache is useful for us */
20882@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20883 /* make sure it can fit in the remaining address space */
20884 if (addr > len) {
20885 vma = find_vma(mm, addr-len);
20886- if (!vma || addr <= vma->vm_start)
20887+ if (check_heap_stack_gap(vma, addr - len, len))
20888 /* remember the address as a hint for next time */
20889 return mm->free_area_cache = addr-len;
20890 }
20891@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20892 * return with success:
20893 */
20894 vma = find_vma(mm, addr);
20895- if (!vma || addr+len <= vma->vm_start)
20896+ if (check_heap_stack_gap(vma, addr, len))
20897 /* remember the address as a hint for next time */
20898 return mm->free_area_cache = addr;
20899
20900@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
20901 mm->cached_hole_size = vma->vm_start - addr;
20902
20903 /* try just below the current vma->vm_start */
20904- addr = vma->vm_start-len;
20905- } while (len < vma->vm_start);
20906+ addr = skip_heap_stack_gap(vma, len);
20907+ } while (!IS_ERR_VALUE(addr));
20908
20909 bottomup:
20910 /*
20911@@ -198,13 +206,21 @@ bottomup:
20912 * can happen with large stack limits and large mmap()
20913 * allocations.
20914 */
20915+ mm->mmap_base = TASK_UNMAPPED_BASE;
20916+
20917+#ifdef CONFIG_PAX_RANDMMAP
20918+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20919+ mm->mmap_base += mm->delta_mmap;
20920+#endif
20921+
20922+ mm->free_area_cache = mm->mmap_base;
20923 mm->cached_hole_size = ~0UL;
20924- mm->free_area_cache = TASK_UNMAPPED_BASE;
20925 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
20926 /*
20927 * Restore the topdown base:
20928 */
20929- mm->free_area_cache = mm->mmap_base;
20930+ mm->mmap_base = base;
20931+ mm->free_area_cache = base;
20932 mm->cached_hole_size = ~0UL;
20933
20934 return addr;
20935diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
20936index 76d70a4..4c94a44 100644
20937--- a/arch/x86/kernel/syscall_table_32.S
20938+++ b/arch/x86/kernel/syscall_table_32.S
20939@@ -1,3 +1,4 @@
20940+.section .rodata,"a",@progbits
20941 ENTRY(sys_call_table)
20942 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
20943 .long sys_exit
20944diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
20945index 46b8277..3349d55 100644
20946--- a/arch/x86/kernel/tboot.c
20947+++ b/arch/x86/kernel/tboot.c
20948@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
20949
20950 void tboot_shutdown(u32 shutdown_type)
20951 {
20952- void (*shutdown)(void);
20953+ void (* __noreturn shutdown)(void);
20954
20955 if (!tboot_enabled())
20956 return;
20957@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
20958
20959 switch_to_tboot_pt();
20960
20961- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
20962+ shutdown = (void *)tboot->shutdown_entry;
20963 shutdown();
20964
20965 /* should not reach here */
20966@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
20967 tboot_shutdown(acpi_shutdown_map[sleep_state]);
20968 }
20969
20970-static atomic_t ap_wfs_count;
20971+static atomic_unchecked_t ap_wfs_count;
20972
20973 static int tboot_wait_for_aps(int num_aps)
20974 {
20975@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
20976 {
20977 switch (action) {
20978 case CPU_DYING:
20979- atomic_inc(&ap_wfs_count);
20980+ atomic_inc_unchecked(&ap_wfs_count);
20981 if (num_online_cpus() == 1)
20982- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
20983+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
20984 return NOTIFY_BAD;
20985 break;
20986 }
20987@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
20988
20989 tboot_create_trampoline();
20990
20991- atomic_set(&ap_wfs_count, 0);
20992+ atomic_set_unchecked(&ap_wfs_count, 0);
20993 register_hotcpu_notifier(&tboot_cpu_notifier);
20994 return 0;
20995 }
20996diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
20997index be25734..87fe232 100644
20998--- a/arch/x86/kernel/time.c
20999+++ b/arch/x86/kernel/time.c
21000@@ -26,17 +26,13 @@
21001 int timer_ack;
21002 #endif
21003
21004-#ifdef CONFIG_X86_64
21005-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
21006-#endif
21007-
21008 unsigned long profile_pc(struct pt_regs *regs)
21009 {
21010 unsigned long pc = instruction_pointer(regs);
21011
21012- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
21013+ if (!user_mode(regs) && in_lock_functions(pc)) {
21014 #ifdef CONFIG_FRAME_POINTER
21015- return *(unsigned long *)(regs->bp + sizeof(long));
21016+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
21017 #else
21018 unsigned long *sp =
21019 (unsigned long *)kernel_stack_pointer(regs);
21020@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
21021 * or above a saved flags. Eflags has bits 22-31 zero,
21022 * kernel addresses don't.
21023 */
21024+
21025+#ifdef CONFIG_PAX_KERNEXEC
21026+ return ktla_ktva(sp[0]);
21027+#else
21028 if (sp[0] >> 22)
21029 return sp[0];
21030 if (sp[1] >> 22)
21031 return sp[1];
21032 #endif
21033+
21034+#endif
21035 }
21036 return pc;
21037 }
21038diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
21039index 6bb7b85..dd853e1 100644
21040--- a/arch/x86/kernel/tls.c
21041+++ b/arch/x86/kernel/tls.c
21042@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
21043 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
21044 return -EINVAL;
21045
21046+#ifdef CONFIG_PAX_SEGMEXEC
21047+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
21048+ return -EINVAL;
21049+#endif
21050+
21051 set_tls_desc(p, idx, &info, 1);
21052
21053 return 0;
21054diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
21055index 8508237..229b664 100644
21056--- a/arch/x86/kernel/trampoline_32.S
21057+++ b/arch/x86/kernel/trampoline_32.S
21058@@ -32,6 +32,12 @@
21059 #include <asm/segment.h>
21060 #include <asm/page_types.h>
21061
21062+#ifdef CONFIG_PAX_KERNEXEC
21063+#define ta(X) (X)
21064+#else
21065+#define ta(X) ((X) - __PAGE_OFFSET)
21066+#endif
21067+
21068 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
21069 __CPUINITRODATA
21070 .code16
21071@@ -60,7 +66,7 @@ r_base = .
21072 inc %ax # protected mode (PE) bit
21073 lmsw %ax # into protected mode
21074 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
21075- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
21076+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
21077
21078 # These need to be in the same 64K segment as the above;
21079 # hence we don't use the boot_gdt_descr defined in head.S
21080diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
21081index 3af2dff..ba8aa49 100644
21082--- a/arch/x86/kernel/trampoline_64.S
21083+++ b/arch/x86/kernel/trampoline_64.S
21084@@ -91,7 +91,7 @@ startup_32:
21085 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
21086 movl %eax, %ds
21087
21088- movl $X86_CR4_PAE, %eax
21089+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
21090 movl %eax, %cr4 # Enable PAE mode
21091
21092 # Setup trampoline 4 level pagetables
21093@@ -127,7 +127,7 @@ startup_64:
21094 no_longmode:
21095 hlt
21096 jmp no_longmode
21097-#include "verify_cpu_64.S"
21098+#include "verify_cpu.S"
21099
21100 # Careful these need to be in the same 64K segment as the above;
21101 tidt:
21102@@ -138,7 +138,7 @@ tidt:
21103 # so the kernel can live anywhere
21104 .balign 4
21105 tgdt:
21106- .short tgdt_end - tgdt # gdt limit
21107+ .short tgdt_end - tgdt - 1 # gdt limit
21108 .long tgdt - r_base
21109 .short 0
21110 .quad 0x00cf9b000000ffff # __KERNEL32_CS
21111diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
21112index 7e37dce..ec3f8e5 100644
21113--- a/arch/x86/kernel/traps.c
21114+++ b/arch/x86/kernel/traps.c
21115@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
21116
21117 /* Do we ignore FPU interrupts ? */
21118 char ignore_fpu_irq;
21119-
21120-/*
21121- * The IDT has to be page-aligned to simplify the Pentium
21122- * F0 0F bug workaround.
21123- */
21124-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
21125 #endif
21126
21127 DECLARE_BITMAP(used_vectors, NR_VECTORS);
21128@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
21129 static inline void
21130 die_if_kernel(const char *str, struct pt_regs *regs, long err)
21131 {
21132- if (!user_mode_vm(regs))
21133+ if (!user_mode(regs))
21134 die(str, regs, err);
21135 }
21136 #endif
21137
21138 static void __kprobes
21139-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21140+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
21141 long error_code, siginfo_t *info)
21142 {
21143 struct task_struct *tsk = current;
21144
21145 #ifdef CONFIG_X86_32
21146- if (regs->flags & X86_VM_MASK) {
21147+ if (v8086_mode(regs)) {
21148 /*
21149 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
21150 * On nmi (interrupt 2), do_trap should not be called.
21151@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
21152 }
21153 #endif
21154
21155- if (!user_mode(regs))
21156+ if (!user_mode_novm(regs))
21157 goto kernel_trap;
21158
21159 #ifdef CONFIG_X86_32
21160@@ -158,7 +152,7 @@ trap_signal:
21161 printk_ratelimit()) {
21162 printk(KERN_INFO
21163 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
21164- tsk->comm, tsk->pid, str,
21165+ tsk->comm, task_pid_nr(tsk), str,
21166 regs->ip, regs->sp, error_code);
21167 print_vma_addr(" in ", regs->ip);
21168 printk("\n");
21169@@ -175,8 +169,20 @@ kernel_trap:
21170 if (!fixup_exception(regs)) {
21171 tsk->thread.error_code = error_code;
21172 tsk->thread.trap_no = trapnr;
21173+
21174+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21175+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
21176+ str = "PAX: suspicious stack segment fault";
21177+#endif
21178+
21179 die(str, regs, error_code);
21180 }
21181+
21182+#ifdef CONFIG_PAX_REFCOUNT
21183+ if (trapnr == 4)
21184+ pax_report_refcount_overflow(regs);
21185+#endif
21186+
21187 return;
21188
21189 #ifdef CONFIG_X86_32
21190@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
21191 conditional_sti(regs);
21192
21193 #ifdef CONFIG_X86_32
21194- if (regs->flags & X86_VM_MASK)
21195+ if (v8086_mode(regs))
21196 goto gp_in_vm86;
21197 #endif
21198
21199 tsk = current;
21200- if (!user_mode(regs))
21201+ if (!user_mode_novm(regs))
21202 goto gp_in_kernel;
21203
21204+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21205+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
21206+ struct mm_struct *mm = tsk->mm;
21207+ unsigned long limit;
21208+
21209+ down_write(&mm->mmap_sem);
21210+ limit = mm->context.user_cs_limit;
21211+ if (limit < TASK_SIZE) {
21212+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
21213+ up_write(&mm->mmap_sem);
21214+ return;
21215+ }
21216+ up_write(&mm->mmap_sem);
21217+ }
21218+#endif
21219+
21220 tsk->thread.error_code = error_code;
21221 tsk->thread.trap_no = 13;
21222
21223@@ -305,6 +327,13 @@ gp_in_kernel:
21224 if (notify_die(DIE_GPF, "general protection fault", regs,
21225 error_code, 13, SIGSEGV) == NOTIFY_STOP)
21226 return;
21227+
21228+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21229+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
21230+ die("PAX: suspicious general protection fault", regs, error_code);
21231+ else
21232+#endif
21233+
21234 die("general protection fault", regs, error_code);
21235 }
21236
21237@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
21238 dotraplinkage notrace __kprobes void
21239 do_nmi(struct pt_regs *regs, long error_code)
21240 {
21241+
21242+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21243+ if (!user_mode(regs)) {
21244+ unsigned long cs = regs->cs & 0xFFFF;
21245+ unsigned long ip = ktva_ktla(regs->ip);
21246+
21247+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21248+ regs->ip = ip;
21249+ }
21250+#endif
21251+
21252 nmi_enter();
21253
21254 inc_irq_stat(__nmi_count);
21255@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21256 }
21257
21258 #ifdef CONFIG_X86_32
21259- if (regs->flags & X86_VM_MASK)
21260+ if (v8086_mode(regs))
21261 goto debug_vm86;
21262 #endif
21263
21264@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
21265 * kernel space (but re-enable TF when returning to user mode).
21266 */
21267 if (condition & DR_STEP) {
21268- if (!user_mode(regs))
21269+ if (!user_mode_novm(regs))
21270 goto clear_TF_reenable;
21271 }
21272
21273@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
21274 * Handle strange cache flush from user space exception
21275 * in all other cases. This is undocumented behaviour.
21276 */
21277- if (regs->flags & X86_VM_MASK) {
21278+ if (v8086_mode(regs)) {
21279 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
21280 return;
21281 }
21282@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
21283 void __math_state_restore(void)
21284 {
21285 struct thread_info *thread = current_thread_info();
21286- struct task_struct *tsk = thread->task;
21287+ struct task_struct *tsk = current;
21288
21289 /*
21290 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
21291@@ -825,8 +865,7 @@ void __math_state_restore(void)
21292 */
21293 asmlinkage void math_state_restore(void)
21294 {
21295- struct thread_info *thread = current_thread_info();
21296- struct task_struct *tsk = thread->task;
21297+ struct task_struct *tsk = current;
21298
21299 if (!tsk_used_math(tsk)) {
21300 local_irq_enable();
21301diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
21302new file mode 100644
21303index 0000000..50c5edd
21304--- /dev/null
21305+++ b/arch/x86/kernel/verify_cpu.S
21306@@ -0,0 +1,140 @@
21307+/*
21308+ *
21309+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
21310+ * code has been borrowed from boot/setup.S and was introduced by
21311+ * Andi Kleen.
21312+ *
21313+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21314+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21315+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21316+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
21317+ *
21318+ * This source code is licensed under the GNU General Public License,
21319+ * Version 2. See the file COPYING for more details.
21320+ *
21321+ * This is a common code for verification whether CPU supports
21322+ * long mode and SSE or not. It is not called directly instead this
21323+ * file is included at various places and compiled in that context.
21324+ * This file is expected to run in 32bit code. Currently:
21325+ *
21326+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
21327+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
21328+ * arch/x86/kernel/head_32.S: processor startup
21329+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
21330+ *
21331+ * verify_cpu, returns the status of longmode and SSE in register %eax.
21332+ * 0: Success 1: Failure
21333+ *
21334+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
21335+ *
21336+ * The caller needs to check for the error code and take the action
21337+ * appropriately. Either display a message or halt.
21338+ */
21339+
21340+#include <asm/cpufeature.h>
21341+#include <asm/msr-index.h>
21342+
21343+verify_cpu:
21344+ pushfl # Save caller passed flags
21345+ pushl $0 # Kill any dangerous flags
21346+ popfl
21347+
21348+ pushfl # standard way to check for cpuid
21349+ popl %eax
21350+ movl %eax,%ebx
21351+ xorl $0x200000,%eax
21352+ pushl %eax
21353+ popfl
21354+ pushfl
21355+ popl %eax
21356+ cmpl %eax,%ebx
21357+ jz verify_cpu_no_longmode # cpu has no cpuid
21358+
21359+ movl $0x0,%eax # See if cpuid 1 is implemented
21360+ cpuid
21361+ cmpl $0x1,%eax
21362+ jb verify_cpu_no_longmode # no cpuid 1
21363+
21364+ xor %di,%di
21365+ cmpl $0x68747541,%ebx # AuthenticAMD
21366+ jnz verify_cpu_noamd
21367+ cmpl $0x69746e65,%edx
21368+ jnz verify_cpu_noamd
21369+ cmpl $0x444d4163,%ecx
21370+ jnz verify_cpu_noamd
21371+ mov $1,%di # cpu is from AMD
21372+ jmp verify_cpu_check
21373+
21374+verify_cpu_noamd:
21375+ cmpl $0x756e6547,%ebx # GenuineIntel?
21376+ jnz verify_cpu_check
21377+ cmpl $0x49656e69,%edx
21378+ jnz verify_cpu_check
21379+ cmpl $0x6c65746e,%ecx
21380+ jnz verify_cpu_check
21381+
21382+ # only call IA32_MISC_ENABLE when:
21383+ # family > 6 || (family == 6 && model >= 0xd)
21384+ movl $0x1, %eax # check CPU family and model
21385+ cpuid
21386+ movl %eax, %ecx
21387+
21388+ andl $0x0ff00f00, %eax # mask family and extended family
21389+ shrl $8, %eax
21390+ cmpl $6, %eax
21391+ ja verify_cpu_clear_xd # family > 6, ok
21392+ jb verify_cpu_check # family < 6, skip
21393+
21394+ andl $0x000f00f0, %ecx # mask model and extended model
21395+ shrl $4, %ecx
21396+ cmpl $0xd, %ecx
21397+ jb verify_cpu_check # family == 6, model < 0xd, skip
21398+
21399+verify_cpu_clear_xd:
21400+ movl $MSR_IA32_MISC_ENABLE, %ecx
21401+ rdmsr
21402+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
21403+ jnc verify_cpu_check # only write MSR if bit was changed
21404+ wrmsr
21405+
21406+verify_cpu_check:
21407+ movl $0x1,%eax # Does the cpu have what it takes
21408+ cpuid
21409+ andl $REQUIRED_MASK0,%edx
21410+ xorl $REQUIRED_MASK0,%edx
21411+ jnz verify_cpu_no_longmode
21412+
21413+ movl $0x80000000,%eax # See if extended cpuid is implemented
21414+ cpuid
21415+ cmpl $0x80000001,%eax
21416+ jb verify_cpu_no_longmode # no extended cpuid
21417+
21418+ movl $0x80000001,%eax # Does the cpu have what it takes
21419+ cpuid
21420+ andl $REQUIRED_MASK1,%edx
21421+ xorl $REQUIRED_MASK1,%edx
21422+ jnz verify_cpu_no_longmode
21423+
21424+verify_cpu_sse_test:
21425+ movl $1,%eax
21426+ cpuid
21427+ andl $SSE_MASK,%edx
21428+ cmpl $SSE_MASK,%edx
21429+ je verify_cpu_sse_ok
21430+ test %di,%di
21431+ jz verify_cpu_no_longmode # only try to force SSE on AMD
21432+ movl $MSR_K7_HWCR,%ecx
21433+ rdmsr
21434+ btr $15,%eax # enable SSE
21435+ wrmsr
21436+ xor %di,%di # don't loop
21437+ jmp verify_cpu_sse_test # try again
21438+
21439+verify_cpu_no_longmode:
21440+ popfl # Restore caller passed flags
21441+ movl $1,%eax
21442+ ret
21443+verify_cpu_sse_ok:
21444+ popfl # Restore caller passed flags
21445+ xorl %eax, %eax
21446+ ret
21447diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
21448deleted file mode 100644
21449index 45b6f8a..0000000
21450--- a/arch/x86/kernel/verify_cpu_64.S
21451+++ /dev/null
21452@@ -1,105 +0,0 @@
21453-/*
21454- *
21455- * verify_cpu.S - Code for cpu long mode and SSE verification. This
21456- * code has been borrowed from boot/setup.S and was introduced by
21457- * Andi Kleen.
21458- *
21459- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
21460- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
21461- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
21462- *
21463- * This source code is licensed under the GNU General Public License,
21464- * Version 2. See the file COPYING for more details.
21465- *
21466- * This is a common code for verification whether CPU supports
21467- * long mode and SSE or not. It is not called directly instead this
21468- * file is included at various places and compiled in that context.
21469- * Following are the current usage.
21470- *
21471- * This file is included by both 16bit and 32bit code.
21472- *
21473- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
21474- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
21475- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
21476- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
21477- *
21478- * verify_cpu, returns the status of cpu check in register %eax.
21479- * 0: Success 1: Failure
21480- *
21481- * The caller needs to check for the error code and take the action
21482- * appropriately. Either display a message or halt.
21483- */
21484-
21485-#include <asm/cpufeature.h>
21486-
21487-verify_cpu:
21488- pushfl # Save caller passed flags
21489- pushl $0 # Kill any dangerous flags
21490- popfl
21491-
21492- pushfl # standard way to check for cpuid
21493- popl %eax
21494- movl %eax,%ebx
21495- xorl $0x200000,%eax
21496- pushl %eax
21497- popfl
21498- pushfl
21499- popl %eax
21500- cmpl %eax,%ebx
21501- jz verify_cpu_no_longmode # cpu has no cpuid
21502-
21503- movl $0x0,%eax # See if cpuid 1 is implemented
21504- cpuid
21505- cmpl $0x1,%eax
21506- jb verify_cpu_no_longmode # no cpuid 1
21507-
21508- xor %di,%di
21509- cmpl $0x68747541,%ebx # AuthenticAMD
21510- jnz verify_cpu_noamd
21511- cmpl $0x69746e65,%edx
21512- jnz verify_cpu_noamd
21513- cmpl $0x444d4163,%ecx
21514- jnz verify_cpu_noamd
21515- mov $1,%di # cpu is from AMD
21516-
21517-verify_cpu_noamd:
21518- movl $0x1,%eax # Does the cpu have what it takes
21519- cpuid
21520- andl $REQUIRED_MASK0,%edx
21521- xorl $REQUIRED_MASK0,%edx
21522- jnz verify_cpu_no_longmode
21523-
21524- movl $0x80000000,%eax # See if extended cpuid is implemented
21525- cpuid
21526- cmpl $0x80000001,%eax
21527- jb verify_cpu_no_longmode # no extended cpuid
21528-
21529- movl $0x80000001,%eax # Does the cpu have what it takes
21530- cpuid
21531- andl $REQUIRED_MASK1,%edx
21532- xorl $REQUIRED_MASK1,%edx
21533- jnz verify_cpu_no_longmode
21534-
21535-verify_cpu_sse_test:
21536- movl $1,%eax
21537- cpuid
21538- andl $SSE_MASK,%edx
21539- cmpl $SSE_MASK,%edx
21540- je verify_cpu_sse_ok
21541- test %di,%di
21542- jz verify_cpu_no_longmode # only try to force SSE on AMD
21543- movl $0xc0010015,%ecx # HWCR
21544- rdmsr
21545- btr $15,%eax # enable SSE
21546- wrmsr
21547- xor %di,%di # don't loop
21548- jmp verify_cpu_sse_test # try again
21549-
21550-verify_cpu_no_longmode:
21551- popfl # Restore caller passed flags
21552- movl $1,%eax
21553- ret
21554-verify_cpu_sse_ok:
21555- popfl # Restore caller passed flags
21556- xorl %eax, %eax
21557- ret
21558diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
21559index 9c4e625..c992817 100644
21560--- a/arch/x86/kernel/vm86_32.c
21561+++ b/arch/x86/kernel/vm86_32.c
21562@@ -41,6 +41,7 @@
21563 #include <linux/ptrace.h>
21564 #include <linux/audit.h>
21565 #include <linux/stddef.h>
21566+#include <linux/grsecurity.h>
21567
21568 #include <asm/uaccess.h>
21569 #include <asm/io.h>
21570@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
21571 do_exit(SIGSEGV);
21572 }
21573
21574- tss = &per_cpu(init_tss, get_cpu());
21575+ tss = init_tss + get_cpu();
21576 current->thread.sp0 = current->thread.saved_sp0;
21577 current->thread.sysenter_cs = __KERNEL_CS;
21578 load_sp0(tss, &current->thread);
21579@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
21580 struct task_struct *tsk;
21581 int tmp, ret = -EPERM;
21582
21583+#ifdef CONFIG_GRKERNSEC_VM86
21584+ if (!capable(CAP_SYS_RAWIO)) {
21585+ gr_handle_vm86();
21586+ goto out;
21587+ }
21588+#endif
21589+
21590 tsk = current;
21591 if (tsk->thread.saved_sp0)
21592 goto out;
21593@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
21594 int tmp, ret;
21595 struct vm86plus_struct __user *v86;
21596
21597+#ifdef CONFIG_GRKERNSEC_VM86
21598+ if (!capable(CAP_SYS_RAWIO)) {
21599+ gr_handle_vm86();
21600+ ret = -EPERM;
21601+ goto out;
21602+ }
21603+#endif
21604+
21605 tsk = current;
21606 switch (regs->bx) {
21607 case VM86_REQUEST_IRQ:
21608@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
21609 tsk->thread.saved_fs = info->regs32->fs;
21610 tsk->thread.saved_gs = get_user_gs(info->regs32);
21611
21612- tss = &per_cpu(init_tss, get_cpu());
21613+ tss = init_tss + get_cpu();
21614 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
21615 if (cpu_has_sep)
21616 tsk->thread.sysenter_cs = 0;
21617@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
21618 goto cannot_handle;
21619 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
21620 goto cannot_handle;
21621- intr_ptr = (unsigned long __user *) (i << 2);
21622+ intr_ptr = (__force unsigned long __user *) (i << 2);
21623 if (get_user(segoffs, intr_ptr))
21624 goto cannot_handle;
21625 if ((segoffs >> 16) == BIOSSEG)
21626diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
21627index d430e4c..831f817 100644
21628--- a/arch/x86/kernel/vmi_32.c
21629+++ b/arch/x86/kernel/vmi_32.c
21630@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
21631 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
21632
21633 #define call_vrom_func(rom,func) \
21634- (((VROMFUNC *)(rom->func))())
21635+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
21636
21637 #define call_vrom_long_func(rom,func,arg) \
21638- (((VROMLONGFUNC *)(rom->func)) (arg))
21639+({\
21640+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
21641+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
21642+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
21643+ __reloc;\
21644+})
21645
21646-static struct vrom_header *vmi_rom;
21647+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
21648 static int disable_pge;
21649 static int disable_pse;
21650 static int disable_sep;
21651@@ -76,10 +81,10 @@ static struct {
21652 void (*set_initial_ap_state)(int, int);
21653 void (*halt)(void);
21654 void (*set_lazy_mode)(int mode);
21655-} vmi_ops;
21656+} __no_const vmi_ops __read_only;
21657
21658 /* Cached VMI operations */
21659-struct vmi_timer_ops vmi_timer_ops;
21660+struct vmi_timer_ops vmi_timer_ops __read_only;
21661
21662 /*
21663 * VMI patching routines.
21664@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
21665 static inline void patch_offset(void *insnbuf,
21666 unsigned long ip, unsigned long dest)
21667 {
21668- *(unsigned long *)(insnbuf+1) = dest-ip-5;
21669+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
21670 }
21671
21672 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21673@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
21674 {
21675 u64 reloc;
21676 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
21677+
21678 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
21679 switch(rel->type) {
21680 case VMI_RELOCATION_CALL_REL:
21681@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
21682
21683 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
21684 {
21685- const pte_t pte = { .pte = 0 };
21686+ const pte_t pte = __pte(0ULL);
21687 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
21688 }
21689
21690 static void vmi_pmd_clear(pmd_t *pmd)
21691 {
21692- const pte_t pte = { .pte = 0 };
21693+ const pte_t pte = __pte(0ULL);
21694 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
21695 }
21696 #endif
21697@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
21698 ap.ss = __KERNEL_DS;
21699 ap.esp = (unsigned long) start_esp;
21700
21701- ap.ds = __USER_DS;
21702- ap.es = __USER_DS;
21703+ ap.ds = __KERNEL_DS;
21704+ ap.es = __KERNEL_DS;
21705 ap.fs = __KERNEL_PERCPU;
21706- ap.gs = __KERNEL_STACK_CANARY;
21707+ savesegment(gs, ap.gs);
21708
21709 ap.eflags = 0;
21710
21711@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
21712 paravirt_leave_lazy_mmu();
21713 }
21714
21715+#ifdef CONFIG_PAX_KERNEXEC
21716+static unsigned long vmi_pax_open_kernel(void)
21717+{
21718+ return 0;
21719+}
21720+
21721+static unsigned long vmi_pax_close_kernel(void)
21722+{
21723+ return 0;
21724+}
21725+#endif
21726+
21727 static inline int __init check_vmi_rom(struct vrom_header *rom)
21728 {
21729 struct pci_header *pci;
21730@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
21731 return 0;
21732 if (rom->vrom_signature != VMI_SIGNATURE)
21733 return 0;
21734+ if (rom->rom_length * 512 > sizeof(*rom)) {
21735+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
21736+ return 0;
21737+ }
21738 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
21739 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
21740 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
21741@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
21742 struct vrom_header *romstart;
21743 romstart = (struct vrom_header *)isa_bus_to_virt(base);
21744 if (check_vmi_rom(romstart)) {
21745- vmi_rom = romstart;
21746+ vmi_rom = *romstart;
21747 return 1;
21748 }
21749 }
21750@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
21751
21752 para_fill(pv_irq_ops.safe_halt, Halt);
21753
21754+#ifdef CONFIG_PAX_KERNEXEC
21755+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
21756+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
21757+#endif
21758+
21759 /*
21760 * Alternative instruction rewriting doesn't happen soon enough
21761 * to convert VMI_IRET to a call instead of a jump; so we have
21762@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
21763
21764 void __init vmi_init(void)
21765 {
21766- if (!vmi_rom)
21767+ if (!vmi_rom.rom_signature)
21768 probe_vmi_rom();
21769 else
21770- check_vmi_rom(vmi_rom);
21771+ check_vmi_rom(&vmi_rom);
21772
21773 /* In case probing for or validating the ROM failed, basil */
21774- if (!vmi_rom)
21775+ if (!vmi_rom.rom_signature)
21776 return;
21777
21778- reserve_top_address(-vmi_rom->virtual_top);
21779+ reserve_top_address(-vmi_rom.virtual_top);
21780
21781 #ifdef CONFIG_X86_IO_APIC
21782 /* This is virtual hardware; timer routing is wired correctly */
21783@@ -874,7 +901,7 @@ void __init vmi_activate(void)
21784 {
21785 unsigned long flags;
21786
21787- if (!vmi_rom)
21788+ if (!vmi_rom.rom_signature)
21789 return;
21790
21791 local_irq_save(flags);
21792diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
21793index 3c68fe2..12c8280 100644
21794--- a/arch/x86/kernel/vmlinux.lds.S
21795+++ b/arch/x86/kernel/vmlinux.lds.S
21796@@ -26,6 +26,13 @@
21797 #include <asm/page_types.h>
21798 #include <asm/cache.h>
21799 #include <asm/boot.h>
21800+#include <asm/segment.h>
21801+
21802+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21803+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
21804+#else
21805+#define __KERNEL_TEXT_OFFSET 0
21806+#endif
21807
21808 #undef i386 /* in case the preprocessor is a 32bit one */
21809
21810@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
21811 #ifdef CONFIG_X86_32
21812 OUTPUT_ARCH(i386)
21813 ENTRY(phys_startup_32)
21814-jiffies = jiffies_64;
21815 #else
21816 OUTPUT_ARCH(i386:x86-64)
21817 ENTRY(phys_startup_64)
21818-jiffies_64 = jiffies;
21819 #endif
21820
21821 PHDRS {
21822 text PT_LOAD FLAGS(5); /* R_E */
21823- data PT_LOAD FLAGS(7); /* RWE */
21824+#ifdef CONFIG_X86_32
21825+ module PT_LOAD FLAGS(5); /* R_E */
21826+#endif
21827+#ifdef CONFIG_XEN
21828+ rodata PT_LOAD FLAGS(5); /* R_E */
21829+#else
21830+ rodata PT_LOAD FLAGS(4); /* R__ */
21831+#endif
21832+ data PT_LOAD FLAGS(6); /* RW_ */
21833 #ifdef CONFIG_X86_64
21834 user PT_LOAD FLAGS(5); /* R_E */
21835+#endif
21836+ init.begin PT_LOAD FLAGS(6); /* RW_ */
21837 #ifdef CONFIG_SMP
21838 percpu PT_LOAD FLAGS(6); /* RW_ */
21839 #endif
21840+ text.init PT_LOAD FLAGS(5); /* R_E */
21841+ text.exit PT_LOAD FLAGS(5); /* R_E */
21842 init PT_LOAD FLAGS(7); /* RWE */
21843-#endif
21844 note PT_NOTE FLAGS(0); /* ___ */
21845 }
21846
21847 SECTIONS
21848 {
21849 #ifdef CONFIG_X86_32
21850- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
21851- phys_startup_32 = startup_32 - LOAD_OFFSET;
21852+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
21853 #else
21854- . = __START_KERNEL;
21855- phys_startup_64 = startup_64 - LOAD_OFFSET;
21856+ . = __START_KERNEL;
21857 #endif
21858
21859 /* Text and read-only data */
21860- .text : AT(ADDR(.text) - LOAD_OFFSET) {
21861- _text = .;
21862+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
21863 /* bootstrapping code */
21864+#ifdef CONFIG_X86_32
21865+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21866+#else
21867+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21868+#endif
21869+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
21870+ _text = .;
21871 HEAD_TEXT
21872 #ifdef CONFIG_X86_32
21873 . = ALIGN(PAGE_SIZE);
21874@@ -82,28 +102,71 @@ SECTIONS
21875 IRQENTRY_TEXT
21876 *(.fixup)
21877 *(.gnu.warning)
21878- /* End of text section */
21879- _etext = .;
21880 } :text = 0x9090
21881
21882- NOTES :text :note
21883+ . += __KERNEL_TEXT_OFFSET;
21884
21885- EXCEPTION_TABLE(16) :text = 0x9090
21886+#ifdef CONFIG_X86_32
21887+ . = ALIGN(PAGE_SIZE);
21888+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
21889+ *(.vmi.rom)
21890+ } :module
21891+
21892+ . = ALIGN(PAGE_SIZE);
21893+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
21894+
21895+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
21896+ MODULES_EXEC_VADDR = .;
21897+ BYTE(0)
21898+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
21899+ . = ALIGN(HPAGE_SIZE);
21900+ MODULES_EXEC_END = . - 1;
21901+#endif
21902+
21903+ } :module
21904+#endif
21905+
21906+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
21907+ /* End of text section */
21908+ _etext = . - __KERNEL_TEXT_OFFSET;
21909+ }
21910+
21911+#ifdef CONFIG_X86_32
21912+ . = ALIGN(PAGE_SIZE);
21913+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
21914+ *(.idt)
21915+ . = ALIGN(PAGE_SIZE);
21916+ *(.empty_zero_page)
21917+ *(.swapper_pg_fixmap)
21918+ *(.swapper_pg_pmd)
21919+ *(.swapper_pg_dir)
21920+ *(.trampoline_pg_dir)
21921+ } :rodata
21922+#endif
21923+
21924+ . = ALIGN(PAGE_SIZE);
21925+ NOTES :rodata :note
21926+
21927+ EXCEPTION_TABLE(16) :rodata
21928
21929 RO_DATA(PAGE_SIZE)
21930
21931 /* Data */
21932 .data : AT(ADDR(.data) - LOAD_OFFSET) {
21933+
21934+#ifdef CONFIG_PAX_KERNEXEC
21935+ . = ALIGN(HPAGE_SIZE);
21936+#else
21937+ . = ALIGN(PAGE_SIZE);
21938+#endif
21939+
21940 /* Start of data section */
21941 _sdata = .;
21942
21943 /* init_task */
21944 INIT_TASK_DATA(THREAD_SIZE)
21945
21946-#ifdef CONFIG_X86_32
21947- /* 32 bit has nosave before _edata */
21948 NOSAVE_DATA
21949-#endif
21950
21951 PAGE_ALIGNED_DATA(PAGE_SIZE)
21952
21953@@ -112,6 +175,8 @@ SECTIONS
21954 DATA_DATA
21955 CONSTRUCTORS
21956
21957+ jiffies = jiffies_64;
21958+
21959 /* rarely changed data like cpu maps */
21960 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
21961
21962@@ -166,12 +231,6 @@ SECTIONS
21963 }
21964 vgetcpu_mode = VVIRT(.vgetcpu_mode);
21965
21966- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
21967- .jiffies : AT(VLOAD(.jiffies)) {
21968- *(.jiffies)
21969- }
21970- jiffies = VVIRT(.jiffies);
21971-
21972 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
21973 *(.vsyscall_3)
21974 }
21975@@ -187,12 +246,19 @@ SECTIONS
21976 #endif /* CONFIG_X86_64 */
21977
21978 /* Init code and data - will be freed after init */
21979- . = ALIGN(PAGE_SIZE);
21980 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
21981+ BYTE(0)
21982+
21983+#ifdef CONFIG_PAX_KERNEXEC
21984+ . = ALIGN(HPAGE_SIZE);
21985+#else
21986+ . = ALIGN(PAGE_SIZE);
21987+#endif
21988+
21989 __init_begin = .; /* paired with __init_end */
21990- }
21991+ } :init.begin
21992
21993-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
21994+#ifdef CONFIG_SMP
21995 /*
21996 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
21997 * output PHDR, so the next output section - .init.text - should
21998@@ -201,12 +267,27 @@ SECTIONS
21999 PERCPU_VADDR(0, :percpu)
22000 #endif
22001
22002- INIT_TEXT_SECTION(PAGE_SIZE)
22003-#ifdef CONFIG_X86_64
22004- :init
22005-#endif
22006+ . = ALIGN(PAGE_SIZE);
22007+ init_begin = .;
22008+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
22009+ VMLINUX_SYMBOL(_sinittext) = .;
22010+ INIT_TEXT
22011+ VMLINUX_SYMBOL(_einittext) = .;
22012+ . = ALIGN(PAGE_SIZE);
22013+ } :text.init
22014
22015- INIT_DATA_SECTION(16)
22016+ /*
22017+ * .exit.text is discard at runtime, not link time, to deal with
22018+ * references from .altinstructions and .eh_frame
22019+ */
22020+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
22021+ EXIT_TEXT
22022+ . = ALIGN(16);
22023+ } :text.exit
22024+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
22025+
22026+ . = ALIGN(PAGE_SIZE);
22027+ INIT_DATA_SECTION(16) :init
22028
22029 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
22030 __x86_cpu_dev_start = .;
22031@@ -232,19 +313,11 @@ SECTIONS
22032 *(.altinstr_replacement)
22033 }
22034
22035- /*
22036- * .exit.text is discard at runtime, not link time, to deal with
22037- * references from .altinstructions and .eh_frame
22038- */
22039- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
22040- EXIT_TEXT
22041- }
22042-
22043 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
22044 EXIT_DATA
22045 }
22046
22047-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
22048+#ifndef CONFIG_SMP
22049 PERCPU(PAGE_SIZE)
22050 #endif
22051
22052@@ -267,12 +340,6 @@ SECTIONS
22053 . = ALIGN(PAGE_SIZE);
22054 }
22055
22056-#ifdef CONFIG_X86_64
22057- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
22058- NOSAVE_DATA
22059- }
22060-#endif
22061-
22062 /* BSS */
22063 . = ALIGN(PAGE_SIZE);
22064 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
22065@@ -288,6 +355,7 @@ SECTIONS
22066 __brk_base = .;
22067 . += 64 * 1024; /* 64k alignment slop space */
22068 *(.brk_reservation) /* areas brk users have reserved */
22069+ . = ALIGN(HPAGE_SIZE);
22070 __brk_limit = .;
22071 }
22072
22073@@ -316,13 +384,12 @@ SECTIONS
22074 * for the boot processor.
22075 */
22076 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
22077-INIT_PER_CPU(gdt_page);
22078 INIT_PER_CPU(irq_stack_union);
22079
22080 /*
22081 * Build-time check on the image size:
22082 */
22083-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
22084+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
22085 "kernel image bigger than KERNEL_IMAGE_SIZE");
22086
22087 #ifdef CONFIG_SMP
22088diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
22089index 62f39d7..3bc46a1 100644
22090--- a/arch/x86/kernel/vsyscall_64.c
22091+++ b/arch/x86/kernel/vsyscall_64.c
22092@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
22093
22094 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
22095 /* copy vsyscall data */
22096+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
22097 vsyscall_gtod_data.clock.vread = clock->vread;
22098 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
22099 vsyscall_gtod_data.clock.mask = clock->mask;
22100@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
22101 We do this here because otherwise user space would do it on
22102 its own in a likely inferior way (no access to jiffies).
22103 If you don't like it pass NULL. */
22104- if (tcache && tcache->blob[0] == (j = __jiffies)) {
22105+ if (tcache && tcache->blob[0] == (j = jiffies)) {
22106 p = tcache->blob[1];
22107 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
22108 /* Load per CPU data from RDTSCP */
22109diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
22110index 3909e3b..5433a97 100644
22111--- a/arch/x86/kernel/x8664_ksyms_64.c
22112+++ b/arch/x86/kernel/x8664_ksyms_64.c
22113@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
22114
22115 EXPORT_SYMBOL(copy_user_generic);
22116 EXPORT_SYMBOL(__copy_user_nocache);
22117-EXPORT_SYMBOL(copy_from_user);
22118-EXPORT_SYMBOL(copy_to_user);
22119 EXPORT_SYMBOL(__copy_from_user_inatomic);
22120
22121 EXPORT_SYMBOL(copy_page);
22122diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
22123index c5ee17e..d63218f 100644
22124--- a/arch/x86/kernel/xsave.c
22125+++ b/arch/x86/kernel/xsave.c
22126@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
22127 fx_sw_user->xstate_size > fx_sw_user->extended_size)
22128 return -1;
22129
22130- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
22131+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
22132 fx_sw_user->extended_size -
22133 FP_XSTATE_MAGIC2_SIZE));
22134 /*
22135@@ -196,7 +196,7 @@ fx_only:
22136 * the other extended state.
22137 */
22138 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
22139- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
22140+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
22141 }
22142
22143 /*
22144@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
22145 if (task_thread_info(tsk)->status & TS_XSAVE)
22146 err = restore_user_xstate(buf);
22147 else
22148- err = fxrstor_checking((__force struct i387_fxsave_struct *)
22149+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
22150 buf);
22151 if (unlikely(err)) {
22152 /*
22153diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
22154index 1350e43..a94b011 100644
22155--- a/arch/x86/kvm/emulate.c
22156+++ b/arch/x86/kvm/emulate.c
22157@@ -81,8 +81,8 @@
22158 #define Src2CL (1<<29)
22159 #define Src2ImmByte (2<<29)
22160 #define Src2One (3<<29)
22161-#define Src2Imm16 (4<<29)
22162-#define Src2Mask (7<<29)
22163+#define Src2Imm16 (4U<<29)
22164+#define Src2Mask (7U<<29)
22165
22166 enum {
22167 Group1_80, Group1_81, Group1_82, Group1_83,
22168@@ -411,6 +411,7 @@ static u32 group2_table[] = {
22169
22170 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
22171 do { \
22172+ unsigned long _tmp; \
22173 __asm__ __volatile__ ( \
22174 _PRE_EFLAGS("0", "4", "2") \
22175 _op _suffix " %"_x"3,%1; " \
22176@@ -424,8 +425,6 @@ static u32 group2_table[] = {
22177 /* Raw emulation: instruction has two explicit operands. */
22178 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
22179 do { \
22180- unsigned long _tmp; \
22181- \
22182 switch ((_dst).bytes) { \
22183 case 2: \
22184 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
22185@@ -441,7 +440,6 @@ static u32 group2_table[] = {
22186
22187 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
22188 do { \
22189- unsigned long _tmp; \
22190 switch ((_dst).bytes) { \
22191 case 1: \
22192 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
22193diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
22194index 8dfeaaa..4daa395 100644
22195--- a/arch/x86/kvm/lapic.c
22196+++ b/arch/x86/kvm/lapic.c
22197@@ -52,7 +52,7 @@
22198 #define APIC_BUS_CYCLE_NS 1
22199
22200 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
22201-#define apic_debug(fmt, arg...)
22202+#define apic_debug(fmt, arg...) do {} while (0)
22203
22204 #define APIC_LVT_NUM 6
22205 /* 14 is the version for Xeon and Pentium 8.4.8*/
22206diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
22207index 3bc2707..dd157e2 100644
22208--- a/arch/x86/kvm/paging_tmpl.h
22209+++ b/arch/x86/kvm/paging_tmpl.h
22210@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22211 int level = PT_PAGE_TABLE_LEVEL;
22212 unsigned long mmu_seq;
22213
22214+ pax_track_stack();
22215+
22216 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
22217 kvm_mmu_audit(vcpu, "pre page fault");
22218
22219@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
22220 kvm_mmu_free_some_pages(vcpu);
22221 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
22222 level, &write_pt, pfn);
22223+ (void)sptep;
22224 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
22225 sptep, *sptep, write_pt);
22226
22227diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
22228index 7c6e63e..c5d92c1 100644
22229--- a/arch/x86/kvm/svm.c
22230+++ b/arch/x86/kvm/svm.c
22231@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
22232 int cpu = raw_smp_processor_id();
22233
22234 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
22235+
22236+ pax_open_kernel();
22237 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
22238+ pax_close_kernel();
22239+
22240 load_TR_desc();
22241 }
22242
22243@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
22244 return true;
22245 }
22246
22247-static struct kvm_x86_ops svm_x86_ops = {
22248+static const struct kvm_x86_ops svm_x86_ops = {
22249 .cpu_has_kvm_support = has_svm,
22250 .disabled_by_bios = is_disabled,
22251 .hardware_setup = svm_hardware_setup,
22252diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
22253index e6d925f..e7a4af8 100644
22254--- a/arch/x86/kvm/vmx.c
22255+++ b/arch/x86/kvm/vmx.c
22256@@ -570,7 +570,11 @@ static void reload_tss(void)
22257
22258 kvm_get_gdt(&gdt);
22259 descs = (void *)gdt.base;
22260+
22261+ pax_open_kernel();
22262 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
22263+ pax_close_kernel();
22264+
22265 load_TR_desc();
22266 }
22267
22268@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
22269 if (!cpu_has_vmx_flexpriority())
22270 flexpriority_enabled = 0;
22271
22272- if (!cpu_has_vmx_tpr_shadow())
22273- kvm_x86_ops->update_cr8_intercept = NULL;
22274+ if (!cpu_has_vmx_tpr_shadow()) {
22275+ pax_open_kernel();
22276+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
22277+ pax_close_kernel();
22278+ }
22279
22280 if (enable_ept && !cpu_has_vmx_ept_2m_page())
22281 kvm_disable_largepages();
22282@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
22283 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
22284
22285 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
22286- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
22287+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
22288 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
22289 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
22290 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
22291@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22292 "jmp .Lkvm_vmx_return \n\t"
22293 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
22294 ".Lkvm_vmx_return: "
22295+
22296+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22297+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
22298+ ".Lkvm_vmx_return2: "
22299+#endif
22300+
22301 /* Save guest registers, load host registers, keep flags */
22302 "xchg %0, (%%"R"sp) \n\t"
22303 "mov %%"R"ax, %c[rax](%0) \n\t"
22304@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22305 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
22306 #endif
22307 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
22308+
22309+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22310+ ,[cs]"i"(__KERNEL_CS)
22311+#endif
22312+
22313 : "cc", "memory"
22314- , R"bx", R"di", R"si"
22315+ , R"ax", R"bx", R"di", R"si"
22316 #ifdef CONFIG_X86_64
22317 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
22318 #endif
22319@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
22320 if (vmx->rmode.irq.pending)
22321 fixup_rmode_irq(vmx);
22322
22323- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
22324+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
22325+
22326+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22327+ loadsegment(fs, __KERNEL_PERCPU);
22328+#endif
22329+
22330+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22331+ __set_fs(current_thread_info()->addr_limit);
22332+#endif
22333+
22334 vmx->launched = 1;
22335
22336 vmx_complete_interrupts(vmx);
22337@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
22338 return false;
22339 }
22340
22341-static struct kvm_x86_ops vmx_x86_ops = {
22342+static const struct kvm_x86_ops vmx_x86_ops = {
22343 .cpu_has_kvm_support = cpu_has_kvm_support,
22344 .disabled_by_bios = vmx_disabled_by_bios,
22345 .hardware_setup = hardware_setup,
22346diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
22347index df1cefb..5e882ad 100644
22348--- a/arch/x86/kvm/x86.c
22349+++ b/arch/x86/kvm/x86.c
22350@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
22351 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
22352 struct kvm_cpuid_entry2 __user *entries);
22353
22354-struct kvm_x86_ops *kvm_x86_ops;
22355+const struct kvm_x86_ops *kvm_x86_ops;
22356 EXPORT_SYMBOL_GPL(kvm_x86_ops);
22357
22358 int ignore_msrs = 0;
22359@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
22360 struct kvm_cpuid2 *cpuid,
22361 struct kvm_cpuid_entry2 __user *entries)
22362 {
22363- int r;
22364+ int r, i;
22365
22366 r = -E2BIG;
22367 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
22368 goto out;
22369 r = -EFAULT;
22370- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
22371- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22372+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
22373 goto out;
22374+ for (i = 0; i < cpuid->nent; ++i) {
22375+ struct kvm_cpuid_entry2 cpuid_entry;
22376+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
22377+ goto out;
22378+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
22379+ }
22380 vcpu->arch.cpuid_nent = cpuid->nent;
22381 kvm_apic_set_version(vcpu);
22382 return 0;
22383@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
22384 struct kvm_cpuid2 *cpuid,
22385 struct kvm_cpuid_entry2 __user *entries)
22386 {
22387- int r;
22388+ int r, i;
22389
22390 vcpu_load(vcpu);
22391 r = -E2BIG;
22392 if (cpuid->nent < vcpu->arch.cpuid_nent)
22393 goto out;
22394 r = -EFAULT;
22395- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
22396- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22397+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
22398 goto out;
22399+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
22400+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
22401+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
22402+ goto out;
22403+ }
22404 return 0;
22405
22406 out:
22407@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
22408 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
22409 struct kvm_interrupt *irq)
22410 {
22411- if (irq->irq < 0 || irq->irq >= 256)
22412+ if (irq->irq >= 256)
22413 return -EINVAL;
22414 if (irqchip_in_kernel(vcpu->kvm))
22415 return -ENXIO;
22416@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
22417 .notifier_call = kvmclock_cpufreq_notifier
22418 };
22419
22420-int kvm_arch_init(void *opaque)
22421+int kvm_arch_init(const void *opaque)
22422 {
22423 int r, cpu;
22424- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
22425+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
22426
22427 if (kvm_x86_ops) {
22428 printk(KERN_ERR "kvm: already loaded the other module\n");
22429diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
22430index 7e59dc1..b88c98f 100644
22431--- a/arch/x86/lguest/boot.c
22432+++ b/arch/x86/lguest/boot.c
22433@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
22434 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
22435 * Launcher to reboot us.
22436 */
22437-static void lguest_restart(char *reason)
22438+static __noreturn void lguest_restart(char *reason)
22439 {
22440 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
22441+ BUG();
22442 }
22443
22444 /*G:050
22445diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
22446index 824fa0b..c619e96 100644
22447--- a/arch/x86/lib/atomic64_32.c
22448+++ b/arch/x86/lib/atomic64_32.c
22449@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
22450 }
22451 EXPORT_SYMBOL(atomic64_cmpxchg);
22452
22453+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
22454+{
22455+ return cmpxchg8b(&ptr->counter, old_val, new_val);
22456+}
22457+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
22458+
22459 /**
22460 * atomic64_xchg - xchg atomic64 variable
22461 * @ptr: pointer to type atomic64_t
22462@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
22463 EXPORT_SYMBOL(atomic64_xchg);
22464
22465 /**
22466+ * atomic64_xchg_unchecked - xchg atomic64 variable
22467+ * @ptr: pointer to type atomic64_unchecked_t
22468+ * @new_val: value to assign
22469+ *
22470+ * Atomically xchgs the value of @ptr to @new_val and returns
22471+ * the old value.
22472+ */
22473+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22474+{
22475+ /*
22476+ * Try first with a (possibly incorrect) assumption about
22477+ * what we have there. We'll do two loops most likely,
22478+ * but we'll get an ownership MESI transaction straight away
22479+ * instead of a read transaction followed by a
22480+ * flush-for-ownership transaction:
22481+ */
22482+ u64 old_val, real_val = 0;
22483+
22484+ do {
22485+ old_val = real_val;
22486+
22487+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22488+
22489+ } while (real_val != old_val);
22490+
22491+ return old_val;
22492+}
22493+EXPORT_SYMBOL(atomic64_xchg_unchecked);
22494+
22495+/**
22496 * atomic64_set - set atomic64 variable
22497 * @ptr: pointer to type atomic64_t
22498 * @new_val: value to assign
22499@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
22500 EXPORT_SYMBOL(atomic64_set);
22501
22502 /**
22503-EXPORT_SYMBOL(atomic64_read);
22504+ * atomic64_unchecked_set - set atomic64 variable
22505+ * @ptr: pointer to type atomic64_unchecked_t
22506+ * @new_val: value to assign
22507+ *
22508+ * Atomically sets the value of @ptr to @new_val.
22509+ */
22510+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
22511+{
22512+ atomic64_xchg_unchecked(ptr, new_val);
22513+}
22514+EXPORT_SYMBOL(atomic64_set_unchecked);
22515+
22516+/**
22517 * atomic64_add_return - add and return
22518 * @delta: integer value to add
22519 * @ptr: pointer to type atomic64_t
22520@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
22521 }
22522 EXPORT_SYMBOL(atomic64_add_return);
22523
22524+/**
22525+ * atomic64_add_return_unchecked - add and return
22526+ * @delta: integer value to add
22527+ * @ptr: pointer to type atomic64_unchecked_t
22528+ *
22529+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
22530+ */
22531+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22532+{
22533+ /*
22534+ * Try first with a (possibly incorrect) assumption about
22535+ * what we have there. We'll do two loops most likely,
22536+ * but we'll get an ownership MESI transaction straight away
22537+ * instead of a read transaction followed by a
22538+ * flush-for-ownership transaction:
22539+ */
22540+ u64 old_val, new_val, real_val = 0;
22541+
22542+ do {
22543+ old_val = real_val;
22544+ new_val = old_val + delta;
22545+
22546+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
22547+
22548+ } while (real_val != old_val);
22549+
22550+ return new_val;
22551+}
22552+EXPORT_SYMBOL(atomic64_add_return_unchecked);
22553+
22554 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
22555 {
22556 return atomic64_add_return(-delta, ptr);
22557 }
22558 EXPORT_SYMBOL(atomic64_sub_return);
22559
22560+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22561+{
22562+ return atomic64_add_return_unchecked(-delta, ptr);
22563+}
22564+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
22565+
22566 u64 atomic64_inc_return(atomic64_t *ptr)
22567 {
22568 return atomic64_add_return(1, ptr);
22569 }
22570 EXPORT_SYMBOL(atomic64_inc_return);
22571
22572+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
22573+{
22574+ return atomic64_add_return_unchecked(1, ptr);
22575+}
22576+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
22577+
22578 u64 atomic64_dec_return(atomic64_t *ptr)
22579 {
22580 return atomic64_sub_return(1, ptr);
22581 }
22582 EXPORT_SYMBOL(atomic64_dec_return);
22583
22584+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
22585+{
22586+ return atomic64_sub_return_unchecked(1, ptr);
22587+}
22588+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
22589+
22590 /**
22591 * atomic64_add - add integer to atomic64 variable
22592 * @delta: integer value to add
22593@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
22594 EXPORT_SYMBOL(atomic64_add);
22595
22596 /**
22597+ * atomic64_add_unchecked - add integer to atomic64 variable
22598+ * @delta: integer value to add
22599+ * @ptr: pointer to type atomic64_unchecked_t
22600+ *
22601+ * Atomically adds @delta to @ptr.
22602+ */
22603+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22604+{
22605+ atomic64_add_return_unchecked(delta, ptr);
22606+}
22607+EXPORT_SYMBOL(atomic64_add_unchecked);
22608+
22609+/**
22610 * atomic64_sub - subtract the atomic64 variable
22611 * @delta: integer value to subtract
22612 * @ptr: pointer to type atomic64_t
22613@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
22614 EXPORT_SYMBOL(atomic64_sub);
22615
22616 /**
22617+ * atomic64_sub_unchecked - subtract the atomic64 variable
22618+ * @delta: integer value to subtract
22619+ * @ptr: pointer to type atomic64_unchecked_t
22620+ *
22621+ * Atomically subtracts @delta from @ptr.
22622+ */
22623+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
22624+{
22625+ atomic64_add_unchecked(-delta, ptr);
22626+}
22627+EXPORT_SYMBOL(atomic64_sub_unchecked);
22628+
22629+/**
22630 * atomic64_sub_and_test - subtract value from variable and test result
22631 * @delta: integer value to subtract
22632 * @ptr: pointer to type atomic64_t
22633@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
22634 EXPORT_SYMBOL(atomic64_inc);
22635
22636 /**
22637+ * atomic64_inc_unchecked - increment atomic64 variable
22638+ * @ptr: pointer to type atomic64_unchecked_t
22639+ *
22640+ * Atomically increments @ptr by 1.
22641+ */
22642+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
22643+{
22644+ atomic64_add_unchecked(1, ptr);
22645+}
22646+EXPORT_SYMBOL(atomic64_inc_unchecked);
22647+
22648+/**
22649 * atomic64_dec - decrement atomic64 variable
22650 * @ptr: pointer to type atomic64_t
22651 *
22652@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
22653 EXPORT_SYMBOL(atomic64_dec);
22654
22655 /**
22656+ * atomic64_dec_unchecked - decrement atomic64 variable
22657+ * @ptr: pointer to type atomic64_unchecked_t
22658+ *
22659+ * Atomically decrements @ptr by 1.
22660+ */
22661+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
22662+{
22663+ atomic64_sub_unchecked(1, ptr);
22664+}
22665+EXPORT_SYMBOL(atomic64_dec_unchecked);
22666+
22667+/**
22668 * atomic64_dec_and_test - decrement and test
22669 * @ptr: pointer to type atomic64_t
22670 *
22671diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
22672index adbccd0..98f96c8 100644
22673--- a/arch/x86/lib/checksum_32.S
22674+++ b/arch/x86/lib/checksum_32.S
22675@@ -28,7 +28,8 @@
22676 #include <linux/linkage.h>
22677 #include <asm/dwarf2.h>
22678 #include <asm/errno.h>
22679-
22680+#include <asm/segment.h>
22681+
22682 /*
22683 * computes a partial checksum, e.g. for TCP/UDP fragments
22684 */
22685@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
22686
22687 #define ARGBASE 16
22688 #define FP 12
22689-
22690-ENTRY(csum_partial_copy_generic)
22691+
22692+ENTRY(csum_partial_copy_generic_to_user)
22693 CFI_STARTPROC
22694+
22695+#ifdef CONFIG_PAX_MEMORY_UDEREF
22696+ pushl %gs
22697+ CFI_ADJUST_CFA_OFFSET 4
22698+ popl %es
22699+ CFI_ADJUST_CFA_OFFSET -4
22700+ jmp csum_partial_copy_generic
22701+#endif
22702+
22703+ENTRY(csum_partial_copy_generic_from_user)
22704+
22705+#ifdef CONFIG_PAX_MEMORY_UDEREF
22706+ pushl %gs
22707+ CFI_ADJUST_CFA_OFFSET 4
22708+ popl %ds
22709+ CFI_ADJUST_CFA_OFFSET -4
22710+#endif
22711+
22712+ENTRY(csum_partial_copy_generic)
22713 subl $4,%esp
22714 CFI_ADJUST_CFA_OFFSET 4
22715 pushl %edi
22716@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
22717 jmp 4f
22718 SRC(1: movw (%esi), %bx )
22719 addl $2, %esi
22720-DST( movw %bx, (%edi) )
22721+DST( movw %bx, %es:(%edi) )
22722 addl $2, %edi
22723 addw %bx, %ax
22724 adcl $0, %eax
22725@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
22726 SRC(1: movl (%esi), %ebx )
22727 SRC( movl 4(%esi), %edx )
22728 adcl %ebx, %eax
22729-DST( movl %ebx, (%edi) )
22730+DST( movl %ebx, %es:(%edi) )
22731 adcl %edx, %eax
22732-DST( movl %edx, 4(%edi) )
22733+DST( movl %edx, %es:4(%edi) )
22734
22735 SRC( movl 8(%esi), %ebx )
22736 SRC( movl 12(%esi), %edx )
22737 adcl %ebx, %eax
22738-DST( movl %ebx, 8(%edi) )
22739+DST( movl %ebx, %es:8(%edi) )
22740 adcl %edx, %eax
22741-DST( movl %edx, 12(%edi) )
22742+DST( movl %edx, %es:12(%edi) )
22743
22744 SRC( movl 16(%esi), %ebx )
22745 SRC( movl 20(%esi), %edx )
22746 adcl %ebx, %eax
22747-DST( movl %ebx, 16(%edi) )
22748+DST( movl %ebx, %es:16(%edi) )
22749 adcl %edx, %eax
22750-DST( movl %edx, 20(%edi) )
22751+DST( movl %edx, %es:20(%edi) )
22752
22753 SRC( movl 24(%esi), %ebx )
22754 SRC( movl 28(%esi), %edx )
22755 adcl %ebx, %eax
22756-DST( movl %ebx, 24(%edi) )
22757+DST( movl %ebx, %es:24(%edi) )
22758 adcl %edx, %eax
22759-DST( movl %edx, 28(%edi) )
22760+DST( movl %edx, %es:28(%edi) )
22761
22762 lea 32(%esi), %esi
22763 lea 32(%edi), %edi
22764@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
22765 shrl $2, %edx # This clears CF
22766 SRC(3: movl (%esi), %ebx )
22767 adcl %ebx, %eax
22768-DST( movl %ebx, (%edi) )
22769+DST( movl %ebx, %es:(%edi) )
22770 lea 4(%esi), %esi
22771 lea 4(%edi), %edi
22772 dec %edx
22773@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
22774 jb 5f
22775 SRC( movw (%esi), %cx )
22776 leal 2(%esi), %esi
22777-DST( movw %cx, (%edi) )
22778+DST( movw %cx, %es:(%edi) )
22779 leal 2(%edi), %edi
22780 je 6f
22781 shll $16,%ecx
22782 SRC(5: movb (%esi), %cl )
22783-DST( movb %cl, (%edi) )
22784+DST( movb %cl, %es:(%edi) )
22785 6: addl %ecx, %eax
22786 adcl $0, %eax
22787 7:
22788@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
22789
22790 6001:
22791 movl ARGBASE+20(%esp), %ebx # src_err_ptr
22792- movl $-EFAULT, (%ebx)
22793+ movl $-EFAULT, %ss:(%ebx)
22794
22795 # zero the complete destination - computing the rest
22796 # is too much work
22797@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
22798
22799 6002:
22800 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22801- movl $-EFAULT,(%ebx)
22802+ movl $-EFAULT,%ss:(%ebx)
22803 jmp 5000b
22804
22805 .previous
22806
22807+ pushl %ss
22808+ CFI_ADJUST_CFA_OFFSET 4
22809+ popl %ds
22810+ CFI_ADJUST_CFA_OFFSET -4
22811+ pushl %ss
22812+ CFI_ADJUST_CFA_OFFSET 4
22813+ popl %es
22814+ CFI_ADJUST_CFA_OFFSET -4
22815 popl %ebx
22816 CFI_ADJUST_CFA_OFFSET -4
22817 CFI_RESTORE ebx
22818@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
22819 CFI_ADJUST_CFA_OFFSET -4
22820 ret
22821 CFI_ENDPROC
22822-ENDPROC(csum_partial_copy_generic)
22823+ENDPROC(csum_partial_copy_generic_to_user)
22824
22825 #else
22826
22827 /* Version for PentiumII/PPro */
22828
22829 #define ROUND1(x) \
22830+ nop; nop; nop; \
22831 SRC(movl x(%esi), %ebx ) ; \
22832 addl %ebx, %eax ; \
22833- DST(movl %ebx, x(%edi) ) ;
22834+ DST(movl %ebx, %es:x(%edi)) ;
22835
22836 #define ROUND(x) \
22837+ nop; nop; nop; \
22838 SRC(movl x(%esi), %ebx ) ; \
22839 adcl %ebx, %eax ; \
22840- DST(movl %ebx, x(%edi) ) ;
22841+ DST(movl %ebx, %es:x(%edi)) ;
22842
22843 #define ARGBASE 12
22844-
22845-ENTRY(csum_partial_copy_generic)
22846+
22847+ENTRY(csum_partial_copy_generic_to_user)
22848 CFI_STARTPROC
22849+
22850+#ifdef CONFIG_PAX_MEMORY_UDEREF
22851+ pushl %gs
22852+ CFI_ADJUST_CFA_OFFSET 4
22853+ popl %es
22854+ CFI_ADJUST_CFA_OFFSET -4
22855+ jmp csum_partial_copy_generic
22856+#endif
22857+
22858+ENTRY(csum_partial_copy_generic_from_user)
22859+
22860+#ifdef CONFIG_PAX_MEMORY_UDEREF
22861+ pushl %gs
22862+ CFI_ADJUST_CFA_OFFSET 4
22863+ popl %ds
22864+ CFI_ADJUST_CFA_OFFSET -4
22865+#endif
22866+
22867+ENTRY(csum_partial_copy_generic)
22868 pushl %ebx
22869 CFI_ADJUST_CFA_OFFSET 4
22870 CFI_REL_OFFSET ebx, 0
22871@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
22872 subl %ebx, %edi
22873 lea -1(%esi),%edx
22874 andl $-32,%edx
22875- lea 3f(%ebx,%ebx), %ebx
22876+ lea 3f(%ebx,%ebx,2), %ebx
22877 testl %esi, %esi
22878 jmp *%ebx
22879 1: addl $64,%esi
22880@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
22881 jb 5f
22882 SRC( movw (%esi), %dx )
22883 leal 2(%esi), %esi
22884-DST( movw %dx, (%edi) )
22885+DST( movw %dx, %es:(%edi) )
22886 leal 2(%edi), %edi
22887 je 6f
22888 shll $16,%edx
22889 5:
22890 SRC( movb (%esi), %dl )
22891-DST( movb %dl, (%edi) )
22892+DST( movb %dl, %es:(%edi) )
22893 6: addl %edx, %eax
22894 adcl $0, %eax
22895 7:
22896 .section .fixup, "ax"
22897 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
22898- movl $-EFAULT, (%ebx)
22899+ movl $-EFAULT, %ss:(%ebx)
22900 # zero the complete destination (computing the rest is too much work)
22901 movl ARGBASE+8(%esp),%edi # dst
22902 movl ARGBASE+12(%esp),%ecx # len
22903@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
22904 rep; stosb
22905 jmp 7b
22906 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
22907- movl $-EFAULT, (%ebx)
22908+ movl $-EFAULT, %ss:(%ebx)
22909 jmp 7b
22910 .previous
22911
22912+#ifdef CONFIG_PAX_MEMORY_UDEREF
22913+ pushl %ss
22914+ CFI_ADJUST_CFA_OFFSET 4
22915+ popl %ds
22916+ CFI_ADJUST_CFA_OFFSET -4
22917+ pushl %ss
22918+ CFI_ADJUST_CFA_OFFSET 4
22919+ popl %es
22920+ CFI_ADJUST_CFA_OFFSET -4
22921+#endif
22922+
22923 popl %esi
22924 CFI_ADJUST_CFA_OFFSET -4
22925 CFI_RESTORE esi
22926@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
22927 CFI_RESTORE ebx
22928 ret
22929 CFI_ENDPROC
22930-ENDPROC(csum_partial_copy_generic)
22931+ENDPROC(csum_partial_copy_generic_to_user)
22932
22933 #undef ROUND
22934 #undef ROUND1
22935diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
22936index ebeafcc..1e3a402 100644
22937--- a/arch/x86/lib/clear_page_64.S
22938+++ b/arch/x86/lib/clear_page_64.S
22939@@ -1,5 +1,6 @@
22940 #include <linux/linkage.h>
22941 #include <asm/dwarf2.h>
22942+#include <asm/alternative-asm.h>
22943
22944 /*
22945 * Zero a page.
22946@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
22947 movl $4096/8,%ecx
22948 xorl %eax,%eax
22949 rep stosq
22950+ pax_force_retaddr
22951 ret
22952 CFI_ENDPROC
22953 ENDPROC(clear_page_c)
22954@@ -33,6 +35,7 @@ ENTRY(clear_page)
22955 leaq 64(%rdi),%rdi
22956 jnz .Lloop
22957 nop
22958+ pax_force_retaddr
22959 ret
22960 CFI_ENDPROC
22961 .Lclear_page_end:
22962@@ -43,7 +46,7 @@ ENDPROC(clear_page)
22963
22964 #include <asm/cpufeature.h>
22965
22966- .section .altinstr_replacement,"ax"
22967+ .section .altinstr_replacement,"a"
22968 1: .byte 0xeb /* jmp <disp8> */
22969 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
22970 2:
22971diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
22972index 727a5d4..333818a 100644
22973--- a/arch/x86/lib/copy_page_64.S
22974+++ b/arch/x86/lib/copy_page_64.S
22975@@ -2,12 +2,14 @@
22976
22977 #include <linux/linkage.h>
22978 #include <asm/dwarf2.h>
22979+#include <asm/alternative-asm.h>
22980
22981 ALIGN
22982 copy_page_c:
22983 CFI_STARTPROC
22984 movl $4096/8,%ecx
22985 rep movsq
22986+ pax_force_retaddr
22987 ret
22988 CFI_ENDPROC
22989 ENDPROC(copy_page_c)
22990@@ -38,7 +40,7 @@ ENTRY(copy_page)
22991 movq 16 (%rsi), %rdx
22992 movq 24 (%rsi), %r8
22993 movq 32 (%rsi), %r9
22994- movq 40 (%rsi), %r10
22995+ movq 40 (%rsi), %r13
22996 movq 48 (%rsi), %r11
22997 movq 56 (%rsi), %r12
22998
22999@@ -49,7 +51,7 @@ ENTRY(copy_page)
23000 movq %rdx, 16 (%rdi)
23001 movq %r8, 24 (%rdi)
23002 movq %r9, 32 (%rdi)
23003- movq %r10, 40 (%rdi)
23004+ movq %r13, 40 (%rdi)
23005 movq %r11, 48 (%rdi)
23006 movq %r12, 56 (%rdi)
23007
23008@@ -68,7 +70,7 @@ ENTRY(copy_page)
23009 movq 16 (%rsi), %rdx
23010 movq 24 (%rsi), %r8
23011 movq 32 (%rsi), %r9
23012- movq 40 (%rsi), %r10
23013+ movq 40 (%rsi), %r13
23014 movq 48 (%rsi), %r11
23015 movq 56 (%rsi), %r12
23016
23017@@ -77,7 +79,7 @@ ENTRY(copy_page)
23018 movq %rdx, 16 (%rdi)
23019 movq %r8, 24 (%rdi)
23020 movq %r9, 32 (%rdi)
23021- movq %r10, 40 (%rdi)
23022+ movq %r13, 40 (%rdi)
23023 movq %r11, 48 (%rdi)
23024 movq %r12, 56 (%rdi)
23025
23026@@ -94,6 +96,7 @@ ENTRY(copy_page)
23027 CFI_RESTORE r13
23028 addq $3*8,%rsp
23029 CFI_ADJUST_CFA_OFFSET -3*8
23030+ pax_force_retaddr
23031 ret
23032 .Lcopy_page_end:
23033 CFI_ENDPROC
23034@@ -104,7 +107,7 @@ ENDPROC(copy_page)
23035
23036 #include <asm/cpufeature.h>
23037
23038- .section .altinstr_replacement,"ax"
23039+ .section .altinstr_replacement,"a"
23040 1: .byte 0xeb /* jmp <disp8> */
23041 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
23042 2:
23043diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
23044index af8debd..40c75f3 100644
23045--- a/arch/x86/lib/copy_user_64.S
23046+++ b/arch/x86/lib/copy_user_64.S
23047@@ -15,13 +15,15 @@
23048 #include <asm/asm-offsets.h>
23049 #include <asm/thread_info.h>
23050 #include <asm/cpufeature.h>
23051+#include <asm/pgtable.h>
23052+#include <asm/alternative-asm.h>
23053
23054 .macro ALTERNATIVE_JUMP feature,orig,alt
23055 0:
23056 .byte 0xe9 /* 32bit jump */
23057 .long \orig-1f /* by default jump to orig */
23058 1:
23059- .section .altinstr_replacement,"ax"
23060+ .section .altinstr_replacement,"a"
23061 2: .byte 0xe9 /* near jump with 32bit immediate */
23062 .long \alt-1b /* offset */ /* or alternatively to alt */
23063 .previous
23064@@ -64,55 +66,26 @@
23065 #endif
23066 .endm
23067
23068-/* Standard copy_to_user with segment limit checking */
23069-ENTRY(copy_to_user)
23070- CFI_STARTPROC
23071- GET_THREAD_INFO(%rax)
23072- movq %rdi,%rcx
23073- addq %rdx,%rcx
23074- jc bad_to_user
23075- cmpq TI_addr_limit(%rax),%rcx
23076- ja bad_to_user
23077- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23078- CFI_ENDPROC
23079-ENDPROC(copy_to_user)
23080-
23081-/* Standard copy_from_user with segment limit checking */
23082-ENTRY(copy_from_user)
23083- CFI_STARTPROC
23084- GET_THREAD_INFO(%rax)
23085- movq %rsi,%rcx
23086- addq %rdx,%rcx
23087- jc bad_from_user
23088- cmpq TI_addr_limit(%rax),%rcx
23089- ja bad_from_user
23090- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23091- CFI_ENDPROC
23092-ENDPROC(copy_from_user)
23093-
23094 ENTRY(copy_user_generic)
23095 CFI_STARTPROC
23096 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23097 CFI_ENDPROC
23098 ENDPROC(copy_user_generic)
23099
23100-ENTRY(__copy_from_user_inatomic)
23101- CFI_STARTPROC
23102- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
23103- CFI_ENDPROC
23104-ENDPROC(__copy_from_user_inatomic)
23105-
23106 .section .fixup,"ax"
23107 /* must zero dest */
23108 ENTRY(bad_from_user)
23109 bad_from_user:
23110 CFI_STARTPROC
23111+ testl %edx,%edx
23112+ js bad_to_user
23113 movl %edx,%ecx
23114 xorl %eax,%eax
23115 rep
23116 stosb
23117 bad_to_user:
23118 movl %edx,%eax
23119+ pax_force_retaddr
23120 ret
23121 CFI_ENDPROC
23122 ENDPROC(bad_from_user)
23123@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
23124 jz 17f
23125 1: movq (%rsi),%r8
23126 2: movq 1*8(%rsi),%r9
23127-3: movq 2*8(%rsi),%r10
23128+3: movq 2*8(%rsi),%rax
23129 4: movq 3*8(%rsi),%r11
23130 5: movq %r8,(%rdi)
23131 6: movq %r9,1*8(%rdi)
23132-7: movq %r10,2*8(%rdi)
23133+7: movq %rax,2*8(%rdi)
23134 8: movq %r11,3*8(%rdi)
23135 9: movq 4*8(%rsi),%r8
23136 10: movq 5*8(%rsi),%r9
23137-11: movq 6*8(%rsi),%r10
23138+11: movq 6*8(%rsi),%rax
23139 12: movq 7*8(%rsi),%r11
23140 13: movq %r8,4*8(%rdi)
23141 14: movq %r9,5*8(%rdi)
23142-15: movq %r10,6*8(%rdi)
23143+15: movq %rax,6*8(%rdi)
23144 16: movq %r11,7*8(%rdi)
23145 leaq 64(%rsi),%rsi
23146 leaq 64(%rdi),%rdi
23147@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
23148 decl %ecx
23149 jnz 21b
23150 23: xor %eax,%eax
23151+ pax_force_retaddr
23152 ret
23153
23154 .section .fixup,"ax"
23155@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
23156 3: rep
23157 movsb
23158 4: xorl %eax,%eax
23159+ pax_force_retaddr
23160 ret
23161
23162 .section .fixup,"ax"
23163diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
23164index cb0c112..e3a6895 100644
23165--- a/arch/x86/lib/copy_user_nocache_64.S
23166+++ b/arch/x86/lib/copy_user_nocache_64.S
23167@@ -8,12 +8,14 @@
23168
23169 #include <linux/linkage.h>
23170 #include <asm/dwarf2.h>
23171+#include <asm/alternative-asm.h>
23172
23173 #define FIX_ALIGNMENT 1
23174
23175 #include <asm/current.h>
23176 #include <asm/asm-offsets.h>
23177 #include <asm/thread_info.h>
23178+#include <asm/pgtable.h>
23179
23180 .macro ALIGN_DESTINATION
23181 #ifdef FIX_ALIGNMENT
23182@@ -50,6 +52,15 @@
23183 */
23184 ENTRY(__copy_user_nocache)
23185 CFI_STARTPROC
23186+
23187+#ifdef CONFIG_PAX_MEMORY_UDEREF
23188+ mov $PAX_USER_SHADOW_BASE,%rcx
23189+ cmp %rcx,%rsi
23190+ jae 1f
23191+ add %rcx,%rsi
23192+1:
23193+#endif
23194+
23195 cmpl $8,%edx
23196 jb 20f /* less then 8 bytes, go to byte copy loop */
23197 ALIGN_DESTINATION
23198@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
23199 jz 17f
23200 1: movq (%rsi),%r8
23201 2: movq 1*8(%rsi),%r9
23202-3: movq 2*8(%rsi),%r10
23203+3: movq 2*8(%rsi),%rax
23204 4: movq 3*8(%rsi),%r11
23205 5: movnti %r8,(%rdi)
23206 6: movnti %r9,1*8(%rdi)
23207-7: movnti %r10,2*8(%rdi)
23208+7: movnti %rax,2*8(%rdi)
23209 8: movnti %r11,3*8(%rdi)
23210 9: movq 4*8(%rsi),%r8
23211 10: movq 5*8(%rsi),%r9
23212-11: movq 6*8(%rsi),%r10
23213+11: movq 6*8(%rsi),%rax
23214 12: movq 7*8(%rsi),%r11
23215 13: movnti %r8,4*8(%rdi)
23216 14: movnti %r9,5*8(%rdi)
23217-15: movnti %r10,6*8(%rdi)
23218+15: movnti %rax,6*8(%rdi)
23219 16: movnti %r11,7*8(%rdi)
23220 leaq 64(%rsi),%rsi
23221 leaq 64(%rdi),%rdi
23222@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
23223 jnz 21b
23224 23: xorl %eax,%eax
23225 sfence
23226+ pax_force_retaddr
23227 ret
23228
23229 .section .fixup,"ax"
23230diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
23231index f0dba36..48cb4d6 100644
23232--- a/arch/x86/lib/csum-copy_64.S
23233+++ b/arch/x86/lib/csum-copy_64.S
23234@@ -8,6 +8,7 @@
23235 #include <linux/linkage.h>
23236 #include <asm/dwarf2.h>
23237 #include <asm/errno.h>
23238+#include <asm/alternative-asm.h>
23239
23240 /*
23241 * Checksum copy with exception handling.
23242@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
23243 CFI_RESTORE rbp
23244 addq $7*8,%rsp
23245 CFI_ADJUST_CFA_OFFSET -7*8
23246+ pax_force_retaddr 0, 1
23247 ret
23248 CFI_RESTORE_STATE
23249
23250diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
23251index 459b58a..9570bc7 100644
23252--- a/arch/x86/lib/csum-wrappers_64.c
23253+++ b/arch/x86/lib/csum-wrappers_64.c
23254@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
23255 len -= 2;
23256 }
23257 }
23258- isum = csum_partial_copy_generic((__force const void *)src,
23259+
23260+#ifdef CONFIG_PAX_MEMORY_UDEREF
23261+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23262+ src += PAX_USER_SHADOW_BASE;
23263+#endif
23264+
23265+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
23266 dst, len, isum, errp, NULL);
23267 if (unlikely(*errp))
23268 goto out_err;
23269@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
23270 }
23271
23272 *errp = 0;
23273- return csum_partial_copy_generic(src, (void __force *)dst,
23274+
23275+#ifdef CONFIG_PAX_MEMORY_UDEREF
23276+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
23277+ dst += PAX_USER_SHADOW_BASE;
23278+#endif
23279+
23280+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
23281 len, isum, NULL, errp);
23282 }
23283 EXPORT_SYMBOL(csum_partial_copy_to_user);
23284diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
23285index 51f1504..ddac4c1 100644
23286--- a/arch/x86/lib/getuser.S
23287+++ b/arch/x86/lib/getuser.S
23288@@ -33,15 +33,38 @@
23289 #include <asm/asm-offsets.h>
23290 #include <asm/thread_info.h>
23291 #include <asm/asm.h>
23292+#include <asm/segment.h>
23293+#include <asm/pgtable.h>
23294+#include <asm/alternative-asm.h>
23295+
23296+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23297+#define __copyuser_seg gs;
23298+#else
23299+#define __copyuser_seg
23300+#endif
23301
23302 .text
23303 ENTRY(__get_user_1)
23304 CFI_STARTPROC
23305+
23306+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23307 GET_THREAD_INFO(%_ASM_DX)
23308 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23309 jae bad_get_user
23310-1: movzb (%_ASM_AX),%edx
23311+
23312+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23313+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23314+ cmp %_ASM_DX,%_ASM_AX
23315+ jae 1234f
23316+ add %_ASM_DX,%_ASM_AX
23317+1234:
23318+#endif
23319+
23320+#endif
23321+
23322+1: __copyuser_seg movzb (%_ASM_AX),%edx
23323 xor %eax,%eax
23324+ pax_force_retaddr
23325 ret
23326 CFI_ENDPROC
23327 ENDPROC(__get_user_1)
23328@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
23329 ENTRY(__get_user_2)
23330 CFI_STARTPROC
23331 add $1,%_ASM_AX
23332+
23333+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23334 jc bad_get_user
23335 GET_THREAD_INFO(%_ASM_DX)
23336 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23337 jae bad_get_user
23338-2: movzwl -1(%_ASM_AX),%edx
23339+
23340+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23341+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23342+ cmp %_ASM_DX,%_ASM_AX
23343+ jae 1234f
23344+ add %_ASM_DX,%_ASM_AX
23345+1234:
23346+#endif
23347+
23348+#endif
23349+
23350+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
23351 xor %eax,%eax
23352+ pax_force_retaddr
23353 ret
23354 CFI_ENDPROC
23355 ENDPROC(__get_user_2)
23356@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
23357 ENTRY(__get_user_4)
23358 CFI_STARTPROC
23359 add $3,%_ASM_AX
23360+
23361+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23362 jc bad_get_user
23363 GET_THREAD_INFO(%_ASM_DX)
23364 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23365 jae bad_get_user
23366-3: mov -3(%_ASM_AX),%edx
23367+
23368+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23369+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23370+ cmp %_ASM_DX,%_ASM_AX
23371+ jae 1234f
23372+ add %_ASM_DX,%_ASM_AX
23373+1234:
23374+#endif
23375+
23376+#endif
23377+
23378+3: __copyuser_seg mov -3(%_ASM_AX),%edx
23379 xor %eax,%eax
23380+ pax_force_retaddr
23381 ret
23382 CFI_ENDPROC
23383 ENDPROC(__get_user_4)
23384@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
23385 GET_THREAD_INFO(%_ASM_DX)
23386 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
23387 jae bad_get_user
23388+
23389+#ifdef CONFIG_PAX_MEMORY_UDEREF
23390+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
23391+ cmp %_ASM_DX,%_ASM_AX
23392+ jae 1234f
23393+ add %_ASM_DX,%_ASM_AX
23394+1234:
23395+#endif
23396+
23397 4: movq -7(%_ASM_AX),%_ASM_DX
23398 xor %eax,%eax
23399+ pax_force_retaddr
23400 ret
23401 CFI_ENDPROC
23402 ENDPROC(__get_user_8)
23403@@ -91,6 +152,7 @@ bad_get_user:
23404 CFI_STARTPROC
23405 xor %edx,%edx
23406 mov $(-EFAULT),%_ASM_AX
23407+ pax_force_retaddr
23408 ret
23409 CFI_ENDPROC
23410 END(bad_get_user)
23411diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
23412index 05a95e7..326f2fa 100644
23413--- a/arch/x86/lib/iomap_copy_64.S
23414+++ b/arch/x86/lib/iomap_copy_64.S
23415@@ -17,6 +17,7 @@
23416
23417 #include <linux/linkage.h>
23418 #include <asm/dwarf2.h>
23419+#include <asm/alternative-asm.h>
23420
23421 /*
23422 * override generic version in lib/iomap_copy.c
23423@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
23424 CFI_STARTPROC
23425 movl %edx,%ecx
23426 rep movsd
23427+ pax_force_retaddr
23428 ret
23429 CFI_ENDPROC
23430 ENDPROC(__iowrite32_copy)
23431diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
23432index ad5441e..610e351 100644
23433--- a/arch/x86/lib/memcpy_64.S
23434+++ b/arch/x86/lib/memcpy_64.S
23435@@ -4,6 +4,7 @@
23436
23437 #include <asm/cpufeature.h>
23438 #include <asm/dwarf2.h>
23439+#include <asm/alternative-asm.h>
23440
23441 /*
23442 * memcpy - Copy a memory block.
23443@@ -34,6 +35,7 @@ memcpy_c:
23444 rep movsq
23445 movl %edx, %ecx
23446 rep movsb
23447+ pax_force_retaddr
23448 ret
23449 CFI_ENDPROC
23450 ENDPROC(memcpy_c)
23451@@ -118,6 +120,7 @@ ENTRY(memcpy)
23452 jnz .Lloop_1
23453
23454 .Lend:
23455+ pax_force_retaddr 0, 1
23456 ret
23457 CFI_ENDPROC
23458 ENDPROC(memcpy)
23459@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
23460 * It is also a lot simpler. Use this when possible:
23461 */
23462
23463- .section .altinstr_replacement, "ax"
23464+ .section .altinstr_replacement, "a"
23465 1: .byte 0xeb /* jmp <disp8> */
23466 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
23467 2:
23468diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
23469index 2c59481..7e9ba4e 100644
23470--- a/arch/x86/lib/memset_64.S
23471+++ b/arch/x86/lib/memset_64.S
23472@@ -2,6 +2,7 @@
23473
23474 #include <linux/linkage.h>
23475 #include <asm/dwarf2.h>
23476+#include <asm/alternative-asm.h>
23477
23478 /*
23479 * ISO C memset - set a memory block to a byte value.
23480@@ -28,6 +29,7 @@ memset_c:
23481 movl %r8d,%ecx
23482 rep stosb
23483 movq %r9,%rax
23484+ pax_force_retaddr
23485 ret
23486 CFI_ENDPROC
23487 ENDPROC(memset_c)
23488@@ -35,13 +37,13 @@ ENDPROC(memset_c)
23489 ENTRY(memset)
23490 ENTRY(__memset)
23491 CFI_STARTPROC
23492- movq %rdi,%r10
23493 movq %rdx,%r11
23494
23495 /* expand byte value */
23496 movzbl %sil,%ecx
23497 movabs $0x0101010101010101,%rax
23498 mul %rcx /* with rax, clobbers rdx */
23499+ movq %rdi,%rdx
23500
23501 /* align dst */
23502 movl %edi,%r9d
23503@@ -95,7 +97,8 @@ ENTRY(__memset)
23504 jnz .Lloop_1
23505
23506 .Lende:
23507- movq %r10,%rax
23508+ movq %rdx,%rax
23509+ pax_force_retaddr
23510 ret
23511
23512 CFI_RESTORE_STATE
23513@@ -118,7 +121,7 @@ ENDPROC(__memset)
23514
23515 #include <asm/cpufeature.h>
23516
23517- .section .altinstr_replacement,"ax"
23518+ .section .altinstr_replacement,"a"
23519 1: .byte 0xeb /* jmp <disp8> */
23520 .byte (memset_c - memset) - (2f - 1b) /* offset */
23521 2:
23522diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
23523index c9f2d9b..e7fd2c0 100644
23524--- a/arch/x86/lib/mmx_32.c
23525+++ b/arch/x86/lib/mmx_32.c
23526@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23527 {
23528 void *p;
23529 int i;
23530+ unsigned long cr0;
23531
23532 if (unlikely(in_interrupt()))
23533 return __memcpy(to, from, len);
23534@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
23535 kernel_fpu_begin();
23536
23537 __asm__ __volatile__ (
23538- "1: prefetch (%0)\n" /* This set is 28 bytes */
23539- " prefetch 64(%0)\n"
23540- " prefetch 128(%0)\n"
23541- " prefetch 192(%0)\n"
23542- " prefetch 256(%0)\n"
23543+ "1: prefetch (%1)\n" /* This set is 28 bytes */
23544+ " prefetch 64(%1)\n"
23545+ " prefetch 128(%1)\n"
23546+ " prefetch 192(%1)\n"
23547+ " prefetch 256(%1)\n"
23548 "2: \n"
23549 ".section .fixup, \"ax\"\n"
23550- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23551+ "3: \n"
23552+
23553+#ifdef CONFIG_PAX_KERNEXEC
23554+ " movl %%cr0, %0\n"
23555+ " movl %0, %%eax\n"
23556+ " andl $0xFFFEFFFF, %%eax\n"
23557+ " movl %%eax, %%cr0\n"
23558+#endif
23559+
23560+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23561+
23562+#ifdef CONFIG_PAX_KERNEXEC
23563+ " movl %0, %%cr0\n"
23564+#endif
23565+
23566 " jmp 2b\n"
23567 ".previous\n"
23568 _ASM_EXTABLE(1b, 3b)
23569- : : "r" (from));
23570+ : "=&r" (cr0) : "r" (from) : "ax");
23571
23572 for ( ; i > 5; i--) {
23573 __asm__ __volatile__ (
23574- "1: prefetch 320(%0)\n"
23575- "2: movq (%0), %%mm0\n"
23576- " movq 8(%0), %%mm1\n"
23577- " movq 16(%0), %%mm2\n"
23578- " movq 24(%0), %%mm3\n"
23579- " movq %%mm0, (%1)\n"
23580- " movq %%mm1, 8(%1)\n"
23581- " movq %%mm2, 16(%1)\n"
23582- " movq %%mm3, 24(%1)\n"
23583- " movq 32(%0), %%mm0\n"
23584- " movq 40(%0), %%mm1\n"
23585- " movq 48(%0), %%mm2\n"
23586- " movq 56(%0), %%mm3\n"
23587- " movq %%mm0, 32(%1)\n"
23588- " movq %%mm1, 40(%1)\n"
23589- " movq %%mm2, 48(%1)\n"
23590- " movq %%mm3, 56(%1)\n"
23591+ "1: prefetch 320(%1)\n"
23592+ "2: movq (%1), %%mm0\n"
23593+ " movq 8(%1), %%mm1\n"
23594+ " movq 16(%1), %%mm2\n"
23595+ " movq 24(%1), %%mm3\n"
23596+ " movq %%mm0, (%2)\n"
23597+ " movq %%mm1, 8(%2)\n"
23598+ " movq %%mm2, 16(%2)\n"
23599+ " movq %%mm3, 24(%2)\n"
23600+ " movq 32(%1), %%mm0\n"
23601+ " movq 40(%1), %%mm1\n"
23602+ " movq 48(%1), %%mm2\n"
23603+ " movq 56(%1), %%mm3\n"
23604+ " movq %%mm0, 32(%2)\n"
23605+ " movq %%mm1, 40(%2)\n"
23606+ " movq %%mm2, 48(%2)\n"
23607+ " movq %%mm3, 56(%2)\n"
23608 ".section .fixup, \"ax\"\n"
23609- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23610+ "3:\n"
23611+
23612+#ifdef CONFIG_PAX_KERNEXEC
23613+ " movl %%cr0, %0\n"
23614+ " movl %0, %%eax\n"
23615+ " andl $0xFFFEFFFF, %%eax\n"
23616+ " movl %%eax, %%cr0\n"
23617+#endif
23618+
23619+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23620+
23621+#ifdef CONFIG_PAX_KERNEXEC
23622+ " movl %0, %%cr0\n"
23623+#endif
23624+
23625 " jmp 2b\n"
23626 ".previous\n"
23627 _ASM_EXTABLE(1b, 3b)
23628- : : "r" (from), "r" (to) : "memory");
23629+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23630
23631 from += 64;
23632 to += 64;
23633@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
23634 static void fast_copy_page(void *to, void *from)
23635 {
23636 int i;
23637+ unsigned long cr0;
23638
23639 kernel_fpu_begin();
23640
23641@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
23642 * but that is for later. -AV
23643 */
23644 __asm__ __volatile__(
23645- "1: prefetch (%0)\n"
23646- " prefetch 64(%0)\n"
23647- " prefetch 128(%0)\n"
23648- " prefetch 192(%0)\n"
23649- " prefetch 256(%0)\n"
23650+ "1: prefetch (%1)\n"
23651+ " prefetch 64(%1)\n"
23652+ " prefetch 128(%1)\n"
23653+ " prefetch 192(%1)\n"
23654+ " prefetch 256(%1)\n"
23655 "2: \n"
23656 ".section .fixup, \"ax\"\n"
23657- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23658+ "3: \n"
23659+
23660+#ifdef CONFIG_PAX_KERNEXEC
23661+ " movl %%cr0, %0\n"
23662+ " movl %0, %%eax\n"
23663+ " andl $0xFFFEFFFF, %%eax\n"
23664+ " movl %%eax, %%cr0\n"
23665+#endif
23666+
23667+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23668+
23669+#ifdef CONFIG_PAX_KERNEXEC
23670+ " movl %0, %%cr0\n"
23671+#endif
23672+
23673 " jmp 2b\n"
23674 ".previous\n"
23675- _ASM_EXTABLE(1b, 3b) : : "r" (from));
23676+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23677
23678 for (i = 0; i < (4096-320)/64; i++) {
23679 __asm__ __volatile__ (
23680- "1: prefetch 320(%0)\n"
23681- "2: movq (%0), %%mm0\n"
23682- " movntq %%mm0, (%1)\n"
23683- " movq 8(%0), %%mm1\n"
23684- " movntq %%mm1, 8(%1)\n"
23685- " movq 16(%0), %%mm2\n"
23686- " movntq %%mm2, 16(%1)\n"
23687- " movq 24(%0), %%mm3\n"
23688- " movntq %%mm3, 24(%1)\n"
23689- " movq 32(%0), %%mm4\n"
23690- " movntq %%mm4, 32(%1)\n"
23691- " movq 40(%0), %%mm5\n"
23692- " movntq %%mm5, 40(%1)\n"
23693- " movq 48(%0), %%mm6\n"
23694- " movntq %%mm6, 48(%1)\n"
23695- " movq 56(%0), %%mm7\n"
23696- " movntq %%mm7, 56(%1)\n"
23697+ "1: prefetch 320(%1)\n"
23698+ "2: movq (%1), %%mm0\n"
23699+ " movntq %%mm0, (%2)\n"
23700+ " movq 8(%1), %%mm1\n"
23701+ " movntq %%mm1, 8(%2)\n"
23702+ " movq 16(%1), %%mm2\n"
23703+ " movntq %%mm2, 16(%2)\n"
23704+ " movq 24(%1), %%mm3\n"
23705+ " movntq %%mm3, 24(%2)\n"
23706+ " movq 32(%1), %%mm4\n"
23707+ " movntq %%mm4, 32(%2)\n"
23708+ " movq 40(%1), %%mm5\n"
23709+ " movntq %%mm5, 40(%2)\n"
23710+ " movq 48(%1), %%mm6\n"
23711+ " movntq %%mm6, 48(%2)\n"
23712+ " movq 56(%1), %%mm7\n"
23713+ " movntq %%mm7, 56(%2)\n"
23714 ".section .fixup, \"ax\"\n"
23715- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23716+ "3:\n"
23717+
23718+#ifdef CONFIG_PAX_KERNEXEC
23719+ " movl %%cr0, %0\n"
23720+ " movl %0, %%eax\n"
23721+ " andl $0xFFFEFFFF, %%eax\n"
23722+ " movl %%eax, %%cr0\n"
23723+#endif
23724+
23725+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23726+
23727+#ifdef CONFIG_PAX_KERNEXEC
23728+ " movl %0, %%cr0\n"
23729+#endif
23730+
23731 " jmp 2b\n"
23732 ".previous\n"
23733- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
23734+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23735
23736 from += 64;
23737 to += 64;
23738@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
23739 static void fast_copy_page(void *to, void *from)
23740 {
23741 int i;
23742+ unsigned long cr0;
23743
23744 kernel_fpu_begin();
23745
23746 __asm__ __volatile__ (
23747- "1: prefetch (%0)\n"
23748- " prefetch 64(%0)\n"
23749- " prefetch 128(%0)\n"
23750- " prefetch 192(%0)\n"
23751- " prefetch 256(%0)\n"
23752+ "1: prefetch (%1)\n"
23753+ " prefetch 64(%1)\n"
23754+ " prefetch 128(%1)\n"
23755+ " prefetch 192(%1)\n"
23756+ " prefetch 256(%1)\n"
23757 "2: \n"
23758 ".section .fixup, \"ax\"\n"
23759- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23760+ "3: \n"
23761+
23762+#ifdef CONFIG_PAX_KERNEXEC
23763+ " movl %%cr0, %0\n"
23764+ " movl %0, %%eax\n"
23765+ " andl $0xFFFEFFFF, %%eax\n"
23766+ " movl %%eax, %%cr0\n"
23767+#endif
23768+
23769+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
23770+
23771+#ifdef CONFIG_PAX_KERNEXEC
23772+ " movl %0, %%cr0\n"
23773+#endif
23774+
23775 " jmp 2b\n"
23776 ".previous\n"
23777- _ASM_EXTABLE(1b, 3b) : : "r" (from));
23778+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
23779
23780 for (i = 0; i < 4096/64; i++) {
23781 __asm__ __volatile__ (
23782- "1: prefetch 320(%0)\n"
23783- "2: movq (%0), %%mm0\n"
23784- " movq 8(%0), %%mm1\n"
23785- " movq 16(%0), %%mm2\n"
23786- " movq 24(%0), %%mm3\n"
23787- " movq %%mm0, (%1)\n"
23788- " movq %%mm1, 8(%1)\n"
23789- " movq %%mm2, 16(%1)\n"
23790- " movq %%mm3, 24(%1)\n"
23791- " movq 32(%0), %%mm0\n"
23792- " movq 40(%0), %%mm1\n"
23793- " movq 48(%0), %%mm2\n"
23794- " movq 56(%0), %%mm3\n"
23795- " movq %%mm0, 32(%1)\n"
23796- " movq %%mm1, 40(%1)\n"
23797- " movq %%mm2, 48(%1)\n"
23798- " movq %%mm3, 56(%1)\n"
23799+ "1: prefetch 320(%1)\n"
23800+ "2: movq (%1), %%mm0\n"
23801+ " movq 8(%1), %%mm1\n"
23802+ " movq 16(%1), %%mm2\n"
23803+ " movq 24(%1), %%mm3\n"
23804+ " movq %%mm0, (%2)\n"
23805+ " movq %%mm1, 8(%2)\n"
23806+ " movq %%mm2, 16(%2)\n"
23807+ " movq %%mm3, 24(%2)\n"
23808+ " movq 32(%1), %%mm0\n"
23809+ " movq 40(%1), %%mm1\n"
23810+ " movq 48(%1), %%mm2\n"
23811+ " movq 56(%1), %%mm3\n"
23812+ " movq %%mm0, 32(%2)\n"
23813+ " movq %%mm1, 40(%2)\n"
23814+ " movq %%mm2, 48(%2)\n"
23815+ " movq %%mm3, 56(%2)\n"
23816 ".section .fixup, \"ax\"\n"
23817- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23818+ "3:\n"
23819+
23820+#ifdef CONFIG_PAX_KERNEXEC
23821+ " movl %%cr0, %0\n"
23822+ " movl %0, %%eax\n"
23823+ " andl $0xFFFEFFFF, %%eax\n"
23824+ " movl %%eax, %%cr0\n"
23825+#endif
23826+
23827+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
23828+
23829+#ifdef CONFIG_PAX_KERNEXEC
23830+ " movl %0, %%cr0\n"
23831+#endif
23832+
23833 " jmp 2b\n"
23834 ".previous\n"
23835 _ASM_EXTABLE(1b, 3b)
23836- : : "r" (from), "r" (to) : "memory");
23837+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
23838
23839 from += 64;
23840 to += 64;
23841diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
23842index 69fa106..adda88b 100644
23843--- a/arch/x86/lib/msr-reg.S
23844+++ b/arch/x86/lib/msr-reg.S
23845@@ -3,6 +3,7 @@
23846 #include <asm/dwarf2.h>
23847 #include <asm/asm.h>
23848 #include <asm/msr.h>
23849+#include <asm/alternative-asm.h>
23850
23851 #ifdef CONFIG_X86_64
23852 /*
23853@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
23854 CFI_STARTPROC
23855 pushq_cfi %rbx
23856 pushq_cfi %rbp
23857- movq %rdi, %r10 /* Save pointer */
23858+ movq %rdi, %r9 /* Save pointer */
23859 xorl %r11d, %r11d /* Return value */
23860 movl (%rdi), %eax
23861 movl 4(%rdi), %ecx
23862@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
23863 movl 28(%rdi), %edi
23864 CFI_REMEMBER_STATE
23865 1: \op
23866-2: movl %eax, (%r10)
23867+2: movl %eax, (%r9)
23868 movl %r11d, %eax /* Return value */
23869- movl %ecx, 4(%r10)
23870- movl %edx, 8(%r10)
23871- movl %ebx, 12(%r10)
23872- movl %ebp, 20(%r10)
23873- movl %esi, 24(%r10)
23874- movl %edi, 28(%r10)
23875+ movl %ecx, 4(%r9)
23876+ movl %edx, 8(%r9)
23877+ movl %ebx, 12(%r9)
23878+ movl %ebp, 20(%r9)
23879+ movl %esi, 24(%r9)
23880+ movl %edi, 28(%r9)
23881 popq_cfi %rbp
23882 popq_cfi %rbx
23883+ pax_force_retaddr
23884 ret
23885 3:
23886 CFI_RESTORE_STATE
23887diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
23888index 36b0d15..d381858 100644
23889--- a/arch/x86/lib/putuser.S
23890+++ b/arch/x86/lib/putuser.S
23891@@ -15,7 +15,9 @@
23892 #include <asm/thread_info.h>
23893 #include <asm/errno.h>
23894 #include <asm/asm.h>
23895-
23896+#include <asm/segment.h>
23897+#include <asm/pgtable.h>
23898+#include <asm/alternative-asm.h>
23899
23900 /*
23901 * __put_user_X
23902@@ -29,52 +31,119 @@
23903 * as they get called from within inline assembly.
23904 */
23905
23906-#define ENTER CFI_STARTPROC ; \
23907- GET_THREAD_INFO(%_ASM_BX)
23908-#define EXIT ret ; \
23909+#define ENTER CFI_STARTPROC
23910+#define EXIT pax_force_retaddr; ret ; \
23911 CFI_ENDPROC
23912
23913+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23914+#define _DEST %_ASM_CX,%_ASM_BX
23915+#else
23916+#define _DEST %_ASM_CX
23917+#endif
23918+
23919+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23920+#define __copyuser_seg gs;
23921+#else
23922+#define __copyuser_seg
23923+#endif
23924+
23925 .text
23926 ENTRY(__put_user_1)
23927 ENTER
23928+
23929+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23930+ GET_THREAD_INFO(%_ASM_BX)
23931 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
23932 jae bad_put_user
23933-1: movb %al,(%_ASM_CX)
23934+
23935+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23936+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23937+ cmp %_ASM_BX,%_ASM_CX
23938+ jb 1234f
23939+ xor %ebx,%ebx
23940+1234:
23941+#endif
23942+
23943+#endif
23944+
23945+1: __copyuser_seg movb %al,(_DEST)
23946 xor %eax,%eax
23947 EXIT
23948 ENDPROC(__put_user_1)
23949
23950 ENTRY(__put_user_2)
23951 ENTER
23952+
23953+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23954+ GET_THREAD_INFO(%_ASM_BX)
23955 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23956 sub $1,%_ASM_BX
23957 cmp %_ASM_BX,%_ASM_CX
23958 jae bad_put_user
23959-2: movw %ax,(%_ASM_CX)
23960+
23961+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23962+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23963+ cmp %_ASM_BX,%_ASM_CX
23964+ jb 1234f
23965+ xor %ebx,%ebx
23966+1234:
23967+#endif
23968+
23969+#endif
23970+
23971+2: __copyuser_seg movw %ax,(_DEST)
23972 xor %eax,%eax
23973 EXIT
23974 ENDPROC(__put_user_2)
23975
23976 ENTRY(__put_user_4)
23977 ENTER
23978+
23979+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
23980+ GET_THREAD_INFO(%_ASM_BX)
23981 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
23982 sub $3,%_ASM_BX
23983 cmp %_ASM_BX,%_ASM_CX
23984 jae bad_put_user
23985-3: movl %eax,(%_ASM_CX)
23986+
23987+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23988+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
23989+ cmp %_ASM_BX,%_ASM_CX
23990+ jb 1234f
23991+ xor %ebx,%ebx
23992+1234:
23993+#endif
23994+
23995+#endif
23996+
23997+3: __copyuser_seg movl %eax,(_DEST)
23998 xor %eax,%eax
23999 EXIT
24000 ENDPROC(__put_user_4)
24001
24002 ENTRY(__put_user_8)
24003 ENTER
24004+
24005+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24006+ GET_THREAD_INFO(%_ASM_BX)
24007 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
24008 sub $7,%_ASM_BX
24009 cmp %_ASM_BX,%_ASM_CX
24010 jae bad_put_user
24011-4: mov %_ASM_AX,(%_ASM_CX)
24012+
24013+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24014+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
24015+ cmp %_ASM_BX,%_ASM_CX
24016+ jb 1234f
24017+ xor %ebx,%ebx
24018+1234:
24019+#endif
24020+
24021+#endif
24022+
24023+4: __copyuser_seg mov %_ASM_AX,(_DEST)
24024 #ifdef CONFIG_X86_32
24025-5: movl %edx,4(%_ASM_CX)
24026+5: __copyuser_seg movl %edx,4(_DEST)
24027 #endif
24028 xor %eax,%eax
24029 EXIT
24030diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
24031index 05ea55f..6345b9a 100644
24032--- a/arch/x86/lib/rwlock_64.S
24033+++ b/arch/x86/lib/rwlock_64.S
24034@@ -2,6 +2,7 @@
24035
24036 #include <linux/linkage.h>
24037 #include <asm/rwlock.h>
24038+#include <asm/asm.h>
24039 #include <asm/alternative-asm.h>
24040 #include <asm/dwarf2.h>
24041
24042@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
24043 CFI_STARTPROC
24044 LOCK_PREFIX
24045 addl $RW_LOCK_BIAS,(%rdi)
24046+
24047+#ifdef CONFIG_PAX_REFCOUNT
24048+ jno 1234f
24049+ LOCK_PREFIX
24050+ subl $RW_LOCK_BIAS,(%rdi)
24051+ int $4
24052+1234:
24053+ _ASM_EXTABLE(1234b, 1234b)
24054+#endif
24055+
24056 1: rep
24057 nop
24058 cmpl $RW_LOCK_BIAS,(%rdi)
24059 jne 1b
24060 LOCK_PREFIX
24061 subl $RW_LOCK_BIAS,(%rdi)
24062+
24063+#ifdef CONFIG_PAX_REFCOUNT
24064+ jno 1234f
24065+ LOCK_PREFIX
24066+ addl $RW_LOCK_BIAS,(%rdi)
24067+ int $4
24068+1234:
24069+ _ASM_EXTABLE(1234b, 1234b)
24070+#endif
24071+
24072 jnz __write_lock_failed
24073+ pax_force_retaddr
24074 ret
24075 CFI_ENDPROC
24076 END(__write_lock_failed)
24077@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
24078 CFI_STARTPROC
24079 LOCK_PREFIX
24080 incl (%rdi)
24081+
24082+#ifdef CONFIG_PAX_REFCOUNT
24083+ jno 1234f
24084+ LOCK_PREFIX
24085+ decl (%rdi)
24086+ int $4
24087+1234:
24088+ _ASM_EXTABLE(1234b, 1234b)
24089+#endif
24090+
24091 1: rep
24092 nop
24093 cmpl $1,(%rdi)
24094 js 1b
24095 LOCK_PREFIX
24096 decl (%rdi)
24097+
24098+#ifdef CONFIG_PAX_REFCOUNT
24099+ jno 1234f
24100+ LOCK_PREFIX
24101+ incl (%rdi)
24102+ int $4
24103+1234:
24104+ _ASM_EXTABLE(1234b, 1234b)
24105+#endif
24106+
24107 js __read_lock_failed
24108+ pax_force_retaddr
24109 ret
24110 CFI_ENDPROC
24111 END(__read_lock_failed)
24112diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
24113index 15acecf..f768b10 100644
24114--- a/arch/x86/lib/rwsem_64.S
24115+++ b/arch/x86/lib/rwsem_64.S
24116@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
24117 call rwsem_down_read_failed
24118 popq %rdx
24119 restore_common_regs
24120+ pax_force_retaddr
24121 ret
24122 ENDPROC(call_rwsem_down_read_failed)
24123
24124@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
24125 movq %rax,%rdi
24126 call rwsem_down_write_failed
24127 restore_common_regs
24128+ pax_force_retaddr
24129 ret
24130 ENDPROC(call_rwsem_down_write_failed)
24131
24132@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
24133 movq %rax,%rdi
24134 call rwsem_wake
24135 restore_common_regs
24136-1: ret
24137+1: pax_force_retaddr
24138+ ret
24139 ENDPROC(call_rwsem_wake)
24140
24141 /* Fix up special calling conventions */
24142@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
24143 call rwsem_downgrade_wake
24144 popq %rdx
24145 restore_common_regs
24146+ pax_force_retaddr
24147 ret
24148 ENDPROC(call_rwsem_downgrade_wake)
24149diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
24150index bf9a7d5..fb06ab5 100644
24151--- a/arch/x86/lib/thunk_64.S
24152+++ b/arch/x86/lib/thunk_64.S
24153@@ -10,7 +10,8 @@
24154 #include <asm/dwarf2.h>
24155 #include <asm/calling.h>
24156 #include <asm/rwlock.h>
24157-
24158+ #include <asm/alternative-asm.h>
24159+
24160 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
24161 .macro thunk name,func
24162 .globl \name
24163@@ -70,6 +71,7 @@
24164 SAVE_ARGS
24165 restore:
24166 RESTORE_ARGS
24167+ pax_force_retaddr
24168 ret
24169 CFI_ENDPROC
24170
24171@@ -77,5 +79,6 @@ restore:
24172 SAVE_ARGS
24173 restore_norax:
24174 RESTORE_ARGS 1
24175+ pax_force_retaddr
24176 ret
24177 CFI_ENDPROC
24178diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
24179index 1f118d4..ec4a953 100644
24180--- a/arch/x86/lib/usercopy_32.c
24181+++ b/arch/x86/lib/usercopy_32.c
24182@@ -43,7 +43,7 @@ do { \
24183 __asm__ __volatile__( \
24184 " testl %1,%1\n" \
24185 " jz 2f\n" \
24186- "0: lodsb\n" \
24187+ "0: "__copyuser_seg"lodsb\n" \
24188 " stosb\n" \
24189 " testb %%al,%%al\n" \
24190 " jz 1f\n" \
24191@@ -128,10 +128,12 @@ do { \
24192 int __d0; \
24193 might_fault(); \
24194 __asm__ __volatile__( \
24195+ __COPYUSER_SET_ES \
24196 "0: rep; stosl\n" \
24197 " movl %2,%0\n" \
24198 "1: rep; stosb\n" \
24199 "2:\n" \
24200+ __COPYUSER_RESTORE_ES \
24201 ".section .fixup,\"ax\"\n" \
24202 "3: lea 0(%2,%0,4),%0\n" \
24203 " jmp 2b\n" \
24204@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
24205 might_fault();
24206
24207 __asm__ __volatile__(
24208+ __COPYUSER_SET_ES
24209 " testl %0, %0\n"
24210 " jz 3f\n"
24211 " andl %0,%%ecx\n"
24212@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
24213 " subl %%ecx,%0\n"
24214 " addl %0,%%eax\n"
24215 "1:\n"
24216+ __COPYUSER_RESTORE_ES
24217 ".section .fixup,\"ax\"\n"
24218 "2: xorl %%eax,%%eax\n"
24219 " jmp 1b\n"
24220@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
24221
24222 #ifdef CONFIG_X86_INTEL_USERCOPY
24223 static unsigned long
24224-__copy_user_intel(void __user *to, const void *from, unsigned long size)
24225+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
24226 {
24227 int d0, d1;
24228 __asm__ __volatile__(
24229@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24230 " .align 2,0x90\n"
24231 "3: movl 0(%4), %%eax\n"
24232 "4: movl 4(%4), %%edx\n"
24233- "5: movl %%eax, 0(%3)\n"
24234- "6: movl %%edx, 4(%3)\n"
24235+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
24236+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
24237 "7: movl 8(%4), %%eax\n"
24238 "8: movl 12(%4),%%edx\n"
24239- "9: movl %%eax, 8(%3)\n"
24240- "10: movl %%edx, 12(%3)\n"
24241+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
24242+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
24243 "11: movl 16(%4), %%eax\n"
24244 "12: movl 20(%4), %%edx\n"
24245- "13: movl %%eax, 16(%3)\n"
24246- "14: movl %%edx, 20(%3)\n"
24247+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
24248+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
24249 "15: movl 24(%4), %%eax\n"
24250 "16: movl 28(%4), %%edx\n"
24251- "17: movl %%eax, 24(%3)\n"
24252- "18: movl %%edx, 28(%3)\n"
24253+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
24254+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
24255 "19: movl 32(%4), %%eax\n"
24256 "20: movl 36(%4), %%edx\n"
24257- "21: movl %%eax, 32(%3)\n"
24258- "22: movl %%edx, 36(%3)\n"
24259+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
24260+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
24261 "23: movl 40(%4), %%eax\n"
24262 "24: movl 44(%4), %%edx\n"
24263- "25: movl %%eax, 40(%3)\n"
24264- "26: movl %%edx, 44(%3)\n"
24265+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
24266+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
24267 "27: movl 48(%4), %%eax\n"
24268 "28: movl 52(%4), %%edx\n"
24269- "29: movl %%eax, 48(%3)\n"
24270- "30: movl %%edx, 52(%3)\n"
24271+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
24272+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
24273 "31: movl 56(%4), %%eax\n"
24274 "32: movl 60(%4), %%edx\n"
24275- "33: movl %%eax, 56(%3)\n"
24276- "34: movl %%edx, 60(%3)\n"
24277+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
24278+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
24279 " addl $-64, %0\n"
24280 " addl $64, %4\n"
24281 " addl $64, %3\n"
24282@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
24283 " shrl $2, %0\n"
24284 " andl $3, %%eax\n"
24285 " cld\n"
24286+ __COPYUSER_SET_ES
24287 "99: rep; movsl\n"
24288 "36: movl %%eax, %0\n"
24289 "37: rep; movsb\n"
24290 "100:\n"
24291+ __COPYUSER_RESTORE_ES
24292+ ".section .fixup,\"ax\"\n"
24293+ "101: lea 0(%%eax,%0,4),%0\n"
24294+ " jmp 100b\n"
24295+ ".previous\n"
24296+ ".section __ex_table,\"a\"\n"
24297+ " .align 4\n"
24298+ " .long 1b,100b\n"
24299+ " .long 2b,100b\n"
24300+ " .long 3b,100b\n"
24301+ " .long 4b,100b\n"
24302+ " .long 5b,100b\n"
24303+ " .long 6b,100b\n"
24304+ " .long 7b,100b\n"
24305+ " .long 8b,100b\n"
24306+ " .long 9b,100b\n"
24307+ " .long 10b,100b\n"
24308+ " .long 11b,100b\n"
24309+ " .long 12b,100b\n"
24310+ " .long 13b,100b\n"
24311+ " .long 14b,100b\n"
24312+ " .long 15b,100b\n"
24313+ " .long 16b,100b\n"
24314+ " .long 17b,100b\n"
24315+ " .long 18b,100b\n"
24316+ " .long 19b,100b\n"
24317+ " .long 20b,100b\n"
24318+ " .long 21b,100b\n"
24319+ " .long 22b,100b\n"
24320+ " .long 23b,100b\n"
24321+ " .long 24b,100b\n"
24322+ " .long 25b,100b\n"
24323+ " .long 26b,100b\n"
24324+ " .long 27b,100b\n"
24325+ " .long 28b,100b\n"
24326+ " .long 29b,100b\n"
24327+ " .long 30b,100b\n"
24328+ " .long 31b,100b\n"
24329+ " .long 32b,100b\n"
24330+ " .long 33b,100b\n"
24331+ " .long 34b,100b\n"
24332+ " .long 35b,100b\n"
24333+ " .long 36b,100b\n"
24334+ " .long 37b,100b\n"
24335+ " .long 99b,101b\n"
24336+ ".previous"
24337+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
24338+ : "1"(to), "2"(from), "0"(size)
24339+ : "eax", "edx", "memory");
24340+ return size;
24341+}
24342+
24343+static unsigned long
24344+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
24345+{
24346+ int d0, d1;
24347+ __asm__ __volatile__(
24348+ " .align 2,0x90\n"
24349+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
24350+ " cmpl $67, %0\n"
24351+ " jbe 3f\n"
24352+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
24353+ " .align 2,0x90\n"
24354+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
24355+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
24356+ "5: movl %%eax, 0(%3)\n"
24357+ "6: movl %%edx, 4(%3)\n"
24358+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
24359+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
24360+ "9: movl %%eax, 8(%3)\n"
24361+ "10: movl %%edx, 12(%3)\n"
24362+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
24363+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
24364+ "13: movl %%eax, 16(%3)\n"
24365+ "14: movl %%edx, 20(%3)\n"
24366+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
24367+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
24368+ "17: movl %%eax, 24(%3)\n"
24369+ "18: movl %%edx, 28(%3)\n"
24370+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
24371+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
24372+ "21: movl %%eax, 32(%3)\n"
24373+ "22: movl %%edx, 36(%3)\n"
24374+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
24375+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
24376+ "25: movl %%eax, 40(%3)\n"
24377+ "26: movl %%edx, 44(%3)\n"
24378+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
24379+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
24380+ "29: movl %%eax, 48(%3)\n"
24381+ "30: movl %%edx, 52(%3)\n"
24382+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
24383+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
24384+ "33: movl %%eax, 56(%3)\n"
24385+ "34: movl %%edx, 60(%3)\n"
24386+ " addl $-64, %0\n"
24387+ " addl $64, %4\n"
24388+ " addl $64, %3\n"
24389+ " cmpl $63, %0\n"
24390+ " ja 1b\n"
24391+ "35: movl %0, %%eax\n"
24392+ " shrl $2, %0\n"
24393+ " andl $3, %%eax\n"
24394+ " cld\n"
24395+ "99: rep; "__copyuser_seg" movsl\n"
24396+ "36: movl %%eax, %0\n"
24397+ "37: rep; "__copyuser_seg" movsb\n"
24398+ "100:\n"
24399 ".section .fixup,\"ax\"\n"
24400 "101: lea 0(%%eax,%0,4),%0\n"
24401 " jmp 100b\n"
24402@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24403 int d0, d1;
24404 __asm__ __volatile__(
24405 " .align 2,0x90\n"
24406- "0: movl 32(%4), %%eax\n"
24407+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24408 " cmpl $67, %0\n"
24409 " jbe 2f\n"
24410- "1: movl 64(%4), %%eax\n"
24411+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24412 " .align 2,0x90\n"
24413- "2: movl 0(%4), %%eax\n"
24414- "21: movl 4(%4), %%edx\n"
24415+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24416+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24417 " movl %%eax, 0(%3)\n"
24418 " movl %%edx, 4(%3)\n"
24419- "3: movl 8(%4), %%eax\n"
24420- "31: movl 12(%4),%%edx\n"
24421+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24422+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24423 " movl %%eax, 8(%3)\n"
24424 " movl %%edx, 12(%3)\n"
24425- "4: movl 16(%4), %%eax\n"
24426- "41: movl 20(%4), %%edx\n"
24427+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24428+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24429 " movl %%eax, 16(%3)\n"
24430 " movl %%edx, 20(%3)\n"
24431- "10: movl 24(%4), %%eax\n"
24432- "51: movl 28(%4), %%edx\n"
24433+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24434+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24435 " movl %%eax, 24(%3)\n"
24436 " movl %%edx, 28(%3)\n"
24437- "11: movl 32(%4), %%eax\n"
24438- "61: movl 36(%4), %%edx\n"
24439+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24440+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24441 " movl %%eax, 32(%3)\n"
24442 " movl %%edx, 36(%3)\n"
24443- "12: movl 40(%4), %%eax\n"
24444- "71: movl 44(%4), %%edx\n"
24445+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24446+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24447 " movl %%eax, 40(%3)\n"
24448 " movl %%edx, 44(%3)\n"
24449- "13: movl 48(%4), %%eax\n"
24450- "81: movl 52(%4), %%edx\n"
24451+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24452+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24453 " movl %%eax, 48(%3)\n"
24454 " movl %%edx, 52(%3)\n"
24455- "14: movl 56(%4), %%eax\n"
24456- "91: movl 60(%4), %%edx\n"
24457+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24458+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24459 " movl %%eax, 56(%3)\n"
24460 " movl %%edx, 60(%3)\n"
24461 " addl $-64, %0\n"
24462@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
24463 " shrl $2, %0\n"
24464 " andl $3, %%eax\n"
24465 " cld\n"
24466- "6: rep; movsl\n"
24467+ "6: rep; "__copyuser_seg" movsl\n"
24468 " movl %%eax,%0\n"
24469- "7: rep; movsb\n"
24470+ "7: rep; "__copyuser_seg" movsb\n"
24471 "8:\n"
24472 ".section .fixup,\"ax\"\n"
24473 "9: lea 0(%%eax,%0,4),%0\n"
24474@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24475
24476 __asm__ __volatile__(
24477 " .align 2,0x90\n"
24478- "0: movl 32(%4), %%eax\n"
24479+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24480 " cmpl $67, %0\n"
24481 " jbe 2f\n"
24482- "1: movl 64(%4), %%eax\n"
24483+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24484 " .align 2,0x90\n"
24485- "2: movl 0(%4), %%eax\n"
24486- "21: movl 4(%4), %%edx\n"
24487+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24488+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24489 " movnti %%eax, 0(%3)\n"
24490 " movnti %%edx, 4(%3)\n"
24491- "3: movl 8(%4), %%eax\n"
24492- "31: movl 12(%4),%%edx\n"
24493+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24494+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24495 " movnti %%eax, 8(%3)\n"
24496 " movnti %%edx, 12(%3)\n"
24497- "4: movl 16(%4), %%eax\n"
24498- "41: movl 20(%4), %%edx\n"
24499+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24500+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24501 " movnti %%eax, 16(%3)\n"
24502 " movnti %%edx, 20(%3)\n"
24503- "10: movl 24(%4), %%eax\n"
24504- "51: movl 28(%4), %%edx\n"
24505+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24506+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24507 " movnti %%eax, 24(%3)\n"
24508 " movnti %%edx, 28(%3)\n"
24509- "11: movl 32(%4), %%eax\n"
24510- "61: movl 36(%4), %%edx\n"
24511+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24512+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24513 " movnti %%eax, 32(%3)\n"
24514 " movnti %%edx, 36(%3)\n"
24515- "12: movl 40(%4), %%eax\n"
24516- "71: movl 44(%4), %%edx\n"
24517+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24518+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24519 " movnti %%eax, 40(%3)\n"
24520 " movnti %%edx, 44(%3)\n"
24521- "13: movl 48(%4), %%eax\n"
24522- "81: movl 52(%4), %%edx\n"
24523+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24524+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24525 " movnti %%eax, 48(%3)\n"
24526 " movnti %%edx, 52(%3)\n"
24527- "14: movl 56(%4), %%eax\n"
24528- "91: movl 60(%4), %%edx\n"
24529+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24530+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24531 " movnti %%eax, 56(%3)\n"
24532 " movnti %%edx, 60(%3)\n"
24533 " addl $-64, %0\n"
24534@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
24535 " shrl $2, %0\n"
24536 " andl $3, %%eax\n"
24537 " cld\n"
24538- "6: rep; movsl\n"
24539+ "6: rep; "__copyuser_seg" movsl\n"
24540 " movl %%eax,%0\n"
24541- "7: rep; movsb\n"
24542+ "7: rep; "__copyuser_seg" movsb\n"
24543 "8:\n"
24544 ".section .fixup,\"ax\"\n"
24545 "9: lea 0(%%eax,%0,4),%0\n"
24546@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
24547
24548 __asm__ __volatile__(
24549 " .align 2,0x90\n"
24550- "0: movl 32(%4), %%eax\n"
24551+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
24552 " cmpl $67, %0\n"
24553 " jbe 2f\n"
24554- "1: movl 64(%4), %%eax\n"
24555+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
24556 " .align 2,0x90\n"
24557- "2: movl 0(%4), %%eax\n"
24558- "21: movl 4(%4), %%edx\n"
24559+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
24560+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
24561 " movnti %%eax, 0(%3)\n"
24562 " movnti %%edx, 4(%3)\n"
24563- "3: movl 8(%4), %%eax\n"
24564- "31: movl 12(%4),%%edx\n"
24565+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
24566+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
24567 " movnti %%eax, 8(%3)\n"
24568 " movnti %%edx, 12(%3)\n"
24569- "4: movl 16(%4), %%eax\n"
24570- "41: movl 20(%4), %%edx\n"
24571+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
24572+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
24573 " movnti %%eax, 16(%3)\n"
24574 " movnti %%edx, 20(%3)\n"
24575- "10: movl 24(%4), %%eax\n"
24576- "51: movl 28(%4), %%edx\n"
24577+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
24578+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
24579 " movnti %%eax, 24(%3)\n"
24580 " movnti %%edx, 28(%3)\n"
24581- "11: movl 32(%4), %%eax\n"
24582- "61: movl 36(%4), %%edx\n"
24583+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
24584+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
24585 " movnti %%eax, 32(%3)\n"
24586 " movnti %%edx, 36(%3)\n"
24587- "12: movl 40(%4), %%eax\n"
24588- "71: movl 44(%4), %%edx\n"
24589+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
24590+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
24591 " movnti %%eax, 40(%3)\n"
24592 " movnti %%edx, 44(%3)\n"
24593- "13: movl 48(%4), %%eax\n"
24594- "81: movl 52(%4), %%edx\n"
24595+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
24596+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
24597 " movnti %%eax, 48(%3)\n"
24598 " movnti %%edx, 52(%3)\n"
24599- "14: movl 56(%4), %%eax\n"
24600- "91: movl 60(%4), %%edx\n"
24601+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
24602+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
24603 " movnti %%eax, 56(%3)\n"
24604 " movnti %%edx, 60(%3)\n"
24605 " addl $-64, %0\n"
24606@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
24607 " shrl $2, %0\n"
24608 " andl $3, %%eax\n"
24609 " cld\n"
24610- "6: rep; movsl\n"
24611+ "6: rep; "__copyuser_seg" movsl\n"
24612 " movl %%eax,%0\n"
24613- "7: rep; movsb\n"
24614+ "7: rep; "__copyuser_seg" movsb\n"
24615 "8:\n"
24616 ".section .fixup,\"ax\"\n"
24617 "9: lea 0(%%eax,%0,4),%0\n"
24618@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
24619 */
24620 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
24621 unsigned long size);
24622-unsigned long __copy_user_intel(void __user *to, const void *from,
24623+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
24624+ unsigned long size);
24625+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
24626 unsigned long size);
24627 unsigned long __copy_user_zeroing_intel_nocache(void *to,
24628 const void __user *from, unsigned long size);
24629 #endif /* CONFIG_X86_INTEL_USERCOPY */
24630
24631 /* Generic arbitrary sized copy. */
24632-#define __copy_user(to, from, size) \
24633+#define __copy_user(to, from, size, prefix, set, restore) \
24634 do { \
24635 int __d0, __d1, __d2; \
24636 __asm__ __volatile__( \
24637+ set \
24638 " cmp $7,%0\n" \
24639 " jbe 1f\n" \
24640 " movl %1,%0\n" \
24641 " negl %0\n" \
24642 " andl $7,%0\n" \
24643 " subl %0,%3\n" \
24644- "4: rep; movsb\n" \
24645+ "4: rep; "prefix"movsb\n" \
24646 " movl %3,%0\n" \
24647 " shrl $2,%0\n" \
24648 " andl $3,%3\n" \
24649 " .align 2,0x90\n" \
24650- "0: rep; movsl\n" \
24651+ "0: rep; "prefix"movsl\n" \
24652 " movl %3,%0\n" \
24653- "1: rep; movsb\n" \
24654+ "1: rep; "prefix"movsb\n" \
24655 "2:\n" \
24656+ restore \
24657 ".section .fixup,\"ax\"\n" \
24658 "5: addl %3,%0\n" \
24659 " jmp 2b\n" \
24660@@ -682,14 +799,14 @@ do { \
24661 " negl %0\n" \
24662 " andl $7,%0\n" \
24663 " subl %0,%3\n" \
24664- "4: rep; movsb\n" \
24665+ "4: rep; "__copyuser_seg"movsb\n" \
24666 " movl %3,%0\n" \
24667 " shrl $2,%0\n" \
24668 " andl $3,%3\n" \
24669 " .align 2,0x90\n" \
24670- "0: rep; movsl\n" \
24671+ "0: rep; "__copyuser_seg"movsl\n" \
24672 " movl %3,%0\n" \
24673- "1: rep; movsb\n" \
24674+ "1: rep; "__copyuser_seg"movsb\n" \
24675 "2:\n" \
24676 ".section .fixup,\"ax\"\n" \
24677 "5: addl %3,%0\n" \
24678@@ -775,9 +892,9 @@ survive:
24679 }
24680 #endif
24681 if (movsl_is_ok(to, from, n))
24682- __copy_user(to, from, n);
24683+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
24684 else
24685- n = __copy_user_intel(to, from, n);
24686+ n = __generic_copy_to_user_intel(to, from, n);
24687 return n;
24688 }
24689 EXPORT_SYMBOL(__copy_to_user_ll);
24690@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
24691 unsigned long n)
24692 {
24693 if (movsl_is_ok(to, from, n))
24694- __copy_user(to, from, n);
24695+ __copy_user(to, from, n, __copyuser_seg, "", "");
24696 else
24697- n = __copy_user_intel((void __user *)to,
24698- (const void *)from, n);
24699+ n = __generic_copy_from_user_intel(to, from, n);
24700 return n;
24701 }
24702 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
24703@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
24704 if (n > 64 && cpu_has_xmm2)
24705 n = __copy_user_intel_nocache(to, from, n);
24706 else
24707- __copy_user(to, from, n);
24708+ __copy_user(to, from, n, __copyuser_seg, "", "");
24709 #else
24710- __copy_user(to, from, n);
24711+ __copy_user(to, from, n, __copyuser_seg, "", "");
24712 #endif
24713 return n;
24714 }
24715 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
24716
24717-/**
24718- * copy_to_user: - Copy a block of data into user space.
24719- * @to: Destination address, in user space.
24720- * @from: Source address, in kernel space.
24721- * @n: Number of bytes to copy.
24722- *
24723- * Context: User context only. This function may sleep.
24724- *
24725- * Copy data from kernel space to user space.
24726- *
24727- * Returns number of bytes that could not be copied.
24728- * On success, this will be zero.
24729- */
24730-unsigned long
24731-copy_to_user(void __user *to, const void *from, unsigned long n)
24732+#ifdef CONFIG_PAX_MEMORY_UDEREF
24733+void __set_fs(mm_segment_t x)
24734 {
24735- if (access_ok(VERIFY_WRITE, to, n))
24736- n = __copy_to_user(to, from, n);
24737- return n;
24738+ switch (x.seg) {
24739+ case 0:
24740+ loadsegment(gs, 0);
24741+ break;
24742+ case TASK_SIZE_MAX:
24743+ loadsegment(gs, __USER_DS);
24744+ break;
24745+ case -1UL:
24746+ loadsegment(gs, __KERNEL_DS);
24747+ break;
24748+ default:
24749+ BUG();
24750+ }
24751+ return;
24752 }
24753-EXPORT_SYMBOL(copy_to_user);
24754+EXPORT_SYMBOL(__set_fs);
24755
24756-/**
24757- * copy_from_user: - Copy a block of data from user space.
24758- * @to: Destination address, in kernel space.
24759- * @from: Source address, in user space.
24760- * @n: Number of bytes to copy.
24761- *
24762- * Context: User context only. This function may sleep.
24763- *
24764- * Copy data from user space to kernel space.
24765- *
24766- * Returns number of bytes that could not be copied.
24767- * On success, this will be zero.
24768- *
24769- * If some data could not be copied, this function will pad the copied
24770- * data to the requested size using zero bytes.
24771- */
24772-unsigned long
24773-copy_from_user(void *to, const void __user *from, unsigned long n)
24774+void set_fs(mm_segment_t x)
24775 {
24776- if (access_ok(VERIFY_READ, from, n))
24777- n = __copy_from_user(to, from, n);
24778- else
24779- memset(to, 0, n);
24780- return n;
24781+ current_thread_info()->addr_limit = x;
24782+ __set_fs(x);
24783 }
24784-EXPORT_SYMBOL(copy_from_user);
24785+EXPORT_SYMBOL(set_fs);
24786+#endif
24787diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
24788index b7c2849..8633ad8 100644
24789--- a/arch/x86/lib/usercopy_64.c
24790+++ b/arch/x86/lib/usercopy_64.c
24791@@ -42,6 +42,12 @@ long
24792 __strncpy_from_user(char *dst, const char __user *src, long count)
24793 {
24794 long res;
24795+
24796+#ifdef CONFIG_PAX_MEMORY_UDEREF
24797+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
24798+ src += PAX_USER_SHADOW_BASE;
24799+#endif
24800+
24801 __do_strncpy_from_user(dst, src, count, res);
24802 return res;
24803 }
24804@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
24805 {
24806 long __d0;
24807 might_fault();
24808+
24809+#ifdef CONFIG_PAX_MEMORY_UDEREF
24810+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
24811+ addr += PAX_USER_SHADOW_BASE;
24812+#endif
24813+
24814 /* no memory constraint because it doesn't change any memory gcc knows
24815 about */
24816 asm volatile(
24817@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
24818 }
24819 EXPORT_SYMBOL(strlen_user);
24820
24821-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
24822+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
24823 {
24824- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
24825- return copy_user_generic((__force void *)to, (__force void *)from, len);
24826- }
24827- return len;
24828+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
24829+
24830+#ifdef CONFIG_PAX_MEMORY_UDEREF
24831+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
24832+ to += PAX_USER_SHADOW_BASE;
24833+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
24834+ from += PAX_USER_SHADOW_BASE;
24835+#endif
24836+
24837+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
24838+ }
24839+ return len;
24840 }
24841 EXPORT_SYMBOL(copy_in_user);
24842
24843@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
24844 * it is not necessary to optimize tail handling.
24845 */
24846 unsigned long
24847-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
24848+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
24849 {
24850 char c;
24851 unsigned zero_len;
24852diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
24853index 61b41ca..5fef66a 100644
24854--- a/arch/x86/mm/extable.c
24855+++ b/arch/x86/mm/extable.c
24856@@ -1,14 +1,71 @@
24857 #include <linux/module.h>
24858 #include <linux/spinlock.h>
24859+#include <linux/sort.h>
24860 #include <asm/uaccess.h>
24861+#include <asm/pgtable.h>
24862
24863+/*
24864+ * The exception table needs to be sorted so that the binary
24865+ * search that we use to find entries in it works properly.
24866+ * This is used both for the kernel exception table and for
24867+ * the exception tables of modules that get loaded.
24868+ */
24869+static int cmp_ex(const void *a, const void *b)
24870+{
24871+ const struct exception_table_entry *x = a, *y = b;
24872+
24873+ /* avoid overflow */
24874+ if (x->insn > y->insn)
24875+ return 1;
24876+ if (x->insn < y->insn)
24877+ return -1;
24878+ return 0;
24879+}
24880+
24881+static void swap_ex(void *a, void *b, int size)
24882+{
24883+ struct exception_table_entry t, *x = a, *y = b;
24884+
24885+ t = *x;
24886+
24887+ pax_open_kernel();
24888+ *x = *y;
24889+ *y = t;
24890+ pax_close_kernel();
24891+}
24892+
24893+void sort_extable(struct exception_table_entry *start,
24894+ struct exception_table_entry *finish)
24895+{
24896+ sort(start, finish - start, sizeof(struct exception_table_entry),
24897+ cmp_ex, swap_ex);
24898+}
24899+
24900+#ifdef CONFIG_MODULES
24901+/*
24902+ * If the exception table is sorted, any referring to the module init
24903+ * will be at the beginning or the end.
24904+ */
24905+void trim_init_extable(struct module *m)
24906+{
24907+ /*trim the beginning*/
24908+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
24909+ m->extable++;
24910+ m->num_exentries--;
24911+ }
24912+ /*trim the end*/
24913+ while (m->num_exentries &&
24914+ within_module_init(m->extable[m->num_exentries-1].insn, m))
24915+ m->num_exentries--;
24916+}
24917+#endif /* CONFIG_MODULES */
24918
24919 int fixup_exception(struct pt_regs *regs)
24920 {
24921 const struct exception_table_entry *fixup;
24922
24923 #ifdef CONFIG_PNPBIOS
24924- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
24925+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
24926 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
24927 extern u32 pnp_bios_is_utter_crap;
24928 pnp_bios_is_utter_crap = 1;
24929diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
24930index 8ac0d76..ca501e2 100644
24931--- a/arch/x86/mm/fault.c
24932+++ b/arch/x86/mm/fault.c
24933@@ -11,10 +11,19 @@
24934 #include <linux/kprobes.h> /* __kprobes, ... */
24935 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
24936 #include <linux/perf_event.h> /* perf_sw_event */
24937+#include <linux/unistd.h>
24938+#include <linux/compiler.h>
24939
24940 #include <asm/traps.h> /* dotraplinkage, ... */
24941 #include <asm/pgalloc.h> /* pgd_*(), ... */
24942 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
24943+#include <asm/vsyscall.h>
24944+#include <asm/tlbflush.h>
24945+
24946+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24947+#include <asm/stacktrace.h>
24948+#include "../kernel/dumpstack.h"
24949+#endif
24950
24951 /*
24952 * Page fault error code bits:
24953@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
24954 int ret = 0;
24955
24956 /* kprobe_running() needs smp_processor_id() */
24957- if (kprobes_built_in() && !user_mode_vm(regs)) {
24958+ if (kprobes_built_in() && !user_mode(regs)) {
24959 preempt_disable();
24960 if (kprobe_running() && kprobe_fault_handler(regs, 14))
24961 ret = 1;
24962@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
24963 return !instr_lo || (instr_lo>>1) == 1;
24964 case 0x00:
24965 /* Prefetch instruction is 0x0F0D or 0x0F18 */
24966- if (probe_kernel_address(instr, opcode))
24967+ if (user_mode(regs)) {
24968+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24969+ return 0;
24970+ } else if (probe_kernel_address(instr, opcode))
24971 return 0;
24972
24973 *prefetch = (instr_lo == 0xF) &&
24974@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
24975 while (instr < max_instr) {
24976 unsigned char opcode;
24977
24978- if (probe_kernel_address(instr, opcode))
24979+ if (user_mode(regs)) {
24980+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
24981+ break;
24982+ } else if (probe_kernel_address(instr, opcode))
24983 break;
24984
24985 instr++;
24986@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
24987 force_sig_info(si_signo, &info, tsk);
24988 }
24989
24990+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24991+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
24992+#endif
24993+
24994+#ifdef CONFIG_PAX_EMUTRAMP
24995+static int pax_handle_fetch_fault(struct pt_regs *regs);
24996+#endif
24997+
24998+#ifdef CONFIG_PAX_PAGEEXEC
24999+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
25000+{
25001+ pgd_t *pgd;
25002+ pud_t *pud;
25003+ pmd_t *pmd;
25004+
25005+ pgd = pgd_offset(mm, address);
25006+ if (!pgd_present(*pgd))
25007+ return NULL;
25008+ pud = pud_offset(pgd, address);
25009+ if (!pud_present(*pud))
25010+ return NULL;
25011+ pmd = pmd_offset(pud, address);
25012+ if (!pmd_present(*pmd))
25013+ return NULL;
25014+ return pmd;
25015+}
25016+#endif
25017+
25018 DEFINE_SPINLOCK(pgd_lock);
25019 LIST_HEAD(pgd_list);
25020
25021@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
25022 address += PMD_SIZE) {
25023
25024 unsigned long flags;
25025+
25026+#ifdef CONFIG_PAX_PER_CPU_PGD
25027+ unsigned long cpu;
25028+#else
25029 struct page *page;
25030+#endif
25031
25032 spin_lock_irqsave(&pgd_lock, flags);
25033+
25034+#ifdef CONFIG_PAX_PER_CPU_PGD
25035+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25036+ pgd_t *pgd = get_cpu_pgd(cpu);
25037+#else
25038 list_for_each_entry(page, &pgd_list, lru) {
25039- if (!vmalloc_sync_one(page_address(page), address))
25040+ pgd_t *pgd = page_address(page);
25041+#endif
25042+
25043+ if (!vmalloc_sync_one(pgd, address))
25044 break;
25045 }
25046 spin_unlock_irqrestore(&pgd_lock, flags);
25047@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
25048 * an interrupt in the middle of a task switch..
25049 */
25050 pgd_paddr = read_cr3();
25051+
25052+#ifdef CONFIG_PAX_PER_CPU_PGD
25053+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
25054+#endif
25055+
25056 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
25057 if (!pmd_k)
25058 return -1;
25059@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
25060
25061 const pgd_t *pgd_ref = pgd_offset_k(address);
25062 unsigned long flags;
25063+
25064+#ifdef CONFIG_PAX_PER_CPU_PGD
25065+ unsigned long cpu;
25066+#else
25067 struct page *page;
25068+#endif
25069
25070 if (pgd_none(*pgd_ref))
25071 continue;
25072
25073 spin_lock_irqsave(&pgd_lock, flags);
25074+
25075+#ifdef CONFIG_PAX_PER_CPU_PGD
25076+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25077+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25078+#else
25079 list_for_each_entry(page, &pgd_list, lru) {
25080 pgd_t *pgd;
25081 pgd = (pgd_t *)page_address(page) + pgd_index(address);
25082+#endif
25083+
25084 if (pgd_none(*pgd))
25085 set_pgd(pgd, *pgd_ref);
25086 else
25087@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
25088 * happen within a race in page table update. In the later
25089 * case just flush:
25090 */
25091+
25092+#ifdef CONFIG_PAX_PER_CPU_PGD
25093+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
25094+ pgd = pgd_offset_cpu(smp_processor_id(), address);
25095+#else
25096 pgd = pgd_offset(current->active_mm, address);
25097+#endif
25098+
25099 pgd_ref = pgd_offset_k(address);
25100 if (pgd_none(*pgd_ref))
25101 return -1;
25102@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
25103 static int is_errata100(struct pt_regs *regs, unsigned long address)
25104 {
25105 #ifdef CONFIG_X86_64
25106- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
25107+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
25108 return 1;
25109 #endif
25110 return 0;
25111@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
25112 }
25113
25114 static const char nx_warning[] = KERN_CRIT
25115-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
25116+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
25117
25118 static void
25119 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25120@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
25121 if (!oops_may_print())
25122 return;
25123
25124- if (error_code & PF_INSTR) {
25125+ if (nx_enabled && (error_code & PF_INSTR)) {
25126 unsigned int level;
25127
25128 pte_t *pte = lookup_address(address, &level);
25129
25130 if (pte && pte_present(*pte) && !pte_exec(*pte))
25131- printk(nx_warning, current_uid());
25132+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
25133 }
25134
25135+#ifdef CONFIG_PAX_KERNEXEC
25136+ if (init_mm.start_code <= address && address < init_mm.end_code) {
25137+ if (current->signal->curr_ip)
25138+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25139+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
25140+ else
25141+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
25142+ current->comm, task_pid_nr(current), current_uid(), current_euid());
25143+ }
25144+#endif
25145+
25146 printk(KERN_ALERT "BUG: unable to handle kernel ");
25147 if (address < PAGE_SIZE)
25148 printk(KERN_CONT "NULL pointer dereference");
25149@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25150 {
25151 struct task_struct *tsk = current;
25152
25153+#ifdef CONFIG_X86_64
25154+ struct mm_struct *mm = tsk->mm;
25155+
25156+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
25157+ if (regs->ip == (unsigned long)vgettimeofday) {
25158+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
25159+ return;
25160+ } else if (regs->ip == (unsigned long)vtime) {
25161+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
25162+ return;
25163+ } else if (regs->ip == (unsigned long)vgetcpu) {
25164+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
25165+ return;
25166+ }
25167+ }
25168+#endif
25169+
25170 /* User mode accesses just cause a SIGSEGV */
25171 if (error_code & PF_USER) {
25172 /*
25173@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
25174 if (is_errata100(regs, address))
25175 return;
25176
25177+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25178+ if (pax_is_fetch_fault(regs, error_code, address)) {
25179+
25180+#ifdef CONFIG_PAX_EMUTRAMP
25181+ switch (pax_handle_fetch_fault(regs)) {
25182+ case 2:
25183+ return;
25184+ }
25185+#endif
25186+
25187+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25188+ do_group_exit(SIGKILL);
25189+ }
25190+#endif
25191+
25192 if (unlikely(show_unhandled_signals))
25193 show_signal_msg(regs, error_code, address, tsk);
25194
25195@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
25196 if (fault & VM_FAULT_HWPOISON) {
25197 printk(KERN_ERR
25198 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
25199- tsk->comm, tsk->pid, address);
25200+ tsk->comm, task_pid_nr(tsk), address);
25201 code = BUS_MCEERR_AR;
25202 }
25203 #endif
25204@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
25205 return 1;
25206 }
25207
25208+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25209+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
25210+{
25211+ pte_t *pte;
25212+ pmd_t *pmd;
25213+ spinlock_t *ptl;
25214+ unsigned char pte_mask;
25215+
25216+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
25217+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
25218+ return 0;
25219+
25220+ /* PaX: it's our fault, let's handle it if we can */
25221+
25222+ /* PaX: take a look at read faults before acquiring any locks */
25223+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
25224+ /* instruction fetch attempt from a protected page in user mode */
25225+ up_read(&mm->mmap_sem);
25226+
25227+#ifdef CONFIG_PAX_EMUTRAMP
25228+ switch (pax_handle_fetch_fault(regs)) {
25229+ case 2:
25230+ return 1;
25231+ }
25232+#endif
25233+
25234+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
25235+ do_group_exit(SIGKILL);
25236+ }
25237+
25238+ pmd = pax_get_pmd(mm, address);
25239+ if (unlikely(!pmd))
25240+ return 0;
25241+
25242+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
25243+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
25244+ pte_unmap_unlock(pte, ptl);
25245+ return 0;
25246+ }
25247+
25248+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
25249+ /* write attempt to a protected page in user mode */
25250+ pte_unmap_unlock(pte, ptl);
25251+ return 0;
25252+ }
25253+
25254+#ifdef CONFIG_SMP
25255+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
25256+#else
25257+ if (likely(address > get_limit(regs->cs)))
25258+#endif
25259+ {
25260+ set_pte(pte, pte_mkread(*pte));
25261+ __flush_tlb_one(address);
25262+ pte_unmap_unlock(pte, ptl);
25263+ up_read(&mm->mmap_sem);
25264+ return 1;
25265+ }
25266+
25267+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
25268+
25269+ /*
25270+ * PaX: fill DTLB with user rights and retry
25271+ */
25272+ __asm__ __volatile__ (
25273+ "orb %2,(%1)\n"
25274+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
25275+/*
25276+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
25277+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
25278+ * page fault when examined during a TLB load attempt. this is true not only
25279+ * for PTEs holding a non-present entry but also present entries that will
25280+ * raise a page fault (such as those set up by PaX, or the copy-on-write
25281+ * mechanism). in effect it means that we do *not* need to flush the TLBs
25282+ * for our target pages since their PTEs are simply not in the TLBs at all.
25283+
25284+ * the best thing in omitting it is that we gain around 15-20% speed in the
25285+ * fast path of the page fault handler and can get rid of tracing since we
25286+ * can no longer flush unintended entries.
25287+ */
25288+ "invlpg (%0)\n"
25289+#endif
25290+ __copyuser_seg"testb $0,(%0)\n"
25291+ "xorb %3,(%1)\n"
25292+ :
25293+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
25294+ : "memory", "cc");
25295+ pte_unmap_unlock(pte, ptl);
25296+ up_read(&mm->mmap_sem);
25297+ return 1;
25298+}
25299+#endif
25300+
25301 /*
25302 * Handle a spurious fault caused by a stale TLB entry.
25303 *
25304@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
25305 static inline int
25306 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
25307 {
25308+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
25309+ return 1;
25310+
25311 if (write) {
25312 /* write, present and write, not present: */
25313 if (unlikely(!(vma->vm_flags & VM_WRITE)))
25314@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25315 {
25316 struct vm_area_struct *vma;
25317 struct task_struct *tsk;
25318- unsigned long address;
25319 struct mm_struct *mm;
25320 int write;
25321 int fault;
25322
25323- tsk = current;
25324- mm = tsk->mm;
25325-
25326 /* Get the faulting address: */
25327- address = read_cr2();
25328+ unsigned long address = read_cr2();
25329+
25330+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25331+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
25332+ if (!search_exception_tables(regs->ip)) {
25333+ bad_area_nosemaphore(regs, error_code, address);
25334+ return;
25335+ }
25336+ if (address < PAX_USER_SHADOW_BASE) {
25337+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
25338+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
25339+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
25340+ } else
25341+ address -= PAX_USER_SHADOW_BASE;
25342+ }
25343+#endif
25344+
25345+ tsk = current;
25346+ mm = tsk->mm;
25347
25348 /*
25349 * Detect and handle instructions that would cause a page fault for
25350@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25351 * User-mode registers count as a user access even for any
25352 * potential system fault or CPU buglet:
25353 */
25354- if (user_mode_vm(regs)) {
25355+ if (user_mode(regs)) {
25356 local_irq_enable();
25357 error_code |= PF_USER;
25358 } else {
25359@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25360 might_sleep();
25361 }
25362
25363+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
25364+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
25365+ return;
25366+#endif
25367+
25368 vma = find_vma(mm, address);
25369 if (unlikely(!vma)) {
25370 bad_area(regs, error_code, address);
25371@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
25372 bad_area(regs, error_code, address);
25373 return;
25374 }
25375- if (error_code & PF_USER) {
25376- /*
25377- * Accessing the stack below %sp is always a bug.
25378- * The large cushion allows instructions like enter
25379- * and pusha to work. ("enter $65535, $31" pushes
25380- * 32 pointers and then decrements %sp by 65535.)
25381- */
25382- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
25383- bad_area(regs, error_code, address);
25384- return;
25385- }
25386+ /*
25387+ * Accessing the stack below %sp is always a bug.
25388+ * The large cushion allows instructions like enter
25389+ * and pusha to work. ("enter $65535, $31" pushes
25390+ * 32 pointers and then decrements %sp by 65535.)
25391+ */
25392+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
25393+ bad_area(regs, error_code, address);
25394+ return;
25395 }
25396+
25397+#ifdef CONFIG_PAX_SEGMEXEC
25398+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
25399+ bad_area(regs, error_code, address);
25400+ return;
25401+ }
25402+#endif
25403+
25404 if (unlikely(expand_stack(vma, address))) {
25405 bad_area(regs, error_code, address);
25406 return;
25407@@ -1146,3 +1390,292 @@ good_area:
25408
25409 up_read(&mm->mmap_sem);
25410 }
25411+
25412+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25413+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
25414+{
25415+ struct mm_struct *mm = current->mm;
25416+ unsigned long ip = regs->ip;
25417+
25418+ if (v8086_mode(regs))
25419+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
25420+
25421+#ifdef CONFIG_PAX_PAGEEXEC
25422+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
25423+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
25424+ return true;
25425+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
25426+ return true;
25427+ return false;
25428+ }
25429+#endif
25430+
25431+#ifdef CONFIG_PAX_SEGMEXEC
25432+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
25433+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
25434+ return true;
25435+ return false;
25436+ }
25437+#endif
25438+
25439+ return false;
25440+}
25441+#endif
25442+
25443+#ifdef CONFIG_PAX_EMUTRAMP
25444+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
25445+{
25446+ int err;
25447+
25448+ do { /* PaX: libffi trampoline emulation */
25449+ unsigned char mov, jmp;
25450+ unsigned int addr1, addr2;
25451+
25452+#ifdef CONFIG_X86_64
25453+ if ((regs->ip + 9) >> 32)
25454+ break;
25455+#endif
25456+
25457+ err = get_user(mov, (unsigned char __user *)regs->ip);
25458+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25459+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25460+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25461+
25462+ if (err)
25463+ break;
25464+
25465+ if (mov == 0xB8 && jmp == 0xE9) {
25466+ regs->ax = addr1;
25467+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25468+ return 2;
25469+ }
25470+ } while (0);
25471+
25472+ do { /* PaX: gcc trampoline emulation #1 */
25473+ unsigned char mov1, mov2;
25474+ unsigned short jmp;
25475+ unsigned int addr1, addr2;
25476+
25477+#ifdef CONFIG_X86_64
25478+ if ((regs->ip + 11) >> 32)
25479+ break;
25480+#endif
25481+
25482+ err = get_user(mov1, (unsigned char __user *)regs->ip);
25483+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25484+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
25485+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25486+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
25487+
25488+ if (err)
25489+ break;
25490+
25491+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
25492+ regs->cx = addr1;
25493+ regs->ax = addr2;
25494+ regs->ip = addr2;
25495+ return 2;
25496+ }
25497+ } while (0);
25498+
25499+ do { /* PaX: gcc trampoline emulation #2 */
25500+ unsigned char mov, jmp;
25501+ unsigned int addr1, addr2;
25502+
25503+#ifdef CONFIG_X86_64
25504+ if ((regs->ip + 9) >> 32)
25505+ break;
25506+#endif
25507+
25508+ err = get_user(mov, (unsigned char __user *)regs->ip);
25509+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
25510+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
25511+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
25512+
25513+ if (err)
25514+ break;
25515+
25516+ if (mov == 0xB9 && jmp == 0xE9) {
25517+ regs->cx = addr1;
25518+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
25519+ return 2;
25520+ }
25521+ } while (0);
25522+
25523+ return 1; /* PaX in action */
25524+}
25525+
25526+#ifdef CONFIG_X86_64
25527+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
25528+{
25529+ int err;
25530+
25531+ do { /* PaX: libffi trampoline emulation */
25532+ unsigned short mov1, mov2, jmp1;
25533+ unsigned char stcclc, jmp2;
25534+ unsigned long addr1, addr2;
25535+
25536+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25537+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25538+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25539+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25540+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
25541+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
25542+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
25543+
25544+ if (err)
25545+ break;
25546+
25547+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25548+ regs->r11 = addr1;
25549+ regs->r10 = addr2;
25550+ if (stcclc == 0xF8)
25551+ regs->flags &= ~X86_EFLAGS_CF;
25552+ else
25553+ regs->flags |= X86_EFLAGS_CF;
25554+ regs->ip = addr1;
25555+ return 2;
25556+ }
25557+ } while (0);
25558+
25559+ do { /* PaX: gcc trampoline emulation #1 */
25560+ unsigned short mov1, mov2, jmp1;
25561+ unsigned char jmp2;
25562+ unsigned int addr1;
25563+ unsigned long addr2;
25564+
25565+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25566+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
25567+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
25568+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
25569+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
25570+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
25571+
25572+ if (err)
25573+ break;
25574+
25575+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25576+ regs->r11 = addr1;
25577+ regs->r10 = addr2;
25578+ regs->ip = addr1;
25579+ return 2;
25580+ }
25581+ } while (0);
25582+
25583+ do { /* PaX: gcc trampoline emulation #2 */
25584+ unsigned short mov1, mov2, jmp1;
25585+ unsigned char jmp2;
25586+ unsigned long addr1, addr2;
25587+
25588+ err = get_user(mov1, (unsigned short __user *)regs->ip);
25589+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
25590+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
25591+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
25592+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
25593+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
25594+
25595+ if (err)
25596+ break;
25597+
25598+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
25599+ regs->r11 = addr1;
25600+ regs->r10 = addr2;
25601+ regs->ip = addr1;
25602+ return 2;
25603+ }
25604+ } while (0);
25605+
25606+ return 1; /* PaX in action */
25607+}
25608+#endif
25609+
25610+/*
25611+ * PaX: decide what to do with offenders (regs->ip = fault address)
25612+ *
25613+ * returns 1 when task should be killed
25614+ * 2 when gcc trampoline was detected
25615+ */
25616+static int pax_handle_fetch_fault(struct pt_regs *regs)
25617+{
25618+ if (v8086_mode(regs))
25619+ return 1;
25620+
25621+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
25622+ return 1;
25623+
25624+#ifdef CONFIG_X86_32
25625+ return pax_handle_fetch_fault_32(regs);
25626+#else
25627+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
25628+ return pax_handle_fetch_fault_32(regs);
25629+ else
25630+ return pax_handle_fetch_fault_64(regs);
25631+#endif
25632+}
25633+#endif
25634+
25635+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25636+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
25637+{
25638+ long i;
25639+
25640+ printk(KERN_ERR "PAX: bytes at PC: ");
25641+ for (i = 0; i < 20; i++) {
25642+ unsigned char c;
25643+ if (get_user(c, (unsigned char __force_user *)pc+i))
25644+ printk(KERN_CONT "?? ");
25645+ else
25646+ printk(KERN_CONT "%02x ", c);
25647+ }
25648+ printk("\n");
25649+
25650+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
25651+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
25652+ unsigned long c;
25653+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
25654+#ifdef CONFIG_X86_32
25655+ printk(KERN_CONT "???????? ");
25656+#else
25657+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
25658+ printk(KERN_CONT "???????? ???????? ");
25659+ else
25660+ printk(KERN_CONT "???????????????? ");
25661+#endif
25662+ } else {
25663+#ifdef CONFIG_X86_64
25664+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
25665+ printk(KERN_CONT "%08x ", (unsigned int)c);
25666+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
25667+ } else
25668+#endif
25669+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
25670+ }
25671+ }
25672+ printk("\n");
25673+}
25674+#endif
25675+
25676+/**
25677+ * probe_kernel_write(): safely attempt to write to a location
25678+ * @dst: address to write to
25679+ * @src: pointer to the data that shall be written
25680+ * @size: size of the data chunk
25681+ *
25682+ * Safely write to address @dst from the buffer at @src. If a kernel fault
25683+ * happens, handle that and return -EFAULT.
25684+ */
25685+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
25686+{
25687+ long ret;
25688+ mm_segment_t old_fs = get_fs();
25689+
25690+ set_fs(KERNEL_DS);
25691+ pagefault_disable();
25692+ pax_open_kernel();
25693+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
25694+ pax_close_kernel();
25695+ pagefault_enable();
25696+ set_fs(old_fs);
25697+
25698+ return ret ? -EFAULT : 0;
25699+}
25700diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
25701index 71da1bc..7a16bf4 100644
25702--- a/arch/x86/mm/gup.c
25703+++ b/arch/x86/mm/gup.c
25704@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
25705 addr = start;
25706 len = (unsigned long) nr_pages << PAGE_SHIFT;
25707 end = start + len;
25708- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25709+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
25710 (void __user *)start, len)))
25711 return 0;
25712
25713diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
25714index 63a6ba6..79abd7a 100644
25715--- a/arch/x86/mm/highmem_32.c
25716+++ b/arch/x86/mm/highmem_32.c
25717@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
25718 idx = type + KM_TYPE_NR*smp_processor_id();
25719 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25720 BUG_ON(!pte_none(*(kmap_pte-idx)));
25721+
25722+ pax_open_kernel();
25723 set_pte(kmap_pte-idx, mk_pte(page, prot));
25724+ pax_close_kernel();
25725
25726 return (void *)vaddr;
25727 }
25728diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
25729index f46c3407..6ff9a26 100644
25730--- a/arch/x86/mm/hugetlbpage.c
25731+++ b/arch/x86/mm/hugetlbpage.c
25732@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
25733 struct hstate *h = hstate_file(file);
25734 struct mm_struct *mm = current->mm;
25735 struct vm_area_struct *vma;
25736- unsigned long start_addr;
25737+ unsigned long start_addr, pax_task_size = TASK_SIZE;
25738+
25739+#ifdef CONFIG_PAX_SEGMEXEC
25740+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25741+ pax_task_size = SEGMEXEC_TASK_SIZE;
25742+#endif
25743+
25744+ pax_task_size -= PAGE_SIZE;
25745
25746 if (len > mm->cached_hole_size) {
25747- start_addr = mm->free_area_cache;
25748+ start_addr = mm->free_area_cache;
25749 } else {
25750- start_addr = TASK_UNMAPPED_BASE;
25751- mm->cached_hole_size = 0;
25752+ start_addr = mm->mmap_base;
25753+ mm->cached_hole_size = 0;
25754 }
25755
25756 full_search:
25757@@ -281,26 +288,27 @@ full_search:
25758
25759 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
25760 /* At this point: (!vma || addr < vma->vm_end). */
25761- if (TASK_SIZE - len < addr) {
25762+ if (pax_task_size - len < addr) {
25763 /*
25764 * Start a new search - just in case we missed
25765 * some holes.
25766 */
25767- if (start_addr != TASK_UNMAPPED_BASE) {
25768- start_addr = TASK_UNMAPPED_BASE;
25769+ if (start_addr != mm->mmap_base) {
25770+ start_addr = mm->mmap_base;
25771 mm->cached_hole_size = 0;
25772 goto full_search;
25773 }
25774 return -ENOMEM;
25775 }
25776- if (!vma || addr + len <= vma->vm_start) {
25777- mm->free_area_cache = addr + len;
25778- return addr;
25779- }
25780+ if (check_heap_stack_gap(vma, addr, len))
25781+ break;
25782 if (addr + mm->cached_hole_size < vma->vm_start)
25783 mm->cached_hole_size = vma->vm_start - addr;
25784 addr = ALIGN(vma->vm_end, huge_page_size(h));
25785 }
25786+
25787+ mm->free_area_cache = addr + len;
25788+ return addr;
25789 }
25790
25791 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25792@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25793 {
25794 struct hstate *h = hstate_file(file);
25795 struct mm_struct *mm = current->mm;
25796- struct vm_area_struct *vma, *prev_vma;
25797- unsigned long base = mm->mmap_base, addr = addr0;
25798+ struct vm_area_struct *vma;
25799+ unsigned long base = mm->mmap_base, addr;
25800 unsigned long largest_hole = mm->cached_hole_size;
25801- int first_time = 1;
25802
25803 /* don't allow allocations above current base */
25804 if (mm->free_area_cache > base)
25805@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
25806 largest_hole = 0;
25807 mm->free_area_cache = base;
25808 }
25809-try_again:
25810+
25811 /* make sure it can fit in the remaining address space */
25812 if (mm->free_area_cache < len)
25813 goto fail;
25814
25815 /* either no address requested or cant fit in requested address hole */
25816- addr = (mm->free_area_cache - len) & huge_page_mask(h);
25817+ addr = (mm->free_area_cache - len);
25818 do {
25819+ addr &= huge_page_mask(h);
25820+ vma = find_vma(mm, addr);
25821 /*
25822 * Lookup failure means no vma is above this address,
25823 * i.e. return with success:
25824- */
25825- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
25826- return addr;
25827-
25828- /*
25829 * new region fits between prev_vma->vm_end and
25830 * vma->vm_start, use it:
25831 */
25832- if (addr + len <= vma->vm_start &&
25833- (!prev_vma || (addr >= prev_vma->vm_end))) {
25834+ if (check_heap_stack_gap(vma, addr, len)) {
25835 /* remember the address as a hint for next time */
25836- mm->cached_hole_size = largest_hole;
25837- return (mm->free_area_cache = addr);
25838- } else {
25839- /* pull free_area_cache down to the first hole */
25840- if (mm->free_area_cache == vma->vm_end) {
25841- mm->free_area_cache = vma->vm_start;
25842- mm->cached_hole_size = largest_hole;
25843- }
25844+ mm->cached_hole_size = largest_hole;
25845+ return (mm->free_area_cache = addr);
25846+ }
25847+ /* pull free_area_cache down to the first hole */
25848+ if (mm->free_area_cache == vma->vm_end) {
25849+ mm->free_area_cache = vma->vm_start;
25850+ mm->cached_hole_size = largest_hole;
25851 }
25852
25853 /* remember the largest hole we saw so far */
25854 if (addr + largest_hole < vma->vm_start)
25855- largest_hole = vma->vm_start - addr;
25856+ largest_hole = vma->vm_start - addr;
25857
25858 /* try just below the current vma->vm_start */
25859- addr = (vma->vm_start - len) & huge_page_mask(h);
25860- } while (len <= vma->vm_start);
25861+ addr = skip_heap_stack_gap(vma, len);
25862+ } while (!IS_ERR_VALUE(addr));
25863
25864 fail:
25865 /*
25866- * if hint left us with no space for the requested
25867- * mapping then try again:
25868- */
25869- if (first_time) {
25870- mm->free_area_cache = base;
25871- largest_hole = 0;
25872- first_time = 0;
25873- goto try_again;
25874- }
25875- /*
25876 * A failed mmap() very likely causes application failure,
25877 * so fall back to the bottom-up function here. This scenario
25878 * can happen with large stack limits and large mmap()
25879 * allocations.
25880 */
25881- mm->free_area_cache = TASK_UNMAPPED_BASE;
25882+
25883+#ifdef CONFIG_PAX_SEGMEXEC
25884+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25885+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
25886+ else
25887+#endif
25888+
25889+ mm->mmap_base = TASK_UNMAPPED_BASE;
25890+
25891+#ifdef CONFIG_PAX_RANDMMAP
25892+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25893+ mm->mmap_base += mm->delta_mmap;
25894+#endif
25895+
25896+ mm->free_area_cache = mm->mmap_base;
25897 mm->cached_hole_size = ~0UL;
25898 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
25899 len, pgoff, flags);
25900@@ -387,6 +393,7 @@ fail:
25901 /*
25902 * Restore the topdown base:
25903 */
25904+ mm->mmap_base = base;
25905 mm->free_area_cache = base;
25906 mm->cached_hole_size = ~0UL;
25907
25908@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25909 struct hstate *h = hstate_file(file);
25910 struct mm_struct *mm = current->mm;
25911 struct vm_area_struct *vma;
25912+ unsigned long pax_task_size = TASK_SIZE;
25913
25914 if (len & ~huge_page_mask(h))
25915 return -EINVAL;
25916- if (len > TASK_SIZE)
25917+
25918+#ifdef CONFIG_PAX_SEGMEXEC
25919+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25920+ pax_task_size = SEGMEXEC_TASK_SIZE;
25921+#endif
25922+
25923+ pax_task_size -= PAGE_SIZE;
25924+
25925+ if (len > pax_task_size)
25926 return -ENOMEM;
25927
25928 if (flags & MAP_FIXED) {
25929@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
25930 if (addr) {
25931 addr = ALIGN(addr, huge_page_size(h));
25932 vma = find_vma(mm, addr);
25933- if (TASK_SIZE - len >= addr &&
25934- (!vma || addr + len <= vma->vm_start))
25935+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
25936 return addr;
25937 }
25938 if (mm->get_unmapped_area == arch_get_unmapped_area)
25939diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
25940index 73ffd55..f61c2a7 100644
25941--- a/arch/x86/mm/init.c
25942+++ b/arch/x86/mm/init.c
25943@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
25944 * cause a hotspot and fill up ZONE_DMA. The page tables
25945 * need roughly 0.5KB per GB.
25946 */
25947-#ifdef CONFIG_X86_32
25948- start = 0x7000;
25949-#else
25950- start = 0x8000;
25951-#endif
25952+ start = 0x100000;
25953 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
25954 tables, PAGE_SIZE);
25955 if (e820_table_start == -1UL)
25956@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25957 #endif
25958
25959 set_nx();
25960- if (nx_enabled)
25961+ if (nx_enabled && cpu_has_nx)
25962 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
25963
25964 /* Enable PSE if available */
25965@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
25966 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
25967 * mmio resources as well as potential bios/acpi data regions.
25968 */
25969+
25970 int devmem_is_allowed(unsigned long pagenr)
25971 {
25972+#ifdef CONFIG_GRKERNSEC_KMEM
25973+ /* allow BDA */
25974+ if (!pagenr)
25975+ return 1;
25976+ /* allow EBDA */
25977+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
25978+ return 1;
25979+ /* allow ISA/video mem */
25980+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25981+ return 1;
25982+ /* throw out everything else below 1MB */
25983+ if (pagenr <= 256)
25984+ return 0;
25985+#else
25986 if (pagenr <= 256)
25987 return 1;
25988+#endif
25989+
25990 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
25991 return 0;
25992 if (!page_is_ram(pagenr))
25993@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
25994
25995 void free_initmem(void)
25996 {
25997+
25998+#ifdef CONFIG_PAX_KERNEXEC
25999+#ifdef CONFIG_X86_32
26000+ /* PaX: limit KERNEL_CS to actual size */
26001+ unsigned long addr, limit;
26002+ struct desc_struct d;
26003+ int cpu;
26004+
26005+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
26006+ limit = (limit - 1UL) >> PAGE_SHIFT;
26007+
26008+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
26009+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26010+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
26011+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
26012+ }
26013+
26014+ /* PaX: make KERNEL_CS read-only */
26015+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
26016+ if (!paravirt_enabled())
26017+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
26018+/*
26019+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
26020+ pgd = pgd_offset_k(addr);
26021+ pud = pud_offset(pgd, addr);
26022+ pmd = pmd_offset(pud, addr);
26023+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26024+ }
26025+*/
26026+#ifdef CONFIG_X86_PAE
26027+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
26028+/*
26029+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
26030+ pgd = pgd_offset_k(addr);
26031+ pud = pud_offset(pgd, addr);
26032+ pmd = pmd_offset(pud, addr);
26033+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26034+ }
26035+*/
26036+#endif
26037+
26038+#ifdef CONFIG_MODULES
26039+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
26040+#endif
26041+
26042+#else
26043+ pgd_t *pgd;
26044+ pud_t *pud;
26045+ pmd_t *pmd;
26046+ unsigned long addr, end;
26047+
26048+ /* PaX: make kernel code/rodata read-only, rest non-executable */
26049+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
26050+ pgd = pgd_offset_k(addr);
26051+ pud = pud_offset(pgd, addr);
26052+ pmd = pmd_offset(pud, addr);
26053+ if (!pmd_present(*pmd))
26054+ continue;
26055+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
26056+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26057+ else
26058+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
26059+ }
26060+
26061+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
26062+ end = addr + KERNEL_IMAGE_SIZE;
26063+ for (; addr < end; addr += PMD_SIZE) {
26064+ pgd = pgd_offset_k(addr);
26065+ pud = pud_offset(pgd, addr);
26066+ pmd = pmd_offset(pud, addr);
26067+ if (!pmd_present(*pmd))
26068+ continue;
26069+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
26070+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
26071+ }
26072+#endif
26073+
26074+ flush_tlb_all();
26075+#endif
26076+
26077 free_init_pages("unused kernel memory",
26078 (unsigned long)(&__init_begin),
26079 (unsigned long)(&__init_end));
26080diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
26081index 30938c1..bda3d5d 100644
26082--- a/arch/x86/mm/init_32.c
26083+++ b/arch/x86/mm/init_32.c
26084@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
26085 }
26086
26087 /*
26088- * Creates a middle page table and puts a pointer to it in the
26089- * given global directory entry. This only returns the gd entry
26090- * in non-PAE compilation mode, since the middle layer is folded.
26091- */
26092-static pmd_t * __init one_md_table_init(pgd_t *pgd)
26093-{
26094- pud_t *pud;
26095- pmd_t *pmd_table;
26096-
26097-#ifdef CONFIG_X86_PAE
26098- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
26099- if (after_bootmem)
26100- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
26101- else
26102- pmd_table = (pmd_t *)alloc_low_page();
26103- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
26104- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
26105- pud = pud_offset(pgd, 0);
26106- BUG_ON(pmd_table != pmd_offset(pud, 0));
26107-
26108- return pmd_table;
26109- }
26110-#endif
26111- pud = pud_offset(pgd, 0);
26112- pmd_table = pmd_offset(pud, 0);
26113-
26114- return pmd_table;
26115-}
26116-
26117-/*
26118 * Create a page table and place a pointer to it in a middle page
26119 * directory entry:
26120 */
26121@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
26122 page_table = (pte_t *)alloc_low_page();
26123
26124 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
26125+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26126+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
26127+#else
26128 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
26129+#endif
26130 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
26131 }
26132
26133 return pte_offset_kernel(pmd, 0);
26134 }
26135
26136+static pmd_t * __init one_md_table_init(pgd_t *pgd)
26137+{
26138+ pud_t *pud;
26139+ pmd_t *pmd_table;
26140+
26141+ pud = pud_offset(pgd, 0);
26142+ pmd_table = pmd_offset(pud, 0);
26143+
26144+ return pmd_table;
26145+}
26146+
26147 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
26148 {
26149 int pgd_idx = pgd_index(vaddr);
26150@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26151 int pgd_idx, pmd_idx;
26152 unsigned long vaddr;
26153 pgd_t *pgd;
26154+ pud_t *pud;
26155 pmd_t *pmd;
26156 pte_t *pte = NULL;
26157
26158@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26159 pgd = pgd_base + pgd_idx;
26160
26161 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
26162- pmd = one_md_table_init(pgd);
26163- pmd = pmd + pmd_index(vaddr);
26164+ pud = pud_offset(pgd, vaddr);
26165+ pmd = pmd_offset(pud, vaddr);
26166+
26167+#ifdef CONFIG_X86_PAE
26168+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26169+#endif
26170+
26171 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
26172 pmd++, pmd_idx++) {
26173 pte = page_table_kmap_check(one_page_table_init(pmd),
26174@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
26175 }
26176 }
26177
26178-static inline int is_kernel_text(unsigned long addr)
26179+static inline int is_kernel_text(unsigned long start, unsigned long end)
26180 {
26181- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
26182- return 1;
26183- return 0;
26184+ if ((start > ktla_ktva((unsigned long)_etext) ||
26185+ end <= ktla_ktva((unsigned long)_stext)) &&
26186+ (start > ktla_ktva((unsigned long)_einittext) ||
26187+ end <= ktla_ktva((unsigned long)_sinittext)) &&
26188+
26189+#ifdef CONFIG_ACPI_SLEEP
26190+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
26191+#endif
26192+
26193+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
26194+ return 0;
26195+ return 1;
26196 }
26197
26198 /*
26199@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
26200 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
26201 unsigned long start_pfn, end_pfn;
26202 pgd_t *pgd_base = swapper_pg_dir;
26203- int pgd_idx, pmd_idx, pte_ofs;
26204+ unsigned int pgd_idx, pmd_idx, pte_ofs;
26205 unsigned long pfn;
26206 pgd_t *pgd;
26207+ pud_t *pud;
26208 pmd_t *pmd;
26209 pte_t *pte;
26210 unsigned pages_2m, pages_4k;
26211@@ -278,8 +279,13 @@ repeat:
26212 pfn = start_pfn;
26213 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26214 pgd = pgd_base + pgd_idx;
26215- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
26216- pmd = one_md_table_init(pgd);
26217+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
26218+ pud = pud_offset(pgd, 0);
26219+ pmd = pmd_offset(pud, 0);
26220+
26221+#ifdef CONFIG_X86_PAE
26222+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
26223+#endif
26224
26225 if (pfn >= end_pfn)
26226 continue;
26227@@ -291,14 +297,13 @@ repeat:
26228 #endif
26229 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
26230 pmd++, pmd_idx++) {
26231- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
26232+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
26233
26234 /*
26235 * Map with big pages if possible, otherwise
26236 * create normal page tables:
26237 */
26238 if (use_pse) {
26239- unsigned int addr2;
26240 pgprot_t prot = PAGE_KERNEL_LARGE;
26241 /*
26242 * first pass will use the same initial
26243@@ -308,11 +313,7 @@ repeat:
26244 __pgprot(PTE_IDENT_ATTR |
26245 _PAGE_PSE);
26246
26247- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
26248- PAGE_OFFSET + PAGE_SIZE-1;
26249-
26250- if (is_kernel_text(addr) ||
26251- is_kernel_text(addr2))
26252+ if (is_kernel_text(address, address + PMD_SIZE))
26253 prot = PAGE_KERNEL_LARGE_EXEC;
26254
26255 pages_2m++;
26256@@ -329,7 +330,7 @@ repeat:
26257 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
26258 pte += pte_ofs;
26259 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
26260- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
26261+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
26262 pgprot_t prot = PAGE_KERNEL;
26263 /*
26264 * first pass will use the same initial
26265@@ -337,7 +338,7 @@ repeat:
26266 */
26267 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
26268
26269- if (is_kernel_text(addr))
26270+ if (is_kernel_text(address, address + PAGE_SIZE))
26271 prot = PAGE_KERNEL_EXEC;
26272
26273 pages_4k++;
26274@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
26275
26276 pud = pud_offset(pgd, va);
26277 pmd = pmd_offset(pud, va);
26278- if (!pmd_present(*pmd))
26279+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
26280 break;
26281
26282 pte = pte_offset_kernel(pmd, va);
26283@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
26284
26285 static void __init pagetable_init(void)
26286 {
26287- pgd_t *pgd_base = swapper_pg_dir;
26288-
26289- permanent_kmaps_init(pgd_base);
26290+ permanent_kmaps_init(swapper_pg_dir);
26291 }
26292
26293 #ifdef CONFIG_ACPI_SLEEP
26294@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
26295 * ACPI suspend needs this for resume, because things like the intel-agp
26296 * driver might have split up a kernel 4MB mapping.
26297 */
26298-char swsusp_pg_dir[PAGE_SIZE]
26299+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
26300 __attribute__ ((aligned(PAGE_SIZE)));
26301
26302 static inline void save_pg_dir(void)
26303 {
26304- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
26305+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
26306 }
26307 #else /* !CONFIG_ACPI_SLEEP */
26308 static inline void save_pg_dir(void)
26309@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
26310 flush_tlb_all();
26311 }
26312
26313-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26314+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
26315 EXPORT_SYMBOL_GPL(__supported_pte_mask);
26316
26317 /* user-defined highmem size */
26318@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
26319 * Initialize the boot-time allocator (with low memory only):
26320 */
26321 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
26322- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26323+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
26324 PAGE_SIZE);
26325 if (bootmap == -1L)
26326 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
26327@@ -864,6 +863,12 @@ void __init mem_init(void)
26328
26329 pci_iommu_alloc();
26330
26331+#ifdef CONFIG_PAX_PER_CPU_PGD
26332+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26333+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26334+ KERNEL_PGD_PTRS);
26335+#endif
26336+
26337 #ifdef CONFIG_FLATMEM
26338 BUG_ON(!mem_map);
26339 #endif
26340@@ -881,7 +886,7 @@ void __init mem_init(void)
26341 set_highmem_pages_init();
26342
26343 codesize = (unsigned long) &_etext - (unsigned long) &_text;
26344- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
26345+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
26346 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
26347
26348 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
26349@@ -923,10 +928,10 @@ void __init mem_init(void)
26350 ((unsigned long)&__init_end -
26351 (unsigned long)&__init_begin) >> 10,
26352
26353- (unsigned long)&_etext, (unsigned long)&_edata,
26354- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
26355+ (unsigned long)&_sdata, (unsigned long)&_edata,
26356+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
26357
26358- (unsigned long)&_text, (unsigned long)&_etext,
26359+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
26360 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
26361
26362 /*
26363@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
26364 if (!kernel_set_to_readonly)
26365 return;
26366
26367+ start = ktla_ktva(start);
26368 pr_debug("Set kernel text: %lx - %lx for read write\n",
26369 start, start+size);
26370
26371@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
26372 if (!kernel_set_to_readonly)
26373 return;
26374
26375+ start = ktla_ktva(start);
26376 pr_debug("Set kernel text: %lx - %lx for read only\n",
26377 start, start+size);
26378
26379@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
26380 unsigned long start = PFN_ALIGN(_text);
26381 unsigned long size = PFN_ALIGN(_etext) - start;
26382
26383+ start = ktla_ktva(start);
26384 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
26385 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
26386 size >> 10);
26387diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
26388index 7d095ad..25d2549 100644
26389--- a/arch/x86/mm/init_64.c
26390+++ b/arch/x86/mm/init_64.c
26391@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
26392 pmd = fill_pmd(pud, vaddr);
26393 pte = fill_pte(pmd, vaddr);
26394
26395+ pax_open_kernel();
26396 set_pte(pte, new_pte);
26397+ pax_close_kernel();
26398
26399 /*
26400 * It's enough to flush this one mapping.
26401@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
26402 pgd = pgd_offset_k((unsigned long)__va(phys));
26403 if (pgd_none(*pgd)) {
26404 pud = (pud_t *) spp_getpage();
26405- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
26406- _PAGE_USER));
26407+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
26408 }
26409 pud = pud_offset(pgd, (unsigned long)__va(phys));
26410 if (pud_none(*pud)) {
26411 pmd = (pmd_t *) spp_getpage();
26412- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
26413- _PAGE_USER));
26414+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
26415 }
26416 pmd = pmd_offset(pud, phys);
26417 BUG_ON(!pmd_none(*pmd));
26418@@ -675,6 +675,12 @@ void __init mem_init(void)
26419
26420 pci_iommu_alloc();
26421
26422+#ifdef CONFIG_PAX_PER_CPU_PGD
26423+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
26424+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26425+ KERNEL_PGD_PTRS);
26426+#endif
26427+
26428 /* clear_bss() already clear the empty_zero_page */
26429
26430 reservedpages = 0;
26431@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
26432 static struct vm_area_struct gate_vma = {
26433 .vm_start = VSYSCALL_START,
26434 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
26435- .vm_page_prot = PAGE_READONLY_EXEC,
26436- .vm_flags = VM_READ | VM_EXEC
26437+ .vm_page_prot = PAGE_READONLY,
26438+ .vm_flags = VM_READ
26439 };
26440
26441 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26442@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
26443
26444 const char *arch_vma_name(struct vm_area_struct *vma)
26445 {
26446- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26447+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26448 return "[vdso]";
26449 if (vma == &gate_vma)
26450 return "[vsyscall]";
26451diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
26452index 84e236c..69bd3f6 100644
26453--- a/arch/x86/mm/iomap_32.c
26454+++ b/arch/x86/mm/iomap_32.c
26455@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
26456 debug_kmap_atomic(type);
26457 idx = type + KM_TYPE_NR * smp_processor_id();
26458 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
26459+
26460+ pax_open_kernel();
26461 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
26462+ pax_close_kernel();
26463+
26464 arch_flush_lazy_mmu_mode();
26465
26466 return (void *)vaddr;
26467diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
26468index 2feb9bd..ab91e7b 100644
26469--- a/arch/x86/mm/ioremap.c
26470+++ b/arch/x86/mm/ioremap.c
26471@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
26472 * Second special case: Some BIOSen report the PC BIOS
26473 * area (640->1Mb) as ram even though it is not.
26474 */
26475- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
26476- pagenr < (BIOS_END >> PAGE_SHIFT))
26477+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
26478+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
26479 return 0;
26480
26481 for (i = 0; i < e820.nr_map; i++) {
26482@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
26483 /*
26484 * Don't allow anybody to remap normal RAM that we're using..
26485 */
26486- for (pfn = phys_addr >> PAGE_SHIFT;
26487- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
26488- pfn++) {
26489-
26490+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
26491 int is_ram = page_is_ram(pfn);
26492
26493- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
26494+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
26495 return NULL;
26496 WARN_ON_ONCE(is_ram);
26497 }
26498@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
26499
26500 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
26501 if (page_is_ram(start >> PAGE_SHIFT))
26502+#ifdef CONFIG_HIGHMEM
26503+ if ((start >> PAGE_SHIFT) < max_low_pfn)
26504+#endif
26505 return __va(phys);
26506
26507 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
26508@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
26509 early_param("early_ioremap_debug", early_ioremap_debug_setup);
26510
26511 static __initdata int after_paging_init;
26512-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
26513+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
26514
26515 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
26516 {
26517@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
26518 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
26519
26520 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
26521- memset(bm_pte, 0, sizeof(bm_pte));
26522- pmd_populate_kernel(&init_mm, pmd, bm_pte);
26523+ pmd_populate_user(&init_mm, pmd, bm_pte);
26524
26525 /*
26526 * The boot-ioremap range spans multiple pmds, for which
26527diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
26528index 8cc1833..1abbc5b 100644
26529--- a/arch/x86/mm/kmemcheck/kmemcheck.c
26530+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
26531@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
26532 * memory (e.g. tracked pages)? For now, we need this to avoid
26533 * invoking kmemcheck for PnP BIOS calls.
26534 */
26535- if (regs->flags & X86_VM_MASK)
26536+ if (v8086_mode(regs))
26537 return false;
26538- if (regs->cs != __KERNEL_CS)
26539+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
26540 return false;
26541
26542 pte = kmemcheck_pte_lookup(address);
26543diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
26544index c9e57af..07a321b 100644
26545--- a/arch/x86/mm/mmap.c
26546+++ b/arch/x86/mm/mmap.c
26547@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
26548 * Leave an at least ~128 MB hole with possible stack randomization.
26549 */
26550 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
26551-#define MAX_GAP (TASK_SIZE/6*5)
26552+#define MAX_GAP (pax_task_size/6*5)
26553
26554 /*
26555 * True on X86_32 or when emulating IA32 on X86_64
26556@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
26557 return rnd << PAGE_SHIFT;
26558 }
26559
26560-static unsigned long mmap_base(void)
26561+static unsigned long mmap_base(struct mm_struct *mm)
26562 {
26563 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
26564+ unsigned long pax_task_size = TASK_SIZE;
26565+
26566+#ifdef CONFIG_PAX_SEGMEXEC
26567+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26568+ pax_task_size = SEGMEXEC_TASK_SIZE;
26569+#endif
26570
26571 if (gap < MIN_GAP)
26572 gap = MIN_GAP;
26573 else if (gap > MAX_GAP)
26574 gap = MAX_GAP;
26575
26576- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
26577+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
26578 }
26579
26580 /*
26581 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
26582 * does, but not when emulating X86_32
26583 */
26584-static unsigned long mmap_legacy_base(void)
26585+static unsigned long mmap_legacy_base(struct mm_struct *mm)
26586 {
26587- if (mmap_is_ia32())
26588+ if (mmap_is_ia32()) {
26589+
26590+#ifdef CONFIG_PAX_SEGMEXEC
26591+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
26592+ return SEGMEXEC_TASK_UNMAPPED_BASE;
26593+ else
26594+#endif
26595+
26596 return TASK_UNMAPPED_BASE;
26597- else
26598+ } else
26599 return TASK_UNMAPPED_BASE + mmap_rnd();
26600 }
26601
26602@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
26603 void arch_pick_mmap_layout(struct mm_struct *mm)
26604 {
26605 if (mmap_is_legacy()) {
26606- mm->mmap_base = mmap_legacy_base();
26607+ mm->mmap_base = mmap_legacy_base(mm);
26608+
26609+#ifdef CONFIG_PAX_RANDMMAP
26610+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26611+ mm->mmap_base += mm->delta_mmap;
26612+#endif
26613+
26614 mm->get_unmapped_area = arch_get_unmapped_area;
26615 mm->unmap_area = arch_unmap_area;
26616 } else {
26617- mm->mmap_base = mmap_base();
26618+ mm->mmap_base = mmap_base(mm);
26619+
26620+#ifdef CONFIG_PAX_RANDMMAP
26621+ if (mm->pax_flags & MF_PAX_RANDMMAP)
26622+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
26623+#endif
26624+
26625 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
26626 mm->unmap_area = arch_unmap_area_topdown;
26627 }
26628diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
26629index 132772a..b961f11 100644
26630--- a/arch/x86/mm/mmio-mod.c
26631+++ b/arch/x86/mm/mmio-mod.c
26632@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
26633 break;
26634 default:
26635 {
26636- unsigned char *ip = (unsigned char *)instptr;
26637+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
26638 my_trace->opcode = MMIO_UNKNOWN_OP;
26639 my_trace->width = 0;
26640 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
26641@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
26642 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26643 void __iomem *addr)
26644 {
26645- static atomic_t next_id;
26646+ static atomic_unchecked_t next_id;
26647 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
26648 /* These are page-unaligned. */
26649 struct mmiotrace_map map = {
26650@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
26651 .private = trace
26652 },
26653 .phys = offset,
26654- .id = atomic_inc_return(&next_id)
26655+ .id = atomic_inc_return_unchecked(&next_id)
26656 };
26657 map.map_id = trace->id;
26658
26659diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
26660index d253006..e56dd6a 100644
26661--- a/arch/x86/mm/numa_32.c
26662+++ b/arch/x86/mm/numa_32.c
26663@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
26664 }
26665 #endif
26666
26667-extern unsigned long find_max_low_pfn(void);
26668 extern unsigned long highend_pfn, highstart_pfn;
26669
26670 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
26671diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
26672index e1d1069..2251ff3 100644
26673--- a/arch/x86/mm/pageattr-test.c
26674+++ b/arch/x86/mm/pageattr-test.c
26675@@ -36,7 +36,7 @@ enum {
26676
26677 static int pte_testbit(pte_t pte)
26678 {
26679- return pte_flags(pte) & _PAGE_UNUSED1;
26680+ return pte_flags(pte) & _PAGE_CPA_TEST;
26681 }
26682
26683 struct split_state {
26684diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
26685index dd38bfb..b72c63e 100644
26686--- a/arch/x86/mm/pageattr.c
26687+++ b/arch/x86/mm/pageattr.c
26688@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26689 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
26690 */
26691 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
26692- pgprot_val(forbidden) |= _PAGE_NX;
26693+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26694
26695 /*
26696 * The kernel text needs to be executable for obvious reasons
26697 * Does not cover __inittext since that is gone later on. On
26698 * 64bit we do not enforce !NX on the low mapping
26699 */
26700- if (within(address, (unsigned long)_text, (unsigned long)_etext))
26701- pgprot_val(forbidden) |= _PAGE_NX;
26702+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
26703+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26704
26705+#ifdef CONFIG_DEBUG_RODATA
26706 /*
26707 * The .rodata section needs to be read-only. Using the pfn
26708 * catches all aliases.
26709@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
26710 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
26711 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
26712 pgprot_val(forbidden) |= _PAGE_RW;
26713+#endif
26714+
26715+#ifdef CONFIG_PAX_KERNEXEC
26716+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
26717+ pgprot_val(forbidden) |= _PAGE_RW;
26718+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
26719+ }
26720+#endif
26721
26722 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
26723
26724@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
26725 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
26726 {
26727 /* change init_mm */
26728+ pax_open_kernel();
26729 set_pte_atomic(kpte, pte);
26730+
26731 #ifdef CONFIG_X86_32
26732 if (!SHARED_KERNEL_PMD) {
26733+
26734+#ifdef CONFIG_PAX_PER_CPU_PGD
26735+ unsigned long cpu;
26736+#else
26737 struct page *page;
26738+#endif
26739
26740+#ifdef CONFIG_PAX_PER_CPU_PGD
26741+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26742+ pgd_t *pgd = get_cpu_pgd(cpu);
26743+#else
26744 list_for_each_entry(page, &pgd_list, lru) {
26745- pgd_t *pgd;
26746+ pgd_t *pgd = (pgd_t *)page_address(page);
26747+#endif
26748+
26749 pud_t *pud;
26750 pmd_t *pmd;
26751
26752- pgd = (pgd_t *)page_address(page) + pgd_index(address);
26753+ pgd += pgd_index(address);
26754 pud = pud_offset(pgd, address);
26755 pmd = pmd_offset(pud, address);
26756 set_pte_atomic((pte_t *)pmd, pte);
26757 }
26758 }
26759 #endif
26760+ pax_close_kernel();
26761 }
26762
26763 static int
26764diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
26765index e78cd0e..de0a817 100644
26766--- a/arch/x86/mm/pat.c
26767+++ b/arch/x86/mm/pat.c
26768@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
26769
26770 conflict:
26771 printk(KERN_INFO "%s:%d conflicting memory types "
26772- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
26773+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
26774 new->end, cattr_name(new->type), cattr_name(entry->type));
26775 return -EBUSY;
26776 }
26777@@ -559,7 +559,7 @@ unlock_ret:
26778
26779 if (err) {
26780 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
26781- current->comm, current->pid, start, end);
26782+ current->comm, task_pid_nr(current), start, end);
26783 }
26784
26785 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
26786@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26787 while (cursor < to) {
26788 if (!devmem_is_allowed(pfn)) {
26789 printk(KERN_INFO
26790- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26791- current->comm, from, to);
26792+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
26793+ current->comm, from, to, cursor);
26794 return 0;
26795 }
26796 cursor += PAGE_SIZE;
26797@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
26798 printk(KERN_INFO
26799 "%s:%d ioremap_change_attr failed %s "
26800 "for %Lx-%Lx\n",
26801- current->comm, current->pid,
26802+ current->comm, task_pid_nr(current),
26803 cattr_name(flags),
26804 base, (unsigned long long)(base + size));
26805 return -EINVAL;
26806@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
26807 free_memtype(paddr, paddr + size);
26808 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
26809 " for %Lx-%Lx, got %s\n",
26810- current->comm, current->pid,
26811+ current->comm, task_pid_nr(current),
26812 cattr_name(want_flags),
26813 (unsigned long long)paddr,
26814 (unsigned long long)(paddr + size),
26815diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
26816index df3d5c8..c2223e1 100644
26817--- a/arch/x86/mm/pf_in.c
26818+++ b/arch/x86/mm/pf_in.c
26819@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
26820 int i;
26821 enum reason_type rv = OTHERS;
26822
26823- p = (unsigned char *)ins_addr;
26824+ p = (unsigned char *)ktla_ktva(ins_addr);
26825 p += skip_prefix(p, &prf);
26826 p += get_opcode(p, &opcode);
26827
26828@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
26829 struct prefix_bits prf;
26830 int i;
26831
26832- p = (unsigned char *)ins_addr;
26833+ p = (unsigned char *)ktla_ktva(ins_addr);
26834 p += skip_prefix(p, &prf);
26835 p += get_opcode(p, &opcode);
26836
26837@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
26838 struct prefix_bits prf;
26839 int i;
26840
26841- p = (unsigned char *)ins_addr;
26842+ p = (unsigned char *)ktla_ktva(ins_addr);
26843 p += skip_prefix(p, &prf);
26844 p += get_opcode(p, &opcode);
26845
26846@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
26847 int i;
26848 unsigned long rv;
26849
26850- p = (unsigned char *)ins_addr;
26851+ p = (unsigned char *)ktla_ktva(ins_addr);
26852 p += skip_prefix(p, &prf);
26853 p += get_opcode(p, &opcode);
26854 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
26855@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
26856 int i;
26857 unsigned long rv;
26858
26859- p = (unsigned char *)ins_addr;
26860+ p = (unsigned char *)ktla_ktva(ins_addr);
26861 p += skip_prefix(p, &prf);
26862 p += get_opcode(p, &opcode);
26863 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
26864diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
26865index e0e6fad..c56b495 100644
26866--- a/arch/x86/mm/pgtable.c
26867+++ b/arch/x86/mm/pgtable.c
26868@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
26869 list_del(&page->lru);
26870 }
26871
26872-#define UNSHARED_PTRS_PER_PGD \
26873- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26874+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26875+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
26876
26877+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26878+{
26879+ while (count--)
26880+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
26881+}
26882+#endif
26883+
26884+#ifdef CONFIG_PAX_PER_CPU_PGD
26885+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
26886+{
26887+ while (count--)
26888+
26889+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26890+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
26891+#else
26892+ *dst++ = *src++;
26893+#endif
26894+
26895+}
26896+#endif
26897+
26898+#ifdef CONFIG_X86_64
26899+#define pxd_t pud_t
26900+#define pyd_t pgd_t
26901+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
26902+#define pxd_free(mm, pud) pud_free((mm), (pud))
26903+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
26904+#define pyd_offset(mm, address) pgd_offset((mm), (address))
26905+#define PYD_SIZE PGDIR_SIZE
26906+#else
26907+#define pxd_t pmd_t
26908+#define pyd_t pud_t
26909+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
26910+#define pxd_free(mm, pud) pmd_free((mm), (pud))
26911+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
26912+#define pyd_offset(mm, address) pud_offset((mm), (address))
26913+#define PYD_SIZE PUD_SIZE
26914+#endif
26915+
26916+#ifdef CONFIG_PAX_PER_CPU_PGD
26917+static inline void pgd_ctor(pgd_t *pgd) {}
26918+static inline void pgd_dtor(pgd_t *pgd) {}
26919+#else
26920 static void pgd_ctor(pgd_t *pgd)
26921 {
26922 /* If the pgd points to a shared pagetable level (either the
26923@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
26924 pgd_list_del(pgd);
26925 spin_unlock_irqrestore(&pgd_lock, flags);
26926 }
26927+#endif
26928
26929 /*
26930 * List of all pgd's needed for non-PAE so it can invalidate entries
26931@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
26932 * -- wli
26933 */
26934
26935-#ifdef CONFIG_X86_PAE
26936+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26937 /*
26938 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
26939 * updating the top-level pagetable entries to guarantee the
26940@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
26941 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
26942 * and initialize the kernel pmds here.
26943 */
26944-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
26945+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
26946
26947 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26948 {
26949@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
26950 */
26951 flush_tlb_mm(mm);
26952 }
26953+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
26954+#define PREALLOCATED_PXDS USER_PGD_PTRS
26955 #else /* !CONFIG_X86_PAE */
26956
26957 /* No need to prepopulate any pagetable entries in non-PAE modes. */
26958-#define PREALLOCATED_PMDS 0
26959+#define PREALLOCATED_PXDS 0
26960
26961 #endif /* CONFIG_X86_PAE */
26962
26963-static void free_pmds(pmd_t *pmds[])
26964+static void free_pxds(pxd_t *pxds[])
26965 {
26966 int i;
26967
26968- for(i = 0; i < PREALLOCATED_PMDS; i++)
26969- if (pmds[i])
26970- free_page((unsigned long)pmds[i]);
26971+ for(i = 0; i < PREALLOCATED_PXDS; i++)
26972+ if (pxds[i])
26973+ free_page((unsigned long)pxds[i]);
26974 }
26975
26976-static int preallocate_pmds(pmd_t *pmds[])
26977+static int preallocate_pxds(pxd_t *pxds[])
26978 {
26979 int i;
26980 bool failed = false;
26981
26982- for(i = 0; i < PREALLOCATED_PMDS; i++) {
26983- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
26984- if (pmd == NULL)
26985+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
26986+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
26987+ if (pxd == NULL)
26988 failed = true;
26989- pmds[i] = pmd;
26990+ pxds[i] = pxd;
26991 }
26992
26993 if (failed) {
26994- free_pmds(pmds);
26995+ free_pxds(pxds);
26996 return -ENOMEM;
26997 }
26998
26999@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
27000 * preallocate which never got a corresponding vma will need to be
27001 * freed manually.
27002 */
27003-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
27004+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
27005 {
27006 int i;
27007
27008- for(i = 0; i < PREALLOCATED_PMDS; i++) {
27009+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
27010 pgd_t pgd = pgdp[i];
27011
27012 if (pgd_val(pgd) != 0) {
27013- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
27014+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
27015
27016- pgdp[i] = native_make_pgd(0);
27017+ set_pgd(pgdp + i, native_make_pgd(0));
27018
27019- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
27020- pmd_free(mm, pmd);
27021+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
27022+ pxd_free(mm, pxd);
27023 }
27024 }
27025 }
27026
27027-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
27028+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
27029 {
27030- pud_t *pud;
27031+ pyd_t *pyd;
27032 unsigned long addr;
27033 int i;
27034
27035- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
27036+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
27037 return;
27038
27039- pud = pud_offset(pgd, 0);
27040+#ifdef CONFIG_X86_64
27041+ pyd = pyd_offset(mm, 0L);
27042+#else
27043+ pyd = pyd_offset(pgd, 0L);
27044+#endif
27045
27046- for (addr = i = 0; i < PREALLOCATED_PMDS;
27047- i++, pud++, addr += PUD_SIZE) {
27048- pmd_t *pmd = pmds[i];
27049+ for (addr = i = 0; i < PREALLOCATED_PXDS;
27050+ i++, pyd++, addr += PYD_SIZE) {
27051+ pxd_t *pxd = pxds[i];
27052
27053 if (i >= KERNEL_PGD_BOUNDARY)
27054- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27055- sizeof(pmd_t) * PTRS_PER_PMD);
27056+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
27057+ sizeof(pxd_t) * PTRS_PER_PMD);
27058
27059- pud_populate(mm, pud, pmd);
27060+ pyd_populate(mm, pyd, pxd);
27061 }
27062 }
27063
27064 pgd_t *pgd_alloc(struct mm_struct *mm)
27065 {
27066 pgd_t *pgd;
27067- pmd_t *pmds[PREALLOCATED_PMDS];
27068+ pxd_t *pxds[PREALLOCATED_PXDS];
27069+
27070 unsigned long flags;
27071
27072 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
27073@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27074
27075 mm->pgd = pgd;
27076
27077- if (preallocate_pmds(pmds) != 0)
27078+ if (preallocate_pxds(pxds) != 0)
27079 goto out_free_pgd;
27080
27081 if (paravirt_pgd_alloc(mm) != 0)
27082- goto out_free_pmds;
27083+ goto out_free_pxds;
27084
27085 /*
27086 * Make sure that pre-populating the pmds is atomic with
27087@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
27088 spin_lock_irqsave(&pgd_lock, flags);
27089
27090 pgd_ctor(pgd);
27091- pgd_prepopulate_pmd(mm, pgd, pmds);
27092+ pgd_prepopulate_pxd(mm, pgd, pxds);
27093
27094 spin_unlock_irqrestore(&pgd_lock, flags);
27095
27096 return pgd;
27097
27098-out_free_pmds:
27099- free_pmds(pmds);
27100+out_free_pxds:
27101+ free_pxds(pxds);
27102 out_free_pgd:
27103 free_page((unsigned long)pgd);
27104 out:
27105@@ -287,7 +338,7 @@ out:
27106
27107 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
27108 {
27109- pgd_mop_up_pmds(mm, pgd);
27110+ pgd_mop_up_pxds(mm, pgd);
27111 pgd_dtor(pgd);
27112 paravirt_pgd_free(mm, pgd);
27113 free_page((unsigned long)pgd);
27114diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
27115index 46c8834..fcab43d 100644
27116--- a/arch/x86/mm/pgtable_32.c
27117+++ b/arch/x86/mm/pgtable_32.c
27118@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
27119 return;
27120 }
27121 pte = pte_offset_kernel(pmd, vaddr);
27122+
27123+ pax_open_kernel();
27124 if (pte_val(pteval))
27125 set_pte_at(&init_mm, vaddr, pte, pteval);
27126 else
27127 pte_clear(&init_mm, vaddr, pte);
27128+ pax_close_kernel();
27129
27130 /*
27131 * It's enough to flush this one mapping.
27132diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
27133index 513d8ed..978c161 100644
27134--- a/arch/x86/mm/setup_nx.c
27135+++ b/arch/x86/mm/setup_nx.c
27136@@ -4,11 +4,10 @@
27137
27138 #include <asm/pgtable.h>
27139
27140+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
27141 int nx_enabled;
27142
27143-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27144-static int disable_nx __cpuinitdata;
27145-
27146+#ifndef CONFIG_PAX_PAGEEXEC
27147 /*
27148 * noexec = on|off
27149 *
27150@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
27151 if (!str)
27152 return -EINVAL;
27153 if (!strncmp(str, "on", 2)) {
27154- __supported_pte_mask |= _PAGE_NX;
27155- disable_nx = 0;
27156+ nx_enabled = 1;
27157 } else if (!strncmp(str, "off", 3)) {
27158- disable_nx = 1;
27159- __supported_pte_mask &= ~_PAGE_NX;
27160+ nx_enabled = 0;
27161 }
27162 return 0;
27163 }
27164 early_param("noexec", noexec_setup);
27165 #endif
27166+#endif
27167
27168 #ifdef CONFIG_X86_PAE
27169 void __init set_nx(void)
27170 {
27171- unsigned int v[4], l, h;
27172+ if (!nx_enabled && cpu_has_nx) {
27173+ unsigned l, h;
27174
27175- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
27176- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
27177-
27178- if ((v[3] & (1 << 20)) && !disable_nx) {
27179- rdmsr(MSR_EFER, l, h);
27180- l |= EFER_NX;
27181- wrmsr(MSR_EFER, l, h);
27182- nx_enabled = 1;
27183- __supported_pte_mask |= _PAGE_NX;
27184- }
27185+ __supported_pte_mask &= ~_PAGE_NX;
27186+ rdmsr(MSR_EFER, l, h);
27187+ l &= ~EFER_NX;
27188+ wrmsr(MSR_EFER, l, h);
27189 }
27190 }
27191 #else
27192@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
27193 unsigned long efer;
27194
27195 rdmsrl(MSR_EFER, efer);
27196- if (!(efer & EFER_NX) || disable_nx)
27197+ if (!(efer & EFER_NX) || !nx_enabled)
27198 __supported_pte_mask &= ~_PAGE_NX;
27199 }
27200 #endif
27201diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
27202index 36fe08e..b123d3a 100644
27203--- a/arch/x86/mm/tlb.c
27204+++ b/arch/x86/mm/tlb.c
27205@@ -61,7 +61,11 @@ void leave_mm(int cpu)
27206 BUG();
27207 cpumask_clear_cpu(cpu,
27208 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
27209+
27210+#ifndef CONFIG_PAX_PER_CPU_PGD
27211 load_cr3(swapper_pg_dir);
27212+#endif
27213+
27214 }
27215 EXPORT_SYMBOL_GPL(leave_mm);
27216
27217diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
27218index 829edf0..672adb3 100644
27219--- a/arch/x86/oprofile/backtrace.c
27220+++ b/arch/x86/oprofile/backtrace.c
27221@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
27222 {
27223 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
27224
27225- if (!user_mode_vm(regs)) {
27226+ if (!user_mode(regs)) {
27227 unsigned long stack = kernel_stack_pointer(regs);
27228 if (depth)
27229 dump_trace(NULL, regs, (unsigned long *)stack, 0,
27230diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
27231index e6a160a..36deff6 100644
27232--- a/arch/x86/oprofile/op_model_p4.c
27233+++ b/arch/x86/oprofile/op_model_p4.c
27234@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
27235 #endif
27236 }
27237
27238-static int inline addr_increment(void)
27239+static inline int addr_increment(void)
27240 {
27241 #ifdef CONFIG_SMP
27242 return smp_num_siblings == 2 ? 2 : 1;
27243diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
27244index 1331fcf..03901b2 100644
27245--- a/arch/x86/pci/common.c
27246+++ b/arch/x86/pci/common.c
27247@@ -31,8 +31,8 @@ int noioapicreroute = 1;
27248 int pcibios_last_bus = -1;
27249 unsigned long pirq_table_addr;
27250 struct pci_bus *pci_root_bus;
27251-struct pci_raw_ops *raw_pci_ops;
27252-struct pci_raw_ops *raw_pci_ext_ops;
27253+const struct pci_raw_ops *raw_pci_ops;
27254+const struct pci_raw_ops *raw_pci_ext_ops;
27255
27256 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
27257 int reg, int len, u32 *val)
27258diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
27259index 347d882..4baf6b6 100644
27260--- a/arch/x86/pci/direct.c
27261+++ b/arch/x86/pci/direct.c
27262@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
27263
27264 #undef PCI_CONF1_ADDRESS
27265
27266-struct pci_raw_ops pci_direct_conf1 = {
27267+const struct pci_raw_ops pci_direct_conf1 = {
27268 .read = pci_conf1_read,
27269 .write = pci_conf1_write,
27270 };
27271@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
27272
27273 #undef PCI_CONF2_ADDRESS
27274
27275-struct pci_raw_ops pci_direct_conf2 = {
27276+const struct pci_raw_ops pci_direct_conf2 = {
27277 .read = pci_conf2_read,
27278 .write = pci_conf2_write,
27279 };
27280@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
27281 * This should be close to trivial, but it isn't, because there are buggy
27282 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
27283 */
27284-static int __init pci_sanity_check(struct pci_raw_ops *o)
27285+static int __init pci_sanity_check(const struct pci_raw_ops *o)
27286 {
27287 u32 x = 0;
27288 int year, devfn;
27289diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
27290index f10a7e9..0425342 100644
27291--- a/arch/x86/pci/mmconfig_32.c
27292+++ b/arch/x86/pci/mmconfig_32.c
27293@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27294 return 0;
27295 }
27296
27297-static struct pci_raw_ops pci_mmcfg = {
27298+static const struct pci_raw_ops pci_mmcfg = {
27299 .read = pci_mmcfg_read,
27300 .write = pci_mmcfg_write,
27301 };
27302diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
27303index 94349f8..41600a7 100644
27304--- a/arch/x86/pci/mmconfig_64.c
27305+++ b/arch/x86/pci/mmconfig_64.c
27306@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
27307 return 0;
27308 }
27309
27310-static struct pci_raw_ops pci_mmcfg = {
27311+static const struct pci_raw_ops pci_mmcfg = {
27312 .read = pci_mmcfg_read,
27313 .write = pci_mmcfg_write,
27314 };
27315diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
27316index 8eb295e..86bd657 100644
27317--- a/arch/x86/pci/numaq_32.c
27318+++ b/arch/x86/pci/numaq_32.c
27319@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
27320
27321 #undef PCI_CONF1_MQ_ADDRESS
27322
27323-static struct pci_raw_ops pci_direct_conf1_mq = {
27324+static const struct pci_raw_ops pci_direct_conf1_mq = {
27325 .read = pci_conf1_mq_read,
27326 .write = pci_conf1_mq_write
27327 };
27328diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
27329index b889d82..5a58a0a 100644
27330--- a/arch/x86/pci/olpc.c
27331+++ b/arch/x86/pci/olpc.c
27332@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
27333 return 0;
27334 }
27335
27336-static struct pci_raw_ops pci_olpc_conf = {
27337+static const struct pci_raw_ops pci_olpc_conf = {
27338 .read = pci_olpc_read,
27339 .write = pci_olpc_write,
27340 };
27341diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
27342index 1c975cc..b8e16c2 100644
27343--- a/arch/x86/pci/pcbios.c
27344+++ b/arch/x86/pci/pcbios.c
27345@@ -56,50 +56,93 @@ union bios32 {
27346 static struct {
27347 unsigned long address;
27348 unsigned short segment;
27349-} bios32_indirect = { 0, __KERNEL_CS };
27350+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
27351
27352 /*
27353 * Returns the entry point for the given service, NULL on error
27354 */
27355
27356-static unsigned long bios32_service(unsigned long service)
27357+static unsigned long __devinit bios32_service(unsigned long service)
27358 {
27359 unsigned char return_code; /* %al */
27360 unsigned long address; /* %ebx */
27361 unsigned long length; /* %ecx */
27362 unsigned long entry; /* %edx */
27363 unsigned long flags;
27364+ struct desc_struct d, *gdt;
27365
27366 local_irq_save(flags);
27367- __asm__("lcall *(%%edi); cld"
27368+
27369+ gdt = get_cpu_gdt_table(smp_processor_id());
27370+
27371+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
27372+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27373+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
27374+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27375+
27376+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
27377 : "=a" (return_code),
27378 "=b" (address),
27379 "=c" (length),
27380 "=d" (entry)
27381 : "0" (service),
27382 "1" (0),
27383- "D" (&bios32_indirect));
27384+ "D" (&bios32_indirect),
27385+ "r"(__PCIBIOS_DS)
27386+ : "memory");
27387+
27388+ pax_open_kernel();
27389+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
27390+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
27391+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
27392+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
27393+ pax_close_kernel();
27394+
27395 local_irq_restore(flags);
27396
27397 switch (return_code) {
27398- case 0:
27399- return address + entry;
27400- case 0x80: /* Not present */
27401- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27402- return 0;
27403- default: /* Shouldn't happen */
27404- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27405- service, return_code);
27406+ case 0: {
27407+ int cpu;
27408+ unsigned char flags;
27409+
27410+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
27411+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
27412+ printk(KERN_WARNING "bios32_service: not valid\n");
27413 return 0;
27414+ }
27415+ address = address + PAGE_OFFSET;
27416+ length += 16UL; /* some BIOSs underreport this... */
27417+ flags = 4;
27418+ if (length >= 64*1024*1024) {
27419+ length >>= PAGE_SHIFT;
27420+ flags |= 8;
27421+ }
27422+
27423+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27424+ gdt = get_cpu_gdt_table(cpu);
27425+ pack_descriptor(&d, address, length, 0x9b, flags);
27426+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
27427+ pack_descriptor(&d, address, length, 0x93, flags);
27428+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
27429+ }
27430+ return entry;
27431+ }
27432+ case 0x80: /* Not present */
27433+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
27434+ return 0;
27435+ default: /* Shouldn't happen */
27436+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
27437+ service, return_code);
27438+ return 0;
27439 }
27440 }
27441
27442 static struct {
27443 unsigned long address;
27444 unsigned short segment;
27445-} pci_indirect = { 0, __KERNEL_CS };
27446+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
27447
27448-static int pci_bios_present;
27449+static int pci_bios_present __read_only;
27450
27451 static int __devinit check_pcibios(void)
27452 {
27453@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
27454 unsigned long flags, pcibios_entry;
27455
27456 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
27457- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
27458+ pci_indirect.address = pcibios_entry;
27459
27460 local_irq_save(flags);
27461- __asm__(
27462- "lcall *(%%edi); cld\n\t"
27463+ __asm__("movw %w6, %%ds\n\t"
27464+ "lcall *%%ss:(%%edi); cld\n\t"
27465+ "push %%ss\n\t"
27466+ "pop %%ds\n\t"
27467 "jc 1f\n\t"
27468 "xor %%ah, %%ah\n"
27469 "1:"
27470@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
27471 "=b" (ebx),
27472 "=c" (ecx)
27473 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
27474- "D" (&pci_indirect)
27475+ "D" (&pci_indirect),
27476+ "r" (__PCIBIOS_DS)
27477 : "memory");
27478 local_irq_restore(flags);
27479
27480@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27481
27482 switch (len) {
27483 case 1:
27484- __asm__("lcall *(%%esi); cld\n\t"
27485+ __asm__("movw %w6, %%ds\n\t"
27486+ "lcall *%%ss:(%%esi); cld\n\t"
27487+ "push %%ss\n\t"
27488+ "pop %%ds\n\t"
27489 "jc 1f\n\t"
27490 "xor %%ah, %%ah\n"
27491 "1:"
27492@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27493 : "1" (PCIBIOS_READ_CONFIG_BYTE),
27494 "b" (bx),
27495 "D" ((long)reg),
27496- "S" (&pci_indirect));
27497+ "S" (&pci_indirect),
27498+ "r" (__PCIBIOS_DS));
27499 /*
27500 * Zero-extend the result beyond 8 bits, do not trust the
27501 * BIOS having done it:
27502@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27503 *value &= 0xff;
27504 break;
27505 case 2:
27506- __asm__("lcall *(%%esi); cld\n\t"
27507+ __asm__("movw %w6, %%ds\n\t"
27508+ "lcall *%%ss:(%%esi); cld\n\t"
27509+ "push %%ss\n\t"
27510+ "pop %%ds\n\t"
27511 "jc 1f\n\t"
27512 "xor %%ah, %%ah\n"
27513 "1:"
27514@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27515 : "1" (PCIBIOS_READ_CONFIG_WORD),
27516 "b" (bx),
27517 "D" ((long)reg),
27518- "S" (&pci_indirect));
27519+ "S" (&pci_indirect),
27520+ "r" (__PCIBIOS_DS));
27521 /*
27522 * Zero-extend the result beyond 16 bits, do not trust the
27523 * BIOS having done it:
27524@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27525 *value &= 0xffff;
27526 break;
27527 case 4:
27528- __asm__("lcall *(%%esi); cld\n\t"
27529+ __asm__("movw %w6, %%ds\n\t"
27530+ "lcall *%%ss:(%%esi); cld\n\t"
27531+ "push %%ss\n\t"
27532+ "pop %%ds\n\t"
27533 "jc 1f\n\t"
27534 "xor %%ah, %%ah\n"
27535 "1:"
27536@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
27537 : "1" (PCIBIOS_READ_CONFIG_DWORD),
27538 "b" (bx),
27539 "D" ((long)reg),
27540- "S" (&pci_indirect));
27541+ "S" (&pci_indirect),
27542+ "r" (__PCIBIOS_DS));
27543 break;
27544 }
27545
27546@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27547
27548 switch (len) {
27549 case 1:
27550- __asm__("lcall *(%%esi); cld\n\t"
27551+ __asm__("movw %w6, %%ds\n\t"
27552+ "lcall *%%ss:(%%esi); cld\n\t"
27553+ "push %%ss\n\t"
27554+ "pop %%ds\n\t"
27555 "jc 1f\n\t"
27556 "xor %%ah, %%ah\n"
27557 "1:"
27558@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27559 "c" (value),
27560 "b" (bx),
27561 "D" ((long)reg),
27562- "S" (&pci_indirect));
27563+ "S" (&pci_indirect),
27564+ "r" (__PCIBIOS_DS));
27565 break;
27566 case 2:
27567- __asm__("lcall *(%%esi); cld\n\t"
27568+ __asm__("movw %w6, %%ds\n\t"
27569+ "lcall *%%ss:(%%esi); cld\n\t"
27570+ "push %%ss\n\t"
27571+ "pop %%ds\n\t"
27572 "jc 1f\n\t"
27573 "xor %%ah, %%ah\n"
27574 "1:"
27575@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27576 "c" (value),
27577 "b" (bx),
27578 "D" ((long)reg),
27579- "S" (&pci_indirect));
27580+ "S" (&pci_indirect),
27581+ "r" (__PCIBIOS_DS));
27582 break;
27583 case 4:
27584- __asm__("lcall *(%%esi); cld\n\t"
27585+ __asm__("movw %w6, %%ds\n\t"
27586+ "lcall *%%ss:(%%esi); cld\n\t"
27587+ "push %%ss\n\t"
27588+ "pop %%ds\n\t"
27589 "jc 1f\n\t"
27590 "xor %%ah, %%ah\n"
27591 "1:"
27592@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27593 "c" (value),
27594 "b" (bx),
27595 "D" ((long)reg),
27596- "S" (&pci_indirect));
27597+ "S" (&pci_indirect),
27598+ "r" (__PCIBIOS_DS));
27599 break;
27600 }
27601
27602@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
27603 * Function table for BIOS32 access
27604 */
27605
27606-static struct pci_raw_ops pci_bios_access = {
27607+static const struct pci_raw_ops pci_bios_access = {
27608 .read = pci_bios_read,
27609 .write = pci_bios_write
27610 };
27611@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
27612 * Try to find PCI BIOS.
27613 */
27614
27615-static struct pci_raw_ops * __devinit pci_find_bios(void)
27616+static const struct pci_raw_ops * __devinit pci_find_bios(void)
27617 {
27618 union bios32 *check;
27619 unsigned char sum;
27620@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
27621
27622 DBG("PCI: Fetching IRQ routing table... ");
27623 __asm__("push %%es\n\t"
27624+ "movw %w8, %%ds\n\t"
27625 "push %%ds\n\t"
27626 "pop %%es\n\t"
27627- "lcall *(%%esi); cld\n\t"
27628+ "lcall *%%ss:(%%esi); cld\n\t"
27629 "pop %%es\n\t"
27630+ "push %%ss\n\t"
27631+ "pop %%ds\n"
27632 "jc 1f\n\t"
27633 "xor %%ah, %%ah\n"
27634 "1:"
27635@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
27636 "1" (0),
27637 "D" ((long) &opt),
27638 "S" (&pci_indirect),
27639- "m" (opt)
27640+ "m" (opt),
27641+ "r" (__PCIBIOS_DS)
27642 : "memory");
27643 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
27644 if (ret & 0xff00)
27645@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
27646 {
27647 int ret;
27648
27649- __asm__("lcall *(%%esi); cld\n\t"
27650+ __asm__("movw %w5, %%ds\n\t"
27651+ "lcall *%%ss:(%%esi); cld\n\t"
27652+ "push %%ss\n\t"
27653+ "pop %%ds\n"
27654 "jc 1f\n\t"
27655 "xor %%ah, %%ah\n"
27656 "1:"
27657@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
27658 : "0" (PCIBIOS_SET_PCI_HW_INT),
27659 "b" ((dev->bus->number << 8) | dev->devfn),
27660 "c" ((irq << 8) | (pin + 10)),
27661- "S" (&pci_indirect));
27662+ "S" (&pci_indirect),
27663+ "r" (__PCIBIOS_DS));
27664 return !(ret & 0xff00);
27665 }
27666 EXPORT_SYMBOL(pcibios_set_irq_routing);
27667diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
27668index fa0f651..9d8f3d9 100644
27669--- a/arch/x86/power/cpu.c
27670+++ b/arch/x86/power/cpu.c
27671@@ -129,7 +129,7 @@ static void do_fpu_end(void)
27672 static void fix_processor_context(void)
27673 {
27674 int cpu = smp_processor_id();
27675- struct tss_struct *t = &per_cpu(init_tss, cpu);
27676+ struct tss_struct *t = init_tss + cpu;
27677
27678 set_tss_desc(cpu, t); /*
27679 * This just modifies memory; should not be
27680@@ -139,7 +139,9 @@ static void fix_processor_context(void)
27681 */
27682
27683 #ifdef CONFIG_X86_64
27684+ pax_open_kernel();
27685 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
27686+ pax_close_kernel();
27687
27688 syscall_init(); /* This sets MSR_*STAR and related */
27689 #endif
27690diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
27691index dd78ef6..f9d928d 100644
27692--- a/arch/x86/vdso/Makefile
27693+++ b/arch/x86/vdso/Makefile
27694@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
27695 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
27696 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
27697
27698-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27699+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
27700 GCOV_PROFILE := n
27701
27702 #
27703diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
27704index ee55754..0013b2e 100644
27705--- a/arch/x86/vdso/vclock_gettime.c
27706+++ b/arch/x86/vdso/vclock_gettime.c
27707@@ -22,24 +22,48 @@
27708 #include <asm/hpet.h>
27709 #include <asm/unistd.h>
27710 #include <asm/io.h>
27711+#include <asm/fixmap.h>
27712 #include "vextern.h"
27713
27714 #define gtod vdso_vsyscall_gtod_data
27715
27716+notrace noinline long __vdso_fallback_time(long *t)
27717+{
27718+ long secs;
27719+ asm volatile("syscall"
27720+ : "=a" (secs)
27721+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
27722+ return secs;
27723+}
27724+
27725 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
27726 {
27727 long ret;
27728 asm("syscall" : "=a" (ret) :
27729- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
27730+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
27731 return ret;
27732 }
27733
27734+notrace static inline cycle_t __vdso_vread_hpet(void)
27735+{
27736+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
27737+}
27738+
27739+notrace static inline cycle_t __vdso_vread_tsc(void)
27740+{
27741+ cycle_t ret = (cycle_t)vget_cycles();
27742+
27743+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
27744+}
27745+
27746 notrace static inline long vgetns(void)
27747 {
27748 long v;
27749- cycles_t (*vread)(void);
27750- vread = gtod->clock.vread;
27751- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
27752+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
27753+ v = __vdso_vread_tsc();
27754+ else
27755+ v = __vdso_vread_hpet();
27756+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
27757 return (v * gtod->clock.mult) >> gtod->clock.shift;
27758 }
27759
27760@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
27761
27762 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
27763 {
27764- if (likely(gtod->sysctl_enabled))
27765+ if (likely(gtod->sysctl_enabled &&
27766+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
27767+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
27768 switch (clock) {
27769 case CLOCK_REALTIME:
27770 if (likely(gtod->clock.vread))
27771@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
27772 int clock_gettime(clockid_t, struct timespec *)
27773 __attribute__((weak, alias("__vdso_clock_gettime")));
27774
27775+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
27776+{
27777+ long ret;
27778+ asm("syscall" : "=a" (ret) :
27779+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
27780+ return ret;
27781+}
27782+
27783 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
27784 {
27785- long ret;
27786- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
27787+ if (likely(gtod->sysctl_enabled &&
27788+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
27789+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
27790+ {
27791 if (likely(tv != NULL)) {
27792 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
27793 offsetof(struct timespec, tv_nsec) ||
27794@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
27795 }
27796 return 0;
27797 }
27798- asm("syscall" : "=a" (ret) :
27799- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
27800- return ret;
27801+ return __vdso_fallback_gettimeofday(tv, tz);
27802 }
27803 int gettimeofday(struct timeval *, struct timezone *)
27804 __attribute__((weak, alias("__vdso_gettimeofday")));
27805diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
27806index 4e5dd3b..00ba15e 100644
27807--- a/arch/x86/vdso/vdso.lds.S
27808+++ b/arch/x86/vdso/vdso.lds.S
27809@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
27810 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
27811 #include "vextern.h"
27812 #undef VEXTERN
27813+
27814+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
27815+VEXTERN(fallback_gettimeofday)
27816+VEXTERN(fallback_time)
27817+VEXTERN(getcpu)
27818+#undef VEXTERN
27819diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
27820index 58bc00f..d53fb48 100644
27821--- a/arch/x86/vdso/vdso32-setup.c
27822+++ b/arch/x86/vdso/vdso32-setup.c
27823@@ -25,6 +25,7 @@
27824 #include <asm/tlbflush.h>
27825 #include <asm/vdso.h>
27826 #include <asm/proto.h>
27827+#include <asm/mman.h>
27828
27829 enum {
27830 VDSO_DISABLED = 0,
27831@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
27832 void enable_sep_cpu(void)
27833 {
27834 int cpu = get_cpu();
27835- struct tss_struct *tss = &per_cpu(init_tss, cpu);
27836+ struct tss_struct *tss = init_tss + cpu;
27837
27838 if (!boot_cpu_has(X86_FEATURE_SEP)) {
27839 put_cpu();
27840@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
27841 gate_vma.vm_start = FIXADDR_USER_START;
27842 gate_vma.vm_end = FIXADDR_USER_END;
27843 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
27844- gate_vma.vm_page_prot = __P101;
27845+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
27846 /*
27847 * Make sure the vDSO gets into every core dump.
27848 * Dumping its contents makes post-mortem fully interpretable later
27849@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27850 if (compat)
27851 addr = VDSO_HIGH_BASE;
27852 else {
27853- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
27854+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
27855 if (IS_ERR_VALUE(addr)) {
27856 ret = addr;
27857 goto up_fail;
27858 }
27859 }
27860
27861- current->mm->context.vdso = (void *)addr;
27862+ current->mm->context.vdso = addr;
27863
27864 if (compat_uses_vma || !compat) {
27865 /*
27866@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27867 }
27868
27869 current_thread_info()->sysenter_return =
27870- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27871+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27872
27873 up_fail:
27874 if (ret)
27875- current->mm->context.vdso = NULL;
27876+ current->mm->context.vdso = 0;
27877
27878 up_write(&mm->mmap_sem);
27879
27880@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
27881
27882 const char *arch_vma_name(struct vm_area_struct *vma)
27883 {
27884- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27885+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27886 return "[vdso]";
27887+
27888+#ifdef CONFIG_PAX_SEGMEXEC
27889+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27890+ return "[vdso]";
27891+#endif
27892+
27893 return NULL;
27894 }
27895
27896@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
27897 struct mm_struct *mm = tsk->mm;
27898
27899 /* Check to see if this task was created in compat vdso mode */
27900- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27901+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27902 return &gate_vma;
27903 return NULL;
27904 }
27905diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
27906index 1683ba2..48d07f3 100644
27907--- a/arch/x86/vdso/vextern.h
27908+++ b/arch/x86/vdso/vextern.h
27909@@ -11,6 +11,5 @@
27910 put into vextern.h and be referenced as a pointer with vdso prefix.
27911 The main kernel later fills in the values. */
27912
27913-VEXTERN(jiffies)
27914 VEXTERN(vgetcpu_mode)
27915 VEXTERN(vsyscall_gtod_data)
27916diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27917index 21e1aeb..2c0b3c4 100644
27918--- a/arch/x86/vdso/vma.c
27919+++ b/arch/x86/vdso/vma.c
27920@@ -17,8 +17,6 @@
27921 #include "vextern.h" /* Just for VMAGIC. */
27922 #undef VEXTERN
27923
27924-unsigned int __read_mostly vdso_enabled = 1;
27925-
27926 extern char vdso_start[], vdso_end[];
27927 extern unsigned short vdso_sync_cpuid;
27928
27929@@ -27,10 +25,8 @@ static unsigned vdso_size;
27930
27931 static inline void *var_ref(void *p, char *name)
27932 {
27933- if (*(void **)p != (void *)VMAGIC) {
27934- printk("VDSO: variable %s broken\n", name);
27935- vdso_enabled = 0;
27936- }
27937+ if (*(void **)p != (void *)VMAGIC)
27938+ panic("VDSO: variable %s broken\n", name);
27939 return p;
27940 }
27941
27942@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
27943 if (!vbase)
27944 goto oom;
27945
27946- if (memcmp(vbase, "\177ELF", 4)) {
27947- printk("VDSO: I'm broken; not ELF\n");
27948- vdso_enabled = 0;
27949- }
27950+ if (memcmp(vbase, ELFMAG, SELFMAG))
27951+ panic("VDSO: I'm broken; not ELF\n");
27952
27953 #define VEXTERN(x) \
27954 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
27955 #include "vextern.h"
27956 #undef VEXTERN
27957+ vunmap(vbase);
27958 return 0;
27959
27960 oom:
27961- printk("Cannot allocate vdso\n");
27962- vdso_enabled = 0;
27963- return -ENOMEM;
27964+ panic("Cannot allocate vdso\n");
27965 }
27966 __initcall(init_vdso_vars);
27967
27968@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27969 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27970 {
27971 struct mm_struct *mm = current->mm;
27972- unsigned long addr;
27973+ unsigned long addr = 0;
27974 int ret;
27975
27976- if (!vdso_enabled)
27977- return 0;
27978-
27979 down_write(&mm->mmap_sem);
27980+
27981+#ifdef CONFIG_PAX_RANDMMAP
27982+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27983+#endif
27984+
27985 addr = vdso_addr(mm->start_stack, vdso_size);
27986 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
27987 if (IS_ERR_VALUE(addr)) {
27988@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27989 goto up_fail;
27990 }
27991
27992- current->mm->context.vdso = (void *)addr;
27993+ current->mm->context.vdso = addr;
27994
27995 ret = install_special_mapping(mm, addr, vdso_size,
27996 VM_READ|VM_EXEC|
27997@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27998 VM_ALWAYSDUMP,
27999 vdso_pages);
28000 if (ret) {
28001- current->mm->context.vdso = NULL;
28002+ current->mm->context.vdso = 0;
28003 goto up_fail;
28004 }
28005
28006@@ -132,10 +127,3 @@ up_fail:
28007 up_write(&mm->mmap_sem);
28008 return ret;
28009 }
28010-
28011-static __init int vdso_setup(char *s)
28012-{
28013- vdso_enabled = simple_strtoul(s, NULL, 0);
28014- return 0;
28015-}
28016-__setup("vdso=", vdso_setup);
28017diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
28018index 0087b00..eecb34f 100644
28019--- a/arch/x86/xen/enlighten.c
28020+++ b/arch/x86/xen/enlighten.c
28021@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
28022
28023 struct shared_info xen_dummy_shared_info;
28024
28025-void *xen_initial_gdt;
28026-
28027 /*
28028 * Point at some empty memory to start with. We map the real shared_info
28029 * page as soon as fixmap is up and running.
28030@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
28031
28032 preempt_disable();
28033
28034- start = __get_cpu_var(idt_desc).address;
28035+ start = (unsigned long)__get_cpu_var(idt_desc).address;
28036 end = start + __get_cpu_var(idt_desc).size + 1;
28037
28038 xen_mc_flush();
28039@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
28040 #endif
28041 };
28042
28043-static void xen_reboot(int reason)
28044+static __noreturn void xen_reboot(int reason)
28045 {
28046 struct sched_shutdown r = { .reason = reason };
28047
28048@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
28049 BUG();
28050 }
28051
28052-static void xen_restart(char *msg)
28053+static __noreturn void xen_restart(char *msg)
28054 {
28055 xen_reboot(SHUTDOWN_reboot);
28056 }
28057
28058-static void xen_emergency_restart(void)
28059+static __noreturn void xen_emergency_restart(void)
28060 {
28061 xen_reboot(SHUTDOWN_reboot);
28062 }
28063
28064-static void xen_machine_halt(void)
28065+static __noreturn void xen_machine_halt(void)
28066 {
28067 xen_reboot(SHUTDOWN_poweroff);
28068 }
28069@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
28070 */
28071 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
28072
28073-#ifdef CONFIG_X86_64
28074 /* Work out if we support NX */
28075- check_efer();
28076+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28077+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
28078+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
28079+ unsigned l, h;
28080+
28081+#ifdef CONFIG_X86_PAE
28082+ nx_enabled = 1;
28083+#endif
28084+ __supported_pte_mask |= _PAGE_NX;
28085+ rdmsr(MSR_EFER, l, h);
28086+ l |= EFER_NX;
28087+ wrmsr(MSR_EFER, l, h);
28088+ }
28089 #endif
28090
28091 xen_setup_features();
28092@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
28093
28094 machine_ops = xen_machine_ops;
28095
28096- /*
28097- * The only reliable way to retain the initial address of the
28098- * percpu gdt_page is to remember it here, so we can go and
28099- * mark it RW later, when the initial percpu area is freed.
28100- */
28101- xen_initial_gdt = &per_cpu(gdt_page, 0);
28102-
28103 xen_smp_init();
28104
28105 pgd = (pgd_t *)xen_start_info->pt_base;
28106diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
28107index 3f90a2c..2c2ad84 100644
28108--- a/arch/x86/xen/mmu.c
28109+++ b/arch/x86/xen/mmu.c
28110@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28111 convert_pfn_mfn(init_level4_pgt);
28112 convert_pfn_mfn(level3_ident_pgt);
28113 convert_pfn_mfn(level3_kernel_pgt);
28114+ convert_pfn_mfn(level3_vmalloc_start_pgt);
28115+ convert_pfn_mfn(level3_vmalloc_end_pgt);
28116+ convert_pfn_mfn(level3_vmemmap_pgt);
28117
28118 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
28119 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
28120@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
28121 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
28122 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
28123 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
28124+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
28125+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
28126+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
28127 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
28128+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
28129 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
28130 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
28131
28132@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
28133 pv_mmu_ops.set_pud = xen_set_pud;
28134 #if PAGETABLE_LEVELS == 4
28135 pv_mmu_ops.set_pgd = xen_set_pgd;
28136+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
28137 #endif
28138
28139 /* This will work as long as patching hasn't happened yet
28140@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
28141 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
28142 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
28143 .set_pgd = xen_set_pgd_hyper,
28144+ .set_pgd_batched = xen_set_pgd_hyper,
28145
28146 .alloc_pud = xen_alloc_pmd_init,
28147 .release_pud = xen_release_pmd_init,
28148diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
28149index a96204a..fca9b8e 100644
28150--- a/arch/x86/xen/smp.c
28151+++ b/arch/x86/xen/smp.c
28152@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
28153 {
28154 BUG_ON(smp_processor_id() != 0);
28155 native_smp_prepare_boot_cpu();
28156-
28157- /* We've switched to the "real" per-cpu gdt, so make sure the
28158- old memory can be recycled */
28159- make_lowmem_page_readwrite(xen_initial_gdt);
28160-
28161 xen_setup_vcpu_info_placement();
28162 }
28163
28164@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
28165 gdt = get_cpu_gdt_table(cpu);
28166
28167 ctxt->flags = VGCF_IN_KERNEL;
28168- ctxt->user_regs.ds = __USER_DS;
28169- ctxt->user_regs.es = __USER_DS;
28170+ ctxt->user_regs.ds = __KERNEL_DS;
28171+ ctxt->user_regs.es = __KERNEL_DS;
28172 ctxt->user_regs.ss = __KERNEL_DS;
28173 #ifdef CONFIG_X86_32
28174 ctxt->user_regs.fs = __KERNEL_PERCPU;
28175- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
28176+ savesegment(gs, ctxt->user_regs.gs);
28177 #else
28178 ctxt->gs_base_kernel = per_cpu_offset(cpu);
28179 #endif
28180@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
28181 int rc;
28182
28183 per_cpu(current_task, cpu) = idle;
28184+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
28185 #ifdef CONFIG_X86_32
28186 irq_ctx_init(cpu);
28187 #else
28188 clear_tsk_thread_flag(idle, TIF_FORK);
28189- per_cpu(kernel_stack, cpu) =
28190- (unsigned long)task_stack_page(idle) -
28191- KERNEL_STACK_OFFSET + THREAD_SIZE;
28192+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
28193 #endif
28194 xen_setup_runstate_info(cpu);
28195 xen_setup_timer(cpu);
28196diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
28197index 9a95a9c..4f39e774 100644
28198--- a/arch/x86/xen/xen-asm_32.S
28199+++ b/arch/x86/xen/xen-asm_32.S
28200@@ -83,14 +83,14 @@ ENTRY(xen_iret)
28201 ESP_OFFSET=4 # bytes pushed onto stack
28202
28203 /*
28204- * Store vcpu_info pointer for easy access. Do it this way to
28205- * avoid having to reload %fs
28206+ * Store vcpu_info pointer for easy access.
28207 */
28208 #ifdef CONFIG_SMP
28209- GET_THREAD_INFO(%eax)
28210- movl TI_cpu(%eax), %eax
28211- movl __per_cpu_offset(,%eax,4), %eax
28212- mov per_cpu__xen_vcpu(%eax), %eax
28213+ push %fs
28214+ mov $(__KERNEL_PERCPU), %eax
28215+ mov %eax, %fs
28216+ mov PER_CPU_VAR(xen_vcpu), %eax
28217+ pop %fs
28218 #else
28219 movl per_cpu__xen_vcpu, %eax
28220 #endif
28221diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
28222index 1a5ff24..a187d40 100644
28223--- a/arch/x86/xen/xen-head.S
28224+++ b/arch/x86/xen/xen-head.S
28225@@ -19,6 +19,17 @@ ENTRY(startup_xen)
28226 #ifdef CONFIG_X86_32
28227 mov %esi,xen_start_info
28228 mov $init_thread_union+THREAD_SIZE,%esp
28229+#ifdef CONFIG_SMP
28230+ movl $cpu_gdt_table,%edi
28231+ movl $__per_cpu_load,%eax
28232+ movw %ax,__KERNEL_PERCPU + 2(%edi)
28233+ rorl $16,%eax
28234+ movb %al,__KERNEL_PERCPU + 4(%edi)
28235+ movb %ah,__KERNEL_PERCPU + 7(%edi)
28236+ movl $__per_cpu_end - 1,%eax
28237+ subl $__per_cpu_start,%eax
28238+ movw %ax,__KERNEL_PERCPU + 0(%edi)
28239+#endif
28240 #else
28241 mov %rsi,xen_start_info
28242 mov $init_thread_union+THREAD_SIZE,%rsp
28243diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
28244index f9153a3..51eab3d 100644
28245--- a/arch/x86/xen/xen-ops.h
28246+++ b/arch/x86/xen/xen-ops.h
28247@@ -10,8 +10,6 @@
28248 extern const char xen_hypervisor_callback[];
28249 extern const char xen_failsafe_callback[];
28250
28251-extern void *xen_initial_gdt;
28252-
28253 struct trap_info;
28254 void xen_copy_trap_info(struct trap_info *traps);
28255
28256diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
28257index 525bd3d..ef888b1 100644
28258--- a/arch/xtensa/variants/dc232b/include/variant/core.h
28259+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
28260@@ -119,9 +119,9 @@
28261 ----------------------------------------------------------------------*/
28262
28263 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
28264-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
28265 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
28266 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
28267+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28268
28269 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
28270 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
28271diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
28272index 2f33760..835e50a 100644
28273--- a/arch/xtensa/variants/fsf/include/variant/core.h
28274+++ b/arch/xtensa/variants/fsf/include/variant/core.h
28275@@ -11,6 +11,7 @@
28276 #ifndef _XTENSA_CORE_H
28277 #define _XTENSA_CORE_H
28278
28279+#include <linux/const.h>
28280
28281 /****************************************************************************
28282 Parameters Useful for Any Code, USER or PRIVILEGED
28283@@ -112,9 +113,9 @@
28284 ----------------------------------------------------------------------*/
28285
28286 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28287-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28288 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28289 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28290+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28291
28292 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
28293 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
28294diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
28295index af00795..2bb8105 100644
28296--- a/arch/xtensa/variants/s6000/include/variant/core.h
28297+++ b/arch/xtensa/variants/s6000/include/variant/core.h
28298@@ -11,6 +11,7 @@
28299 #ifndef _XTENSA_CORE_CONFIGURATION_H
28300 #define _XTENSA_CORE_CONFIGURATION_H
28301
28302+#include <linux/const.h>
28303
28304 /****************************************************************************
28305 Parameters Useful for Any Code, USER or PRIVILEGED
28306@@ -118,9 +119,9 @@
28307 ----------------------------------------------------------------------*/
28308
28309 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
28310-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
28311 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
28312 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
28313+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
28314
28315 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
28316 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
28317diff --git a/block/blk-integrity.c b/block/blk-integrity.c
28318index 15c6308..96e83c2 100644
28319--- a/block/blk-integrity.c
28320+++ b/block/blk-integrity.c
28321@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
28322 NULL,
28323 };
28324
28325-static struct sysfs_ops integrity_ops = {
28326+static const struct sysfs_ops integrity_ops = {
28327 .show = &integrity_attr_show,
28328 .store = &integrity_attr_store,
28329 };
28330diff --git a/block/blk-ioc.c b/block/blk-ioc.c
28331index d4ed600..cbdabb0 100644
28332--- a/block/blk-ioc.c
28333+++ b/block/blk-ioc.c
28334@@ -66,22 +66,22 @@ static void cfq_exit(struct io_context *ioc)
28335 }
28336
28337 /* Called by the exitting task */
28338-void exit_io_context(void)
28339+void exit_io_context(struct task_struct *task)
28340 {
28341 struct io_context *ioc;
28342
28343- task_lock(current);
28344- ioc = current->io_context;
28345- current->io_context = NULL;
28346- task_unlock(current);
28347+ task_lock(task);
28348+ ioc = task->io_context;
28349+ task->io_context = NULL;
28350+ task_unlock(task);
28351
28352 if (atomic_dec_and_test(&ioc->nr_tasks)) {
28353 if (ioc->aic && ioc->aic->exit)
28354 ioc->aic->exit(ioc->aic);
28355 cfq_exit(ioc);
28356
28357- put_io_context(ioc);
28358 }
28359+ put_io_context(ioc);
28360 }
28361
28362 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
28363diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
28364index ca56420..f2fc409 100644
28365--- a/block/blk-iopoll.c
28366+++ b/block/blk-iopoll.c
28367@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
28368 }
28369 EXPORT_SYMBOL(blk_iopoll_complete);
28370
28371-static void blk_iopoll_softirq(struct softirq_action *h)
28372+static void blk_iopoll_softirq(void)
28373 {
28374 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
28375 int rearm = 0, budget = blk_iopoll_budget;
28376diff --git a/block/blk-map.c b/block/blk-map.c
28377index 30a7e51..0aeec6a 100644
28378--- a/block/blk-map.c
28379+++ b/block/blk-map.c
28380@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
28381 * direct dma. else, set up kernel bounce buffers
28382 */
28383 uaddr = (unsigned long) ubuf;
28384- if (blk_rq_aligned(q, ubuf, len) && !map_data)
28385+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
28386 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
28387 else
28388 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
28389@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
28390 for (i = 0; i < iov_count; i++) {
28391 unsigned long uaddr = (unsigned long)iov[i].iov_base;
28392
28393+ if (!iov[i].iov_len)
28394+ return -EINVAL;
28395+
28396 if (uaddr & queue_dma_alignment(q)) {
28397 unaligned = 1;
28398 break;
28399 }
28400- if (!iov[i].iov_len)
28401- return -EINVAL;
28402 }
28403
28404 if (unaligned || (q->dma_pad_mask & len) || map_data)
28405@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
28406 if (!len || !kbuf)
28407 return -EINVAL;
28408
28409- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
28410+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
28411 if (do_copy)
28412 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
28413 else
28414diff --git a/block/blk-softirq.c b/block/blk-softirq.c
28415index ee9c216..58d410a 100644
28416--- a/block/blk-softirq.c
28417+++ b/block/blk-softirq.c
28418@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
28419 * Softirq action handler - move entries to local list and loop over them
28420 * while passing them to the queue registered handler.
28421 */
28422-static void blk_done_softirq(struct softirq_action *h)
28423+static void blk_done_softirq(void)
28424 {
28425 struct list_head *cpu_list, local_list;
28426
28427diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
28428index bb9c5ea..5330d48 100644
28429--- a/block/blk-sysfs.c
28430+++ b/block/blk-sysfs.c
28431@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
28432 kmem_cache_free(blk_requestq_cachep, q);
28433 }
28434
28435-static struct sysfs_ops queue_sysfs_ops = {
28436+static const struct sysfs_ops queue_sysfs_ops = {
28437 .show = queue_attr_show,
28438 .store = queue_attr_store,
28439 };
28440diff --git a/block/bsg.c b/block/bsg.c
28441index 7154a7a..08ac2f0 100644
28442--- a/block/bsg.c
28443+++ b/block/bsg.c
28444@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
28445 struct sg_io_v4 *hdr, struct bsg_device *bd,
28446 fmode_t has_write_perm)
28447 {
28448+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28449+ unsigned char *cmdptr;
28450+
28451 if (hdr->request_len > BLK_MAX_CDB) {
28452 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
28453 if (!rq->cmd)
28454 return -ENOMEM;
28455- }
28456+ cmdptr = rq->cmd;
28457+ } else
28458+ cmdptr = tmpcmd;
28459
28460- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
28461+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
28462 hdr->request_len))
28463 return -EFAULT;
28464
28465+ if (cmdptr != rq->cmd)
28466+ memcpy(rq->cmd, cmdptr, hdr->request_len);
28467+
28468 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
28469 if (blk_verify_command(rq->cmd, has_write_perm))
28470 return -EPERM;
28471@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28472 rq->next_rq = next_rq;
28473 next_rq->cmd_type = rq->cmd_type;
28474
28475- dxferp = (void*)(unsigned long)hdr->din_xferp;
28476+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28477 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
28478 hdr->din_xfer_len, GFP_KERNEL);
28479 if (ret)
28480@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
28481
28482 if (hdr->dout_xfer_len) {
28483 dxfer_len = hdr->dout_xfer_len;
28484- dxferp = (void*)(unsigned long)hdr->dout_xferp;
28485+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
28486 } else if (hdr->din_xfer_len) {
28487 dxfer_len = hdr->din_xfer_len;
28488- dxferp = (void*)(unsigned long)hdr->din_xferp;
28489+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
28490 } else
28491 dxfer_len = 0;
28492
28493@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
28494 int len = min_t(unsigned int, hdr->max_response_len,
28495 rq->sense_len);
28496
28497- ret = copy_to_user((void*)(unsigned long)hdr->response,
28498+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
28499 rq->sense, len);
28500 if (!ret)
28501 hdr->response_len = len;
28502diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
28503index 9bd086c..ca1fc22 100644
28504--- a/block/compat_ioctl.c
28505+++ b/block/compat_ioctl.c
28506@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
28507 err |= __get_user(f->spec1, &uf->spec1);
28508 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
28509 err |= __get_user(name, &uf->name);
28510- f->name = compat_ptr(name);
28511+ f->name = (void __force_kernel *)compat_ptr(name);
28512 if (err) {
28513 err = -EFAULT;
28514 goto out;
28515diff --git a/block/elevator.c b/block/elevator.c
28516index a847046..75a1746 100644
28517--- a/block/elevator.c
28518+++ b/block/elevator.c
28519@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
28520 return error;
28521 }
28522
28523-static struct sysfs_ops elv_sysfs_ops = {
28524+static const struct sysfs_ops elv_sysfs_ops = {
28525 .show = elv_attr_show,
28526 .store = elv_attr_store,
28527 };
28528diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
28529index 2be0a97..bded3fd 100644
28530--- a/block/scsi_ioctl.c
28531+++ b/block/scsi_ioctl.c
28532@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
28533 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
28534 struct sg_io_hdr *hdr, fmode_t mode)
28535 {
28536- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
28537+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28538+ unsigned char *cmdptr;
28539+
28540+ if (rq->cmd != rq->__cmd)
28541+ cmdptr = rq->cmd;
28542+ else
28543+ cmdptr = tmpcmd;
28544+
28545+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
28546 return -EFAULT;
28547+
28548+ if (cmdptr != rq->cmd)
28549+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
28550+
28551 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
28552 return -EPERM;
28553
28554@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28555 int err;
28556 unsigned int in_len, out_len, bytes, opcode, cmdlen;
28557 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
28558+ unsigned char tmpcmd[sizeof(rq->__cmd)];
28559+ unsigned char *cmdptr;
28560
28561 if (!sic)
28562 return -EINVAL;
28563@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
28564 */
28565 err = -EFAULT;
28566 rq->cmd_len = cmdlen;
28567- if (copy_from_user(rq->cmd, sic->data, cmdlen))
28568+
28569+ if (rq->cmd != rq->__cmd)
28570+ cmdptr = rq->cmd;
28571+ else
28572+ cmdptr = tmpcmd;
28573+
28574+ if (copy_from_user(cmdptr, sic->data, cmdlen))
28575 goto error;
28576
28577+ if (rq->cmd != cmdptr)
28578+ memcpy(rq->cmd, cmdptr, cmdlen);
28579+
28580 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
28581 goto error;
28582
28583diff --git a/crypto/cryptd.c b/crypto/cryptd.c
28584index 3533582..f143117 100644
28585--- a/crypto/cryptd.c
28586+++ b/crypto/cryptd.c
28587@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
28588
28589 struct cryptd_blkcipher_request_ctx {
28590 crypto_completion_t complete;
28591-};
28592+} __no_const;
28593
28594 struct cryptd_hash_ctx {
28595 struct crypto_shash *child;
28596diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
28597index a90d260..7a9765e 100644
28598--- a/crypto/gf128mul.c
28599+++ b/crypto/gf128mul.c
28600@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
28601 for (i = 0; i < 7; ++i)
28602 gf128mul_x_lle(&p[i + 1], &p[i]);
28603
28604- memset(r, 0, sizeof(r));
28605+ memset(r, 0, sizeof(*r));
28606 for (i = 0;;) {
28607 u8 ch = ((u8 *)b)[15 - i];
28608
28609@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
28610 for (i = 0; i < 7; ++i)
28611 gf128mul_x_bbe(&p[i + 1], &p[i]);
28612
28613- memset(r, 0, sizeof(r));
28614+ memset(r, 0, sizeof(*r));
28615 for (i = 0;;) {
28616 u8 ch = ((u8 *)b)[i];
28617
28618diff --git a/crypto/serpent.c b/crypto/serpent.c
28619index b651a55..023297d 100644
28620--- a/crypto/serpent.c
28621+++ b/crypto/serpent.c
28622@@ -21,6 +21,7 @@
28623 #include <asm/byteorder.h>
28624 #include <linux/crypto.h>
28625 #include <linux/types.h>
28626+#include <linux/sched.h>
28627
28628 /* Key is padded to the maximum of 256 bits before round key generation.
28629 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
28630@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
28631 u32 r0,r1,r2,r3,r4;
28632 int i;
28633
28634+ pax_track_stack();
28635+
28636 /* Copy key, add padding */
28637
28638 for (i = 0; i < keylen; ++i)
28639diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
28640index 0d2cdb8..d8de48d 100644
28641--- a/drivers/acpi/acpi_pad.c
28642+++ b/drivers/acpi/acpi_pad.c
28643@@ -30,7 +30,7 @@
28644 #include <acpi/acpi_bus.h>
28645 #include <acpi/acpi_drivers.h>
28646
28647-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
28648+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
28649 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
28650 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
28651 static DEFINE_MUTEX(isolated_cpus_lock);
28652diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
28653index 3f4602b..2e41d36 100644
28654--- a/drivers/acpi/battery.c
28655+++ b/drivers/acpi/battery.c
28656@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
28657 }
28658
28659 static struct battery_file {
28660- struct file_operations ops;
28661+ const struct file_operations ops;
28662 mode_t mode;
28663 const char *name;
28664 } acpi_battery_file[] = {
28665diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
28666index 7338b6a..82f0257 100644
28667--- a/drivers/acpi/dock.c
28668+++ b/drivers/acpi/dock.c
28669@@ -77,7 +77,7 @@ struct dock_dependent_device {
28670 struct list_head list;
28671 struct list_head hotplug_list;
28672 acpi_handle handle;
28673- struct acpi_dock_ops *ops;
28674+ const struct acpi_dock_ops *ops;
28675 void *context;
28676 };
28677
28678@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
28679 * the dock driver after _DCK is executed.
28680 */
28681 int
28682-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
28683+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
28684 void *context)
28685 {
28686 struct dock_dependent_device *dd;
28687diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
28688index 7c1c59e..2993595 100644
28689--- a/drivers/acpi/osl.c
28690+++ b/drivers/acpi/osl.c
28691@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
28692 void __iomem *virt_addr;
28693
28694 virt_addr = ioremap(phys_addr, width);
28695+ if (!virt_addr)
28696+ return AE_NO_MEMORY;
28697 if (!value)
28698 value = &dummy;
28699
28700@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
28701 void __iomem *virt_addr;
28702
28703 virt_addr = ioremap(phys_addr, width);
28704+ if (!virt_addr)
28705+ return AE_NO_MEMORY;
28706
28707 switch (width) {
28708 case 8:
28709diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
28710index c216062..eec10d2 100644
28711--- a/drivers/acpi/power_meter.c
28712+++ b/drivers/acpi/power_meter.c
28713@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
28714 return res;
28715
28716 temp /= 1000;
28717- if (temp < 0)
28718- return -EINVAL;
28719
28720 mutex_lock(&resource->lock);
28721 resource->trip[attr->index - 7] = temp;
28722diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
28723index d0d25e2..961643d 100644
28724--- a/drivers/acpi/proc.c
28725+++ b/drivers/acpi/proc.c
28726@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
28727 size_t count, loff_t * ppos)
28728 {
28729 struct list_head *node, *next;
28730- char strbuf[5];
28731- char str[5] = "";
28732- unsigned int len = count;
28733+ char strbuf[5] = {0};
28734 struct acpi_device *found_dev = NULL;
28735
28736- if (len > 4)
28737- len = 4;
28738- if (len < 0)
28739- return -EFAULT;
28740+ if (count > 4)
28741+ count = 4;
28742
28743- if (copy_from_user(strbuf, buffer, len))
28744+ if (copy_from_user(strbuf, buffer, count))
28745 return -EFAULT;
28746- strbuf[len] = '\0';
28747- sscanf(strbuf, "%s", str);
28748+ strbuf[count] = '\0';
28749
28750 mutex_lock(&acpi_device_lock);
28751 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
28752@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
28753 if (!dev->wakeup.flags.valid)
28754 continue;
28755
28756- if (!strncmp(dev->pnp.bus_id, str, 4)) {
28757+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
28758 dev->wakeup.state.enabled =
28759 dev->wakeup.state.enabled ? 0 : 1;
28760 found_dev = dev;
28761diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
28762index 7102474..de8ad22 100644
28763--- a/drivers/acpi/processor_core.c
28764+++ b/drivers/acpi/processor_core.c
28765@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
28766 return 0;
28767 }
28768
28769- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
28770+ BUG_ON(pr->id >= nr_cpu_ids);
28771
28772 /*
28773 * Buggy BIOS check
28774diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
28775index d933980..5761f13 100644
28776--- a/drivers/acpi/sbshc.c
28777+++ b/drivers/acpi/sbshc.c
28778@@ -17,7 +17,7 @@
28779
28780 #define PREFIX "ACPI: "
28781
28782-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
28783+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
28784 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
28785
28786 struct acpi_smb_hc {
28787diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
28788index 0458094..6978e7b 100644
28789--- a/drivers/acpi/sleep.c
28790+++ b/drivers/acpi/sleep.c
28791@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
28792 }
28793 }
28794
28795-static struct platform_suspend_ops acpi_suspend_ops = {
28796+static const struct platform_suspend_ops acpi_suspend_ops = {
28797 .valid = acpi_suspend_state_valid,
28798 .begin = acpi_suspend_begin,
28799 .prepare_late = acpi_pm_prepare,
28800@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
28801 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
28802 * been requested.
28803 */
28804-static struct platform_suspend_ops acpi_suspend_ops_old = {
28805+static const struct platform_suspend_ops acpi_suspend_ops_old = {
28806 .valid = acpi_suspend_state_valid,
28807 .begin = acpi_suspend_begin_old,
28808 .prepare_late = acpi_pm_disable_gpes,
28809@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
28810 acpi_enable_all_runtime_gpes();
28811 }
28812
28813-static struct platform_hibernation_ops acpi_hibernation_ops = {
28814+static const struct platform_hibernation_ops acpi_hibernation_ops = {
28815 .begin = acpi_hibernation_begin,
28816 .end = acpi_pm_end,
28817 .pre_snapshot = acpi_hibernation_pre_snapshot,
28818@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
28819 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
28820 * been requested.
28821 */
28822-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
28823+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
28824 .begin = acpi_hibernation_begin_old,
28825 .end = acpi_pm_end,
28826 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
28827diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
28828index 05dff63..b662ab7 100644
28829--- a/drivers/acpi/video.c
28830+++ b/drivers/acpi/video.c
28831@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
28832 vd->brightness->levels[request_level]);
28833 }
28834
28835-static struct backlight_ops acpi_backlight_ops = {
28836+static const struct backlight_ops acpi_backlight_ops = {
28837 .get_brightness = acpi_video_get_brightness,
28838 .update_status = acpi_video_set_brightness,
28839 };
28840diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
28841index 6787aab..23ffb0e 100644
28842--- a/drivers/ata/ahci.c
28843+++ b/drivers/ata/ahci.c
28844@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
28845 .sdev_attrs = ahci_sdev_attrs,
28846 };
28847
28848-static struct ata_port_operations ahci_ops = {
28849+static const struct ata_port_operations ahci_ops = {
28850 .inherits = &sata_pmp_port_ops,
28851
28852 .qc_defer = sata_pmp_qc_defer_cmd_switch,
28853@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
28854 .port_stop = ahci_port_stop,
28855 };
28856
28857-static struct ata_port_operations ahci_vt8251_ops = {
28858+static const struct ata_port_operations ahci_vt8251_ops = {
28859 .inherits = &ahci_ops,
28860 .hardreset = ahci_vt8251_hardreset,
28861 };
28862
28863-static struct ata_port_operations ahci_p5wdh_ops = {
28864+static const struct ata_port_operations ahci_p5wdh_ops = {
28865 .inherits = &ahci_ops,
28866 .hardreset = ahci_p5wdh_hardreset,
28867 };
28868
28869-static struct ata_port_operations ahci_sb600_ops = {
28870+static const struct ata_port_operations ahci_sb600_ops = {
28871 .inherits = &ahci_ops,
28872 .softreset = ahci_sb600_softreset,
28873 .pmp_softreset = ahci_sb600_softreset,
28874diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
28875index 99e7196..4968c77 100644
28876--- a/drivers/ata/ata_generic.c
28877+++ b/drivers/ata/ata_generic.c
28878@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
28879 ATA_BMDMA_SHT(DRV_NAME),
28880 };
28881
28882-static struct ata_port_operations generic_port_ops = {
28883+static const struct ata_port_operations generic_port_ops = {
28884 .inherits = &ata_bmdma_port_ops,
28885 .cable_detect = ata_cable_unknown,
28886 .set_mode = generic_set_mode,
28887diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
28888index c33591d..000c121 100644
28889--- a/drivers/ata/ata_piix.c
28890+++ b/drivers/ata/ata_piix.c
28891@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
28892 ATA_BMDMA_SHT(DRV_NAME),
28893 };
28894
28895-static struct ata_port_operations piix_pata_ops = {
28896+static const struct ata_port_operations piix_pata_ops = {
28897 .inherits = &ata_bmdma32_port_ops,
28898 .cable_detect = ata_cable_40wire,
28899 .set_piomode = piix_set_piomode,
28900@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
28901 .prereset = piix_pata_prereset,
28902 };
28903
28904-static struct ata_port_operations piix_vmw_ops = {
28905+static const struct ata_port_operations piix_vmw_ops = {
28906 .inherits = &piix_pata_ops,
28907 .bmdma_status = piix_vmw_bmdma_status,
28908 };
28909
28910-static struct ata_port_operations ich_pata_ops = {
28911+static const struct ata_port_operations ich_pata_ops = {
28912 .inherits = &piix_pata_ops,
28913 .cable_detect = ich_pata_cable_detect,
28914 .set_dmamode = ich_set_dmamode,
28915 };
28916
28917-static struct ata_port_operations piix_sata_ops = {
28918+static const struct ata_port_operations piix_sata_ops = {
28919 .inherits = &ata_bmdma_port_ops,
28920 };
28921
28922-static struct ata_port_operations piix_sidpr_sata_ops = {
28923+static const struct ata_port_operations piix_sidpr_sata_ops = {
28924 .inherits = &piix_sata_ops,
28925 .hardreset = sata_std_hardreset,
28926 .scr_read = piix_sidpr_scr_read,
28927diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
28928index b0882cd..c295d65 100644
28929--- a/drivers/ata/libata-acpi.c
28930+++ b/drivers/ata/libata-acpi.c
28931@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
28932 ata_acpi_uevent(dev->link->ap, dev, event);
28933 }
28934
28935-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
28936+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
28937 .handler = ata_acpi_dev_notify_dock,
28938 .uevent = ata_acpi_dev_uevent,
28939 };
28940
28941-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
28942+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
28943 .handler = ata_acpi_ap_notify_dock,
28944 .uevent = ata_acpi_ap_uevent,
28945 };
28946diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
28947index d4f7f99..94f603e 100644
28948--- a/drivers/ata/libata-core.c
28949+++ b/drivers/ata/libata-core.c
28950@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
28951 struct ata_port *ap;
28952 unsigned int tag;
28953
28954- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28955+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28956 ap = qc->ap;
28957
28958 qc->flags = 0;
28959@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
28960 struct ata_port *ap;
28961 struct ata_link *link;
28962
28963- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28964+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
28965 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
28966 ap = qc->ap;
28967 link = qc->dev->link;
28968@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
28969 * LOCKING:
28970 * None.
28971 */
28972-static void ata_finalize_port_ops(struct ata_port_operations *ops)
28973+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
28974 {
28975 static DEFINE_SPINLOCK(lock);
28976 const struct ata_port_operations *cur;
28977@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28978 return;
28979
28980 spin_lock(&lock);
28981+ pax_open_kernel();
28982
28983 for (cur = ops->inherits; cur; cur = cur->inherits) {
28984 void **inherit = (void **)cur;
28985@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
28986 if (IS_ERR(*pp))
28987 *pp = NULL;
28988
28989- ops->inherits = NULL;
28990+ *(struct ata_port_operations **)&ops->inherits = NULL;
28991
28992+ pax_close_kernel();
28993 spin_unlock(&lock);
28994 }
28995
28996@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
28997 */
28998 /* KILLME - the only user left is ipr */
28999 void ata_host_init(struct ata_host *host, struct device *dev,
29000- unsigned long flags, struct ata_port_operations *ops)
29001+ unsigned long flags, const struct ata_port_operations *ops)
29002 {
29003 spin_lock_init(&host->lock);
29004 host->dev = dev;
29005@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
29006 /* truly dummy */
29007 }
29008
29009-struct ata_port_operations ata_dummy_port_ops = {
29010+const struct ata_port_operations ata_dummy_port_ops = {
29011 .qc_prep = ata_noop_qc_prep,
29012 .qc_issue = ata_dummy_qc_issue,
29013 .error_handler = ata_dummy_error_handler,
29014diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
29015index e5bdb9b..45a8e72 100644
29016--- a/drivers/ata/libata-eh.c
29017+++ b/drivers/ata/libata-eh.c
29018@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
29019 {
29020 struct ata_link *link;
29021
29022+ pax_track_stack();
29023+
29024 ata_for_each_link(link, ap, HOST_FIRST)
29025 ata_eh_link_report(link);
29026 }
29027@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
29028 */
29029 void ata_std_error_handler(struct ata_port *ap)
29030 {
29031- struct ata_port_operations *ops = ap->ops;
29032+ const struct ata_port_operations *ops = ap->ops;
29033 ata_reset_fn_t hardreset = ops->hardreset;
29034
29035 /* ignore built-in hardreset if SCR access is not available */
29036diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
29037index 51f0ffb..19ce3e3 100644
29038--- a/drivers/ata/libata-pmp.c
29039+++ b/drivers/ata/libata-pmp.c
29040@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
29041 */
29042 static int sata_pmp_eh_recover(struct ata_port *ap)
29043 {
29044- struct ata_port_operations *ops = ap->ops;
29045+ const struct ata_port_operations *ops = ap->ops;
29046 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
29047 struct ata_link *pmp_link = &ap->link;
29048 struct ata_device *pmp_dev = pmp_link->device;
29049diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
29050index d8f35fe..288180a 100644
29051--- a/drivers/ata/pata_acpi.c
29052+++ b/drivers/ata/pata_acpi.c
29053@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
29054 ATA_BMDMA_SHT(DRV_NAME),
29055 };
29056
29057-static struct ata_port_operations pacpi_ops = {
29058+static const struct ata_port_operations pacpi_ops = {
29059 .inherits = &ata_bmdma_port_ops,
29060 .qc_issue = pacpi_qc_issue,
29061 .cable_detect = pacpi_cable_detect,
29062diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
29063index 9434114..1f2f364 100644
29064--- a/drivers/ata/pata_ali.c
29065+++ b/drivers/ata/pata_ali.c
29066@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
29067 * Port operations for PIO only ALi
29068 */
29069
29070-static struct ata_port_operations ali_early_port_ops = {
29071+static const struct ata_port_operations ali_early_port_ops = {
29072 .inherits = &ata_sff_port_ops,
29073 .cable_detect = ata_cable_40wire,
29074 .set_piomode = ali_set_piomode,
29075@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
29076 * Port operations for DMA capable ALi without cable
29077 * detect
29078 */
29079-static struct ata_port_operations ali_20_port_ops = {
29080+static const struct ata_port_operations ali_20_port_ops = {
29081 .inherits = &ali_dma_base_ops,
29082 .cable_detect = ata_cable_40wire,
29083 .mode_filter = ali_20_filter,
29084@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
29085 /*
29086 * Port operations for DMA capable ALi with cable detect
29087 */
29088-static struct ata_port_operations ali_c2_port_ops = {
29089+static const struct ata_port_operations ali_c2_port_ops = {
29090 .inherits = &ali_dma_base_ops,
29091 .check_atapi_dma = ali_check_atapi_dma,
29092 .cable_detect = ali_c2_cable_detect,
29093@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
29094 /*
29095 * Port operations for DMA capable ALi with cable detect
29096 */
29097-static struct ata_port_operations ali_c4_port_ops = {
29098+static const struct ata_port_operations ali_c4_port_ops = {
29099 .inherits = &ali_dma_base_ops,
29100 .check_atapi_dma = ali_check_atapi_dma,
29101 .cable_detect = ali_c2_cable_detect,
29102@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
29103 /*
29104 * Port operations for DMA capable ALi with cable detect and LBA48
29105 */
29106-static struct ata_port_operations ali_c5_port_ops = {
29107+static const struct ata_port_operations ali_c5_port_ops = {
29108 .inherits = &ali_dma_base_ops,
29109 .check_atapi_dma = ali_check_atapi_dma,
29110 .dev_config = ali_warn_atapi_dma,
29111diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
29112index 567f3f7..c8ee0da 100644
29113--- a/drivers/ata/pata_amd.c
29114+++ b/drivers/ata/pata_amd.c
29115@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
29116 .prereset = amd_pre_reset,
29117 };
29118
29119-static struct ata_port_operations amd33_port_ops = {
29120+static const struct ata_port_operations amd33_port_ops = {
29121 .inherits = &amd_base_port_ops,
29122 .cable_detect = ata_cable_40wire,
29123 .set_piomode = amd33_set_piomode,
29124 .set_dmamode = amd33_set_dmamode,
29125 };
29126
29127-static struct ata_port_operations amd66_port_ops = {
29128+static const struct ata_port_operations amd66_port_ops = {
29129 .inherits = &amd_base_port_ops,
29130 .cable_detect = ata_cable_unknown,
29131 .set_piomode = amd66_set_piomode,
29132 .set_dmamode = amd66_set_dmamode,
29133 };
29134
29135-static struct ata_port_operations amd100_port_ops = {
29136+static const struct ata_port_operations amd100_port_ops = {
29137 .inherits = &amd_base_port_ops,
29138 .cable_detect = ata_cable_unknown,
29139 .set_piomode = amd100_set_piomode,
29140 .set_dmamode = amd100_set_dmamode,
29141 };
29142
29143-static struct ata_port_operations amd133_port_ops = {
29144+static const struct ata_port_operations amd133_port_ops = {
29145 .inherits = &amd_base_port_ops,
29146 .cable_detect = amd_cable_detect,
29147 .set_piomode = amd133_set_piomode,
29148@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
29149 .host_stop = nv_host_stop,
29150 };
29151
29152-static struct ata_port_operations nv100_port_ops = {
29153+static const struct ata_port_operations nv100_port_ops = {
29154 .inherits = &nv_base_port_ops,
29155 .set_piomode = nv100_set_piomode,
29156 .set_dmamode = nv100_set_dmamode,
29157 };
29158
29159-static struct ata_port_operations nv133_port_ops = {
29160+static const struct ata_port_operations nv133_port_ops = {
29161 .inherits = &nv_base_port_ops,
29162 .set_piomode = nv133_set_piomode,
29163 .set_dmamode = nv133_set_dmamode,
29164diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
29165index d332cfd..4b7eaae 100644
29166--- a/drivers/ata/pata_artop.c
29167+++ b/drivers/ata/pata_artop.c
29168@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
29169 ATA_BMDMA_SHT(DRV_NAME),
29170 };
29171
29172-static struct ata_port_operations artop6210_ops = {
29173+static const struct ata_port_operations artop6210_ops = {
29174 .inherits = &ata_bmdma_port_ops,
29175 .cable_detect = ata_cable_40wire,
29176 .set_piomode = artop6210_set_piomode,
29177@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
29178 .qc_defer = artop6210_qc_defer,
29179 };
29180
29181-static struct ata_port_operations artop6260_ops = {
29182+static const struct ata_port_operations artop6260_ops = {
29183 .inherits = &ata_bmdma_port_ops,
29184 .cable_detect = artop6260_cable_detect,
29185 .set_piomode = artop6260_set_piomode,
29186diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
29187index 5c129f9..7bb7ccb 100644
29188--- a/drivers/ata/pata_at32.c
29189+++ b/drivers/ata/pata_at32.c
29190@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
29191 ATA_PIO_SHT(DRV_NAME),
29192 };
29193
29194-static struct ata_port_operations at32_port_ops = {
29195+static const struct ata_port_operations at32_port_ops = {
29196 .inherits = &ata_sff_port_ops,
29197 .cable_detect = ata_cable_40wire,
29198 .set_piomode = pata_at32_set_piomode,
29199diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
29200index 41c94b1..829006d 100644
29201--- a/drivers/ata/pata_at91.c
29202+++ b/drivers/ata/pata_at91.c
29203@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
29204 ATA_PIO_SHT(DRV_NAME),
29205 };
29206
29207-static struct ata_port_operations pata_at91_port_ops = {
29208+static const struct ata_port_operations pata_at91_port_ops = {
29209 .inherits = &ata_sff_port_ops,
29210
29211 .sff_data_xfer = pata_at91_data_xfer_noirq,
29212diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
29213index ae4454d..d391eb4 100644
29214--- a/drivers/ata/pata_atiixp.c
29215+++ b/drivers/ata/pata_atiixp.c
29216@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
29217 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29218 };
29219
29220-static struct ata_port_operations atiixp_port_ops = {
29221+static const struct ata_port_operations atiixp_port_ops = {
29222 .inherits = &ata_bmdma_port_ops,
29223
29224 .qc_prep = ata_sff_dumb_qc_prep,
29225diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
29226index 6fe7ded..2a425dc 100644
29227--- a/drivers/ata/pata_atp867x.c
29228+++ b/drivers/ata/pata_atp867x.c
29229@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
29230 ATA_BMDMA_SHT(DRV_NAME),
29231 };
29232
29233-static struct ata_port_operations atp867x_ops = {
29234+static const struct ata_port_operations atp867x_ops = {
29235 .inherits = &ata_bmdma_port_ops,
29236 .cable_detect = atp867x_cable_detect,
29237 .set_piomode = atp867x_set_piomode,
29238diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
29239index c4b47a3..b27a367 100644
29240--- a/drivers/ata/pata_bf54x.c
29241+++ b/drivers/ata/pata_bf54x.c
29242@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
29243 .dma_boundary = ATA_DMA_BOUNDARY,
29244 };
29245
29246-static struct ata_port_operations bfin_pata_ops = {
29247+static const struct ata_port_operations bfin_pata_ops = {
29248 .inherits = &ata_sff_port_ops,
29249
29250 .set_piomode = bfin_set_piomode,
29251diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
29252index 5acf9fa..84248be 100644
29253--- a/drivers/ata/pata_cmd640.c
29254+++ b/drivers/ata/pata_cmd640.c
29255@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
29256 ATA_BMDMA_SHT(DRV_NAME),
29257 };
29258
29259-static struct ata_port_operations cmd640_port_ops = {
29260+static const struct ata_port_operations cmd640_port_ops = {
29261 .inherits = &ata_bmdma_port_ops,
29262 /* In theory xfer_noirq is not needed once we kill the prefetcher */
29263 .sff_data_xfer = ata_sff_data_xfer_noirq,
29264diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
29265index ccd2694..c869c3d 100644
29266--- a/drivers/ata/pata_cmd64x.c
29267+++ b/drivers/ata/pata_cmd64x.c
29268@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
29269 .set_dmamode = cmd64x_set_dmamode,
29270 };
29271
29272-static struct ata_port_operations cmd64x_port_ops = {
29273+static const struct ata_port_operations cmd64x_port_ops = {
29274 .inherits = &cmd64x_base_ops,
29275 .cable_detect = ata_cable_40wire,
29276 };
29277
29278-static struct ata_port_operations cmd646r1_port_ops = {
29279+static const struct ata_port_operations cmd646r1_port_ops = {
29280 .inherits = &cmd64x_base_ops,
29281 .bmdma_stop = cmd646r1_bmdma_stop,
29282 .cable_detect = ata_cable_40wire,
29283 };
29284
29285-static struct ata_port_operations cmd648_port_ops = {
29286+static const struct ata_port_operations cmd648_port_ops = {
29287 .inherits = &cmd64x_base_ops,
29288 .bmdma_stop = cmd648_bmdma_stop,
29289 .cable_detect = cmd648_cable_detect,
29290diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
29291index 0df83cf..d7595b0 100644
29292--- a/drivers/ata/pata_cs5520.c
29293+++ b/drivers/ata/pata_cs5520.c
29294@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
29295 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29296 };
29297
29298-static struct ata_port_operations cs5520_port_ops = {
29299+static const struct ata_port_operations cs5520_port_ops = {
29300 .inherits = &ata_bmdma_port_ops,
29301 .qc_prep = ata_sff_dumb_qc_prep,
29302 .cable_detect = ata_cable_40wire,
29303diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
29304index c974b05..6d26b11 100644
29305--- a/drivers/ata/pata_cs5530.c
29306+++ b/drivers/ata/pata_cs5530.c
29307@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
29308 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
29309 };
29310
29311-static struct ata_port_operations cs5530_port_ops = {
29312+static const struct ata_port_operations cs5530_port_ops = {
29313 .inherits = &ata_bmdma_port_ops,
29314
29315 .qc_prep = ata_sff_dumb_qc_prep,
29316diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
29317index 403f561..aacd26b 100644
29318--- a/drivers/ata/pata_cs5535.c
29319+++ b/drivers/ata/pata_cs5535.c
29320@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
29321 ATA_BMDMA_SHT(DRV_NAME),
29322 };
29323
29324-static struct ata_port_operations cs5535_port_ops = {
29325+static const struct ata_port_operations cs5535_port_ops = {
29326 .inherits = &ata_bmdma_port_ops,
29327 .cable_detect = cs5535_cable_detect,
29328 .set_piomode = cs5535_set_piomode,
29329diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
29330index 6da4cb4..de24a25 100644
29331--- a/drivers/ata/pata_cs5536.c
29332+++ b/drivers/ata/pata_cs5536.c
29333@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
29334 ATA_BMDMA_SHT(DRV_NAME),
29335 };
29336
29337-static struct ata_port_operations cs5536_port_ops = {
29338+static const struct ata_port_operations cs5536_port_ops = {
29339 .inherits = &ata_bmdma_port_ops,
29340 .cable_detect = cs5536_cable_detect,
29341 .set_piomode = cs5536_set_piomode,
29342diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
29343index 8fb040b..b16a9c9 100644
29344--- a/drivers/ata/pata_cypress.c
29345+++ b/drivers/ata/pata_cypress.c
29346@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
29347 ATA_BMDMA_SHT(DRV_NAME),
29348 };
29349
29350-static struct ata_port_operations cy82c693_port_ops = {
29351+static const struct ata_port_operations cy82c693_port_ops = {
29352 .inherits = &ata_bmdma_port_ops,
29353 .cable_detect = ata_cable_40wire,
29354 .set_piomode = cy82c693_set_piomode,
29355diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
29356index 2a6412f..555ee11 100644
29357--- a/drivers/ata/pata_efar.c
29358+++ b/drivers/ata/pata_efar.c
29359@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
29360 ATA_BMDMA_SHT(DRV_NAME),
29361 };
29362
29363-static struct ata_port_operations efar_ops = {
29364+static const struct ata_port_operations efar_ops = {
29365 .inherits = &ata_bmdma_port_ops,
29366 .cable_detect = efar_cable_detect,
29367 .set_piomode = efar_set_piomode,
29368diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
29369index b9d8836..0b92030 100644
29370--- a/drivers/ata/pata_hpt366.c
29371+++ b/drivers/ata/pata_hpt366.c
29372@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
29373 * Configuration for HPT366/68
29374 */
29375
29376-static struct ata_port_operations hpt366_port_ops = {
29377+static const struct ata_port_operations hpt366_port_ops = {
29378 .inherits = &ata_bmdma_port_ops,
29379 .cable_detect = hpt36x_cable_detect,
29380 .mode_filter = hpt366_filter,
29381diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
29382index 5af7f19..00c4980 100644
29383--- a/drivers/ata/pata_hpt37x.c
29384+++ b/drivers/ata/pata_hpt37x.c
29385@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
29386 * Configuration for HPT370
29387 */
29388
29389-static struct ata_port_operations hpt370_port_ops = {
29390+static const struct ata_port_operations hpt370_port_ops = {
29391 .inherits = &ata_bmdma_port_ops,
29392
29393 .bmdma_stop = hpt370_bmdma_stop,
29394@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
29395 * Configuration for HPT370A. Close to 370 but less filters
29396 */
29397
29398-static struct ata_port_operations hpt370a_port_ops = {
29399+static const struct ata_port_operations hpt370a_port_ops = {
29400 .inherits = &hpt370_port_ops,
29401 .mode_filter = hpt370a_filter,
29402 };
29403@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
29404 * and DMA mode setting functionality.
29405 */
29406
29407-static struct ata_port_operations hpt372_port_ops = {
29408+static const struct ata_port_operations hpt372_port_ops = {
29409 .inherits = &ata_bmdma_port_ops,
29410
29411 .bmdma_stop = hpt37x_bmdma_stop,
29412@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
29413 * but we have a different cable detection procedure for function 1.
29414 */
29415
29416-static struct ata_port_operations hpt374_fn1_port_ops = {
29417+static const struct ata_port_operations hpt374_fn1_port_ops = {
29418 .inherits = &hpt372_port_ops,
29419 .prereset = hpt374_fn1_pre_reset,
29420 };
29421diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
29422index 100f227..2e39382 100644
29423--- a/drivers/ata/pata_hpt3x2n.c
29424+++ b/drivers/ata/pata_hpt3x2n.c
29425@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
29426 * Configuration for HPT3x2n.
29427 */
29428
29429-static struct ata_port_operations hpt3x2n_port_ops = {
29430+static const struct ata_port_operations hpt3x2n_port_ops = {
29431 .inherits = &ata_bmdma_port_ops,
29432
29433 .bmdma_stop = hpt3x2n_bmdma_stop,
29434diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
29435index 7e31025..6fca8f4 100644
29436--- a/drivers/ata/pata_hpt3x3.c
29437+++ b/drivers/ata/pata_hpt3x3.c
29438@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
29439 ATA_BMDMA_SHT(DRV_NAME),
29440 };
29441
29442-static struct ata_port_operations hpt3x3_port_ops = {
29443+static const struct ata_port_operations hpt3x3_port_ops = {
29444 .inherits = &ata_bmdma_port_ops,
29445 .cable_detect = ata_cable_40wire,
29446 .set_piomode = hpt3x3_set_piomode,
29447diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
29448index b663b7f..9a26c2a 100644
29449--- a/drivers/ata/pata_icside.c
29450+++ b/drivers/ata/pata_icside.c
29451@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
29452 }
29453 }
29454
29455-static struct ata_port_operations pata_icside_port_ops = {
29456+static const struct ata_port_operations pata_icside_port_ops = {
29457 .inherits = &ata_sff_port_ops,
29458 /* no need to build any PRD tables for DMA */
29459 .qc_prep = ata_noop_qc_prep,
29460diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
29461index 4bceb88..457dfb6 100644
29462--- a/drivers/ata/pata_isapnp.c
29463+++ b/drivers/ata/pata_isapnp.c
29464@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
29465 ATA_PIO_SHT(DRV_NAME),
29466 };
29467
29468-static struct ata_port_operations isapnp_port_ops = {
29469+static const struct ata_port_operations isapnp_port_ops = {
29470 .inherits = &ata_sff_port_ops,
29471 .cable_detect = ata_cable_40wire,
29472 };
29473
29474-static struct ata_port_operations isapnp_noalt_port_ops = {
29475+static const struct ata_port_operations isapnp_noalt_port_ops = {
29476 .inherits = &ata_sff_port_ops,
29477 .cable_detect = ata_cable_40wire,
29478 /* No altstatus so we don't want to use the lost interrupt poll */
29479diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
29480index f156da8..24976e2 100644
29481--- a/drivers/ata/pata_it8213.c
29482+++ b/drivers/ata/pata_it8213.c
29483@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
29484 };
29485
29486
29487-static struct ata_port_operations it8213_ops = {
29488+static const struct ata_port_operations it8213_ops = {
29489 .inherits = &ata_bmdma_port_ops,
29490 .cable_detect = it8213_cable_detect,
29491 .set_piomode = it8213_set_piomode,
29492diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
29493index 188bc2f..ca9e785 100644
29494--- a/drivers/ata/pata_it821x.c
29495+++ b/drivers/ata/pata_it821x.c
29496@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
29497 ATA_BMDMA_SHT(DRV_NAME),
29498 };
29499
29500-static struct ata_port_operations it821x_smart_port_ops = {
29501+static const struct ata_port_operations it821x_smart_port_ops = {
29502 .inherits = &ata_bmdma_port_ops,
29503
29504 .check_atapi_dma= it821x_check_atapi_dma,
29505@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
29506 .port_start = it821x_port_start,
29507 };
29508
29509-static struct ata_port_operations it821x_passthru_port_ops = {
29510+static const struct ata_port_operations it821x_passthru_port_ops = {
29511 .inherits = &ata_bmdma_port_ops,
29512
29513 .check_atapi_dma= it821x_check_atapi_dma,
29514@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
29515 .port_start = it821x_port_start,
29516 };
29517
29518-static struct ata_port_operations it821x_rdc_port_ops = {
29519+static const struct ata_port_operations it821x_rdc_port_ops = {
29520 .inherits = &ata_bmdma_port_ops,
29521
29522 .check_atapi_dma= it821x_check_atapi_dma,
29523diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
29524index ba54b08..4b952b7 100644
29525--- a/drivers/ata/pata_ixp4xx_cf.c
29526+++ b/drivers/ata/pata_ixp4xx_cf.c
29527@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
29528 ATA_PIO_SHT(DRV_NAME),
29529 };
29530
29531-static struct ata_port_operations ixp4xx_port_ops = {
29532+static const struct ata_port_operations ixp4xx_port_ops = {
29533 .inherits = &ata_sff_port_ops,
29534 .sff_data_xfer = ixp4xx_mmio_data_xfer,
29535 .cable_detect = ata_cable_40wire,
29536diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
29537index 3a1474a..434b0ff 100644
29538--- a/drivers/ata/pata_jmicron.c
29539+++ b/drivers/ata/pata_jmicron.c
29540@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
29541 ATA_BMDMA_SHT(DRV_NAME),
29542 };
29543
29544-static struct ata_port_operations jmicron_ops = {
29545+static const struct ata_port_operations jmicron_ops = {
29546 .inherits = &ata_bmdma_port_ops,
29547 .prereset = jmicron_pre_reset,
29548 };
29549diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
29550index 6932e56..220e71d 100644
29551--- a/drivers/ata/pata_legacy.c
29552+++ b/drivers/ata/pata_legacy.c
29553@@ -106,7 +106,7 @@ struct legacy_probe {
29554
29555 struct legacy_controller {
29556 const char *name;
29557- struct ata_port_operations *ops;
29558+ const struct ata_port_operations *ops;
29559 unsigned int pio_mask;
29560 unsigned int flags;
29561 unsigned int pflags;
29562@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
29563 * pio_mask as well.
29564 */
29565
29566-static struct ata_port_operations simple_port_ops = {
29567+static const struct ata_port_operations simple_port_ops = {
29568 .inherits = &legacy_base_port_ops,
29569 .sff_data_xfer = ata_sff_data_xfer_noirq,
29570 };
29571
29572-static struct ata_port_operations legacy_port_ops = {
29573+static const struct ata_port_operations legacy_port_ops = {
29574 .inherits = &legacy_base_port_ops,
29575 .sff_data_xfer = ata_sff_data_xfer_noirq,
29576 .set_mode = legacy_set_mode,
29577@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
29578 return buflen;
29579 }
29580
29581-static struct ata_port_operations pdc20230_port_ops = {
29582+static const struct ata_port_operations pdc20230_port_ops = {
29583 .inherits = &legacy_base_port_ops,
29584 .set_piomode = pdc20230_set_piomode,
29585 .sff_data_xfer = pdc_data_xfer_vlb,
29586@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
29587 ioread8(ap->ioaddr.status_addr);
29588 }
29589
29590-static struct ata_port_operations ht6560a_port_ops = {
29591+static const struct ata_port_operations ht6560a_port_ops = {
29592 .inherits = &legacy_base_port_ops,
29593 .set_piomode = ht6560a_set_piomode,
29594 };
29595@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
29596 ioread8(ap->ioaddr.status_addr);
29597 }
29598
29599-static struct ata_port_operations ht6560b_port_ops = {
29600+static const struct ata_port_operations ht6560b_port_ops = {
29601 .inherits = &legacy_base_port_ops,
29602 .set_piomode = ht6560b_set_piomode,
29603 };
29604@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
29605 }
29606
29607
29608-static struct ata_port_operations opti82c611a_port_ops = {
29609+static const struct ata_port_operations opti82c611a_port_ops = {
29610 .inherits = &legacy_base_port_ops,
29611 .set_piomode = opti82c611a_set_piomode,
29612 };
29613@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
29614 return ata_sff_qc_issue(qc);
29615 }
29616
29617-static struct ata_port_operations opti82c46x_port_ops = {
29618+static const struct ata_port_operations opti82c46x_port_ops = {
29619 .inherits = &legacy_base_port_ops,
29620 .set_piomode = opti82c46x_set_piomode,
29621 .qc_issue = opti82c46x_qc_issue,
29622@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
29623 return 0;
29624 }
29625
29626-static struct ata_port_operations qdi6500_port_ops = {
29627+static const struct ata_port_operations qdi6500_port_ops = {
29628 .inherits = &legacy_base_port_ops,
29629 .set_piomode = qdi6500_set_piomode,
29630 .qc_issue = qdi_qc_issue,
29631 .sff_data_xfer = vlb32_data_xfer,
29632 };
29633
29634-static struct ata_port_operations qdi6580_port_ops = {
29635+static const struct ata_port_operations qdi6580_port_ops = {
29636 .inherits = &legacy_base_port_ops,
29637 .set_piomode = qdi6580_set_piomode,
29638 .sff_data_xfer = vlb32_data_xfer,
29639 };
29640
29641-static struct ata_port_operations qdi6580dp_port_ops = {
29642+static const struct ata_port_operations qdi6580dp_port_ops = {
29643 .inherits = &legacy_base_port_ops,
29644 .set_piomode = qdi6580dp_set_piomode,
29645 .sff_data_xfer = vlb32_data_xfer,
29646@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
29647 return 0;
29648 }
29649
29650-static struct ata_port_operations winbond_port_ops = {
29651+static const struct ata_port_operations winbond_port_ops = {
29652 .inherits = &legacy_base_port_ops,
29653 .set_piomode = winbond_set_piomode,
29654 .sff_data_xfer = vlb32_data_xfer,
29655@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
29656 int pio_modes = controller->pio_mask;
29657 unsigned long io = probe->port;
29658 u32 mask = (1 << probe->slot);
29659- struct ata_port_operations *ops = controller->ops;
29660+ const struct ata_port_operations *ops = controller->ops;
29661 struct legacy_data *ld = &legacy_data[probe->slot];
29662 struct ata_host *host = NULL;
29663 struct ata_port *ap;
29664diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
29665index 2096fb7..4d090fc 100644
29666--- a/drivers/ata/pata_marvell.c
29667+++ b/drivers/ata/pata_marvell.c
29668@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
29669 ATA_BMDMA_SHT(DRV_NAME),
29670 };
29671
29672-static struct ata_port_operations marvell_ops = {
29673+static const struct ata_port_operations marvell_ops = {
29674 .inherits = &ata_bmdma_port_ops,
29675 .cable_detect = marvell_cable_detect,
29676 .prereset = marvell_pre_reset,
29677diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
29678index 99d41be..7d56aa8 100644
29679--- a/drivers/ata/pata_mpc52xx.c
29680+++ b/drivers/ata/pata_mpc52xx.c
29681@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
29682 ATA_PIO_SHT(DRV_NAME),
29683 };
29684
29685-static struct ata_port_operations mpc52xx_ata_port_ops = {
29686+static const struct ata_port_operations mpc52xx_ata_port_ops = {
29687 .inherits = &ata_bmdma_port_ops,
29688 .sff_dev_select = mpc52xx_ata_dev_select,
29689 .set_piomode = mpc52xx_ata_set_piomode,
29690diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
29691index b21f002..0a27e7f 100644
29692--- a/drivers/ata/pata_mpiix.c
29693+++ b/drivers/ata/pata_mpiix.c
29694@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
29695 ATA_PIO_SHT(DRV_NAME),
29696 };
29697
29698-static struct ata_port_operations mpiix_port_ops = {
29699+static const struct ata_port_operations mpiix_port_ops = {
29700 .inherits = &ata_sff_port_ops,
29701 .qc_issue = mpiix_qc_issue,
29702 .cable_detect = ata_cable_40wire,
29703diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
29704index f0d52f7..89c3be3 100644
29705--- a/drivers/ata/pata_netcell.c
29706+++ b/drivers/ata/pata_netcell.c
29707@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
29708 ATA_BMDMA_SHT(DRV_NAME),
29709 };
29710
29711-static struct ata_port_operations netcell_ops = {
29712+static const struct ata_port_operations netcell_ops = {
29713 .inherits = &ata_bmdma_port_ops,
29714 .cable_detect = ata_cable_80wire,
29715 .read_id = netcell_read_id,
29716diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
29717index dd53a66..a3f4317 100644
29718--- a/drivers/ata/pata_ninja32.c
29719+++ b/drivers/ata/pata_ninja32.c
29720@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
29721 ATA_BMDMA_SHT(DRV_NAME),
29722 };
29723
29724-static struct ata_port_operations ninja32_port_ops = {
29725+static const struct ata_port_operations ninja32_port_ops = {
29726 .inherits = &ata_bmdma_port_ops,
29727 .sff_dev_select = ninja32_dev_select,
29728 .cable_detect = ata_cable_40wire,
29729diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
29730index ca53fac..9aa93ef 100644
29731--- a/drivers/ata/pata_ns87410.c
29732+++ b/drivers/ata/pata_ns87410.c
29733@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
29734 ATA_PIO_SHT(DRV_NAME),
29735 };
29736
29737-static struct ata_port_operations ns87410_port_ops = {
29738+static const struct ata_port_operations ns87410_port_ops = {
29739 .inherits = &ata_sff_port_ops,
29740 .qc_issue = ns87410_qc_issue,
29741 .cable_detect = ata_cable_40wire,
29742diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
29743index 773b159..55f454e 100644
29744--- a/drivers/ata/pata_ns87415.c
29745+++ b/drivers/ata/pata_ns87415.c
29746@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
29747 }
29748 #endif /* 87560 SuperIO Support */
29749
29750-static struct ata_port_operations ns87415_pata_ops = {
29751+static const struct ata_port_operations ns87415_pata_ops = {
29752 .inherits = &ata_bmdma_port_ops,
29753
29754 .check_atapi_dma = ns87415_check_atapi_dma,
29755@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
29756 };
29757
29758 #if defined(CONFIG_SUPERIO)
29759-static struct ata_port_operations ns87560_pata_ops = {
29760+static const struct ata_port_operations ns87560_pata_ops = {
29761 .inherits = &ns87415_pata_ops,
29762 .sff_tf_read = ns87560_tf_read,
29763 .sff_check_status = ns87560_check_status,
29764diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
29765index d6f6956..639295b 100644
29766--- a/drivers/ata/pata_octeon_cf.c
29767+++ b/drivers/ata/pata_octeon_cf.c
29768@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
29769 return 0;
29770 }
29771
29772+/* cannot be const */
29773 static struct ata_port_operations octeon_cf_ops = {
29774 .inherits = &ata_sff_port_ops,
29775 .check_atapi_dma = octeon_cf_check_atapi_dma,
29776diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
29777index 84ac503..adee1cd 100644
29778--- a/drivers/ata/pata_oldpiix.c
29779+++ b/drivers/ata/pata_oldpiix.c
29780@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
29781 ATA_BMDMA_SHT(DRV_NAME),
29782 };
29783
29784-static struct ata_port_operations oldpiix_pata_ops = {
29785+static const struct ata_port_operations oldpiix_pata_ops = {
29786 .inherits = &ata_bmdma_port_ops,
29787 .qc_issue = oldpiix_qc_issue,
29788 .cable_detect = ata_cable_40wire,
29789diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
29790index 99eddda..3a4c0aa 100644
29791--- a/drivers/ata/pata_opti.c
29792+++ b/drivers/ata/pata_opti.c
29793@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
29794 ATA_PIO_SHT(DRV_NAME),
29795 };
29796
29797-static struct ata_port_operations opti_port_ops = {
29798+static const struct ata_port_operations opti_port_ops = {
29799 .inherits = &ata_sff_port_ops,
29800 .cable_detect = ata_cable_40wire,
29801 .set_piomode = opti_set_piomode,
29802diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
29803index 86885a4..8e9968d 100644
29804--- a/drivers/ata/pata_optidma.c
29805+++ b/drivers/ata/pata_optidma.c
29806@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
29807 ATA_BMDMA_SHT(DRV_NAME),
29808 };
29809
29810-static struct ata_port_operations optidma_port_ops = {
29811+static const struct ata_port_operations optidma_port_ops = {
29812 .inherits = &ata_bmdma_port_ops,
29813 .cable_detect = ata_cable_40wire,
29814 .set_piomode = optidma_set_pio_mode,
29815@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
29816 .prereset = optidma_pre_reset,
29817 };
29818
29819-static struct ata_port_operations optiplus_port_ops = {
29820+static const struct ata_port_operations optiplus_port_ops = {
29821 .inherits = &optidma_port_ops,
29822 .set_piomode = optiplus_set_pio_mode,
29823 .set_dmamode = optiplus_set_dma_mode,
29824diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
29825index 11fb4cc..1a14022 100644
29826--- a/drivers/ata/pata_palmld.c
29827+++ b/drivers/ata/pata_palmld.c
29828@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
29829 ATA_PIO_SHT(DRV_NAME),
29830 };
29831
29832-static struct ata_port_operations palmld_port_ops = {
29833+static const struct ata_port_operations palmld_port_ops = {
29834 .inherits = &ata_sff_port_ops,
29835 .sff_data_xfer = ata_sff_data_xfer_noirq,
29836 .cable_detect = ata_cable_40wire,
29837diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
29838index dc99e26..7f4b1e4 100644
29839--- a/drivers/ata/pata_pcmcia.c
29840+++ b/drivers/ata/pata_pcmcia.c
29841@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
29842 ATA_PIO_SHT(DRV_NAME),
29843 };
29844
29845-static struct ata_port_operations pcmcia_port_ops = {
29846+static const struct ata_port_operations pcmcia_port_ops = {
29847 .inherits = &ata_sff_port_ops,
29848 .sff_data_xfer = ata_sff_data_xfer_noirq,
29849 .cable_detect = ata_cable_40wire,
29850 .set_mode = pcmcia_set_mode,
29851 };
29852
29853-static struct ata_port_operations pcmcia_8bit_port_ops = {
29854+static const struct ata_port_operations pcmcia_8bit_port_ops = {
29855 .inherits = &ata_sff_port_ops,
29856 .sff_data_xfer = ata_data_xfer_8bit,
29857 .cable_detect = ata_cable_40wire,
29858@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
29859 unsigned long io_base, ctl_base;
29860 void __iomem *io_addr, *ctl_addr;
29861 int n_ports = 1;
29862- struct ata_port_operations *ops = &pcmcia_port_ops;
29863+ const struct ata_port_operations *ops = &pcmcia_port_ops;
29864
29865 info = kzalloc(sizeof(*info), GFP_KERNEL);
29866 if (info == NULL)
29867diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
29868index ca5cad0..3a1f125 100644
29869--- a/drivers/ata/pata_pdc2027x.c
29870+++ b/drivers/ata/pata_pdc2027x.c
29871@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
29872 ATA_BMDMA_SHT(DRV_NAME),
29873 };
29874
29875-static struct ata_port_operations pdc2027x_pata100_ops = {
29876+static const struct ata_port_operations pdc2027x_pata100_ops = {
29877 .inherits = &ata_bmdma_port_ops,
29878 .check_atapi_dma = pdc2027x_check_atapi_dma,
29879 .cable_detect = pdc2027x_cable_detect,
29880 .prereset = pdc2027x_prereset,
29881 };
29882
29883-static struct ata_port_operations pdc2027x_pata133_ops = {
29884+static const struct ata_port_operations pdc2027x_pata133_ops = {
29885 .inherits = &pdc2027x_pata100_ops,
29886 .mode_filter = pdc2027x_mode_filter,
29887 .set_piomode = pdc2027x_set_piomode,
29888diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
29889index 2911120..4bf62aa 100644
29890--- a/drivers/ata/pata_pdc202xx_old.c
29891+++ b/drivers/ata/pata_pdc202xx_old.c
29892@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
29893 ATA_BMDMA_SHT(DRV_NAME),
29894 };
29895
29896-static struct ata_port_operations pdc2024x_port_ops = {
29897+static const struct ata_port_operations pdc2024x_port_ops = {
29898 .inherits = &ata_bmdma_port_ops,
29899
29900 .cable_detect = ata_cable_40wire,
29901@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
29902 .sff_exec_command = pdc202xx_exec_command,
29903 };
29904
29905-static struct ata_port_operations pdc2026x_port_ops = {
29906+static const struct ata_port_operations pdc2026x_port_ops = {
29907 .inherits = &pdc2024x_port_ops,
29908
29909 .check_atapi_dma = pdc2026x_check_atapi_dma,
29910diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
29911index 3f6ebc6..a18c358 100644
29912--- a/drivers/ata/pata_platform.c
29913+++ b/drivers/ata/pata_platform.c
29914@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
29915 ATA_PIO_SHT(DRV_NAME),
29916 };
29917
29918-static struct ata_port_operations pata_platform_port_ops = {
29919+static const struct ata_port_operations pata_platform_port_ops = {
29920 .inherits = &ata_sff_port_ops,
29921 .sff_data_xfer = ata_sff_data_xfer_noirq,
29922 .cable_detect = ata_cable_unknown,
29923diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
29924index 45879dc..165a9f9 100644
29925--- a/drivers/ata/pata_qdi.c
29926+++ b/drivers/ata/pata_qdi.c
29927@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
29928 ATA_PIO_SHT(DRV_NAME),
29929 };
29930
29931-static struct ata_port_operations qdi6500_port_ops = {
29932+static const struct ata_port_operations qdi6500_port_ops = {
29933 .inherits = &ata_sff_port_ops,
29934 .qc_issue = qdi_qc_issue,
29935 .sff_data_xfer = qdi_data_xfer,
29936@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
29937 .set_piomode = qdi6500_set_piomode,
29938 };
29939
29940-static struct ata_port_operations qdi6580_port_ops = {
29941+static const struct ata_port_operations qdi6580_port_ops = {
29942 .inherits = &qdi6500_port_ops,
29943 .set_piomode = qdi6580_set_piomode,
29944 };
29945diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
29946index 4401b33..716c5cc 100644
29947--- a/drivers/ata/pata_radisys.c
29948+++ b/drivers/ata/pata_radisys.c
29949@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
29950 ATA_BMDMA_SHT(DRV_NAME),
29951 };
29952
29953-static struct ata_port_operations radisys_pata_ops = {
29954+static const struct ata_port_operations radisys_pata_ops = {
29955 .inherits = &ata_bmdma_port_ops,
29956 .qc_issue = radisys_qc_issue,
29957 .cable_detect = ata_cable_unknown,
29958diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
29959index 45f1e10..fab6bca 100644
29960--- a/drivers/ata/pata_rb532_cf.c
29961+++ b/drivers/ata/pata_rb532_cf.c
29962@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
29963 return IRQ_HANDLED;
29964 }
29965
29966-static struct ata_port_operations rb532_pata_port_ops = {
29967+static const struct ata_port_operations rb532_pata_port_ops = {
29968 .inherits = &ata_sff_port_ops,
29969 .sff_data_xfer = ata_sff_data_xfer32,
29970 };
29971diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
29972index c843a1e..b5853c3 100644
29973--- a/drivers/ata/pata_rdc.c
29974+++ b/drivers/ata/pata_rdc.c
29975@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
29976 pci_write_config_byte(dev, 0x48, udma_enable);
29977 }
29978
29979-static struct ata_port_operations rdc_pata_ops = {
29980+static const struct ata_port_operations rdc_pata_ops = {
29981 .inherits = &ata_bmdma32_port_ops,
29982 .cable_detect = rdc_pata_cable_detect,
29983 .set_piomode = rdc_set_piomode,
29984diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
29985index a5e4dfe..080c8c9 100644
29986--- a/drivers/ata/pata_rz1000.c
29987+++ b/drivers/ata/pata_rz1000.c
29988@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
29989 ATA_PIO_SHT(DRV_NAME),
29990 };
29991
29992-static struct ata_port_operations rz1000_port_ops = {
29993+static const struct ata_port_operations rz1000_port_ops = {
29994 .inherits = &ata_sff_port_ops,
29995 .cable_detect = ata_cable_40wire,
29996 .set_mode = rz1000_set_mode,
29997diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
29998index 3bbed83..e309daf 100644
29999--- a/drivers/ata/pata_sc1200.c
30000+++ b/drivers/ata/pata_sc1200.c
30001@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
30002 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
30003 };
30004
30005-static struct ata_port_operations sc1200_port_ops = {
30006+static const struct ata_port_operations sc1200_port_ops = {
30007 .inherits = &ata_bmdma_port_ops,
30008 .qc_prep = ata_sff_dumb_qc_prep,
30009 .qc_issue = sc1200_qc_issue,
30010diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
30011index 4257d6b..4c1d9d5 100644
30012--- a/drivers/ata/pata_scc.c
30013+++ b/drivers/ata/pata_scc.c
30014@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
30015 ATA_BMDMA_SHT(DRV_NAME),
30016 };
30017
30018-static struct ata_port_operations scc_pata_ops = {
30019+static const struct ata_port_operations scc_pata_ops = {
30020 .inherits = &ata_bmdma_port_ops,
30021
30022 .set_piomode = scc_set_piomode,
30023diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
30024index 99cceb4..e2e0a87 100644
30025--- a/drivers/ata/pata_sch.c
30026+++ b/drivers/ata/pata_sch.c
30027@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
30028 ATA_BMDMA_SHT(DRV_NAME),
30029 };
30030
30031-static struct ata_port_operations sch_pata_ops = {
30032+static const struct ata_port_operations sch_pata_ops = {
30033 .inherits = &ata_bmdma_port_ops,
30034 .cable_detect = ata_cable_unknown,
30035 .set_piomode = sch_set_piomode,
30036diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
30037index beaed12..39969f1 100644
30038--- a/drivers/ata/pata_serverworks.c
30039+++ b/drivers/ata/pata_serverworks.c
30040@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
30041 ATA_BMDMA_SHT(DRV_NAME),
30042 };
30043
30044-static struct ata_port_operations serverworks_osb4_port_ops = {
30045+static const struct ata_port_operations serverworks_osb4_port_ops = {
30046 .inherits = &ata_bmdma_port_ops,
30047 .cable_detect = serverworks_cable_detect,
30048 .mode_filter = serverworks_osb4_filter,
30049@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
30050 .set_dmamode = serverworks_set_dmamode,
30051 };
30052
30053-static struct ata_port_operations serverworks_csb_port_ops = {
30054+static const struct ata_port_operations serverworks_csb_port_ops = {
30055 .inherits = &serverworks_osb4_port_ops,
30056 .mode_filter = serverworks_csb_filter,
30057 };
30058diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
30059index a2ace48..0463b44 100644
30060--- a/drivers/ata/pata_sil680.c
30061+++ b/drivers/ata/pata_sil680.c
30062@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
30063 ATA_BMDMA_SHT(DRV_NAME),
30064 };
30065
30066-static struct ata_port_operations sil680_port_ops = {
30067+static const struct ata_port_operations sil680_port_ops = {
30068 .inherits = &ata_bmdma32_port_ops,
30069 .cable_detect = sil680_cable_detect,
30070 .set_piomode = sil680_set_piomode,
30071diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
30072index 488e77b..b3724d5 100644
30073--- a/drivers/ata/pata_sis.c
30074+++ b/drivers/ata/pata_sis.c
30075@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
30076 ATA_BMDMA_SHT(DRV_NAME),
30077 };
30078
30079-static struct ata_port_operations sis_133_for_sata_ops = {
30080+static const struct ata_port_operations sis_133_for_sata_ops = {
30081 .inherits = &ata_bmdma_port_ops,
30082 .set_piomode = sis_133_set_piomode,
30083 .set_dmamode = sis_133_set_dmamode,
30084 .cable_detect = sis_133_cable_detect,
30085 };
30086
30087-static struct ata_port_operations sis_base_ops = {
30088+static const struct ata_port_operations sis_base_ops = {
30089 .inherits = &ata_bmdma_port_ops,
30090 .prereset = sis_pre_reset,
30091 };
30092
30093-static struct ata_port_operations sis_133_ops = {
30094+static const struct ata_port_operations sis_133_ops = {
30095 .inherits = &sis_base_ops,
30096 .set_piomode = sis_133_set_piomode,
30097 .set_dmamode = sis_133_set_dmamode,
30098 .cable_detect = sis_133_cable_detect,
30099 };
30100
30101-static struct ata_port_operations sis_133_early_ops = {
30102+static const struct ata_port_operations sis_133_early_ops = {
30103 .inherits = &sis_base_ops,
30104 .set_piomode = sis_100_set_piomode,
30105 .set_dmamode = sis_133_early_set_dmamode,
30106 .cable_detect = sis_66_cable_detect,
30107 };
30108
30109-static struct ata_port_operations sis_100_ops = {
30110+static const struct ata_port_operations sis_100_ops = {
30111 .inherits = &sis_base_ops,
30112 .set_piomode = sis_100_set_piomode,
30113 .set_dmamode = sis_100_set_dmamode,
30114 .cable_detect = sis_66_cable_detect,
30115 };
30116
30117-static struct ata_port_operations sis_66_ops = {
30118+static const struct ata_port_operations sis_66_ops = {
30119 .inherits = &sis_base_ops,
30120 .set_piomode = sis_old_set_piomode,
30121 .set_dmamode = sis_66_set_dmamode,
30122 .cable_detect = sis_66_cable_detect,
30123 };
30124
30125-static struct ata_port_operations sis_old_ops = {
30126+static const struct ata_port_operations sis_old_ops = {
30127 .inherits = &sis_base_ops,
30128 .set_piomode = sis_old_set_piomode,
30129 .set_dmamode = sis_old_set_dmamode,
30130diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
30131index 29f733c..43e9ca0 100644
30132--- a/drivers/ata/pata_sl82c105.c
30133+++ b/drivers/ata/pata_sl82c105.c
30134@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
30135 ATA_BMDMA_SHT(DRV_NAME),
30136 };
30137
30138-static struct ata_port_operations sl82c105_port_ops = {
30139+static const struct ata_port_operations sl82c105_port_ops = {
30140 .inherits = &ata_bmdma_port_ops,
30141 .qc_defer = sl82c105_qc_defer,
30142 .bmdma_start = sl82c105_bmdma_start,
30143diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
30144index f1f13ff..df39e99 100644
30145--- a/drivers/ata/pata_triflex.c
30146+++ b/drivers/ata/pata_triflex.c
30147@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
30148 ATA_BMDMA_SHT(DRV_NAME),
30149 };
30150
30151-static struct ata_port_operations triflex_port_ops = {
30152+static const struct ata_port_operations triflex_port_ops = {
30153 .inherits = &ata_bmdma_port_ops,
30154 .bmdma_start = triflex_bmdma_start,
30155 .bmdma_stop = triflex_bmdma_stop,
30156diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
30157index 1d73b8d..98a4b29 100644
30158--- a/drivers/ata/pata_via.c
30159+++ b/drivers/ata/pata_via.c
30160@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
30161 ATA_BMDMA_SHT(DRV_NAME),
30162 };
30163
30164-static struct ata_port_operations via_port_ops = {
30165+static const struct ata_port_operations via_port_ops = {
30166 .inherits = &ata_bmdma_port_ops,
30167 .cable_detect = via_cable_detect,
30168 .set_piomode = via_set_piomode,
30169@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
30170 .port_start = via_port_start,
30171 };
30172
30173-static struct ata_port_operations via_port_ops_noirq = {
30174+static const struct ata_port_operations via_port_ops_noirq = {
30175 .inherits = &via_port_ops,
30176 .sff_data_xfer = ata_sff_data_xfer_noirq,
30177 };
30178diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
30179index 6d8619b..ad511c4 100644
30180--- a/drivers/ata/pata_winbond.c
30181+++ b/drivers/ata/pata_winbond.c
30182@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
30183 ATA_PIO_SHT(DRV_NAME),
30184 };
30185
30186-static struct ata_port_operations winbond_port_ops = {
30187+static const struct ata_port_operations winbond_port_ops = {
30188 .inherits = &ata_sff_port_ops,
30189 .sff_data_xfer = winbond_data_xfer,
30190 .cable_detect = ata_cable_40wire,
30191diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
30192index 6c65b07..f996ec7 100644
30193--- a/drivers/ata/pdc_adma.c
30194+++ b/drivers/ata/pdc_adma.c
30195@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
30196 .dma_boundary = ADMA_DMA_BOUNDARY,
30197 };
30198
30199-static struct ata_port_operations adma_ata_ops = {
30200+static const struct ata_port_operations adma_ata_ops = {
30201 .inherits = &ata_sff_port_ops,
30202
30203 .lost_interrupt = ATA_OP_NULL,
30204diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
30205index 172b57e..c49bc1e 100644
30206--- a/drivers/ata/sata_fsl.c
30207+++ b/drivers/ata/sata_fsl.c
30208@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
30209 .dma_boundary = ATA_DMA_BOUNDARY,
30210 };
30211
30212-static struct ata_port_operations sata_fsl_ops = {
30213+static const struct ata_port_operations sata_fsl_ops = {
30214 .inherits = &sata_pmp_port_ops,
30215
30216 .qc_defer = ata_std_qc_defer,
30217diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
30218index 4406902..60603ef 100644
30219--- a/drivers/ata/sata_inic162x.c
30220+++ b/drivers/ata/sata_inic162x.c
30221@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
30222 return 0;
30223 }
30224
30225-static struct ata_port_operations inic_port_ops = {
30226+static const struct ata_port_operations inic_port_ops = {
30227 .inherits = &sata_port_ops,
30228
30229 .check_atapi_dma = inic_check_atapi_dma,
30230diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
30231index cf41126..8107be6 100644
30232--- a/drivers/ata/sata_mv.c
30233+++ b/drivers/ata/sata_mv.c
30234@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
30235 .dma_boundary = MV_DMA_BOUNDARY,
30236 };
30237
30238-static struct ata_port_operations mv5_ops = {
30239+static const struct ata_port_operations mv5_ops = {
30240 .inherits = &ata_sff_port_ops,
30241
30242 .lost_interrupt = ATA_OP_NULL,
30243@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
30244 .port_stop = mv_port_stop,
30245 };
30246
30247-static struct ata_port_operations mv6_ops = {
30248+static const struct ata_port_operations mv6_ops = {
30249 .inherits = &mv5_ops,
30250 .dev_config = mv6_dev_config,
30251 .scr_read = mv_scr_read,
30252@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
30253 .bmdma_status = mv_bmdma_status,
30254 };
30255
30256-static struct ata_port_operations mv_iie_ops = {
30257+static const struct ata_port_operations mv_iie_ops = {
30258 .inherits = &mv6_ops,
30259 .dev_config = ATA_OP_NULL,
30260 .qc_prep = mv_qc_prep_iie,
30261diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
30262index ae2297c..d5c9c33 100644
30263--- a/drivers/ata/sata_nv.c
30264+++ b/drivers/ata/sata_nv.c
30265@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
30266 * cases. Define nv_hardreset() which only kicks in for post-boot
30267 * probing and use it for all variants.
30268 */
30269-static struct ata_port_operations nv_generic_ops = {
30270+static const struct ata_port_operations nv_generic_ops = {
30271 .inherits = &ata_bmdma_port_ops,
30272 .lost_interrupt = ATA_OP_NULL,
30273 .scr_read = nv_scr_read,
30274@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
30275 .hardreset = nv_hardreset,
30276 };
30277
30278-static struct ata_port_operations nv_nf2_ops = {
30279+static const struct ata_port_operations nv_nf2_ops = {
30280 .inherits = &nv_generic_ops,
30281 .freeze = nv_nf2_freeze,
30282 .thaw = nv_nf2_thaw,
30283 };
30284
30285-static struct ata_port_operations nv_ck804_ops = {
30286+static const struct ata_port_operations nv_ck804_ops = {
30287 .inherits = &nv_generic_ops,
30288 .freeze = nv_ck804_freeze,
30289 .thaw = nv_ck804_thaw,
30290 .host_stop = nv_ck804_host_stop,
30291 };
30292
30293-static struct ata_port_operations nv_adma_ops = {
30294+static const struct ata_port_operations nv_adma_ops = {
30295 .inherits = &nv_ck804_ops,
30296
30297 .check_atapi_dma = nv_adma_check_atapi_dma,
30298@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
30299 .host_stop = nv_adma_host_stop,
30300 };
30301
30302-static struct ata_port_operations nv_swncq_ops = {
30303+static const struct ata_port_operations nv_swncq_ops = {
30304 .inherits = &nv_generic_ops,
30305
30306 .qc_defer = ata_std_qc_defer,
30307diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
30308index 07d8d00..6cc70bb 100644
30309--- a/drivers/ata/sata_promise.c
30310+++ b/drivers/ata/sata_promise.c
30311@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
30312 .error_handler = pdc_error_handler,
30313 };
30314
30315-static struct ata_port_operations pdc_sata_ops = {
30316+static const struct ata_port_operations pdc_sata_ops = {
30317 .inherits = &pdc_common_ops,
30318 .cable_detect = pdc_sata_cable_detect,
30319 .freeze = pdc_sata_freeze,
30320@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
30321
30322 /* First-generation chips need a more restrictive ->check_atapi_dma op,
30323 and ->freeze/thaw that ignore the hotplug controls. */
30324-static struct ata_port_operations pdc_old_sata_ops = {
30325+static const struct ata_port_operations pdc_old_sata_ops = {
30326 .inherits = &pdc_sata_ops,
30327 .freeze = pdc_freeze,
30328 .thaw = pdc_thaw,
30329 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
30330 };
30331
30332-static struct ata_port_operations pdc_pata_ops = {
30333+static const struct ata_port_operations pdc_pata_ops = {
30334 .inherits = &pdc_common_ops,
30335 .cable_detect = pdc_pata_cable_detect,
30336 .freeze = pdc_freeze,
30337diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
30338index 326c0cf..36ecebe 100644
30339--- a/drivers/ata/sata_qstor.c
30340+++ b/drivers/ata/sata_qstor.c
30341@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
30342 .dma_boundary = QS_DMA_BOUNDARY,
30343 };
30344
30345-static struct ata_port_operations qs_ata_ops = {
30346+static const struct ata_port_operations qs_ata_ops = {
30347 .inherits = &ata_sff_port_ops,
30348
30349 .check_atapi_dma = qs_check_atapi_dma,
30350diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
30351index 3cb69d5..0871d3c 100644
30352--- a/drivers/ata/sata_sil.c
30353+++ b/drivers/ata/sata_sil.c
30354@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
30355 .sg_tablesize = ATA_MAX_PRD
30356 };
30357
30358-static struct ata_port_operations sil_ops = {
30359+static const struct ata_port_operations sil_ops = {
30360 .inherits = &ata_bmdma32_port_ops,
30361 .dev_config = sil_dev_config,
30362 .set_mode = sil_set_mode,
30363diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
30364index e6946fc..eddb794 100644
30365--- a/drivers/ata/sata_sil24.c
30366+++ b/drivers/ata/sata_sil24.c
30367@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
30368 .dma_boundary = ATA_DMA_BOUNDARY,
30369 };
30370
30371-static struct ata_port_operations sil24_ops = {
30372+static const struct ata_port_operations sil24_ops = {
30373 .inherits = &sata_pmp_port_ops,
30374
30375 .qc_defer = sil24_qc_defer,
30376diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
30377index f8a91bf..9cb06b6 100644
30378--- a/drivers/ata/sata_sis.c
30379+++ b/drivers/ata/sata_sis.c
30380@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
30381 ATA_BMDMA_SHT(DRV_NAME),
30382 };
30383
30384-static struct ata_port_operations sis_ops = {
30385+static const struct ata_port_operations sis_ops = {
30386 .inherits = &ata_bmdma_port_ops,
30387 .scr_read = sis_scr_read,
30388 .scr_write = sis_scr_write,
30389diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
30390index 7257f2d..d04c6f5 100644
30391--- a/drivers/ata/sata_svw.c
30392+++ b/drivers/ata/sata_svw.c
30393@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
30394 };
30395
30396
30397-static struct ata_port_operations k2_sata_ops = {
30398+static const struct ata_port_operations k2_sata_ops = {
30399 .inherits = &ata_bmdma_port_ops,
30400 .sff_tf_load = k2_sata_tf_load,
30401 .sff_tf_read = k2_sata_tf_read,
30402diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
30403index bbcf970..cd0df0d 100644
30404--- a/drivers/ata/sata_sx4.c
30405+++ b/drivers/ata/sata_sx4.c
30406@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
30407 };
30408
30409 /* TODO: inherit from base port_ops after converting to new EH */
30410-static struct ata_port_operations pdc_20621_ops = {
30411+static const struct ata_port_operations pdc_20621_ops = {
30412 .inherits = &ata_sff_port_ops,
30413
30414 .check_atapi_dma = pdc_check_atapi_dma,
30415diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
30416index e5bff47..089d859 100644
30417--- a/drivers/ata/sata_uli.c
30418+++ b/drivers/ata/sata_uli.c
30419@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
30420 ATA_BMDMA_SHT(DRV_NAME),
30421 };
30422
30423-static struct ata_port_operations uli_ops = {
30424+static const struct ata_port_operations uli_ops = {
30425 .inherits = &ata_bmdma_port_ops,
30426 .scr_read = uli_scr_read,
30427 .scr_write = uli_scr_write,
30428diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
30429index f5dcca7..77b94eb 100644
30430--- a/drivers/ata/sata_via.c
30431+++ b/drivers/ata/sata_via.c
30432@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
30433 ATA_BMDMA_SHT(DRV_NAME),
30434 };
30435
30436-static struct ata_port_operations svia_base_ops = {
30437+static const struct ata_port_operations svia_base_ops = {
30438 .inherits = &ata_bmdma_port_ops,
30439 .sff_tf_load = svia_tf_load,
30440 };
30441
30442-static struct ata_port_operations vt6420_sata_ops = {
30443+static const struct ata_port_operations vt6420_sata_ops = {
30444 .inherits = &svia_base_ops,
30445 .freeze = svia_noop_freeze,
30446 .prereset = vt6420_prereset,
30447 .bmdma_start = vt6420_bmdma_start,
30448 };
30449
30450-static struct ata_port_operations vt6421_pata_ops = {
30451+static const struct ata_port_operations vt6421_pata_ops = {
30452 .inherits = &svia_base_ops,
30453 .cable_detect = vt6421_pata_cable_detect,
30454 .set_piomode = vt6421_set_pio_mode,
30455 .set_dmamode = vt6421_set_dma_mode,
30456 };
30457
30458-static struct ata_port_operations vt6421_sata_ops = {
30459+static const struct ata_port_operations vt6421_sata_ops = {
30460 .inherits = &svia_base_ops,
30461 .scr_read = svia_scr_read,
30462 .scr_write = svia_scr_write,
30463 };
30464
30465-static struct ata_port_operations vt8251_ops = {
30466+static const struct ata_port_operations vt8251_ops = {
30467 .inherits = &svia_base_ops,
30468 .hardreset = sata_std_hardreset,
30469 .scr_read = vt8251_scr_read,
30470diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
30471index 8b2a278..51e65d3 100644
30472--- a/drivers/ata/sata_vsc.c
30473+++ b/drivers/ata/sata_vsc.c
30474@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
30475 };
30476
30477
30478-static struct ata_port_operations vsc_sata_ops = {
30479+static const struct ata_port_operations vsc_sata_ops = {
30480 .inherits = &ata_bmdma_port_ops,
30481 /* The IRQ handling is not quite standard SFF behaviour so we
30482 cannot use the default lost interrupt handler */
30483diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
30484index 5effec6..7e4019a 100644
30485--- a/drivers/atm/adummy.c
30486+++ b/drivers/atm/adummy.c
30487@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
30488 vcc->pop(vcc, skb);
30489 else
30490 dev_kfree_skb_any(skb);
30491- atomic_inc(&vcc->stats->tx);
30492+ atomic_inc_unchecked(&vcc->stats->tx);
30493
30494 return 0;
30495 }
30496diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
30497index 66e1813..26a27c6 100644
30498--- a/drivers/atm/ambassador.c
30499+++ b/drivers/atm/ambassador.c
30500@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
30501 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
30502
30503 // VC layer stats
30504- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30505+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30506
30507 // free the descriptor
30508 kfree (tx_descr);
30509@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30510 dump_skb ("<<<", vc, skb);
30511
30512 // VC layer stats
30513- atomic_inc(&atm_vcc->stats->rx);
30514+ atomic_inc_unchecked(&atm_vcc->stats->rx);
30515 __net_timestamp(skb);
30516 // end of our responsability
30517 atm_vcc->push (atm_vcc, skb);
30518@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
30519 } else {
30520 PRINTK (KERN_INFO, "dropped over-size frame");
30521 // should we count this?
30522- atomic_inc(&atm_vcc->stats->rx_drop);
30523+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30524 }
30525
30526 } else {
30527@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
30528 }
30529
30530 if (check_area (skb->data, skb->len)) {
30531- atomic_inc(&atm_vcc->stats->tx_err);
30532+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
30533 return -ENOMEM; // ?
30534 }
30535
30536diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
30537index 02ad83d..6daffeb 100644
30538--- a/drivers/atm/atmtcp.c
30539+++ b/drivers/atm/atmtcp.c
30540@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30541 if (vcc->pop) vcc->pop(vcc,skb);
30542 else dev_kfree_skb(skb);
30543 if (dev_data) return 0;
30544- atomic_inc(&vcc->stats->tx_err);
30545+ atomic_inc_unchecked(&vcc->stats->tx_err);
30546 return -ENOLINK;
30547 }
30548 size = skb->len+sizeof(struct atmtcp_hdr);
30549@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30550 if (!new_skb) {
30551 if (vcc->pop) vcc->pop(vcc,skb);
30552 else dev_kfree_skb(skb);
30553- atomic_inc(&vcc->stats->tx_err);
30554+ atomic_inc_unchecked(&vcc->stats->tx_err);
30555 return -ENOBUFS;
30556 }
30557 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
30558@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
30559 if (vcc->pop) vcc->pop(vcc,skb);
30560 else dev_kfree_skb(skb);
30561 out_vcc->push(out_vcc,new_skb);
30562- atomic_inc(&vcc->stats->tx);
30563- atomic_inc(&out_vcc->stats->rx);
30564+ atomic_inc_unchecked(&vcc->stats->tx);
30565+ atomic_inc_unchecked(&out_vcc->stats->rx);
30566 return 0;
30567 }
30568
30569@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30570 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
30571 read_unlock(&vcc_sklist_lock);
30572 if (!out_vcc) {
30573- atomic_inc(&vcc->stats->tx_err);
30574+ atomic_inc_unchecked(&vcc->stats->tx_err);
30575 goto done;
30576 }
30577 skb_pull(skb,sizeof(struct atmtcp_hdr));
30578@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
30579 __net_timestamp(new_skb);
30580 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
30581 out_vcc->push(out_vcc,new_skb);
30582- atomic_inc(&vcc->stats->tx);
30583- atomic_inc(&out_vcc->stats->rx);
30584+ atomic_inc_unchecked(&vcc->stats->tx);
30585+ atomic_inc_unchecked(&out_vcc->stats->rx);
30586 done:
30587 if (vcc->pop) vcc->pop(vcc,skb);
30588 else dev_kfree_skb(skb);
30589diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
30590index 0c30261..3da356e 100644
30591--- a/drivers/atm/eni.c
30592+++ b/drivers/atm/eni.c
30593@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
30594 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
30595 vcc->dev->number);
30596 length = 0;
30597- atomic_inc(&vcc->stats->rx_err);
30598+ atomic_inc_unchecked(&vcc->stats->rx_err);
30599 }
30600 else {
30601 length = ATM_CELL_SIZE-1; /* no HEC */
30602@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30603 size);
30604 }
30605 eff = length = 0;
30606- atomic_inc(&vcc->stats->rx_err);
30607+ atomic_inc_unchecked(&vcc->stats->rx_err);
30608 }
30609 else {
30610 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
30611@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
30612 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
30613 vcc->dev->number,vcc->vci,length,size << 2,descr);
30614 length = eff = 0;
30615- atomic_inc(&vcc->stats->rx_err);
30616+ atomic_inc_unchecked(&vcc->stats->rx_err);
30617 }
30618 }
30619 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
30620@@ -770,7 +770,7 @@ rx_dequeued++;
30621 vcc->push(vcc,skb);
30622 pushed++;
30623 }
30624- atomic_inc(&vcc->stats->rx);
30625+ atomic_inc_unchecked(&vcc->stats->rx);
30626 }
30627 wake_up(&eni_dev->rx_wait);
30628 }
30629@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
30630 PCI_DMA_TODEVICE);
30631 if (vcc->pop) vcc->pop(vcc,skb);
30632 else dev_kfree_skb_irq(skb);
30633- atomic_inc(&vcc->stats->tx);
30634+ atomic_inc_unchecked(&vcc->stats->tx);
30635 wake_up(&eni_dev->tx_wait);
30636 dma_complete++;
30637 }
30638@@ -1570,7 +1570,7 @@ tx_complete++;
30639 /*--------------------------------- entries ---------------------------------*/
30640
30641
30642-static const char *media_name[] __devinitdata = {
30643+static const char *media_name[] __devinitconst = {
30644 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
30645 "UTP", "05?", "06?", "07?", /* 4- 7 */
30646 "TAXI","09?", "10?", "11?", /* 8-11 */
30647diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
30648index cd5049a..a51209f 100644
30649--- a/drivers/atm/firestream.c
30650+++ b/drivers/atm/firestream.c
30651@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
30652 }
30653 }
30654
30655- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30656+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30657
30658 fs_dprintk (FS_DEBUG_TXMEM, "i");
30659 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
30660@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30661 #endif
30662 skb_put (skb, qe->p1 & 0xffff);
30663 ATM_SKB(skb)->vcc = atm_vcc;
30664- atomic_inc(&atm_vcc->stats->rx);
30665+ atomic_inc_unchecked(&atm_vcc->stats->rx);
30666 __net_timestamp(skb);
30667 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
30668 atm_vcc->push (atm_vcc, skb);
30669@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
30670 kfree (pe);
30671 }
30672 if (atm_vcc)
30673- atomic_inc(&atm_vcc->stats->rx_drop);
30674+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30675 break;
30676 case 0x1f: /* Reassembly abort: no buffers. */
30677 /* Silently increment error counter. */
30678 if (atm_vcc)
30679- atomic_inc(&atm_vcc->stats->rx_drop);
30680+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
30681 break;
30682 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
30683 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
30684diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
30685index f766cc4..a34002e 100644
30686--- a/drivers/atm/fore200e.c
30687+++ b/drivers/atm/fore200e.c
30688@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
30689 #endif
30690 /* check error condition */
30691 if (*entry->status & STATUS_ERROR)
30692- atomic_inc(&vcc->stats->tx_err);
30693+ atomic_inc_unchecked(&vcc->stats->tx_err);
30694 else
30695- atomic_inc(&vcc->stats->tx);
30696+ atomic_inc_unchecked(&vcc->stats->tx);
30697 }
30698 }
30699
30700@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30701 if (skb == NULL) {
30702 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
30703
30704- atomic_inc(&vcc->stats->rx_drop);
30705+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30706 return -ENOMEM;
30707 }
30708
30709@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
30710
30711 dev_kfree_skb_any(skb);
30712
30713- atomic_inc(&vcc->stats->rx_drop);
30714+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30715 return -ENOMEM;
30716 }
30717
30718 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30719
30720 vcc->push(vcc, skb);
30721- atomic_inc(&vcc->stats->rx);
30722+ atomic_inc_unchecked(&vcc->stats->rx);
30723
30724 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
30725
30726@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
30727 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
30728 fore200e->atm_dev->number,
30729 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
30730- atomic_inc(&vcc->stats->rx_err);
30731+ atomic_inc_unchecked(&vcc->stats->rx_err);
30732 }
30733 }
30734
30735@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
30736 goto retry_here;
30737 }
30738
30739- atomic_inc(&vcc->stats->tx_err);
30740+ atomic_inc_unchecked(&vcc->stats->tx_err);
30741
30742 fore200e->tx_sat++;
30743 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
30744diff --git a/drivers/atm/he.c b/drivers/atm/he.c
30745index 7066703..2b130de 100644
30746--- a/drivers/atm/he.c
30747+++ b/drivers/atm/he.c
30748@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30749
30750 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
30751 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
30752- atomic_inc(&vcc->stats->rx_drop);
30753+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30754 goto return_host_buffers;
30755 }
30756
30757@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30758 RBRQ_LEN_ERR(he_dev->rbrq_head)
30759 ? "LEN_ERR" : "",
30760 vcc->vpi, vcc->vci);
30761- atomic_inc(&vcc->stats->rx_err);
30762+ atomic_inc_unchecked(&vcc->stats->rx_err);
30763 goto return_host_buffers;
30764 }
30765
30766@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
30767 vcc->push(vcc, skb);
30768 spin_lock(&he_dev->global_lock);
30769
30770- atomic_inc(&vcc->stats->rx);
30771+ atomic_inc_unchecked(&vcc->stats->rx);
30772
30773 return_host_buffers:
30774 ++pdus_assembled;
30775@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
30776 tpd->vcc->pop(tpd->vcc, tpd->skb);
30777 else
30778 dev_kfree_skb_any(tpd->skb);
30779- atomic_inc(&tpd->vcc->stats->tx_err);
30780+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
30781 }
30782 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
30783 return;
30784@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30785 vcc->pop(vcc, skb);
30786 else
30787 dev_kfree_skb_any(skb);
30788- atomic_inc(&vcc->stats->tx_err);
30789+ atomic_inc_unchecked(&vcc->stats->tx_err);
30790 return -EINVAL;
30791 }
30792
30793@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30794 vcc->pop(vcc, skb);
30795 else
30796 dev_kfree_skb_any(skb);
30797- atomic_inc(&vcc->stats->tx_err);
30798+ atomic_inc_unchecked(&vcc->stats->tx_err);
30799 return -EINVAL;
30800 }
30801 #endif
30802@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30803 vcc->pop(vcc, skb);
30804 else
30805 dev_kfree_skb_any(skb);
30806- atomic_inc(&vcc->stats->tx_err);
30807+ atomic_inc_unchecked(&vcc->stats->tx_err);
30808 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30809 return -ENOMEM;
30810 }
30811@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30812 vcc->pop(vcc, skb);
30813 else
30814 dev_kfree_skb_any(skb);
30815- atomic_inc(&vcc->stats->tx_err);
30816+ atomic_inc_unchecked(&vcc->stats->tx_err);
30817 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30818 return -ENOMEM;
30819 }
30820@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
30821 __enqueue_tpd(he_dev, tpd, cid);
30822 spin_unlock_irqrestore(&he_dev->global_lock, flags);
30823
30824- atomic_inc(&vcc->stats->tx);
30825+ atomic_inc_unchecked(&vcc->stats->tx);
30826
30827 return 0;
30828 }
30829diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
30830index 4e49021..01b1512 100644
30831--- a/drivers/atm/horizon.c
30832+++ b/drivers/atm/horizon.c
30833@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
30834 {
30835 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
30836 // VC layer stats
30837- atomic_inc(&vcc->stats->rx);
30838+ atomic_inc_unchecked(&vcc->stats->rx);
30839 __net_timestamp(skb);
30840 // end of our responsability
30841 vcc->push (vcc, skb);
30842@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
30843 dev->tx_iovec = NULL;
30844
30845 // VC layer stats
30846- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
30847+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
30848
30849 // free the skb
30850 hrz_kfree_skb (skb);
30851diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
30852index e33ae00..9deb4ab 100644
30853--- a/drivers/atm/idt77252.c
30854+++ b/drivers/atm/idt77252.c
30855@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
30856 else
30857 dev_kfree_skb(skb);
30858
30859- atomic_inc(&vcc->stats->tx);
30860+ atomic_inc_unchecked(&vcc->stats->tx);
30861 }
30862
30863 atomic_dec(&scq->used);
30864@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30865 if ((sb = dev_alloc_skb(64)) == NULL) {
30866 printk("%s: Can't allocate buffers for aal0.\n",
30867 card->name);
30868- atomic_add(i, &vcc->stats->rx_drop);
30869+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
30870 break;
30871 }
30872 if (!atm_charge(vcc, sb->truesize)) {
30873 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
30874 card->name);
30875- atomic_add(i - 1, &vcc->stats->rx_drop);
30876+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
30877 dev_kfree_skb(sb);
30878 break;
30879 }
30880@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30881 ATM_SKB(sb)->vcc = vcc;
30882 __net_timestamp(sb);
30883 vcc->push(vcc, sb);
30884- atomic_inc(&vcc->stats->rx);
30885+ atomic_inc_unchecked(&vcc->stats->rx);
30886
30887 cell += ATM_CELL_PAYLOAD;
30888 }
30889@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30890 "(CDC: %08x)\n",
30891 card->name, len, rpp->len, readl(SAR_REG_CDC));
30892 recycle_rx_pool_skb(card, rpp);
30893- atomic_inc(&vcc->stats->rx_err);
30894+ atomic_inc_unchecked(&vcc->stats->rx_err);
30895 return;
30896 }
30897 if (stat & SAR_RSQE_CRC) {
30898 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
30899 recycle_rx_pool_skb(card, rpp);
30900- atomic_inc(&vcc->stats->rx_err);
30901+ atomic_inc_unchecked(&vcc->stats->rx_err);
30902 return;
30903 }
30904 if (skb_queue_len(&rpp->queue) > 1) {
30905@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30906 RXPRINTK("%s: Can't alloc RX skb.\n",
30907 card->name);
30908 recycle_rx_pool_skb(card, rpp);
30909- atomic_inc(&vcc->stats->rx_err);
30910+ atomic_inc_unchecked(&vcc->stats->rx_err);
30911 return;
30912 }
30913 if (!atm_charge(vcc, skb->truesize)) {
30914@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30915 __net_timestamp(skb);
30916
30917 vcc->push(vcc, skb);
30918- atomic_inc(&vcc->stats->rx);
30919+ atomic_inc_unchecked(&vcc->stats->rx);
30920
30921 return;
30922 }
30923@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
30924 __net_timestamp(skb);
30925
30926 vcc->push(vcc, skb);
30927- atomic_inc(&vcc->stats->rx);
30928+ atomic_inc_unchecked(&vcc->stats->rx);
30929
30930 if (skb->truesize > SAR_FB_SIZE_3)
30931 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
30932@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
30933 if (vcc->qos.aal != ATM_AAL0) {
30934 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
30935 card->name, vpi, vci);
30936- atomic_inc(&vcc->stats->rx_drop);
30937+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30938 goto drop;
30939 }
30940
30941 if ((sb = dev_alloc_skb(64)) == NULL) {
30942 printk("%s: Can't allocate buffers for AAL0.\n",
30943 card->name);
30944- atomic_inc(&vcc->stats->rx_err);
30945+ atomic_inc_unchecked(&vcc->stats->rx_err);
30946 goto drop;
30947 }
30948
30949@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
30950 ATM_SKB(sb)->vcc = vcc;
30951 __net_timestamp(sb);
30952 vcc->push(vcc, sb);
30953- atomic_inc(&vcc->stats->rx);
30954+ atomic_inc_unchecked(&vcc->stats->rx);
30955
30956 drop:
30957 skb_pull(queue, 64);
30958@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30959
30960 if (vc == NULL) {
30961 printk("%s: NULL connection in send().\n", card->name);
30962- atomic_inc(&vcc->stats->tx_err);
30963+ atomic_inc_unchecked(&vcc->stats->tx_err);
30964 dev_kfree_skb(skb);
30965 return -EINVAL;
30966 }
30967 if (!test_bit(VCF_TX, &vc->flags)) {
30968 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
30969- atomic_inc(&vcc->stats->tx_err);
30970+ atomic_inc_unchecked(&vcc->stats->tx_err);
30971 dev_kfree_skb(skb);
30972 return -EINVAL;
30973 }
30974@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30975 break;
30976 default:
30977 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
30978- atomic_inc(&vcc->stats->tx_err);
30979+ atomic_inc_unchecked(&vcc->stats->tx_err);
30980 dev_kfree_skb(skb);
30981 return -EINVAL;
30982 }
30983
30984 if (skb_shinfo(skb)->nr_frags != 0) {
30985 printk("%s: No scatter-gather yet.\n", card->name);
30986- atomic_inc(&vcc->stats->tx_err);
30987+ atomic_inc_unchecked(&vcc->stats->tx_err);
30988 dev_kfree_skb(skb);
30989 return -EINVAL;
30990 }
30991@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
30992
30993 err = queue_skb(card, vc, skb, oam);
30994 if (err) {
30995- atomic_inc(&vcc->stats->tx_err);
30996+ atomic_inc_unchecked(&vcc->stats->tx_err);
30997 dev_kfree_skb(skb);
30998 return err;
30999 }
31000@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
31001 skb = dev_alloc_skb(64);
31002 if (!skb) {
31003 printk("%s: Out of memory in send_oam().\n", card->name);
31004- atomic_inc(&vcc->stats->tx_err);
31005+ atomic_inc_unchecked(&vcc->stats->tx_err);
31006 return -ENOMEM;
31007 }
31008 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
31009diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
31010index b2c1b37..faa672b 100644
31011--- a/drivers/atm/iphase.c
31012+++ b/drivers/atm/iphase.c
31013@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
31014 status = (u_short) (buf_desc_ptr->desc_mode);
31015 if (status & (RX_CER | RX_PTE | RX_OFL))
31016 {
31017- atomic_inc(&vcc->stats->rx_err);
31018+ atomic_inc_unchecked(&vcc->stats->rx_err);
31019 IF_ERR(printk("IA: bad packet, dropping it");)
31020 if (status & RX_CER) {
31021 IF_ERR(printk(" cause: packet CRC error\n");)
31022@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
31023 len = dma_addr - buf_addr;
31024 if (len > iadev->rx_buf_sz) {
31025 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
31026- atomic_inc(&vcc->stats->rx_err);
31027+ atomic_inc_unchecked(&vcc->stats->rx_err);
31028 goto out_free_desc;
31029 }
31030
31031@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31032 ia_vcc = INPH_IA_VCC(vcc);
31033 if (ia_vcc == NULL)
31034 {
31035- atomic_inc(&vcc->stats->rx_err);
31036+ atomic_inc_unchecked(&vcc->stats->rx_err);
31037 dev_kfree_skb_any(skb);
31038 atm_return(vcc, atm_guess_pdu2truesize(len));
31039 goto INCR_DLE;
31040@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31041 if ((length > iadev->rx_buf_sz) || (length >
31042 (skb->len - sizeof(struct cpcs_trailer))))
31043 {
31044- atomic_inc(&vcc->stats->rx_err);
31045+ atomic_inc_unchecked(&vcc->stats->rx_err);
31046 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
31047 length, skb->len);)
31048 dev_kfree_skb_any(skb);
31049@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
31050
31051 IF_RX(printk("rx_dle_intr: skb push");)
31052 vcc->push(vcc,skb);
31053- atomic_inc(&vcc->stats->rx);
31054+ atomic_inc_unchecked(&vcc->stats->rx);
31055 iadev->rx_pkt_cnt++;
31056 }
31057 INCR_DLE:
31058@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
31059 {
31060 struct k_sonet_stats *stats;
31061 stats = &PRIV(_ia_dev[board])->sonet_stats;
31062- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
31063- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
31064- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
31065- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
31066- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
31067- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
31068- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
31069- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
31070- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
31071+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
31072+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
31073+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
31074+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
31075+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
31076+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
31077+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
31078+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
31079+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
31080 }
31081 ia_cmds.status = 0;
31082 break;
31083@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31084 if ((desc == 0) || (desc > iadev->num_tx_desc))
31085 {
31086 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
31087- atomic_inc(&vcc->stats->tx);
31088+ atomic_inc_unchecked(&vcc->stats->tx);
31089 if (vcc->pop)
31090 vcc->pop(vcc, skb);
31091 else
31092@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
31093 ATM_DESC(skb) = vcc->vci;
31094 skb_queue_tail(&iadev->tx_dma_q, skb);
31095
31096- atomic_inc(&vcc->stats->tx);
31097+ atomic_inc_unchecked(&vcc->stats->tx);
31098 iadev->tx_pkt_cnt++;
31099 /* Increment transaction counter */
31100 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
31101
31102 #if 0
31103 /* add flow control logic */
31104- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
31105+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
31106 if (iavcc->vc_desc_cnt > 10) {
31107 vcc->tx_quota = vcc->tx_quota * 3 / 4;
31108 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
31109diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
31110index cf97c34..8d30655 100644
31111--- a/drivers/atm/lanai.c
31112+++ b/drivers/atm/lanai.c
31113@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
31114 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
31115 lanai_endtx(lanai, lvcc);
31116 lanai_free_skb(lvcc->tx.atmvcc, skb);
31117- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
31118+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
31119 }
31120
31121 /* Try to fill the buffer - don't call unless there is backlog */
31122@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
31123 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
31124 __net_timestamp(skb);
31125 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
31126- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
31127+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
31128 out:
31129 lvcc->rx.buf.ptr = end;
31130 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
31131@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31132 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
31133 "vcc %d\n", lanai->number, (unsigned int) s, vci);
31134 lanai->stats.service_rxnotaal5++;
31135- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31136+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31137 return 0;
31138 }
31139 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
31140@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31141 int bytes;
31142 read_unlock(&vcc_sklist_lock);
31143 DPRINTK("got trashed rx pdu on vci %d\n", vci);
31144- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31145+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31146 lvcc->stats.x.aal5.service_trash++;
31147 bytes = (SERVICE_GET_END(s) * 16) -
31148 (((unsigned long) lvcc->rx.buf.ptr) -
31149@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31150 }
31151 if (s & SERVICE_STREAM) {
31152 read_unlock(&vcc_sklist_lock);
31153- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31154+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31155 lvcc->stats.x.aal5.service_stream++;
31156 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
31157 "PDU on VCI %d!\n", lanai->number, vci);
31158@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
31159 return 0;
31160 }
31161 DPRINTK("got rx crc error on vci %d\n", vci);
31162- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
31163+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
31164 lvcc->stats.x.aal5.service_rxcrc++;
31165 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
31166 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
31167diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
31168index 3da804b..d3b0eed 100644
31169--- a/drivers/atm/nicstar.c
31170+++ b/drivers/atm/nicstar.c
31171@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31172 if ((vc = (vc_map *) vcc->dev_data) == NULL)
31173 {
31174 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
31175- atomic_inc(&vcc->stats->tx_err);
31176+ atomic_inc_unchecked(&vcc->stats->tx_err);
31177 dev_kfree_skb_any(skb);
31178 return -EINVAL;
31179 }
31180@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31181 if (!vc->tx)
31182 {
31183 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
31184- atomic_inc(&vcc->stats->tx_err);
31185+ atomic_inc_unchecked(&vcc->stats->tx_err);
31186 dev_kfree_skb_any(skb);
31187 return -EINVAL;
31188 }
31189@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31190 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
31191 {
31192 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
31193- atomic_inc(&vcc->stats->tx_err);
31194+ atomic_inc_unchecked(&vcc->stats->tx_err);
31195 dev_kfree_skb_any(skb);
31196 return -EINVAL;
31197 }
31198@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31199 if (skb_shinfo(skb)->nr_frags != 0)
31200 {
31201 printk("nicstar%d: No scatter-gather yet.\n", card->index);
31202- atomic_inc(&vcc->stats->tx_err);
31203+ atomic_inc_unchecked(&vcc->stats->tx_err);
31204 dev_kfree_skb_any(skb);
31205 return -EINVAL;
31206 }
31207@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
31208
31209 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
31210 {
31211- atomic_inc(&vcc->stats->tx_err);
31212+ atomic_inc_unchecked(&vcc->stats->tx_err);
31213 dev_kfree_skb_any(skb);
31214 return -EIO;
31215 }
31216- atomic_inc(&vcc->stats->tx);
31217+ atomic_inc_unchecked(&vcc->stats->tx);
31218
31219 return 0;
31220 }
31221@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31222 {
31223 printk("nicstar%d: Can't allocate buffers for aal0.\n",
31224 card->index);
31225- atomic_add(i,&vcc->stats->rx_drop);
31226+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
31227 break;
31228 }
31229 if (!atm_charge(vcc, sb->truesize))
31230 {
31231 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
31232 card->index);
31233- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31234+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
31235 dev_kfree_skb_any(sb);
31236 break;
31237 }
31238@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31239 ATM_SKB(sb)->vcc = vcc;
31240 __net_timestamp(sb);
31241 vcc->push(vcc, sb);
31242- atomic_inc(&vcc->stats->rx);
31243+ atomic_inc_unchecked(&vcc->stats->rx);
31244 cell += ATM_CELL_PAYLOAD;
31245 }
31246
31247@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31248 if (iovb == NULL)
31249 {
31250 printk("nicstar%d: Out of iovec buffers.\n", card->index);
31251- atomic_inc(&vcc->stats->rx_drop);
31252+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31253 recycle_rx_buf(card, skb);
31254 return;
31255 }
31256@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31257 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
31258 {
31259 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
31260- atomic_inc(&vcc->stats->rx_err);
31261+ atomic_inc_unchecked(&vcc->stats->rx_err);
31262 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
31263 NS_SKB(iovb)->iovcnt = 0;
31264 iovb->len = 0;
31265@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31266 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
31267 card->index);
31268 which_list(card, skb);
31269- atomic_inc(&vcc->stats->rx_err);
31270+ atomic_inc_unchecked(&vcc->stats->rx_err);
31271 recycle_rx_buf(card, skb);
31272 vc->rx_iov = NULL;
31273 recycle_iov_buf(card, iovb);
31274@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31275 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
31276 card->index);
31277 which_list(card, skb);
31278- atomic_inc(&vcc->stats->rx_err);
31279+ atomic_inc_unchecked(&vcc->stats->rx_err);
31280 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31281 NS_SKB(iovb)->iovcnt);
31282 vc->rx_iov = NULL;
31283@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31284 printk(" - PDU size mismatch.\n");
31285 else
31286 printk(".\n");
31287- atomic_inc(&vcc->stats->rx_err);
31288+ atomic_inc_unchecked(&vcc->stats->rx_err);
31289 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31290 NS_SKB(iovb)->iovcnt);
31291 vc->rx_iov = NULL;
31292@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31293 if (!atm_charge(vcc, skb->truesize))
31294 {
31295 push_rxbufs(card, skb);
31296- atomic_inc(&vcc->stats->rx_drop);
31297+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31298 }
31299 else
31300 {
31301@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31302 ATM_SKB(skb)->vcc = vcc;
31303 __net_timestamp(skb);
31304 vcc->push(vcc, skb);
31305- atomic_inc(&vcc->stats->rx);
31306+ atomic_inc_unchecked(&vcc->stats->rx);
31307 }
31308 }
31309 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
31310@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31311 if (!atm_charge(vcc, sb->truesize))
31312 {
31313 push_rxbufs(card, sb);
31314- atomic_inc(&vcc->stats->rx_drop);
31315+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31316 }
31317 else
31318 {
31319@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31320 ATM_SKB(sb)->vcc = vcc;
31321 __net_timestamp(sb);
31322 vcc->push(vcc, sb);
31323- atomic_inc(&vcc->stats->rx);
31324+ atomic_inc_unchecked(&vcc->stats->rx);
31325 }
31326
31327 push_rxbufs(card, skb);
31328@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31329 if (!atm_charge(vcc, skb->truesize))
31330 {
31331 push_rxbufs(card, skb);
31332- atomic_inc(&vcc->stats->rx_drop);
31333+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31334 }
31335 else
31336 {
31337@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31338 ATM_SKB(skb)->vcc = vcc;
31339 __net_timestamp(skb);
31340 vcc->push(vcc, skb);
31341- atomic_inc(&vcc->stats->rx);
31342+ atomic_inc_unchecked(&vcc->stats->rx);
31343 }
31344
31345 push_rxbufs(card, sb);
31346@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31347 if (hb == NULL)
31348 {
31349 printk("nicstar%d: Out of huge buffers.\n", card->index);
31350- atomic_inc(&vcc->stats->rx_drop);
31351+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31352 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
31353 NS_SKB(iovb)->iovcnt);
31354 vc->rx_iov = NULL;
31355@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31356 }
31357 else
31358 dev_kfree_skb_any(hb);
31359- atomic_inc(&vcc->stats->rx_drop);
31360+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31361 }
31362 else
31363 {
31364@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
31365 #endif /* NS_USE_DESTRUCTORS */
31366 __net_timestamp(hb);
31367 vcc->push(vcc, hb);
31368- atomic_inc(&vcc->stats->rx);
31369+ atomic_inc_unchecked(&vcc->stats->rx);
31370 }
31371 }
31372
31373diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
31374index 84c93ff..e6ed269 100644
31375--- a/drivers/atm/solos-pci.c
31376+++ b/drivers/atm/solos-pci.c
31377@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
31378 }
31379 atm_charge(vcc, skb->truesize);
31380 vcc->push(vcc, skb);
31381- atomic_inc(&vcc->stats->rx);
31382+ atomic_inc_unchecked(&vcc->stats->rx);
31383 break;
31384
31385 case PKT_STATUS:
31386@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
31387 char msg[500];
31388 char item[10];
31389
31390+ pax_track_stack();
31391+
31392 len = buf->len;
31393 for (i = 0; i < len; i++){
31394 if(i % 8 == 0)
31395@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
31396 vcc = SKB_CB(oldskb)->vcc;
31397
31398 if (vcc) {
31399- atomic_inc(&vcc->stats->tx);
31400+ atomic_inc_unchecked(&vcc->stats->tx);
31401 solos_pop(vcc, oldskb);
31402 } else
31403 dev_kfree_skb_irq(oldskb);
31404diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
31405index 6dd3f59..ee377f3 100644
31406--- a/drivers/atm/suni.c
31407+++ b/drivers/atm/suni.c
31408@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
31409
31410
31411 #define ADD_LIMITED(s,v) \
31412- atomic_add((v),&stats->s); \
31413- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
31414+ atomic_add_unchecked((v),&stats->s); \
31415+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
31416
31417
31418 static void suni_hz(unsigned long from_timer)
31419diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
31420index fc8cb07..4a80e53 100644
31421--- a/drivers/atm/uPD98402.c
31422+++ b/drivers/atm/uPD98402.c
31423@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
31424 struct sonet_stats tmp;
31425 int error = 0;
31426
31427- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31428+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
31429 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
31430 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
31431 if (zero && !error) {
31432@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
31433
31434
31435 #define ADD_LIMITED(s,v) \
31436- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
31437- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
31438- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31439+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
31440+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
31441+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
31442
31443
31444 static void stat_event(struct atm_dev *dev)
31445@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
31446 if (reason & uPD98402_INT_PFM) stat_event(dev);
31447 if (reason & uPD98402_INT_PCO) {
31448 (void) GET(PCOCR); /* clear interrupt cause */
31449- atomic_add(GET(HECCT),
31450+ atomic_add_unchecked(GET(HECCT),
31451 &PRIV(dev)->sonet_stats.uncorr_hcs);
31452 }
31453 if ((reason & uPD98402_INT_RFO) &&
31454@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
31455 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
31456 uPD98402_INT_LOS),PIMR); /* enable them */
31457 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
31458- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31459- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
31460- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
31461+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
31462+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
31463+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
31464 return 0;
31465 }
31466
31467diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
31468index 2e9635b..32927b4 100644
31469--- a/drivers/atm/zatm.c
31470+++ b/drivers/atm/zatm.c
31471@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31472 }
31473 if (!size) {
31474 dev_kfree_skb_irq(skb);
31475- if (vcc) atomic_inc(&vcc->stats->rx_err);
31476+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
31477 continue;
31478 }
31479 if (!atm_charge(vcc,skb->truesize)) {
31480@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
31481 skb->len = size;
31482 ATM_SKB(skb)->vcc = vcc;
31483 vcc->push(vcc,skb);
31484- atomic_inc(&vcc->stats->rx);
31485+ atomic_inc_unchecked(&vcc->stats->rx);
31486 }
31487 zout(pos & 0xffff,MTA(mbx));
31488 #if 0 /* probably a stupid idea */
31489@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
31490 skb_queue_head(&zatm_vcc->backlog,skb);
31491 break;
31492 }
31493- atomic_inc(&vcc->stats->tx);
31494+ atomic_inc_unchecked(&vcc->stats->tx);
31495 wake_up(&zatm_vcc->tx_wait);
31496 }
31497
31498diff --git a/drivers/base/bus.c b/drivers/base/bus.c
31499index 63c143e..fece183 100644
31500--- a/drivers/base/bus.c
31501+++ b/drivers/base/bus.c
31502@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
31503 return ret;
31504 }
31505
31506-static struct sysfs_ops driver_sysfs_ops = {
31507+static const struct sysfs_ops driver_sysfs_ops = {
31508 .show = drv_attr_show,
31509 .store = drv_attr_store,
31510 };
31511@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
31512 return ret;
31513 }
31514
31515-static struct sysfs_ops bus_sysfs_ops = {
31516+static const struct sysfs_ops bus_sysfs_ops = {
31517 .show = bus_attr_show,
31518 .store = bus_attr_store,
31519 };
31520@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
31521 return 0;
31522 }
31523
31524-static struct kset_uevent_ops bus_uevent_ops = {
31525+static const struct kset_uevent_ops bus_uevent_ops = {
31526 .filter = bus_uevent_filter,
31527 };
31528
31529diff --git a/drivers/base/class.c b/drivers/base/class.c
31530index 6e2c3b0..cb61871 100644
31531--- a/drivers/base/class.c
31532+++ b/drivers/base/class.c
31533@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
31534 kfree(cp);
31535 }
31536
31537-static struct sysfs_ops class_sysfs_ops = {
31538+static const struct sysfs_ops class_sysfs_ops = {
31539 .show = class_attr_show,
31540 .store = class_attr_store,
31541 };
31542diff --git a/drivers/base/core.c b/drivers/base/core.c
31543index f33d768..a9358d0 100644
31544--- a/drivers/base/core.c
31545+++ b/drivers/base/core.c
31546@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
31547 return ret;
31548 }
31549
31550-static struct sysfs_ops dev_sysfs_ops = {
31551+static const struct sysfs_ops dev_sysfs_ops = {
31552 .show = dev_attr_show,
31553 .store = dev_attr_store,
31554 };
31555@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
31556 return retval;
31557 }
31558
31559-static struct kset_uevent_ops device_uevent_ops = {
31560+static const struct kset_uevent_ops device_uevent_ops = {
31561 .filter = dev_uevent_filter,
31562 .name = dev_uevent_name,
31563 .uevent = dev_uevent,
31564diff --git a/drivers/base/memory.c b/drivers/base/memory.c
31565index 989429c..2272b00 100644
31566--- a/drivers/base/memory.c
31567+++ b/drivers/base/memory.c
31568@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
31569 return retval;
31570 }
31571
31572-static struct kset_uevent_ops memory_uevent_ops = {
31573+static const struct kset_uevent_ops memory_uevent_ops = {
31574 .name = memory_uevent_name,
31575 .uevent = memory_uevent,
31576 };
31577diff --git a/drivers/base/sys.c b/drivers/base/sys.c
31578index 3f202f7..61c4a6f 100644
31579--- a/drivers/base/sys.c
31580+++ b/drivers/base/sys.c
31581@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
31582 return -EIO;
31583 }
31584
31585-static struct sysfs_ops sysfs_ops = {
31586+static const struct sysfs_ops sysfs_ops = {
31587 .show = sysdev_show,
31588 .store = sysdev_store,
31589 };
31590@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
31591 return -EIO;
31592 }
31593
31594-static struct sysfs_ops sysfs_class_ops = {
31595+static const struct sysfs_ops sysfs_class_ops = {
31596 .show = sysdev_class_show,
31597 .store = sysdev_class_store,
31598 };
31599diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
31600index eb4fa19..1954777 100644
31601--- a/drivers/block/DAC960.c
31602+++ b/drivers/block/DAC960.c
31603@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
31604 unsigned long flags;
31605 int Channel, TargetID;
31606
31607+ pax_track_stack();
31608+
31609 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
31610 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
31611 sizeof(DAC960_SCSI_Inquiry_T) +
31612diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
31613index 68b90d9..7e2e3f3 100644
31614--- a/drivers/block/cciss.c
31615+++ b/drivers/block/cciss.c
31616@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
31617 int err;
31618 u32 cp;
31619
31620+ memset(&arg64, 0, sizeof(arg64));
31621+
31622 err = 0;
31623 err |=
31624 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
31625@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
31626 /* Wait (up to 20 seconds) for a command to complete */
31627
31628 for (i = 20 * HZ; i > 0; i--) {
31629- done = hba[ctlr]->access.command_completed(hba[ctlr]);
31630+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
31631 if (done == FIFO_EMPTY)
31632 schedule_timeout_uninterruptible(1);
31633 else
31634@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
31635 resend_cmd1:
31636
31637 /* Disable interrupt on the board. */
31638- h->access.set_intr_mask(h, CCISS_INTR_OFF);
31639+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
31640
31641 /* Make sure there is room in the command FIFO */
31642 /* Actually it should be completely empty at this time */
31643@@ -2884,13 +2886,13 @@ resend_cmd1:
31644 /* tape side of the driver. */
31645 for (i = 200000; i > 0; i--) {
31646 /* if fifo isn't full go */
31647- if (!(h->access.fifo_full(h)))
31648+ if (!(h->access->fifo_full(h)))
31649 break;
31650 udelay(10);
31651 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
31652 " waiting!\n", h->ctlr);
31653 }
31654- h->access.submit_command(h, c); /* Send the cmd */
31655+ h->access->submit_command(h, c); /* Send the cmd */
31656 do {
31657 complete = pollcomplete(h->ctlr);
31658
31659@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
31660 while (!hlist_empty(&h->reqQ)) {
31661 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
31662 /* can't do anything if fifo is full */
31663- if ((h->access.fifo_full(h))) {
31664+ if ((h->access->fifo_full(h))) {
31665 printk(KERN_WARNING "cciss: fifo full\n");
31666 break;
31667 }
31668@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
31669 h->Qdepth--;
31670
31671 /* Tell the controller execute command */
31672- h->access.submit_command(h, c);
31673+ h->access->submit_command(h, c);
31674
31675 /* Put job onto the completed Q */
31676 addQ(&h->cmpQ, c);
31677@@ -3393,17 +3395,17 @@ startio:
31678
31679 static inline unsigned long get_next_completion(ctlr_info_t *h)
31680 {
31681- return h->access.command_completed(h);
31682+ return h->access->command_completed(h);
31683 }
31684
31685 static inline int interrupt_pending(ctlr_info_t *h)
31686 {
31687- return h->access.intr_pending(h);
31688+ return h->access->intr_pending(h);
31689 }
31690
31691 static inline long interrupt_not_for_us(ctlr_info_t *h)
31692 {
31693- return (((h->access.intr_pending(h) == 0) ||
31694+ return (((h->access->intr_pending(h) == 0) ||
31695 (h->interrupts_enabled == 0)));
31696 }
31697
31698@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
31699 */
31700 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
31701 c->product_name = products[prod_index].product_name;
31702- c->access = *(products[prod_index].access);
31703+ c->access = products[prod_index].access;
31704 c->nr_cmds = c->max_commands - 4;
31705 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
31706 (readb(&c->cfgtable->Signature[1]) != 'I') ||
31707@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
31708 }
31709
31710 /* make sure the board interrupts are off */
31711- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
31712+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
31713 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
31714 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
31715 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
31716@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
31717 cciss_scsi_setup(i);
31718
31719 /* Turn the interrupts on so we can service requests */
31720- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
31721+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
31722
31723 /* Get the firmware version */
31724 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
31725diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
31726index 04d6bf8..36e712d 100644
31727--- a/drivers/block/cciss.h
31728+++ b/drivers/block/cciss.h
31729@@ -90,7 +90,7 @@ struct ctlr_info
31730 // information about each logical volume
31731 drive_info_struct *drv[CISS_MAX_LUN];
31732
31733- struct access_method access;
31734+ struct access_method *access;
31735
31736 /* queue and queue Info */
31737 struct hlist_head reqQ;
31738diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
31739index 6422651..bb1bdef 100644
31740--- a/drivers/block/cpqarray.c
31741+++ b/drivers/block/cpqarray.c
31742@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31743 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
31744 goto Enomem4;
31745 }
31746- hba[i]->access.set_intr_mask(hba[i], 0);
31747+ hba[i]->access->set_intr_mask(hba[i], 0);
31748 if (request_irq(hba[i]->intr, do_ida_intr,
31749 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
31750 {
31751@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
31752 add_timer(&hba[i]->timer);
31753
31754 /* Enable IRQ now that spinlock and rate limit timer are set up */
31755- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31756+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
31757
31758 for(j=0; j<NWD; j++) {
31759 struct gendisk *disk = ida_gendisk[i][j];
31760@@ -695,7 +695,7 @@ DBGINFO(
31761 for(i=0; i<NR_PRODUCTS; i++) {
31762 if (board_id == products[i].board_id) {
31763 c->product_name = products[i].product_name;
31764- c->access = *(products[i].access);
31765+ c->access = products[i].access;
31766 break;
31767 }
31768 }
31769@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
31770 hba[ctlr]->intr = intr;
31771 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
31772 hba[ctlr]->product_name = products[j].product_name;
31773- hba[ctlr]->access = *(products[j].access);
31774+ hba[ctlr]->access = products[j].access;
31775 hba[ctlr]->ctlr = ctlr;
31776 hba[ctlr]->board_id = board_id;
31777 hba[ctlr]->pci_dev = NULL; /* not PCI */
31778@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
31779 struct scatterlist tmp_sg[SG_MAX];
31780 int i, dir, seg;
31781
31782+ pax_track_stack();
31783+
31784 if (blk_queue_plugged(q))
31785 goto startio;
31786
31787@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
31788
31789 while((c = h->reqQ) != NULL) {
31790 /* Can't do anything if we're busy */
31791- if (h->access.fifo_full(h) == 0)
31792+ if (h->access->fifo_full(h) == 0)
31793 return;
31794
31795 /* Get the first entry from the request Q */
31796@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
31797 h->Qdepth--;
31798
31799 /* Tell the controller to do our bidding */
31800- h->access.submit_command(h, c);
31801+ h->access->submit_command(h, c);
31802
31803 /* Get onto the completion Q */
31804 addQ(&h->cmpQ, c);
31805@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31806 unsigned long flags;
31807 __u32 a,a1;
31808
31809- istat = h->access.intr_pending(h);
31810+ istat = h->access->intr_pending(h);
31811 /* Is this interrupt for us? */
31812 if (istat == 0)
31813 return IRQ_NONE;
31814@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
31815 */
31816 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
31817 if (istat & FIFO_NOT_EMPTY) {
31818- while((a = h->access.command_completed(h))) {
31819+ while((a = h->access->command_completed(h))) {
31820 a1 = a; a &= ~3;
31821 if ((c = h->cmpQ) == NULL)
31822 {
31823@@ -1434,11 +1436,11 @@ static int sendcmd(
31824 /*
31825 * Disable interrupt
31826 */
31827- info_p->access.set_intr_mask(info_p, 0);
31828+ info_p->access->set_intr_mask(info_p, 0);
31829 /* Make sure there is room in the command FIFO */
31830 /* Actually it should be completely empty at this time. */
31831 for (i = 200000; i > 0; i--) {
31832- temp = info_p->access.fifo_full(info_p);
31833+ temp = info_p->access->fifo_full(info_p);
31834 if (temp != 0) {
31835 break;
31836 }
31837@@ -1451,7 +1453,7 @@ DBG(
31838 /*
31839 * Send the cmd
31840 */
31841- info_p->access.submit_command(info_p, c);
31842+ info_p->access->submit_command(info_p, c);
31843 complete = pollcomplete(ctlr);
31844
31845 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
31846@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
31847 * we check the new geometry. Then turn interrupts back on when
31848 * we're done.
31849 */
31850- host->access.set_intr_mask(host, 0);
31851+ host->access->set_intr_mask(host, 0);
31852 getgeometry(ctlr);
31853- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
31854+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
31855
31856 for(i=0; i<NWD; i++) {
31857 struct gendisk *disk = ida_gendisk[ctlr][i];
31858@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
31859 /* Wait (up to 2 seconds) for a command to complete */
31860
31861 for (i = 200000; i > 0; i--) {
31862- done = hba[ctlr]->access.command_completed(hba[ctlr]);
31863+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
31864 if (done == 0) {
31865 udelay(10); /* a short fixed delay */
31866 } else
31867diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
31868index be73e9d..7fbf140 100644
31869--- a/drivers/block/cpqarray.h
31870+++ b/drivers/block/cpqarray.h
31871@@ -99,7 +99,7 @@ struct ctlr_info {
31872 drv_info_t drv[NWD];
31873 struct proc_dir_entry *proc;
31874
31875- struct access_method access;
31876+ struct access_method *access;
31877
31878 cmdlist_t *reqQ;
31879 cmdlist_t *cmpQ;
31880diff --git a/drivers/block/loop.c b/drivers/block/loop.c
31881index 8ec2d70..2804b30 100644
31882--- a/drivers/block/loop.c
31883+++ b/drivers/block/loop.c
31884@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
31885 mm_segment_t old_fs = get_fs();
31886
31887 set_fs(get_ds());
31888- bw = file->f_op->write(file, buf, len, &pos);
31889+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
31890 set_fs(old_fs);
31891 if (likely(bw == len))
31892 return 0;
31893diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
31894index 26ada47..083c480 100644
31895--- a/drivers/block/nbd.c
31896+++ b/drivers/block/nbd.c
31897@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
31898 struct kvec iov;
31899 sigset_t blocked, oldset;
31900
31901+ pax_track_stack();
31902+
31903 if (unlikely(!sock)) {
31904 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
31905 lo->disk->disk_name, (send ? "send" : "recv"));
31906@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
31907 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
31908 unsigned int cmd, unsigned long arg)
31909 {
31910+ pax_track_stack();
31911+
31912 switch (cmd) {
31913 case NBD_DISCONNECT: {
31914 struct request sreq;
31915diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
31916index a5d585d..d087be3 100644
31917--- a/drivers/block/pktcdvd.c
31918+++ b/drivers/block/pktcdvd.c
31919@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
31920 return len;
31921 }
31922
31923-static struct sysfs_ops kobj_pkt_ops = {
31924+static const struct sysfs_ops kobj_pkt_ops = {
31925 .show = kobj_pkt_show,
31926 .store = kobj_pkt_store
31927 };
31928diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
31929index 6aad99e..89cd142 100644
31930--- a/drivers/char/Kconfig
31931+++ b/drivers/char/Kconfig
31932@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
31933
31934 config DEVKMEM
31935 bool "/dev/kmem virtual device support"
31936- default y
31937+ default n
31938+ depends on !GRKERNSEC_KMEM
31939 help
31940 Say Y here if you want to support the /dev/kmem device. The
31941 /dev/kmem device is rarely used, but can be used for certain
31942@@ -1114,6 +1115,7 @@ config DEVPORT
31943 bool
31944 depends on !M68K
31945 depends on ISA || PCI
31946+ depends on !GRKERNSEC_KMEM
31947 default y
31948
31949 source "drivers/s390/char/Kconfig"
31950diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
31951index a96f319..a778a5b 100644
31952--- a/drivers/char/agp/frontend.c
31953+++ b/drivers/char/agp/frontend.c
31954@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
31955 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
31956 return -EFAULT;
31957
31958- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
31959+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
31960 return -EFAULT;
31961
31962 client = agp_find_client_by_pid(reserve.pid);
31963diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
31964index d8cff90..9628e70 100644
31965--- a/drivers/char/briq_panel.c
31966+++ b/drivers/char/briq_panel.c
31967@@ -10,6 +10,7 @@
31968 #include <linux/types.h>
31969 #include <linux/errno.h>
31970 #include <linux/tty.h>
31971+#include <linux/mutex.h>
31972 #include <linux/timer.h>
31973 #include <linux/kernel.h>
31974 #include <linux/wait.h>
31975@@ -36,6 +37,7 @@ static int vfd_is_open;
31976 static unsigned char vfd[40];
31977 static int vfd_cursor;
31978 static unsigned char ledpb, led;
31979+static DEFINE_MUTEX(vfd_mutex);
31980
31981 static void update_vfd(void)
31982 {
31983@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
31984 if (!vfd_is_open)
31985 return -EBUSY;
31986
31987+ mutex_lock(&vfd_mutex);
31988 for (;;) {
31989 char c;
31990 if (!indx)
31991 break;
31992- if (get_user(c, buf))
31993+ if (get_user(c, buf)) {
31994+ mutex_unlock(&vfd_mutex);
31995 return -EFAULT;
31996+ }
31997 if (esc) {
31998 set_led(c);
31999 esc = 0;
32000@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
32001 buf++;
32002 }
32003 update_vfd();
32004+ mutex_unlock(&vfd_mutex);
32005
32006 return len;
32007 }
32008diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
32009index 31e7c91..161afc0 100644
32010--- a/drivers/char/genrtc.c
32011+++ b/drivers/char/genrtc.c
32012@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
32013 switch (cmd) {
32014
32015 case RTC_PLL_GET:
32016+ memset(&pll, 0, sizeof(pll));
32017 if (get_rtc_pll(&pll))
32018 return -EINVAL;
32019 else
32020diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
32021index 006466d..a2bb21c 100644
32022--- a/drivers/char/hpet.c
32023+++ b/drivers/char/hpet.c
32024@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
32025 return 0;
32026 }
32027
32028-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
32029+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
32030
32031 static int
32032 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
32033@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
32034 }
32035
32036 static int
32037-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32038+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
32039 {
32040 struct hpet_timer __iomem *timer;
32041 struct hpet __iomem *hpet;
32042@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
32043 {
32044 struct hpet_info info;
32045
32046+ memset(&info, 0, sizeof(info));
32047+
32048 if (devp->hd_ireqfreq)
32049 info.hi_ireqfreq =
32050 hpet_time_div(hpetp, devp->hd_ireqfreq);
32051- else
32052- info.hi_ireqfreq = 0;
32053 info.hi_flags =
32054 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
32055 info.hi_hpet = hpetp->hp_which;
32056diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
32057index 0afc8b8..6913fc3 100644
32058--- a/drivers/char/hvc_beat.c
32059+++ b/drivers/char/hvc_beat.c
32060@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
32061 return cnt;
32062 }
32063
32064-static struct hv_ops hvc_beat_get_put_ops = {
32065+static const struct hv_ops hvc_beat_get_put_ops = {
32066 .get_chars = hvc_beat_get_chars,
32067 .put_chars = hvc_beat_put_chars,
32068 };
32069diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
32070index 98097f2..407dddc 100644
32071--- a/drivers/char/hvc_console.c
32072+++ b/drivers/char/hvc_console.c
32073@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
32074 * console interfaces but can still be used as a tty device. This has to be
32075 * static because kmalloc will not work during early console init.
32076 */
32077-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32078+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
32079 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
32080 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
32081
32082@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
32083 * vty adapters do NOT get an hvc_instantiate() callback since they
32084 * appear after early console init.
32085 */
32086-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
32087+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
32088 {
32089 struct hvc_struct *hp;
32090
32091@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
32092 };
32093
32094 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
32095- struct hv_ops *ops, int outbuf_size)
32096+ const struct hv_ops *ops, int outbuf_size)
32097 {
32098 struct hvc_struct *hp;
32099 int i;
32100diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
32101index 10950ca..ed176c3 100644
32102--- a/drivers/char/hvc_console.h
32103+++ b/drivers/char/hvc_console.h
32104@@ -55,7 +55,7 @@ struct hvc_struct {
32105 int outbuf_size;
32106 int n_outbuf;
32107 uint32_t vtermno;
32108- struct hv_ops *ops;
32109+ const struct hv_ops *ops;
32110 int irq_requested;
32111 int data;
32112 struct winsize ws;
32113@@ -76,11 +76,11 @@ struct hv_ops {
32114 };
32115
32116 /* Register a vterm and a slot index for use as a console (console_init) */
32117-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
32118+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
32119
32120 /* register a vterm for hvc tty operation (module_init or hotplug add) */
32121 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
32122- struct hv_ops *ops, int outbuf_size);
32123+ const struct hv_ops *ops, int outbuf_size);
32124 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
32125 extern int hvc_remove(struct hvc_struct *hp);
32126
32127diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
32128index 936d05b..fd02426 100644
32129--- a/drivers/char/hvc_iseries.c
32130+++ b/drivers/char/hvc_iseries.c
32131@@ -197,7 +197,7 @@ done:
32132 return sent;
32133 }
32134
32135-static struct hv_ops hvc_get_put_ops = {
32136+static const struct hv_ops hvc_get_put_ops = {
32137 .get_chars = get_chars,
32138 .put_chars = put_chars,
32139 .notifier_add = notifier_add_irq,
32140diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
32141index b0e168f..69cda2a 100644
32142--- a/drivers/char/hvc_iucv.c
32143+++ b/drivers/char/hvc_iucv.c
32144@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
32145
32146
32147 /* HVC operations */
32148-static struct hv_ops hvc_iucv_ops = {
32149+static const struct hv_ops hvc_iucv_ops = {
32150 .get_chars = hvc_iucv_get_chars,
32151 .put_chars = hvc_iucv_put_chars,
32152 .notifier_add = hvc_iucv_notifier_add,
32153diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
32154index 88590d0..61c4a61 100644
32155--- a/drivers/char/hvc_rtas.c
32156+++ b/drivers/char/hvc_rtas.c
32157@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
32158 return i;
32159 }
32160
32161-static struct hv_ops hvc_rtas_get_put_ops = {
32162+static const struct hv_ops hvc_rtas_get_put_ops = {
32163 .get_chars = hvc_rtas_read_console,
32164 .put_chars = hvc_rtas_write_console,
32165 };
32166diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
32167index bd63ba8..b0957e6 100644
32168--- a/drivers/char/hvc_udbg.c
32169+++ b/drivers/char/hvc_udbg.c
32170@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
32171 return i;
32172 }
32173
32174-static struct hv_ops hvc_udbg_ops = {
32175+static const struct hv_ops hvc_udbg_ops = {
32176 .get_chars = hvc_udbg_get,
32177 .put_chars = hvc_udbg_put,
32178 };
32179diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
32180index 10be343..27370e9 100644
32181--- a/drivers/char/hvc_vio.c
32182+++ b/drivers/char/hvc_vio.c
32183@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
32184 return got;
32185 }
32186
32187-static struct hv_ops hvc_get_put_ops = {
32188+static const struct hv_ops hvc_get_put_ops = {
32189 .get_chars = filtered_get_chars,
32190 .put_chars = hvc_put_chars,
32191 .notifier_add = notifier_add_irq,
32192diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
32193index a6ee32b..94f8c26 100644
32194--- a/drivers/char/hvc_xen.c
32195+++ b/drivers/char/hvc_xen.c
32196@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
32197 return recv;
32198 }
32199
32200-static struct hv_ops hvc_ops = {
32201+static const struct hv_ops hvc_ops = {
32202 .get_chars = read_console,
32203 .put_chars = write_console,
32204 .notifier_add = notifier_add_irq,
32205diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
32206index 266b858..f3ee0bb 100644
32207--- a/drivers/char/hvcs.c
32208+++ b/drivers/char/hvcs.c
32209@@ -82,6 +82,7 @@
32210 #include <asm/hvcserver.h>
32211 #include <asm/uaccess.h>
32212 #include <asm/vio.h>
32213+#include <asm/local.h>
32214
32215 /*
32216 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32217@@ -269,7 +270,7 @@ struct hvcs_struct {
32218 unsigned int index;
32219
32220 struct tty_struct *tty;
32221- int open_count;
32222+ local_t open_count;
32223
32224 /*
32225 * Used to tell the driver kernel_thread what operations need to take
32226@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
32227
32228 spin_lock_irqsave(&hvcsd->lock, flags);
32229
32230- if (hvcsd->open_count > 0) {
32231+ if (local_read(&hvcsd->open_count) > 0) {
32232 spin_unlock_irqrestore(&hvcsd->lock, flags);
32233 printk(KERN_INFO "HVCS: vterm state unchanged. "
32234 "The hvcs device node is still in use.\n");
32235@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
32236 if ((retval = hvcs_partner_connect(hvcsd)))
32237 goto error_release;
32238
32239- hvcsd->open_count = 1;
32240+ local_set(&hvcsd->open_count, 1);
32241 hvcsd->tty = tty;
32242 tty->driver_data = hvcsd;
32243
32244@@ -1169,7 +1170,7 @@ fast_open:
32245
32246 spin_lock_irqsave(&hvcsd->lock, flags);
32247 kref_get(&hvcsd->kref);
32248- hvcsd->open_count++;
32249+ local_inc(&hvcsd->open_count);
32250 hvcsd->todo_mask |= HVCS_SCHED_READ;
32251 spin_unlock_irqrestore(&hvcsd->lock, flags);
32252
32253@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32254 hvcsd = tty->driver_data;
32255
32256 spin_lock_irqsave(&hvcsd->lock, flags);
32257- if (--hvcsd->open_count == 0) {
32258+ if (local_dec_and_test(&hvcsd->open_count)) {
32259
32260 vio_disable_interrupts(hvcsd->vdev);
32261
32262@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
32263 free_irq(irq, hvcsd);
32264 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32265 return;
32266- } else if (hvcsd->open_count < 0) {
32267+ } else if (local_read(&hvcsd->open_count) < 0) {
32268 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32269 " is missmanaged.\n",
32270- hvcsd->vdev->unit_address, hvcsd->open_count);
32271+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32272 }
32273
32274 spin_unlock_irqrestore(&hvcsd->lock, flags);
32275@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32276
32277 spin_lock_irqsave(&hvcsd->lock, flags);
32278 /* Preserve this so that we know how many kref refs to put */
32279- temp_open_count = hvcsd->open_count;
32280+ temp_open_count = local_read(&hvcsd->open_count);
32281
32282 /*
32283 * Don't kref put inside the spinlock because the destruction
32284@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
32285 hvcsd->tty->driver_data = NULL;
32286 hvcsd->tty = NULL;
32287
32288- hvcsd->open_count = 0;
32289+ local_set(&hvcsd->open_count, 0);
32290
32291 /* This will drop any buffered data on the floor which is OK in a hangup
32292 * scenario. */
32293@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
32294 * the middle of a write operation? This is a crummy place to do this
32295 * but we want to keep it all in the spinlock.
32296 */
32297- if (hvcsd->open_count <= 0) {
32298+ if (local_read(&hvcsd->open_count) <= 0) {
32299 spin_unlock_irqrestore(&hvcsd->lock, flags);
32300 return -ENODEV;
32301 }
32302@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
32303 {
32304 struct hvcs_struct *hvcsd = tty->driver_data;
32305
32306- if (!hvcsd || hvcsd->open_count <= 0)
32307+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32308 return 0;
32309
32310 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32311diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
32312index ec5e3f8..02455ba 100644
32313--- a/drivers/char/ipmi/ipmi_msghandler.c
32314+++ b/drivers/char/ipmi/ipmi_msghandler.c
32315@@ -414,7 +414,7 @@ struct ipmi_smi {
32316 struct proc_dir_entry *proc_dir;
32317 char proc_dir_name[10];
32318
32319- atomic_t stats[IPMI_NUM_STATS];
32320+ atomic_unchecked_t stats[IPMI_NUM_STATS];
32321
32322 /*
32323 * run_to_completion duplicate of smb_info, smi_info
32324@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
32325
32326
32327 #define ipmi_inc_stat(intf, stat) \
32328- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
32329+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
32330 #define ipmi_get_stat(intf, stat) \
32331- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
32332+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
32333
32334 static int is_lan_addr(struct ipmi_addr *addr)
32335 {
32336@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
32337 INIT_LIST_HEAD(&intf->cmd_rcvrs);
32338 init_waitqueue_head(&intf->waitq);
32339 for (i = 0; i < IPMI_NUM_STATS; i++)
32340- atomic_set(&intf->stats[i], 0);
32341+ atomic_set_unchecked(&intf->stats[i], 0);
32342
32343 intf->proc_dir = NULL;
32344
32345@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
32346 struct ipmi_smi_msg smi_msg;
32347 struct ipmi_recv_msg recv_msg;
32348
32349+ pax_track_stack();
32350+
32351 si = (struct ipmi_system_interface_addr *) &addr;
32352 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
32353 si->channel = IPMI_BMC_CHANNEL;
32354diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
32355index abae8c9..8021979 100644
32356--- a/drivers/char/ipmi/ipmi_si_intf.c
32357+++ b/drivers/char/ipmi/ipmi_si_intf.c
32358@@ -277,7 +277,7 @@ struct smi_info {
32359 unsigned char slave_addr;
32360
32361 /* Counters and things for the proc filesystem. */
32362- atomic_t stats[SI_NUM_STATS];
32363+ atomic_unchecked_t stats[SI_NUM_STATS];
32364
32365 struct task_struct *thread;
32366
32367@@ -285,9 +285,9 @@ struct smi_info {
32368 };
32369
32370 #define smi_inc_stat(smi, stat) \
32371- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
32372+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
32373 #define smi_get_stat(smi, stat) \
32374- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
32375+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
32376
32377 #define SI_MAX_PARMS 4
32378
32379@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
32380 atomic_set(&new_smi->req_events, 0);
32381 new_smi->run_to_completion = 0;
32382 for (i = 0; i < SI_NUM_STATS; i++)
32383- atomic_set(&new_smi->stats[i], 0);
32384+ atomic_set_unchecked(&new_smi->stats[i], 0);
32385
32386 new_smi->interrupt_disabled = 0;
32387 atomic_set(&new_smi->stop_operation, 0);
32388diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
32389index 402838f..55e2200 100644
32390--- a/drivers/char/istallion.c
32391+++ b/drivers/char/istallion.c
32392@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
32393 * re-used for each stats call.
32394 */
32395 static comstats_t stli_comstats;
32396-static combrd_t stli_brdstats;
32397 static struct asystats stli_cdkstats;
32398
32399 /*****************************************************************************/
32400@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
32401 {
32402 struct stlibrd *brdp;
32403 unsigned int i;
32404+ combrd_t stli_brdstats;
32405
32406 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
32407 return -EFAULT;
32408@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
32409 struct stliport stli_dummyport;
32410 struct stliport *portp;
32411
32412+ pax_track_stack();
32413+
32414 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32415 return -EFAULT;
32416 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32417@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
32418 struct stlibrd stli_dummybrd;
32419 struct stlibrd *brdp;
32420
32421+ pax_track_stack();
32422+
32423 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32424 return -EFAULT;
32425 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32426diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
32427index 950837c..e55a288 100644
32428--- a/drivers/char/keyboard.c
32429+++ b/drivers/char/keyboard.c
32430@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
32431 kbd->kbdmode == VC_MEDIUMRAW) &&
32432 value != KVAL(K_SAK))
32433 return; /* SAK is allowed even in raw mode */
32434+
32435+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32436+ {
32437+ void *func = fn_handler[value];
32438+ if (func == fn_show_state || func == fn_show_ptregs ||
32439+ func == fn_show_mem)
32440+ return;
32441+ }
32442+#endif
32443+
32444 fn_handler[value](vc);
32445 }
32446
32447@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
32448 .evbit = { BIT_MASK(EV_SND) },
32449 },
32450
32451- { }, /* Terminating entry */
32452+ { 0 }, /* Terminating entry */
32453 };
32454
32455 MODULE_DEVICE_TABLE(input, kbd_ids);
32456diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
32457index 87c67b4..230527a 100644
32458--- a/drivers/char/mbcs.c
32459+++ b/drivers/char/mbcs.c
32460@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
32461 return 0;
32462 }
32463
32464-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
32465+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
32466 {
32467 .part_num = MBCS_PART_NUM,
32468 .mfg_num = MBCS_MFG_NUM,
32469diff --git a/drivers/char/mem.c b/drivers/char/mem.c
32470index 1270f64..8495f49 100644
32471--- a/drivers/char/mem.c
32472+++ b/drivers/char/mem.c
32473@@ -18,6 +18,7 @@
32474 #include <linux/raw.h>
32475 #include <linux/tty.h>
32476 #include <linux/capability.h>
32477+#include <linux/security.h>
32478 #include <linux/ptrace.h>
32479 #include <linux/device.h>
32480 #include <linux/highmem.h>
32481@@ -35,6 +36,10 @@
32482 # include <linux/efi.h>
32483 #endif
32484
32485+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
32486+extern struct file_operations grsec_fops;
32487+#endif
32488+
32489 static inline unsigned long size_inside_page(unsigned long start,
32490 unsigned long size)
32491 {
32492@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32493
32494 while (cursor < to) {
32495 if (!devmem_is_allowed(pfn)) {
32496+#ifdef CONFIG_GRKERNSEC_KMEM
32497+ gr_handle_mem_readwrite(from, to);
32498+#else
32499 printk(KERN_INFO
32500 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
32501 current->comm, from, to);
32502+#endif
32503 return 0;
32504 }
32505 cursor += PAGE_SIZE;
32506@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32507 }
32508 return 1;
32509 }
32510+#elif defined(CONFIG_GRKERNSEC_KMEM)
32511+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32512+{
32513+ return 0;
32514+}
32515 #else
32516 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32517 {
32518@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
32519 #endif
32520
32521 while (count > 0) {
32522+ char *temp;
32523+
32524 /*
32525 * Handle first page in case it's not aligned
32526 */
32527@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
32528 if (!ptr)
32529 return -EFAULT;
32530
32531- if (copy_to_user(buf, ptr, sz)) {
32532+#ifdef CONFIG_PAX_USERCOPY
32533+ temp = kmalloc(sz, GFP_KERNEL);
32534+ if (!temp) {
32535+ unxlate_dev_mem_ptr(p, ptr);
32536+ return -ENOMEM;
32537+ }
32538+ memcpy(temp, ptr, sz);
32539+#else
32540+ temp = ptr;
32541+#endif
32542+
32543+ if (copy_to_user(buf, temp, sz)) {
32544+
32545+#ifdef CONFIG_PAX_USERCOPY
32546+ kfree(temp);
32547+#endif
32548+
32549 unxlate_dev_mem_ptr(p, ptr);
32550 return -EFAULT;
32551 }
32552
32553+#ifdef CONFIG_PAX_USERCOPY
32554+ kfree(temp);
32555+#endif
32556+
32557 unxlate_dev_mem_ptr(p, ptr);
32558
32559 buf += sz;
32560@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32561 size_t count, loff_t *ppos)
32562 {
32563 unsigned long p = *ppos;
32564- ssize_t low_count, read, sz;
32565+ ssize_t low_count, read, sz, err = 0;
32566 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
32567- int err = 0;
32568
32569 read = 0;
32570 if (p < (unsigned long) high_memory) {
32571@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32572 }
32573 #endif
32574 while (low_count > 0) {
32575+ char *temp;
32576+
32577 sz = size_inside_page(p, low_count);
32578
32579 /*
32580@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
32581 */
32582 kbuf = xlate_dev_kmem_ptr((char *)p);
32583
32584- if (copy_to_user(buf, kbuf, sz))
32585+#ifdef CONFIG_PAX_USERCOPY
32586+ temp = kmalloc(sz, GFP_KERNEL);
32587+ if (!temp)
32588+ return -ENOMEM;
32589+ memcpy(temp, kbuf, sz);
32590+#else
32591+ temp = kbuf;
32592+#endif
32593+
32594+ err = copy_to_user(buf, temp, sz);
32595+
32596+#ifdef CONFIG_PAX_USERCOPY
32597+ kfree(temp);
32598+#endif
32599+
32600+ if (err)
32601 return -EFAULT;
32602 buf += sz;
32603 p += sz;
32604@@ -889,6 +941,9 @@ static const struct memdev {
32605 #ifdef CONFIG_CRASH_DUMP
32606 [12] = { "oldmem", 0, &oldmem_fops, NULL },
32607 #endif
32608+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
32609+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
32610+#endif
32611 };
32612
32613 static int memory_open(struct inode *inode, struct file *filp)
32614diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
32615index 918711a..4ffaf5e 100644
32616--- a/drivers/char/mmtimer.c
32617+++ b/drivers/char/mmtimer.c
32618@@ -756,7 +756,7 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
32619 return err;
32620 }
32621
32622-static struct k_clock sgi_clock = {
32623+static k_clock_no_const sgi_clock = {
32624 .res = 0,
32625 .clock_set = sgi_clock_set,
32626 .clock_get = sgi_clock_get,
32627diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
32628index 674b3ab..a8d1970 100644
32629--- a/drivers/char/pcmcia/ipwireless/tty.c
32630+++ b/drivers/char/pcmcia/ipwireless/tty.c
32631@@ -29,6 +29,7 @@
32632 #include <linux/tty_driver.h>
32633 #include <linux/tty_flip.h>
32634 #include <linux/uaccess.h>
32635+#include <asm/local.h>
32636
32637 #include "tty.h"
32638 #include "network.h"
32639@@ -51,7 +52,7 @@ struct ipw_tty {
32640 int tty_type;
32641 struct ipw_network *network;
32642 struct tty_struct *linux_tty;
32643- int open_count;
32644+ local_t open_count;
32645 unsigned int control_lines;
32646 struct mutex ipw_tty_mutex;
32647 int tx_bytes_queued;
32648@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
32649 mutex_unlock(&tty->ipw_tty_mutex);
32650 return -ENODEV;
32651 }
32652- if (tty->open_count == 0)
32653+ if (local_read(&tty->open_count) == 0)
32654 tty->tx_bytes_queued = 0;
32655
32656- tty->open_count++;
32657+ local_inc(&tty->open_count);
32658
32659 tty->linux_tty = linux_tty;
32660 linux_tty->driver_data = tty;
32661@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
32662
32663 static void do_ipw_close(struct ipw_tty *tty)
32664 {
32665- tty->open_count--;
32666-
32667- if (tty->open_count == 0) {
32668+ if (local_dec_return(&tty->open_count) == 0) {
32669 struct tty_struct *linux_tty = tty->linux_tty;
32670
32671 if (linux_tty != NULL) {
32672@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
32673 return;
32674
32675 mutex_lock(&tty->ipw_tty_mutex);
32676- if (tty->open_count == 0) {
32677+ if (local_read(&tty->open_count) == 0) {
32678 mutex_unlock(&tty->ipw_tty_mutex);
32679 return;
32680 }
32681@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
32682 return;
32683 }
32684
32685- if (!tty->open_count) {
32686+ if (!local_read(&tty->open_count)) {
32687 mutex_unlock(&tty->ipw_tty_mutex);
32688 return;
32689 }
32690@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
32691 return -ENODEV;
32692
32693 mutex_lock(&tty->ipw_tty_mutex);
32694- if (!tty->open_count) {
32695+ if (!local_read(&tty->open_count)) {
32696 mutex_unlock(&tty->ipw_tty_mutex);
32697 return -EINVAL;
32698 }
32699@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
32700 if (!tty)
32701 return -ENODEV;
32702
32703- if (!tty->open_count)
32704+ if (!local_read(&tty->open_count))
32705 return -EINVAL;
32706
32707 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32708@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
32709 if (!tty)
32710 return 0;
32711
32712- if (!tty->open_count)
32713+ if (!local_read(&tty->open_count))
32714 return 0;
32715
32716 return tty->tx_bytes_queued;
32717@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
32718 if (!tty)
32719 return -ENODEV;
32720
32721- if (!tty->open_count)
32722+ if (!local_read(&tty->open_count))
32723 return -EINVAL;
32724
32725 return get_control_lines(tty);
32726@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
32727 if (!tty)
32728 return -ENODEV;
32729
32730- if (!tty->open_count)
32731+ if (!local_read(&tty->open_count))
32732 return -EINVAL;
32733
32734 return set_control_lines(tty, set, clear);
32735@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
32736 if (!tty)
32737 return -ENODEV;
32738
32739- if (!tty->open_count)
32740+ if (!local_read(&tty->open_count))
32741 return -EINVAL;
32742
32743 /* FIXME: Exactly how is the tty object locked here .. */
32744@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
32745 against a parallel ioctl etc */
32746 mutex_lock(&ttyj->ipw_tty_mutex);
32747 }
32748- while (ttyj->open_count)
32749+ while (local_read(&ttyj->open_count))
32750 do_ipw_close(ttyj);
32751 ipwireless_disassociate_network_ttys(network,
32752 ttyj->channel_idx);
32753diff --git a/drivers/char/pty.c b/drivers/char/pty.c
32754index 62f282e..e45c45c 100644
32755--- a/drivers/char/pty.c
32756+++ b/drivers/char/pty.c
32757@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
32758 register_sysctl_table(pty_root_table);
32759
32760 /* Now create the /dev/ptmx special device */
32761+ pax_open_kernel();
32762 tty_default_fops(&ptmx_fops);
32763- ptmx_fops.open = ptmx_open;
32764+ *(void **)&ptmx_fops.open = ptmx_open;
32765+ pax_close_kernel();
32766
32767 cdev_init(&ptmx_cdev, &ptmx_fops);
32768 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
32769diff --git a/drivers/char/random.c b/drivers/char/random.c
32770index 3a19e2d..6ed09d3 100644
32771--- a/drivers/char/random.c
32772+++ b/drivers/char/random.c
32773@@ -254,8 +254,13 @@
32774 /*
32775 * Configuration information
32776 */
32777+#ifdef CONFIG_GRKERNSEC_RANDNET
32778+#define INPUT_POOL_WORDS 512
32779+#define OUTPUT_POOL_WORDS 128
32780+#else
32781 #define INPUT_POOL_WORDS 128
32782 #define OUTPUT_POOL_WORDS 32
32783+#endif
32784 #define SEC_XFER_SIZE 512
32785
32786 /*
32787@@ -292,10 +297,17 @@ static struct poolinfo {
32788 int poolwords;
32789 int tap1, tap2, tap3, tap4, tap5;
32790 } poolinfo_table[] = {
32791+#ifdef CONFIG_GRKERNSEC_RANDNET
32792+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
32793+ { 512, 411, 308, 208, 104, 1 },
32794+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
32795+ { 128, 103, 76, 51, 25, 1 },
32796+#else
32797 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
32798 { 128, 103, 76, 51, 25, 1 },
32799 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
32800 { 32, 26, 20, 14, 7, 1 },
32801+#endif
32802 #if 0
32803 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
32804 { 2048, 1638, 1231, 819, 411, 1 },
32805@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
32806 #include <linux/sysctl.h>
32807
32808 static int min_read_thresh = 8, min_write_thresh;
32809-static int max_read_thresh = INPUT_POOL_WORDS * 32;
32810+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
32811 static int max_write_thresh = INPUT_POOL_WORDS * 32;
32812 static char sysctl_bootid[16];
32813
32814diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
32815index 0e29a23..0efc2c2 100644
32816--- a/drivers/char/rocket.c
32817+++ b/drivers/char/rocket.c
32818@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
32819 struct rocket_ports tmp;
32820 int board;
32821
32822+ pax_track_stack();
32823+
32824 if (!retports)
32825 return -EFAULT;
32826 memset(&tmp, 0, sizeof (tmp));
32827diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
32828index 8c262aa..4d3b058 100644
32829--- a/drivers/char/sonypi.c
32830+++ b/drivers/char/sonypi.c
32831@@ -55,6 +55,7 @@
32832 #include <asm/uaccess.h>
32833 #include <asm/io.h>
32834 #include <asm/system.h>
32835+#include <asm/local.h>
32836
32837 #include <linux/sonypi.h>
32838
32839@@ -491,7 +492,7 @@ static struct sonypi_device {
32840 spinlock_t fifo_lock;
32841 wait_queue_head_t fifo_proc_list;
32842 struct fasync_struct *fifo_async;
32843- int open_count;
32844+ local_t open_count;
32845 int model;
32846 struct input_dev *input_jog_dev;
32847 struct input_dev *input_key_dev;
32848@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
32849 static int sonypi_misc_release(struct inode *inode, struct file *file)
32850 {
32851 mutex_lock(&sonypi_device.lock);
32852- sonypi_device.open_count--;
32853+ local_dec(&sonypi_device.open_count);
32854 mutex_unlock(&sonypi_device.lock);
32855 return 0;
32856 }
32857@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
32858 lock_kernel();
32859 mutex_lock(&sonypi_device.lock);
32860 /* Flush input queue on first open */
32861- if (!sonypi_device.open_count)
32862+ if (!local_read(&sonypi_device.open_count))
32863 kfifo_reset(sonypi_device.fifo);
32864- sonypi_device.open_count++;
32865+ local_inc(&sonypi_device.open_count);
32866 mutex_unlock(&sonypi_device.lock);
32867 unlock_kernel();
32868 return 0;
32869diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
32870index db6dcfa..13834cb 100644
32871--- a/drivers/char/stallion.c
32872+++ b/drivers/char/stallion.c
32873@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
32874 struct stlport stl_dummyport;
32875 struct stlport *portp;
32876
32877+ pax_track_stack();
32878+
32879 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32880 return -EFAULT;
32881 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32882diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
32883index a0789f6..cea3902 100644
32884--- a/drivers/char/tpm/tpm.c
32885+++ b/drivers/char/tpm/tpm.c
32886@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
32887 chip->vendor.req_complete_val)
32888 goto out_recv;
32889
32890- if ((status == chip->vendor.req_canceled)) {
32891+ if (status == chip->vendor.req_canceled) {
32892 dev_err(chip->dev, "Operation Canceled\n");
32893 rc = -ECANCELED;
32894 goto out;
32895@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
32896
32897 struct tpm_chip *chip = dev_get_drvdata(dev);
32898
32899+ pax_track_stack();
32900+
32901 tpm_cmd.header.in = tpm_readpubek_header;
32902 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
32903 "attempting to read the PUBEK");
32904diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
32905index bf2170f..ce8cab9 100644
32906--- a/drivers/char/tpm/tpm_bios.c
32907+++ b/drivers/char/tpm/tpm_bios.c
32908@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
32909 event = addr;
32910
32911 if ((event->event_type == 0 && event->event_size == 0) ||
32912- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
32913+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
32914 return NULL;
32915
32916 return addr;
32917@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
32918 return NULL;
32919
32920 if ((event->event_type == 0 && event->event_size == 0) ||
32921- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
32922+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
32923 return NULL;
32924
32925 (*pos)++;
32926@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
32927 int i;
32928
32929 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
32930- seq_putc(m, data[i]);
32931+ if (!seq_putc(m, data[i]))
32932+ return -EFAULT;
32933
32934 return 0;
32935 }
32936@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
32937 log->bios_event_log_end = log->bios_event_log + len;
32938
32939 virt = acpi_os_map_memory(start, len);
32940+ if (!virt) {
32941+ kfree(log->bios_event_log);
32942+ log->bios_event_log = NULL;
32943+ return -EFAULT;
32944+ }
32945
32946- memcpy(log->bios_event_log, virt, len);
32947+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
32948
32949 acpi_os_unmap_memory(virt, len);
32950 return 0;
32951diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
32952index 123cedf..6664cb4 100644
32953--- a/drivers/char/tty_io.c
32954+++ b/drivers/char/tty_io.c
32955@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
32956 static int tty_release(struct inode *, struct file *);
32957 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
32958 #ifdef CONFIG_COMPAT
32959-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32960+long tty_compat_ioctl(struct file *file, unsigned int cmd,
32961 unsigned long arg);
32962 #else
32963 #define tty_compat_ioctl NULL
32964@@ -1774,6 +1774,7 @@ got_driver:
32965
32966 if (IS_ERR(tty)) {
32967 mutex_unlock(&tty_mutex);
32968+ tty_driver_kref_put(driver);
32969 return PTR_ERR(tty);
32970 }
32971 }
32972@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
32973 return retval;
32974 }
32975
32976+EXPORT_SYMBOL(tty_ioctl);
32977+
32978 #ifdef CONFIG_COMPAT
32979-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32980+long tty_compat_ioctl(struct file *file, unsigned int cmd,
32981 unsigned long arg)
32982 {
32983 struct inode *inode = file->f_dentry->d_inode;
32984@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
32985
32986 return retval;
32987 }
32988+
32989+EXPORT_SYMBOL(tty_compat_ioctl);
32990 #endif
32991
32992 /*
32993@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
32994
32995 void tty_default_fops(struct file_operations *fops)
32996 {
32997- *fops = tty_fops;
32998+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
32999 }
33000
33001 /*
33002diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
33003index d814a3d..b55b9c9 100644
33004--- a/drivers/char/tty_ldisc.c
33005+++ b/drivers/char/tty_ldisc.c
33006@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
33007 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33008 struct tty_ldisc_ops *ldo = ld->ops;
33009
33010- ldo->refcount--;
33011+ atomic_dec(&ldo->refcount);
33012 module_put(ldo->owner);
33013 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33014
33015@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
33016 spin_lock_irqsave(&tty_ldisc_lock, flags);
33017 tty_ldiscs[disc] = new_ldisc;
33018 new_ldisc->num = disc;
33019- new_ldisc->refcount = 0;
33020+ atomic_set(&new_ldisc->refcount, 0);
33021 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33022
33023 return ret;
33024@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33025 return -EINVAL;
33026
33027 spin_lock_irqsave(&tty_ldisc_lock, flags);
33028- if (tty_ldiscs[disc]->refcount)
33029+ if (atomic_read(&tty_ldiscs[disc]->refcount))
33030 ret = -EBUSY;
33031 else
33032 tty_ldiscs[disc] = NULL;
33033@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
33034 if (ldops) {
33035 ret = ERR_PTR(-EAGAIN);
33036 if (try_module_get(ldops->owner)) {
33037- ldops->refcount++;
33038+ atomic_inc(&ldops->refcount);
33039 ret = ldops;
33040 }
33041 }
33042@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
33043 unsigned long flags;
33044
33045 spin_lock_irqsave(&tty_ldisc_lock, flags);
33046- ldops->refcount--;
33047+ atomic_dec(&ldops->refcount);
33048 module_put(ldops->owner);
33049 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33050 }
33051diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
33052index a035ae3..c27fe2c 100644
33053--- a/drivers/char/virtio_console.c
33054+++ b/drivers/char/virtio_console.c
33055@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
33056 * virtqueue, so we let the drivers do some boutique early-output thing. */
33057 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
33058 {
33059- virtio_cons.put_chars = put_chars;
33060+ pax_open_kernel();
33061+ *(void **)&virtio_cons.put_chars = put_chars;
33062+ pax_close_kernel();
33063 return hvc_instantiate(0, 0, &virtio_cons);
33064 }
33065
33066@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
33067 out_vq = vqs[1];
33068
33069 /* Start using the new console output. */
33070- virtio_cons.get_chars = get_chars;
33071- virtio_cons.put_chars = put_chars;
33072- virtio_cons.notifier_add = notifier_add_vio;
33073- virtio_cons.notifier_del = notifier_del_vio;
33074- virtio_cons.notifier_hangup = notifier_del_vio;
33075+ pax_open_kernel();
33076+ *(void **)&virtio_cons.get_chars = get_chars;
33077+ *(void **)&virtio_cons.put_chars = put_chars;
33078+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
33079+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
33080+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
33081+ pax_close_kernel();
33082
33083 /* The first argument of hvc_alloc() is the virtual console number, so
33084 * we use zero. The second argument is the parameter for the
33085diff --git a/drivers/char/vt.c b/drivers/char/vt.c
33086index 0c80c68..53d59c1 100644
33087--- a/drivers/char/vt.c
33088+++ b/drivers/char/vt.c
33089@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
33090
33091 static void notify_write(struct vc_data *vc, unsigned int unicode)
33092 {
33093- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33094+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
33095 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33096 }
33097
33098diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
33099index 6351a26..999af95 100644
33100--- a/drivers/char/vt_ioctl.c
33101+++ b/drivers/char/vt_ioctl.c
33102@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33103 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33104 return -EFAULT;
33105
33106- if (!capable(CAP_SYS_TTY_CONFIG))
33107- perm = 0;
33108-
33109 switch (cmd) {
33110 case KDGKBENT:
33111 key_map = key_maps[s];
33112@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
33113 val = (i ? K_HOLE : K_NOSUCHMAP);
33114 return put_user(val, &user_kbe->kb_value);
33115 case KDSKBENT:
33116+ if (!capable(CAP_SYS_TTY_CONFIG))
33117+ perm = 0;
33118+
33119 if (!perm)
33120 return -EPERM;
33121+
33122 if (!i && v == K_NOSUCHMAP) {
33123 /* deallocate map */
33124 key_map = key_maps[s];
33125@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33126 int i, j, k;
33127 int ret;
33128
33129- if (!capable(CAP_SYS_TTY_CONFIG))
33130- perm = 0;
33131-
33132 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33133 if (!kbs) {
33134 ret = -ENOMEM;
33135@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
33136 kfree(kbs);
33137 return ((p && *p) ? -EOVERFLOW : 0);
33138 case KDSKBSENT:
33139+ if (!capable(CAP_SYS_TTY_CONFIG))
33140+ perm = 0;
33141+
33142 if (!perm) {
33143 ret = -EPERM;
33144 goto reterr;
33145diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
33146index c7ae026..1769c1d 100644
33147--- a/drivers/cpufreq/cpufreq.c
33148+++ b/drivers/cpufreq/cpufreq.c
33149@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
33150 complete(&policy->kobj_unregister);
33151 }
33152
33153-static struct sysfs_ops sysfs_ops = {
33154+static const struct sysfs_ops sysfs_ops = {
33155 .show = show,
33156 .store = store,
33157 };
33158diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
33159index 97b0038..2056670 100644
33160--- a/drivers/cpuidle/sysfs.c
33161+++ b/drivers/cpuidle/sysfs.c
33162@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
33163 return ret;
33164 }
33165
33166-static struct sysfs_ops cpuidle_sysfs_ops = {
33167+static const struct sysfs_ops cpuidle_sysfs_ops = {
33168 .show = cpuidle_show,
33169 .store = cpuidle_store,
33170 };
33171@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
33172 return ret;
33173 }
33174
33175-static struct sysfs_ops cpuidle_state_sysfs_ops = {
33176+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
33177 .show = cpuidle_state_show,
33178 };
33179
33180@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
33181 .release = cpuidle_state_sysfs_release,
33182 };
33183
33184-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33185+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
33186 {
33187 kobject_put(&device->kobjs[i]->kobj);
33188 wait_for_completion(&device->kobjs[i]->kobj_unregister);
33189diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
33190index 5f753fc..0377ae9 100644
33191--- a/drivers/crypto/hifn_795x.c
33192+++ b/drivers/crypto/hifn_795x.c
33193@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
33194 0xCA, 0x34, 0x2B, 0x2E};
33195 struct scatterlist sg;
33196
33197+ pax_track_stack();
33198+
33199 memset(src, 0, sizeof(src));
33200 memset(ctx.key, 0, sizeof(ctx.key));
33201
33202diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
33203index 71e6482..de8d96c 100644
33204--- a/drivers/crypto/padlock-aes.c
33205+++ b/drivers/crypto/padlock-aes.c
33206@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
33207 struct crypto_aes_ctx gen_aes;
33208 int cpu;
33209
33210+ pax_track_stack();
33211+
33212 if (key_len % 8) {
33213 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
33214 return -EINVAL;
33215diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
33216index dcc4ab7..cc834bb 100644
33217--- a/drivers/dma/ioat/dma.c
33218+++ b/drivers/dma/ioat/dma.c
33219@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
33220 return entry->show(&chan->common, page);
33221 }
33222
33223-struct sysfs_ops ioat_sysfs_ops = {
33224+const struct sysfs_ops ioat_sysfs_ops = {
33225 .show = ioat_attr_show,
33226 };
33227
33228diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
33229index bbc3e78..f2db62c 100644
33230--- a/drivers/dma/ioat/dma.h
33231+++ b/drivers/dma/ioat/dma.h
33232@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
33233 unsigned long *phys_complete);
33234 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
33235 void ioat_kobject_del(struct ioatdma_device *device);
33236-extern struct sysfs_ops ioat_sysfs_ops;
33237+extern const struct sysfs_ops ioat_sysfs_ops;
33238 extern struct ioat_sysfs_entry ioat_version_attr;
33239 extern struct ioat_sysfs_entry ioat_cap_attr;
33240 #endif /* IOATDMA_H */
33241diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
33242index 9908c9e..3ceb0e5 100644
33243--- a/drivers/dma/ioat/dma_v3.c
33244+++ b/drivers/dma/ioat/dma_v3.c
33245@@ -71,10 +71,10 @@
33246 /* provide a lookup table for setting the source address in the base or
33247 * extended descriptor of an xor or pq descriptor
33248 */
33249-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
33250-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
33251-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
33252-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
33253+static const u8 xor_idx_to_desc = 0xd0;
33254+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
33255+static const u8 pq_idx_to_desc = 0xf8;
33256+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
33257
33258 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
33259 {
33260diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
33261index 85c464a..afd1e73 100644
33262--- a/drivers/edac/amd64_edac.c
33263+++ b/drivers/edac/amd64_edac.c
33264@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
33265 * PCI core identifies what devices are on a system during boot, and then
33266 * inquiry this table to see if this driver is for a given device found.
33267 */
33268-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
33269+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
33270 {
33271 .vendor = PCI_VENDOR_ID_AMD,
33272 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
33273diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
33274index 2b95f1a..4f52793 100644
33275--- a/drivers/edac/amd76x_edac.c
33276+++ b/drivers/edac/amd76x_edac.c
33277@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
33278 edac_mc_free(mci);
33279 }
33280
33281-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
33282+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
33283 {
33284 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33285 AMD762},
33286diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
33287index d205d49..74c9672 100644
33288--- a/drivers/edac/e752x_edac.c
33289+++ b/drivers/edac/e752x_edac.c
33290@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
33291 edac_mc_free(mci);
33292 }
33293
33294-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
33295+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
33296 {
33297 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33298 E7520},
33299diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
33300index c7d11cc..c59c1ca 100644
33301--- a/drivers/edac/e7xxx_edac.c
33302+++ b/drivers/edac/e7xxx_edac.c
33303@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
33304 edac_mc_free(mci);
33305 }
33306
33307-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
33308+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
33309 {
33310 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33311 E7205},
33312diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
33313index 5376457..5fdedbc 100644
33314--- a/drivers/edac/edac_device_sysfs.c
33315+++ b/drivers/edac/edac_device_sysfs.c
33316@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
33317 }
33318
33319 /* edac_dev file operations for an 'ctl_info' */
33320-static struct sysfs_ops device_ctl_info_ops = {
33321+static const struct sysfs_ops device_ctl_info_ops = {
33322 .show = edac_dev_ctl_info_show,
33323 .store = edac_dev_ctl_info_store
33324 };
33325@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
33326 }
33327
33328 /* edac_dev file operations for an 'instance' */
33329-static struct sysfs_ops device_instance_ops = {
33330+static const struct sysfs_ops device_instance_ops = {
33331 .show = edac_dev_instance_show,
33332 .store = edac_dev_instance_store
33333 };
33334@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
33335 }
33336
33337 /* edac_dev file operations for a 'block' */
33338-static struct sysfs_ops device_block_ops = {
33339+static const struct sysfs_ops device_block_ops = {
33340 .show = edac_dev_block_show,
33341 .store = edac_dev_block_store
33342 };
33343diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
33344index e1d4ce0..88840e9 100644
33345--- a/drivers/edac/edac_mc_sysfs.c
33346+++ b/drivers/edac/edac_mc_sysfs.c
33347@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
33348 return -EIO;
33349 }
33350
33351-static struct sysfs_ops csrowfs_ops = {
33352+static const struct sysfs_ops csrowfs_ops = {
33353 .show = csrowdev_show,
33354 .store = csrowdev_store
33355 };
33356@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
33357 }
33358
33359 /* Intermediate show/store table */
33360-static struct sysfs_ops mci_ops = {
33361+static const struct sysfs_ops mci_ops = {
33362 .show = mcidev_show,
33363 .store = mcidev_store
33364 };
33365diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
33366index 422728c..d8d9c88 100644
33367--- a/drivers/edac/edac_pci_sysfs.c
33368+++ b/drivers/edac/edac_pci_sysfs.c
33369@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
33370 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
33371 static int edac_pci_poll_msec = 1000; /* one second workq period */
33372
33373-static atomic_t pci_parity_count = ATOMIC_INIT(0);
33374-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
33375+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
33376+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
33377
33378 static struct kobject *edac_pci_top_main_kobj;
33379 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
33380@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
33381 }
33382
33383 /* fs_ops table */
33384-static struct sysfs_ops pci_instance_ops = {
33385+static const struct sysfs_ops pci_instance_ops = {
33386 .show = edac_pci_instance_show,
33387 .store = edac_pci_instance_store
33388 };
33389@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
33390 return -EIO;
33391 }
33392
33393-static struct sysfs_ops edac_pci_sysfs_ops = {
33394+static const struct sysfs_ops edac_pci_sysfs_ops = {
33395 .show = edac_pci_dev_show,
33396 .store = edac_pci_dev_store
33397 };
33398@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33399 edac_printk(KERN_CRIT, EDAC_PCI,
33400 "Signaled System Error on %s\n",
33401 pci_name(dev));
33402- atomic_inc(&pci_nonparity_count);
33403+ atomic_inc_unchecked(&pci_nonparity_count);
33404 }
33405
33406 if (status & (PCI_STATUS_PARITY)) {
33407@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33408 "Master Data Parity Error on %s\n",
33409 pci_name(dev));
33410
33411- atomic_inc(&pci_parity_count);
33412+ atomic_inc_unchecked(&pci_parity_count);
33413 }
33414
33415 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33416@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33417 "Detected Parity Error on %s\n",
33418 pci_name(dev));
33419
33420- atomic_inc(&pci_parity_count);
33421+ atomic_inc_unchecked(&pci_parity_count);
33422 }
33423 }
33424
33425@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33426 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
33427 "Signaled System Error on %s\n",
33428 pci_name(dev));
33429- atomic_inc(&pci_nonparity_count);
33430+ atomic_inc_unchecked(&pci_nonparity_count);
33431 }
33432
33433 if (status & (PCI_STATUS_PARITY)) {
33434@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33435 "Master Data Parity Error on "
33436 "%s\n", pci_name(dev));
33437
33438- atomic_inc(&pci_parity_count);
33439+ atomic_inc_unchecked(&pci_parity_count);
33440 }
33441
33442 if (status & (PCI_STATUS_DETECTED_PARITY)) {
33443@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
33444 "Detected Parity Error on %s\n",
33445 pci_name(dev));
33446
33447- atomic_inc(&pci_parity_count);
33448+ atomic_inc_unchecked(&pci_parity_count);
33449 }
33450 }
33451 }
33452@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
33453 if (!check_pci_errors)
33454 return;
33455
33456- before_count = atomic_read(&pci_parity_count);
33457+ before_count = atomic_read_unchecked(&pci_parity_count);
33458
33459 /* scan all PCI devices looking for a Parity Error on devices and
33460 * bridges.
33461@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
33462 /* Only if operator has selected panic on PCI Error */
33463 if (edac_pci_get_panic_on_pe()) {
33464 /* If the count is different 'after' from 'before' */
33465- if (before_count != atomic_read(&pci_parity_count))
33466+ if (before_count != atomic_read_unchecked(&pci_parity_count))
33467 panic("EDAC: PCI Parity Error");
33468 }
33469 }
33470diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
33471index 6c9a0f2..9c1cf7e 100644
33472--- a/drivers/edac/i3000_edac.c
33473+++ b/drivers/edac/i3000_edac.c
33474@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
33475 edac_mc_free(mci);
33476 }
33477
33478-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
33479+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
33480 {
33481 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33482 I3000},
33483diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
33484index fde4db9..fe108f9 100644
33485--- a/drivers/edac/i3200_edac.c
33486+++ b/drivers/edac/i3200_edac.c
33487@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
33488 edac_mc_free(mci);
33489 }
33490
33491-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
33492+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
33493 {
33494 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33495 I3200},
33496diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
33497index adc10a2..57d4ccf 100644
33498--- a/drivers/edac/i5000_edac.c
33499+++ b/drivers/edac/i5000_edac.c
33500@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
33501 *
33502 * The "E500P" device is the first device supported.
33503 */
33504-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
33505+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
33506 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
33507 .driver_data = I5000P},
33508
33509diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
33510index 22db05a..b2b5503 100644
33511--- a/drivers/edac/i5100_edac.c
33512+++ b/drivers/edac/i5100_edac.c
33513@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
33514 edac_mc_free(mci);
33515 }
33516
33517-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
33518+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
33519 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
33520 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
33521 { 0, }
33522diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
33523index f99d106..f050710 100644
33524--- a/drivers/edac/i5400_edac.c
33525+++ b/drivers/edac/i5400_edac.c
33526@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
33527 *
33528 * The "E500P" device is the first device supported.
33529 */
33530-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
33531+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
33532 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
33533 {0,} /* 0 terminated list. */
33534 };
33535diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
33536index 577760a..9ce16ce 100644
33537--- a/drivers/edac/i82443bxgx_edac.c
33538+++ b/drivers/edac/i82443bxgx_edac.c
33539@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
33540
33541 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
33542
33543-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
33544+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
33545 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
33546 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
33547 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
33548diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
33549index c0088ba..64a7b98 100644
33550--- a/drivers/edac/i82860_edac.c
33551+++ b/drivers/edac/i82860_edac.c
33552@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
33553 edac_mc_free(mci);
33554 }
33555
33556-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
33557+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
33558 {
33559 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33560 I82860},
33561diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
33562index b2d83b9..a34357b 100644
33563--- a/drivers/edac/i82875p_edac.c
33564+++ b/drivers/edac/i82875p_edac.c
33565@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
33566 edac_mc_free(mci);
33567 }
33568
33569-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
33570+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
33571 {
33572 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33573 I82875P},
33574diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
33575index 2eed3ea..87bbbd1 100644
33576--- a/drivers/edac/i82975x_edac.c
33577+++ b/drivers/edac/i82975x_edac.c
33578@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
33579 edac_mc_free(mci);
33580 }
33581
33582-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
33583+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
33584 {
33585 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33586 I82975X
33587diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
33588index 9900675..78ac2b6 100644
33589--- a/drivers/edac/r82600_edac.c
33590+++ b/drivers/edac/r82600_edac.c
33591@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
33592 edac_mc_free(mci);
33593 }
33594
33595-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
33596+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
33597 {
33598 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
33599 },
33600diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
33601index d4ec605..4cfec4e 100644
33602--- a/drivers/edac/x38_edac.c
33603+++ b/drivers/edac/x38_edac.c
33604@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
33605 edac_mc_free(mci);
33606 }
33607
33608-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
33609+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
33610 {
33611 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
33612 X38},
33613diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
33614index 3fc2ceb..daf098f 100644
33615--- a/drivers/firewire/core-card.c
33616+++ b/drivers/firewire/core-card.c
33617@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
33618
33619 void fw_core_remove_card(struct fw_card *card)
33620 {
33621- struct fw_card_driver dummy_driver = dummy_driver_template;
33622+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
33623
33624 card->driver->update_phy_reg(card, 4,
33625 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
33626diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
33627index 4560d8f..36db24a 100644
33628--- a/drivers/firewire/core-cdev.c
33629+++ b/drivers/firewire/core-cdev.c
33630@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
33631 int ret;
33632
33633 if ((request->channels == 0 && request->bandwidth == 0) ||
33634- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
33635- request->bandwidth < 0)
33636+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
33637 return -EINVAL;
33638
33639 r = kmalloc(sizeof(*r), GFP_KERNEL);
33640diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
33641index da628c7..cf54a2c 100644
33642--- a/drivers/firewire/core-transaction.c
33643+++ b/drivers/firewire/core-transaction.c
33644@@ -36,6 +36,7 @@
33645 #include <linux/string.h>
33646 #include <linux/timer.h>
33647 #include <linux/types.h>
33648+#include <linux/sched.h>
33649
33650 #include <asm/byteorder.h>
33651
33652@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
33653 struct transaction_callback_data d;
33654 struct fw_transaction t;
33655
33656+ pax_track_stack();
33657+
33658 init_completion(&d.done);
33659 d.payload = payload;
33660 fw_send_request(card, &t, tcode, destination_id, generation, speed,
33661diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
33662index 7ff6e75..a2965d9 100644
33663--- a/drivers/firewire/core.h
33664+++ b/drivers/firewire/core.h
33665@@ -86,6 +86,7 @@ struct fw_card_driver {
33666
33667 int (*stop_iso)(struct fw_iso_context *ctx);
33668 };
33669+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
33670
33671 void fw_card_initialize(struct fw_card *card,
33672 const struct fw_card_driver *driver, struct device *device);
33673diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
33674index 3a2ccb0..82fd7c4 100644
33675--- a/drivers/firmware/dmi_scan.c
33676+++ b/drivers/firmware/dmi_scan.c
33677@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
33678 }
33679 }
33680 else {
33681- /*
33682- * no iounmap() for that ioremap(); it would be a no-op, but
33683- * it's so early in setup that sucker gets confused into doing
33684- * what it shouldn't if we actually call it.
33685- */
33686 p = dmi_ioremap(0xF0000, 0x10000);
33687 if (p == NULL)
33688 goto error;
33689@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
33690 if (buf == NULL)
33691 return -1;
33692
33693- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
33694+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
33695
33696 iounmap(buf);
33697 return 0;
33698diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
33699index 9e4f59d..110e24e 100644
33700--- a/drivers/firmware/edd.c
33701+++ b/drivers/firmware/edd.c
33702@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
33703 return ret;
33704 }
33705
33706-static struct sysfs_ops edd_attr_ops = {
33707+static const struct sysfs_ops edd_attr_ops = {
33708 .show = edd_attr_show,
33709 };
33710
33711diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
33712index f4f709d..082f06e 100644
33713--- a/drivers/firmware/efivars.c
33714+++ b/drivers/firmware/efivars.c
33715@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
33716 return ret;
33717 }
33718
33719-static struct sysfs_ops efivar_attr_ops = {
33720+static const struct sysfs_ops efivar_attr_ops = {
33721 .show = efivar_attr_show,
33722 .store = efivar_attr_store,
33723 };
33724diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
33725index 051d1eb..0a5d4e7 100644
33726--- a/drivers/firmware/iscsi_ibft.c
33727+++ b/drivers/firmware/iscsi_ibft.c
33728@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
33729 return ret;
33730 }
33731
33732-static struct sysfs_ops ibft_attr_ops = {
33733+static const struct sysfs_ops ibft_attr_ops = {
33734 .show = ibft_show_attribute,
33735 };
33736
33737diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
33738index 56f9234..8c58c7b 100644
33739--- a/drivers/firmware/memmap.c
33740+++ b/drivers/firmware/memmap.c
33741@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
33742 NULL
33743 };
33744
33745-static struct sysfs_ops memmap_attr_ops = {
33746+static const struct sysfs_ops memmap_attr_ops = {
33747 .show = memmap_attr_show,
33748 };
33749
33750diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
33751index b16c9a8..2af7d3f 100644
33752--- a/drivers/gpio/vr41xx_giu.c
33753+++ b/drivers/gpio/vr41xx_giu.c
33754@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
33755 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
33756 maskl, pendl, maskh, pendh);
33757
33758- atomic_inc(&irq_err_count);
33759+ atomic_inc_unchecked(&irq_err_count);
33760
33761 return -EINVAL;
33762 }
33763diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
33764index bea6efc..3dc0f42 100644
33765--- a/drivers/gpu/drm/drm_crtc.c
33766+++ b/drivers/gpu/drm/drm_crtc.c
33767@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
33768 */
33769 if ((out_resp->count_modes >= mode_count) && mode_count) {
33770 copied = 0;
33771- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
33772+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
33773 list_for_each_entry(mode, &connector->modes, head) {
33774 drm_crtc_convert_to_umode(&u_mode, mode);
33775 if (copy_to_user(mode_ptr + copied,
33776@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
33777
33778 if ((out_resp->count_props >= props_count) && props_count) {
33779 copied = 0;
33780- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
33781- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
33782+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
33783+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
33784 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
33785 if (connector->property_ids[i] != 0) {
33786 if (put_user(connector->property_ids[i],
33787@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
33788
33789 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
33790 copied = 0;
33791- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
33792+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
33793 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
33794 if (connector->encoder_ids[i] != 0) {
33795 if (put_user(connector->encoder_ids[i],
33796@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
33797 }
33798
33799 for (i = 0; i < crtc_req->count_connectors; i++) {
33800- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
33801+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
33802 if (get_user(out_id, &set_connectors_ptr[i])) {
33803 ret = -EFAULT;
33804 goto out;
33805@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
33806 out_resp->flags = property->flags;
33807
33808 if ((out_resp->count_values >= value_count) && value_count) {
33809- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
33810+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
33811 for (i = 0; i < value_count; i++) {
33812 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
33813 ret = -EFAULT;
33814@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
33815 if (property->flags & DRM_MODE_PROP_ENUM) {
33816 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
33817 copied = 0;
33818- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
33819+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
33820 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
33821
33822 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
33823@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
33824 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
33825 copied = 0;
33826 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
33827- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
33828+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
33829
33830 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
33831 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
33832@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
33833 blob = obj_to_blob(obj);
33834
33835 if (out_resp->length == blob->length) {
33836- blob_ptr = (void *)(unsigned long)out_resp->data;
33837+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
33838 if (copy_to_user(blob_ptr, blob->data, blob->length)){
33839 ret = -EFAULT;
33840 goto done;
33841diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
33842index 1b8745d..92fdbf6 100644
33843--- a/drivers/gpu/drm/drm_crtc_helper.c
33844+++ b/drivers/gpu/drm/drm_crtc_helper.c
33845@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
33846 struct drm_crtc *tmp;
33847 int crtc_mask = 1;
33848
33849- WARN(!crtc, "checking null crtc?");
33850+ BUG_ON(!crtc);
33851
33852 dev = crtc->dev;
33853
33854@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
33855
33856 adjusted_mode = drm_mode_duplicate(dev, mode);
33857
33858+ pax_track_stack();
33859+
33860 crtc->enabled = drm_helper_crtc_in_use(crtc);
33861
33862 if (!crtc->enabled)
33863diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
33864index 0e27d98..dec8768 100644
33865--- a/drivers/gpu/drm/drm_drv.c
33866+++ b/drivers/gpu/drm/drm_drv.c
33867@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
33868 char *kdata = NULL;
33869
33870 atomic_inc(&dev->ioctl_count);
33871- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
33872+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
33873 ++file_priv->ioctl_count;
33874
33875 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
33876diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
33877index 519161e..98c840c 100644
33878--- a/drivers/gpu/drm/drm_fops.c
33879+++ b/drivers/gpu/drm/drm_fops.c
33880@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
33881 }
33882
33883 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
33884- atomic_set(&dev->counts[i], 0);
33885+ atomic_set_unchecked(&dev->counts[i], 0);
33886
33887 dev->sigdata.lock = NULL;
33888
33889@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
33890
33891 retcode = drm_open_helper(inode, filp, dev);
33892 if (!retcode) {
33893- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
33894+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
33895 spin_lock(&dev->count_lock);
33896- if (!dev->open_count++) {
33897+ if (local_inc_return(&dev->open_count) == 1) {
33898 spin_unlock(&dev->count_lock);
33899 retcode = drm_setup(dev);
33900 goto out;
33901@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
33902
33903 lock_kernel();
33904
33905- DRM_DEBUG("open_count = %d\n", dev->open_count);
33906+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
33907
33908 if (dev->driver->preclose)
33909 dev->driver->preclose(dev, file_priv);
33910@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
33911 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
33912 task_pid_nr(current),
33913 (long)old_encode_dev(file_priv->minor->device),
33914- dev->open_count);
33915+ local_read(&dev->open_count));
33916
33917 /* Release any auth tokens that might point to this file_priv,
33918 (do that under the drm_global_mutex) */
33919@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
33920 * End inline drm_release
33921 */
33922
33923- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
33924+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
33925 spin_lock(&dev->count_lock);
33926- if (!--dev->open_count) {
33927+ if (local_dec_and_test(&dev->open_count)) {
33928 if (atomic_read(&dev->ioctl_count)) {
33929 DRM_ERROR("Device busy: %d\n",
33930 atomic_read(&dev->ioctl_count));
33931diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
33932index 8bf3770..79422805 100644
33933--- a/drivers/gpu/drm/drm_gem.c
33934+++ b/drivers/gpu/drm/drm_gem.c
33935@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
33936 spin_lock_init(&dev->object_name_lock);
33937 idr_init(&dev->object_name_idr);
33938 atomic_set(&dev->object_count, 0);
33939- atomic_set(&dev->object_memory, 0);
33940+ atomic_set_unchecked(&dev->object_memory, 0);
33941 atomic_set(&dev->pin_count, 0);
33942- atomic_set(&dev->pin_memory, 0);
33943+ atomic_set_unchecked(&dev->pin_memory, 0);
33944 atomic_set(&dev->gtt_count, 0);
33945- atomic_set(&dev->gtt_memory, 0);
33946+ atomic_set_unchecked(&dev->gtt_memory, 0);
33947
33948 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
33949 if (!mm) {
33950@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
33951 goto fput;
33952 }
33953 atomic_inc(&dev->object_count);
33954- atomic_add(obj->size, &dev->object_memory);
33955+ atomic_add_unchecked(obj->size, &dev->object_memory);
33956 return obj;
33957 fput:
33958 fput(obj->filp);
33959@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
33960
33961 fput(obj->filp);
33962 atomic_dec(&dev->object_count);
33963- atomic_sub(obj->size, &dev->object_memory);
33964+ atomic_sub_unchecked(obj->size, &dev->object_memory);
33965 kfree(obj);
33966 }
33967 EXPORT_SYMBOL(drm_gem_object_free);
33968diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
33969index f0f6c6b..34af322 100644
33970--- a/drivers/gpu/drm/drm_info.c
33971+++ b/drivers/gpu/drm/drm_info.c
33972@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
33973 struct drm_local_map *map;
33974 struct drm_map_list *r_list;
33975
33976- /* Hardcoded from _DRM_FRAME_BUFFER,
33977- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
33978- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
33979- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
33980+ static const char * const types[] = {
33981+ [_DRM_FRAME_BUFFER] = "FB",
33982+ [_DRM_REGISTERS] = "REG",
33983+ [_DRM_SHM] = "SHM",
33984+ [_DRM_AGP] = "AGP",
33985+ [_DRM_SCATTER_GATHER] = "SG",
33986+ [_DRM_CONSISTENT] = "PCI",
33987+ [_DRM_GEM] = "GEM" };
33988 const char *type;
33989 int i;
33990
33991@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
33992 map = r_list->map;
33993 if (!map)
33994 continue;
33995- if (map->type < 0 || map->type > 5)
33996+ if (map->type >= ARRAY_SIZE(types))
33997 type = "??";
33998 else
33999 type = types[map->type];
34000@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
34001 struct drm_device *dev = node->minor->dev;
34002
34003 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
34004- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
34005+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
34006 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
34007- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
34008- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
34009+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
34010+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
34011 seq_printf(m, "%d gtt total\n", dev->gtt_total);
34012 return 0;
34013 }
34014@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34015 mutex_lock(&dev->struct_mutex);
34016 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
34017 atomic_read(&dev->vma_count),
34018+#ifdef CONFIG_GRKERNSEC_HIDESYM
34019+ NULL, 0);
34020+#else
34021 high_memory, (u64)virt_to_phys(high_memory));
34022+#endif
34023
34024 list_for_each_entry(pt, &dev->vmalist, head) {
34025 vma = pt->vma;
34026@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
34027 continue;
34028 seq_printf(m,
34029 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
34030- pt->pid, vma->vm_start, vma->vm_end,
34031+ pt->pid,
34032+#ifdef CONFIG_GRKERNSEC_HIDESYM
34033+ 0, 0,
34034+#else
34035+ vma->vm_start, vma->vm_end,
34036+#endif
34037 vma->vm_flags & VM_READ ? 'r' : '-',
34038 vma->vm_flags & VM_WRITE ? 'w' : '-',
34039 vma->vm_flags & VM_EXEC ? 'x' : '-',
34040 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34041 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34042 vma->vm_flags & VM_IO ? 'i' : '-',
34043+#ifdef CONFIG_GRKERNSEC_HIDESYM
34044+ 0);
34045+#else
34046 vma->vm_pgoff);
34047+#endif
34048
34049 #if defined(__i386__)
34050 pgprot = pgprot_val(vma->vm_page_prot);
34051diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34052index 282d9fd..71e5f11 100644
34053--- a/drivers/gpu/drm/drm_ioc32.c
34054+++ b/drivers/gpu/drm/drm_ioc32.c
34055@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34056 request = compat_alloc_user_space(nbytes);
34057 if (!access_ok(VERIFY_WRITE, request, nbytes))
34058 return -EFAULT;
34059- list = (struct drm_buf_desc *) (request + 1);
34060+ list = (struct drm_buf_desc __user *) (request + 1);
34061
34062 if (__put_user(count, &request->count)
34063 || __put_user(list, &request->list))
34064@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34065 request = compat_alloc_user_space(nbytes);
34066 if (!access_ok(VERIFY_WRITE, request, nbytes))
34067 return -EFAULT;
34068- list = (struct drm_buf_pub *) (request + 1);
34069+ list = (struct drm_buf_pub __user *) (request + 1);
34070
34071 if (__put_user(count, &request->count)
34072 || __put_user(list, &request->list))
34073diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34074index 9b9ff46..4ea724c 100644
34075--- a/drivers/gpu/drm/drm_ioctl.c
34076+++ b/drivers/gpu/drm/drm_ioctl.c
34077@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34078 stats->data[i].value =
34079 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34080 else
34081- stats->data[i].value = atomic_read(&dev->counts[i]);
34082+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34083 stats->data[i].type = dev->types[i];
34084 }
34085
34086diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34087index e2f70a5..c703e86 100644
34088--- a/drivers/gpu/drm/drm_lock.c
34089+++ b/drivers/gpu/drm/drm_lock.c
34090@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34091 if (drm_lock_take(&master->lock, lock->context)) {
34092 master->lock.file_priv = file_priv;
34093 master->lock.lock_time = jiffies;
34094- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34095+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34096 break; /* Got lock */
34097 }
34098
34099@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34100 return -EINVAL;
34101 }
34102
34103- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34104+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34105
34106 /* kernel_context_switch isn't used by any of the x86 drm
34107 * modules but is required by the Sparc driver.
34108diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
34109index 7d1d88c..b9131b2 100644
34110--- a/drivers/gpu/drm/i810/i810_dma.c
34111+++ b/drivers/gpu/drm/i810/i810_dma.c
34112@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
34113 dma->buflist[vertex->idx],
34114 vertex->discard, vertex->used);
34115
34116- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34117- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34118+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34119+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34120 sarea_priv->last_enqueue = dev_priv->counter - 1;
34121 sarea_priv->last_dispatch = (int)hw_status[5];
34122
34123@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
34124 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
34125 mc->last_render);
34126
34127- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34128- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34129+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34130+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34131 sarea_priv->last_enqueue = dev_priv->counter - 1;
34132 sarea_priv->last_dispatch = (int)hw_status[5];
34133
34134diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
34135index 21e2691..7321edd 100644
34136--- a/drivers/gpu/drm/i810/i810_drv.h
34137+++ b/drivers/gpu/drm/i810/i810_drv.h
34138@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
34139 int page_flipping;
34140
34141 wait_queue_head_t irq_queue;
34142- atomic_t irq_received;
34143- atomic_t irq_emitted;
34144+ atomic_unchecked_t irq_received;
34145+ atomic_unchecked_t irq_emitted;
34146
34147 int front_offset;
34148 } drm_i810_private_t;
34149diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
34150index da82afe..48a45de 100644
34151--- a/drivers/gpu/drm/i830/i830_drv.h
34152+++ b/drivers/gpu/drm/i830/i830_drv.h
34153@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
34154 int page_flipping;
34155
34156 wait_queue_head_t irq_queue;
34157- atomic_t irq_received;
34158- atomic_t irq_emitted;
34159+ atomic_unchecked_t irq_received;
34160+ atomic_unchecked_t irq_emitted;
34161
34162 int use_mi_batchbuffer_start;
34163
34164diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
34165index 91ec2bb..6f21fab 100644
34166--- a/drivers/gpu/drm/i830/i830_irq.c
34167+++ b/drivers/gpu/drm/i830/i830_irq.c
34168@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
34169
34170 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
34171
34172- atomic_inc(&dev_priv->irq_received);
34173+ atomic_inc_unchecked(&dev_priv->irq_received);
34174 wake_up_interruptible(&dev_priv->irq_queue);
34175
34176 return IRQ_HANDLED;
34177@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
34178
34179 DRM_DEBUG("%s\n", __func__);
34180
34181- atomic_inc(&dev_priv->irq_emitted);
34182+ atomic_inc_unchecked(&dev_priv->irq_emitted);
34183
34184 BEGIN_LP_RING(2);
34185 OUT_RING(0);
34186 OUT_RING(GFX_OP_USER_INTERRUPT);
34187 ADVANCE_LP_RING();
34188
34189- return atomic_read(&dev_priv->irq_emitted);
34190+ return atomic_read_unchecked(&dev_priv->irq_emitted);
34191 }
34192
34193 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34194@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34195
34196 DRM_DEBUG("%s\n", __func__);
34197
34198- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34199+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34200 return 0;
34201
34202 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
34203@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
34204
34205 for (;;) {
34206 __set_current_state(TASK_INTERRUPTIBLE);
34207- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
34208+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
34209 break;
34210 if ((signed)(end - jiffies) <= 0) {
34211 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
34212@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
34213 I830_WRITE16(I830REG_HWSTAM, 0xffff);
34214 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
34215 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
34216- atomic_set(&dev_priv->irq_received, 0);
34217- atomic_set(&dev_priv->irq_emitted, 0);
34218+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34219+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
34220 init_waitqueue_head(&dev_priv->irq_queue);
34221 }
34222
34223diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
34224index 288fc50..c6092055 100644
34225--- a/drivers/gpu/drm/i915/dvo.h
34226+++ b/drivers/gpu/drm/i915/dvo.h
34227@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
34228 *
34229 * \return singly-linked list of modes or NULL if no modes found.
34230 */
34231- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
34232+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
34233
34234 /**
34235 * Clean up driver-specific bits of the output
34236 */
34237- void (*destroy) (struct intel_dvo_device *dvo);
34238+ void (* const destroy) (struct intel_dvo_device *dvo);
34239
34240 /**
34241 * Debugging hook to dump device registers to log file
34242 */
34243- void (*dump_regs)(struct intel_dvo_device *dvo);
34244+ void (* const dump_regs)(struct intel_dvo_device *dvo);
34245 };
34246
34247-extern struct intel_dvo_dev_ops sil164_ops;
34248-extern struct intel_dvo_dev_ops ch7xxx_ops;
34249-extern struct intel_dvo_dev_ops ivch_ops;
34250-extern struct intel_dvo_dev_ops tfp410_ops;
34251-extern struct intel_dvo_dev_ops ch7017_ops;
34252+extern const struct intel_dvo_dev_ops sil164_ops;
34253+extern const struct intel_dvo_dev_ops ch7xxx_ops;
34254+extern const struct intel_dvo_dev_ops ivch_ops;
34255+extern const struct intel_dvo_dev_ops tfp410_ops;
34256+extern const struct intel_dvo_dev_ops ch7017_ops;
34257
34258 #endif /* _INTEL_DVO_H */
34259diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
34260index 621815b..499d82e 100644
34261--- a/drivers/gpu/drm/i915/dvo_ch7017.c
34262+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
34263@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
34264 }
34265 }
34266
34267-struct intel_dvo_dev_ops ch7017_ops = {
34268+const struct intel_dvo_dev_ops ch7017_ops = {
34269 .init = ch7017_init,
34270 .detect = ch7017_detect,
34271 .mode_valid = ch7017_mode_valid,
34272diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34273index a9b8962..ac769ba 100644
34274--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
34275+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
34276@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
34277 }
34278 }
34279
34280-struct intel_dvo_dev_ops ch7xxx_ops = {
34281+const struct intel_dvo_dev_ops ch7xxx_ops = {
34282 .init = ch7xxx_init,
34283 .detect = ch7xxx_detect,
34284 .mode_valid = ch7xxx_mode_valid,
34285diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
34286index aa176f9..ed2930c 100644
34287--- a/drivers/gpu/drm/i915/dvo_ivch.c
34288+++ b/drivers/gpu/drm/i915/dvo_ivch.c
34289@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
34290 }
34291 }
34292
34293-struct intel_dvo_dev_ops ivch_ops= {
34294+const struct intel_dvo_dev_ops ivch_ops= {
34295 .init = ivch_init,
34296 .dpms = ivch_dpms,
34297 .save = ivch_save,
34298diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
34299index e1c1f73..7dbebcf 100644
34300--- a/drivers/gpu/drm/i915/dvo_sil164.c
34301+++ b/drivers/gpu/drm/i915/dvo_sil164.c
34302@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
34303 }
34304 }
34305
34306-struct intel_dvo_dev_ops sil164_ops = {
34307+const struct intel_dvo_dev_ops sil164_ops = {
34308 .init = sil164_init,
34309 .detect = sil164_detect,
34310 .mode_valid = sil164_mode_valid,
34311diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
34312index 16dce84..7e1b6f8 100644
34313--- a/drivers/gpu/drm/i915/dvo_tfp410.c
34314+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
34315@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
34316 }
34317 }
34318
34319-struct intel_dvo_dev_ops tfp410_ops = {
34320+const struct intel_dvo_dev_ops tfp410_ops = {
34321 .init = tfp410_init,
34322 .detect = tfp410_detect,
34323 .mode_valid = tfp410_mode_valid,
34324diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
34325index 7e859d6..7d1cf2b 100644
34326--- a/drivers/gpu/drm/i915/i915_debugfs.c
34327+++ b/drivers/gpu/drm/i915/i915_debugfs.c
34328@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
34329 I915_READ(GTIMR));
34330 }
34331 seq_printf(m, "Interrupts received: %d\n",
34332- atomic_read(&dev_priv->irq_received));
34333+ atomic_read_unchecked(&dev_priv->irq_received));
34334 if (dev_priv->hw_status_page != NULL) {
34335 seq_printf(m, "Current sequence: %d\n",
34336 i915_get_gem_seqno(dev));
34337diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
34338index 5449239..7e4f68d 100644
34339--- a/drivers/gpu/drm/i915/i915_drv.c
34340+++ b/drivers/gpu/drm/i915/i915_drv.c
34341@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
34342 return i915_resume(dev);
34343 }
34344
34345-static struct vm_operations_struct i915_gem_vm_ops = {
34346+static const struct vm_operations_struct i915_gem_vm_ops = {
34347 .fault = i915_gem_fault,
34348 .open = drm_gem_vm_open,
34349 .close = drm_gem_vm_close,
34350diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
34351index 97163f7..c24c7c7 100644
34352--- a/drivers/gpu/drm/i915/i915_drv.h
34353+++ b/drivers/gpu/drm/i915/i915_drv.h
34354@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
34355 /* display clock increase/decrease */
34356 /* pll clock increase/decrease */
34357 /* clock gating init */
34358-};
34359+} __no_const;
34360
34361 typedef struct drm_i915_private {
34362 struct drm_device *dev;
34363@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
34364 int page_flipping;
34365
34366 wait_queue_head_t irq_queue;
34367- atomic_t irq_received;
34368+ atomic_unchecked_t irq_received;
34369 /** Protects user_irq_refcount and irq_mask_reg */
34370 spinlock_t user_irq_lock;
34371 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
34372diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
34373index 27a3074..eb3f959 100644
34374--- a/drivers/gpu/drm/i915/i915_gem.c
34375+++ b/drivers/gpu/drm/i915/i915_gem.c
34376@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
34377
34378 args->aper_size = dev->gtt_total;
34379 args->aper_available_size = (args->aper_size -
34380- atomic_read(&dev->pin_memory));
34381+ atomic_read_unchecked(&dev->pin_memory));
34382
34383 return 0;
34384 }
34385@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
34386
34387 if (obj_priv->gtt_space) {
34388 atomic_dec(&dev->gtt_count);
34389- atomic_sub(obj->size, &dev->gtt_memory);
34390+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
34391
34392 drm_mm_put_block(obj_priv->gtt_space);
34393 obj_priv->gtt_space = NULL;
34394@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
34395 goto search_free;
34396 }
34397 atomic_inc(&dev->gtt_count);
34398- atomic_add(obj->size, &dev->gtt_memory);
34399+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
34400
34401 /* Assert that the object is not currently in any GPU domain. As it
34402 * wasn't in the GTT, there shouldn't be any way it could have been in
34403@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
34404 "%d/%d gtt bytes\n",
34405 atomic_read(&dev->object_count),
34406 atomic_read(&dev->pin_count),
34407- atomic_read(&dev->object_memory),
34408- atomic_read(&dev->pin_memory),
34409- atomic_read(&dev->gtt_memory),
34410+ atomic_read_unchecked(&dev->object_memory),
34411+ atomic_read_unchecked(&dev->pin_memory),
34412+ atomic_read_unchecked(&dev->gtt_memory),
34413 dev->gtt_total);
34414 }
34415 goto err;
34416@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
34417 */
34418 if (obj_priv->pin_count == 1) {
34419 atomic_inc(&dev->pin_count);
34420- atomic_add(obj->size, &dev->pin_memory);
34421+ atomic_add_unchecked(obj->size, &dev->pin_memory);
34422 if (!obj_priv->active &&
34423 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
34424 !list_empty(&obj_priv->list))
34425@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
34426 list_move_tail(&obj_priv->list,
34427 &dev_priv->mm.inactive_list);
34428 atomic_dec(&dev->pin_count);
34429- atomic_sub(obj->size, &dev->pin_memory);
34430+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
34431 }
34432 i915_verify_inactive(dev, __FILE__, __LINE__);
34433 }
34434diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
34435index 63f28ad..f5469da 100644
34436--- a/drivers/gpu/drm/i915/i915_irq.c
34437+++ b/drivers/gpu/drm/i915/i915_irq.c
34438@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
34439 int irq_received;
34440 int ret = IRQ_NONE;
34441
34442- atomic_inc(&dev_priv->irq_received);
34443+ atomic_inc_unchecked(&dev_priv->irq_received);
34444
34445 if (IS_IGDNG(dev))
34446 return igdng_irq_handler(dev);
34447@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
34448 {
34449 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34450
34451- atomic_set(&dev_priv->irq_received, 0);
34452+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34453
34454 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
34455 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
34456diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
34457index 5d9c6a7..d1b0e29 100644
34458--- a/drivers/gpu/drm/i915/intel_sdvo.c
34459+++ b/drivers/gpu/drm/i915/intel_sdvo.c
34460@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
34461 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
34462
34463 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
34464- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34465+ pax_open_kernel();
34466+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
34467+ pax_close_kernel();
34468
34469 /* Read the regs to test if we can talk to the device */
34470 for (i = 0; i < 0x40; i++) {
34471diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
34472index be6c6b9..8615d9c 100644
34473--- a/drivers/gpu/drm/mga/mga_drv.h
34474+++ b/drivers/gpu/drm/mga/mga_drv.h
34475@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
34476 u32 clear_cmd;
34477 u32 maccess;
34478
34479- atomic_t vbl_received; /**< Number of vblanks received. */
34480+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
34481 wait_queue_head_t fence_queue;
34482- atomic_t last_fence_retired;
34483+ atomic_unchecked_t last_fence_retired;
34484 u32 next_fence_to_post;
34485
34486 unsigned int fb_cpp;
34487diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
34488index daa6041..a28a5da 100644
34489--- a/drivers/gpu/drm/mga/mga_irq.c
34490+++ b/drivers/gpu/drm/mga/mga_irq.c
34491@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
34492 if (crtc != 0)
34493 return 0;
34494
34495- return atomic_read(&dev_priv->vbl_received);
34496+ return atomic_read_unchecked(&dev_priv->vbl_received);
34497 }
34498
34499
34500@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
34501 /* VBLANK interrupt */
34502 if (status & MGA_VLINEPEN) {
34503 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
34504- atomic_inc(&dev_priv->vbl_received);
34505+ atomic_inc_unchecked(&dev_priv->vbl_received);
34506 drm_handle_vblank(dev, 0);
34507 handled = 1;
34508 }
34509@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
34510 MGA_WRITE(MGA_PRIMEND, prim_end);
34511 }
34512
34513- atomic_inc(&dev_priv->last_fence_retired);
34514+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
34515 DRM_WAKEUP(&dev_priv->fence_queue);
34516 handled = 1;
34517 }
34518@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
34519 * using fences.
34520 */
34521 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
34522- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
34523+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
34524 - *sequence) <= (1 << 23)));
34525
34526 *sequence = cur_fence;
34527diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
34528index 4c39a40..b22a9ea 100644
34529--- a/drivers/gpu/drm/r128/r128_cce.c
34530+++ b/drivers/gpu/drm/r128/r128_cce.c
34531@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
34532
34533 /* GH: Simple idle check.
34534 */
34535- atomic_set(&dev_priv->idle_count, 0);
34536+ atomic_set_unchecked(&dev_priv->idle_count, 0);
34537
34538 /* We don't support anything other than bus-mastering ring mode,
34539 * but the ring can be in either AGP or PCI space for the ring
34540diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
34541index 3c60829..4faf484 100644
34542--- a/drivers/gpu/drm/r128/r128_drv.h
34543+++ b/drivers/gpu/drm/r128/r128_drv.h
34544@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
34545 int is_pci;
34546 unsigned long cce_buffers_offset;
34547
34548- atomic_t idle_count;
34549+ atomic_unchecked_t idle_count;
34550
34551 int page_flipping;
34552 int current_page;
34553 u32 crtc_offset;
34554 u32 crtc_offset_cntl;
34555
34556- atomic_t vbl_received;
34557+ atomic_unchecked_t vbl_received;
34558
34559 u32 color_fmt;
34560 unsigned int front_offset;
34561diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
34562index 69810fb..97bf17a 100644
34563--- a/drivers/gpu/drm/r128/r128_irq.c
34564+++ b/drivers/gpu/drm/r128/r128_irq.c
34565@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
34566 if (crtc != 0)
34567 return 0;
34568
34569- return atomic_read(&dev_priv->vbl_received);
34570+ return atomic_read_unchecked(&dev_priv->vbl_received);
34571 }
34572
34573 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
34574@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
34575 /* VBLANK interrupt */
34576 if (status & R128_CRTC_VBLANK_INT) {
34577 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
34578- atomic_inc(&dev_priv->vbl_received);
34579+ atomic_inc_unchecked(&dev_priv->vbl_received);
34580 drm_handle_vblank(dev, 0);
34581 return IRQ_HANDLED;
34582 }
34583diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
34584index af2665c..51922d2 100644
34585--- a/drivers/gpu/drm/r128/r128_state.c
34586+++ b/drivers/gpu/drm/r128/r128_state.c
34587@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
34588
34589 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
34590 {
34591- if (atomic_read(&dev_priv->idle_count) == 0) {
34592+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
34593 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
34594 } else {
34595- atomic_set(&dev_priv->idle_count, 0);
34596+ atomic_set_unchecked(&dev_priv->idle_count, 0);
34597 }
34598 }
34599
34600diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
34601index dd72b91..8644b3c 100644
34602--- a/drivers/gpu/drm/radeon/atom.c
34603+++ b/drivers/gpu/drm/radeon/atom.c
34604@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
34605 char name[512];
34606 int i;
34607
34608+ pax_track_stack();
34609+
34610 ctx->card = card;
34611 ctx->bios = bios;
34612
34613diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
34614index 0d79577..efaa7a5 100644
34615--- a/drivers/gpu/drm/radeon/mkregtable.c
34616+++ b/drivers/gpu/drm/radeon/mkregtable.c
34617@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
34618 regex_t mask_rex;
34619 regmatch_t match[4];
34620 char buf[1024];
34621- size_t end;
34622+ long end;
34623 int len;
34624 int done = 0;
34625 int r;
34626 unsigned o;
34627 struct offset *offset;
34628 char last_reg_s[10];
34629- int last_reg;
34630+ unsigned long last_reg;
34631
34632 if (regcomp
34633 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
34634diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
34635index 6735213..38c2c67 100644
34636--- a/drivers/gpu/drm/radeon/radeon.h
34637+++ b/drivers/gpu/drm/radeon/radeon.h
34638@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
34639 */
34640 struct radeon_fence_driver {
34641 uint32_t scratch_reg;
34642- atomic_t seq;
34643+ atomic_unchecked_t seq;
34644 uint32_t last_seq;
34645 unsigned long count_timeout;
34646 wait_queue_head_t queue;
34647@@ -640,7 +640,7 @@ struct radeon_asic {
34648 uint32_t offset, uint32_t obj_size);
34649 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
34650 void (*bandwidth_update)(struct radeon_device *rdev);
34651-};
34652+} __no_const;
34653
34654 /*
34655 * Asic structures
34656diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
34657index 4e928b9..d8b6008 100644
34658--- a/drivers/gpu/drm/radeon/radeon_atombios.c
34659+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
34660@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
34661 bool linkb;
34662 struct radeon_i2c_bus_rec ddc_bus;
34663
34664+ pax_track_stack();
34665+
34666 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
34667
34668 if (data_offset == 0)
34669@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
34670 }
34671 }
34672
34673-struct bios_connector {
34674+static struct bios_connector {
34675 bool valid;
34676 uint16_t line_mux;
34677 uint16_t devices;
34678 int connector_type;
34679 struct radeon_i2c_bus_rec ddc_bus;
34680-};
34681+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
34682
34683 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
34684 drm_device
34685@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
34686 uint8_t dac;
34687 union atom_supported_devices *supported_devices;
34688 int i, j;
34689- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
34690
34691 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
34692
34693diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
34694index 083a181..ccccae0 100644
34695--- a/drivers/gpu/drm/radeon/radeon_display.c
34696+++ b/drivers/gpu/drm/radeon/radeon_display.c
34697@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
34698
34699 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
34700 error = freq - current_freq;
34701- error = error < 0 ? 0xffffffff : error;
34702+ error = (int32_t)error < 0 ? 0xffffffff : error;
34703 } else
34704 error = abs(current_freq - freq);
34705 vco_diff = abs(vco - best_vco);
34706diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
34707index 76e4070..193fa7f 100644
34708--- a/drivers/gpu/drm/radeon/radeon_drv.h
34709+++ b/drivers/gpu/drm/radeon/radeon_drv.h
34710@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
34711
34712 /* SW interrupt */
34713 wait_queue_head_t swi_queue;
34714- atomic_t swi_emitted;
34715+ atomic_unchecked_t swi_emitted;
34716 int vblank_crtc;
34717 uint32_t irq_enable_reg;
34718 uint32_t r500_disp_irq_reg;
34719diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
34720index 3beb26d..6ce9c4a 100644
34721--- a/drivers/gpu/drm/radeon/radeon_fence.c
34722+++ b/drivers/gpu/drm/radeon/radeon_fence.c
34723@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
34724 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
34725 return 0;
34726 }
34727- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
34728+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
34729 if (!rdev->cp.ready) {
34730 /* FIXME: cp is not running assume everythings is done right
34731 * away
34732@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
34733 return r;
34734 }
34735 WREG32(rdev->fence_drv.scratch_reg, 0);
34736- atomic_set(&rdev->fence_drv.seq, 0);
34737+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
34738 INIT_LIST_HEAD(&rdev->fence_drv.created);
34739 INIT_LIST_HEAD(&rdev->fence_drv.emited);
34740 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
34741diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
34742index a1bf11d..4a123c0 100644
34743--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
34744+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
34745@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
34746 request = compat_alloc_user_space(sizeof(*request));
34747 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
34748 || __put_user(req32.param, &request->param)
34749- || __put_user((void __user *)(unsigned long)req32.value,
34750+ || __put_user((unsigned long)req32.value,
34751 &request->value))
34752 return -EFAULT;
34753
34754diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
34755index b79ecc4..8dab92d 100644
34756--- a/drivers/gpu/drm/radeon/radeon_irq.c
34757+++ b/drivers/gpu/drm/radeon/radeon_irq.c
34758@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
34759 unsigned int ret;
34760 RING_LOCALS;
34761
34762- atomic_inc(&dev_priv->swi_emitted);
34763- ret = atomic_read(&dev_priv->swi_emitted);
34764+ atomic_inc_unchecked(&dev_priv->swi_emitted);
34765+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
34766
34767 BEGIN_RING(4);
34768 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
34769@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
34770 drm_radeon_private_t *dev_priv =
34771 (drm_radeon_private_t *) dev->dev_private;
34772
34773- atomic_set(&dev_priv->swi_emitted, 0);
34774+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
34775 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
34776
34777 dev->max_vblank_count = 0x001fffff;
34778diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
34779index 4747910..48ca4b3 100644
34780--- a/drivers/gpu/drm/radeon/radeon_state.c
34781+++ b/drivers/gpu/drm/radeon/radeon_state.c
34782@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
34783 {
34784 drm_radeon_private_t *dev_priv = dev->dev_private;
34785 drm_radeon_getparam_t *param = data;
34786- int value;
34787+ int value = 0;
34788
34789 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
34790
34791diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
34792index 1381e06..0e53b17 100644
34793--- a/drivers/gpu/drm/radeon/radeon_ttm.c
34794+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
34795@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
34796 DRM_INFO("radeon: ttm finalized\n");
34797 }
34798
34799-static struct vm_operations_struct radeon_ttm_vm_ops;
34800-static const struct vm_operations_struct *ttm_vm_ops = NULL;
34801-
34802-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34803-{
34804- struct ttm_buffer_object *bo;
34805- int r;
34806-
34807- bo = (struct ttm_buffer_object *)vma->vm_private_data;
34808- if (bo == NULL) {
34809- return VM_FAULT_NOPAGE;
34810- }
34811- r = ttm_vm_ops->fault(vma, vmf);
34812- return r;
34813-}
34814-
34815 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
34816 {
34817 struct drm_file *file_priv;
34818 struct radeon_device *rdev;
34819- int r;
34820
34821 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
34822 return drm_mmap(filp, vma);
34823@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
34824
34825 file_priv = (struct drm_file *)filp->private_data;
34826 rdev = file_priv->minor->dev->dev_private;
34827- if (rdev == NULL) {
34828+ if (!rdev)
34829 return -EINVAL;
34830- }
34831- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
34832- if (unlikely(r != 0)) {
34833- return r;
34834- }
34835- if (unlikely(ttm_vm_ops == NULL)) {
34836- ttm_vm_ops = vma->vm_ops;
34837- radeon_ttm_vm_ops = *ttm_vm_ops;
34838- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
34839- }
34840- vma->vm_ops = &radeon_ttm_vm_ops;
34841- return 0;
34842+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
34843 }
34844
34845
34846diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
34847index b12ff76..0bd0c6e 100644
34848--- a/drivers/gpu/drm/radeon/rs690.c
34849+++ b/drivers/gpu/drm/radeon/rs690.c
34850@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
34851 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
34852 rdev->pm.sideport_bandwidth.full)
34853 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
34854- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
34855+ read_delay_latency.full = rfixed_const(800 * 1000);
34856 read_delay_latency.full = rfixed_div(read_delay_latency,
34857 rdev->pm.igp_sideport_mclk);
34858+ a.full = rfixed_const(370);
34859+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
34860 } else {
34861 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
34862 rdev->pm.k8_bandwidth.full)
34863diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
34864index 0ed436e..e6e7ce3 100644
34865--- a/drivers/gpu/drm/ttm/ttm_bo.c
34866+++ b/drivers/gpu/drm/ttm/ttm_bo.c
34867@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
34868 NULL
34869 };
34870
34871-static struct sysfs_ops ttm_bo_global_ops = {
34872+static const struct sysfs_ops ttm_bo_global_ops = {
34873 .show = &ttm_bo_global_show
34874 };
34875
34876diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
34877index 1c040d0..f9e4af8 100644
34878--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
34879+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
34880@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34881 {
34882 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
34883 vma->vm_private_data;
34884- struct ttm_bo_device *bdev = bo->bdev;
34885+ struct ttm_bo_device *bdev;
34886 unsigned long bus_base;
34887 unsigned long bus_offset;
34888 unsigned long bus_size;
34889@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34890 unsigned long address = (unsigned long)vmf->virtual_address;
34891 int retval = VM_FAULT_NOPAGE;
34892
34893+ if (!bo)
34894+ return VM_FAULT_NOPAGE;
34895+ bdev = bo->bdev;
34896+
34897 /*
34898 * Work around locking order reversal in fault / nopfn
34899 * between mmap_sem and bo_reserve: Perform a trylock operation
34900diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
34901index b170071..28ae90e 100644
34902--- a/drivers/gpu/drm/ttm/ttm_global.c
34903+++ b/drivers/gpu/drm/ttm/ttm_global.c
34904@@ -36,7 +36,7 @@
34905 struct ttm_global_item {
34906 struct mutex mutex;
34907 void *object;
34908- int refcount;
34909+ atomic_t refcount;
34910 };
34911
34912 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
34913@@ -49,7 +49,7 @@ void ttm_global_init(void)
34914 struct ttm_global_item *item = &glob[i];
34915 mutex_init(&item->mutex);
34916 item->object = NULL;
34917- item->refcount = 0;
34918+ atomic_set(&item->refcount, 0);
34919 }
34920 }
34921
34922@@ -59,7 +59,7 @@ void ttm_global_release(void)
34923 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
34924 struct ttm_global_item *item = &glob[i];
34925 BUG_ON(item->object != NULL);
34926- BUG_ON(item->refcount != 0);
34927+ BUG_ON(atomic_read(&item->refcount) != 0);
34928 }
34929 }
34930
34931@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34932 void *object;
34933
34934 mutex_lock(&item->mutex);
34935- if (item->refcount == 0) {
34936+ if (atomic_read(&item->refcount) == 0) {
34937 item->object = kzalloc(ref->size, GFP_KERNEL);
34938 if (unlikely(item->object == NULL)) {
34939 ret = -ENOMEM;
34940@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
34941 goto out_err;
34942
34943 }
34944- ++item->refcount;
34945+ atomic_inc(&item->refcount);
34946 ref->object = item->object;
34947 object = item->object;
34948 mutex_unlock(&item->mutex);
34949@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
34950 struct ttm_global_item *item = &glob[ref->global_type];
34951
34952 mutex_lock(&item->mutex);
34953- BUG_ON(item->refcount == 0);
34954+ BUG_ON(atomic_read(&item->refcount) == 0);
34955 BUG_ON(ref->object != item->object);
34956- if (--item->refcount == 0) {
34957+ if (atomic_dec_and_test(&item->refcount)) {
34958 ref->release(ref);
34959 item->object = NULL;
34960 }
34961diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
34962index 072c281..d8ef483 100644
34963--- a/drivers/gpu/drm/ttm/ttm_memory.c
34964+++ b/drivers/gpu/drm/ttm/ttm_memory.c
34965@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
34966 NULL
34967 };
34968
34969-static struct sysfs_ops ttm_mem_zone_ops = {
34970+static const struct sysfs_ops ttm_mem_zone_ops = {
34971 .show = &ttm_mem_zone_show,
34972 .store = &ttm_mem_zone_store
34973 };
34974diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
34975index cafcb84..b8e66cc 100644
34976--- a/drivers/gpu/drm/via/via_drv.h
34977+++ b/drivers/gpu/drm/via/via_drv.h
34978@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
34979 typedef uint32_t maskarray_t[5];
34980
34981 typedef struct drm_via_irq {
34982- atomic_t irq_received;
34983+ atomic_unchecked_t irq_received;
34984 uint32_t pending_mask;
34985 uint32_t enable_mask;
34986 wait_queue_head_t irq_queue;
34987@@ -75,7 +75,7 @@ typedef struct drm_via_private {
34988 struct timeval last_vblank;
34989 int last_vblank_valid;
34990 unsigned usec_per_vblank;
34991- atomic_t vbl_received;
34992+ atomic_unchecked_t vbl_received;
34993 drm_via_state_t hc_state;
34994 char pci_buf[VIA_PCI_BUF_SIZE];
34995 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
34996diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
34997index 5935b88..127a8a6 100644
34998--- a/drivers/gpu/drm/via/via_irq.c
34999+++ b/drivers/gpu/drm/via/via_irq.c
35000@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35001 if (crtc != 0)
35002 return 0;
35003
35004- return atomic_read(&dev_priv->vbl_received);
35005+ return atomic_read_unchecked(&dev_priv->vbl_received);
35006 }
35007
35008 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35009@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35010
35011 status = VIA_READ(VIA_REG_INTERRUPT);
35012 if (status & VIA_IRQ_VBLANK_PENDING) {
35013- atomic_inc(&dev_priv->vbl_received);
35014- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35015+ atomic_inc_unchecked(&dev_priv->vbl_received);
35016+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35017 do_gettimeofday(&cur_vblank);
35018 if (dev_priv->last_vblank_valid) {
35019 dev_priv->usec_per_vblank =
35020@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35021 dev_priv->last_vblank = cur_vblank;
35022 dev_priv->last_vblank_valid = 1;
35023 }
35024- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35025+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35026 DRM_DEBUG("US per vblank is: %u\n",
35027 dev_priv->usec_per_vblank);
35028 }
35029@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35030
35031 for (i = 0; i < dev_priv->num_irqs; ++i) {
35032 if (status & cur_irq->pending_mask) {
35033- atomic_inc(&cur_irq->irq_received);
35034+ atomic_inc_unchecked(&cur_irq->irq_received);
35035 DRM_WAKEUP(&cur_irq->irq_queue);
35036 handled = 1;
35037 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
35038@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
35039 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35040 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35041 masks[irq][4]));
35042- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35043+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35044 } else {
35045 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35046 (((cur_irq_sequence =
35047- atomic_read(&cur_irq->irq_received)) -
35048+ atomic_read_unchecked(&cur_irq->irq_received)) -
35049 *sequence) <= (1 << 23)));
35050 }
35051 *sequence = cur_irq_sequence;
35052@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
35053 }
35054
35055 for (i = 0; i < dev_priv->num_irqs; ++i) {
35056- atomic_set(&cur_irq->irq_received, 0);
35057+ atomic_set_unchecked(&cur_irq->irq_received, 0);
35058 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35059 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35060 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35061@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35062 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35063 case VIA_IRQ_RELATIVE:
35064 irqwait->request.sequence +=
35065- atomic_read(&cur_irq->irq_received);
35066+ atomic_read_unchecked(&cur_irq->irq_received);
35067 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35068 case VIA_IRQ_ABSOLUTE:
35069 break;
35070diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
35071index aa8688d..6a0140c 100644
35072--- a/drivers/gpu/vga/vgaarb.c
35073+++ b/drivers/gpu/vga/vgaarb.c
35074@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
35075 uc = &priv->cards[i];
35076 }
35077
35078- if (!uc)
35079- return -EINVAL;
35080+ if (!uc) {
35081+ ret_val = -EINVAL;
35082+ goto done;
35083+ }
35084
35085- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
35086- return -EINVAL;
35087+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
35088+ ret_val = -EINVAL;
35089+ goto done;
35090+ }
35091
35092- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
35093- return -EINVAL;
35094+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
35095+ ret_val = -EINVAL;
35096+ goto done;
35097+ }
35098
35099 vga_put(pdev, io_state);
35100
35101diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
35102index 11f8069..4783396 100644
35103--- a/drivers/hid/hid-core.c
35104+++ b/drivers/hid/hid-core.c
35105@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
35106
35107 int hid_add_device(struct hid_device *hdev)
35108 {
35109- static atomic_t id = ATOMIC_INIT(0);
35110+ static atomic_unchecked_t id = ATOMIC_INIT(0);
35111 int ret;
35112
35113 if (WARN_ON(hdev->status & HID_STAT_ADDED))
35114@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
35115 /* XXX hack, any other cleaner solution after the driver core
35116 * is converted to allow more than 20 bytes as the device name? */
35117 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
35118- hdev->vendor, hdev->product, atomic_inc_return(&id));
35119+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
35120
35121 ret = device_add(&hdev->dev);
35122 if (!ret)
35123diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
35124index 8b6ee24..70f657d 100644
35125--- a/drivers/hid/usbhid/hiddev.c
35126+++ b/drivers/hid/usbhid/hiddev.c
35127@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
35128 return put_user(HID_VERSION, (int __user *)arg);
35129
35130 case HIDIOCAPPLICATION:
35131- if (arg < 0 || arg >= hid->maxapplication)
35132+ if (arg >= hid->maxapplication)
35133 return -EINVAL;
35134
35135 for (i = 0; i < hid->maxcollection; i++)
35136diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
35137index 5d5ed69..f40533e 100644
35138--- a/drivers/hwmon/lis3lv02d.c
35139+++ b/drivers/hwmon/lis3lv02d.c
35140@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
35141 * the lid is closed. This leads to interrupts as soon as a little move
35142 * is done.
35143 */
35144- atomic_inc(&lis3_dev.count);
35145+ atomic_inc_unchecked(&lis3_dev.count);
35146
35147 wake_up_interruptible(&lis3_dev.misc_wait);
35148 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
35149@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
35150 if (test_and_set_bit(0, &lis3_dev.misc_opened))
35151 return -EBUSY; /* already open */
35152
35153- atomic_set(&lis3_dev.count, 0);
35154+ atomic_set_unchecked(&lis3_dev.count, 0);
35155
35156 /*
35157 * The sensor can generate interrupts for free-fall and direction
35158@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
35159 add_wait_queue(&lis3_dev.misc_wait, &wait);
35160 while (true) {
35161 set_current_state(TASK_INTERRUPTIBLE);
35162- data = atomic_xchg(&lis3_dev.count, 0);
35163+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
35164 if (data)
35165 break;
35166
35167@@ -244,7 +244,7 @@ out:
35168 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
35169 {
35170 poll_wait(file, &lis3_dev.misc_wait, wait);
35171- if (atomic_read(&lis3_dev.count))
35172+ if (atomic_read_unchecked(&lis3_dev.count))
35173 return POLLIN | POLLRDNORM;
35174 return 0;
35175 }
35176diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
35177index 7cdd76f..fe0efdf 100644
35178--- a/drivers/hwmon/lis3lv02d.h
35179+++ b/drivers/hwmon/lis3lv02d.h
35180@@ -201,7 +201,7 @@ struct lis3lv02d {
35181
35182 struct input_polled_dev *idev; /* input device */
35183 struct platform_device *pdev; /* platform device */
35184- atomic_t count; /* interrupt count after last read */
35185+ atomic_unchecked_t count; /* interrupt count after last read */
35186 int xcalib; /* calibrated null value for x */
35187 int ycalib; /* calibrated null value for y */
35188 int zcalib; /* calibrated null value for z */
35189diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
35190index 740785e..5a5c6c6 100644
35191--- a/drivers/hwmon/sht15.c
35192+++ b/drivers/hwmon/sht15.c
35193@@ -112,7 +112,7 @@ struct sht15_data {
35194 int supply_uV;
35195 int supply_uV_valid;
35196 struct work_struct update_supply_work;
35197- atomic_t interrupt_handled;
35198+ atomic_unchecked_t interrupt_handled;
35199 };
35200
35201 /**
35202@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
35203 return ret;
35204
35205 gpio_direction_input(data->pdata->gpio_data);
35206- atomic_set(&data->interrupt_handled, 0);
35207+ atomic_set_unchecked(&data->interrupt_handled, 0);
35208
35209 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35210 if (gpio_get_value(data->pdata->gpio_data) == 0) {
35211 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
35212 /* Only relevant if the interrupt hasn't occured. */
35213- if (!atomic_read(&data->interrupt_handled))
35214+ if (!atomic_read_unchecked(&data->interrupt_handled))
35215 schedule_work(&data->read_work);
35216 }
35217 ret = wait_event_timeout(data->wait_queue,
35218@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
35219 struct sht15_data *data = d;
35220 /* First disable the interrupt */
35221 disable_irq_nosync(irq);
35222- atomic_inc(&data->interrupt_handled);
35223+ atomic_inc_unchecked(&data->interrupt_handled);
35224 /* Then schedule a reading work struct */
35225 if (data->flag != SHT15_READING_NOTHING)
35226 schedule_work(&data->read_work);
35227@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
35228 here as could have gone low in meantime so verify
35229 it hasn't!
35230 */
35231- atomic_set(&data->interrupt_handled, 0);
35232+ atomic_set_unchecked(&data->interrupt_handled, 0);
35233 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35234 /* If still not occured or another handler has been scheduled */
35235 if (gpio_get_value(data->pdata->gpio_data)
35236- || atomic_read(&data->interrupt_handled))
35237+ || atomic_read_unchecked(&data->interrupt_handled))
35238 return;
35239 }
35240 /* Read the data back from the device */
35241diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
35242index 97851c5..cb40626 100644
35243--- a/drivers/hwmon/w83791d.c
35244+++ b/drivers/hwmon/w83791d.c
35245@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
35246 struct i2c_board_info *info);
35247 static int w83791d_remove(struct i2c_client *client);
35248
35249-static int w83791d_read(struct i2c_client *client, u8 register);
35250-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
35251+static int w83791d_read(struct i2c_client *client, u8 reg);
35252+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
35253 static struct w83791d_data *w83791d_update_device(struct device *dev);
35254
35255 #ifdef DEBUG
35256diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
35257index 378fcb5..5e91fa8 100644
35258--- a/drivers/i2c/busses/i2c-amd756-s4882.c
35259+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
35260@@ -43,7 +43,7 @@
35261 extern struct i2c_adapter amd756_smbus;
35262
35263 static struct i2c_adapter *s4882_adapter;
35264-static struct i2c_algorithm *s4882_algo;
35265+static i2c_algorithm_no_const *s4882_algo;
35266
35267 /* Wrapper access functions for multiplexed SMBus */
35268 static DEFINE_MUTEX(amd756_lock);
35269diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
35270index 29015eb..af2d8e9 100644
35271--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
35272+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
35273@@ -41,7 +41,7 @@
35274 extern struct i2c_adapter *nforce2_smbus;
35275
35276 static struct i2c_adapter *s4985_adapter;
35277-static struct i2c_algorithm *s4985_algo;
35278+static i2c_algorithm_no_const *s4985_algo;
35279
35280 /* Wrapper access functions for multiplexed SMBus */
35281 static DEFINE_MUTEX(nforce2_lock);
35282diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
35283index 878f8ec..12376fc 100644
35284--- a/drivers/ide/aec62xx.c
35285+++ b/drivers/ide/aec62xx.c
35286@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
35287 .cable_detect = atp86x_cable_detect,
35288 };
35289
35290-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
35291+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
35292 { /* 0: AEC6210 */
35293 .name = DRV_NAME,
35294 .init_chipset = init_chipset_aec62xx,
35295diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
35296index e59b6de..4b4fc65 100644
35297--- a/drivers/ide/alim15x3.c
35298+++ b/drivers/ide/alim15x3.c
35299@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
35300 .dma_sff_read_status = ide_dma_sff_read_status,
35301 };
35302
35303-static const struct ide_port_info ali15x3_chipset __devinitdata = {
35304+static const struct ide_port_info ali15x3_chipset __devinitconst = {
35305 .name = DRV_NAME,
35306 .init_chipset = init_chipset_ali15x3,
35307 .init_hwif = init_hwif_ali15x3,
35308diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
35309index 628cd2e..087a414 100644
35310--- a/drivers/ide/amd74xx.c
35311+++ b/drivers/ide/amd74xx.c
35312@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
35313 .udma_mask = udma, \
35314 }
35315
35316-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
35317+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
35318 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
35319 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
35320 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
35321diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
35322index 837322b..837fd71 100644
35323--- a/drivers/ide/atiixp.c
35324+++ b/drivers/ide/atiixp.c
35325@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
35326 .cable_detect = atiixp_cable_detect,
35327 };
35328
35329-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
35330+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
35331 { /* 0: IXP200/300/400/700 */
35332 .name = DRV_NAME,
35333 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
35334diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
35335index ca0c46f..d55318a 100644
35336--- a/drivers/ide/cmd64x.c
35337+++ b/drivers/ide/cmd64x.c
35338@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
35339 .dma_sff_read_status = ide_dma_sff_read_status,
35340 };
35341
35342-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
35343+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
35344 { /* 0: CMD643 */
35345 .name = DRV_NAME,
35346 .init_chipset = init_chipset_cmd64x,
35347diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
35348index 09f98ed..cebc5bc 100644
35349--- a/drivers/ide/cs5520.c
35350+++ b/drivers/ide/cs5520.c
35351@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
35352 .set_dma_mode = cs5520_set_dma_mode,
35353 };
35354
35355-static const struct ide_port_info cyrix_chipset __devinitdata = {
35356+static const struct ide_port_info cyrix_chipset __devinitconst = {
35357 .name = DRV_NAME,
35358 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
35359 .port_ops = &cs5520_port_ops,
35360diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
35361index 40bf05e..7d58ca0 100644
35362--- a/drivers/ide/cs5530.c
35363+++ b/drivers/ide/cs5530.c
35364@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
35365 .udma_filter = cs5530_udma_filter,
35366 };
35367
35368-static const struct ide_port_info cs5530_chipset __devinitdata = {
35369+static const struct ide_port_info cs5530_chipset __devinitconst = {
35370 .name = DRV_NAME,
35371 .init_chipset = init_chipset_cs5530,
35372 .init_hwif = init_hwif_cs5530,
35373diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
35374index 983d957..53e6172 100644
35375--- a/drivers/ide/cs5535.c
35376+++ b/drivers/ide/cs5535.c
35377@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
35378 .cable_detect = cs5535_cable_detect,
35379 };
35380
35381-static const struct ide_port_info cs5535_chipset __devinitdata = {
35382+static const struct ide_port_info cs5535_chipset __devinitconst = {
35383 .name = DRV_NAME,
35384 .port_ops = &cs5535_port_ops,
35385 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
35386diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
35387index 74fc540..8e933d8 100644
35388--- a/drivers/ide/cy82c693.c
35389+++ b/drivers/ide/cy82c693.c
35390@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
35391 .set_dma_mode = cy82c693_set_dma_mode,
35392 };
35393
35394-static const struct ide_port_info cy82c693_chipset __devinitdata = {
35395+static const struct ide_port_info cy82c693_chipset __devinitconst = {
35396 .name = DRV_NAME,
35397 .init_iops = init_iops_cy82c693,
35398 .port_ops = &cy82c693_port_ops,
35399diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
35400index 7ce68ef..e78197d 100644
35401--- a/drivers/ide/hpt366.c
35402+++ b/drivers/ide/hpt366.c
35403@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
35404 }
35405 };
35406
35407-static const struct hpt_info hpt36x __devinitdata = {
35408+static const struct hpt_info hpt36x __devinitconst = {
35409 .chip_name = "HPT36x",
35410 .chip_type = HPT36x,
35411 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
35412@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
35413 .timings = &hpt36x_timings
35414 };
35415
35416-static const struct hpt_info hpt370 __devinitdata = {
35417+static const struct hpt_info hpt370 __devinitconst = {
35418 .chip_name = "HPT370",
35419 .chip_type = HPT370,
35420 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35421@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
35422 .timings = &hpt37x_timings
35423 };
35424
35425-static const struct hpt_info hpt370a __devinitdata = {
35426+static const struct hpt_info hpt370a __devinitconst = {
35427 .chip_name = "HPT370A",
35428 .chip_type = HPT370A,
35429 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
35430@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
35431 .timings = &hpt37x_timings
35432 };
35433
35434-static const struct hpt_info hpt374 __devinitdata = {
35435+static const struct hpt_info hpt374 __devinitconst = {
35436 .chip_name = "HPT374",
35437 .chip_type = HPT374,
35438 .udma_mask = ATA_UDMA5,
35439@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
35440 .timings = &hpt37x_timings
35441 };
35442
35443-static const struct hpt_info hpt372 __devinitdata = {
35444+static const struct hpt_info hpt372 __devinitconst = {
35445 .chip_name = "HPT372",
35446 .chip_type = HPT372,
35447 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35448@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
35449 .timings = &hpt37x_timings
35450 };
35451
35452-static const struct hpt_info hpt372a __devinitdata = {
35453+static const struct hpt_info hpt372a __devinitconst = {
35454 .chip_name = "HPT372A",
35455 .chip_type = HPT372A,
35456 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35457@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
35458 .timings = &hpt37x_timings
35459 };
35460
35461-static const struct hpt_info hpt302 __devinitdata = {
35462+static const struct hpt_info hpt302 __devinitconst = {
35463 .chip_name = "HPT302",
35464 .chip_type = HPT302,
35465 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35466@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
35467 .timings = &hpt37x_timings
35468 };
35469
35470-static const struct hpt_info hpt371 __devinitdata = {
35471+static const struct hpt_info hpt371 __devinitconst = {
35472 .chip_name = "HPT371",
35473 .chip_type = HPT371,
35474 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35475@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
35476 .timings = &hpt37x_timings
35477 };
35478
35479-static const struct hpt_info hpt372n __devinitdata = {
35480+static const struct hpt_info hpt372n __devinitconst = {
35481 .chip_name = "HPT372N",
35482 .chip_type = HPT372N,
35483 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35484@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
35485 .timings = &hpt37x_timings
35486 };
35487
35488-static const struct hpt_info hpt302n __devinitdata = {
35489+static const struct hpt_info hpt302n __devinitconst = {
35490 .chip_name = "HPT302N",
35491 .chip_type = HPT302N,
35492 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35493@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
35494 .timings = &hpt37x_timings
35495 };
35496
35497-static const struct hpt_info hpt371n __devinitdata = {
35498+static const struct hpt_info hpt371n __devinitconst = {
35499 .chip_name = "HPT371N",
35500 .chip_type = HPT371N,
35501 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
35502@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
35503 .dma_sff_read_status = ide_dma_sff_read_status,
35504 };
35505
35506-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
35507+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
35508 { /* 0: HPT36x */
35509 .name = DRV_NAME,
35510 .init_chipset = init_chipset_hpt366,
35511diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
35512index 2de76cc..74186a1 100644
35513--- a/drivers/ide/ide-cd.c
35514+++ b/drivers/ide/ide-cd.c
35515@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
35516 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
35517 if ((unsigned long)buf & alignment
35518 || blk_rq_bytes(rq) & q->dma_pad_mask
35519- || object_is_on_stack(buf))
35520+ || object_starts_on_stack(buf))
35521 drive->dma = 0;
35522 }
35523 }
35524diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
35525index fefbdfc..62ff465 100644
35526--- a/drivers/ide/ide-floppy.c
35527+++ b/drivers/ide/ide-floppy.c
35528@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
35529 u8 pc_buf[256], header_len, desc_cnt;
35530 int i, rc = 1, blocks, length;
35531
35532+ pax_track_stack();
35533+
35534 ide_debug_log(IDE_DBG_FUNC, "enter");
35535
35536 drive->bios_cyl = 0;
35537diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
35538index 39d4e01..11538ce 100644
35539--- a/drivers/ide/ide-pci-generic.c
35540+++ b/drivers/ide/ide-pci-generic.c
35541@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
35542 .udma_mask = ATA_UDMA6, \
35543 }
35544
35545-static const struct ide_port_info generic_chipsets[] __devinitdata = {
35546+static const struct ide_port_info generic_chipsets[] __devinitconst = {
35547 /* 0: Unknown */
35548 DECLARE_GENERIC_PCI_DEV(0),
35549
35550diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
35551index 0d266a5..aaca790 100644
35552--- a/drivers/ide/it8172.c
35553+++ b/drivers/ide/it8172.c
35554@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
35555 .set_dma_mode = it8172_set_dma_mode,
35556 };
35557
35558-static const struct ide_port_info it8172_port_info __devinitdata = {
35559+static const struct ide_port_info it8172_port_info __devinitconst = {
35560 .name = DRV_NAME,
35561 .port_ops = &it8172_port_ops,
35562 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
35563diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
35564index 4797616..4be488a 100644
35565--- a/drivers/ide/it8213.c
35566+++ b/drivers/ide/it8213.c
35567@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
35568 .cable_detect = it8213_cable_detect,
35569 };
35570
35571-static const struct ide_port_info it8213_chipset __devinitdata = {
35572+static const struct ide_port_info it8213_chipset __devinitconst = {
35573 .name = DRV_NAME,
35574 .enablebits = { {0x41, 0x80, 0x80} },
35575 .port_ops = &it8213_port_ops,
35576diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
35577index 51aa745..146ee60 100644
35578--- a/drivers/ide/it821x.c
35579+++ b/drivers/ide/it821x.c
35580@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
35581 .cable_detect = it821x_cable_detect,
35582 };
35583
35584-static const struct ide_port_info it821x_chipset __devinitdata = {
35585+static const struct ide_port_info it821x_chipset __devinitconst = {
35586 .name = DRV_NAME,
35587 .init_chipset = init_chipset_it821x,
35588 .init_hwif = init_hwif_it821x,
35589diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
35590index bf2be64..9270098 100644
35591--- a/drivers/ide/jmicron.c
35592+++ b/drivers/ide/jmicron.c
35593@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
35594 .cable_detect = jmicron_cable_detect,
35595 };
35596
35597-static const struct ide_port_info jmicron_chipset __devinitdata = {
35598+static const struct ide_port_info jmicron_chipset __devinitconst = {
35599 .name = DRV_NAME,
35600 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
35601 .port_ops = &jmicron_port_ops,
35602diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
35603index 95327a2..73f78d8 100644
35604--- a/drivers/ide/ns87415.c
35605+++ b/drivers/ide/ns87415.c
35606@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
35607 .dma_sff_read_status = superio_dma_sff_read_status,
35608 };
35609
35610-static const struct ide_port_info ns87415_chipset __devinitdata = {
35611+static const struct ide_port_info ns87415_chipset __devinitconst = {
35612 .name = DRV_NAME,
35613 .init_hwif = init_hwif_ns87415,
35614 .tp_ops = &ns87415_tp_ops,
35615diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
35616index f1d70d6..e1de05b 100644
35617--- a/drivers/ide/opti621.c
35618+++ b/drivers/ide/opti621.c
35619@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
35620 .set_pio_mode = opti621_set_pio_mode,
35621 };
35622
35623-static const struct ide_port_info opti621_chipset __devinitdata = {
35624+static const struct ide_port_info opti621_chipset __devinitconst = {
35625 .name = DRV_NAME,
35626 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
35627 .port_ops = &opti621_port_ops,
35628diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
35629index 65ba823..7311f4d 100644
35630--- a/drivers/ide/pdc202xx_new.c
35631+++ b/drivers/ide/pdc202xx_new.c
35632@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
35633 .udma_mask = udma, \
35634 }
35635
35636-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
35637+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
35638 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
35639 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
35640 };
35641diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
35642index cb812f3..af816ef 100644
35643--- a/drivers/ide/pdc202xx_old.c
35644+++ b/drivers/ide/pdc202xx_old.c
35645@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
35646 .max_sectors = sectors, \
35647 }
35648
35649-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
35650+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
35651 { /* 0: PDC20246 */
35652 .name = DRV_NAME,
35653 .init_chipset = init_chipset_pdc202xx,
35654diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
35655index bf14f39..15c4b98 100644
35656--- a/drivers/ide/piix.c
35657+++ b/drivers/ide/piix.c
35658@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
35659 .udma_mask = udma, \
35660 }
35661
35662-static const struct ide_port_info piix_pci_info[] __devinitdata = {
35663+static const struct ide_port_info piix_pci_info[] __devinitconst = {
35664 /* 0: MPIIX */
35665 { /*
35666 * MPIIX actually has only a single IDE channel mapped to
35667diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
35668index a6414a8..c04173e 100644
35669--- a/drivers/ide/rz1000.c
35670+++ b/drivers/ide/rz1000.c
35671@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
35672 }
35673 }
35674
35675-static const struct ide_port_info rz1000_chipset __devinitdata = {
35676+static const struct ide_port_info rz1000_chipset __devinitconst = {
35677 .name = DRV_NAME,
35678 .host_flags = IDE_HFLAG_NO_DMA,
35679 };
35680diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
35681index d467478..9203942 100644
35682--- a/drivers/ide/sc1200.c
35683+++ b/drivers/ide/sc1200.c
35684@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
35685 .dma_sff_read_status = ide_dma_sff_read_status,
35686 };
35687
35688-static const struct ide_port_info sc1200_chipset __devinitdata = {
35689+static const struct ide_port_info sc1200_chipset __devinitconst = {
35690 .name = DRV_NAME,
35691 .port_ops = &sc1200_port_ops,
35692 .dma_ops = &sc1200_dma_ops,
35693diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
35694index 1104bb3..59c5194 100644
35695--- a/drivers/ide/scc_pata.c
35696+++ b/drivers/ide/scc_pata.c
35697@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
35698 .dma_sff_read_status = scc_dma_sff_read_status,
35699 };
35700
35701-static const struct ide_port_info scc_chipset __devinitdata = {
35702+static const struct ide_port_info scc_chipset __devinitconst = {
35703 .name = "sccIDE",
35704 .init_iops = init_iops_scc,
35705 .init_dma = scc_init_dma,
35706diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
35707index b6554ef..6cc2cc3 100644
35708--- a/drivers/ide/serverworks.c
35709+++ b/drivers/ide/serverworks.c
35710@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
35711 .cable_detect = svwks_cable_detect,
35712 };
35713
35714-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
35715+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
35716 { /* 0: OSB4 */
35717 .name = DRV_NAME,
35718 .init_chipset = init_chipset_svwks,
35719diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
35720index ab3db61..afed580 100644
35721--- a/drivers/ide/setup-pci.c
35722+++ b/drivers/ide/setup-pci.c
35723@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
35724 int ret, i, n_ports = dev2 ? 4 : 2;
35725 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
35726
35727+ pax_track_stack();
35728+
35729 for (i = 0; i < n_ports / 2; i++) {
35730 ret = ide_setup_pci_controller(pdev[i], d, !i);
35731 if (ret < 0)
35732diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
35733index d95df52..0b03a39 100644
35734--- a/drivers/ide/siimage.c
35735+++ b/drivers/ide/siimage.c
35736@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
35737 .udma_mask = ATA_UDMA6, \
35738 }
35739
35740-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
35741+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
35742 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
35743 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
35744 };
35745diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
35746index 3b88eba..ca8699d 100644
35747--- a/drivers/ide/sis5513.c
35748+++ b/drivers/ide/sis5513.c
35749@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
35750 .cable_detect = sis_cable_detect,
35751 };
35752
35753-static const struct ide_port_info sis5513_chipset __devinitdata = {
35754+static const struct ide_port_info sis5513_chipset __devinitconst = {
35755 .name = DRV_NAME,
35756 .init_chipset = init_chipset_sis5513,
35757 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
35758diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
35759index d698da4..fca42a4 100644
35760--- a/drivers/ide/sl82c105.c
35761+++ b/drivers/ide/sl82c105.c
35762@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
35763 .dma_sff_read_status = ide_dma_sff_read_status,
35764 };
35765
35766-static const struct ide_port_info sl82c105_chipset __devinitdata = {
35767+static const struct ide_port_info sl82c105_chipset __devinitconst = {
35768 .name = DRV_NAME,
35769 .init_chipset = init_chipset_sl82c105,
35770 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
35771diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
35772index 1ccfb40..83d5779 100644
35773--- a/drivers/ide/slc90e66.c
35774+++ b/drivers/ide/slc90e66.c
35775@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
35776 .cable_detect = slc90e66_cable_detect,
35777 };
35778
35779-static const struct ide_port_info slc90e66_chipset __devinitdata = {
35780+static const struct ide_port_info slc90e66_chipset __devinitconst = {
35781 .name = DRV_NAME,
35782 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
35783 .port_ops = &slc90e66_port_ops,
35784diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
35785index 05a93d6..5f9e325 100644
35786--- a/drivers/ide/tc86c001.c
35787+++ b/drivers/ide/tc86c001.c
35788@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
35789 .dma_sff_read_status = ide_dma_sff_read_status,
35790 };
35791
35792-static const struct ide_port_info tc86c001_chipset __devinitdata = {
35793+static const struct ide_port_info tc86c001_chipset __devinitconst = {
35794 .name = DRV_NAME,
35795 .init_hwif = init_hwif_tc86c001,
35796 .port_ops = &tc86c001_port_ops,
35797diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
35798index 8773c3b..7907d6c 100644
35799--- a/drivers/ide/triflex.c
35800+++ b/drivers/ide/triflex.c
35801@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
35802 .set_dma_mode = triflex_set_mode,
35803 };
35804
35805-static const struct ide_port_info triflex_device __devinitdata = {
35806+static const struct ide_port_info triflex_device __devinitconst = {
35807 .name = DRV_NAME,
35808 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
35809 .port_ops = &triflex_port_ops,
35810diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
35811index 4b42ca0..e494a98 100644
35812--- a/drivers/ide/trm290.c
35813+++ b/drivers/ide/trm290.c
35814@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
35815 .dma_check = trm290_dma_check,
35816 };
35817
35818-static const struct ide_port_info trm290_chipset __devinitdata = {
35819+static const struct ide_port_info trm290_chipset __devinitconst = {
35820 .name = DRV_NAME,
35821 .init_hwif = init_hwif_trm290,
35822 .tp_ops = &trm290_tp_ops,
35823diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
35824index 028de26..520d5d5 100644
35825--- a/drivers/ide/via82cxxx.c
35826+++ b/drivers/ide/via82cxxx.c
35827@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
35828 .cable_detect = via82cxxx_cable_detect,
35829 };
35830
35831-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
35832+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
35833 .name = DRV_NAME,
35834 .init_chipset = init_chipset_via82cxxx,
35835 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
35836diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
35837index 2cd00b5..14de699 100644
35838--- a/drivers/ieee1394/dv1394.c
35839+++ b/drivers/ieee1394/dv1394.c
35840@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
35841 based upon DIF section and sequence
35842 */
35843
35844-static void inline
35845+static inline void
35846 frame_put_packet (struct frame *f, struct packet *p)
35847 {
35848 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
35849diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
35850index e947d8f..6a966b9 100644
35851--- a/drivers/ieee1394/hosts.c
35852+++ b/drivers/ieee1394/hosts.c
35853@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
35854 }
35855
35856 static struct hpsb_host_driver dummy_driver = {
35857+ .name = "dummy",
35858 .transmit_packet = dummy_transmit_packet,
35859 .devctl = dummy_devctl,
35860 .isoctl = dummy_isoctl
35861diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
35862index ddaab6e..8d37435 100644
35863--- a/drivers/ieee1394/init_ohci1394_dma.c
35864+++ b/drivers/ieee1394/init_ohci1394_dma.c
35865@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
35866 for (func = 0; func < 8; func++) {
35867 u32 class = read_pci_config(num,slot,func,
35868 PCI_CLASS_REVISION);
35869- if ((class == 0xffffffff))
35870+ if (class == 0xffffffff)
35871 continue; /* No device at this func */
35872
35873 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
35874diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
35875index 65c1429..5d8c11f 100644
35876--- a/drivers/ieee1394/ohci1394.c
35877+++ b/drivers/ieee1394/ohci1394.c
35878@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
35879 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
35880
35881 /* Module Parameters */
35882-static int phys_dma = 1;
35883+static int phys_dma;
35884 module_param(phys_dma, int, 0444);
35885-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
35886+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
35887
35888 static void dma_trm_tasklet(unsigned long data);
35889 static void dma_trm_reset(struct dma_trm_ctx *d);
35890diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
35891index f199896..78c9fc8 100644
35892--- a/drivers/ieee1394/sbp2.c
35893+++ b/drivers/ieee1394/sbp2.c
35894@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
35895 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
35896 MODULE_LICENSE("GPL");
35897
35898-static int sbp2_module_init(void)
35899+static int __init sbp2_module_init(void)
35900 {
35901 int ret;
35902
35903diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
35904index a5dea6b..0cefe8f 100644
35905--- a/drivers/infiniband/core/cm.c
35906+++ b/drivers/infiniband/core/cm.c
35907@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
35908
35909 struct cm_counter_group {
35910 struct kobject obj;
35911- atomic_long_t counter[CM_ATTR_COUNT];
35912+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
35913 };
35914
35915 struct cm_counter_attribute {
35916@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
35917 struct ib_mad_send_buf *msg = NULL;
35918 int ret;
35919
35920- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35921+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35922 counter[CM_REQ_COUNTER]);
35923
35924 /* Quick state check to discard duplicate REQs. */
35925@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
35926 if (!cm_id_priv)
35927 return;
35928
35929- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35930+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35931 counter[CM_REP_COUNTER]);
35932 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
35933 if (ret)
35934@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
35935 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
35936 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
35937 spin_unlock_irq(&cm_id_priv->lock);
35938- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35939+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35940 counter[CM_RTU_COUNTER]);
35941 goto out;
35942 }
35943@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
35944 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
35945 dreq_msg->local_comm_id);
35946 if (!cm_id_priv) {
35947- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35948+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35949 counter[CM_DREQ_COUNTER]);
35950 cm_issue_drep(work->port, work->mad_recv_wc);
35951 return -EINVAL;
35952@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
35953 case IB_CM_MRA_REP_RCVD:
35954 break;
35955 case IB_CM_TIMEWAIT:
35956- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35957+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35958 counter[CM_DREQ_COUNTER]);
35959 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35960 goto unlock;
35961@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
35962 cm_free_msg(msg);
35963 goto deref;
35964 case IB_CM_DREQ_RCVD:
35965- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35966+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35967 counter[CM_DREQ_COUNTER]);
35968 goto unlock;
35969 default:
35970@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
35971 ib_modify_mad(cm_id_priv->av.port->mad_agent,
35972 cm_id_priv->msg, timeout)) {
35973 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
35974- atomic_long_inc(&work->port->
35975+ atomic_long_inc_unchecked(&work->port->
35976 counter_group[CM_RECV_DUPLICATES].
35977 counter[CM_MRA_COUNTER]);
35978 goto out;
35979@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
35980 break;
35981 case IB_CM_MRA_REQ_RCVD:
35982 case IB_CM_MRA_REP_RCVD:
35983- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35984+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35985 counter[CM_MRA_COUNTER]);
35986 /* fall through */
35987 default:
35988@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
35989 case IB_CM_LAP_IDLE:
35990 break;
35991 case IB_CM_MRA_LAP_SENT:
35992- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
35993+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
35994 counter[CM_LAP_COUNTER]);
35995 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
35996 goto unlock;
35997@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
35998 cm_free_msg(msg);
35999 goto deref;
36000 case IB_CM_LAP_RCVD:
36001- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36002+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36003 counter[CM_LAP_COUNTER]);
36004 goto unlock;
36005 default:
36006@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36007 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36008 if (cur_cm_id_priv) {
36009 spin_unlock_irq(&cm.lock);
36010- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36011+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36012 counter[CM_SIDR_REQ_COUNTER]);
36013 goto out; /* Duplicate message. */
36014 }
36015@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36016 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36017 msg->retries = 1;
36018
36019- atomic_long_add(1 + msg->retries,
36020+ atomic_long_add_unchecked(1 + msg->retries,
36021 &port->counter_group[CM_XMIT].counter[attr_index]);
36022 if (msg->retries)
36023- atomic_long_add(msg->retries,
36024+ atomic_long_add_unchecked(msg->retries,
36025 &port->counter_group[CM_XMIT_RETRIES].
36026 counter[attr_index]);
36027
36028@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36029 }
36030
36031 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36032- atomic_long_inc(&port->counter_group[CM_RECV].
36033+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36034 counter[attr_id - CM_ATTR_ID_OFFSET]);
36035
36036 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36037@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36038 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36039
36040 return sprintf(buf, "%ld\n",
36041- atomic_long_read(&group->counter[cm_attr->index]));
36042+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36043 }
36044
36045-static struct sysfs_ops cm_counter_ops = {
36046+static const struct sysfs_ops cm_counter_ops = {
36047 .show = cm_show_counter
36048 };
36049
36050diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
36051index 8fd3a6f..61d8075 100644
36052--- a/drivers/infiniband/core/cma.c
36053+++ b/drivers/infiniband/core/cma.c
36054@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
36055
36056 req.private_data_len = sizeof(struct cma_hdr) +
36057 conn_param->private_data_len;
36058+ if (req.private_data_len < conn_param->private_data_len)
36059+ return -EINVAL;
36060+
36061 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36062 if (!req.private_data)
36063 return -ENOMEM;
36064@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
36065 memset(&req, 0, sizeof req);
36066 offset = cma_user_data_offset(id_priv->id.ps);
36067 req.private_data_len = offset + conn_param->private_data_len;
36068+ if (req.private_data_len < conn_param->private_data_len)
36069+ return -EINVAL;
36070+
36071 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
36072 if (!private_data)
36073 return -ENOMEM;
36074diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36075index 4507043..14ad522 100644
36076--- a/drivers/infiniband/core/fmr_pool.c
36077+++ b/drivers/infiniband/core/fmr_pool.c
36078@@ -97,8 +97,8 @@ struct ib_fmr_pool {
36079
36080 struct task_struct *thread;
36081
36082- atomic_t req_ser;
36083- atomic_t flush_ser;
36084+ atomic_unchecked_t req_ser;
36085+ atomic_unchecked_t flush_ser;
36086
36087 wait_queue_head_t force_wait;
36088 };
36089@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36090 struct ib_fmr_pool *pool = pool_ptr;
36091
36092 do {
36093- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36094+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36095 ib_fmr_batch_release(pool);
36096
36097- atomic_inc(&pool->flush_ser);
36098+ atomic_inc_unchecked(&pool->flush_ser);
36099 wake_up_interruptible(&pool->force_wait);
36100
36101 if (pool->flush_function)
36102@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36103 }
36104
36105 set_current_state(TASK_INTERRUPTIBLE);
36106- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36107+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36108 !kthread_should_stop())
36109 schedule();
36110 __set_current_state(TASK_RUNNING);
36111@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36112 pool->dirty_watermark = params->dirty_watermark;
36113 pool->dirty_len = 0;
36114 spin_lock_init(&pool->pool_lock);
36115- atomic_set(&pool->req_ser, 0);
36116- atomic_set(&pool->flush_ser, 0);
36117+ atomic_set_unchecked(&pool->req_ser, 0);
36118+ atomic_set_unchecked(&pool->flush_ser, 0);
36119 init_waitqueue_head(&pool->force_wait);
36120
36121 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36122@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36123 }
36124 spin_unlock_irq(&pool->pool_lock);
36125
36126- serial = atomic_inc_return(&pool->req_ser);
36127+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36128 wake_up_process(pool->thread);
36129
36130 if (wait_event_interruptible(pool->force_wait,
36131- atomic_read(&pool->flush_ser) - serial >= 0))
36132+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36133 return -EINTR;
36134
36135 return 0;
36136@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36137 } else {
36138 list_add_tail(&fmr->list, &pool->dirty_list);
36139 if (++pool->dirty_len >= pool->dirty_watermark) {
36140- atomic_inc(&pool->req_ser);
36141+ atomic_inc_unchecked(&pool->req_ser);
36142 wake_up_process(pool->thread);
36143 }
36144 }
36145diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
36146index 158a214..1558bb7 100644
36147--- a/drivers/infiniband/core/sysfs.c
36148+++ b/drivers/infiniband/core/sysfs.c
36149@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
36150 return port_attr->show(p, port_attr, buf);
36151 }
36152
36153-static struct sysfs_ops port_sysfs_ops = {
36154+static const struct sysfs_ops port_sysfs_ops = {
36155 .show = port_attr_show
36156 };
36157
36158diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
36159index 5440da0..1194ecb 100644
36160--- a/drivers/infiniband/core/uverbs_marshall.c
36161+++ b/drivers/infiniband/core/uverbs_marshall.c
36162@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
36163 dst->grh.sgid_index = src->grh.sgid_index;
36164 dst->grh.hop_limit = src->grh.hop_limit;
36165 dst->grh.traffic_class = src->grh.traffic_class;
36166+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
36167 dst->dlid = src->dlid;
36168 dst->sl = src->sl;
36169 dst->src_path_bits = src->src_path_bits;
36170 dst->static_rate = src->static_rate;
36171 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
36172 dst->port_num = src->port_num;
36173+ dst->reserved = 0;
36174 }
36175 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
36176
36177 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36178 struct ib_qp_attr *src)
36179 {
36180+ dst->qp_state = src->qp_state;
36181 dst->cur_qp_state = src->cur_qp_state;
36182 dst->path_mtu = src->path_mtu;
36183 dst->path_mig_state = src->path_mig_state;
36184@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
36185 dst->rnr_retry = src->rnr_retry;
36186 dst->alt_port_num = src->alt_port_num;
36187 dst->alt_timeout = src->alt_timeout;
36188+ memset(dst->reserved, 0, sizeof(dst->reserved));
36189 }
36190 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
36191
36192diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
36193index 100da85..62e6b88 100644
36194--- a/drivers/infiniband/hw/ipath/ipath_fs.c
36195+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
36196@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
36197 struct infinipath_counters counters;
36198 struct ipath_devdata *dd;
36199
36200+ pax_track_stack();
36201+
36202 dd = file->f_path.dentry->d_inode->i_private;
36203 dd->ipath_f_read_counters(dd, &counters);
36204
36205diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36206index cbde0cf..afaf55c 100644
36207--- a/drivers/infiniband/hw/nes/nes.c
36208+++ b/drivers/infiniband/hw/nes/nes.c
36209@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36210 LIST_HEAD(nes_adapter_list);
36211 static LIST_HEAD(nes_dev_list);
36212
36213-atomic_t qps_destroyed;
36214+atomic_unchecked_t qps_destroyed;
36215
36216 static unsigned int ee_flsh_adapter;
36217 static unsigned int sysfs_nonidx_addr;
36218@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36219 struct nes_adapter *nesadapter = nesdev->nesadapter;
36220 u32 qp_id;
36221
36222- atomic_inc(&qps_destroyed);
36223+ atomic_inc_unchecked(&qps_destroyed);
36224
36225 /* Free the control structures */
36226
36227diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36228index bcc6abc..9c76b2f 100644
36229--- a/drivers/infiniband/hw/nes/nes.h
36230+++ b/drivers/infiniband/hw/nes/nes.h
36231@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
36232 extern unsigned int wqm_quanta;
36233 extern struct list_head nes_adapter_list;
36234
36235-extern atomic_t cm_connects;
36236-extern atomic_t cm_accepts;
36237-extern atomic_t cm_disconnects;
36238-extern atomic_t cm_closes;
36239-extern atomic_t cm_connecteds;
36240-extern atomic_t cm_connect_reqs;
36241-extern atomic_t cm_rejects;
36242-extern atomic_t mod_qp_timouts;
36243-extern atomic_t qps_created;
36244-extern atomic_t qps_destroyed;
36245-extern atomic_t sw_qps_destroyed;
36246+extern atomic_unchecked_t cm_connects;
36247+extern atomic_unchecked_t cm_accepts;
36248+extern atomic_unchecked_t cm_disconnects;
36249+extern atomic_unchecked_t cm_closes;
36250+extern atomic_unchecked_t cm_connecteds;
36251+extern atomic_unchecked_t cm_connect_reqs;
36252+extern atomic_unchecked_t cm_rejects;
36253+extern atomic_unchecked_t mod_qp_timouts;
36254+extern atomic_unchecked_t qps_created;
36255+extern atomic_unchecked_t qps_destroyed;
36256+extern atomic_unchecked_t sw_qps_destroyed;
36257 extern u32 mh_detected;
36258 extern u32 mh_pauses_sent;
36259 extern u32 cm_packets_sent;
36260@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
36261 extern u32 cm_listens_created;
36262 extern u32 cm_listens_destroyed;
36263 extern u32 cm_backlog_drops;
36264-extern atomic_t cm_loopbacks;
36265-extern atomic_t cm_nodes_created;
36266-extern atomic_t cm_nodes_destroyed;
36267-extern atomic_t cm_accel_dropped_pkts;
36268-extern atomic_t cm_resets_recvd;
36269+extern atomic_unchecked_t cm_loopbacks;
36270+extern atomic_unchecked_t cm_nodes_created;
36271+extern atomic_unchecked_t cm_nodes_destroyed;
36272+extern atomic_unchecked_t cm_accel_dropped_pkts;
36273+extern atomic_unchecked_t cm_resets_recvd;
36274
36275 extern u32 int_mod_timer_init;
36276 extern u32 int_mod_cq_depth_256;
36277diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36278index 73473db..5ed06e8 100644
36279--- a/drivers/infiniband/hw/nes/nes_cm.c
36280+++ b/drivers/infiniband/hw/nes/nes_cm.c
36281@@ -69,11 +69,11 @@ u32 cm_packets_received;
36282 u32 cm_listens_created;
36283 u32 cm_listens_destroyed;
36284 u32 cm_backlog_drops;
36285-atomic_t cm_loopbacks;
36286-atomic_t cm_nodes_created;
36287-atomic_t cm_nodes_destroyed;
36288-atomic_t cm_accel_dropped_pkts;
36289-atomic_t cm_resets_recvd;
36290+atomic_unchecked_t cm_loopbacks;
36291+atomic_unchecked_t cm_nodes_created;
36292+atomic_unchecked_t cm_nodes_destroyed;
36293+atomic_unchecked_t cm_accel_dropped_pkts;
36294+atomic_unchecked_t cm_resets_recvd;
36295
36296 static inline int mini_cm_accelerated(struct nes_cm_core *,
36297 struct nes_cm_node *);
36298@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
36299
36300 static struct nes_cm_core *g_cm_core;
36301
36302-atomic_t cm_connects;
36303-atomic_t cm_accepts;
36304-atomic_t cm_disconnects;
36305-atomic_t cm_closes;
36306-atomic_t cm_connecteds;
36307-atomic_t cm_connect_reqs;
36308-atomic_t cm_rejects;
36309+atomic_unchecked_t cm_connects;
36310+atomic_unchecked_t cm_accepts;
36311+atomic_unchecked_t cm_disconnects;
36312+atomic_unchecked_t cm_closes;
36313+atomic_unchecked_t cm_connecteds;
36314+atomic_unchecked_t cm_connect_reqs;
36315+atomic_unchecked_t cm_rejects;
36316
36317
36318 /**
36319@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36320 cm_node->rem_mac);
36321
36322 add_hte_node(cm_core, cm_node);
36323- atomic_inc(&cm_nodes_created);
36324+ atomic_inc_unchecked(&cm_nodes_created);
36325
36326 return cm_node;
36327 }
36328@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36329 }
36330
36331 atomic_dec(&cm_core->node_cnt);
36332- atomic_inc(&cm_nodes_destroyed);
36333+ atomic_inc_unchecked(&cm_nodes_destroyed);
36334 nesqp = cm_node->nesqp;
36335 if (nesqp) {
36336 nesqp->cm_node = NULL;
36337@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36338
36339 static void drop_packet(struct sk_buff *skb)
36340 {
36341- atomic_inc(&cm_accel_dropped_pkts);
36342+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36343 dev_kfree_skb_any(skb);
36344 }
36345
36346@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36347
36348 int reset = 0; /* whether to send reset in case of err.. */
36349 int passive_state;
36350- atomic_inc(&cm_resets_recvd);
36351+ atomic_inc_unchecked(&cm_resets_recvd);
36352 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36353 " refcnt=%d\n", cm_node, cm_node->state,
36354 atomic_read(&cm_node->ref_count));
36355@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36356 rem_ref_cm_node(cm_node->cm_core, cm_node);
36357 return NULL;
36358 }
36359- atomic_inc(&cm_loopbacks);
36360+ atomic_inc_unchecked(&cm_loopbacks);
36361 loopbackremotenode->loopbackpartner = cm_node;
36362 loopbackremotenode->tcp_cntxt.rcv_wscale =
36363 NES_CM_DEFAULT_RCV_WND_SCALE;
36364@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36365 add_ref_cm_node(cm_node);
36366 } else if (cm_node->state == NES_CM_STATE_TSA) {
36367 rem_ref_cm_node(cm_core, cm_node);
36368- atomic_inc(&cm_accel_dropped_pkts);
36369+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36370 dev_kfree_skb_any(skb);
36371 break;
36372 }
36373@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36374
36375 if ((cm_id) && (cm_id->event_handler)) {
36376 if (issue_disconn) {
36377- atomic_inc(&cm_disconnects);
36378+ atomic_inc_unchecked(&cm_disconnects);
36379 cm_event.event = IW_CM_EVENT_DISCONNECT;
36380 cm_event.status = disconn_status;
36381 cm_event.local_addr = cm_id->local_addr;
36382@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36383 }
36384
36385 if (issue_close) {
36386- atomic_inc(&cm_closes);
36387+ atomic_inc_unchecked(&cm_closes);
36388 nes_disconnect(nesqp, 1);
36389
36390 cm_id->provider_data = nesqp;
36391@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36392
36393 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36394 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36395- atomic_inc(&cm_accepts);
36396+ atomic_inc_unchecked(&cm_accepts);
36397
36398 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36399 atomic_read(&nesvnic->netdev->refcnt));
36400@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36401
36402 struct nes_cm_core *cm_core;
36403
36404- atomic_inc(&cm_rejects);
36405+ atomic_inc_unchecked(&cm_rejects);
36406 cm_node = (struct nes_cm_node *) cm_id->provider_data;
36407 loopback = cm_node->loopbackpartner;
36408 cm_core = cm_node->cm_core;
36409@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36410 ntohl(cm_id->local_addr.sin_addr.s_addr),
36411 ntohs(cm_id->local_addr.sin_port));
36412
36413- atomic_inc(&cm_connects);
36414+ atomic_inc_unchecked(&cm_connects);
36415 nesqp->active_conn = 1;
36416
36417 /* cache the cm_id in the qp */
36418@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36419 if (nesqp->destroyed) {
36420 return;
36421 }
36422- atomic_inc(&cm_connecteds);
36423+ atomic_inc_unchecked(&cm_connecteds);
36424 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36425 " local port 0x%04X. jiffies = %lu.\n",
36426 nesqp->hwqp.qp_id,
36427@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36428
36429 ret = cm_id->event_handler(cm_id, &cm_event);
36430 cm_id->add_ref(cm_id);
36431- atomic_inc(&cm_closes);
36432+ atomic_inc_unchecked(&cm_closes);
36433 cm_event.event = IW_CM_EVENT_CLOSE;
36434 cm_event.status = IW_CM_EVENT_STATUS_OK;
36435 cm_event.provider_data = cm_id->provider_data;
36436@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36437 return;
36438 cm_id = cm_node->cm_id;
36439
36440- atomic_inc(&cm_connect_reqs);
36441+ atomic_inc_unchecked(&cm_connect_reqs);
36442 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36443 cm_node, cm_id, jiffies);
36444
36445@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36446 return;
36447 cm_id = cm_node->cm_id;
36448
36449- atomic_inc(&cm_connect_reqs);
36450+ atomic_inc_unchecked(&cm_connect_reqs);
36451 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36452 cm_node, cm_id, jiffies);
36453
36454diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
36455index e593af3..870694a 100644
36456--- a/drivers/infiniband/hw/nes/nes_nic.c
36457+++ b/drivers/infiniband/hw/nes/nes_nic.c
36458@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36459 target_stat_values[++index] = mh_detected;
36460 target_stat_values[++index] = mh_pauses_sent;
36461 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
36462- target_stat_values[++index] = atomic_read(&cm_connects);
36463- target_stat_values[++index] = atomic_read(&cm_accepts);
36464- target_stat_values[++index] = atomic_read(&cm_disconnects);
36465- target_stat_values[++index] = atomic_read(&cm_connecteds);
36466- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
36467- target_stat_values[++index] = atomic_read(&cm_rejects);
36468- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
36469- target_stat_values[++index] = atomic_read(&qps_created);
36470- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
36471- target_stat_values[++index] = atomic_read(&qps_destroyed);
36472- target_stat_values[++index] = atomic_read(&cm_closes);
36473+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
36474+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
36475+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
36476+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
36477+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
36478+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
36479+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
36480+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
36481+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
36482+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
36483+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
36484 target_stat_values[++index] = cm_packets_sent;
36485 target_stat_values[++index] = cm_packets_bounced;
36486 target_stat_values[++index] = cm_packets_created;
36487@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36488 target_stat_values[++index] = cm_listens_created;
36489 target_stat_values[++index] = cm_listens_destroyed;
36490 target_stat_values[++index] = cm_backlog_drops;
36491- target_stat_values[++index] = atomic_read(&cm_loopbacks);
36492- target_stat_values[++index] = atomic_read(&cm_nodes_created);
36493- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
36494- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
36495- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
36496+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
36497+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
36498+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
36499+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
36500+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
36501 target_stat_values[++index] = int_mod_timer_init;
36502 target_stat_values[++index] = int_mod_cq_depth_1;
36503 target_stat_values[++index] = int_mod_cq_depth_4;
36504diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
36505index a680c42..f914deb 100644
36506--- a/drivers/infiniband/hw/nes/nes_verbs.c
36507+++ b/drivers/infiniband/hw/nes/nes_verbs.c
36508@@ -45,9 +45,9 @@
36509
36510 #include <rdma/ib_umem.h>
36511
36512-atomic_t mod_qp_timouts;
36513-atomic_t qps_created;
36514-atomic_t sw_qps_destroyed;
36515+atomic_unchecked_t mod_qp_timouts;
36516+atomic_unchecked_t qps_created;
36517+atomic_unchecked_t sw_qps_destroyed;
36518
36519 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
36520
36521@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
36522 if (init_attr->create_flags)
36523 return ERR_PTR(-EINVAL);
36524
36525- atomic_inc(&qps_created);
36526+ atomic_inc_unchecked(&qps_created);
36527 switch (init_attr->qp_type) {
36528 case IB_QPT_RC:
36529 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
36530@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
36531 struct iw_cm_event cm_event;
36532 int ret;
36533
36534- atomic_inc(&sw_qps_destroyed);
36535+ atomic_inc_unchecked(&sw_qps_destroyed);
36536 nesqp->destroyed = 1;
36537
36538 /* Blow away the connection if it exists. */
36539diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
36540index ac11be0..3883c04 100644
36541--- a/drivers/input/gameport/gameport.c
36542+++ b/drivers/input/gameport/gameport.c
36543@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
36544 */
36545 static void gameport_init_port(struct gameport *gameport)
36546 {
36547- static atomic_t gameport_no = ATOMIC_INIT(0);
36548+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
36549
36550 __module_get(THIS_MODULE);
36551
36552 mutex_init(&gameport->drv_mutex);
36553 device_initialize(&gameport->dev);
36554- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
36555+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
36556 gameport->dev.bus = &gameport_bus;
36557 gameport->dev.release = gameport_release_port;
36558 if (gameport->parent)
36559diff --git a/drivers/input/input.c b/drivers/input/input.c
36560index c82ae82..8cfb9cb 100644
36561--- a/drivers/input/input.c
36562+++ b/drivers/input/input.c
36563@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
36564 */
36565 int input_register_device(struct input_dev *dev)
36566 {
36567- static atomic_t input_no = ATOMIC_INIT(0);
36568+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
36569 struct input_handler *handler;
36570 const char *path;
36571 int error;
36572@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
36573 dev->setkeycode = input_default_setkeycode;
36574
36575 dev_set_name(&dev->dev, "input%ld",
36576- (unsigned long) atomic_inc_return(&input_no) - 1);
36577+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
36578
36579 error = device_add(&dev->dev);
36580 if (error)
36581diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
36582index ca13a6b..b032b0c 100644
36583--- a/drivers/input/joystick/sidewinder.c
36584+++ b/drivers/input/joystick/sidewinder.c
36585@@ -30,6 +30,7 @@
36586 #include <linux/kernel.h>
36587 #include <linux/module.h>
36588 #include <linux/slab.h>
36589+#include <linux/sched.h>
36590 #include <linux/init.h>
36591 #include <linux/input.h>
36592 #include <linux/gameport.h>
36593@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
36594 unsigned char buf[SW_LENGTH];
36595 int i;
36596
36597+ pax_track_stack();
36598+
36599 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
36600
36601 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
36602diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
36603index 79e3edc..01412b9 100644
36604--- a/drivers/input/joystick/xpad.c
36605+++ b/drivers/input/joystick/xpad.c
36606@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
36607
36608 static int xpad_led_probe(struct usb_xpad *xpad)
36609 {
36610- static atomic_t led_seq = ATOMIC_INIT(0);
36611+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
36612 long led_no;
36613 struct xpad_led *led;
36614 struct led_classdev *led_cdev;
36615@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
36616 if (!led)
36617 return -ENOMEM;
36618
36619- led_no = (long)atomic_inc_return(&led_seq) - 1;
36620+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
36621
36622 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
36623 led->xpad = xpad;
36624diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
36625index 0236f0d..c7327f1 100644
36626--- a/drivers/input/serio/serio.c
36627+++ b/drivers/input/serio/serio.c
36628@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
36629 */
36630 static void serio_init_port(struct serio *serio)
36631 {
36632- static atomic_t serio_no = ATOMIC_INIT(0);
36633+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
36634
36635 __module_get(THIS_MODULE);
36636
36637@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
36638 mutex_init(&serio->drv_mutex);
36639 device_initialize(&serio->dev);
36640 dev_set_name(&serio->dev, "serio%ld",
36641- (long)atomic_inc_return(&serio_no) - 1);
36642+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
36643 serio->dev.bus = &serio_bus;
36644 serio->dev.release = serio_release_port;
36645 if (serio->parent) {
36646diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
36647index 33dcd8d..2783d25 100644
36648--- a/drivers/isdn/gigaset/common.c
36649+++ b/drivers/isdn/gigaset/common.c
36650@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
36651 cs->commands_pending = 0;
36652 cs->cur_at_seq = 0;
36653 cs->gotfwver = -1;
36654- cs->open_count = 0;
36655+ local_set(&cs->open_count, 0);
36656 cs->dev = NULL;
36657 cs->tty = NULL;
36658 cs->tty_dev = NULL;
36659diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
36660index a2f6125..6a70677 100644
36661--- a/drivers/isdn/gigaset/gigaset.h
36662+++ b/drivers/isdn/gigaset/gigaset.h
36663@@ -34,6 +34,7 @@
36664 #include <linux/tty_driver.h>
36665 #include <linux/list.h>
36666 #include <asm/atomic.h>
36667+#include <asm/local.h>
36668
36669 #define GIG_VERSION {0,5,0,0}
36670 #define GIG_COMPAT {0,4,0,0}
36671@@ -446,7 +447,7 @@ struct cardstate {
36672 spinlock_t cmdlock;
36673 unsigned curlen, cmdbytes;
36674
36675- unsigned open_count;
36676+ local_t open_count;
36677 struct tty_struct *tty;
36678 struct tasklet_struct if_wake_tasklet;
36679 unsigned control_state;
36680diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
36681index b3065b8..c7e8cc9 100644
36682--- a/drivers/isdn/gigaset/interface.c
36683+++ b/drivers/isdn/gigaset/interface.c
36684@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
36685 return -ERESTARTSYS; // FIXME -EINTR?
36686 tty->driver_data = cs;
36687
36688- ++cs->open_count;
36689-
36690- if (cs->open_count == 1) {
36691+ if (local_inc_return(&cs->open_count) == 1) {
36692 spin_lock_irqsave(&cs->lock, flags);
36693 cs->tty = tty;
36694 spin_unlock_irqrestore(&cs->lock, flags);
36695@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
36696
36697 if (!cs->connected)
36698 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
36699- else if (!cs->open_count)
36700+ else if (!local_read(&cs->open_count))
36701 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36702 else {
36703- if (!--cs->open_count) {
36704+ if (!local_dec_return(&cs->open_count)) {
36705 spin_lock_irqsave(&cs->lock, flags);
36706 cs->tty = NULL;
36707 spin_unlock_irqrestore(&cs->lock, flags);
36708@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
36709 if (!cs->connected) {
36710 gig_dbg(DEBUG_IF, "not connected");
36711 retval = -ENODEV;
36712- } else if (!cs->open_count)
36713+ } else if (!local_read(&cs->open_count))
36714 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36715 else {
36716 retval = 0;
36717@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
36718 if (!cs->connected) {
36719 gig_dbg(DEBUG_IF, "not connected");
36720 retval = -ENODEV;
36721- } else if (!cs->open_count)
36722+ } else if (!local_read(&cs->open_count))
36723 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36724 else if (cs->mstate != MS_LOCKED) {
36725 dev_warn(cs->dev, "can't write to unlocked device\n");
36726@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
36727 if (!cs->connected) {
36728 gig_dbg(DEBUG_IF, "not connected");
36729 retval = -ENODEV;
36730- } else if (!cs->open_count)
36731+ } else if (!local_read(&cs->open_count))
36732 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36733 else if (cs->mstate != MS_LOCKED) {
36734 dev_warn(cs->dev, "can't write to unlocked device\n");
36735@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
36736
36737 if (!cs->connected)
36738 gig_dbg(DEBUG_IF, "not connected");
36739- else if (!cs->open_count)
36740+ else if (!local_read(&cs->open_count))
36741 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36742 else if (cs->mstate != MS_LOCKED)
36743 dev_warn(cs->dev, "can't write to unlocked device\n");
36744@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
36745
36746 if (!cs->connected)
36747 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
36748- else if (!cs->open_count)
36749+ else if (!local_read(&cs->open_count))
36750 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36751 else {
36752 //FIXME
36753@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
36754
36755 if (!cs->connected)
36756 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
36757- else if (!cs->open_count)
36758+ else if (!local_read(&cs->open_count))
36759 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36760 else {
36761 //FIXME
36762@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
36763 goto out;
36764 }
36765
36766- if (!cs->open_count) {
36767+ if (!local_read(&cs->open_count)) {
36768 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36769 goto out;
36770 }
36771diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
36772index a7c0083..62a7cb6 100644
36773--- a/drivers/isdn/hardware/avm/b1.c
36774+++ b/drivers/isdn/hardware/avm/b1.c
36775@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
36776 }
36777 if (left) {
36778 if (t4file->user) {
36779- if (copy_from_user(buf, dp, left))
36780+ if (left > sizeof buf || copy_from_user(buf, dp, left))
36781 return -EFAULT;
36782 } else {
36783 memcpy(buf, dp, left);
36784@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
36785 }
36786 if (left) {
36787 if (config->user) {
36788- if (copy_from_user(buf, dp, left))
36789+ if (left > sizeof buf || copy_from_user(buf, dp, left))
36790 return -EFAULT;
36791 } else {
36792 memcpy(buf, dp, left);
36793diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
36794index f130724..c373c68 100644
36795--- a/drivers/isdn/hardware/eicon/capidtmf.c
36796+++ b/drivers/isdn/hardware/eicon/capidtmf.c
36797@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
36798 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
36799 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
36800
36801+ pax_track_stack();
36802
36803 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
36804 {
36805diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
36806index 4d425c6..a9be6c4 100644
36807--- a/drivers/isdn/hardware/eicon/capifunc.c
36808+++ b/drivers/isdn/hardware/eicon/capifunc.c
36809@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
36810 IDI_SYNC_REQ req;
36811 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36812
36813+ pax_track_stack();
36814+
36815 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36816
36817 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36818diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
36819index 3029234..ef0d9e2 100644
36820--- a/drivers/isdn/hardware/eicon/diddfunc.c
36821+++ b/drivers/isdn/hardware/eicon/diddfunc.c
36822@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36823 IDI_SYNC_REQ req;
36824 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36825
36826+ pax_track_stack();
36827+
36828 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36829
36830 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36831diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
36832index d36a4c0..11e7d1a 100644
36833--- a/drivers/isdn/hardware/eicon/divasfunc.c
36834+++ b/drivers/isdn/hardware/eicon/divasfunc.c
36835@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36836 IDI_SYNC_REQ req;
36837 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36838
36839+ pax_track_stack();
36840+
36841 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36842
36843 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36844diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
36845index 85784a7..a19ca98 100644
36846--- a/drivers/isdn/hardware/eicon/divasync.h
36847+++ b/drivers/isdn/hardware/eicon/divasync.h
36848@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
36849 } diva_didd_add_adapter_t;
36850 typedef struct _diva_didd_remove_adapter {
36851 IDI_CALL p_request;
36852-} diva_didd_remove_adapter_t;
36853+} __no_const diva_didd_remove_adapter_t;
36854 typedef struct _diva_didd_read_adapter_array {
36855 void * buffer;
36856 dword length;
36857diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
36858index db87d51..7d09acf 100644
36859--- a/drivers/isdn/hardware/eicon/idifunc.c
36860+++ b/drivers/isdn/hardware/eicon/idifunc.c
36861@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36862 IDI_SYNC_REQ req;
36863 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36864
36865+ pax_track_stack();
36866+
36867 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36868
36869 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36870diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
36871index ae89fb8..0fab299 100644
36872--- a/drivers/isdn/hardware/eicon/message.c
36873+++ b/drivers/isdn/hardware/eicon/message.c
36874@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
36875 dword d;
36876 word w;
36877
36878+ pax_track_stack();
36879+
36880 a = plci->adapter;
36881 Id = ((word)plci->Id<<8)|a->Id;
36882 PUT_WORD(&SS_Ind[4],0x0000);
36883@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
36884 word j, n, w;
36885 dword d;
36886
36887+ pax_track_stack();
36888+
36889
36890 for(i=0;i<8;i++) bp_parms[i].length = 0;
36891 for(i=0;i<2;i++) global_config[i].length = 0;
36892@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
36893 const byte llc3[] = {4,3,2,2,6,6,0};
36894 const byte header[] = {0,2,3,3,0,0,0};
36895
36896+ pax_track_stack();
36897+
36898 for(i=0;i<8;i++) bp_parms[i].length = 0;
36899 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
36900 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
36901@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
36902 word appl_number_group_type[MAX_APPL];
36903 PLCI *auxplci;
36904
36905+ pax_track_stack();
36906+
36907 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
36908
36909 if(!a->group_optimization_enabled)
36910diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
36911index a564b75..f3cf8b5 100644
36912--- a/drivers/isdn/hardware/eicon/mntfunc.c
36913+++ b/drivers/isdn/hardware/eicon/mntfunc.c
36914@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
36915 IDI_SYNC_REQ req;
36916 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
36917
36918+ pax_track_stack();
36919+
36920 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
36921
36922 for (x = 0; x < MAX_DESCRIPTORS; x++) {
36923diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
36924index a3bd163..8956575 100644
36925--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
36926+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
36927@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
36928 typedef struct _diva_os_idi_adapter_interface {
36929 diva_init_card_proc_t cleanup_adapter_proc;
36930 diva_cmd_card_proc_t cmd_proc;
36931-} diva_os_idi_adapter_interface_t;
36932+} __no_const diva_os_idi_adapter_interface_t;
36933
36934 typedef struct _diva_os_xdi_adapter {
36935 struct list_head link;
36936diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
36937index adb1e8c..21b590b 100644
36938--- a/drivers/isdn/i4l/isdn_common.c
36939+++ b/drivers/isdn/i4l/isdn_common.c
36940@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
36941 } iocpar;
36942 void __user *argp = (void __user *)arg;
36943
36944+ pax_track_stack();
36945+
36946 #define name iocpar.name
36947 #define bname iocpar.bname
36948 #define iocts iocpar.iocts
36949diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
36950index 90b56ed..5ed3305 100644
36951--- a/drivers/isdn/i4l/isdn_net.c
36952+++ b/drivers/isdn/i4l/isdn_net.c
36953@@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
36954 {
36955 isdn_net_local *lp = netdev_priv(dev);
36956 unsigned char *p;
36957- ushort len = 0;
36958+ int len = 0;
36959
36960 switch (lp->p_encap) {
36961 case ISDN_NET_ENCAP_ETHER:
36962diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
36963index bf7997a..cf091db 100644
36964--- a/drivers/isdn/icn/icn.c
36965+++ b/drivers/isdn/icn/icn.c
36966@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
36967 if (count > len)
36968 count = len;
36969 if (user) {
36970- if (copy_from_user(msg, buf, count))
36971+ if (count > sizeof msg || copy_from_user(msg, buf, count))
36972 return -EFAULT;
36973 } else
36974 memcpy(msg, buf, count);
36975diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
36976index feb0fa4..f76f830 100644
36977--- a/drivers/isdn/mISDN/socket.c
36978+++ b/drivers/isdn/mISDN/socket.c
36979@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36980 if (dev) {
36981 struct mISDN_devinfo di;
36982
36983+ memset(&di, 0, sizeof(di));
36984 di.id = dev->id;
36985 di.Dprotocols = dev->Dprotocols;
36986 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36987@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
36988 if (dev) {
36989 struct mISDN_devinfo di;
36990
36991+ memset(&di, 0, sizeof(di));
36992 di.id = dev->id;
36993 di.Dprotocols = dev->Dprotocols;
36994 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
36995diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
36996index 485be8b..f0225bc 100644
36997--- a/drivers/isdn/sc/interrupt.c
36998+++ b/drivers/isdn/sc/interrupt.c
36999@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37000 }
37001 else if(callid>=0x0000 && callid<=0x7FFF)
37002 {
37003+ int len;
37004+
37005 pr_debug("%s: Got Incoming Call\n",
37006 sc_adapter[card]->devicename);
37007- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
37008- strcpy(setup.eazmsn,
37009- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
37010+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
37011+ sizeof(setup.phone));
37012+ if (len >= sizeof(setup.phone))
37013+ continue;
37014+ len = strlcpy(setup.eazmsn,
37015+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37016+ sizeof(setup.eazmsn));
37017+ if (len >= sizeof(setup.eazmsn))
37018+ continue;
37019 setup.si1 = 7;
37020 setup.si2 = 0;
37021 setup.plan = 0;
37022@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
37023 * Handle a GetMyNumber Rsp
37024 */
37025 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
37026- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
37027+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
37028+ rcvmsg.msg_data.byte_array,
37029+ sizeof(rcvmsg.msg_data.byte_array));
37030 continue;
37031 }
37032
37033diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37034index 8744d24..d1f9a9a 100644
37035--- a/drivers/lguest/core.c
37036+++ b/drivers/lguest/core.c
37037@@ -91,9 +91,17 @@ static __init int map_switcher(void)
37038 * it's worked so far. The end address needs +1 because __get_vm_area
37039 * allocates an extra guard page, so we need space for that.
37040 */
37041+
37042+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37043+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37044+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37045+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37046+#else
37047 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37048 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37049 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37050+#endif
37051+
37052 if (!switcher_vma) {
37053 err = -ENOMEM;
37054 printk("lguest: could not map switcher pages high\n");
37055@@ -118,7 +126,7 @@ static __init int map_switcher(void)
37056 * Now the Switcher is mapped at the right address, we can't fail!
37057 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
37058 */
37059- memcpy(switcher_vma->addr, start_switcher_text,
37060+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37061 end_switcher_text - start_switcher_text);
37062
37063 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37064diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37065index 6ae3888..8b38145 100644
37066--- a/drivers/lguest/x86/core.c
37067+++ b/drivers/lguest/x86/core.c
37068@@ -59,7 +59,7 @@ static struct {
37069 /* Offset from where switcher.S was compiled to where we've copied it */
37070 static unsigned long switcher_offset(void)
37071 {
37072- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37073+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37074 }
37075
37076 /* This cpu's struct lguest_pages. */
37077@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37078 * These copies are pretty cheap, so we do them unconditionally: */
37079 /* Save the current Host top-level page directory.
37080 */
37081+
37082+#ifdef CONFIG_PAX_PER_CPU_PGD
37083+ pages->state.host_cr3 = read_cr3();
37084+#else
37085 pages->state.host_cr3 = __pa(current->mm->pgd);
37086+#endif
37087+
37088 /*
37089 * Set up the Guest's page tables to see this CPU's pages (and no
37090 * other CPU's pages).
37091@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
37092 * compiled-in switcher code and the high-mapped copy we just made.
37093 */
37094 for (i = 0; i < IDT_ENTRIES; i++)
37095- default_idt_entries[i] += switcher_offset();
37096+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37097
37098 /*
37099 * Set up the Switcher's per-cpu areas.
37100@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
37101 * it will be undisturbed when we switch. To change %cs and jump we
37102 * need this structure to feed to Intel's "lcall" instruction.
37103 */
37104- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37105+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37106 lguest_entry.segment = LGUEST_CS;
37107
37108 /*
37109diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37110index 40634b0..4f5855e 100644
37111--- a/drivers/lguest/x86/switcher_32.S
37112+++ b/drivers/lguest/x86/switcher_32.S
37113@@ -87,6 +87,7 @@
37114 #include <asm/page.h>
37115 #include <asm/segment.h>
37116 #include <asm/lguest.h>
37117+#include <asm/processor-flags.h>
37118
37119 // We mark the start of the code to copy
37120 // It's placed in .text tho it's never run here
37121@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37122 // Changes type when we load it: damn Intel!
37123 // For after we switch over our page tables
37124 // That entry will be read-only: we'd crash.
37125+
37126+#ifdef CONFIG_PAX_KERNEXEC
37127+ mov %cr0, %edx
37128+ xor $X86_CR0_WP, %edx
37129+ mov %edx, %cr0
37130+#endif
37131+
37132 movl $(GDT_ENTRY_TSS*8), %edx
37133 ltr %dx
37134
37135@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37136 // Let's clear it again for our return.
37137 // The GDT descriptor of the Host
37138 // Points to the table after two "size" bytes
37139- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37140+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37141 // Clear "used" from type field (byte 5, bit 2)
37142- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37143+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37144+
37145+#ifdef CONFIG_PAX_KERNEXEC
37146+ mov %cr0, %eax
37147+ xor $X86_CR0_WP, %eax
37148+ mov %eax, %cr0
37149+#endif
37150
37151 // Once our page table's switched, the Guest is live!
37152 // The Host fades as we run this final step.
37153@@ -295,13 +309,12 @@ deliver_to_host:
37154 // I consulted gcc, and it gave
37155 // These instructions, which I gladly credit:
37156 leal (%edx,%ebx,8), %eax
37157- movzwl (%eax),%edx
37158- movl 4(%eax), %eax
37159- xorw %ax, %ax
37160- orl %eax, %edx
37161+ movl 4(%eax), %edx
37162+ movw (%eax), %dx
37163 // Now the address of the handler's in %edx
37164 // We call it now: its "iret" drops us home.
37165- jmp *%edx
37166+ ljmp $__KERNEL_CS, $1f
37167+1: jmp *%edx
37168
37169 // Every interrupt can come to us here
37170 // But we must truly tell each apart.
37171diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
37172index 588a5b0..b71db89 100644
37173--- a/drivers/macintosh/macio_asic.c
37174+++ b/drivers/macintosh/macio_asic.c
37175@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
37176 * MacIO is matched against any Apple ID, it's probe() function
37177 * will then decide wether it applies or not
37178 */
37179-static const struct pci_device_id __devinitdata pci_ids [] = { {
37180+static const struct pci_device_id __devinitconst pci_ids [] = { {
37181 .vendor = PCI_VENDOR_ID_APPLE,
37182 .device = PCI_ANY_ID,
37183 .subvendor = PCI_ANY_ID,
37184diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
37185index a348bb0..ecd9b3f 100644
37186--- a/drivers/macintosh/via-pmu-backlight.c
37187+++ b/drivers/macintosh/via-pmu-backlight.c
37188@@ -15,7 +15,7 @@
37189
37190 #define MAX_PMU_LEVEL 0xFF
37191
37192-static struct backlight_ops pmu_backlight_data;
37193+static const struct backlight_ops pmu_backlight_data;
37194 static DEFINE_SPINLOCK(pmu_backlight_lock);
37195 static int sleeping, uses_pmu_bl;
37196 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
37197@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
37198 return bd->props.brightness;
37199 }
37200
37201-static struct backlight_ops pmu_backlight_data = {
37202+static const struct backlight_ops pmu_backlight_data = {
37203 .get_brightness = pmu_backlight_get_brightness,
37204 .update_status = pmu_backlight_update_status,
37205
37206diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
37207index 6f308a4..b5f7ff7 100644
37208--- a/drivers/macintosh/via-pmu.c
37209+++ b/drivers/macintosh/via-pmu.c
37210@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
37211 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
37212 }
37213
37214-static struct platform_suspend_ops pmu_pm_ops = {
37215+static const struct platform_suspend_ops pmu_pm_ops = {
37216 .enter = powerbook_sleep,
37217 .valid = pmu_sleep_valid,
37218 };
37219diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37220index 818b617..4656e38 100644
37221--- a/drivers/md/dm-ioctl.c
37222+++ b/drivers/md/dm-ioctl.c
37223@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37224 cmd == DM_LIST_VERSIONS_CMD)
37225 return 0;
37226
37227- if ((cmd == DM_DEV_CREATE_CMD)) {
37228+ if (cmd == DM_DEV_CREATE_CMD) {
37229 if (!*param->name) {
37230 DMWARN("name not supplied when creating device");
37231 return -EINVAL;
37232diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37233index 6021d0a..a878643 100644
37234--- a/drivers/md/dm-raid1.c
37235+++ b/drivers/md/dm-raid1.c
37236@@ -41,7 +41,7 @@ enum dm_raid1_error {
37237
37238 struct mirror {
37239 struct mirror_set *ms;
37240- atomic_t error_count;
37241+ atomic_unchecked_t error_count;
37242 unsigned long error_type;
37243 struct dm_dev *dev;
37244 sector_t offset;
37245@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37246 * simple way to tell if a device has encountered
37247 * errors.
37248 */
37249- atomic_inc(&m->error_count);
37250+ atomic_inc_unchecked(&m->error_count);
37251
37252 if (test_and_set_bit(error_type, &m->error_type))
37253 return;
37254@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37255 }
37256
37257 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
37258- if (!atomic_read(&new->error_count)) {
37259+ if (!atomic_read_unchecked(&new->error_count)) {
37260 set_default_mirror(new);
37261 break;
37262 }
37263@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37264 struct mirror *m = get_default_mirror(ms);
37265
37266 do {
37267- if (likely(!atomic_read(&m->error_count)))
37268+ if (likely(!atomic_read_unchecked(&m->error_count)))
37269 return m;
37270
37271 if (m-- == ms->mirror)
37272@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
37273 {
37274 struct mirror *default_mirror = get_default_mirror(m->ms);
37275
37276- return !atomic_read(&default_mirror->error_count);
37277+ return !atomic_read_unchecked(&default_mirror->error_count);
37278 }
37279
37280 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37281@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37282 */
37283 if (likely(region_in_sync(ms, region, 1)))
37284 m = choose_mirror(ms, bio->bi_sector);
37285- else if (m && atomic_read(&m->error_count))
37286+ else if (m && atomic_read_unchecked(&m->error_count))
37287 m = NULL;
37288
37289 if (likely(m))
37290@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37291 }
37292
37293 ms->mirror[mirror].ms = ms;
37294- atomic_set(&(ms->mirror[mirror].error_count), 0);
37295+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37296 ms->mirror[mirror].error_type = 0;
37297 ms->mirror[mirror].offset = offset;
37298
37299@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
37300 */
37301 static char device_status_char(struct mirror *m)
37302 {
37303- if (!atomic_read(&(m->error_count)))
37304+ if (!atomic_read_unchecked(&(m->error_count)))
37305 return 'A';
37306
37307 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
37308diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37309index bd58703..9f26571 100644
37310--- a/drivers/md/dm-stripe.c
37311+++ b/drivers/md/dm-stripe.c
37312@@ -20,7 +20,7 @@ struct stripe {
37313 struct dm_dev *dev;
37314 sector_t physical_start;
37315
37316- atomic_t error_count;
37317+ atomic_unchecked_t error_count;
37318 };
37319
37320 struct stripe_c {
37321@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37322 kfree(sc);
37323 return r;
37324 }
37325- atomic_set(&(sc->stripe[i].error_count), 0);
37326+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37327 }
37328
37329 ti->private = sc;
37330@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
37331 DMEMIT("%d ", sc->stripes);
37332 for (i = 0; i < sc->stripes; i++) {
37333 DMEMIT("%s ", sc->stripe[i].dev->name);
37334- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37335+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37336 'D' : 'A';
37337 }
37338 buffer[i] = '\0';
37339@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
37340 */
37341 for (i = 0; i < sc->stripes; i++)
37342 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37343- atomic_inc(&(sc->stripe[i].error_count));
37344- if (atomic_read(&(sc->stripe[i].error_count)) <
37345+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
37346+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37347 DM_IO_ERROR_THRESHOLD)
37348 queue_work(kstriped, &sc->kstriped_ws);
37349 }
37350diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
37351index 4b04590..13a77b2 100644
37352--- a/drivers/md/dm-sysfs.c
37353+++ b/drivers/md/dm-sysfs.c
37354@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
37355 NULL,
37356 };
37357
37358-static struct sysfs_ops dm_sysfs_ops = {
37359+static const struct sysfs_ops dm_sysfs_ops = {
37360 .show = dm_attr_show,
37361 };
37362
37363diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37364index 03345bb..332250d 100644
37365--- a/drivers/md/dm-table.c
37366+++ b/drivers/md/dm-table.c
37367@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37368 if (!dev_size)
37369 return 0;
37370
37371- if ((start >= dev_size) || (start + len > dev_size)) {
37372+ if ((start >= dev_size) || (len > dev_size - start)) {
37373 DMWARN("%s: %s too small for target: "
37374 "start=%llu, len=%llu, dev_size=%llu",
37375 dm_device_name(ti->table->md), bdevname(bdev, b),
37376diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37377index c988ac2..c418141 100644
37378--- a/drivers/md/dm.c
37379+++ b/drivers/md/dm.c
37380@@ -165,9 +165,9 @@ struct mapped_device {
37381 /*
37382 * Event handling.
37383 */
37384- atomic_t event_nr;
37385+ atomic_unchecked_t event_nr;
37386 wait_queue_head_t eventq;
37387- atomic_t uevent_seq;
37388+ atomic_unchecked_t uevent_seq;
37389 struct list_head uevent_list;
37390 spinlock_t uevent_lock; /* Protect access to uevent_list */
37391
37392@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
37393 rwlock_init(&md->map_lock);
37394 atomic_set(&md->holders, 1);
37395 atomic_set(&md->open_count, 0);
37396- atomic_set(&md->event_nr, 0);
37397- atomic_set(&md->uevent_seq, 0);
37398+ atomic_set_unchecked(&md->event_nr, 0);
37399+ atomic_set_unchecked(&md->uevent_seq, 0);
37400 INIT_LIST_HEAD(&md->uevent_list);
37401 spin_lock_init(&md->uevent_lock);
37402
37403@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
37404
37405 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37406
37407- atomic_inc(&md->event_nr);
37408+ atomic_inc_unchecked(&md->event_nr);
37409 wake_up(&md->eventq);
37410 }
37411
37412@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37413
37414 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37415 {
37416- return atomic_add_return(1, &md->uevent_seq);
37417+ return atomic_add_return_unchecked(1, &md->uevent_seq);
37418 }
37419
37420 uint32_t dm_get_event_nr(struct mapped_device *md)
37421 {
37422- return atomic_read(&md->event_nr);
37423+ return atomic_read_unchecked(&md->event_nr);
37424 }
37425
37426 int dm_wait_event(struct mapped_device *md, int event_nr)
37427 {
37428 return wait_event_interruptible(md->eventq,
37429- (event_nr != atomic_read(&md->event_nr)));
37430+ (event_nr != atomic_read_unchecked(&md->event_nr)));
37431 }
37432
37433 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37434diff --git a/drivers/md/md.c b/drivers/md/md.c
37435index 4ce6e2f..7a9530a 100644
37436--- a/drivers/md/md.c
37437+++ b/drivers/md/md.c
37438@@ -153,10 +153,10 @@ static int start_readonly;
37439 * start build, activate spare
37440 */
37441 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37442-static atomic_t md_event_count;
37443+static atomic_unchecked_t md_event_count;
37444 void md_new_event(mddev_t *mddev)
37445 {
37446- atomic_inc(&md_event_count);
37447+ atomic_inc_unchecked(&md_event_count);
37448 wake_up(&md_event_waiters);
37449 }
37450 EXPORT_SYMBOL_GPL(md_new_event);
37451@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
37452 */
37453 static void md_new_event_inintr(mddev_t *mddev)
37454 {
37455- atomic_inc(&md_event_count);
37456+ atomic_inc_unchecked(&md_event_count);
37457 wake_up(&md_event_waiters);
37458 }
37459
37460@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
37461
37462 rdev->preferred_minor = 0xffff;
37463 rdev->data_offset = le64_to_cpu(sb->data_offset);
37464- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37465+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37466
37467 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
37468 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
37469@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
37470 else
37471 sb->resync_offset = cpu_to_le64(0);
37472
37473- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
37474+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
37475
37476 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
37477 sb->size = cpu_to_le64(mddev->dev_sectors);
37478@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
37479 static ssize_t
37480 errors_show(mdk_rdev_t *rdev, char *page)
37481 {
37482- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
37483+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
37484 }
37485
37486 static ssize_t
37487@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
37488 char *e;
37489 unsigned long n = simple_strtoul(buf, &e, 10);
37490 if (*buf && (*e == 0 || *e == '\n')) {
37491- atomic_set(&rdev->corrected_errors, n);
37492+ atomic_set_unchecked(&rdev->corrected_errors, n);
37493 return len;
37494 }
37495 return -EINVAL;
37496@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
37497 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
37498 kfree(rdev);
37499 }
37500-static struct sysfs_ops rdev_sysfs_ops = {
37501+static const struct sysfs_ops rdev_sysfs_ops = {
37502 .show = rdev_attr_show,
37503 .store = rdev_attr_store,
37504 };
37505@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
37506 rdev->data_offset = 0;
37507 rdev->sb_events = 0;
37508 atomic_set(&rdev->nr_pending, 0);
37509- atomic_set(&rdev->read_errors, 0);
37510- atomic_set(&rdev->corrected_errors, 0);
37511+ atomic_set_unchecked(&rdev->read_errors, 0);
37512+ atomic_set_unchecked(&rdev->corrected_errors, 0);
37513
37514 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
37515 if (!size) {
37516@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
37517 kfree(mddev);
37518 }
37519
37520-static struct sysfs_ops md_sysfs_ops = {
37521+static const struct sysfs_ops md_sysfs_ops = {
37522 .show = md_attr_show,
37523 .store = md_attr_store,
37524 };
37525@@ -4482,7 +4482,8 @@ out:
37526 err = 0;
37527 blk_integrity_unregister(disk);
37528 md_new_event(mddev);
37529- sysfs_notify_dirent(mddev->sysfs_state);
37530+ if (mddev->sysfs_state)
37531+ sysfs_notify_dirent(mddev->sysfs_state);
37532 return err;
37533 }
37534
37535@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
37536
37537 spin_unlock(&pers_lock);
37538 seq_printf(seq, "\n");
37539- mi->event = atomic_read(&md_event_count);
37540+ mi->event = atomic_read_unchecked(&md_event_count);
37541 return 0;
37542 }
37543 if (v == (void*)2) {
37544@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
37545 chunk_kb ? "KB" : "B");
37546 if (bitmap->file) {
37547 seq_printf(seq, ", file: ");
37548- seq_path(seq, &bitmap->file->f_path, " \t\n");
37549+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
37550 }
37551
37552 seq_printf(seq, "\n");
37553@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
37554 else {
37555 struct seq_file *p = file->private_data;
37556 p->private = mi;
37557- mi->event = atomic_read(&md_event_count);
37558+ mi->event = atomic_read_unchecked(&md_event_count);
37559 }
37560 return error;
37561 }
37562@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
37563 /* always allow read */
37564 mask = POLLIN | POLLRDNORM;
37565
37566- if (mi->event != atomic_read(&md_event_count))
37567+ if (mi->event != atomic_read_unchecked(&md_event_count))
37568 mask |= POLLERR | POLLPRI;
37569 return mask;
37570 }
37571@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
37572 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
37573 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
37574 (int)part_stat_read(&disk->part0, sectors[1]) -
37575- atomic_read(&disk->sync_io);
37576+ atomic_read_unchecked(&disk->sync_io);
37577 /* sync IO will cause sync_io to increase before the disk_stats
37578 * as sync_io is counted when a request starts, and
37579 * disk_stats is counted when it completes.
37580diff --git a/drivers/md/md.h b/drivers/md/md.h
37581index 87430fe..0024a4c 100644
37582--- a/drivers/md/md.h
37583+++ b/drivers/md/md.h
37584@@ -94,10 +94,10 @@ struct mdk_rdev_s
37585 * only maintained for arrays that
37586 * support hot removal
37587 */
37588- atomic_t read_errors; /* number of consecutive read errors that
37589+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
37590 * we have tried to ignore.
37591 */
37592- atomic_t corrected_errors; /* number of corrected read errors,
37593+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
37594 * for reporting to userspace and storing
37595 * in superblock.
37596 */
37597@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
37598
37599 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
37600 {
37601- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37602+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37603 }
37604
37605 struct mdk_personality
37606diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
37607index 968cb14..f0ad2e4 100644
37608--- a/drivers/md/raid1.c
37609+++ b/drivers/md/raid1.c
37610@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
37611 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
37612 continue;
37613 rdev = conf->mirrors[d].rdev;
37614- atomic_add(s, &rdev->corrected_errors);
37615+ atomic_add_unchecked(s, &rdev->corrected_errors);
37616 if (sync_page_io(rdev->bdev,
37617 sect + rdev->data_offset,
37618 s<<9,
37619@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
37620 /* Well, this device is dead */
37621 md_error(mddev, rdev);
37622 else {
37623- atomic_add(s, &rdev->corrected_errors);
37624+ atomic_add_unchecked(s, &rdev->corrected_errors);
37625 printk(KERN_INFO
37626 "raid1:%s: read error corrected "
37627 "(%d sectors at %llu on %s)\n",
37628diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
37629index 1b4e232..cf0f534b 100644
37630--- a/drivers/md/raid10.c
37631+++ b/drivers/md/raid10.c
37632@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
37633 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
37634 set_bit(R10BIO_Uptodate, &r10_bio->state);
37635 else {
37636- atomic_add(r10_bio->sectors,
37637+ atomic_add_unchecked(r10_bio->sectors,
37638 &conf->mirrors[d].rdev->corrected_errors);
37639 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
37640 md_error(r10_bio->mddev,
37641@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
37642 test_bit(In_sync, &rdev->flags)) {
37643 atomic_inc(&rdev->nr_pending);
37644 rcu_read_unlock();
37645- atomic_add(s, &rdev->corrected_errors);
37646+ atomic_add_unchecked(s, &rdev->corrected_errors);
37647 if (sync_page_io(rdev->bdev,
37648 r10_bio->devs[sl].addr +
37649 sect + rdev->data_offset,
37650diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
37651index 883215d..675bf47 100644
37652--- a/drivers/md/raid5.c
37653+++ b/drivers/md/raid5.c
37654@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
37655 bi->bi_next = NULL;
37656 if ((rw & WRITE) &&
37657 test_bit(R5_ReWrite, &sh->dev[i].flags))
37658- atomic_add(STRIPE_SECTORS,
37659+ atomic_add_unchecked(STRIPE_SECTORS,
37660 &rdev->corrected_errors);
37661 generic_make_request(bi);
37662 } else {
37663@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
37664 clear_bit(R5_ReadError, &sh->dev[i].flags);
37665 clear_bit(R5_ReWrite, &sh->dev[i].flags);
37666 }
37667- if (atomic_read(&conf->disks[i].rdev->read_errors))
37668- atomic_set(&conf->disks[i].rdev->read_errors, 0);
37669+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
37670+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
37671 } else {
37672 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
37673 int retry = 0;
37674 rdev = conf->disks[i].rdev;
37675
37676 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
37677- atomic_inc(&rdev->read_errors);
37678+ atomic_inc_unchecked(&rdev->read_errors);
37679 if (conf->mddev->degraded >= conf->max_degraded)
37680 printk_rl(KERN_WARNING
37681 "raid5:%s: read error not correctable "
37682@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
37683 (unsigned long long)(sh->sector
37684 + rdev->data_offset),
37685 bdn);
37686- else if (atomic_read(&rdev->read_errors)
37687+ else if (atomic_read_unchecked(&rdev->read_errors)
37688 > conf->max_nr_stripes)
37689 printk(KERN_WARNING
37690 "raid5:%s: Too many read errors, failing device %s.\n",
37691@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
37692 sector_t r_sector;
37693 struct stripe_head sh2;
37694
37695+ pax_track_stack();
37696
37697 chunk_offset = sector_div(new_sector, sectors_per_chunk);
37698 stripe = new_sector;
37699diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
37700index 05bde9c..2f31d40 100644
37701--- a/drivers/media/common/saa7146_hlp.c
37702+++ b/drivers/media/common/saa7146_hlp.c
37703@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
37704
37705 int x[32], y[32], w[32], h[32];
37706
37707+ pax_track_stack();
37708+
37709 /* clear out memory */
37710 memset(&line_list[0], 0x00, sizeof(u32)*32);
37711 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
37712diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
37713index cb22da5..82b686e 100644
37714--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
37715+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
37716@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
37717 u8 buf[HOST_LINK_BUF_SIZE];
37718 int i;
37719
37720+ pax_track_stack();
37721+
37722 dprintk("%s\n", __func__);
37723
37724 /* check if we have space for a link buf in the rx_buffer */
37725@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
37726 unsigned long timeout;
37727 int written;
37728
37729+ pax_track_stack();
37730+
37731 dprintk("%s\n", __func__);
37732
37733 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
37734diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
37735index 2fe05d0..a3289c4 100644
37736--- a/drivers/media/dvb/dvb-core/dvb_demux.h
37737+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
37738@@ -71,7 +71,7 @@ struct dvb_demux_feed {
37739 union {
37740 dmx_ts_cb ts;
37741 dmx_section_cb sec;
37742- } cb;
37743+ } __no_const cb;
37744
37745 struct dvb_demux *demux;
37746 void *priv;
37747diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
37748index 94159b9..376bd8e 100644
37749--- a/drivers/media/dvb/dvb-core/dvbdev.c
37750+++ b/drivers/media/dvb/dvb-core/dvbdev.c
37751@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
37752 const struct dvb_device *template, void *priv, int type)
37753 {
37754 struct dvb_device *dvbdev;
37755- struct file_operations *dvbdevfops;
37756+ file_operations_no_const *dvbdevfops;
37757 struct device *clsdev;
37758 int minor;
37759 int id;
37760diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
37761index 2a53dd0..db8c07a 100644
37762--- a/drivers/media/dvb/dvb-usb/cxusb.c
37763+++ b/drivers/media/dvb/dvb-usb/cxusb.c
37764@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
37765 struct dib0700_adapter_state {
37766 int (*set_param_save) (struct dvb_frontend *,
37767 struct dvb_frontend_parameters *);
37768-};
37769+} __no_const;
37770
37771 static int dib7070_set_param_override(struct dvb_frontend *fe,
37772 struct dvb_frontend_parameters *fep)
37773diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
37774index db7f7f7..f55e96f 100644
37775--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
37776+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
37777@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
37778
37779 u8 buf[260];
37780
37781+ pax_track_stack();
37782+
37783 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
37784 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
37785
37786diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
37787index 524acf5..5ffc403 100644
37788--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
37789+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
37790@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
37791
37792 struct dib0700_adapter_state {
37793 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
37794-};
37795+} __no_const;
37796
37797 /* Hauppauge Nova-T 500 (aka Bristol)
37798 * has a LNA on GPIO0 which is enabled by setting 1 */
37799diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
37800index ba91735..4261d84 100644
37801--- a/drivers/media/dvb/frontends/dib3000.h
37802+++ b/drivers/media/dvb/frontends/dib3000.h
37803@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
37804 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
37805 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
37806 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
37807-};
37808+} __no_const;
37809
37810 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
37811 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
37812diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
37813index c709ce6..b3fe620 100644
37814--- a/drivers/media/dvb/frontends/or51211.c
37815+++ b/drivers/media/dvb/frontends/or51211.c
37816@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
37817 u8 tudata[585];
37818 int i;
37819
37820+ pax_track_stack();
37821+
37822 dprintk("Firmware is %zd bytes\n",fw->size);
37823
37824 /* Get eprom data */
37825diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
37826index 482d0f3..ee1e202 100644
37827--- a/drivers/media/radio/radio-cadet.c
37828+++ b/drivers/media/radio/radio-cadet.c
37829@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
37830 while (i < count && dev->rdsin != dev->rdsout)
37831 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
37832
37833- if (copy_to_user(data, readbuf, i))
37834+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
37835 return -EFAULT;
37836 return i;
37837 }
37838diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
37839index 6dd51e2..0359b92 100644
37840--- a/drivers/media/video/cx18/cx18-driver.c
37841+++ b/drivers/media/video/cx18/cx18-driver.c
37842@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
37843
37844 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
37845
37846-static atomic_t cx18_instance = ATOMIC_INIT(0);
37847+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
37848
37849 /* Parameter declarations */
37850 static int cardtype[CX18_MAX_CARDS];
37851@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
37852 struct i2c_client c;
37853 u8 eedata[256];
37854
37855+ pax_track_stack();
37856+
37857 memset(&c, 0, sizeof(c));
37858 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
37859 c.adapter = &cx->i2c_adap[0];
37860@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
37861 struct cx18 *cx;
37862
37863 /* FIXME - module parameter arrays constrain max instances */
37864- i = atomic_inc_return(&cx18_instance) - 1;
37865+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
37866 if (i >= CX18_MAX_CARDS) {
37867 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
37868 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
37869diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
37870index 463ec34..2f4625a 100644
37871--- a/drivers/media/video/ivtv/ivtv-driver.c
37872+++ b/drivers/media/video/ivtv/ivtv-driver.c
37873@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
37874 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
37875
37876 /* ivtv instance counter */
37877-static atomic_t ivtv_instance = ATOMIC_INIT(0);
37878+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
37879
37880 /* Parameter declarations */
37881 static int cardtype[IVTV_MAX_CARDS];
37882diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
37883index 5fc4ac0..652a54a 100644
37884--- a/drivers/media/video/omap24xxcam.c
37885+++ b/drivers/media/video/omap24xxcam.c
37886@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
37887 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
37888
37889 do_gettimeofday(&vb->ts);
37890- vb->field_count = atomic_add_return(2, &fh->field_count);
37891+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
37892 if (csr & csr_error) {
37893 vb->state = VIDEOBUF_ERROR;
37894 if (!atomic_read(&fh->cam->in_reset)) {
37895diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
37896index 2ce67f5..cf26a5b 100644
37897--- a/drivers/media/video/omap24xxcam.h
37898+++ b/drivers/media/video/omap24xxcam.h
37899@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
37900 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
37901 struct videobuf_queue vbq;
37902 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
37903- atomic_t field_count; /* field counter for videobuf_buffer */
37904+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
37905 /* accessing cam here doesn't need serialisation: it's constant */
37906 struct omap24xxcam_device *cam;
37907 };
37908diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
37909index 299afa4..eb47459 100644
37910--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
37911+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
37912@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
37913 u8 *eeprom;
37914 struct tveeprom tvdata;
37915
37916+ pax_track_stack();
37917+
37918 memset(&tvdata,0,sizeof(tvdata));
37919
37920 eeprom = pvr2_eeprom_fetch(hdw);
37921diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37922index 5b152ff..3320638 100644
37923--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37924+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
37925@@ -195,7 +195,7 @@ struct pvr2_hdw {
37926
37927 /* I2C stuff */
37928 struct i2c_adapter i2c_adap;
37929- struct i2c_algorithm i2c_algo;
37930+ i2c_algorithm_no_const i2c_algo;
37931 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
37932 int i2c_cx25840_hack_state;
37933 int i2c_linked;
37934diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
37935index 1eabff6..8e2313a 100644
37936--- a/drivers/media/video/saa7134/saa6752hs.c
37937+++ b/drivers/media/video/saa7134/saa6752hs.c
37938@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
37939 unsigned char localPAT[256];
37940 unsigned char localPMT[256];
37941
37942+ pax_track_stack();
37943+
37944 /* Set video format - must be done first as it resets other settings */
37945 set_reg8(client, 0x41, h->video_format);
37946
37947diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
37948index 9c1d3ac..b1b49e9 100644
37949--- a/drivers/media/video/saa7164/saa7164-cmd.c
37950+++ b/drivers/media/video/saa7164/saa7164-cmd.c
37951@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
37952 wait_queue_head_t *q = 0;
37953 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37954
37955+ pax_track_stack();
37956+
37957 /* While any outstand message on the bus exists... */
37958 do {
37959
37960@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
37961 u8 tmp[512];
37962 dprintk(DBGLVL_CMD, "%s()\n", __func__);
37963
37964+ pax_track_stack();
37965+
37966 while (loop) {
37967
37968 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
37969diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
37970index b085496..cde0270 100644
37971--- a/drivers/media/video/usbvideo/ibmcam.c
37972+++ b/drivers/media/video/usbvideo/ibmcam.c
37973@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
37974 static int __init ibmcam_init(void)
37975 {
37976 struct usbvideo_cb cbTbl;
37977- memset(&cbTbl, 0, sizeof(cbTbl));
37978- cbTbl.probe = ibmcam_probe;
37979- cbTbl.setupOnOpen = ibmcam_setup_on_open;
37980- cbTbl.videoStart = ibmcam_video_start;
37981- cbTbl.videoStop = ibmcam_video_stop;
37982- cbTbl.processData = ibmcam_ProcessIsocData;
37983- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37984- cbTbl.adjustPicture = ibmcam_adjust_picture;
37985- cbTbl.getFPS = ibmcam_calculate_fps;
37986+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
37987+ *(void **)&cbTbl.probe = ibmcam_probe;
37988+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
37989+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
37990+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
37991+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
37992+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
37993+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
37994+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
37995 return usbvideo_register(
37996 &cams,
37997 MAX_IBMCAM,
37998diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
37999index 31d57f2..600b735 100644
38000--- a/drivers/media/video/usbvideo/konicawc.c
38001+++ b/drivers/media/video/usbvideo/konicawc.c
38002@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
38003 int error;
38004
38005 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38006- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38007+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38008
38009 cam->input = input_dev = input_allocate_device();
38010 if (!input_dev) {
38011@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
38012 struct usbvideo_cb cbTbl;
38013 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
38014 DRIVER_DESC "\n");
38015- memset(&cbTbl, 0, sizeof(cbTbl));
38016- cbTbl.probe = konicawc_probe;
38017- cbTbl.setupOnOpen = konicawc_setup_on_open;
38018- cbTbl.processData = konicawc_process_isoc;
38019- cbTbl.getFPS = konicawc_calculate_fps;
38020- cbTbl.setVideoMode = konicawc_set_video_mode;
38021- cbTbl.startDataPump = konicawc_start_data;
38022- cbTbl.stopDataPump = konicawc_stop_data;
38023- cbTbl.adjustPicture = konicawc_adjust_picture;
38024- cbTbl.userFree = konicawc_free_uvd;
38025+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
38026+ *(void **)&cbTbl.probe = konicawc_probe;
38027+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
38028+ *(void **)&cbTbl.processData = konicawc_process_isoc;
38029+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
38030+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
38031+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
38032+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
38033+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
38034+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
38035 return usbvideo_register(
38036 &cams,
38037 MAX_CAMERAS,
38038diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
38039index 803d3e4..c4d1b96 100644
38040--- a/drivers/media/video/usbvideo/quickcam_messenger.c
38041+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
38042@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
38043 int error;
38044
38045 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
38046- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38047+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
38048
38049 cam->input = input_dev = input_allocate_device();
38050 if (!input_dev) {
38051diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
38052index fbd1b63..292f9f0 100644
38053--- a/drivers/media/video/usbvideo/ultracam.c
38054+++ b/drivers/media/video/usbvideo/ultracam.c
38055@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
38056 {
38057 struct usbvideo_cb cbTbl;
38058 memset(&cbTbl, 0, sizeof(cbTbl));
38059- cbTbl.probe = ultracam_probe;
38060- cbTbl.setupOnOpen = ultracam_setup_on_open;
38061- cbTbl.videoStart = ultracam_video_start;
38062- cbTbl.videoStop = ultracam_video_stop;
38063- cbTbl.processData = ultracam_ProcessIsocData;
38064- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38065- cbTbl.adjustPicture = ultracam_adjust_picture;
38066- cbTbl.getFPS = ultracam_calculate_fps;
38067+ *(void **)&cbTbl.probe = ultracam_probe;
38068+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
38069+ *(void **)&cbTbl.videoStart = ultracam_video_start;
38070+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
38071+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
38072+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
38073+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
38074+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
38075 return usbvideo_register(
38076 &cams,
38077 MAX_CAMERAS,
38078diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
38079index dea8b32..34f6878 100644
38080--- a/drivers/media/video/usbvideo/usbvideo.c
38081+++ b/drivers/media/video/usbvideo/usbvideo.c
38082@@ -697,15 +697,15 @@ int usbvideo_register(
38083 __func__, cams, base_size, num_cams);
38084
38085 /* Copy callbacks, apply defaults for those that are not set */
38086- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
38087+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
38088 if (cams->cb.getFrame == NULL)
38089- cams->cb.getFrame = usbvideo_GetFrame;
38090+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
38091 if (cams->cb.disconnect == NULL)
38092- cams->cb.disconnect = usbvideo_Disconnect;
38093+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
38094 if (cams->cb.startDataPump == NULL)
38095- cams->cb.startDataPump = usbvideo_StartDataPump;
38096+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
38097 if (cams->cb.stopDataPump == NULL)
38098- cams->cb.stopDataPump = usbvideo_StopDataPump;
38099+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
38100
38101 cams->num_cameras = num_cams;
38102 cams->cam = (struct uvd *) &cams[1];
38103diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
38104index c66985b..7fa143a 100644
38105--- a/drivers/media/video/usbvideo/usbvideo.h
38106+++ b/drivers/media/video/usbvideo/usbvideo.h
38107@@ -268,7 +268,7 @@ struct usbvideo_cb {
38108 int (*startDataPump)(struct uvd *uvd);
38109 void (*stopDataPump)(struct uvd *uvd);
38110 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
38111-};
38112+} __no_const;
38113
38114 struct usbvideo {
38115 int num_cameras; /* As allocated */
38116diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
38117index e0f91e4..37554ea 100644
38118--- a/drivers/media/video/usbvision/usbvision-core.c
38119+++ b/drivers/media/video/usbvision/usbvision-core.c
38120@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
38121 unsigned char rv, gv, bv;
38122 static unsigned char *Y, *U, *V;
38123
38124+ pax_track_stack();
38125+
38126 frame = usbvision->curFrame;
38127 imageSize = frame->frmwidth * frame->frmheight;
38128 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
38129diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
38130index 0d06e7c..3d17d24 100644
38131--- a/drivers/media/video/v4l2-device.c
38132+++ b/drivers/media/video/v4l2-device.c
38133@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
38134 EXPORT_SYMBOL_GPL(v4l2_device_register);
38135
38136 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
38137- atomic_t *instance)
38138+ atomic_unchecked_t *instance)
38139 {
38140- int num = atomic_inc_return(instance) - 1;
38141+ int num = atomic_inc_return_unchecked(instance) - 1;
38142 int len = strlen(basename);
38143
38144 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
38145diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
38146index 032ebae..6a3532c 100644
38147--- a/drivers/media/video/videobuf-dma-sg.c
38148+++ b/drivers/media/video/videobuf-dma-sg.c
38149@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
38150 {
38151 struct videobuf_queue q;
38152
38153+ pax_track_stack();
38154+
38155 /* Required to make generic handler to call __videobuf_alloc */
38156 q.int_ops = &sg_ops;
38157
38158diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38159index b6992b7..9fa7547 100644
38160--- a/drivers/message/fusion/mptbase.c
38161+++ b/drivers/message/fusion/mptbase.c
38162@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
38163 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38164 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38165
38166+#ifdef CONFIG_GRKERNSEC_HIDESYM
38167+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38168+ NULL, NULL);
38169+#else
38170 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38171 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38172+#endif
38173+
38174 /*
38175 * Rounding UP to nearest 4-kB boundary here...
38176 */
38177diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38178index 83873e3..e360e9a 100644
38179--- a/drivers/message/fusion/mptsas.c
38180+++ b/drivers/message/fusion/mptsas.c
38181@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38182 return 0;
38183 }
38184
38185+static inline void
38186+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38187+{
38188+ if (phy_info->port_details) {
38189+ phy_info->port_details->rphy = rphy;
38190+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38191+ ioc->name, rphy));
38192+ }
38193+
38194+ if (rphy) {
38195+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38196+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38197+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38198+ ioc->name, rphy, rphy->dev.release));
38199+ }
38200+}
38201+
38202 /* no mutex */
38203 static void
38204 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38205@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38206 return NULL;
38207 }
38208
38209-static inline void
38210-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38211-{
38212- if (phy_info->port_details) {
38213- phy_info->port_details->rphy = rphy;
38214- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38215- ioc->name, rphy));
38216- }
38217-
38218- if (rphy) {
38219- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38220- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38221- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38222- ioc->name, rphy, rphy->dev.release));
38223- }
38224-}
38225-
38226 static inline struct sas_port *
38227 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38228 {
38229diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38230index bd096ca..332cf76 100644
38231--- a/drivers/message/fusion/mptscsih.c
38232+++ b/drivers/message/fusion/mptscsih.c
38233@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38234
38235 h = shost_priv(SChost);
38236
38237- if (h) {
38238- if (h->info_kbuf == NULL)
38239- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38240- return h->info_kbuf;
38241- h->info_kbuf[0] = '\0';
38242+ if (!h)
38243+ return NULL;
38244
38245- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38246- h->info_kbuf[size-1] = '\0';
38247- }
38248+ if (h->info_kbuf == NULL)
38249+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38250+ return h->info_kbuf;
38251+ h->info_kbuf[0] = '\0';
38252+
38253+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38254+ h->info_kbuf[size-1] = '\0';
38255
38256 return h->info_kbuf;
38257 }
38258diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
38259index efba702..59b2c0f 100644
38260--- a/drivers/message/i2o/i2o_config.c
38261+++ b/drivers/message/i2o/i2o_config.c
38262@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
38263 struct i2o_message *msg;
38264 unsigned int iop;
38265
38266+ pax_track_stack();
38267+
38268 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
38269 return -EFAULT;
38270
38271diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38272index 7045c45..c07b170 100644
38273--- a/drivers/message/i2o/i2o_proc.c
38274+++ b/drivers/message/i2o/i2o_proc.c
38275@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
38276 "Array Controller Device"
38277 };
38278
38279-static char *chtostr(u8 * chars, int n)
38280-{
38281- char tmp[256];
38282- tmp[0] = 0;
38283- return strncat(tmp, (char *)chars, n);
38284-}
38285-
38286 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38287 char *group)
38288 {
38289@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38290
38291 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38292 seq_printf(seq, "%-#8x", ddm_table.module_id);
38293- seq_printf(seq, "%-29s",
38294- chtostr(ddm_table.module_name_version, 28));
38295+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38296 seq_printf(seq, "%9d ", ddm_table.data_size);
38297 seq_printf(seq, "%8d", ddm_table.code_size);
38298
38299@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38300
38301 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38302 seq_printf(seq, "%-#8x", dst->module_id);
38303- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
38304- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
38305+ seq_printf(seq, "%-.28s", dst->module_name_version);
38306+ seq_printf(seq, "%-.8s", dst->date);
38307 seq_printf(seq, "%8d ", dst->module_size);
38308 seq_printf(seq, "%8d ", dst->mpb_size);
38309 seq_printf(seq, "0x%04x", dst->module_flags);
38310@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38311 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38312 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38313 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38314- seq_printf(seq, "Vendor info : %s\n",
38315- chtostr((u8 *) (work32 + 2), 16));
38316- seq_printf(seq, "Product info : %s\n",
38317- chtostr((u8 *) (work32 + 6), 16));
38318- seq_printf(seq, "Description : %s\n",
38319- chtostr((u8 *) (work32 + 10), 16));
38320- seq_printf(seq, "Product rev. : %s\n",
38321- chtostr((u8 *) (work32 + 14), 8));
38322+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38323+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38324+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38325+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38326
38327 seq_printf(seq, "Serial number : ");
38328 print_serial_number(seq, (u8 *) (work32 + 16),
38329@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38330 }
38331
38332 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38333- seq_printf(seq, "Module name : %s\n",
38334- chtostr(result.module_name, 24));
38335- seq_printf(seq, "Module revision : %s\n",
38336- chtostr(result.module_rev, 8));
38337+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38338+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38339
38340 seq_printf(seq, "Serial number : ");
38341 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38342@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38343 return 0;
38344 }
38345
38346- seq_printf(seq, "Device name : %s\n",
38347- chtostr(result.device_name, 64));
38348- seq_printf(seq, "Service name : %s\n",
38349- chtostr(result.service_name, 64));
38350- seq_printf(seq, "Physical name : %s\n",
38351- chtostr(result.physical_location, 64));
38352- seq_printf(seq, "Instance number : %s\n",
38353- chtostr(result.instance_number, 4));
38354+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
38355+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
38356+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38357+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38358
38359 return 0;
38360 }
38361diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38362index 27cf4af..b1205b8 100644
38363--- a/drivers/message/i2o/iop.c
38364+++ b/drivers/message/i2o/iop.c
38365@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38366
38367 spin_lock_irqsave(&c->context_list_lock, flags);
38368
38369- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38370- atomic_inc(&c->context_list_counter);
38371+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38372+ atomic_inc_unchecked(&c->context_list_counter);
38373
38374- entry->context = atomic_read(&c->context_list_counter);
38375+ entry->context = atomic_read_unchecked(&c->context_list_counter);
38376
38377 list_add(&entry->list, &c->context_list);
38378
38379@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38380
38381 #if BITS_PER_LONG == 64
38382 spin_lock_init(&c->context_list_lock);
38383- atomic_set(&c->context_list_counter, 0);
38384+ atomic_set_unchecked(&c->context_list_counter, 0);
38385 INIT_LIST_HEAD(&c->context_list);
38386 #endif
38387
38388diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
38389index 78e3e85..66c9a0d 100644
38390--- a/drivers/mfd/ab3100-core.c
38391+++ b/drivers/mfd/ab3100-core.c
38392@@ -777,7 +777,7 @@ struct ab_family_id {
38393 char *name;
38394 };
38395
38396-static const struct ab_family_id ids[] __initdata = {
38397+static const struct ab_family_id ids[] __initconst = {
38398 /* AB3100 */
38399 {
38400 .id = 0xc0,
38401diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
38402index 8d8c932..8104515 100644
38403--- a/drivers/mfd/wm8350-i2c.c
38404+++ b/drivers/mfd/wm8350-i2c.c
38405@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
38406 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
38407 int ret;
38408
38409+ pax_track_stack();
38410+
38411 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
38412 return -EINVAL;
38413
38414diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38415index e4ff50b..4cc3f04 100644
38416--- a/drivers/misc/kgdbts.c
38417+++ b/drivers/misc/kgdbts.c
38418@@ -118,7 +118,7 @@
38419 } while (0)
38420 #define MAX_CONFIG_LEN 40
38421
38422-static struct kgdb_io kgdbts_io_ops;
38423+static const struct kgdb_io kgdbts_io_ops;
38424 static char get_buf[BUFMAX];
38425 static int get_buf_cnt;
38426 static char put_buf[BUFMAX];
38427@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
38428 module_put(THIS_MODULE);
38429 }
38430
38431-static struct kgdb_io kgdbts_io_ops = {
38432+static const struct kgdb_io kgdbts_io_ops = {
38433 .name = "kgdbts",
38434 .read_char = kgdbts_get_char,
38435 .write_char = kgdbts_put_char,
38436diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
38437index 37e7cfc..67cfb76 100644
38438--- a/drivers/misc/sgi-gru/gruhandles.c
38439+++ b/drivers/misc/sgi-gru/gruhandles.c
38440@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
38441
38442 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
38443 {
38444- atomic_long_inc(&mcs_op_statistics[op].count);
38445- atomic_long_add(clks, &mcs_op_statistics[op].total);
38446+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
38447+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
38448 if (mcs_op_statistics[op].max < clks)
38449 mcs_op_statistics[op].max = clks;
38450 }
38451diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
38452index 3f2375c..467c6e6 100644
38453--- a/drivers/misc/sgi-gru/gruprocfs.c
38454+++ b/drivers/misc/sgi-gru/gruprocfs.c
38455@@ -32,9 +32,9 @@
38456
38457 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
38458
38459-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
38460+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
38461 {
38462- unsigned long val = atomic_long_read(v);
38463+ unsigned long val = atomic_long_read_unchecked(v);
38464
38465 if (val)
38466 seq_printf(s, "%16lu %s\n", val, id);
38467@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
38468 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
38469
38470 for (op = 0; op < mcsop_last; op++) {
38471- count = atomic_long_read(&mcs_op_statistics[op].count);
38472- total = atomic_long_read(&mcs_op_statistics[op].total);
38473+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
38474+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
38475 max = mcs_op_statistics[op].max;
38476 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
38477 count ? total / count : 0, max);
38478diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
38479index 46990bc..4a251b5 100644
38480--- a/drivers/misc/sgi-gru/grutables.h
38481+++ b/drivers/misc/sgi-gru/grutables.h
38482@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
38483 * GRU statistics.
38484 */
38485 struct gru_stats_s {
38486- atomic_long_t vdata_alloc;
38487- atomic_long_t vdata_free;
38488- atomic_long_t gts_alloc;
38489- atomic_long_t gts_free;
38490- atomic_long_t vdata_double_alloc;
38491- atomic_long_t gts_double_allocate;
38492- atomic_long_t assign_context;
38493- atomic_long_t assign_context_failed;
38494- atomic_long_t free_context;
38495- atomic_long_t load_user_context;
38496- atomic_long_t load_kernel_context;
38497- atomic_long_t lock_kernel_context;
38498- atomic_long_t unlock_kernel_context;
38499- atomic_long_t steal_user_context;
38500- atomic_long_t steal_kernel_context;
38501- atomic_long_t steal_context_failed;
38502- atomic_long_t nopfn;
38503- atomic_long_t break_cow;
38504- atomic_long_t asid_new;
38505- atomic_long_t asid_next;
38506- atomic_long_t asid_wrap;
38507- atomic_long_t asid_reuse;
38508- atomic_long_t intr;
38509- atomic_long_t intr_mm_lock_failed;
38510- atomic_long_t call_os;
38511- atomic_long_t call_os_offnode_reference;
38512- atomic_long_t call_os_check_for_bug;
38513- atomic_long_t call_os_wait_queue;
38514- atomic_long_t user_flush_tlb;
38515- atomic_long_t user_unload_context;
38516- atomic_long_t user_exception;
38517- atomic_long_t set_context_option;
38518- atomic_long_t migrate_check;
38519- atomic_long_t migrated_retarget;
38520- atomic_long_t migrated_unload;
38521- atomic_long_t migrated_unload_delay;
38522- atomic_long_t migrated_nopfn_retarget;
38523- atomic_long_t migrated_nopfn_unload;
38524- atomic_long_t tlb_dropin;
38525- atomic_long_t tlb_dropin_fail_no_asid;
38526- atomic_long_t tlb_dropin_fail_upm;
38527- atomic_long_t tlb_dropin_fail_invalid;
38528- atomic_long_t tlb_dropin_fail_range_active;
38529- atomic_long_t tlb_dropin_fail_idle;
38530- atomic_long_t tlb_dropin_fail_fmm;
38531- atomic_long_t tlb_dropin_fail_no_exception;
38532- atomic_long_t tlb_dropin_fail_no_exception_war;
38533- atomic_long_t tfh_stale_on_fault;
38534- atomic_long_t mmu_invalidate_range;
38535- atomic_long_t mmu_invalidate_page;
38536- atomic_long_t mmu_clear_flush_young;
38537- atomic_long_t flush_tlb;
38538- atomic_long_t flush_tlb_gru;
38539- atomic_long_t flush_tlb_gru_tgh;
38540- atomic_long_t flush_tlb_gru_zero_asid;
38541+ atomic_long_unchecked_t vdata_alloc;
38542+ atomic_long_unchecked_t vdata_free;
38543+ atomic_long_unchecked_t gts_alloc;
38544+ atomic_long_unchecked_t gts_free;
38545+ atomic_long_unchecked_t vdata_double_alloc;
38546+ atomic_long_unchecked_t gts_double_allocate;
38547+ atomic_long_unchecked_t assign_context;
38548+ atomic_long_unchecked_t assign_context_failed;
38549+ atomic_long_unchecked_t free_context;
38550+ atomic_long_unchecked_t load_user_context;
38551+ atomic_long_unchecked_t load_kernel_context;
38552+ atomic_long_unchecked_t lock_kernel_context;
38553+ atomic_long_unchecked_t unlock_kernel_context;
38554+ atomic_long_unchecked_t steal_user_context;
38555+ atomic_long_unchecked_t steal_kernel_context;
38556+ atomic_long_unchecked_t steal_context_failed;
38557+ atomic_long_unchecked_t nopfn;
38558+ atomic_long_unchecked_t break_cow;
38559+ atomic_long_unchecked_t asid_new;
38560+ atomic_long_unchecked_t asid_next;
38561+ atomic_long_unchecked_t asid_wrap;
38562+ atomic_long_unchecked_t asid_reuse;
38563+ atomic_long_unchecked_t intr;
38564+ atomic_long_unchecked_t intr_mm_lock_failed;
38565+ atomic_long_unchecked_t call_os;
38566+ atomic_long_unchecked_t call_os_offnode_reference;
38567+ atomic_long_unchecked_t call_os_check_for_bug;
38568+ atomic_long_unchecked_t call_os_wait_queue;
38569+ atomic_long_unchecked_t user_flush_tlb;
38570+ atomic_long_unchecked_t user_unload_context;
38571+ atomic_long_unchecked_t user_exception;
38572+ atomic_long_unchecked_t set_context_option;
38573+ atomic_long_unchecked_t migrate_check;
38574+ atomic_long_unchecked_t migrated_retarget;
38575+ atomic_long_unchecked_t migrated_unload;
38576+ atomic_long_unchecked_t migrated_unload_delay;
38577+ atomic_long_unchecked_t migrated_nopfn_retarget;
38578+ atomic_long_unchecked_t migrated_nopfn_unload;
38579+ atomic_long_unchecked_t tlb_dropin;
38580+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
38581+ atomic_long_unchecked_t tlb_dropin_fail_upm;
38582+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
38583+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
38584+ atomic_long_unchecked_t tlb_dropin_fail_idle;
38585+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
38586+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
38587+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
38588+ atomic_long_unchecked_t tfh_stale_on_fault;
38589+ atomic_long_unchecked_t mmu_invalidate_range;
38590+ atomic_long_unchecked_t mmu_invalidate_page;
38591+ atomic_long_unchecked_t mmu_clear_flush_young;
38592+ atomic_long_unchecked_t flush_tlb;
38593+ atomic_long_unchecked_t flush_tlb_gru;
38594+ atomic_long_unchecked_t flush_tlb_gru_tgh;
38595+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
38596
38597- atomic_long_t copy_gpa;
38598+ atomic_long_unchecked_t copy_gpa;
38599
38600- atomic_long_t mesq_receive;
38601- atomic_long_t mesq_receive_none;
38602- atomic_long_t mesq_send;
38603- atomic_long_t mesq_send_failed;
38604- atomic_long_t mesq_noop;
38605- atomic_long_t mesq_send_unexpected_error;
38606- atomic_long_t mesq_send_lb_overflow;
38607- atomic_long_t mesq_send_qlimit_reached;
38608- atomic_long_t mesq_send_amo_nacked;
38609- atomic_long_t mesq_send_put_nacked;
38610- atomic_long_t mesq_qf_not_full;
38611- atomic_long_t mesq_qf_locked;
38612- atomic_long_t mesq_qf_noop_not_full;
38613- atomic_long_t mesq_qf_switch_head_failed;
38614- atomic_long_t mesq_qf_unexpected_error;
38615- atomic_long_t mesq_noop_unexpected_error;
38616- atomic_long_t mesq_noop_lb_overflow;
38617- atomic_long_t mesq_noop_qlimit_reached;
38618- atomic_long_t mesq_noop_amo_nacked;
38619- atomic_long_t mesq_noop_put_nacked;
38620+ atomic_long_unchecked_t mesq_receive;
38621+ atomic_long_unchecked_t mesq_receive_none;
38622+ atomic_long_unchecked_t mesq_send;
38623+ atomic_long_unchecked_t mesq_send_failed;
38624+ atomic_long_unchecked_t mesq_noop;
38625+ atomic_long_unchecked_t mesq_send_unexpected_error;
38626+ atomic_long_unchecked_t mesq_send_lb_overflow;
38627+ atomic_long_unchecked_t mesq_send_qlimit_reached;
38628+ atomic_long_unchecked_t mesq_send_amo_nacked;
38629+ atomic_long_unchecked_t mesq_send_put_nacked;
38630+ atomic_long_unchecked_t mesq_qf_not_full;
38631+ atomic_long_unchecked_t mesq_qf_locked;
38632+ atomic_long_unchecked_t mesq_qf_noop_not_full;
38633+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
38634+ atomic_long_unchecked_t mesq_qf_unexpected_error;
38635+ atomic_long_unchecked_t mesq_noop_unexpected_error;
38636+ atomic_long_unchecked_t mesq_noop_lb_overflow;
38637+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
38638+ atomic_long_unchecked_t mesq_noop_amo_nacked;
38639+ atomic_long_unchecked_t mesq_noop_put_nacked;
38640
38641 };
38642
38643@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
38644 cchop_deallocate, tghop_invalidate, mcsop_last};
38645
38646 struct mcs_op_statistic {
38647- atomic_long_t count;
38648- atomic_long_t total;
38649+ atomic_long_unchecked_t count;
38650+ atomic_long_unchecked_t total;
38651 unsigned long max;
38652 };
38653
38654@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
38655
38656 #define STAT(id) do { \
38657 if (gru_options & OPT_STATS) \
38658- atomic_long_inc(&gru_stats.id); \
38659+ atomic_long_inc_unchecked(&gru_stats.id); \
38660 } while (0)
38661
38662 #ifdef CONFIG_SGI_GRU_DEBUG
38663diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
38664index 2275126..12a9dbfb 100644
38665--- a/drivers/misc/sgi-xp/xp.h
38666+++ b/drivers/misc/sgi-xp/xp.h
38667@@ -289,7 +289,7 @@ struct xpc_interface {
38668 xpc_notify_func, void *);
38669 void (*received) (short, int, void *);
38670 enum xp_retval (*partid_to_nasids) (short, void *);
38671-};
38672+} __no_const;
38673
38674 extern struct xpc_interface xpc_interface;
38675
38676diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
38677index b94d5f7..7f494c5 100644
38678--- a/drivers/misc/sgi-xp/xpc.h
38679+++ b/drivers/misc/sgi-xp/xpc.h
38680@@ -835,6 +835,7 @@ struct xpc_arch_operations {
38681 void (*received_payload) (struct xpc_channel *, void *);
38682 void (*notify_senders_of_disconnect) (struct xpc_channel *);
38683 };
38684+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
38685
38686 /* struct xpc_partition act_state values (for XPC HB) */
38687
38688@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
38689 /* found in xpc_main.c */
38690 extern struct device *xpc_part;
38691 extern struct device *xpc_chan;
38692-extern struct xpc_arch_operations xpc_arch_ops;
38693+extern xpc_arch_operations_no_const xpc_arch_ops;
38694 extern int xpc_disengage_timelimit;
38695 extern int xpc_disengage_timedout;
38696 extern int xpc_activate_IRQ_rcvd;
38697diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
38698index fd3688a..7e211a4 100644
38699--- a/drivers/misc/sgi-xp/xpc_main.c
38700+++ b/drivers/misc/sgi-xp/xpc_main.c
38701@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
38702 .notifier_call = xpc_system_die,
38703 };
38704
38705-struct xpc_arch_operations xpc_arch_ops;
38706+xpc_arch_operations_no_const xpc_arch_ops;
38707
38708 /*
38709 * Timer function to enforce the timelimit on the partition disengage.
38710diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
38711index 8b70e03..700bda6 100644
38712--- a/drivers/misc/sgi-xp/xpc_sn2.c
38713+++ b/drivers/misc/sgi-xp/xpc_sn2.c
38714@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
38715 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
38716 }
38717
38718-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
38719+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
38720 .setup_partitions = xpc_setup_partitions_sn2,
38721 .teardown_partitions = xpc_teardown_partitions_sn2,
38722 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
38723@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
38724 int ret;
38725 size_t buf_size;
38726
38727- xpc_arch_ops = xpc_arch_ops_sn2;
38728+ pax_open_kernel();
38729+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
38730+ pax_close_kernel();
38731
38732 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
38733 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
38734diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
38735index 8e08d71..7cb8c9b 100644
38736--- a/drivers/misc/sgi-xp/xpc_uv.c
38737+++ b/drivers/misc/sgi-xp/xpc_uv.c
38738@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
38739 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
38740 }
38741
38742-static struct xpc_arch_operations xpc_arch_ops_uv = {
38743+static const struct xpc_arch_operations xpc_arch_ops_uv = {
38744 .setup_partitions = xpc_setup_partitions_uv,
38745 .teardown_partitions = xpc_teardown_partitions_uv,
38746 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
38747@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
38748 int
38749 xpc_init_uv(void)
38750 {
38751- xpc_arch_ops = xpc_arch_ops_uv;
38752+ pax_open_kernel();
38753+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
38754+ pax_close_kernel();
38755
38756 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
38757 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
38758diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
38759index 6fd20b42..650efe3 100644
38760--- a/drivers/mmc/host/sdhci-pci.c
38761+++ b/drivers/mmc/host/sdhci-pci.c
38762@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
38763 .probe = via_probe,
38764 };
38765
38766-static const struct pci_device_id pci_ids[] __devinitdata = {
38767+static const struct pci_device_id pci_ids[] __devinitconst = {
38768 {
38769 .vendor = PCI_VENDOR_ID_RICOH,
38770 .device = PCI_DEVICE_ID_RICOH_R5C822,
38771diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
38772index e7563a9..5f90ce5 100644
38773--- a/drivers/mtd/chips/cfi_cmdset_0001.c
38774+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
38775@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
38776 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
38777 unsigned long timeo = jiffies + HZ;
38778
38779+ pax_track_stack();
38780+
38781 /* Prevent setting state FL_SYNCING for chip in suspended state. */
38782 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
38783 goto sleep;
38784@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
38785 unsigned long initial_adr;
38786 int initial_len = len;
38787
38788+ pax_track_stack();
38789+
38790 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
38791 adr += chip->start;
38792 initial_adr = adr;
38793@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
38794 int retries = 3;
38795 int ret;
38796
38797+ pax_track_stack();
38798+
38799 adr += chip->start;
38800
38801 retry:
38802diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
38803index 0667a67..3ab97ed 100644
38804--- a/drivers/mtd/chips/cfi_cmdset_0020.c
38805+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
38806@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
38807 unsigned long cmd_addr;
38808 struct cfi_private *cfi = map->fldrv_priv;
38809
38810+ pax_track_stack();
38811+
38812 adr += chip->start;
38813
38814 /* Ensure cmd read/writes are aligned. */
38815@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
38816 DECLARE_WAITQUEUE(wait, current);
38817 int wbufsize, z;
38818
38819+ pax_track_stack();
38820+
38821 /* M58LW064A requires bus alignment for buffer wriets -- saw */
38822 if (adr & (map_bankwidth(map)-1))
38823 return -EINVAL;
38824@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
38825 DECLARE_WAITQUEUE(wait, current);
38826 int ret = 0;
38827
38828+ pax_track_stack();
38829+
38830 adr += chip->start;
38831
38832 /* Let's determine this according to the interleave only once */
38833@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
38834 unsigned long timeo = jiffies + HZ;
38835 DECLARE_WAITQUEUE(wait, current);
38836
38837+ pax_track_stack();
38838+
38839 adr += chip->start;
38840
38841 /* Let's determine this according to the interleave only once */
38842@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
38843 unsigned long timeo = jiffies + HZ;
38844 DECLARE_WAITQUEUE(wait, current);
38845
38846+ pax_track_stack();
38847+
38848 adr += chip->start;
38849
38850 /* Let's determine this according to the interleave only once */
38851diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
38852index 5bf5f46..c5de373 100644
38853--- a/drivers/mtd/devices/doc2000.c
38854+++ b/drivers/mtd/devices/doc2000.c
38855@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
38856
38857 /* The ECC will not be calculated correctly if less than 512 is written */
38858 /* DBB-
38859- if (len != 0x200 && eccbuf)
38860+ if (len != 0x200)
38861 printk(KERN_WARNING
38862 "ECC needs a full sector write (adr: %lx size %lx)\n",
38863 (long) to, (long) len);
38864diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
38865index 0990f78..bb4e8a4 100644
38866--- a/drivers/mtd/devices/doc2001.c
38867+++ b/drivers/mtd/devices/doc2001.c
38868@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
38869 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
38870
38871 /* Don't allow read past end of device */
38872- if (from >= this->totlen)
38873+ if (from >= this->totlen || !len)
38874 return -EINVAL;
38875
38876 /* Don't allow a single read to cross a 512-byte block boundary */
38877diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
38878index e56d6b4..f07e6cf 100644
38879--- a/drivers/mtd/ftl.c
38880+++ b/drivers/mtd/ftl.c
38881@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
38882 loff_t offset;
38883 uint16_t srcunitswap = cpu_to_le16(srcunit);
38884
38885+ pax_track_stack();
38886+
38887 eun = &part->EUNInfo[srcunit];
38888 xfer = &part->XferInfo[xferunit];
38889 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
38890diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
38891index 8aca552..146446e 100755
38892--- a/drivers/mtd/inftlcore.c
38893+++ b/drivers/mtd/inftlcore.c
38894@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
38895 struct inftl_oob oob;
38896 size_t retlen;
38897
38898+ pax_track_stack();
38899+
38900 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
38901 "pending=%d)\n", inftl, thisVUC, pendingblock);
38902
38903diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
38904index 32e82ae..ed50953 100644
38905--- a/drivers/mtd/inftlmount.c
38906+++ b/drivers/mtd/inftlmount.c
38907@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
38908 struct INFTLPartition *ip;
38909 size_t retlen;
38910
38911+ pax_track_stack();
38912+
38913 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
38914
38915 /*
38916diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
38917index 79bf40f..fe5f8fd 100644
38918--- a/drivers/mtd/lpddr/qinfo_probe.c
38919+++ b/drivers/mtd/lpddr/qinfo_probe.c
38920@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
38921 {
38922 map_word pfow_val[4];
38923
38924+ pax_track_stack();
38925+
38926 /* Check identification string */
38927 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
38928 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
38929diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
38930index 726a1b8..f46b460 100644
38931--- a/drivers/mtd/mtdchar.c
38932+++ b/drivers/mtd/mtdchar.c
38933@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
38934 u_long size;
38935 struct mtd_info_user info;
38936
38937+ pax_track_stack();
38938+
38939 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
38940
38941 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
38942diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
38943index 1002e18..26d82d5 100644
38944--- a/drivers/mtd/nftlcore.c
38945+++ b/drivers/mtd/nftlcore.c
38946@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
38947 int inplace = 1;
38948 size_t retlen;
38949
38950+ pax_track_stack();
38951+
38952 memset(BlockMap, 0xff, sizeof(BlockMap));
38953 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
38954
38955diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
38956index 8b22b18..6fada85 100644
38957--- a/drivers/mtd/nftlmount.c
38958+++ b/drivers/mtd/nftlmount.c
38959@@ -23,6 +23,7 @@
38960 #include <asm/errno.h>
38961 #include <linux/delay.h>
38962 #include <linux/slab.h>
38963+#include <linux/sched.h>
38964 #include <linux/mtd/mtd.h>
38965 #include <linux/mtd/nand.h>
38966 #include <linux/mtd/nftl.h>
38967@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
38968 struct mtd_info *mtd = nftl->mbd.mtd;
38969 unsigned int i;
38970
38971+ pax_track_stack();
38972+
38973 /* Assume logical EraseSize == physical erasesize for starting the scan.
38974 We'll sort it out later if we find a MediaHeader which says otherwise */
38975 /* Actually, we won't. The new DiskOnChip driver has already scanned
38976diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
38977index 14cec04..d775b87 100644
38978--- a/drivers/mtd/ubi/build.c
38979+++ b/drivers/mtd/ubi/build.c
38980@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
38981 static int __init bytes_str_to_int(const char *str)
38982 {
38983 char *endp;
38984- unsigned long result;
38985+ unsigned long result, scale = 1;
38986
38987 result = simple_strtoul(str, &endp, 0);
38988 if (str == endp || result >= INT_MAX) {
38989@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
38990
38991 switch (*endp) {
38992 case 'G':
38993- result *= 1024;
38994+ scale *= 1024;
38995 case 'M':
38996- result *= 1024;
38997+ scale *= 1024;
38998 case 'K':
38999- result *= 1024;
39000+ scale *= 1024;
39001 if (endp[1] == 'i' && endp[2] == 'B')
39002 endp += 2;
39003 case '\0':
39004@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
39005 return -EINVAL;
39006 }
39007
39008- return result;
39009+ if ((intoverflow_t)result*scale >= INT_MAX) {
39010+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
39011+ str);
39012+ return -EINVAL;
39013+ }
39014+
39015+ return result*scale;
39016 }
39017
39018 /**
39019diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
39020index ab68886..ca405e8 100644
39021--- a/drivers/net/atlx/atl2.c
39022+++ b/drivers/net/atlx/atl2.c
39023@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
39024 */
39025
39026 #define ATL2_PARAM(X, desc) \
39027- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39028+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
39029 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
39030 MODULE_PARM_DESC(X, desc);
39031 #else
39032diff --git a/drivers/net/benet/Makefile b/drivers/net/benet/Makefile
39033index a60cd80..0ed11ef 100644
39034--- a/drivers/net/benet/Makefile
39035+++ b/drivers/net/benet/Makefile
39036@@ -1,7 +1,9 @@
39037 #
39038-# Makefile to build the network driver for ServerEngine's BladeEngine.
39039+# Makefile to build the be2net network driver
39040 #
39041
39042+EXTRA_CFLAGS += -DCONFIG_PALAU
39043+
39044 obj-$(CONFIG_BE2NET) += be2net.o
39045
39046-be2net-y := be_main.o be_cmds.o be_ethtool.o
39047+be2net-y := be_main.o be_cmds.o be_ethtool.o be_compat.o be_misc.o
39048diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
39049index 5c74ff0..7382603 100644
39050--- a/drivers/net/benet/be.h
39051+++ b/drivers/net/benet/be.h
39052@@ -1,18 +1,18 @@
39053 /*
39054- * Copyright (C) 2005 - 2009 ServerEngines
39055+ * Copyright (C) 2005 - 2011 Emulex
39056 * All rights reserved.
39057 *
39058 * This program is free software; you can redistribute it and/or
39059 * modify it under the terms of the GNU General Public License version 2
39060- * as published by the Free Software Foundation. The full GNU General
39061+ * as published by the Free Software Foundation. The full GNU General
39062 * Public License is included in this distribution in the file called COPYING.
39063 *
39064 * Contact Information:
39065- * linux-drivers@serverengines.com
39066+ * linux-drivers@emulex.com
39067 *
39068- * ServerEngines
39069- * 209 N. Fair Oaks Ave
39070- * Sunnyvale, CA 94085
39071+ * Emulex
39072+ * 3333 Susan Street
39073+ * Costa Mesa, CA 92626
39074 */
39075
39076 #ifndef BE_H
39077@@ -29,32 +29,53 @@
39078 #include <linux/workqueue.h>
39079 #include <linux/interrupt.h>
39080 #include <linux/firmware.h>
39081+#include <linux/jhash.h>
39082+#ifndef CONFIG_PALAU
39083+#include <linux/inet_lro.h>
39084+#endif
39085
39086+#ifdef CONFIG_PALAU
39087+#include "be_compat.h"
39088+#endif
39089 #include "be_hw.h"
39090
39091-#define DRV_VER "2.101.205"
39092+#ifdef CONFIG_PALAU
39093+#include "version.h"
39094+#define DRV_VER STR_BE_MAJOR "." STR_BE_MINOR "."\
39095+ STR_BE_BUILD "." STR_BE_BRANCH
39096+#else
39097+#define DRV_VER "2.0.348"
39098+#endif
39099 #define DRV_NAME "be2net"
39100-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39101-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
39102-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
39103-#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
39104-#define DRV_DESC BE_NAME "Driver"
39105+#define BE_NAME "Emulex BladeEngine2"
39106+#define BE3_NAME "Emulex BladeEngine3"
39107+#define OC_NAME "Emulex OneConnect"
39108+#define OC_NAME_BE OC_NAME "(be3)"
39109+#define OC_NAME_LANCER OC_NAME "(Lancer)"
39110+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
39111
39112-#define BE_VENDOR_ID 0x19a2
39113+#define BE_VENDOR_ID 0x19a2
39114+#define EMULEX_VENDOR_ID 0x10df
39115 #define BE_DEVICE_ID1 0x211
39116 #define BE_DEVICE_ID2 0x221
39117-#define OC_DEVICE_ID1 0x700
39118-#define OC_DEVICE_ID2 0x701
39119-#define OC_DEVICE_ID3 0x710
39120+#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
39121+#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
39122+#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
39123+
39124+#define OC_SUBSYS_DEVICE_ID1 0xE602
39125+#define OC_SUBSYS_DEVICE_ID2 0xE642
39126+#define OC_SUBSYS_DEVICE_ID3 0xE612
39127+#define OC_SUBSYS_DEVICE_ID4 0xE652
39128
39129 static inline char *nic_name(struct pci_dev *pdev)
39130 {
39131 switch (pdev->device) {
39132 case OC_DEVICE_ID1:
39133- case OC_DEVICE_ID2:
39134 return OC_NAME;
39135+ case OC_DEVICE_ID2:
39136+ return OC_NAME_BE;
39137 case OC_DEVICE_ID3:
39138- return OC_NAME1;
39139+ return OC_NAME_LANCER;
39140 case BE_DEVICE_ID2:
39141 return BE3_NAME;
39142 default:
39143@@ -63,7 +84,7 @@ static inline char *nic_name(struct pci_dev *pdev)
39144 }
39145
39146 /* Number of bytes of an RX frame that are copied to skb->data */
39147-#define BE_HDR_LEN 64
39148+#define BE_HDR_LEN ((u16) 64)
39149 #define BE_MAX_JUMBO_FRAME_SIZE 9018
39150 #define BE_MIN_MTU 256
39151
39152@@ -79,10 +100,24 @@ static inline char *nic_name(struct pci_dev *pdev)
39153 #define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
39154 #define MCC_CQ_LEN 256
39155
39156+#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
39157+
39158+#define MAX_RX_QS (MAX_RSS_QS + 1)
39159+
39160+#ifdef MQ_TX
39161+#define MAX_TX_QS 8
39162+#else
39163+#define MAX_TX_QS 1
39164+#endif
39165+
39166+#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RSS qs + 1 def Rx + Tx */
39167 #define BE_NAPI_WEIGHT 64
39168-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39169+#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
39170 #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
39171
39172+#define BE_MAX_LRO_DESCRIPTORS 16
39173+#define BE_MAX_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
39174+
39175 #define FW_VER_LEN 32
39176
39177 struct be_dma_mem {
39178@@ -127,6 +162,11 @@ static inline void *queue_tail_node(struct be_queue_info *q)
39179 return q->dma_mem.va + q->tail * q->entry_size;
39180 }
39181
39182+static inline void *queue_index_node(struct be_queue_info *q, u16 index)
39183+{
39184+ return q->dma_mem.va + index * q->entry_size;
39185+}
39186+
39187 static inline void queue_head_inc(struct be_queue_info *q)
39188 {
39189 index_inc(&q->head, q->len);
39190@@ -137,6 +177,7 @@ static inline void queue_tail_inc(struct be_queue_info *q)
39191 index_inc(&q->tail, q->len);
39192 }
39193
39194+
39195 struct be_eq_obj {
39196 struct be_queue_info q;
39197 char desc[32];
39198@@ -146,6 +187,7 @@ struct be_eq_obj {
39199 u16 min_eqd; /* in usecs */
39200 u16 max_eqd; /* in usecs */
39201 u16 cur_eqd; /* in usecs */
39202+ u8 eq_idx;
39203
39204 struct napi_struct napi;
39205 };
39206@@ -153,49 +195,20 @@ struct be_eq_obj {
39207 struct be_mcc_obj {
39208 struct be_queue_info q;
39209 struct be_queue_info cq;
39210+ bool rearm_cq;
39211 };
39212
39213-struct be_drvr_stats {
39214+struct be_tx_stats {
39215 u32 be_tx_reqs; /* number of TX requests initiated */
39216 u32 be_tx_stops; /* number of times TX Q was stopped */
39217- u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */
39218 u32 be_tx_wrbs; /* number of tx WRBs used */
39219- u32 be_tx_events; /* number of tx completion events */
39220 u32 be_tx_compl; /* number of tx completion entries processed */
39221 ulong be_tx_jiffies;
39222 u64 be_tx_bytes;
39223 u64 be_tx_bytes_prev;
39224 u64 be_tx_pkts;
39225 u32 be_tx_rate;
39226-
39227- u32 cache_barrier[16];
39228-
39229- u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */
39230- u32 be_polls; /* number of times NAPI called poll function */
39231- u32 be_rx_events; /* number of ucast rx completion events */
39232- u32 be_rx_compl; /* number of rx completion entries processed */
39233- ulong be_rx_jiffies;
39234- u64 be_rx_bytes;
39235- u64 be_rx_bytes_prev;
39236- u64 be_rx_pkts;
39237- u32 be_rx_rate;
39238- /* number of non ether type II frames dropped where
39239- * frame len > length field of Mac Hdr */
39240- u32 be_802_3_dropped_frames;
39241- /* number of non ether type II frames malformed where
39242- * in frame len < length field of Mac Hdr */
39243- u32 be_802_3_malformed_frames;
39244- u32 be_rxcp_err; /* Num rx completion entries w/ err set. */
39245- ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39246- u32 be_rx_frags;
39247- u32 be_prev_rx_frags;
39248- u32 be_rx_fps; /* Rx frags per second */
39249-};
39250-
39251-struct be_stats_obj {
39252- struct be_drvr_stats drvr_stats;
39253- struct net_device_stats net_stats;
39254- struct be_dma_mem cmd;
39255+ u32 be_ipv6_ext_hdr_tx_drop;
39256 };
39257
39258 struct be_tx_obj {
39259@@ -203,23 +216,124 @@ struct be_tx_obj {
39260 struct be_queue_info cq;
39261 /* Remember the skbs that were transmitted */
39262 struct sk_buff *sent_skb_list[TX_Q_LEN];
39263+ struct be_tx_stats stats;
39264 };
39265
39266 /* Struct to remember the pages posted for rx frags */
39267 struct be_rx_page_info {
39268 struct page *page;
39269- dma_addr_t bus;
39270+ DEFINE_DMA_UNMAP_ADDR(bus);
39271 u16 page_offset;
39272 bool last_page_user;
39273 };
39274
39275+struct be_rx_stats {
39276+ u32 rx_post_fail;/* number of ethrx buffer alloc failures */
39277+ u32 rx_polls; /* number of times NAPI called poll function */
39278+ u32 rx_events; /* number of ucast rx completion events */
39279+ u32 rx_compl; /* number of rx completion entries processed */
39280+ ulong rx_jiffies;
39281+ u64 rx_bytes;
39282+ u64 rx_bytes_prev;
39283+ u64 rx_pkts;
39284+ u32 rx_rate;
39285+ u32 rx_mcast_pkts;
39286+ u32 rxcp_err; /* Num rx completion entries w/ err set. */
39287+ ulong rx_fps_jiffies; /* jiffies at last FPS calc */
39288+ u32 rx_frags;
39289+ u32 prev_rx_frags;
39290+ u32 rx_fps; /* Rx frags per second */
39291+ u32 rx_drops_no_frags;
39292+};
39293+
39294+struct be_rx_compl_info {
39295+ u32 rss_hash;
39296+ u16 vlan_tag;
39297+ u16 pkt_size;
39298+ u16 rxq_idx;
39299+ u16 port;
39300+ u8 vlanf;
39301+ u8 num_rcvd;
39302+ u8 err;
39303+ u8 ipf;
39304+ u8 tcpf;
39305+ u8 udpf;
39306+ u8 ip_csum;
39307+ u8 l4_csum;
39308+ u8 ipv6;
39309+ u8 vtm;
39310+ u8 pkt_type;
39311+};
39312+
39313 struct be_rx_obj {
39314+ struct be_adapter *adapter;
39315 struct be_queue_info q;
39316 struct be_queue_info cq;
39317- struct be_rx_page_info page_info_tbl[RX_Q_LEN];
39318+ struct be_rx_compl_info rxcp;
39319+ struct be_rx_page_info *page_info_tbl;
39320+ struct net_lro_mgr lro_mgr;
39321+ struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
39322+ struct be_eq_obj rx_eq;
39323+ struct be_rx_stats stats;
39324+ u8 rss_id;
39325+ bool rx_post_starved; /* Zero rx frags have been posted to BE */
39326+ u16 prev_frag_idx;
39327+ u32 cache_line_barrier[16];
39328 };
39329
39330-#define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */
39331+struct be_drv_stats {
39332+ u32 be_on_die_temperature;
39333+ u32 be_tx_events;
39334+ u32 eth_red_drops;
39335+ u32 rx_drops_no_pbuf;
39336+ u32 rx_drops_no_txpb;
39337+ u32 rx_drops_no_erx_descr;
39338+ u32 rx_drops_no_tpre_descr;
39339+ u32 rx_drops_too_many_frags;
39340+ u32 rx_drops_invalid_ring;
39341+ u32 forwarded_packets;
39342+ u32 rx_drops_mtu;
39343+ u32 rx_crc_errors;
39344+ u32 rx_alignment_symbol_errors;
39345+ u32 rx_pause_frames;
39346+ u32 rx_priority_pause_frames;
39347+ u32 rx_control_frames;
39348+ u32 rx_in_range_errors;
39349+ u32 rx_out_range_errors;
39350+ u32 rx_frame_too_long;
39351+ u32 rx_address_match_errors;
39352+ u32 rx_dropped_too_small;
39353+ u32 rx_dropped_too_short;
39354+ u32 rx_dropped_header_too_small;
39355+ u32 rx_dropped_tcp_length;
39356+ u32 rx_dropped_runt;
39357+ u32 rx_ip_checksum_errs;
39358+ u32 rx_tcp_checksum_errs;
39359+ u32 rx_udp_checksum_errs;
39360+ u32 rx_switched_unicast_packets;
39361+ u32 rx_switched_multicast_packets;
39362+ u32 rx_switched_broadcast_packets;
39363+ u32 tx_pauseframes;
39364+ u32 tx_priority_pauseframes;
39365+ u32 tx_controlframes;
39366+ u32 rxpp_fifo_overflow_drop;
39367+ u32 rx_input_fifo_overflow_drop;
39368+ u32 pmem_fifo_overflow_drop;
39369+ u32 jabber_events;
39370+};
39371+
39372+struct be_vf_cfg {
39373+ unsigned char vf_mac_addr[ETH_ALEN];
39374+ u32 vf_if_handle;
39375+ u32 vf_pmac_id;
39376+ u16 vf_def_vid;
39377+ u16 vf_vlan_tag;
39378+ u32 vf_tx_rate;
39379+};
39380+
39381+#define BE_INVALID_PMAC_ID 0xffffffff
39382+#define BE_FLAGS_DCBX (1 << 16)
39383+
39384 struct be_adapter {
39385 struct pci_dev *pdev;
39386 struct net_device *netdev;
39387@@ -228,7 +342,7 @@ struct be_adapter {
39388 u8 __iomem *db; /* Door Bell */
39389 u8 __iomem *pcicfg; /* PCI config space */
39390
39391- spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
39392+ struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
39393 struct be_dma_mem mbox_mem;
39394 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
39395 * is stored for freeing purpose */
39396@@ -238,66 +352,121 @@ struct be_adapter {
39397 spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
39398 spinlock_t mcc_cq_lock;
39399
39400- struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
39401- bool msix_enabled;
39402+ struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
39403+ u32 num_msix_vec;
39404 bool isr_registered;
39405
39406 /* TX Rings */
39407 struct be_eq_obj tx_eq;
39408- struct be_tx_obj tx_obj;
39409+ struct be_tx_obj tx_obj[MAX_TX_QS];
39410+ u8 num_tx_qs;
39411+ u8 prio_tc_map[MAX_TX_QS]; /* prio_tc_map[prio] => tc-id */
39412+ u8 tc_txq_map[MAX_TX_QS]; /* tc_txq_map[tc-id] => txq index */
39413
39414 u32 cache_line_break[8];
39415
39416 /* Rx rings */
39417- struct be_eq_obj rx_eq;
39418- struct be_rx_obj rx_obj;
39419+ struct be_rx_obj rx_obj[MAX_RX_QS]; /* one default non-rss Q */
39420+ u32 num_rx_qs;
39421+
39422+ struct be_dma_mem stats_cmd;
39423+ struct net_device_stats net_stats;
39424+ struct be_drv_stats drv_stats;
39425 u32 big_page_size; /* Compounded page size shared by rx wrbs */
39426- bool rx_post_starved; /* Zero rx frags have been posted to BE */
39427
39428 struct vlan_group *vlan_grp;
39429- u16 num_vlans;
39430+ u16 vlans_added;
39431+ u16 max_vlans; /* Number of vlans supported */
39432 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
39433+ u8 vlan_prio_bmap; /* Available priority BitMap */
39434+ u16 recommended_prio; /* Recommended Priority */
39435+ struct be_dma_mem rx_filter;
39436
39437- struct be_stats_obj stats;
39438 /* Work queue used to perform periodic tasks like getting statistics */
39439 struct delayed_work work;
39440+ u16 work_counter;
39441
39442- /* Ethtool knobs and info */
39443- bool rx_csum; /* BE card must perform rx-checksumming */
39444+ u32 flags;
39445+ bool rx_csum; /* BE card must perform rx-checksumming */
39446+ u32 max_rx_coal;
39447 char fw_ver[FW_VER_LEN];
39448 u32 if_handle; /* Used to configure filtering */
39449 u32 pmac_id; /* MAC addr handle used by BE card */
39450+ u32 beacon_state; /* for set_phys_id */
39451
39452- bool link_up;
39453+ bool eeh_err;
39454+ int link_status;
39455 u32 port_num;
39456+ u32 hba_port_num;
39457 bool promiscuous;
39458- u32 cap;
39459+ bool wol;
39460+ u32 function_mode;
39461+ u32 function_caps;
39462 u32 rx_fc; /* Rx flow control */
39463 u32 tx_fc; /* Tx flow control */
39464+ bool ue_detected;
39465+ bool stats_cmd_sent;
39466+ bool gro_supported;
39467+ int link_speed;
39468+ u8 port_type;
39469+ u8 transceiver;
39470+ u8 autoneg;
39471 u8 generation; /* BladeEngine ASIC generation */
39472+ u32 flash_status;
39473+ struct completion flash_compl;
39474+
39475+ u8 eq_next_idx;
39476+ bool be3_native;
39477+ u16 num_vfs;
39478+ struct be_vf_cfg *vf_cfg;
39479+ u8 is_virtfn;
39480+ u16 pvid;
39481+ u32 sli_family;
39482+ u8 port_name[4];
39483+ char model_number[32];
39484 };
39485
39486 /* BladeEngine Generation numbers */
39487 #define BE_GEN2 2
39488 #define BE_GEN3 3
39489
39490-extern const struct ethtool_ops be_ethtool_ops;
39491+#define ON 1
39492+#define OFF 0
39493+#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
39494+#define lancer_A0_chip(adapter) \
39495+ (adapter->sli_family == LANCER_A0_SLI_FAMILY)
39496
39497-#define drvr_stats(adapter) (&adapter->stats.drvr_stats)
39498+extern struct ethtool_ops be_ethtool_ops;
39499
39500-static inline unsigned int be_pci_func(struct be_adapter *adapter)
39501-{
39502- return PCI_FUNC(adapter->pdev->devfn);
39503-}
39504+#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
39505+#define tx_stats(txo) (&txo->stats)
39506+#define rx_stats(rxo) (&rxo->stats)
39507
39508+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
39509+#define BE_SET_NETDEV_OPS(netdev, ops) be_netdev_ops_init(netdev, ops)
39510+#else
39511 #define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
39512+#endif
39513+
39514+#define for_all_rx_queues(adapter, rxo, i) \
39515+ for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs; \
39516+ i++, rxo++)
39517+
39518+/* Just skip the first default non-rss queue */
39519+#define for_all_rss_queues(adapter, rxo, i) \
39520+ for (i = 0, rxo = &adapter->rx_obj[i+1]; i < (adapter->num_rx_qs - 1);\
39521+ i++, rxo++)
39522+
39523+#define for_all_tx_queues(adapter, txo, i) \
39524+ for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs; \
39525+ i++, txo++)
39526
39527 #define PAGE_SHIFT_4K 12
39528 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
39529
39530 /* Returns number of pages spanned by the data starting at the given addr */
39531-#define PAGES_4K_SPANNED(_address, size) \
39532- ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
39533+#define PAGES_4K_SPANNED(_address, size) \
39534+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
39535 (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
39536
39537 /* Byte offset into the page corresponding to given address */
39538@@ -305,7 +474,7 @@ static inline unsigned int be_pci_func(struct be_adapter *adapter)
39539 ((size_t)(addr) & (PAGE_SIZE_4K-1))
39540
39541 /* Returns bit offset within a DWORD of a bitfield */
39542-#define AMAP_BIT_OFFSET(_struct, field) \
39543+#define AMAP_BIT_OFFSET(_struct, field) \
39544 (((size_t)&(((_struct *)0)->field))%32)
39545
39546 /* Returns the bit mask of the field that is NOT shifted into location. */
39547@@ -356,6 +525,11 @@ static inline void swap_dws(void *wrb, int len)
39548 #endif /* __BIG_ENDIAN */
39549 }
39550
39551+static inline bool vlan_configured(struct be_adapter *adapter)
39552+{
39553+ return adapter->vlan_grp && adapter->vlans_added;
39554+}
39555+
39556 static inline u8 is_tcp_pkt(struct sk_buff *skb)
39557 {
39558 u8 val = 0;
39559@@ -380,9 +554,65 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
39560 return val;
39561 }
39562
39563+static inline u8 is_ipv6_ext_hdr(struct sk_buff *skb)
39564+{
39565+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
39566+ if (ip_hdr(skb)->version == 6)
39567+ return ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
39568+ else
39569+#endif
39570+ return 0;
39571+}
39572+
39573+static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
39574+{
39575+ u32 sli_intf;
39576+
39577+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
39578+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
39579+}
39580+
39581+static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
39582+{
39583+ u32 addr;
39584+
39585+ addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
39586+
39587+ mac[5] = (u8)(addr & 0xFF);
39588+ mac[4] = (u8)((addr >> 8) & 0xFF);
39589+ mac[3] = (u8)((addr >> 16) & 0xFF);
39590+ /* Use the OUI programmed in hardware */
39591+ memcpy(mac, adapter->netdev->dev_addr, 3);
39592+}
39593+
39594+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
39595+ struct sk_buff *skb)
39596+{
39597+ u8 vlan_prio = 0;
39598+ u16 vlan_tag = 0;
39599+
39600+ vlan_tag = vlan_tx_tag_get(skb);
39601+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
39602+ /* If vlan priority provided by OS is NOT in available bmap */
39603+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
39604+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
39605+ adapter->recommended_prio;
39606+
39607+ return vlan_tag;
39608+}
39609+
39610+#define be_physfn(adapter) (!adapter->is_virtfn)
39611+
39612 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
39613 u16 num_popped);
39614-extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
39615+extern void be_link_status_update(struct be_adapter *adapter, int link_status);
39616 extern void netdev_stats_update(struct be_adapter *adapter);
39617+extern void be_parse_stats(struct be_adapter *adapter);
39618 extern int be_load_fw(struct be_adapter *adapter, u8 *func);
39619+
39620+#ifdef CONFIG_PALAU
39621+extern void be_sysfs_create_group(struct be_adapter *adapter);
39622+extern void be_sysfs_remove_group(struct be_adapter *adapter);
39623+#endif
39624+
39625 #endif /* BE_H */
39626diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
39627index 28a0eda..b4ca89c 100644
39628--- a/drivers/net/benet/be_cmds.c
39629+++ b/drivers/net/benet/be_cmds.c
39630@@ -1,30 +1,45 @@
39631 /*
39632- * Copyright (C) 2005 - 2009 ServerEngines
39633+ * Copyright (C) 2005 - 2011 Emulex
39634 * All rights reserved.
39635 *
39636 * This program is free software; you can redistribute it and/or
39637 * modify it under the terms of the GNU General Public License version 2
39638- * as published by the Free Software Foundation. The full GNU General
39639+ * as published by the Free Software Foundation. The full GNU General
39640 * Public License is included in this distribution in the file called COPYING.
39641 *
39642 * Contact Information:
39643- * linux-drivers@serverengines.com
39644+ * linux-drivers@emulex.com
39645 *
39646- * ServerEngines
39647- * 209 N. Fair Oaks Ave
39648- * Sunnyvale, CA 94085
39649+ * Emulex
39650+ * 3333 Susan Street
39651+ * Costa Mesa, CA 92626
39652 */
39653
39654 #include "be.h"
39655 #include "be_cmds.h"
39656
39657+/* Must be a power of 2 or else MODULO will BUG_ON */
39658+static int be_get_temp_freq = 64;
39659+
39660+static inline void *embedded_payload(struct be_mcc_wrb *wrb)
39661+{
39662+ return wrb->payload.embedded_payload;
39663+}
39664+
39665 static void be_mcc_notify(struct be_adapter *adapter)
39666 {
39667 struct be_queue_info *mccq = &adapter->mcc_obj.q;
39668 u32 val = 0;
39669
39670+ if (adapter->eeh_err) {
39671+ dev_info(&adapter->pdev->dev, "Error in Card Detected! Cannot issue commands\n");
39672+ return;
39673+ }
39674+
39675 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
39676 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
39677+
39678+ wmb();
39679 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
39680 }
39681
39682@@ -59,21 +74,67 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
39683
39684 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
39685 CQE_STATUS_COMPL_MASK;
39686+
39687+ if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
39688+ (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
39689+ adapter->flash_status = compl_status;
39690+ complete(&adapter->flash_compl);
39691+ }
39692+
39693 if (compl_status == MCC_STATUS_SUCCESS) {
39694- if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
39695- struct be_cmd_resp_get_stats *resp =
39696- adapter->stats.cmd.va;
39697- be_dws_le_to_cpu(&resp->hw_stats,
39698- sizeof(resp->hw_stats));
39699+ if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) &&
39700+ (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
39701+ if (adapter->generation == BE_GEN3) {
39702+ struct be_cmd_resp_get_stats_v1 *resp =
39703+ adapter->stats_cmd.va;
39704+
39705+ be_dws_le_to_cpu(&resp->hw_stats,
39706+ sizeof(resp->hw_stats));
39707+ } else {
39708+ struct be_cmd_resp_get_stats_v0 *resp =
39709+ adapter->stats_cmd.va;
39710+
39711+ be_dws_le_to_cpu(&resp->hw_stats,
39712+ sizeof(resp->hw_stats));
39713+ }
39714+ be_parse_stats(adapter);
39715 netdev_stats_update(adapter);
39716+ adapter->stats_cmd_sent = false;
39717+ }
39718+ if (compl->tag0 ==
39719+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
39720+ struct be_mcc_wrb *mcc_wrb =
39721+ queue_index_node(&adapter->mcc_obj.q,
39722+ compl->tag1);
39723+ struct be_cmd_resp_get_cntl_addnl_attribs *resp =
39724+ embedded_payload(mcc_wrb);
39725+ adapter->drv_stats.be_on_die_temperature =
39726+ resp->on_die_temperature;
39727+ }
39728+ } else {
39729+ if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
39730+ be_get_temp_freq = 0;
39731+
39732+ if (compl->tag1 == MCC_WRB_PASS_THRU)
39733+ goto done;
39734+
39735+ if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
39736+ compl_status == MCC_STATUS_ILLEGAL_REQUEST)
39737+ goto done;
39738+
39739+ if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
39740+ dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
39741+ "permitted to execute this cmd (opcode %d)\n",
39742+ compl->tag0);
39743+ } else {
39744+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
39745+ CQE_STATUS_EXTD_MASK;
39746+ dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
39747+ "status %d, extd-status %d\n",
39748+ compl->tag0, compl_status, extd_status);
39749 }
39750- } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
39751- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
39752- CQE_STATUS_EXTD_MASK;
39753- dev_warn(&adapter->pdev->dev,
39754- "Error in cmd completion: status(compl/extd)=%d/%d\n",
39755- compl_status, extd_status);
39756 }
39757+done:
39758 return compl_status;
39759 }
39760
39761@@ -82,7 +143,70 @@ static void be_async_link_state_process(struct be_adapter *adapter,
39762 struct be_async_event_link_state *evt)
39763 {
39764 be_link_status_update(adapter,
39765- evt->port_link_status == ASYNC_EVENT_LINK_UP);
39766+ ((evt->port_link_status & ~ASYNC_EVENT_LOGICAL) ==
39767+ ASYNC_EVENT_LINK_UP ? LINK_UP : LINK_DOWN));
39768+}
39769+
39770+/* Grp5 CoS Priority evt */
39771+static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
39772+ struct be_async_event_grp5_cos_priority *evt)
39773+{
39774+ if (evt->valid) {
39775+ adapter->vlan_prio_bmap = evt->available_priority_bmap;
39776+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
39777+ adapter->recommended_prio =
39778+ evt->reco_default_priority << VLAN_PRIO_SHIFT;
39779+ }
39780+}
39781+
39782+/* Grp5 QOS Speed evt */
39783+static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
39784+ struct be_async_event_grp5_qos_link_speed *evt)
39785+{
39786+ if (evt->physical_port == adapter->hba_port_num) {
39787+ /* qos_link_speed is in units of 10 Mbps */
39788+ adapter->link_speed = evt->qos_link_speed * 10;
39789+ }
39790+}
39791+
39792+/*Grp5 PVID evt*/
39793+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
39794+ struct be_async_event_grp5_pvid_state *evt)
39795+{
39796+ if (evt->enabled)
39797+ adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK ;
39798+ else
39799+ adapter->pvid = 0;
39800+}
39801+
39802+static void be_async_grp5_evt_process(struct be_adapter *adapter,
39803+ u32 trailer, struct be_mcc_compl *evt)
39804+{
39805+ u8 event_type = 0;
39806+
39807+ event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
39808+ ASYNC_TRAILER_EVENT_TYPE_MASK;
39809+
39810+ switch (event_type) {
39811+ case ASYNC_EVENT_COS_PRIORITY:
39812+ be_async_grp5_cos_priority_process(adapter,
39813+ (struct be_async_event_grp5_cos_priority *)evt);
39814+ break;
39815+ case ASYNC_EVENT_QOS_SPEED:
39816+ be_async_grp5_qos_speed_process(adapter,
39817+ (struct be_async_event_grp5_qos_link_speed *)evt);
39818+ break;
39819+ case ASYNC_EVENT_PVID_STATE:
39820+ be_async_grp5_pvid_state_process(adapter,
39821+ (struct be_async_event_grp5_pvid_state *)evt);
39822+ break;
39823+ case GRP5_TYPE_PRIO_TC_MAP:
39824+ memcpy(adapter->prio_tc_map, evt, MAX_TX_QS);
39825+ break;
39826+ default:
39827+ printk(KERN_WARNING "Unknown grp5 event!\n");
39828+ break;
39829+ }
39830 }
39831
39832 static inline bool is_link_state_evt(u32 trailer)
39833@@ -92,6 +216,13 @@ static inline bool is_link_state_evt(u32 trailer)
39834 ASYNC_EVENT_CODE_LINK_STATE);
39835 }
39836
39837+static inline bool is_grp5_evt(u32 trailer)
39838+{
39839+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
39840+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
39841+ ASYNC_EVENT_CODE_GRP_5);
39842+}
39843+
39844 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
39845 {
39846 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
39847@@ -104,46 +235,67 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
39848 return NULL;
39849 }
39850
39851-int be_process_mcc(struct be_adapter *adapter)
39852+void be_async_mcc_enable(struct be_adapter *adapter)
39853+{
39854+ spin_lock_bh(&adapter->mcc_cq_lock);
39855+
39856+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
39857+ adapter->mcc_obj.rearm_cq = true;
39858+
39859+ spin_unlock_bh(&adapter->mcc_cq_lock);
39860+}
39861+
39862+void be_async_mcc_disable(struct be_adapter *adapter)
39863+{
39864+ adapter->mcc_obj.rearm_cq = false;
39865+}
39866+
39867+int be_process_mcc(struct be_adapter *adapter, int *status)
39868 {
39869 struct be_mcc_compl *compl;
39870- int num = 0, status = 0;
39871+ int num = 0;
39872+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
39873
39874 spin_lock_bh(&adapter->mcc_cq_lock);
39875 while ((compl = be_mcc_compl_get(adapter))) {
39876 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
39877 /* Interpret flags as an async trailer */
39878- BUG_ON(!is_link_state_evt(compl->flags));
39879-
39880- /* Interpret compl as a async link evt */
39881- be_async_link_state_process(adapter,
39882+ if (is_link_state_evt(compl->flags))
39883+ be_async_link_state_process(adapter,
39884 (struct be_async_event_link_state *) compl);
39885+ else if (is_grp5_evt(compl->flags))
39886+ be_async_grp5_evt_process(adapter,
39887+ compl->flags, compl);
39888+
39889 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
39890- status = be_mcc_compl_process(adapter, compl);
39891- atomic_dec(&adapter->mcc_obj.q.used);
39892+ *status = be_mcc_compl_process(adapter, compl);
39893+ atomic_dec(&mcc_obj->q.used);
39894 }
39895 be_mcc_compl_use(compl);
39896 num++;
39897 }
39898
39899- if (num)
39900- be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
39901-
39902 spin_unlock_bh(&adapter->mcc_cq_lock);
39903- return status;
39904+ return num;
39905 }
39906
39907 /* Wait till no more pending mcc requests are present */
39908 static int be_mcc_wait_compl(struct be_adapter *adapter)
39909 {
39910 #define mcc_timeout 120000 /* 12s timeout */
39911- int i, status;
39912+ int i, num, status = 0;
39913+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
39914+
39915+ if (adapter->eeh_err)
39916+ return -EIO;
39917+
39918 for (i = 0; i < mcc_timeout; i++) {
39919- status = be_process_mcc(adapter);
39920- if (status)
39921- return status;
39922+ num = be_process_mcc(adapter, &status);
39923+ if (num)
39924+ be_cq_notify(adapter, mcc_obj->cq.id,
39925+ mcc_obj->rearm_cq, num);
39926
39927- if (atomic_read(&adapter->mcc_obj.q.used) == 0)
39928+ if (atomic_read(&mcc_obj->q.used) == 0)
39929 break;
39930 udelay(100);
39931 }
39932@@ -151,7 +303,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
39933 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
39934 return -1;
39935 }
39936- return 0;
39937+ return status;
39938 }
39939
39940 /* Notify MCC requests and wait for completion */
39941@@ -163,23 +315,34 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
39942
39943 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
39944 {
39945- int cnt = 0, wait = 5;
39946+ int msecs = 0;
39947 u32 ready;
39948
39949+ if (adapter->eeh_err) {
39950+ dev_err(&adapter->pdev->dev, "Error detected in card.Cannot issue commands\n");
39951+ return -EIO;
39952+ }
39953 do {
39954- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
39955+ ready = ioread32(db);
39956+ if (ready == 0xffffffff) {
39957+ dev_err(&adapter->pdev->dev,
39958+ "pci slot disconnected\n");
39959+ return -1;
39960+ }
39961+
39962+ ready &= MPU_MAILBOX_DB_RDY_MASK;
39963 if (ready)
39964 break;
39965
39966- if (cnt > 4000000) {
39967+ if (msecs > 4000) {
39968 dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
39969+ be_detect_dump_ue(adapter);
39970 return -1;
39971 }
39972
39973- if (cnt > 50)
39974- wait = 200;
39975- cnt += wait;
39976- udelay(wait);
39977+ set_current_state(TASK_UNINTERRUPTIBLE);
39978+ schedule_timeout(msecs_to_jiffies(1));
39979+ msecs++;
39980 } while (true);
39981
39982 return 0;
39983@@ -198,6 +361,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
39984 struct be_mcc_mailbox *mbox = mbox_mem->va;
39985 struct be_mcc_compl *compl = &mbox->compl;
39986
39987+ /* wait for ready to be set */
39988+ status = be_mbox_db_ready_wait(adapter, db);
39989+ if (status != 0)
39990+ return status;
39991+
39992 val |= MPU_MAILBOX_DB_HI_MASK;
39993 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
39994 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
39995@@ -232,7 +400,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
39996
39997 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
39998 {
39999- u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40000+ u32 sem;
40001+
40002+ if (lancer_chip(adapter))
40003+ sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
40004+ else
40005+ sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
40006
40007 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
40008 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
40009@@ -245,30 +418,29 @@ int be_cmd_POST(struct be_adapter *adapter)
40010 {
40011 u16 stage;
40012 int status, timeout = 0;
40013+ struct device *dev = &adapter->pdev->dev;
40014
40015 do {
40016 status = be_POST_stage_get(adapter, &stage);
40017 if (status) {
40018- dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
40019- stage);
40020+ dev_err(dev, "POST error; stage=0x%x\n", stage);
40021 return -1;
40022 } else if (stage != POST_STAGE_ARMFW_RDY) {
40023 set_current_state(TASK_INTERRUPTIBLE);
40024- schedule_timeout(2 * HZ);
40025+ if (schedule_timeout(2 * HZ)) {
40026+ dev_err(dev, "POST cmd aborted\n");
40027+ return -EINTR;
40028+ }
40029 timeout += 2;
40030 } else {
40031 return 0;
40032 }
40033- } while (timeout < 20);
40034+ } while (timeout < 40);
40035
40036- dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
40037+ dev_err(dev, "POST timeout; stage=0x%x\n", stage);
40038 return -1;
40039 }
40040
40041-static inline void *embedded_payload(struct be_mcc_wrb *wrb)
40042-{
40043- return wrb->payload.embedded_payload;
40044-}
40045
40046 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40047 {
40048@@ -277,7 +449,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
40049
40050 /* Don't touch the hdr after it's prepared */
40051 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40052- bool embedded, u8 sge_cnt)
40053+ bool embedded, u8 sge_cnt, u32 opcode)
40054 {
40055 if (embedded)
40056 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
40057@@ -285,7 +457,8 @@ static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
40058 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
40059 MCC_WRB_SGE_CNT_SHIFT;
40060 wrb->payload_length = payload_len;
40061- be_dws_cpu_to_le(wrb, 20);
40062+ wrb->tag0 = opcode;
40063+ be_dws_cpu_to_le(wrb, 8);
40064 }
40065
40066 /* Don't touch the hdr after it's prepared */
40067@@ -295,6 +468,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
40068 req_hdr->opcode = opcode;
40069 req_hdr->subsystem = subsystem;
40070 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
40071+ req_hdr->version = 0;
40072 }
40073
40074 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
40075@@ -349,7 +523,11 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40076 struct be_queue_info *mccq = &adapter->mcc_obj.q;
40077 struct be_mcc_wrb *wrb;
40078
40079- BUG_ON(atomic_read(&mccq->used) >= mccq->len);
40080+ if (atomic_read(&mccq->used) >= mccq->len) {
40081+ dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
40082+ return NULL;
40083+ }
40084+
40085 wrb = queue_head_node(mccq);
40086 queue_head_inc(mccq);
40087 atomic_inc(&mccq->used);
40088@@ -357,6 +535,59 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
40089 return wrb;
40090 }
40091
40092+/* Tell fw we're about to start firing cmds by writing a
40093+ * special pattern across the wrb hdr; uses mbox
40094+ */
40095+int be_cmd_fw_init(struct be_adapter *adapter)
40096+{
40097+ u8 *wrb;
40098+ int status;
40099+
40100+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40101+ return -1;
40102+
40103+ wrb = (u8 *)wrb_from_mbox(adapter);
40104+ *wrb++ = 0xFF;
40105+ *wrb++ = 0x12;
40106+ *wrb++ = 0x34;
40107+ *wrb++ = 0xFF;
40108+ *wrb++ = 0xFF;
40109+ *wrb++ = 0x56;
40110+ *wrb++ = 0x78;
40111+ *wrb = 0xFF;
40112+
40113+ status = be_mbox_notify_wait(adapter);
40114+
40115+ mutex_unlock(&adapter->mbox_lock);
40116+ return status;
40117+}
40118+
40119+/* Tell fw we're done with firing cmds by writing a
40120+ * special pattern across the wrb hdr; uses mbox
40121+ */
40122+int be_cmd_fw_clean(struct be_adapter *adapter)
40123+{
40124+ u8 *wrb;
40125+ int status;
40126+
40127+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40128+ return -1;
40129+
40130+ wrb = (u8 *)wrb_from_mbox(adapter);
40131+ *wrb++ = 0xFF;
40132+ *wrb++ = 0xAA;
40133+ *wrb++ = 0xBB;
40134+ *wrb++ = 0xFF;
40135+ *wrb++ = 0xFF;
40136+ *wrb++ = 0xCC;
40137+ *wrb++ = 0xDD;
40138+ *wrb = 0xFF;
40139+
40140+ status = be_mbox_notify_wait(adapter);
40141+
40142+ mutex_unlock(&adapter->mbox_lock);
40143+ return status;
40144+}
40145 int be_cmd_eq_create(struct be_adapter *adapter,
40146 struct be_queue_info *eq, int eq_delay)
40147 {
40148@@ -365,20 +596,19 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40149 struct be_dma_mem *q_mem = &eq->dma_mem;
40150 int status;
40151
40152- spin_lock(&adapter->mbox_lock);
40153+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40154+ return -1;
40155
40156 wrb = wrb_from_mbox(adapter);
40157 req = embedded_payload(wrb);
40158
40159- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40160+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
40161
40162 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40163 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
40164
40165 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40166
40167- AMAP_SET_BITS(struct amap_eq_context, func, req->context,
40168- be_pci_func(adapter));
40169 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
40170 /* 4byte eqe*/
40171 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
40172@@ -397,7 +627,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
40173 eq->created = true;
40174 }
40175
40176- spin_unlock(&adapter->mbox_lock);
40177+ mutex_unlock(&adapter->mbox_lock);
40178 return status;
40179 }
40180
40181@@ -409,12 +639,14 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40182 struct be_cmd_req_mac_query *req;
40183 int status;
40184
40185- spin_lock(&adapter->mbox_lock);
40186+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40187+ return -1;
40188
40189 wrb = wrb_from_mbox(adapter);
40190 req = embedded_payload(wrb);
40191
40192- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40193+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40194+ OPCODE_COMMON_NTWK_MAC_QUERY);
40195
40196 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40197 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
40198@@ -433,13 +665,13 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
40199 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
40200 }
40201
40202- spin_unlock(&adapter->mbox_lock);
40203+ mutex_unlock(&adapter->mbox_lock);
40204 return status;
40205 }
40206
40207 /* Uses synchronous MCCQ */
40208 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40209- u32 if_id, u32 *pmac_id)
40210+ u32 if_id, u32 *pmac_id, u32 domain)
40211 {
40212 struct be_mcc_wrb *wrb;
40213 struct be_cmd_req_pmac_add *req;
40214@@ -448,13 +680,19 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40215 spin_lock_bh(&adapter->mcc_lock);
40216
40217 wrb = wrb_from_mccq(adapter);
40218+ if (!wrb) {
40219+ status = -EBUSY;
40220+ goto err;
40221+ }
40222 req = embedded_payload(wrb);
40223
40224- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40225+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40226+ OPCODE_COMMON_NTWK_PMAC_ADD);
40227
40228 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40229 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
40230
40231+ req->hdr.domain = domain;
40232 req->if_id = cpu_to_le32(if_id);
40233 memcpy(req->mac_address, mac_addr, ETH_ALEN);
40234
40235@@ -464,12 +702,13 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
40236 *pmac_id = le32_to_cpu(resp->pmac_id);
40237 }
40238
40239+err:
40240 spin_unlock_bh(&adapter->mcc_lock);
40241 return status;
40242 }
40243
40244 /* Uses synchronous MCCQ */
40245-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40246+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
40247 {
40248 struct be_mcc_wrb *wrb;
40249 struct be_cmd_req_pmac_del *req;
40250@@ -478,20 +717,26 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
40251 spin_lock_bh(&adapter->mcc_lock);
40252
40253 wrb = wrb_from_mccq(adapter);
40254+ if (!wrb) {
40255+ status = -EBUSY;
40256+ goto err;
40257+ }
40258 req = embedded_payload(wrb);
40259
40260- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40261+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40262+ OPCODE_COMMON_NTWK_PMAC_DEL);
40263
40264 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40265 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
40266
40267+ req->hdr.domain = dom;
40268 req->if_id = cpu_to_le32(if_id);
40269 req->pmac_id = cpu_to_le32(pmac_id);
40270
40271 status = be_mcc_notify_wait(adapter);
40272
40273+err:
40274 spin_unlock_bh(&adapter->mcc_lock);
40275-
40276 return status;
40277 }
40278
40279@@ -506,29 +751,51 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40280 void *ctxt;
40281 int status;
40282
40283- spin_lock(&adapter->mbox_lock);
40284+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40285+ return -1;
40286
40287 wrb = wrb_from_mbox(adapter);
40288 req = embedded_payload(wrb);
40289 ctxt = &req->context;
40290
40291- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40292+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40293+ OPCODE_COMMON_CQ_CREATE);
40294
40295 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40296 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
40297
40298 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40299
40300- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
40301- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
40302- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
40303- __ilog2_u32(cq->len/256));
40304- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
40305- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
40306- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
40307- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
40308- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
40309- AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
40310+ if (lancer_chip(adapter)) {
40311+ req->hdr.version = 2;
40312+ req->page_size = 1; /* 1 for 4K */
40313+ AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
40314+ coalesce_wm);
40315+ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
40316+ no_delay);
40317+ AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
40318+ __ilog2_u32(cq->len/256));
40319+ AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
40320+ AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
40321+ ctxt, 1);
40322+ AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
40323+ ctxt, eq->id);
40324+ AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
40325+ } else {
40326+ AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
40327+ coalesce_wm);
40328+ AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
40329+ ctxt, no_delay);
40330+ AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
40331+ __ilog2_u32(cq->len/256));
40332+ AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
40333+ AMAP_SET_BITS(struct amap_cq_context_be, solevent,
40334+ ctxt, sol_evts);
40335+ AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
40336+ AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
40337+ AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
40338+ }
40339+
40340 be_dws_cpu_to_le(ctxt, sizeof(req->context));
40341
40342 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40343@@ -540,8 +807,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
40344 cq->created = true;
40345 }
40346
40347- spin_unlock(&adapter->mbox_lock);
40348-
40349+ mutex_unlock(&adapter->mbox_lock);
40350 return status;
40351 }
40352
40353@@ -553,7 +819,68 @@ static u32 be_encoded_q_len(int q_len)
40354 return len_encoded;
40355 }
40356
40357-int be_cmd_mccq_create(struct be_adapter *adapter,
40358+int be_cmd_mccq_ext_create(struct be_adapter *adapter,
40359+ struct be_queue_info *mccq,
40360+ struct be_queue_info *cq)
40361+{
40362+ struct be_mcc_wrb *wrb;
40363+ struct be_cmd_req_mcc_ext_create *req;
40364+ struct be_dma_mem *q_mem = &mccq->dma_mem;
40365+ void *ctxt;
40366+ int status;
40367+
40368+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40369+ return -1;
40370+
40371+ wrb = wrb_from_mbox(adapter);
40372+ req = embedded_payload(wrb);
40373+ ctxt = &req->context;
40374+
40375+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40376+ OPCODE_COMMON_MCC_CREATE_EXT);
40377+
40378+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40379+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
40380+
40381+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40382+ if (lancer_chip(adapter)) {
40383+ req->hdr.version = 1;
40384+ req->cq_id = cpu_to_le16(cq->id);
40385+
40386+ AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
40387+ be_encoded_q_len(mccq->len));
40388+ AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
40389+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
40390+ ctxt, cq->id);
40391+ AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
40392+ ctxt, 1);
40393+
40394+ } else {
40395+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
40396+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
40397+ be_encoded_q_len(mccq->len));
40398+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
40399+ }
40400+
40401+ /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
40402+ req->async_event_bitmap[0] |= cpu_to_le32(0x00000022);
40403+
40404+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
40405+
40406+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40407+
40408+ status = be_mbox_notify_wait(adapter);
40409+ if (!status) {
40410+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
40411+ mccq->id = le16_to_cpu(resp->id);
40412+ mccq->created = true;
40413+ }
40414+
40415+ mutex_unlock(&adapter->mbox_lock);
40416+ return status;
40417+}
40418+
40419+int be_cmd_mccq_org_create(struct be_adapter *adapter,
40420 struct be_queue_info *mccq,
40421 struct be_queue_info *cq)
40422 {
40423@@ -563,24 +890,25 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
40424 void *ctxt;
40425 int status;
40426
40427- spin_lock(&adapter->mbox_lock);
40428+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40429+ return -1;
40430
40431 wrb = wrb_from_mbox(adapter);
40432 req = embedded_payload(wrb);
40433 ctxt = &req->context;
40434
40435- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40436+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40437+ OPCODE_COMMON_MCC_CREATE);
40438
40439 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40440 OPCODE_COMMON_MCC_CREATE, sizeof(*req));
40441
40442- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
40443+ req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
40444
40445- AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
40446- AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
40447- AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
40448- be_encoded_q_len(mccq->len));
40449- AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
40450+ AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
40451+ AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
40452+ be_encoded_q_len(mccq->len));
40453+ AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
40454
40455 be_dws_cpu_to_le(ctxt, sizeof(req->context));
40456
40457@@ -592,75 +920,93 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
40458 mccq->id = le16_to_cpu(resp->id);
40459 mccq->created = true;
40460 }
40461- spin_unlock(&adapter->mbox_lock);
40462
40463+ mutex_unlock(&adapter->mbox_lock);
40464 return status;
40465 }
40466
40467-int be_cmd_txq_create(struct be_adapter *adapter,
40468- struct be_queue_info *txq,
40469+int be_cmd_mccq_create(struct be_adapter *adapter,
40470+ struct be_queue_info *mccq,
40471 struct be_queue_info *cq)
40472 {
40473+ int status;
40474+
40475+ status = be_cmd_mccq_ext_create(adapter, mccq, cq);
40476+ if (status && !lancer_chip(adapter)) {
40477+ dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
40478+ "or newer to avoid conflicting priorities between NIC "
40479+ "and FCoE traffic");
40480+ status = be_cmd_mccq_org_create(adapter, mccq, cq);
40481+ }
40482+ return status;
40483+}
40484+
40485+int be_cmd_txq_create(struct be_adapter *adapter, struct be_queue_info *txq,
40486+ struct be_queue_info *cq, u8 *tc_id)
40487+{
40488 struct be_mcc_wrb *wrb;
40489 struct be_cmd_req_eth_tx_create *req;
40490 struct be_dma_mem *q_mem = &txq->dma_mem;
40491- void *ctxt;
40492 int status;
40493
40494- spin_lock(&adapter->mbox_lock);
40495+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40496+ return -1;
40497
40498 wrb = wrb_from_mbox(adapter);
40499 req = embedded_payload(wrb);
40500- ctxt = &req->context;
40501-
40502- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40503
40504+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_TX_CREATE);
40505 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
40506 sizeof(*req));
40507
40508- req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
40509+ if (adapter->flags & BE_FLAGS_DCBX || lancer_chip(adapter)) {
40510+ req->hdr.version = 1;
40511+ req->if_id = cpu_to_le16(adapter->if_handle);
40512+ }
40513+ if (adapter->flags & BE_FLAGS_DCBX)
40514+ req->type = cpu_to_le16(ETX_QUEUE_TYPE_PRIORITY);
40515+ else
40516+ req->type = cpu_to_le16(ETX_QUEUE_TYPE_STANDARD);
40517 req->ulp_num = BE_ULP1_NUM;
40518- req->type = BE_ETH_TX_RING_TYPE_STANDARD;
40519-
40520- AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
40521- be_encoded_q_len(txq->len));
40522- AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
40523- be_pci_func(adapter));
40524- AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
40525- AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
40526-
40527- be_dws_cpu_to_le(ctxt, sizeof(req->context));
40528-
40529+ req->cq_id = cpu_to_le16(cq->id);
40530+ req->queue_size = be_encoded_q_len(txq->len);
40531+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
40532 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
40533
40534 status = be_mbox_notify_wait(adapter);
40535 if (!status) {
40536 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
40537 txq->id = le16_to_cpu(resp->cid);
40538+ if (adapter->flags & BE_FLAGS_DCBX)
40539+ *tc_id = resp->tc_id;
40540 txq->created = true;
40541 }
40542
40543- spin_unlock(&adapter->mbox_lock);
40544-
40545+ mutex_unlock(&adapter->mbox_lock);
40546 return status;
40547 }
40548
40549-/* Uses mbox */
40550+/* Uses MCC */
40551 int be_cmd_rxq_create(struct be_adapter *adapter,
40552 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
40553- u16 max_frame_size, u32 if_id, u32 rss)
40554+ u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
40555 {
40556 struct be_mcc_wrb *wrb;
40557 struct be_cmd_req_eth_rx_create *req;
40558 struct be_dma_mem *q_mem = &rxq->dma_mem;
40559 int status;
40560
40561- spin_lock(&adapter->mbox_lock);
40562+ spin_lock_bh(&adapter->mcc_lock);
40563
40564- wrb = wrb_from_mbox(adapter);
40565+ wrb = wrb_from_mccq(adapter);
40566+ if (!wrb) {
40567+ status = -EBUSY;
40568+ goto err;
40569+ }
40570 req = embedded_payload(wrb);
40571
40572- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40573+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40574+ OPCODE_ETH_RX_CREATE);
40575
40576 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
40577 sizeof(*req));
40578@@ -673,15 +1019,16 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
40579 req->max_frame_size = cpu_to_le16(max_frame_size);
40580 req->rss_queue = cpu_to_le32(rss);
40581
40582- status = be_mbox_notify_wait(adapter);
40583+ status = be_mcc_notify_wait(adapter);
40584 if (!status) {
40585 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
40586 rxq->id = le16_to_cpu(resp->id);
40587 rxq->created = true;
40588+ *rss_id = resp->rss_id;
40589 }
40590
40591- spin_unlock(&adapter->mbox_lock);
40592-
40593+err:
40594+ spin_unlock_bh(&adapter->mcc_lock);
40595 return status;
40596 }
40597
40598@@ -696,13 +1043,12 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
40599 u8 subsys = 0, opcode = 0;
40600 int status;
40601
40602- spin_lock(&adapter->mbox_lock);
40603+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40604+ return -1;
40605
40606 wrb = wrb_from_mbox(adapter);
40607 req = embedded_payload(wrb);
40608
40609- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40610-
40611 switch (queue_type) {
40612 case QTYPE_EQ:
40613 subsys = CMD_SUBSYSTEM_COMMON;
40614@@ -727,13 +1073,47 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
40615 default:
40616 BUG();
40617 }
40618+
40619+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
40620+
40621 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
40622 req->id = cpu_to_le16(q->id);
40623
40624 status = be_mbox_notify_wait(adapter);
40625+ if (!status)
40626+ q->created = false;
40627
40628- spin_unlock(&adapter->mbox_lock);
40629+ mutex_unlock(&adapter->mbox_lock);
40630+ return status;
40631+}
40632
40633+/* Uses MCC */
40634+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
40635+{
40636+ struct be_mcc_wrb *wrb;
40637+ struct be_cmd_req_q_destroy *req;
40638+ int status;
40639+
40640+ spin_lock_bh(&adapter->mcc_lock);
40641+
40642+ wrb = wrb_from_mccq(adapter);
40643+ if (!wrb) {
40644+ status = -EBUSY;
40645+ goto err;
40646+ }
40647+ req = embedded_payload(wrb);
40648+
40649+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
40650+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
40651+ sizeof(*req));
40652+ req->id = cpu_to_le16(q->id);
40653+
40654+ status = be_mcc_notify_wait(adapter);
40655+ if (!status)
40656+ q->created = false;
40657+
40658+err:
40659+ spin_unlock_bh(&adapter->mcc_lock);
40660 return status;
40661 }
40662
40663@@ -741,22 +1121,26 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
40664 * Uses mbox
40665 */
40666 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
40667- u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
40668+ u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
40669+ u32 domain)
40670 {
40671 struct be_mcc_wrb *wrb;
40672 struct be_cmd_req_if_create *req;
40673 int status;
40674
40675- spin_lock(&adapter->mbox_lock);
40676+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40677+ return -1;
40678
40679 wrb = wrb_from_mbox(adapter);
40680 req = embedded_payload(wrb);
40681
40682- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40683+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40684+ OPCODE_COMMON_NTWK_INTERFACE_CREATE);
40685
40686 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40687 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
40688
40689+ req->hdr.domain = domain;
40690 req->capability_flags = cpu_to_le32(cap_flags);
40691 req->enable_flags = cpu_to_le32(en_flags);
40692 req->pmac_invalid = pmac_invalid;
40693@@ -771,33 +1155,35 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
40694 *pmac_id = le32_to_cpu(resp->pmac_id);
40695 }
40696
40697- spin_unlock(&adapter->mbox_lock);
40698+ mutex_unlock(&adapter->mbox_lock);
40699 return status;
40700 }
40701
40702 /* Uses mbox */
40703-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
40704+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
40705 {
40706 struct be_mcc_wrb *wrb;
40707 struct be_cmd_req_if_destroy *req;
40708 int status;
40709
40710- spin_lock(&adapter->mbox_lock);
40711+ if (mutex_lock_interruptible(&adapter->mbox_lock))
40712+ return -1;
40713
40714 wrb = wrb_from_mbox(adapter);
40715 req = embedded_payload(wrb);
40716
40717- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40718+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40719+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
40720
40721 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40722 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
40723
40724+ req->hdr.domain = domain;
40725 req->interface_id = cpu_to_le32(interface_id);
40726
40727 status = be_mbox_notify_wait(adapter);
40728
40729- spin_unlock(&adapter->mbox_lock);
40730-
40731+ mutex_unlock(&adapter->mbox_lock);
40732 return status;
40733 }
40734
40735@@ -808,33 +1194,48 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
40736 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
40737 {
40738 struct be_mcc_wrb *wrb;
40739- struct be_cmd_req_get_stats *req;
40740+ struct be_cmd_req_hdr *hdr;
40741 struct be_sge *sge;
40742+ int status = 0;
40743+
40744+ if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
40745+ be_cmd_get_die_temperature(adapter);
40746
40747 spin_lock_bh(&adapter->mcc_lock);
40748
40749 wrb = wrb_from_mccq(adapter);
40750- req = nonemb_cmd->va;
40751+ if (!wrb) {
40752+ status = -EBUSY;
40753+ goto err;
40754+ }
40755+ hdr = nonemb_cmd->va;
40756 sge = nonembedded_sgl(wrb);
40757
40758- be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
40759- wrb->tag0 = OPCODE_ETH_GET_STATISTICS;
40760+ be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
40761+ OPCODE_ETH_GET_STATISTICS);
40762
40763- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
40764- OPCODE_ETH_GET_STATISTICS, sizeof(*req));
40765+ be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
40766+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
40767+
40768+ if (adapter->generation == BE_GEN3)
40769+ hdr->version = 1;
40770+
40771+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
40772 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
40773 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
40774 sge->len = cpu_to_le32(nonemb_cmd->size);
40775
40776 be_mcc_notify(adapter);
40777+ adapter->stats_cmd_sent = true;
40778
40779+err:
40780 spin_unlock_bh(&adapter->mcc_lock);
40781- return 0;
40782+ return status;
40783 }
40784
40785 /* Uses synchronous mcc */
40786 int be_cmd_link_status_query(struct be_adapter *adapter,
40787- bool *link_up)
40788+ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom)
40789 {
40790 struct be_mcc_wrb *wrb;
40791 struct be_cmd_req_link_status *req;
40792@@ -843,50 +1244,216 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
40793 spin_lock_bh(&adapter->mcc_lock);
40794
40795 wrb = wrb_from_mccq(adapter);
40796+ if (!wrb) {
40797+ status = -EBUSY;
40798+ goto err;
40799+ }
40800 req = embedded_payload(wrb);
40801
40802- *link_up = false;
40803+ *link_status = LINK_DOWN;
40804
40805- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40806+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40807+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
40808
40809 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40810 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
40811
40812+ req->hdr.domain = dom;
40813+
40814 status = be_mcc_notify_wait(adapter);
40815 if (!status) {
40816 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
40817- if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
40818- *link_up = true;
40819+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
40820+ *link_status = LINK_UP;
40821+ *link_speed = le16_to_cpu(resp->link_speed);
40822+ *mac_speed = resp->mac_speed;
40823+ }
40824 }
40825
40826+err:
40827 spin_unlock_bh(&adapter->mcc_lock);
40828 return status;
40829 }
40830
40831-/* Uses Mbox */
40832-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
40833+/* Uses synchronous mcc */
40834+int be_cmd_get_die_temperature(struct be_adapter *adapter)
40835+{
40836+ struct be_mcc_wrb *wrb;
40837+ struct be_cmd_req_get_cntl_addnl_attribs *req;
40838+ u16 mccq_index;
40839+ int status;
40840+
40841+ spin_lock_bh(&adapter->mcc_lock);
40842+
40843+ mccq_index = adapter->mcc_obj.q.head;
40844+
40845+ wrb = wrb_from_mccq(adapter);
40846+ if (!wrb) {
40847+ status = -EBUSY;
40848+ goto err;
40849+ }
40850+ req = embedded_payload(wrb);
40851+
40852+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40853+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
40854+
40855+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40856+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
40857+
40858+ wrb->tag1 = mccq_index;
40859+
40860+ be_mcc_notify(adapter);
40861+
40862+err:
40863+ spin_unlock_bh(&adapter->mcc_lock);
40864+ return status;
40865+}
40866+
40867+
40868+/* Uses synchronous mcc */
40869+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
40870+{
40871+ struct be_mcc_wrb *wrb;
40872+ struct be_cmd_req_get_fat *req;
40873+ int status;
40874+
40875+ spin_lock_bh(&adapter->mcc_lock);
40876+
40877+ wrb = wrb_from_mccq(adapter);
40878+ if (!wrb) {
40879+ status = -EBUSY;
40880+ goto err;
40881+ }
40882+ req = embedded_payload(wrb);
40883+
40884+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
40885+ OPCODE_COMMON_MANAGE_FAT);
40886+
40887+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40888+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
40889+ req->fat_operation = cpu_to_le32(QUERY_FAT);
40890+ status = be_mcc_notify_wait(adapter);
40891+ if (!status) {
40892+ struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
40893+ if (log_size && resp->log_size)
40894+ *log_size = le32_to_cpu(resp->log_size) -
40895+ sizeof(u32);
40896+ }
40897+err:
40898+ spin_unlock_bh(&adapter->mcc_lock);
40899+ return status;
40900+}
40901+
40902+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
40903+{
40904+ struct be_dma_mem get_fat_cmd;
40905+ struct be_mcc_wrb *wrb;
40906+ struct be_cmd_req_get_fat *req;
40907+ struct be_sge *sge;
40908+ u32 offset = 0, total_size, buf_size,
40909+ log_offset = sizeof(u32), payload_len;
40910+ int status;
40911+
40912+ if (buf_len == 0)
40913+ return;
40914+
40915+ total_size = buf_len;
40916+
40917+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
40918+ get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
40919+ get_fat_cmd.size,
40920+ &get_fat_cmd.dma);
40921+ if (!get_fat_cmd.va) {
40922+ status = -ENOMEM;
40923+ dev_err(&adapter->pdev->dev,
40924+ "Memory allocation failure while retrieving FAT data\n");
40925+ return;
40926+ }
40927+
40928+ spin_lock_bh(&adapter->mcc_lock);
40929+
40930+ while (total_size) {
40931+ buf_size = min(total_size, (u32)60*1024);
40932+ total_size -= buf_size;
40933+
40934+ wrb = wrb_from_mccq(adapter);
40935+ if (!wrb) {
40936+ status = -EBUSY;
40937+ goto err;
40938+ }
40939+ req = get_fat_cmd.va;
40940+ sge = nonembedded_sgl(wrb);
40941+
40942+ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
40943+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
40944+ OPCODE_COMMON_MANAGE_FAT);
40945+
40946+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
40947+ OPCODE_COMMON_MANAGE_FAT, payload_len);
40948+
40949+ sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
40950+ sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
40951+ sge->len = cpu_to_le32(get_fat_cmd.size);
40952+
40953+ req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
40954+ req->read_log_offset = cpu_to_le32(log_offset);
40955+ req->read_log_length = cpu_to_le32(buf_size);
40956+ req->data_buffer_size = cpu_to_le32(buf_size);
40957+
40958+ status = be_mcc_notify_wait(adapter);
40959+ if (!status) {
40960+ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
40961+ memcpy(buf + offset,
40962+ resp->data_buffer,
40963+ le32_to_cpu(resp->read_log_length));
40964+ } else {
40965+ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
40966+ goto err;
40967+ }
40968+ offset += buf_size;
40969+ log_offset += buf_size;
40970+ }
40971+err:
40972+ pci_free_consistent(adapter->pdev, get_fat_cmd.size,
40973+ get_fat_cmd.va,
40974+ get_fat_cmd.dma);
40975+ spin_unlock_bh(&adapter->mcc_lock);
40976+}
40977+
40978+/* Uses synchronous mcc */
40979+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
40980+ char *fw_on_flash)
40981 {
40982 struct be_mcc_wrb *wrb;
40983 struct be_cmd_req_get_fw_version *req;
40984 int status;
40985
40986- spin_lock(&adapter->mbox_lock);
40987+ spin_lock_bh(&adapter->mcc_lock);
40988+
40989+ wrb = wrb_from_mccq(adapter);
40990+ if (!wrb) {
40991+ status = -EBUSY;
40992+ goto err;
40993+ }
40994
40995- wrb = wrb_from_mbox(adapter);
40996 req = embedded_payload(wrb);
40997
40998- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
40999+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41000+ OPCODE_COMMON_GET_FW_VERSION);
41001
41002 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41003 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
41004
41005- status = be_mbox_notify_wait(adapter);
41006+ status = be_mcc_notify_wait(adapter);
41007 if (!status) {
41008 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
41009- strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
41010+ strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN-1);
41011+ if (fw_on_flash)
41012+ strncpy(fw_on_flash, resp->fw_on_flash_version_string,
41013+ FW_VER_LEN-1);
41014 }
41015-
41016- spin_unlock(&adapter->mbox_lock);
41017+err:
41018+ spin_unlock_bh(&adapter->mcc_lock);
41019 return status;
41020 }
41021
41022@@ -897,13 +1464,19 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41023 {
41024 struct be_mcc_wrb *wrb;
41025 struct be_cmd_req_modify_eq_delay *req;
41026+ int status = 0;
41027
41028 spin_lock_bh(&adapter->mcc_lock);
41029
41030 wrb = wrb_from_mccq(adapter);
41031+ if (!wrb) {
41032+ status = -EBUSY;
41033+ goto err;
41034+ }
41035 req = embedded_payload(wrb);
41036
41037- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41038+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41039+ OPCODE_COMMON_MODIFY_EQ_DELAY);
41040
41041 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41042 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
41043@@ -915,8 +1488,9 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
41044
41045 be_mcc_notify(adapter);
41046
41047+err:
41048 spin_unlock_bh(&adapter->mcc_lock);
41049- return 0;
41050+ return status;
41051 }
41052
41053 /* Uses sycnhronous mcc */
41054@@ -930,9 +1504,14 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41055 spin_lock_bh(&adapter->mcc_lock);
41056
41057 wrb = wrb_from_mccq(adapter);
41058+ if (!wrb) {
41059+ status = -EBUSY;
41060+ goto err;
41061+ }
41062 req = embedded_payload(wrb);
41063
41064- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41065+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41066+ OPCODE_COMMON_NTWK_VLAN_CONFIG);
41067
41068 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41069 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
41070@@ -948,79 +1527,63 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
41071
41072 status = be_mcc_notify_wait(adapter);
41073
41074+err:
41075 spin_unlock_bh(&adapter->mcc_lock);
41076 return status;
41077 }
41078
41079-/* Uses MCC for this command as it may be called in BH context
41080- * Uses synchronous mcc
41081- */
41082-int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
41083+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
41084 {
41085 struct be_mcc_wrb *wrb;
41086- struct be_cmd_req_promiscuous_config *req;
41087+ struct be_dma_mem *mem = &adapter->rx_filter;
41088+ struct be_cmd_req_rx_filter *req = mem->va;
41089+ struct be_sge *sge;
41090 int status;
41091
41092 spin_lock_bh(&adapter->mcc_lock);
41093
41094 wrb = wrb_from_mccq(adapter);
41095- req = embedded_payload(wrb);
41096-
41097- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41098-
41099- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41100- OPCODE_ETH_PROMISCUOUS, sizeof(*req));
41101-
41102- if (port_num)
41103- req->port1_promiscuous = en;
41104- else
41105- req->port0_promiscuous = en;
41106-
41107- status = be_mcc_notify_wait(adapter);
41108-
41109- spin_unlock_bh(&adapter->mcc_lock);
41110- return status;
41111-}
41112-
41113-/*
41114- * Uses MCC for this command as it may be called in BH context
41115- * (mc == NULL) => multicast promiscous
41116- */
41117-int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
41118- struct dev_mc_list *mc_list, u32 mc_count)
41119-{
41120-#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
41121- struct be_mcc_wrb *wrb;
41122- struct be_cmd_req_mcast_mac_config *req;
41123-
41124- spin_lock_bh(&adapter->mcc_lock);
41125-
41126- wrb = wrb_from_mccq(adapter);
41127- req = embedded_payload(wrb);
41128-
41129- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41130-
41131+ if (!wrb) {
41132+ status = -EBUSY;
41133+ goto err;
41134+ }
41135+ sge = nonembedded_sgl(wrb);
41136+ sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
41137+ sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
41138+ sge->len = cpu_to_le32(mem->size);
41139+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
41140+ OPCODE_COMMON_NTWK_RX_FILTER);
41141+
41142+ memset(req, 0, sizeof(*req));
41143 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41144- OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
41145+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
41146
41147- req->interface_id = if_id;
41148- if (mc_list && mc_count <= BE_MAX_MC) {
41149- int i;
41150- struct dev_mc_list *mc;
41151-
41152- req->num_mac = cpu_to_le16(mc_count);
41153-
41154- for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
41155- memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
41156+ req->if_id = cpu_to_le32(adapter->if_handle);
41157+ if (flags & IFF_PROMISC) {
41158+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41159+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
41160+ if (value == ON)
41161+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
41162+ BE_IF_FLAGS_VLAN_PROMISCUOUS);
41163+ } else if (flags & IFF_ALLMULTI) {
41164+ req->if_flags_mask = req->if_flags =
41165+ cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
41166 } else {
41167- req->promiscuous = 1;
41168- }
41169+ struct netdev_hw_addr *ha;
41170+ int i = 0;
41171
41172- be_mcc_notify_wait(adapter);
41173+ req->if_flags_mask = req->if_flags =
41174+ cpu_to_le32(BE_IF_FLAGS_MULTICAST);
41175+ req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
41176+ netdev_for_each_mc_addr(ha, adapter->netdev)
41177+ memcpy(req->mcast_mac[i++].byte, ha->DMI_ADDR,
41178+ ETH_ALEN);
41179+ }
41180+ status = be_mcc_notify_wait(adapter);
41181
41182+err:
41183 spin_unlock_bh(&adapter->mcc_lock);
41184-
41185- return 0;
41186+ return status;
41187 }
41188
41189 /* Uses synchrounous mcc */
41190@@ -1033,9 +1596,14 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41191 spin_lock_bh(&adapter->mcc_lock);
41192
41193 wrb = wrb_from_mccq(adapter);
41194+ if (!wrb) {
41195+ status = -EBUSY;
41196+ goto err;
41197+ }
41198 req = embedded_payload(wrb);
41199
41200- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41201+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41202+ OPCODE_COMMON_SET_FLOW_CONTROL);
41203
41204 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41205 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
41206@@ -1045,6 +1613,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
41207
41208 status = be_mcc_notify_wait(adapter);
41209
41210+err:
41211 spin_unlock_bh(&adapter->mcc_lock);
41212 return status;
41213 }
41214@@ -1059,9 +1628,14 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41215 spin_lock_bh(&adapter->mcc_lock);
41216
41217 wrb = wrb_from_mccq(adapter);
41218+ if (!wrb) {
41219+ status = -EBUSY;
41220+ goto err;
41221+ }
41222 req = embedded_payload(wrb);
41223
41224- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41225+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41226+ OPCODE_COMMON_GET_FLOW_CONTROL);
41227
41228 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41229 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
41230@@ -1074,23 +1648,27 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
41231 *rx_fc = le16_to_cpu(resp->rx_flow_control);
41232 }
41233
41234+err:
41235 spin_unlock_bh(&adapter->mcc_lock);
41236 return status;
41237 }
41238
41239 /* Uses mbox */
41240-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41241+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
41242+ u32 *mode, u32 *function_caps)
41243 {
41244 struct be_mcc_wrb *wrb;
41245 struct be_cmd_req_query_fw_cfg *req;
41246 int status;
41247
41248- spin_lock(&adapter->mbox_lock);
41249+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41250+ return -1;
41251
41252 wrb = wrb_from_mbox(adapter);
41253 req = embedded_payload(wrb);
41254
41255- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41256+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41257+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
41258
41259 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41260 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
41261@@ -1099,10 +1677,11 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
41262 if (!status) {
41263 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
41264 *port_num = le32_to_cpu(resp->phys_port);
41265- *cap = le32_to_cpu(resp->function_cap);
41266+ *mode = le32_to_cpu(resp->function_mode);
41267+ *function_caps = le32_to_cpu(resp->function_caps);
41268 }
41269
41270- spin_unlock(&adapter->mbox_lock);
41271+ mutex_unlock(&adapter->mbox_lock);
41272 return status;
41273 }
41274
41275@@ -1113,19 +1692,161 @@ int be_cmd_reset_function(struct be_adapter *adapter)
41276 struct be_cmd_req_hdr *req;
41277 int status;
41278
41279- spin_lock(&adapter->mbox_lock);
41280+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41281+ return -1;
41282
41283 wrb = wrb_from_mbox(adapter);
41284 req = embedded_payload(wrb);
41285
41286- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
41287+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41288+ OPCODE_COMMON_FUNCTION_RESET);
41289
41290 be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
41291 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
41292
41293 status = be_mbox_notify_wait(adapter);
41294
41295- spin_unlock(&adapter->mbox_lock);
41296+ mutex_unlock(&adapter->mbox_lock);
41297+ return status;
41298+}
41299+
41300+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
41301+{
41302+ struct be_mcc_wrb *wrb;
41303+ struct be_cmd_req_rss_config *req;
41304+ u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
41305+ 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
41306+ int status;
41307+
41308+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41309+ return -1;
41310+
41311+ wrb = wrb_from_mbox(adapter);
41312+ req = embedded_payload(wrb);
41313+
41314+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41315+ OPCODE_ETH_RSS_CONFIG);
41316+
41317+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41318+ OPCODE_ETH_RSS_CONFIG, sizeof(*req));
41319+
41320+ req->if_id = cpu_to_le32(adapter->if_handle);
41321+ req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
41322+ req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
41323+ memcpy(req->cpu_table, rsstable, table_size);
41324+ memcpy(req->hash, myhash, sizeof(myhash));
41325+ be_dws_cpu_to_le(req->hash, sizeof(req->hash));
41326+
41327+ status = be_mbox_notify_wait(adapter);
41328+
41329+ mutex_unlock(&adapter->mbox_lock);
41330+ return status;
41331+}
41332+
41333+/* Uses sync mcc */
41334+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
41335+ u8 bcn, u8 sts, u8 state)
41336+{
41337+ struct be_mcc_wrb *wrb;
41338+ struct be_cmd_req_enable_disable_beacon *req;
41339+ int status;
41340+
41341+ spin_lock_bh(&adapter->mcc_lock);
41342+
41343+ wrb = wrb_from_mccq(adapter);
41344+ if (!wrb) {
41345+ status = -EBUSY;
41346+ goto err;
41347+ }
41348+ req = embedded_payload(wrb);
41349+
41350+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41351+ OPCODE_COMMON_ENABLE_DISABLE_BEACON);
41352+
41353+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41354+ OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
41355+
41356+ req->port_num = port_num;
41357+ req->beacon_state = state;
41358+ req->beacon_duration = bcn;
41359+ req->status_duration = sts;
41360+
41361+ status = be_mcc_notify_wait(adapter);
41362+
41363+err:
41364+ spin_unlock_bh(&adapter->mcc_lock);
41365+ return status;
41366+}
41367+
41368+/* Uses sync mcc */
41369+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
41370+{
41371+ struct be_mcc_wrb *wrb;
41372+ struct be_cmd_req_get_beacon_state *req;
41373+ int status;
41374+
41375+ spin_lock_bh(&adapter->mcc_lock);
41376+
41377+ wrb = wrb_from_mccq(adapter);
41378+ if (!wrb) {
41379+ status = -EBUSY;
41380+ goto err;
41381+ }
41382+ req = embedded_payload(wrb);
41383+
41384+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41385+ OPCODE_COMMON_GET_BEACON_STATE);
41386+
41387+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41388+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
41389+
41390+ req->port_num = port_num;
41391+
41392+ status = be_mcc_notify_wait(adapter);
41393+ if (!status) {
41394+ struct be_cmd_resp_get_beacon_state *resp =
41395+ embedded_payload(wrb);
41396+ *state = resp->beacon_state;
41397+ }
41398+
41399+err:
41400+ spin_unlock_bh(&adapter->mcc_lock);
41401+ return status;
41402+}
41403+
41404+/* Uses sync mcc */
41405+int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
41406+ u8 *connector)
41407+{
41408+ struct be_mcc_wrb *wrb;
41409+ struct be_cmd_req_port_type *req;
41410+ int status;
41411+
41412+ spin_lock_bh(&adapter->mcc_lock);
41413+
41414+ wrb = wrb_from_mccq(adapter);
41415+ if (!wrb) {
41416+ status = -EBUSY;
41417+ goto err;
41418+ }
41419+ req = embedded_payload(wrb);
41420+
41421+ be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
41422+ OPCODE_COMMON_READ_TRANSRECV_DATA);
41423+
41424+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41425+ OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
41426+
41427+ req->port = cpu_to_le32(port);
41428+ req->page_num = cpu_to_le32(TR_PAGE_A0);
41429+ status = be_mcc_notify_wait(adapter);
41430+ if (!status) {
41431+ struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
41432+ *connector = resp->data.connector;
41433+ }
41434+
41435+err:
41436+ spin_unlock_bh(&adapter->mcc_lock);
41437 return status;
41438 }
41439
41440@@ -1133,16 +1854,24 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
41441 u32 flash_type, u32 flash_opcode, u32 buf_size)
41442 {
41443 struct be_mcc_wrb *wrb;
41444- struct be_cmd_write_flashrom *req = cmd->va;
41445+ struct be_cmd_write_flashrom *req;
41446 struct be_sge *sge;
41447 int status;
41448
41449 spin_lock_bh(&adapter->mcc_lock);
41450+ adapter->flash_status = 0;
41451
41452 wrb = wrb_from_mccq(adapter);
41453+ if (!wrb) {
41454+ status = -EBUSY;
41455+ goto err_unlock;
41456+ }
41457+ req = cmd->va;
41458 sge = nonembedded_sgl(wrb);
41459
41460- be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
41461+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
41462+ OPCODE_COMMON_WRITE_FLASHROM);
41463+ wrb->tag1 = CMD_SUBSYSTEM_COMMON;
41464
41465 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41466 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
41467@@ -1154,8 +1883,852 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
41468 req->params.op_code = cpu_to_le32(flash_opcode);
41469 req->params.data_buf_size = cpu_to_le32(buf_size);
41470
41471+ be_mcc_notify(adapter);
41472+ spin_unlock_bh(&adapter->mcc_lock);
41473+
41474+ if (!wait_for_completion_timeout(&adapter->flash_compl,
41475+ msecs_to_jiffies(40000)))
41476+ status = -1;
41477+ else
41478+ status = adapter->flash_status;
41479+
41480+ return status;
41481+
41482+err_unlock:
41483+ spin_unlock_bh(&adapter->mcc_lock);
41484+ return status;
41485+}
41486+
41487+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
41488+ int offset)
41489+{
41490+ struct be_mcc_wrb *wrb;
41491+ struct be_cmd_write_flashrom *req;
41492+ int status;
41493+
41494+ spin_lock_bh(&adapter->mcc_lock);
41495+
41496+ wrb = wrb_from_mccq(adapter);
41497+ if (!wrb) {
41498+ status = -EBUSY;
41499+ goto err;
41500+ }
41501+ req = embedded_payload(wrb);
41502+
41503+ be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
41504+ OPCODE_COMMON_READ_FLASHROM);
41505+
41506+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41507+ OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
41508+
41509+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
41510+ req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
41511+ req->params.offset = cpu_to_le32(offset);
41512+ req->params.data_buf_size = cpu_to_le32(0x4);
41513+
41514+ status = be_mcc_notify_wait(adapter);
41515+ if (!status)
41516+ memcpy(flashed_crc, req->params.data_buf, 4);
41517+
41518+err:
41519+ spin_unlock_bh(&adapter->mcc_lock);
41520+ return status;
41521+}
41522+
41523+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
41524+ struct be_dma_mem *nonemb_cmd)
41525+{
41526+ struct be_mcc_wrb *wrb;
41527+ struct be_cmd_req_acpi_wol_magic_config *req;
41528+ struct be_sge *sge;
41529+ int status;
41530+
41531+ spin_lock_bh(&adapter->mcc_lock);
41532+
41533+ wrb = wrb_from_mccq(adapter);
41534+ if (!wrb) {
41535+ status = -EBUSY;
41536+ goto err;
41537+ }
41538+ req = nonemb_cmd->va;
41539+ sge = nonembedded_sgl(wrb);
41540+
41541+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
41542+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
41543+
41544+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
41545+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
41546+ memcpy(req->magic_mac, mac, ETH_ALEN);
41547+
41548+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
41549+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
41550+ sge->len = cpu_to_le32(nonemb_cmd->size);
41551+
41552+ status = be_mcc_notify_wait(adapter);
41553+
41554+err:
41555+ spin_unlock_bh(&adapter->mcc_lock);
41556+ return status;
41557+}
41558+
41559+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
41560+ u8 loopback_type, u8 enable)
41561+{
41562+ struct be_mcc_wrb *wrb;
41563+ struct be_cmd_req_set_lmode *req;
41564+ int status;
41565+
41566+ spin_lock_bh(&adapter->mcc_lock);
41567+
41568+ wrb = wrb_from_mccq(adapter);
41569+ if (!wrb) {
41570+ status = -EBUSY;
41571+ goto err;
41572+ }
41573+
41574+ req = embedded_payload(wrb);
41575+
41576+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41577+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
41578+
41579+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
41580+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
41581+ sizeof(*req));
41582+
41583+ req->src_port = port_num;
41584+ req->dest_port = port_num;
41585+ req->loopback_type = loopback_type;
41586+ req->loopback_state = enable;
41587+
41588+ status = be_mcc_notify_wait(adapter);
41589+err:
41590+ spin_unlock_bh(&adapter->mcc_lock);
41591+ return status;
41592+}
41593+
41594+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
41595+ u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
41596+{
41597+ struct be_mcc_wrb *wrb;
41598+ struct be_cmd_req_loopback_test *req;
41599+ int status;
41600+
41601+ spin_lock_bh(&adapter->mcc_lock);
41602+
41603+ wrb = wrb_from_mccq(adapter);
41604+ if (!wrb) {
41605+ status = -EBUSY;
41606+ goto err;
41607+ }
41608+
41609+ req = embedded_payload(wrb);
41610+
41611+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41612+ OPCODE_LOWLEVEL_LOOPBACK_TEST);
41613+
41614+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
41615+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
41616+ req->hdr.timeout = cpu_to_le32(4);
41617+
41618+ req->pattern = cpu_to_le64(pattern);
41619+ req->src_port = cpu_to_le32(port_num);
41620+ req->dest_port = cpu_to_le32(port_num);
41621+ req->pkt_size = cpu_to_le32(pkt_size);
41622+ req->num_pkts = cpu_to_le32(num_pkts);
41623+ req->loopback_type = cpu_to_le32(loopback_type);
41624+
41625+ status = be_mcc_notify_wait(adapter);
41626+ if (!status) {
41627+ struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
41628+ status = le32_to_cpu(resp->status);
41629+ }
41630+
41631+err:
41632+ spin_unlock_bh(&adapter->mcc_lock);
41633+ return status;
41634+}
41635+
41636+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
41637+ u32 byte_cnt, struct be_dma_mem *cmd)
41638+{
41639+ struct be_mcc_wrb *wrb;
41640+ struct be_cmd_req_ddrdma_test *req;
41641+ struct be_sge *sge;
41642+ int status;
41643+ int i, j = 0;
41644+
41645+ spin_lock_bh(&adapter->mcc_lock);
41646+
41647+ wrb = wrb_from_mccq(adapter);
41648+ if (!wrb) {
41649+ status = -EBUSY;
41650+ goto err;
41651+ }
41652+ req = cmd->va;
41653+ sge = nonembedded_sgl(wrb);
41654+ be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
41655+ OPCODE_LOWLEVEL_HOST_DDR_DMA);
41656+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
41657+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
41658+
41659+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
41660+ sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
41661+ sge->len = cpu_to_le32(cmd->size);
41662+
41663+ req->pattern = cpu_to_le64(pattern);
41664+ req->byte_count = cpu_to_le32(byte_cnt);
41665+ for (i = 0; i < byte_cnt; i++) {
41666+ req->snd_buff[i] = (u8)(pattern >> (j*8));
41667+ j++;
41668+ if (j > 7)
41669+ j = 0;
41670+ }
41671+
41672+ status = be_mcc_notify_wait(adapter);
41673+
41674+ if (!status) {
41675+ struct be_cmd_resp_ddrdma_test *resp;
41676+ resp = cmd->va;
41677+ if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
41678+ resp->snd_err) {
41679+ status = -1;
41680+ }
41681+ }
41682+
41683+err:
41684+ spin_unlock_bh(&adapter->mcc_lock);
41685+ return status;
41686+}
41687+
41688+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
41689+ struct be_dma_mem *nonemb_cmd)
41690+{
41691+ struct be_mcc_wrb *wrb;
41692+ struct be_cmd_req_seeprom_read *req;
41693+ struct be_sge *sge;
41694+ int status;
41695+
41696+ spin_lock_bh(&adapter->mcc_lock);
41697+
41698+ wrb = wrb_from_mccq(adapter);
41699+ req = nonemb_cmd->va;
41700+ sge = nonembedded_sgl(wrb);
41701+
41702+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
41703+ OPCODE_COMMON_SEEPROM_READ);
41704+
41705+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41706+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
41707+
41708+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
41709+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
41710+ sge->len = cpu_to_le32(nonemb_cmd->size);
41711+
41712+ status = be_mcc_notify_wait(adapter);
41713+
41714+ spin_unlock_bh(&adapter->mcc_lock);
41715+ return status;
41716+}
41717+
41718+int be_cmd_get_phy_info(struct be_adapter *adapter,
41719+ struct be_phy_info *phy_info)
41720+{
41721+ struct be_mcc_wrb *wrb;
41722+ struct be_cmd_req_get_phy_info *req;
41723+ struct be_sge *sge;
41724+ struct be_dma_mem cmd;
41725+ struct be_phy_info *resp_phy_info;
41726+ int status;
41727+
41728+ spin_lock_bh(&adapter->mcc_lock);
41729+ wrb = wrb_from_mccq(adapter);
41730+ if (!wrb) {
41731+ status = -EBUSY;
41732+ goto err;
41733+ }
41734+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
41735+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
41736+ &cmd.dma);
41737+ if (!cmd.va) {
41738+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
41739+ status = -ENOMEM;
41740+ goto err;
41741+ }
41742+
41743+ req = cmd.va;
41744+ sge = nonembedded_sgl(wrb);
41745+
41746+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
41747+ OPCODE_COMMON_GET_PHY_DETAILS);
41748+
41749+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41750+ OPCODE_COMMON_GET_PHY_DETAILS,
41751+ sizeof(*req));
41752+
41753+ sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
41754+ sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
41755+ sge->len = cpu_to_le32(cmd.size);
41756+
41757+ status = be_mcc_notify_wait(adapter);
41758+ if (!status) {
41759+ resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr);
41760+ phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
41761+ phy_info->interface_type =
41762+ le16_to_cpu(resp_phy_info->interface_type);
41763+ phy_info->auto_speeds_supported =
41764+ le16_to_cpu(resp_phy_info->auto_speeds_supported);
41765+ phy_info->fixed_speeds_supported =
41766+ le16_to_cpu(resp_phy_info->fixed_speeds_supported);
41767+ phy_info->misc_params =
41768+ le32_to_cpu(resp_phy_info->misc_params);
41769+ }
41770+ pci_free_consistent(adapter->pdev, cmd.size,
41771+ cmd.va, cmd.dma);
41772+err:
41773+ spin_unlock_bh(&adapter->mcc_lock);
41774+ return status;
41775+}
41776+
41777+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
41778+{
41779+ struct be_mcc_wrb *wrb;
41780+ struct be_cmd_req_set_qos *req;
41781+ int status;
41782+
41783+ spin_lock_bh(&adapter->mcc_lock);
41784+
41785+ wrb = wrb_from_mccq(adapter);
41786+ if (!wrb) {
41787+ status = -EBUSY;
41788+ goto err;
41789+ }
41790+
41791+ req = embedded_payload(wrb);
41792+
41793+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41794+ OPCODE_COMMON_SET_QOS);
41795+
41796+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41797+ OPCODE_COMMON_SET_QOS, sizeof(*req));
41798+
41799+ req->hdr.domain = domain;
41800+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
41801+ req->max_bps_nic = cpu_to_le32(bps);
41802+
41803+ status = be_mcc_notify_wait(adapter);
41804+err:
41805+ spin_unlock_bh(&adapter->mcc_lock);
41806+ return status;
41807+}
41808+
41809+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
41810+{
41811+ struct be_mcc_wrb *wrb;
41812+ struct be_cmd_req_cntl_attribs *req;
41813+ struct be_cmd_resp_cntl_attribs *resp;
41814+ struct be_sge *sge;
41815+ int status;
41816+ int payload_len = max(sizeof(*req), sizeof(*resp));
41817+ struct mgmt_controller_attrib *attribs;
41818+ struct be_dma_mem attribs_cmd;
41819+
41820+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
41821+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
41822+ attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
41823+ &attribs_cmd.dma);
41824+ if (!attribs_cmd.va) {
41825+ dev_err(&adapter->pdev->dev,
41826+ "Memory allocation failure\n");
41827+ return -ENOMEM;
41828+ }
41829+
41830+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41831+ return -1;
41832+
41833+ wrb = wrb_from_mbox(adapter);
41834+ if (!wrb) {
41835+ status = -EBUSY;
41836+ goto err;
41837+ }
41838+ req = attribs_cmd.va;
41839+ sge = nonembedded_sgl(wrb);
41840+
41841+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
41842+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
41843+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41844+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
41845+ sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
41846+ sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
41847+ sge->len = cpu_to_le32(attribs_cmd.size);
41848+
41849+ status = be_mbox_notify_wait(adapter);
41850+ if (!status) {
41851+ attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
41852+ sizeof(struct be_cmd_resp_hdr));
41853+ adapter->hba_port_num = attribs->hba_attribs.phy_port;
41854+ strncpy(adapter->model_number,
41855+ attribs->hba_attribs.controller_model_number, 31);
41856+ }
41857+
41858+err:
41859+ mutex_unlock(&adapter->mbox_lock);
41860+ pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
41861+ attribs_cmd.dma);
41862+ return status;
41863+}
41864+
41865+/* Uses mbox */
41866+int be_cmd_req_native_mode(struct be_adapter *adapter)
41867+{
41868+ struct be_mcc_wrb *wrb;
41869+ struct be_cmd_req_set_func_cap *req;
41870+ int status;
41871+
41872+ if (mutex_lock_interruptible(&adapter->mbox_lock))
41873+ return -1;
41874+
41875+ wrb = wrb_from_mbox(adapter);
41876+ if (!wrb) {
41877+ status = -EBUSY;
41878+ goto err;
41879+ }
41880+
41881+ req = embedded_payload(wrb);
41882+
41883+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41884+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
41885+
41886+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41887+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
41888+
41889+ req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
41890+ CAPABILITY_BE3_NATIVE_ERX_API);
41891+ req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
41892+
41893+ status = be_mbox_notify_wait(adapter);
41894+ if (!status) {
41895+ struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
41896+ adapter->be3_native = le32_to_cpu(resp->cap_flags) &
41897+ CAPABILITY_BE3_NATIVE_ERX_API;
41898+ }
41899+err:
41900+ mutex_unlock(&adapter->mbox_lock);
41901+ return status;
41902+}
41903+
41904+static void encode_port_names(struct be_adapter *adapter)
41905+{
41906+ switch (adapter->port_name[adapter->hba_port_num]) {
41907+ case '0':
41908+ adapter->port_name[adapter->hba_port_num] = 0;
41909+ break;
41910+ case '1':
41911+ adapter->port_name[adapter->hba_port_num] = 1;
41912+ break;
41913+ case '2':
41914+ adapter->port_name[adapter->hba_port_num] = 2;
41915+ break;
41916+ case '3':
41917+ adapter->port_name[adapter->hba_port_num] = 3;
41918+ break;
41919+ case '4':
41920+ adapter->port_name[adapter->hba_port_num] = 4;
41921+ break;
41922+ case 'A':
41923+ adapter->port_name[adapter->hba_port_num] = 5;
41924+ break;
41925+ case 'B':
41926+ adapter->port_name[adapter->hba_port_num] = 6;
41927+ break;
41928+ case 'C':
41929+ adapter->port_name[adapter->hba_port_num] = 7;
41930+ break;
41931+ case 'D':
41932+ adapter->port_name[adapter->hba_port_num] = 8;
41933+ break;
41934+ }
41935+}
41936+
41937+int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name)
41938+{
41939+ struct be_mcc_wrb *wrb;
41940+ struct be_cmd_req_get_port_name *req;
41941+ int status;
41942+
41943+ spin_lock_bh(&adapter->mcc_lock);
41944+
41945+ wrb = wrb_from_mccq(adapter);
41946+ if (!wrb) {
41947+ status = -EBUSY;
41948+ goto err;
41949+ }
41950+
41951+ req = embedded_payload(wrb);
41952+
41953+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41954+ OPCODE_COMMON_GET_PORT_NAME);
41955+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41956+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
41957+
41958+ status = be_mcc_notify_wait(adapter);
41959+ if (!status) {
41960+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
41961+ port_name[0] = resp->port0_name;
41962+ port_name[1] = resp->port1_name;
41963+ }
41964+
41965+err:
41966+ spin_unlock_bh(&adapter->mcc_lock);
41967+
41968+ if(!status)
41969+ encode_port_names(adapter);
41970+ return status;
41971+}
41972+
41973+int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name)
41974+{
41975+ struct be_mcc_wrb *wrb;
41976+ struct be_cmd_req_get_port_name *req;
41977+ int status;
41978+
41979+ spin_lock_bh(&adapter->mcc_lock);
41980+
41981+ wrb = wrb_from_mccq(adapter);
41982+ if (!wrb) {
41983+ status = -EBUSY;
41984+ goto err;
41985+ }
41986+ req = embedded_payload(wrb);
41987+
41988+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
41989+ OPCODE_COMMON_GET_PORT_NAME);
41990+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
41991+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req));
41992+ req->hdr.version = 1;
41993+
41994 status = be_mcc_notify_wait(adapter);
41995+ if (!status) {
41996+ struct be_cmd_resp_get_port_name_v1 *resp = embedded_payload(wrb);
41997+ port_name[0] = resp->port0_name;
41998+ port_name[1] = resp->port1_name;
41999+ port_name[2] = resp->port2_name;
42000+ port_name[3] = resp->port3_name;
42001+ }
42002+
42003+err:
42004+ spin_unlock_bh(&adapter->mcc_lock);
42005+
42006+ if (!status)
42007+ encode_port_names(adapter);
42008+ return status;
42009+}
42010+
42011+int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs)
42012+{
42013+ struct be_mcc_wrb *wrb;
42014+ struct be_cmd_req_pg *req;
42015+ int status, num = 0;
42016+ bool query = true;
42017+
42018+ *fw_num_txqs = MAX_TX_QS;
42019+
42020+ if (mutex_lock_interruptible(&adapter->mbox_lock))
42021+ return -1;
42022+
42023+enable_pfc:
42024+ wrb = wrb_from_mbox(adapter);
42025+ req = embedded_payload(wrb);
42026+
42027+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42028+ OPCODE_ETH_PG_FEATURE_QUERY_REQUEST);
42029+
42030+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
42031+ OPCODE_ETH_PG_FEATURE_QUERY_REQUEST, sizeof(*req));
42032+
42033+ if (query)
42034+ req->query |= cpu_to_le32(REQ_PG_QUERY);
42035+ req->pfc_pg |= cpu_to_le32(REQ_PG_FEAT);
42036+
42037+ status = be_mbox_notify_wait(adapter);
42038+ if (!status) {
42039+ struct be_cmd_resp_pg *resp = embedded_payload(wrb);
42040+ if (query) {
42041+ if (le32_to_cpu(resp->pfc_pg) & REQ_PG_FEAT) {
42042+ num = le32_to_cpu(resp->num_tx_rings);
42043+ query = false;
42044+ goto enable_pfc;
42045+ }
42046+ } else {
42047+ adapter->flags |= BE_FLAGS_DCBX;
42048+ *fw_num_txqs = num;
42049+ }
42050+ }
42051+
42052+ mutex_unlock(&adapter->mbox_lock);
42053+ return status;
42054+}
42055+
42056+/* Set privilege(s) for a function */
42057+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 mask, u32 *prev,
42058+ u32 domain)
42059+{
42060+ struct be_mcc_wrb *wrb;
42061+ struct be_cmd_req_set_fn_privileges *req;
42062+ int status;
42063+
42064+ spin_lock_bh(&adapter->mcc_lock);
42065+
42066+ wrb = wrb_from_mccq(adapter);
42067+ if (!wrb) {
42068+ status = -EBUSY;
42069+ goto err;
42070+ }
42071+
42072+ req = embedded_payload(wrb);
42073+
42074+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42075+ OPCODE_COMMON_SET_FN_PRIVILEGES);
42076+
42077+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42078+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req));
42079+
42080+ req->hdr.domain = domain;
42081+ req->privilege_mask = cpu_to_le32(mask);
42082+
42083+ status = be_mcc_notify_wait(adapter);
42084+
42085+err:
42086+ spin_unlock_bh(&adapter->mcc_lock);
42087+ return status;
42088+}
42089+
42090+/* Get privilege(s) for a function */
42091+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
42092+ u32 domain)
42093+{
42094+ struct be_mcc_wrb *wrb;
42095+ struct be_cmd_req_get_fn_privileges *req;
42096+ int status;
42097+
42098+ spin_lock_bh(&adapter->mcc_lock);
42099+
42100+ wrb = wrb_from_mccq(adapter);
42101+ if (!wrb) {
42102+ status = -EBUSY;
42103+ goto err;
42104+ }
42105+
42106+ req = embedded_payload(wrb);
42107+
42108+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42109+ OPCODE_COMMON_GET_FN_PRIVILEGES);
42110
42111+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42112+ OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req));
42113+
42114+ req->hdr.domain = domain;
42115+
42116+ status = be_mcc_notify_wait(adapter);
42117+ if (!status) {
42118+ struct be_cmd_resp_get_fn_privileges *resp =
42119+ embedded_payload(wrb);
42120+ *privilege = le32_to_cpu(resp->privilege_mask);
42121+ } else
42122+ *privilege = 0;
42123+
42124+err:
42125+ spin_unlock_bh(&adapter->mcc_lock);
42126+ return status;
42127+}
42128+
42129+/* Set Hyper switch config */
42130+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
42131+ u32 domain, u16 intf_id)
42132+{
42133+ struct be_mcc_wrb *wrb;
42134+ struct be_cmd_req_set_hsw_config *req;
42135+ void *ctxt;
42136+ int status;
42137+
42138+ spin_lock_bh(&adapter->mcc_lock);
42139+
42140+ wrb = wrb_from_mccq(adapter);
42141+ if (!wrb) {
42142+ status = -EBUSY;
42143+ goto err;
42144+ }
42145+
42146+ req = embedded_payload(wrb);
42147+ ctxt = &req->context;
42148+
42149+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42150+ OPCODE_COMMON_SET_HSW_CONFIG);
42151+
42152+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42153+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req));
42154+
42155+ req->hdr.domain = domain;
42156+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
42157+ if (pvid) {
42158+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
42159+ AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
42160+ }
42161+
42162+ be_dws_cpu_to_le(req->context, sizeof(req->context));
42163+ status = be_mcc_notify_wait(adapter);
42164+
42165+err:
42166+ spin_unlock_bh(&adapter->mcc_lock);
42167+ return status;
42168+}
42169+
42170+/* Get Hyper switch config */
42171+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
42172+ u32 domain, u16 intf_id)
42173+{
42174+ struct be_mcc_wrb *wrb;
42175+ struct be_cmd_req_get_hsw_config *req;
42176+ void *ctxt;
42177+ int status;
42178+ u16 vid;
42179+
42180+ spin_lock_bh(&adapter->mcc_lock);
42181+
42182+ wrb = wrb_from_mccq(adapter);
42183+ if (!wrb) {
42184+ status = -EBUSY;
42185+ goto err;
42186+ }
42187+
42188+ req = embedded_payload(wrb);
42189+ ctxt = &req->context;
42190+
42191+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42192+ OPCODE_COMMON_GET_HSW_CONFIG);
42193+
42194+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42195+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req));
42196+
42197+ req->hdr.domain = domain;
42198+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
42199+ intf_id);
42200+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
42201+ be_dws_cpu_to_le(req->context, sizeof(req->context));
42202+
42203+ status = be_mcc_notify_wait(adapter);
42204+ if (!status) {
42205+ struct be_cmd_resp_get_hsw_config *resp =
42206+ embedded_payload(wrb);
42207+ be_dws_le_to_cpu(&resp->context,
42208+ sizeof(resp->context));
42209+ vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
42210+ pvid, &resp->context);
42211+ *pvid = le16_to_cpu(vid);
42212+ }
42213+
42214+err:
42215+ spin_unlock_bh(&adapter->mcc_lock);
42216+ return status;
42217+}
42218+
42219+int be_cmd_get_port_speed(struct be_adapter *adapter,
42220+ u8 port_num, u16 *dac_cable_len, u16 *port_speed)
42221+{
42222+ struct be_mcc_wrb *wrb;
42223+ struct be_cmd_req_get_port_speed *req;
42224+ int status = 0;
42225+
42226+ spin_lock_bh(&adapter->mcc_lock);
42227+
42228+ wrb = wrb_from_mccq(adapter);
42229+ if (!wrb) {
42230+ status = -EBUSY;
42231+ goto err;
42232+ }
42233+
42234+ req = embedded_payload(wrb);
42235+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42236+ OPCODE_COMMON_NTWK_GET_LINK_SPEED);
42237+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42238+ OPCODE_COMMON_NTWK_GET_LINK_SPEED,
42239+ sizeof(*req));
42240+ req->port_num = port_num;
42241+ status = be_mcc_notify_wait(adapter);
42242+ if (!status) {
42243+ struct be_cmd_resp_get_port_speed *resp =
42244+ embedded_payload(wrb);
42245+ *dac_cable_len = resp->dac_cable_length;
42246+ *port_speed = resp->mac_speed;
42247+ }
42248+
42249+err:
42250+ spin_unlock_bh(&adapter->mcc_lock);
42251+ return status;
42252+}
42253+
42254+int be_cmd_set_port_speed_v1(struct be_adapter *adapter,
42255+ u8 port_num, u16 mac_speed,
42256+ u16 dac_cable_len)
42257+{
42258+ struct be_mcc_wrb *wrb;
42259+ struct be_cmd_req_set_port_speed_v1 *req;
42260+ int status = 0;
42261+
42262+ spin_lock_bh(&adapter->mcc_lock);
42263+
42264+ wrb = wrb_from_mccq(adapter);
42265+ if (!wrb) {
42266+ status = -EBUSY;
42267+ goto err;
42268+ }
42269+ req = embedded_payload(wrb);
42270+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
42271+ OPCODE_COMMON_NTWK_SET_LINK_SPEED);
42272+
42273+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
42274+ OPCODE_COMMON_NTWK_SET_LINK_SPEED,
42275+ sizeof(*req));
42276+ req->hdr.version=1;
42277+
42278+ req->port_num = port_num;
42279+ req->virt_port = port_num;
42280+ req->mac_speed = mac_speed;
42281+ req->dac_cable_length = dac_cable_len;
42282+ status = be_mcc_notify_wait(adapter);
42283+err:
42284+ spin_unlock_bh(&adapter->mcc_lock);
42285+ return status;
42286+}
42287+
42288+
42289+/* Uses sync mcc */
42290+#ifdef CONFIG_PALAU
42291+int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
42292+ int req_size, void *va)
42293+{
42294+ struct be_mcc_wrb *wrb;
42295+ struct be_sge *sge;
42296+ int status;
42297+ struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) va;
42298+
42299+ spin_lock_bh(&adapter->mcc_lock);
42300+
42301+ wrb = wrb_from_mccq(adapter);
42302+ if (!wrb) {
42303+ status = -EBUSY;
42304+ goto err;
42305+ }
42306+ sge = nonembedded_sgl(wrb);
42307+
42308+ be_wrb_hdr_prepare(wrb, req_size, false, 1, hdr->opcode);
42309+ wrb->tag1 = MCC_WRB_PASS_THRU;
42310+ sge->pa_hi = cpu_to_le32(upper_32_bits(dma));
42311+ sge->pa_lo = cpu_to_le32(dma & 0xFFFFFFFF);
42312+ sge->len = cpu_to_le32(req_size);
42313+
42314+ status = be_mcc_notify_wait(adapter);
42315+err:
42316 spin_unlock_bh(&adapter->mcc_lock);
42317 return status;
42318 }
42319+#endif
42320diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
42321index ad33d55..35aa5c7 100644
42322--- a/drivers/net/benet/be_cmds.h
42323+++ b/drivers/net/benet/be_cmds.h
42324@@ -1,20 +1,23 @@
42325 /*
42326- * Copyright (C) 2005 - 2009 ServerEngines
42327+ * Copyright (C) 2005 - 2011 Emulex
42328 * All rights reserved.
42329 *
42330 * This program is free software; you can redistribute it and/or
42331 * modify it under the terms of the GNU General Public License version 2
42332- * as published by the Free Software Foundation. The full GNU General
42333+ * as published by the Free Software Foundation. The full GNU General
42334 * Public License is included in this distribution in the file called COPYING.
42335 *
42336 * Contact Information:
42337- * linux-drivers@serverengines.com
42338+ * linux-drivers@emulex.com
42339 *
42340- * ServerEngines
42341- * 209 N. Fair Oaks Ave
42342- * Sunnyvale, CA 94085
42343+ * Emulex
42344+ * 3333 Susan Street
42345+ * Costa Mesa, CA 92626
42346 */
42347
42348+#ifndef BE_CMDS_H
42349+#define BE_CMDS_H
42350+
42351 /*
42352 * The driver sends configuration and managements command requests to the
42353 * firmware in the BE. These requests are communicated to the processor
42354@@ -29,9 +32,10 @@ struct be_sge {
42355 u32 len;
42356 };
42357
42358-#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42359+#define MCC_WRB_EMBEDDED_MASK 1 /* bit 0 of dword 0*/
42360 #define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
42361 #define MCC_WRB_SGE_CNT_MASK 0x1F /* bits 3 - 7 of dword 0 */
42362+#define MCC_WRB_PASS_THRU 0xFF /* this wrb is used for pass thru cmd */
42363 struct be_mcc_wrb {
42364 u32 embedded; /* dword 0 */
42365 u32 payload_length; /* dword 1 */
42366@@ -44,24 +48,19 @@ struct be_mcc_wrb {
42367 } payload;
42368 };
42369
42370-#define CQE_FLAGS_VALID_MASK (1 << 31)
42371-#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42372-#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42373-#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42374+#define CQE_FLAGS_VALID_MASK (1 << 31)
42375+#define CQE_FLAGS_ASYNC_MASK (1 << 30)
42376+#define CQE_FLAGS_COMPLETED_MASK (1 << 28)
42377+#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
42378
42379 /* Completion Status */
42380 enum {
42381- MCC_STATUS_SUCCESS = 0x0,
42382-/* The client does not have sufficient privileges to execute the command */
42383- MCC_STATUS_INSUFFICIENT_PRIVILEGES = 0x1,
42384-/* A parameter in the command was invalid. */
42385- MCC_STATUS_INVALID_PARAMETER = 0x2,
42386-/* There are insufficient chip resources to execute the command */
42387- MCC_STATUS_INSUFFICIENT_RESOURCES = 0x3,
42388-/* The command is completing because the queue was getting flushed */
42389- MCC_STATUS_QUEUE_FLUSHING = 0x4,
42390-/* The command is completing with a DMA error */
42391- MCC_STATUS_DMA_FAILED = 0x5,
42392+ MCC_STATUS_SUCCESS = 0,
42393+ MCC_STATUS_FAILED = 1,
42394+ MCC_STATUS_ILLEGAL_REQUEST = 2,
42395+ MCC_STATUS_ILLEGAL_FIELD = 3,
42396+ MCC_STATUS_INSUFFICIENT_BUFFER = 4,
42397+ MCC_STATUS_UNAUTHORIZED_REQUEST = 5,
42398 MCC_STATUS_NOT_SUPPORTED = 66
42399 };
42400
42401@@ -81,15 +80,24 @@ struct be_mcc_compl {
42402 * mcc_compl is interpreted as follows:
42403 */
42404 #define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
42405+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
42406 #define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
42407+#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
42408 #define ASYNC_EVENT_CODE_LINK_STATE 0x1
42409+#define ASYNC_EVENT_CODE_GRP_5 0x5
42410+#define ASYNC_EVENT_QOS_SPEED 0x1
42411+#define ASYNC_EVENT_COS_PRIORITY 0x2
42412+#define ASYNC_EVENT_PVID_STATE 0x3
42413+#define GRP5_TYPE_PRIO_TC_MAP 4
42414+
42415 struct be_async_event_trailer {
42416 u32 code;
42417 };
42418
42419 enum {
42420- ASYNC_EVENT_LINK_DOWN = 0x0,
42421- ASYNC_EVENT_LINK_UP = 0x1
42422+ ASYNC_EVENT_LINK_DOWN = 0x0,
42423+ ASYNC_EVENT_LINK_UP = 0x1,
42424+ ASYNC_EVENT_LOGICAL = 0x2
42425 };
42426
42427 /* When the event code of an async trailer is link-state, the mcc_compl
42428@@ -101,7 +109,51 @@ struct be_async_event_link_state {
42429 u8 port_duplex;
42430 u8 port_speed;
42431 u8 port_fault;
42432- u8 rsvd0[7];
42433+ u8 rsvd0;
42434+ u16 qos_link_speed;
42435+ u32 event_tag;
42436+ struct be_async_event_trailer trailer;
42437+} __packed;
42438+
42439+/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
42440+ * the mcc_compl must be interpreted as follows
42441+ */
42442+struct be_async_event_grp5_qos_link_speed {
42443+ u8 physical_port;
42444+ u8 rsvd[5];
42445+ u16 qos_link_speed;
42446+ u32 event_tag;
42447+ struct be_async_event_trailer trailer;
42448+} __packed;
42449+
42450+/* When the event code of an async trailer is GRP5 and event type is
42451+ * CoS-Priority, the mcc_compl must be interpreted as follows
42452+ */
42453+struct be_async_event_grp5_cos_priority {
42454+ u8 physical_port;
42455+ u8 available_priority_bmap;
42456+ u8 reco_default_priority;
42457+ u8 valid;
42458+ u8 rsvd0;
42459+ u8 event_tag;
42460+ struct be_async_event_trailer trailer;
42461+} __packed;
42462+
42463+/* When the event code of an async trailer is GRP5 and event type is
42464+ * PVID state, the mcc_compl must be interpreted as follows
42465+ */
42466+struct be_async_event_grp5_pvid_state {
42467+ u8 enabled;
42468+ u8 rsvd0;
42469+ u16 tag;
42470+ u32 event_tag;
42471+ u32 rsvd1;
42472+ struct be_async_event_trailer trailer;
42473+} __packed;
42474+
42475+/* GRP5 prio-tc-map event */
42476+struct be_async_event_grp5_prio_tc_map {
42477+ u8 prio_tc_map[8]; /* map[prio] -> tc_id */
42478 struct be_async_event_trailer trailer;
42479 } __packed;
42480
42481@@ -111,41 +163,68 @@ struct be_mcc_mailbox {
42482 };
42483
42484 #define CMD_SUBSYSTEM_COMMON 0x1
42485-#define CMD_SUBSYSTEM_ETH 0x3
42486+#define CMD_SUBSYSTEM_ETH 0x3
42487+#define CMD_SUBSYSTEM_LOWLEVEL 0xb
42488
42489 #define OPCODE_COMMON_NTWK_MAC_QUERY 1
42490 #define OPCODE_COMMON_NTWK_MAC_SET 2
42491 #define OPCODE_COMMON_NTWK_MULTICAST_SET 3
42492-#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
42493+#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
42494 #define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
42495+#define OPCODE_COMMON_READ_FLASHROM 6
42496 #define OPCODE_COMMON_WRITE_FLASHROM 7
42497 #define OPCODE_COMMON_CQ_CREATE 12
42498 #define OPCODE_COMMON_EQ_CREATE 13
42499-#define OPCODE_COMMON_MCC_CREATE 21
42500-#define OPCODE_COMMON_NTWK_RX_FILTER 34
42501+#define OPCODE_COMMON_MCC_CREATE 21
42502+#define OPCODE_COMMON_SET_QOS 28
42503+#define OPCODE_COMMON_MCC_CREATE_EXT 90
42504+#define OPCODE_COMMON_SEEPROM_READ 30
42505+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32
42506+#define OPCODE_COMMON_NTWK_RX_FILTER 34
42507 #define OPCODE_COMMON_GET_FW_VERSION 35
42508 #define OPCODE_COMMON_SET_FLOW_CONTROL 36
42509 #define OPCODE_COMMON_GET_FLOW_CONTROL 37
42510 #define OPCODE_COMMON_SET_FRAME_SIZE 39
42511 #define OPCODE_COMMON_MODIFY_EQ_DELAY 41
42512 #define OPCODE_COMMON_FIRMWARE_CONFIG 42
42513-#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
42514-#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
42515-#define OPCODE_COMMON_MCC_DESTROY 53
42516-#define OPCODE_COMMON_CQ_DESTROY 54
42517-#define OPCODE_COMMON_EQ_DESTROY 55
42518+#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
42519+#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
42520+#define OPCODE_COMMON_MCC_DESTROY 53
42521+#define OPCODE_COMMON_CQ_DESTROY 54
42522+#define OPCODE_COMMON_EQ_DESTROY 55
42523+#define OPCODE_COMMON_NTWK_SET_LINK_SPEED 57
42524 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
42525 #define OPCODE_COMMON_NTWK_PMAC_ADD 59
42526 #define OPCODE_COMMON_NTWK_PMAC_DEL 60
42527 #define OPCODE_COMMON_FUNCTION_RESET 61
42528+#define OPCODE_COMMON_MANAGE_FAT 68
42529+#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
42530+#define OPCODE_COMMON_GET_BEACON_STATE 70
42531+#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
42532+#define OPCODE_COMMON_GET_PORT_NAME 77
42533+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
42534+#define OPCODE_COMMON_GET_PHY_DETAILS 102
42535+#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
42536+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
42537+#define OPCODE_COMMON_NTWK_GET_LINK_SPEED 134
42538+#define OPCODE_COMMON_GET_HSW_CONFIG 152
42539+#define OPCODE_COMMON_SET_HSW_CONFIG 153
42540+#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
42541
42542+#define OPCODE_ETH_RSS_CONFIG 1
42543 #define OPCODE_ETH_ACPI_CONFIG 2
42544 #define OPCODE_ETH_PROMISCUOUS 3
42545 #define OPCODE_ETH_GET_STATISTICS 4
42546 #define OPCODE_ETH_TX_CREATE 7
42547-#define OPCODE_ETH_RX_CREATE 8
42548-#define OPCODE_ETH_TX_DESTROY 9
42549-#define OPCODE_ETH_RX_DESTROY 10
42550+#define OPCODE_ETH_RX_CREATE 8
42551+#define OPCODE_ETH_TX_DESTROY 9
42552+#define OPCODE_ETH_RX_DESTROY 10
42553+#define OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG 12
42554+#define OPCODE_ETH_PG_FEATURE_QUERY_REQUEST 23
42555+
42556+#define OPCODE_LOWLEVEL_HOST_DDR_DMA 17
42557+#define OPCODE_LOWLEVEL_LOOPBACK_TEST 18
42558+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19
42559
42560 struct be_cmd_req_hdr {
42561 u8 opcode; /* dword 0 */
42562@@ -159,7 +238,7 @@ struct be_cmd_req_hdr {
42563 };
42564
42565 #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
42566-#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
42567+#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
42568 struct be_cmd_resp_hdr {
42569 u32 info; /* dword 0 */
42570 u32 status; /* dword 1 */
42571@@ -265,7 +344,7 @@ struct be_cmd_req_pmac_del {
42572 /******************** Create CQ ***************************/
42573 /* Pseudo amap definition in which each bit of the actual structure is defined
42574 * as a byte: used to calculate offset/shift/mask of each field */
42575-struct amap_cq_context {
42576+struct amap_cq_context_be {
42577 u8 cidx[11]; /* dword 0*/
42578 u8 rsvd0; /* dword 0*/
42579 u8 coalescwm[2]; /* dword 0*/
42580@@ -288,11 +367,28 @@ struct amap_cq_context {
42581 u8 rsvd5[32]; /* dword 3*/
42582 } __packed;
42583
42584+struct amap_cq_context_lancer {
42585+ u8 rsvd0[12]; /* dword 0*/
42586+ u8 coalescwm[2]; /* dword 0*/
42587+ u8 nodelay; /* dword 0*/
42588+ u8 rsvd1[12]; /* dword 0*/
42589+ u8 count[2]; /* dword 0*/
42590+ u8 valid; /* dword 0*/
42591+ u8 rsvd2; /* dword 0*/
42592+ u8 eventable; /* dword 0*/
42593+ u8 eqid[16]; /* dword 1*/
42594+ u8 rsvd3[15]; /* dword 1*/
42595+ u8 armed; /* dword 1*/
42596+ u8 rsvd4[32]; /* dword 2*/
42597+ u8 rsvd5[32]; /* dword 3*/
42598+} __packed;
42599+
42600 struct be_cmd_req_cq_create {
42601 struct be_cmd_req_hdr hdr;
42602 u16 num_pages;
42603- u16 rsvd0;
42604- u8 context[sizeof(struct amap_cq_context) / 8];
42605+ u8 page_size;
42606+ u8 rsvd0;
42607+ u8 context[sizeof(struct amap_cq_context_be) / 8];
42608 struct phys_addr pages[8];
42609 } __packed;
42610
42611@@ -302,10 +398,28 @@ struct be_cmd_resp_cq_create {
42612 u16 rsvd0;
42613 } __packed;
42614
42615+struct be_cmd_req_get_fat {
42616+ struct be_cmd_req_hdr hdr;
42617+ u32 fat_operation;
42618+ u32 read_log_offset;
42619+ u32 read_log_length;
42620+ u32 data_buffer_size;
42621+ u32 data_buffer[1];
42622+} __packed;
42623+
42624+struct be_cmd_resp_get_fat {
42625+ struct be_cmd_resp_hdr hdr;
42626+ u32 log_size;
42627+ u32 read_log_length;
42628+ u32 rsvd[2];
42629+ u32 data_buffer[1];
42630+} __packed;
42631+
42632+
42633 /******************** Create MCCQ ***************************/
42634 /* Pseudo amap definition in which each bit of the actual structure is defined
42635 * as a byte: used to calculate offset/shift/mask of each field */
42636-struct amap_mcc_context {
42637+struct amap_mcc_context_be {
42638 u8 con_index[14];
42639 u8 rsvd0[2];
42640 u8 ring_size[4];
42641@@ -320,11 +434,31 @@ struct amap_mcc_context {
42642 u8 rsvd2[32];
42643 } __packed;
42644
42645+struct amap_mcc_context_lancer {
42646+ u8 async_cq_id[16];
42647+ u8 ring_size[4];
42648+ u8 rsvd0[12];
42649+ u8 rsvd1[31];
42650+ u8 valid;
42651+ u8 async_cq_valid[1];
42652+ u8 rsvd2[31];
42653+ u8 rsvd3[32];
42654+} __packed;
42655+
42656 struct be_cmd_req_mcc_create {
42657 struct be_cmd_req_hdr hdr;
42658 u16 num_pages;
42659- u16 rsvd0;
42660- u8 context[sizeof(struct amap_mcc_context) / 8];
42661+ u16 cq_id;
42662+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
42663+ struct phys_addr pages[8];
42664+} __packed;
42665+
42666+struct be_cmd_req_mcc_ext_create {
42667+ struct be_cmd_req_hdr hdr;
42668+ u16 num_pages;
42669+ u16 cq_id;
42670+ u32 async_event_bitmap[1];
42671+ u8 context[sizeof(struct amap_mcc_context_be) / 8];
42672 struct phys_addr pages[8];
42673 } __packed;
42674
42675@@ -335,49 +469,32 @@ struct be_cmd_resp_mcc_create {
42676 } __packed;
42677
42678 /******************** Create TxQ ***************************/
42679-#define BE_ETH_TX_RING_TYPE_STANDARD 2
42680+#define ETX_QUEUE_TYPE_STANDARD 0x2
42681+#define ETX_QUEUE_TYPE_PRIORITY 0x10
42682 #define BE_ULP1_NUM 1
42683
42684-/* Pseudo amap definition in which each bit of the actual structure is defined
42685- * as a byte: used to calculate offset/shift/mask of each field */
42686-struct amap_tx_context {
42687- u8 rsvd0[16]; /* dword 0 */
42688- u8 tx_ring_size[4]; /* dword 0 */
42689- u8 rsvd1[26]; /* dword 0 */
42690- u8 pci_func_id[8]; /* dword 1 */
42691- u8 rsvd2[9]; /* dword 1 */
42692- u8 ctx_valid; /* dword 1 */
42693- u8 cq_id_send[16]; /* dword 2 */
42694- u8 rsvd3[16]; /* dword 2 */
42695- u8 rsvd4[32]; /* dword 3 */
42696- u8 rsvd5[32]; /* dword 4 */
42697- u8 rsvd6[32]; /* dword 5 */
42698- u8 rsvd7[32]; /* dword 6 */
42699- u8 rsvd8[32]; /* dword 7 */
42700- u8 rsvd9[32]; /* dword 8 */
42701- u8 rsvd10[32]; /* dword 9 */
42702- u8 rsvd11[32]; /* dword 10 */
42703- u8 rsvd12[32]; /* dword 11 */
42704- u8 rsvd13[32]; /* dword 12 */
42705- u8 rsvd14[32]; /* dword 13 */
42706- u8 rsvd15[32]; /* dword 14 */
42707- u8 rsvd16[32]; /* dword 15 */
42708-} __packed;
42709-
42710 struct be_cmd_req_eth_tx_create {
42711 struct be_cmd_req_hdr hdr;
42712 u8 num_pages;
42713 u8 ulp_num;
42714- u8 type;
42715- u8 bound_port;
42716- u8 context[sizeof(struct amap_tx_context) / 8];
42717+ u16 type;
42718+ u16 if_id;
42719+ u8 queue_size;
42720+ u8 rsvd1;
42721+ u32 rsvd2;
42722+ u16 cq_id;
42723+ u16 rsvd3;
42724+ u32 rsvd4[13];
42725 struct phys_addr pages[8];
42726 } __packed;
42727
42728 struct be_cmd_resp_eth_tx_create {
42729 struct be_cmd_resp_hdr hdr;
42730 u16 cid;
42731- u16 rsvd0;
42732+ u16 rid;
42733+ u32 db_offset;
42734+ u8 tc_id;
42735+ u8 rsvd0[3];
42736 } __packed;
42737
42738 /******************** Create RxQ ***************************/
42739@@ -396,7 +513,7 @@ struct be_cmd_req_eth_rx_create {
42740 struct be_cmd_resp_eth_rx_create {
42741 struct be_cmd_resp_hdr hdr;
42742 u16 id;
42743- u8 cpu_id;
42744+ u8 rss_id;
42745 u8 rsvd0;
42746 } __packed;
42747
42748@@ -429,14 +546,15 @@ enum be_if_flags {
42749 BE_IF_FLAGS_VLAN = 0x100,
42750 BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200,
42751 BE_IF_FLAGS_PASS_L2_ERRORS = 0x400,
42752- BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800
42753+ BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800,
42754+ BE_IF_FLAGS_MULTICAST = 0x1000
42755 };
42756
42757 /* An RX interface is an object with one or more MAC addresses and
42758 * filtering capabilities. */
42759 struct be_cmd_req_if_create {
42760 struct be_cmd_req_hdr hdr;
42761- u32 version; /* ignore currntly */
42762+ u32 version; /* ignore currently */
42763 u32 capability_flags;
42764 u32 enable_flags;
42765 u8 mac_addr[ETH_ALEN];
42766@@ -458,7 +576,7 @@ struct be_cmd_req_if_destroy {
42767 };
42768
42769 /*************** HW Stats Get **********************************/
42770-struct be_port_rxf_stats {
42771+struct be_port_rxf_stats_v0 {
42772 u32 rx_bytes_lsd; /* dword 0*/
42773 u32 rx_bytes_msd; /* dword 1*/
42774 u32 rx_total_frames; /* dword 2*/
42775@@ -527,8 +645,8 @@ struct be_port_rxf_stats {
42776 u32 rx_input_fifo_overflow; /* dword 65*/
42777 };
42778
42779-struct be_rxf_stats {
42780- struct be_port_rxf_stats port[2];
42781+struct be_rxf_stats_v0 {
42782+ struct be_port_rxf_stats_v0 port[2];
42783 u32 rx_drops_no_pbuf; /* dword 132*/
42784 u32 rx_drops_no_txpb; /* dword 133*/
42785 u32 rx_drops_no_erx_descr; /* dword 134*/
42786@@ -545,31 +663,51 @@ struct be_rxf_stats {
42787 u32 rx_drops_invalid_ring; /* dword 145*/
42788 u32 forwarded_packets; /* dword 146*/
42789 u32 rx_drops_mtu; /* dword 147*/
42790- u32 rsvd0[15];
42791+ u32 rsvd0[7];
42792+ u32 port0_jabber_events;
42793+ u32 port1_jabber_events;
42794+ u32 rsvd1[6];
42795 };
42796
42797-struct be_erx_stats {
42798+struct be_erx_stats_v0 {
42799 u32 rx_drops_no_fragments[44]; /* dwordS 0 to 43*/
42800- u32 debug_wdma_sent_hold; /* dword 44*/
42801- u32 debug_wdma_pbfree_sent_hold; /* dword 45*/
42802- u32 debug_wdma_zerobyte_pbfree_sent_hold; /* dword 46*/
42803- u32 debug_pmem_pbuf_dealloc; /* dword 47*/
42804+ u32 rsvd[4];
42805 };
42806
42807-struct be_hw_stats {
42808- struct be_rxf_stats rxf;
42809+struct be_pmem_stats {
42810+ u32 eth_red_drops;
42811+ u32 rsvd[5];
42812+};
42813+
42814+struct be_hw_stats_v0 {
42815+ struct be_rxf_stats_v0 rxf;
42816 u32 rsvd[48];
42817- struct be_erx_stats erx;
42818+ struct be_erx_stats_v0 erx;
42819+ struct be_pmem_stats pmem;
42820 };
42821
42822-struct be_cmd_req_get_stats {
42823+struct be_cmd_req_get_stats_v0 {
42824 struct be_cmd_req_hdr hdr;
42825- u8 rsvd[sizeof(struct be_hw_stats)];
42826+ u8 rsvd[sizeof(struct be_hw_stats_v0)];
42827 };
42828
42829-struct be_cmd_resp_get_stats {
42830+struct be_cmd_resp_get_stats_v0 {
42831 struct be_cmd_resp_hdr hdr;
42832- struct be_hw_stats hw_stats;
42833+ struct be_hw_stats_v0 hw_stats;
42834+};
42835+
42836+struct be_cmd_req_get_cntl_addnl_attribs {
42837+ struct be_cmd_req_hdr hdr;
42838+ u8 rsvd[8];
42839+};
42840+
42841+struct be_cmd_resp_get_cntl_addnl_attribs {
42842+ struct be_cmd_resp_hdr hdr;
42843+ u16 ipl_file_number;
42844+ u8 ipl_file_version;
42845+ u8 rsvd0;
42846+ u8 on_die_temperature; /* in degrees centigrade*/
42847+ u8 rsvd1[3];
42848 };
42849
42850 struct be_cmd_req_vlan_config {
42851@@ -581,30 +719,22 @@ struct be_cmd_req_vlan_config {
42852 u16 normal_vlan[64];
42853 } __packed;
42854
42855-struct be_cmd_req_promiscuous_config {
42856- struct be_cmd_req_hdr hdr;
42857- u8 port0_promiscuous;
42858- u8 port1_promiscuous;
42859- u16 rsvd0;
42860-} __packed;
42861-
42862+/******************** RX FILTER ******************************/
42863+#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
42864 struct macaddr {
42865 u8 byte[ETH_ALEN];
42866 };
42867
42868-struct be_cmd_req_mcast_mac_config {
42869+struct be_cmd_req_rx_filter {
42870 struct be_cmd_req_hdr hdr;
42871- u16 num_mac;
42872- u8 promiscuous;
42873- u8 interface_id;
42874- struct macaddr mac[32];
42875-} __packed;
42876-
42877-static inline struct be_hw_stats *
42878-hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd)
42879-{
42880- return &cmd->hw_stats;
42881-}
42882+ u32 global_flags_mask;
42883+ u32 global_flags;
42884+ u32 if_flags_mask;
42885+ u32 if_flags;
42886+ u32 if_id;
42887+ u32 mcast_num;
42888+ struct macaddr mcast_mac[BE_MAX_MC];
42889+};
42890
42891 /******************** Link Status Query *******************/
42892 struct be_cmd_req_link_status {
42893@@ -619,13 +749,18 @@ enum {
42894 };
42895
42896 enum {
42897- PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
42898+ PHY_LINK_SPEED_ZERO = 0x0, /* => No link */
42899 PHY_LINK_SPEED_10MBPS = 0x1,
42900 PHY_LINK_SPEED_100MBPS = 0x2,
42901 PHY_LINK_SPEED_1GBPS = 0x3,
42902 PHY_LINK_SPEED_10GBPS = 0x4
42903 };
42904
42905+enum {
42906+ LINK_DOWN = 0x0,
42907+ LINK_UP = 0X1
42908+};
42909+
42910 struct be_cmd_resp_link_status {
42911 struct be_cmd_resp_hdr hdr;
42912 u8 physical_port;
42913@@ -634,9 +769,47 @@ struct be_cmd_resp_link_status {
42914 u8 mac_fault;
42915 u8 mgmt_mac_duplex;
42916 u8 mgmt_mac_speed;
42917- u16 rsvd0;
42918+ u16 link_speed;
42919+ u32 logical_link_status;
42920 } __packed;
42921
42922+/******************** Port Identification ***************************/
42923+/* Identifies the type of port attached to NIC */
42924+struct be_cmd_req_port_type {
42925+ struct be_cmd_req_hdr hdr;
42926+ u32 page_num;
42927+ u32 port;
42928+};
42929+
42930+enum {
42931+ TR_PAGE_A0 = 0xa0,
42932+ TR_PAGE_A2 = 0xa2
42933+};
42934+
42935+struct be_cmd_resp_port_type {
42936+ struct be_cmd_resp_hdr hdr;
42937+ u32 page_num;
42938+ u32 port;
42939+ struct data {
42940+ u8 identifier;
42941+ u8 identifier_ext;
42942+ u8 connector;
42943+ u8 transceiver[8];
42944+ u8 rsvd0[3];
42945+ u8 length_km;
42946+ u8 length_hm;
42947+ u8 length_om1;
42948+ u8 length_om2;
42949+ u8 length_cu;
42950+ u8 length_cu_m;
42951+ u8 vendor_name[16];
42952+ u8 rsvd;
42953+ u8 vendor_oui[3];
42954+ u8 vendor_pn[16];
42955+ u8 vendor_rev[4];
42956+ } data;
42957+};
42958+
42959 /******************** Get FW Version *******************/
42960 struct be_cmd_req_get_fw_version {
42961 struct be_cmd_req_hdr hdr;
42962@@ -686,9 +859,13 @@ struct be_cmd_resp_modify_eq_delay {
42963 } __packed;
42964
42965 /******************** Get FW Config *******************/
42966+#define FLEX10_MODE 0x400
42967+#define VNIC_MODE 0x20000
42968+#define UMC_ENABLED 0x1000000
42969+
42970 struct be_cmd_req_query_fw_cfg {
42971 struct be_cmd_req_hdr hdr;
42972- u32 rsvd[30];
42973+ u32 rsvd[31];
42974 };
42975
42976 struct be_cmd_resp_query_fw_cfg {
42977@@ -696,10 +873,61 @@ struct be_cmd_resp_query_fw_cfg {
42978 u32 be_config_number;
42979 u32 asic_revision;
42980 u32 phys_port;
42981- u32 function_cap;
42982+ u32 function_mode;
42983 u32 rsvd[26];
42984+ u32 function_caps;
42985 };
42986
42987+/******************** RSS Config *******************/
42988+/* RSS types */
42989+#define RSS_ENABLE_NONE 0x0
42990+#define RSS_ENABLE_IPV4 0x1
42991+#define RSS_ENABLE_TCP_IPV4 0x2
42992+#define RSS_ENABLE_IPV6 0x4
42993+#define RSS_ENABLE_TCP_IPV6 0x8
42994+
42995+struct be_cmd_req_rss_config {
42996+ struct be_cmd_req_hdr hdr;
42997+ u32 if_id;
42998+ u16 enable_rss;
42999+ u16 cpu_table_size_log2;
43000+ u32 hash[10];
43001+ u8 cpu_table[128];
43002+ u8 flush;
43003+ u8 rsvd0[3];
43004+};
43005+
43006+/******************** Port Beacon ***************************/
43007+
43008+#define BEACON_STATE_ENABLED 0x1
43009+#define BEACON_STATE_DISABLED 0x0
43010+
43011+struct be_cmd_req_enable_disable_beacon {
43012+ struct be_cmd_req_hdr hdr;
43013+ u8 port_num;
43014+ u8 beacon_state;
43015+ u8 beacon_duration;
43016+ u8 status_duration;
43017+} __packed;
43018+
43019+struct be_cmd_resp_enable_disable_beacon {
43020+ struct be_cmd_resp_hdr resp_hdr;
43021+ u32 rsvd0;
43022+} __packed;
43023+
43024+struct be_cmd_req_get_beacon_state {
43025+ struct be_cmd_req_hdr hdr;
43026+ u8 port_num;
43027+ u8 rsvd0;
43028+ u16 rsvd1;
43029+} __packed;
43030+
43031+struct be_cmd_resp_get_beacon_state {
43032+ struct be_cmd_resp_hdr resp_hdr;
43033+ u8 beacon_state;
43034+ u8 rsvd0[3];
43035+} __packed;
43036+
43037 /****************** Firmware Flash ******************/
43038 struct flashrom_params {
43039 u32 op_code;
43040@@ -714,17 +942,468 @@ struct be_cmd_write_flashrom {
43041 struct flashrom_params params;
43042 };
43043
43044+/************************ WOL *******************************/
43045+struct be_cmd_req_acpi_wol_magic_config {
43046+ struct be_cmd_req_hdr hdr;
43047+ u32 rsvd0[145];
43048+ u8 magic_mac[6];
43049+ u8 rsvd2[2];
43050+} __packed;
43051+
43052+/********************** LoopBack test *********************/
43053+struct be_cmd_req_loopback_test {
43054+ struct be_cmd_req_hdr hdr;
43055+ u32 loopback_type;
43056+ u32 num_pkts;
43057+ u64 pattern;
43058+ u32 src_port;
43059+ u32 dest_port;
43060+ u32 pkt_size;
43061+};
43062+
43063+struct be_cmd_resp_loopback_test {
43064+ struct be_cmd_resp_hdr resp_hdr;
43065+ u32 status;
43066+ u32 num_txfer;
43067+ u32 num_rx;
43068+ u32 miscomp_off;
43069+ u32 ticks_compl;
43070+};
43071+
43072+struct be_cmd_req_set_lmode {
43073+ struct be_cmd_req_hdr hdr;
43074+ u8 src_port;
43075+ u8 dest_port;
43076+ u8 loopback_type;
43077+ u8 loopback_state;
43078+};
43079+
43080+struct be_cmd_resp_set_lmode {
43081+ struct be_cmd_resp_hdr resp_hdr;
43082+ u8 rsvd0[4];
43083+};
43084+
43085+/********************** DDR DMA test *********************/
43086+struct be_cmd_req_ddrdma_test {
43087+ struct be_cmd_req_hdr hdr;
43088+ u64 pattern;
43089+ u32 byte_count;
43090+ u32 rsvd0;
43091+ u8 snd_buff[4096];
43092+ u8 rsvd1[4096];
43093+};
43094+
43095+struct be_cmd_resp_ddrdma_test {
43096+ struct be_cmd_resp_hdr hdr;
43097+ u64 pattern;
43098+ u32 byte_cnt;
43099+ u32 snd_err;
43100+ u8 rsvd0[4096];
43101+ u8 rcv_buff[4096];
43102+};
43103+
43104+/*********************** SEEPROM Read ***********************/
43105+
43106+#define BE_READ_SEEPROM_LEN 1024
43107+struct be_cmd_req_seeprom_read {
43108+ struct be_cmd_req_hdr hdr;
43109+ u8 rsvd0[BE_READ_SEEPROM_LEN];
43110+};
43111+
43112+struct be_cmd_resp_seeprom_read {
43113+ struct be_cmd_req_hdr hdr;
43114+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
43115+};
43116+
43117+enum {
43118+ PHY_TYPE_CX4_10GB = 0,
43119+ PHY_TYPE_XFP_10GB,
43120+ PHY_TYPE_SFP_1GB,
43121+ PHY_TYPE_SFP_PLUS_10GB,
43122+ PHY_TYPE_KR_10GB,
43123+ PHY_TYPE_KX4_10GB,
43124+ PHY_TYPE_BASET_10GB,
43125+ PHY_TYPE_BASET_1GB,
43126+ PHY_TYPE_BASEX_1GB,
43127+ PHY_TYPE_SGMII,
43128+ PHY_TYPE_DISABLED = 255
43129+};
43130+
43131+#define BE_AN_EN 0x2
43132+#define BE_PAUSE_SYM_EN 0x80
43133+
43134+struct be_cmd_req_get_phy_info {
43135+ struct be_cmd_req_hdr hdr;
43136+ u8 rsvd0[24];
43137+};
43138+
43139+struct be_phy_info {
43140+ u16 phy_type;
43141+ u16 interface_type;
43142+ u32 misc_params;
43143+ u16 ext_phy_details;
43144+ u16 rsvd;
43145+ u16 auto_speeds_supported;
43146+ u16 fixed_speeds_supported;
43147+ u32 future_use[2];
43148+};
43149+
43150+struct be_cmd_resp_get_phy_info {
43151+ struct be_cmd_req_hdr hdr;
43152+ struct be_phy_info phy_info;
43153+};
43154+
43155+/*********************** Set QOS ***********************/
43156+
43157+#define BE_QOS_BITS_NIC 1
43158+
43159+struct be_cmd_req_set_qos {
43160+ struct be_cmd_req_hdr hdr;
43161+ u32 valid_bits;
43162+ u32 max_bps_nic;
43163+ u32 rsvd[7];
43164+};
43165+
43166+struct be_cmd_resp_set_qos {
43167+ struct be_cmd_resp_hdr hdr;
43168+ u32 rsvd;
43169+};
43170+
43171+/*********************** Controller Attributes ***********************/
43172+struct be_cmd_req_cntl_attribs {
43173+ struct be_cmd_req_hdr hdr;
43174+};
43175+
43176+struct be_cmd_resp_cntl_attribs {
43177+ struct be_cmd_resp_hdr hdr;
43178+ struct mgmt_controller_attrib attribs;
43179+};
43180+
43181+/******************* get port names ***************/
43182+struct be_cmd_req_get_port_name {
43183+ struct be_cmd_req_hdr hdr;
43184+ u32 rsvd0;
43185+};
43186+
43187+struct be_cmd_resp_get_port_name {
43188+ struct be_cmd_req_hdr hdr;
43189+ u8 port0_name;
43190+ u8 port1_name;
43191+ u8 rsvd0[2];
43192+};
43193+
43194+struct be_cmd_resp_get_port_name_v1 {
43195+ struct be_cmd_req_hdr hdr;
43196+ u32 pt : 2;
43197+ u32 rsvd0 : 30;
43198+ u8 port0_name;
43199+ u8 port1_name;
43200+ u8 port2_name;
43201+ u8 port3_name;
43202+};
43203+
43204+/*********************** Set driver function ***********************/
43205+#define CAPABILITY_SW_TIMESTAMPS 2
43206+#define CAPABILITY_BE3_NATIVE_ERX_API 4
43207+
43208+struct be_cmd_req_set_func_cap {
43209+ struct be_cmd_req_hdr hdr;
43210+ u32 valid_cap_flags;
43211+ u32 cap_flags;
43212+ u8 rsvd[212];
43213+};
43214+
43215+struct be_cmd_resp_set_func_cap {
43216+ struct be_cmd_resp_hdr hdr;
43217+ u32 valid_cap_flags;
43218+ u32 cap_flags;
43219+ u8 rsvd[212];
43220+};
43221+
43222+/*********************** PG Query Request ****************************/
43223+#define REQ_PG_QUERY 0x1
43224+#define REQ_PG_FEAT 0x1
43225+struct be_cmd_req_pg {
43226+ struct be_cmd_req_hdr hdr;
43227+ u32 query;
43228+ u32 pfc_pg;
43229+};
43230+
43231+struct be_cmd_resp_pg {
43232+ struct be_cmd_resp_hdr hdr;
43233+ u32 pfc_pg;
43234+ u32 num_tx_rings;
43235+};
43236+
43237+/*********************** Function Privileges ***********************/
43238+enum {
43239+ BE_PRIV_DEFAULT = 0x1,
43240+ BE_PRIV_LNKQUERY = 0x2,
43241+ BE_PRIV_LNKSTATS = 0x4,
43242+ BE_PRIV_LNKMGMT = 0x8,
43243+ BE_PRIV_LNKDIAG = 0x10,
43244+ BE_PRIV_UTILQUERY = 0x20,
43245+ BE_PRIV_FILTMGMT = 0x40,
43246+ BE_PRIV_IFACEMGMT = 0x80,
43247+ BE_PRIV_VHADM = 0x100,
43248+ BE_PRIV_DEVCFG = 0x200,
43249+ BE_PRIV_DEVSEC = 0x400
43250+};
43251+
43252+struct be_cmd_req_get_fn_privileges {
43253+ struct be_cmd_req_hdr hdr;
43254+ u32 rsvd;
43255+};
43256+
43257+struct be_cmd_resp_get_fn_privileges {
43258+ struct be_cmd_resp_hdr hdr;
43259+ u32 privilege_mask;
43260+};
43261+
43262+struct be_cmd_req_set_fn_privileges {
43263+ struct be_cmd_req_hdr hdr;
43264+ u32 privilege_mask;
43265+};
43266+
43267+struct be_cmd_resp_set_fn_privileges {
43268+ struct be_cmd_resp_hdr hdr;
43269+ u32 prev_privilege_mask;
43270+};
43271+
43272+/*********************** HSW Config ***********************/
43273+struct amap_set_hsw_context {
43274+ u8 interface_id[16];
43275+ u8 rsvd0[14];
43276+ u8 pvid_valid;
43277+ u8 rsvd1;
43278+ u8 rsvd2[16];
43279+ u8 pvid[16];
43280+ u8 rsvd3[32];
43281+ u8 rsvd4[32];
43282+ u8 rsvd5[32];
43283+} __packed;
43284+
43285+struct be_cmd_req_set_hsw_config {
43286+ struct be_cmd_req_hdr hdr;
43287+ u8 context[sizeof(struct amap_set_hsw_context) / 8];
43288+} __packed;
43289+
43290+struct be_cmd_resp_set_hsw_config {
43291+ struct be_cmd_resp_hdr hdr;
43292+ u32 rsvd;
43293+};
43294+
43295+struct amap_get_hsw_req_context {
43296+ u8 interface_id[16];
43297+ u8 rsvd0[14];
43298+ u8 pvid_valid;
43299+ u8 pport;
43300+} __packed;
43301+
43302+struct amap_get_hsw_resp_context {
43303+ u8 rsvd1[16];
43304+ u8 pvid[16];
43305+ u8 rsvd2[32];
43306+ u8 rsvd3[32];
43307+ u8 rsvd4[32];
43308+} __packed;
43309+
43310+struct be_cmd_req_get_hsw_config {
43311+ struct be_cmd_req_hdr hdr;
43312+ u8 context[sizeof(struct amap_get_hsw_req_context) / 8];
43313+} __packed;
43314+
43315+struct be_cmd_resp_get_hsw_config {
43316+ struct be_cmd_resp_hdr hdr;
43317+ u8 context[sizeof(struct amap_get_hsw_resp_context) / 8];
43318+ u32 rsvd;
43319+};
43320+
43321+/*************** Set speed ********************/
43322+struct be_cmd_req_set_port_speed_v1 {
43323+ struct be_cmd_req_hdr hdr;
43324+ u8 port_num;
43325+ u8 virt_port;
43326+ u16 mac_speed;
43327+ u16 dac_cable_length;
43328+ u16 rsvd0;
43329+};
43330+
43331+struct be_cmd_resp_set_port_speed_v1 {
43332+ struct be_cmd_resp_hdr hdr;
43333+ u32 rsvd0;
43334+};
43335+
43336+/************** get port speed *******************/
43337+struct be_cmd_req_get_port_speed {
43338+ struct be_cmd_req_hdr hdr;
43339+ u8 port_num;
43340+};
43341+
43342+struct be_cmd_resp_get_port_speed {
43343+ struct be_cmd_req_hdr hdr;
43344+ u16 mac_speed;
43345+ u16 dac_cable_length;
43346+};
43347+
43348+/*************** HW Stats Get v1 **********************************/
43349+#define BE_TXP_SW_SZ 48
43350+struct be_port_rxf_stats_v1 {
43351+ u32 rsvd0[12];
43352+ u32 rx_crc_errors;
43353+ u32 rx_alignment_symbol_errors;
43354+ u32 rx_pause_frames;
43355+ u32 rx_priority_pause_frames;
43356+ u32 rx_control_frames;
43357+ u32 rx_in_range_errors;
43358+ u32 rx_out_range_errors;
43359+ u32 rx_frame_too_long;
43360+ u32 rx_address_match_errors;
43361+ u32 rx_dropped_too_small;
43362+ u32 rx_dropped_too_short;
43363+ u32 rx_dropped_header_too_small;
43364+ u32 rx_dropped_tcp_length;
43365+ u32 rx_dropped_runt;
43366+ u32 rsvd1[10];
43367+ u32 rx_ip_checksum_errs;
43368+ u32 rx_tcp_checksum_errs;
43369+ u32 rx_udp_checksum_errs;
43370+ u32 rsvd2[7];
43371+ u32 rx_switched_unicast_packets;
43372+ u32 rx_switched_multicast_packets;
43373+ u32 rx_switched_broadcast_packets;
43374+ u32 rsvd3[3];
43375+ u32 tx_pauseframes;
43376+ u32 tx_priority_pauseframes;
43377+ u32 tx_controlframes;
43378+ u32 rsvd4[10];
43379+ u32 rxpp_fifo_overflow_drop;
43380+ u32 rx_input_fifo_overflow_drop;
43381+ u32 pmem_fifo_overflow_drop;
43382+ u32 jabber_events;
43383+ u32 rsvd5[3];
43384+};
43385+
43386+
43387+struct be_rxf_stats_v1 {
43388+ struct be_port_rxf_stats_v1 port[4];
43389+ u32 rsvd0[2];
43390+ u32 rx_drops_no_pbuf;
43391+ u32 rx_drops_no_txpb;
43392+ u32 rx_drops_no_erx_descr;
43393+ u32 rx_drops_no_tpre_descr;
43394+ u32 rsvd1[6];
43395+ u32 rx_drops_too_many_frags;
43396+ u32 rx_drops_invalid_ring;
43397+ u32 forwarded_packets;
43398+ u32 rx_drops_mtu;
43399+ u32 rsvd2[14];
43400+};
43401+
43402+struct be_erx_stats_v1 {
43403+ u32 rx_drops_no_fragments[68]; /* dwordS 0 to 67*/
43404+ u32 rsvd[4];
43405+};
43406+
43407+struct be_hw_stats_v1 {
43408+ struct be_rxf_stats_v1 rxf;
43409+ u32 rsvd0[BE_TXP_SW_SZ];
43410+ struct be_erx_stats_v1 erx;
43411+ struct be_pmem_stats pmem;
43412+ u32 rsvd1[3];
43413+};
43414+
43415+struct be_cmd_req_get_stats_v1 {
43416+ struct be_cmd_req_hdr hdr;
43417+ u8 rsvd[sizeof(struct be_hw_stats_v1)];
43418+};
43419+
43420+struct be_cmd_resp_get_stats_v1 {
43421+ struct be_cmd_resp_hdr hdr;
43422+ struct be_hw_stats_v1 hw_stats;
43423+};
43424+
43425+static inline void *
43426+hw_stats_from_cmd(struct be_adapter *adapter)
43427+{
43428+ if (adapter->generation == BE_GEN3) {
43429+ struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
43430+
43431+ return &cmd->hw_stats;
43432+ } else {
43433+ struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
43434+
43435+ return &cmd->hw_stats;
43436+ }
43437+}
43438+
43439+static inline void *be_port_rxf_stats_from_cmd(struct be_adapter *adapter)
43440+{
43441+ if (adapter->generation == BE_GEN3) {
43442+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
43443+ struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
43444+
43445+ return &rxf_stats->port[adapter->port_num];
43446+ } else {
43447+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
43448+ struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
43449+
43450+ return &rxf_stats->port[adapter->port_num];
43451+ }
43452+}
43453+
43454+static inline void *be_rxf_stats_from_cmd(struct be_adapter *adapter)
43455+{
43456+ if (adapter->generation == BE_GEN3) {
43457+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
43458+
43459+ return &hw_stats->rxf;
43460+ } else {
43461+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
43462+
43463+ return &hw_stats->rxf;
43464+ }
43465+}
43466+
43467+static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
43468+{
43469+ if (adapter->generation == BE_GEN3) {
43470+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
43471+
43472+ return &hw_stats->erx;
43473+ } else {
43474+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
43475+
43476+ return &hw_stats->erx;
43477+ }
43478+}
43479+
43480+static inline void *be_pmem_stats_from_cmd(struct be_adapter *adapter)
43481+{
43482+ if (adapter->generation == BE_GEN3) {
43483+ struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
43484+
43485+ return &hw_stats->pmem;
43486+ } else {
43487+ struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
43488+
43489+ return &hw_stats->pmem;
43490+ }
43491+}
43492+
43493 extern int be_pci_fnum_get(struct be_adapter *adapter);
43494 extern int be_cmd_POST(struct be_adapter *adapter);
43495 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
43496 u8 type, bool permanent, u32 if_handle);
43497 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
43498- u32 if_id, u32 *pmac_id);
43499-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
43500+ u32 if_id, u32 *pmac_id, u32 domain);
43501+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id,
43502+ u32 domain);
43503 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
43504 u32 en_flags, u8 *mac, bool pmac_invalid,
43505- u32 *if_handle, u32 *pmac_id);
43506-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
43507+ u32 *if_handle, u32 *pmac_id, u32 domain);
43508+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
43509+ u32 domain);
43510 extern int be_cmd_eq_create(struct be_adapter *adapter,
43511 struct be_queue_info *eq, int eq_delay);
43512 extern int be_cmd_cq_create(struct be_adapter *adapter,
43513@@ -736,36 +1415,92 @@ extern int be_cmd_mccq_create(struct be_adapter *adapter,
43514 struct be_queue_info *cq);
43515 extern int be_cmd_txq_create(struct be_adapter *adapter,
43516 struct be_queue_info *txq,
43517- struct be_queue_info *cq);
43518+ struct be_queue_info *cq, u8 *tc_id);
43519 extern int be_cmd_rxq_create(struct be_adapter *adapter,
43520 struct be_queue_info *rxq, u16 cq_id,
43521 u16 frag_size, u16 max_frame_size, u32 if_id,
43522- u32 rss);
43523+ u32 rss, u8 *rss_id);
43524 extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
43525 int type);
43526+extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
43527+ struct be_queue_info *q);
43528 extern int be_cmd_link_status_query(struct be_adapter *adapter,
43529- bool *link_up);
43530+ int *link_status, u8 *mac_speed, u16 *link_speed, u32 dom);
43531 extern int be_cmd_reset(struct be_adapter *adapter);
43532 extern int be_cmd_get_stats(struct be_adapter *adapter,
43533 struct be_dma_mem *nonemb_cmd);
43534-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver);
43535+extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
43536+ char *fw_on_flash);
43537
43538 extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
43539 extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
43540 u16 *vtag_array, u32 num, bool untagged,
43541 bool promiscuous);
43542-extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
43543- u8 port_num, bool en);
43544-extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
43545- struct dev_mc_list *mc_list, u32 mc_count);
43546+extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
43547 extern int be_cmd_set_flow_control(struct be_adapter *adapter,
43548 u32 tx_fc, u32 rx_fc);
43549 extern int be_cmd_get_flow_control(struct be_adapter *adapter,
43550 u32 *tx_fc, u32 *rx_fc);
43551-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
43552- u32 *port_num, u32 *cap);
43553+extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
43554+ u32 *function_mode, u32 *functions_caps);
43555 extern int be_cmd_reset_function(struct be_adapter *adapter);
43556-extern int be_process_mcc(struct be_adapter *adapter);
43557+extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
43558+ u16 table_size);
43559+extern int be_process_mcc(struct be_adapter *adapter, int *status);
43560+extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
43561+ u8 port_num, u8 beacon, u8 status, u8 state);
43562+extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
43563+ u8 port_num, u32 *state);
43564+extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
43565+ u8 *connector);
43566 extern int be_cmd_write_flashrom(struct be_adapter *adapter,
43567 struct be_dma_mem *cmd, u32 flash_oper,
43568 u32 flash_opcode, u32 buf_size);
43569+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
43570+ int offset);
43571+extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
43572+ struct be_dma_mem *nonemb_cmd);
43573+extern int be_cmd_fw_init(struct be_adapter *adapter);
43574+extern int be_cmd_fw_clean(struct be_adapter *adapter);
43575+extern void be_async_mcc_enable(struct be_adapter *adapter);
43576+extern void be_async_mcc_disable(struct be_adapter *adapter);
43577+extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
43578+ u32 loopback_type, u32 pkt_size,
43579+ u32 num_pkts, u64 pattern);
43580+extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
43581+ u32 byte_cnt, struct be_dma_mem *cmd);
43582+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
43583+ struct be_dma_mem *nonemb_cmd);
43584+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
43585+ u8 loopback_type, u8 enable);
43586+extern int be_cmd_get_phy_info(struct be_adapter *adapter,
43587+ struct be_phy_info *phy_info);
43588+extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
43589+extern void be_detect_dump_ue(struct be_adapter *adapter);
43590+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
43591+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
43592+extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
43593+extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
43594+extern int be_cmd_req_native_mode(struct be_adapter *adapter);
43595+extern int be_cmd_query_port_names_v0(struct be_adapter *adapter, u8 *port_name);
43596+extern int be_cmd_query_port_names_v1(struct be_adapter *adapter, u8 *port_name);
43597+extern int be_cmd_req_pg_pfc(struct be_adapter *adapter, int *fw_num_txqs);
43598+
43599+extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
43600+ u32 *privilege, u32 domain);
43601+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
43602+ u32 mask, u32 *prev, u32 domain);
43603+extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
43604+ u32 domain, u16 intf_id);
43605+extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
43606+ u32 domain, u16 intf_id);
43607+extern int be_cmd_set_port_speed_v1(struct be_adapter *adapter, u8 port_num,
43608+ u16 mac_speed, u16 dac_cable_len);
43609+extern int be_cmd_get_port_speed(struct be_adapter *adapter, u8 port_num,
43610+ u16 *dac_cable_len, u16 *port_speed);
43611+#ifdef CONFIG_PALAU
43612+int be_cmd_pass_ext_ioctl(struct be_adapter *adapter, dma_addr_t dma,
43613+ int req_size, void *va);
43614+#endif
43615+
43616+#endif /* !BE_CMDS_H */
43617diff --git a/drivers/net/benet/be_compat.c b/drivers/net/benet/be_compat.c
43618new file mode 100644
43619index 0000000..bdd1dba
43620--- /dev/null
43621+++ b/drivers/net/benet/be_compat.c
43622@@ -0,0 +1,630 @@
43623+/*
43624+ * Copyright (C) 2005 - 2011 Emulex
43625+ * All rights reserved.
43626+ *
43627+ * This program is free software; you can redistribute it and/or
43628+ * modify it under the terms of the GNU General Public License version 2
43629+ * as published by the Free Software Foundation. The full GNU General
43630+ * Public License is included in this distribution in the file called COPYING.
43631+ *
43632+ * Contact Information:
43633+ * linux-drivers@emulex.com
43634+ *
43635+ * Emulex
43636+ * 3333 Susan Street
43637+ * Costa Mesa, CA 92626
43638+ */
43639+
43640+#include "be.h"
43641+
43642+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
43643+void be_netdev_ops_init(struct net_device *netdev, struct net_device_ops *ops)
43644+{
43645+ netdev->open = ops->ndo_open;
43646+ netdev->stop = ops->ndo_stop;
43647+ netdev->hard_start_xmit = ops->ndo_start_xmit;
43648+ netdev->set_mac_address = ops->ndo_set_mac_address;
43649+ netdev->get_stats = ops->ndo_get_stats;
43650+ netdev->set_multicast_list = ops->ndo_set_rx_mode;
43651+ netdev->change_mtu = ops->ndo_change_mtu;
43652+ netdev->vlan_rx_register = ops->ndo_vlan_rx_register;
43653+ netdev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
43654+ netdev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
43655+ netdev->do_ioctl = ops->ndo_do_ioctl;
43656+#ifdef CONFIG_NET_POLL_CONTROLLER
43657+ netdev->poll_controller = ops->ndo_poll_controller;
43658+#endif
43659+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
43660+ netdev->select_queue = ops->ndo_select_queue;
43661+#endif
43662+}
43663+#endif
43664+
43665+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
43666+int eth_validate_addr(struct net_device *netdev)
43667+{
43668+ return 0;
43669+}
43670+#endif
43671+
43672+/* New NAPI backport */
43673+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
43674+
43675+int be_poll_compat(struct net_device *netdev, int *budget)
43676+{
43677+ struct napi_struct *napi = netdev->priv;
43678+ u32 work_done, can_do;
43679+
43680+ can_do = min(*budget, netdev->quota);
43681+ work_done = napi->poll(napi, can_do);
43682+
43683+ *budget -= work_done;
43684+ netdev->quota -= work_done;
43685+ if (napi->rx)
43686+ return (work_done >= can_do);
43687+ return 0;
43688+}
43689+
43690+
43691+#endif /* New NAPI backport */
43692+
43693+int be_netif_napi_add(struct net_device *netdev,
43694+ struct napi_struct *napi,
43695+ int (*poll) (struct napi_struct *, int), int weight)
43696+{
43697+#ifdef HAVE_SIMULATED_MULTI_NAPI
43698+ struct be_adapter *adapter = netdev_priv(netdev);
43699+ struct net_device *nd;
43700+
43701+ nd = alloc_netdev(0, "", ether_setup);
43702+ if (!nd)
43703+ return -ENOMEM;
43704+ nd->priv = napi;
43705+ nd->weight = BE_NAPI_WEIGHT;
43706+ nd->poll = be_poll_compat;
43707+ set_bit(__LINK_STATE_START, &nd->state);
43708+
43709+ if (napi == &adapter->rx_obj[0].rx_eq.napi)
43710+ napi->rx = true;
43711+ napi->poll = poll;
43712+ napi->dev = nd;
43713+#ifdef RHEL_NEW_NAPI
43714+ napi->napi.dev = netdev;
43715+#endif
43716+ return 0;
43717+#else
43718+ netif_napi_add(netdev, napi, poll, weight);
43719+ return 0;
43720+#endif
43721+}
43722+void be_netif_napi_del(struct net_device *netdev)
43723+{
43724+#ifdef HAVE_SIMULATED_MULTI_NAPI
43725+ struct be_adapter *adapter = netdev_priv(netdev);
43726+ struct napi_struct *napi;
43727+ struct be_rx_obj *rxo;
43728+ int i;
43729+
43730+ for_all_rx_queues(adapter, rxo, i) {
43731+ napi = &rxo->rx_eq.napi;
43732+ if (napi->dev) {
43733+ free_netdev(napi->dev);
43734+ napi->dev = NULL;
43735+ }
43736+ }
43737+
43738+ napi = &adapter->tx_eq.napi;
43739+ if (napi->dev) {
43740+ free_netdev(napi->dev);
43741+ napi->dev = NULL;
43742+ }
43743+#endif
43744+}
43745+/* INET_LRO backport */
43746+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
43747+
43748+#define TCP_HDR_LEN(tcph) (tcph->doff << 2)
43749+#define IP_HDR_LEN(iph) (iph->ihl << 2)
43750+#define TCP_PAYLOAD_LENGTH(iph, tcph) (ntohs(iph->tot_len) - IP_HDR_LEN(iph) \
43751+ - TCP_HDR_LEN(tcph))
43752+
43753+#define IPH_LEN_WO_OPTIONS 5
43754+#define TCPH_LEN_WO_OPTIONS 5
43755+#define TCPH_LEN_W_TIMESTAMP 8
43756+
43757+#define LRO_MAX_PG_HLEN 64
43758+#define LRO_INC_STATS(lro_mgr, attr) { lro_mgr->stats.attr++; }
43759+/*
43760+ * Basic tcp checks whether packet is suitable for LRO
43761+ */
43762+static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
43763+ int len, struct net_lro_desc *lro_desc)
43764+{
43765+ /* check ip header: don't aggregate padded frames */
43766+ if (ntohs(iph->tot_len) != len)
43767+ return -1;
43768+
43769+ if (iph->ihl != IPH_LEN_WO_OPTIONS)
43770+ return -1;
43771+
43772+ if (tcph->cwr || tcph->ece || tcph->urg || !tcph->ack
43773+ || tcph->rst || tcph->syn || tcph->fin)
43774+ return -1;
43775+
43776+ if (INET_ECN_is_ce(ipv4_get_dsfield(iph)))
43777+ return -1;
43778+
43779+ if (tcph->doff != TCPH_LEN_WO_OPTIONS
43780+ && tcph->doff != TCPH_LEN_W_TIMESTAMP)
43781+ return -1;
43782+
43783+ /* check tcp options (only timestamp allowed) */
43784+ if (tcph->doff == TCPH_LEN_W_TIMESTAMP) {
43785+ u32 *topt = (u32 *)(tcph + 1);
43786+
43787+ if (*topt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
43788+ | (TCPOPT_TIMESTAMP << 8)
43789+ | TCPOLEN_TIMESTAMP))
43790+ return -1;
43791+
43792+ /* timestamp should be in right order */
43793+ topt++;
43794+ if (lro_desc && after(ntohl(lro_desc->tcp_rcv_tsval),
43795+ ntohl(*topt)))
43796+ return -1;
43797+
43798+ /* timestamp reply should not be zero */
43799+ topt++;
43800+ if (*topt == 0)
43801+ return -1;
43802+ }
43803+
43804+ return 0;
43805+}
43806+
43807+static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
43808+{
43809+ struct iphdr *iph = lro_desc->iph;
43810+ struct tcphdr *tcph = lro_desc->tcph;
43811+ u32 *p;
43812+ __wsum tcp_hdr_csum;
43813+
43814+ tcph->ack_seq = lro_desc->tcp_ack;
43815+ tcph->window = lro_desc->tcp_window;
43816+
43817+ if (lro_desc->tcp_saw_tstamp) {
43818+ p = (u32 *)(tcph + 1);
43819+ *(p+2) = lro_desc->tcp_rcv_tsecr;
43820+ }
43821+
43822+ iph->tot_len = htons(lro_desc->ip_tot_len);
43823+
43824+ iph->check = 0;
43825+ iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);
43826+
43827+ tcph->check = 0;
43828+ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), 0);
43829+ lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
43830+ tcph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
43831+ lro_desc->ip_tot_len -
43832+ IP_HDR_LEN(iph), IPPROTO_TCP,
43833+ lro_desc->data_csum);
43834+}
43835+
43836+static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
43837+{
43838+ __wsum tcp_csum;
43839+ __wsum tcp_hdr_csum;
43840+ __wsum tcp_ps_hdr_csum;
43841+
43842+ tcp_csum = ~csum_unfold(tcph->check);
43843+ tcp_hdr_csum = csum_partial((u8 *)tcph, TCP_HDR_LEN(tcph), tcp_csum);
43844+
43845+ tcp_ps_hdr_csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
43846+ len + TCP_HDR_LEN(tcph),
43847+ IPPROTO_TCP, 0);
43848+
43849+ return csum_sub(csum_sub(tcp_csum, tcp_hdr_csum),
43850+ tcp_ps_hdr_csum);
43851+}
43852+
43853+static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
43854+ struct iphdr *iph, struct tcphdr *tcph,
43855+ u16 vlan_tag, struct vlan_group *vgrp)
43856+{
43857+ int nr_frags;
43858+ u32 *ptr;
43859+ u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
43860+
43861+ nr_frags = skb_shinfo(skb)->nr_frags;
43862+ lro_desc->parent = skb;
43863+ lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
43864+ lro_desc->iph = iph;
43865+ lro_desc->tcph = tcph;
43866+ lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
43867+ lro_desc->tcp_ack = ntohl(tcph->ack_seq);
43868+ lro_desc->tcp_window = tcph->window;
43869+
43870+ lro_desc->pkt_aggr_cnt = 1;
43871+ lro_desc->ip_tot_len = ntohs(iph->tot_len);
43872+
43873+ if (tcph->doff == 8) {
43874+ ptr = (u32 *)(tcph+1);
43875+ lro_desc->tcp_saw_tstamp = 1;
43876+ lro_desc->tcp_rcv_tsval = *(ptr+1);
43877+ lro_desc->tcp_rcv_tsecr = *(ptr+2);
43878+ }
43879+
43880+ lro_desc->mss = tcp_data_len;
43881+ lro_desc->vgrp = vgrp;
43882+ lro_desc->vlan_tag = vlan_tag;
43883+ lro_desc->active = 1;
43884+
43885+ if (tcp_data_len)
43886+ lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
43887+ tcp_data_len);
43888+
43889+ if (!tcp_data_len)
43890+ lro_desc->ack_cnt++;
43891+}
43892+
43893+static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
43894+{
43895+ memset(lro_desc, 0, sizeof(struct net_lro_desc));
43896+}
43897+
43898+static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
43899+ struct tcphdr *tcph, int tcp_data_len)
43900+{
43901+ struct sk_buff *parent = lro_desc->parent;
43902+ u32 *topt;
43903+
43904+ lro_desc->pkt_aggr_cnt++;
43905+ lro_desc->ip_tot_len += tcp_data_len;
43906+ lro_desc->tcp_next_seq += tcp_data_len;
43907+ lro_desc->tcp_window = tcph->window;
43908+ lro_desc->tcp_ack = tcph->ack_seq;
43909+
43910+ /* don't update tcp_rcv_tsval, would not work with PAWS */
43911+ if (lro_desc->tcp_saw_tstamp) {
43912+ topt = (u32 *) (tcph + 1);
43913+ lro_desc->tcp_rcv_tsecr = *(topt + 2);
43914+ }
43915+
43916+ if (tcp_data_len)
43917+ lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
43918+ lro_tcp_data_csum(iph, tcph,
43919+ tcp_data_len),
43920+ parent->len);
43921+
43922+ parent->len += tcp_data_len;
43923+ parent->data_len += tcp_data_len;
43924+ if (tcp_data_len > lro_desc->mss)
43925+ lro_desc->mss = tcp_data_len;
43926+}
43927+
43928+static void lro_add_frags(struct net_lro_desc *lro_desc,
43929+ int len, int hlen, int truesize,
43930+ struct skb_frag_struct *skb_frags,
43931+ struct iphdr *iph, struct tcphdr *tcph)
43932+{
43933+ struct sk_buff *skb = lro_desc->parent;
43934+ int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
43935+
43936+ lro_add_common(lro_desc, iph, tcph, tcp_data_len);
43937+
43938+ skb->truesize += truesize;
43939+
43940+ if (!tcp_data_len) {
43941+ put_page(skb_frags[0].page);
43942+ lro_desc->ack_cnt++;
43943+ return;
43944+ }
43945+
43946+ skb_frags[0].page_offset += hlen;
43947+ skb_frags[0].size -= hlen;
43948+
43949+ while (tcp_data_len > 0) {
43950+ *(lro_desc->next_frag) = *skb_frags;
43951+ tcp_data_len -= skb_frags->size;
43952+ lro_desc->next_frag++;
43953+ skb_frags++;
43954+ skb_shinfo(skb)->nr_frags++;
43955+ }
43956+}
43957+
43958+static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
43959+ struct iphdr *iph,
43960+ struct tcphdr *tcph)
43961+{
43962+ if ((lro_desc->iph->saddr != iph->saddr)
43963+ || (lro_desc->iph->daddr != iph->daddr)
43964+ || (lro_desc->tcph->source != tcph->source)
43965+ || (lro_desc->tcph->dest != tcph->dest))
43966+ return -1;
43967+ return 0;
43968+}
43969+
43970+static struct net_lro_desc *lro_get_desc(struct net_lro_mgr *lro_mgr,
43971+ struct net_lro_desc *lro_arr,
43972+ struct iphdr *iph,
43973+ struct tcphdr *tcph)
43974+{
43975+ struct net_lro_desc *lro_desc = NULL;
43976+ struct net_lro_desc *tmp;
43977+ int max_desc = lro_mgr->max_desc;
43978+ int i;
43979+
43980+ for (i = 0; i < max_desc; i++) {
43981+ tmp = &lro_arr[i];
43982+ if (tmp->active)
43983+ if (!lro_check_tcp_conn(tmp, iph, tcph)) {
43984+ lro_desc = tmp;
43985+ goto out;
43986+ }
43987+ }
43988+
43989+ for (i = 0; i < max_desc; i++) {
43990+ if (!lro_arr[i].active) {
43991+ lro_desc = &lro_arr[i];
43992+ goto out;
43993+ }
43994+ }
43995+
43996+ LRO_INC_STATS(lro_mgr, no_desc);
43997+out:
43998+ return lro_desc;
43999+}
44000+
44001+static void lro_flush(struct net_lro_mgr *lro_mgr,
44002+ struct net_lro_desc *lro_desc)
44003+{
44004+ struct be_adapter *adapter = netdev_priv(lro_mgr->dev);
44005+
44006+ if (lro_desc->pkt_aggr_cnt > 1)
44007+ lro_update_tcp_ip_header(lro_desc);
44008+
44009+ skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;
44010+
44011+ if (lro_desc->vgrp) {
44012+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44013+ vlan_hwaccel_receive_skb(lro_desc->parent,
44014+ lro_desc->vgrp,
44015+ lro_desc->vlan_tag);
44016+ else
44017+ vlan_hwaccel_rx(lro_desc->parent,
44018+ lro_desc->vgrp,
44019+ lro_desc->vlan_tag);
44020+
44021+ } else {
44022+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44023+ netif_receive_skb(lro_desc->parent);
44024+ else
44025+ netif_rx(lro_desc->parent);
44026+ }
44027+
44028+ LRO_INC_STATS(lro_mgr, flushed);
44029+ lro_clear_desc(lro_desc);
44030+}
44031+
44032+static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
44033+ struct skb_frag_struct *frags,
44034+ int len, int true_size,
44035+ void *mac_hdr,
44036+ int hlen, __wsum sum,
44037+ u32 ip_summed)
44038+{
44039+ struct sk_buff *skb;
44040+ struct skb_frag_struct *skb_frags;
44041+ int data_len = len;
44042+ int hdr_len = min(len, hlen);
44043+
44044+ skb = netdev_alloc_skb(lro_mgr->dev, hlen);
44045+ if (!skb)
44046+ return NULL;
44047+
44048+ skb->len = len;
44049+ skb->data_len = len - hdr_len;
44050+ skb->truesize += true_size;
44051+ skb->tail += hdr_len;
44052+
44053+ memcpy(skb->data, mac_hdr, hdr_len);
44054+
44055+ if (skb->data_len) {
44056+ skb_frags = skb_shinfo(skb)->frags;
44057+ while (data_len > 0) {
44058+ *skb_frags = *frags;
44059+ data_len -= frags->size;
44060+ skb_frags++;
44061+ frags++;
44062+ skb_shinfo(skb)->nr_frags++;
44063+ }
44064+ skb_shinfo(skb)->frags[0].page_offset += hdr_len;
44065+ skb_shinfo(skb)->frags[0].size -= hdr_len;
44066+ } else {
44067+ put_page(frags[0].page);
44068+ }
44069+
44070+
44071+ skb->ip_summed = ip_summed;
44072+ skb->csum = sum;
44073+ skb->protocol = eth_type_trans(skb, lro_mgr->dev);
44074+ return skb;
44075+}
44076+
44077+static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
44078+ struct skb_frag_struct *frags,
44079+ int len, int true_size,
44080+ struct vlan_group *vgrp,
44081+ u16 vlan_tag, void *priv, __wsum sum)
44082+{
44083+ struct net_lro_desc *lro_desc;
44084+ struct iphdr *iph;
44085+ struct tcphdr *tcph;
44086+ struct sk_buff *skb;
44087+ u64 flags;
44088+ void *mac_hdr;
44089+ int mac_hdr_len;
44090+ int hdr_len = LRO_MAX_PG_HLEN;
44091+ int vlan_hdr_len = 0;
44092+ u8 pad_bytes;
44093+
44094+ if (!lro_mgr->get_frag_header
44095+ || lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
44096+ (void *)&tcph, &flags, priv)) {
44097+ mac_hdr = page_address(frags->page) + frags->page_offset;
44098+ goto out1;
44099+ }
44100+
44101+ if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
44102+ goto out1;
44103+
44104+ hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
44105+ mac_hdr_len = (int)((void *)(iph) - mac_hdr);
44106+
44107+ lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
44108+ if (!lro_desc)
44109+ goto out1;
44110+
44111+ pad_bytes = len - (ntohs(iph->tot_len) + mac_hdr_len);
44112+ if (!TCP_PAYLOAD_LENGTH(iph, tcph) && pad_bytes) {
44113+ len -= pad_bytes; /* trim the packet */
44114+ frags[0].size -= pad_bytes;
44115+ true_size -= pad_bytes;
44116+ }
44117+
44118+ if (!lro_desc->active) { /* start new lro session */
44119+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
44120+ goto out1;
44121+
44122+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44123+ hdr_len, 0, lro_mgr->ip_summed_aggr);
44124+ if (!skb)
44125+ goto out;
44126+
44127+ if ((skb->protocol == htons(ETH_P_8021Q))
44128+ && !test_bit(LRO_F_EXTRACT_VLAN_ID, &lro_mgr->features))
44129+ vlan_hdr_len = VLAN_HLEN;
44130+
44131+ iph = (void *)(skb->data + vlan_hdr_len);
44132+ tcph = (void *)((u8 *)skb->data + vlan_hdr_len
44133+ + IP_HDR_LEN(iph));
44134+
44135+ lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
44136+ LRO_INC_STATS(lro_mgr, aggregated);
44137+ return 0;
44138+ }
44139+
44140+ if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
44141+ goto out2;
44142+
44143+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
44144+ goto out2;
44145+
44146+ lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
44147+ LRO_INC_STATS(lro_mgr, aggregated);
44148+
44149+ if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
44150+ lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
44151+ lro_flush(lro_mgr, lro_desc);
44152+
44153+ return NULL;
44154+
44155+out2: /* send aggregated packets to the stack */
44156+ lro_flush(lro_mgr, lro_desc);
44157+
44158+out1: /* Original packet has to be posted to the stack */
44159+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
44160+ hdr_len, sum, lro_mgr->ip_summed);
44161+out:
44162+ return skb;
44163+}
44164+
44165+void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44166+ struct skb_frag_struct *frags,
44167+ int len, int true_size, void *priv, __wsum sum)
44168+{
44169+ struct sk_buff *skb;
44170+
44171+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0,
44172+ priv, sum);
44173+ if (!skb)
44174+ return;
44175+
44176+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44177+ netif_receive_skb(skb);
44178+ else
44179+ netif_rx(skb);
44180+}
44181+
44182+void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44183+ struct skb_frag_struct *frags,
44184+ int len, int true_size,
44185+ struct vlan_group *vgrp,
44186+ u16 vlan_tag, void *priv, __wsum sum)
44187+{
44188+ struct sk_buff *skb;
44189+
44190+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
44191+ vlan_tag, priv, sum);
44192+ if (!skb)
44193+ return;
44194+
44195+ if (test_bit(LRO_F_NAPI, &lro_mgr->features))
44196+ vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
44197+ else
44198+ vlan_hwaccel_rx(skb, vgrp, vlan_tag);
44199+}
44200+
44201+void lro_flush_all_compat(struct net_lro_mgr *lro_mgr)
44202+{
44203+ int i;
44204+ struct net_lro_desc *lro_desc = lro_mgr->lro_arr;
44205+
44206+ for (i = 0; i < lro_mgr->max_desc; i++) {
44207+ if (lro_desc[i].active)
44208+ lro_flush(lro_mgr, &lro_desc[i]);
44209+ }
44210+}
44211+#endif /* INET_LRO backport */
44212+
44213+#ifndef TX_MQ
44214+struct net_device *alloc_etherdev_mq_compat(int sizeof_priv,
44215+ unsigned int queue_count)
44216+{
44217+ return alloc_etherdev(sizeof_priv);
44218+}
44219+
44220+void netif_wake_subqueue_compat(struct net_device *dev, u16 queue_index)
44221+{
44222+ netif_wake_queue(dev);
44223+}
44224+
44225+void netif_stop_subqueue_compat(struct net_device *dev, u16 queue_index)
44226+{
44227+ netif_stop_queue(dev);
44228+}
44229+
44230+int __netif_subqueue_stopped_compat(const struct net_device *dev,
44231+ u16 queue_index)
44232+{
44233+ return netif_queue_stopped(dev);
44234+}
44235+
44236+u16 skb_get_queue_mapping_compat(const struct sk_buff *skb)
44237+{
44238+ return 0;
44239+}
44240+
44241+void netif_set_real_num_tx_queues_compat(struct net_device *dev,
44242+ unsigned int txq)
44243+{
44244+ return;
44245+}
44246+
44247+u16 skb_tx_hash_compat(const struct net_device *dev,
44248+ const struct sk_buff *skb)
44249+{
44250+ return 0;
44251+}
44252+#endif
44253diff --git a/drivers/net/benet/be_compat.h b/drivers/net/benet/be_compat.h
44254new file mode 100644
44255index 0000000..8ceecc8
44256--- /dev/null
44257+++ b/drivers/net/benet/be_compat.h
44258@@ -0,0 +1,621 @@
44259+/*
44260+ * Copyright (C) 2005 - 2011 Emulex
44261+ * All rights reserved.
44262+ *
44263+ * This program is free software; you can redistribute it and/or
44264+ * modify it under the terms of the GNU General Public License version 2
44265+ * as published by the Free Software Foundation. The full GNU General
44266+ * Public License is included in this distribution in the file called COPYING.
44267+ *
44268+ * Contact Information:
44269+ * linux-drivers@emulex.com
44270+ *
44271+ * Emulex
44272+ * 3333 Susan Street
44273+ * Costa Mesa, CA 92626
44274+ */
44275+
44276+#ifndef BE_COMPAT_H
44277+#define BE_COMPAT_H
44278+
44279+/****************** RHEL5 and SLES10 backport ***************************/
44280+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
44281+
44282+#ifndef upper_32_bits
44283+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
44284+#endif
44285+
44286+#ifndef CHECKSUM_PARTIAL
44287+#define CHECKSUM_PARTIAL CHECKSUM_HW
44288+#define CHECKSUM_COMPLETE CHECKSUM_HW
44289+#endif
44290+
44291+#if !defined(ip_hdr)
44292+#define ip_hdr(skb) (skb->nh.iph)
44293+#define ipv6_hdr(skb) (skb->nh.ipv6h)
44294+#endif
44295+
44296+#if !defined(__packed)
44297+#define __packed __attribute__ ((packed))
44298+#endif
44299+
44300+#if !defined(RHEL_MINOR)
44301+/* Only for RH5U1 (Maui) and SLES10 NIC driver */
44302+enum {
44303+ false = 0,
44304+ true = 1
44305+};
44306+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
44307+/* Only for RH5U1 (Maui) NIC driver */
44308+static inline __attribute__((const))
44309+int __ilog2_u32(u32 n)
44310+{
44311+ return fls(n) - 1;
44312+}
44313+#endif
44314+#endif
44315+
44316+#define ETH_FCS_LEN 4
44317+#define bool u8
44318+#ifndef PTR_ALIGN
44319+#define PTR_ALIGN(p, a) ((typeof(p)) \
44320+ ALIGN((unsigned long)(p), (a)))
44321+#endif
44322+#define list_first_entry(ptr, type, member) \
44323+ list_entry((ptr)->next, type, member)
44324+
44325+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
44326+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
44327+#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] \
44328+ __devinitdata
44329+#endif
44330+
44331+/* Backport of request_irq */
44332+typedef irqreturn_t(*backport_irq_handler_t) (int, void *);
44333+static inline int
44334+backport_request_irq(unsigned int irq, irqreturn_t(*handler) (int, void *),
44335+ unsigned long flags, const char *dev_name, void *dev_id)
44336+{
44337+ return request_irq(irq,
44338+ (irqreturn_t(*) (int, void *, struct pt_regs *))handler,
44339+ flags, dev_name, dev_id);
44340+}
44341+#define request_irq backport_request_irq
44342+
44343+#endif /*** RHEL5 and SLES10 backport ***/
44344+
44345+#if !defined(__packed)
44346+#define __packed __attribute__ ((packed))
44347+#endif
44348+
44349+/****************** SLES10 only backport ***************************/
44350+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
44351+
44352+#include <linux/tifm.h>
44353+
44354+#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))
44355+#define IRQF_SHARED SA_SHIRQ
44356+#define CHECKSUM_PARTIAL CHECKSUM_HW
44357+#define CHECKSUM_COMPLETE CHECKSUM_HW
44358+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
44359+#define NETIF_F_IPV6_CSUM NETIF_F_IP_CSUM
44360+#define NETIF_F_TSO6 NETIF_F_TSO
44361+
44362+
44363+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
44364+ unsigned int length)
44365+{
44366+ /* 16 == NET_PAD_SKB */
44367+ struct sk_buff *skb;
44368+ skb = alloc_skb(length + 16, GFP_ATOMIC);
44369+ if (likely(skb != NULL)) {
44370+ skb_reserve(skb, 16);
44371+ skb->dev = dev;
44372+ }
44373+ return skb;
44374+}
44375+
44376+#define PCI_SAVE_STATE(x)
44377+
44378+#else /* SLES10 only backport */
44379+
44380+#define PCI_SAVE_STATE(x) pci_save_state(x)
44381+
44382+#endif /* SLES10 only backport */
44383+
44384+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 31)
44385+#define netdev_tx_t int
44386+#endif
44387+
44388+#ifndef VLAN_PRIO_MASK
44389+#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
44390+#define VLAN_PRIO_SHIFT 13
44391+#endif
44392+
44393+/*
44394+ * Backport of netdev ops struct
44395+ */
44396+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
44397+struct net_device_ops {
44398+ int (*ndo_init)(struct net_device *dev);
44399+ void (*ndo_uninit)(struct net_device *dev);
44400+ int (*ndo_open)(struct net_device *dev);
44401+ int (*ndo_stop)(struct net_device *dev);
44402+ int (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev);
44403+ u16 (*ndo_select_queue)(struct net_device *dev,
44404+ struct sk_buff *skb);
44405+ void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
44406+ void (*ndo_set_rx_mode)(struct net_device *dev);
44407+ void (*ndo_set_multicast_list)(struct net_device *dev);
44408+ int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
44409+ int (*ndo_validate_addr)(struct net_device *dev);
44410+ int (*ndo_do_ioctl)(struct net_device *dev,
44411+ struct ifreq *ifr, int cmd);
44412+ int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
44413+ int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
44414+ int (*ndo_neigh_setup)(struct net_device *dev,
44415+ struct neigh_parms *);
44416+ void (*ndo_tx_timeout) (struct net_device *dev);
44417+
44418+ struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
44419+
44420+ void (*ndo_vlan_rx_register)(struct net_device *dev,
44421+ struct vlan_group *grp);
44422+ void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
44423+ unsigned short vid);
44424+ void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
44425+ unsigned short vid);
44426+#ifdef CONFIG_NET_POLL_CONTROLLER
44427+#define HAVE_NETDEV_POLL
44428+ void (*ndo_poll_controller)(struct net_device *dev);
44429+#endif
44430+};
44431+extern void be_netdev_ops_init(struct net_device *netdev,
44432+ struct net_device_ops *ops);
44433+extern int eth_validate_addr(struct net_device *);
44434+
44435+#endif /* Netdev ops backport */
44436+
44437+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 29)
44438+#undef NETIF_F_GRO
44439+#endif
44440+
44441+#ifdef NO_GRO
44442+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5)))
44443+#undef NETIF_F_GRO
44444+#endif
44445+#endif
44446+
44447+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
44448+#define HAVE_ETHTOOL_FLASH
44449+#endif
44450+
44451+/*
44452+ * Backport of NAPI
44453+ */
44454+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 24)
44455+
44456+#if defined(RHEL_MINOR) && (RHEL_MINOR > 3)
44457+#define RHEL_NEW_NAPI
44458+#endif
44459+
44460+/* We need a new struct that has some meta data beyond rhel 5.4's napi_struct
44461+ * to fix rhel5.4's half-baked new napi implementation.
44462+ * We don't want to use rhel 5.4's broken napi_complete; so
44463+ * define a new be_napi_complete that executes the logic only for Rx
44464+ */
44465+
44466+#ifdef RHEL_NEW_NAPI
44467+#define napi_complete be_napi_complete
44468+typedef struct napi_struct rhel_napi_struct;
44469+#endif
44470+#define napi_struct be_napi_struct
44471+#define napi_gro_frags(napi) napi_gro_frags((rhel_napi_struct *) napi)
44472+#define vlan_gro_frags(napi, vlan_grp, vid)\
44473+ vlan_gro_frags((rhel_napi_struct *) napi, vlan_grp, vid)
44474+#define napi_get_frags(napi) napi_get_frags((rhel_napi_struct *) napi)
44475+
44476+struct napi_struct {
44477+#ifdef RHEL_NEW_NAPI
44478+ rhel_napi_struct napi; /* must be the first member */
44479+#endif
44480+ struct net_device *dev;
44481+ int (*poll) (struct napi_struct *napi, int budget);
44482+ bool rx;
44483+};
44484+
44485+static inline void napi_complete(struct napi_struct *napi)
44486+{
44487+#ifdef NETIF_F_GRO
44488+ napi_gro_flush((rhel_napi_struct *)napi);
44489+#endif
44490+ netif_rx_complete(napi->dev);
44491+}
44492+
44493+static inline void napi_schedule(struct napi_struct *napi)
44494+{
44495+ netif_rx_schedule(napi->dev);
44496+}
44497+
44498+static inline void napi_enable(struct napi_struct *napi)
44499+{
44500+ netif_poll_enable(napi->dev);
44501+}
44502+
44503+static inline void napi_disable(struct napi_struct *napi)
44504+{
44505+ netif_poll_disable(napi->dev);
44506+}
44507+
44508+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
44509+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
44510+static inline void vlan_group_set_device(struct vlan_group *vg,
44511+ u16 vlan_id,
44512+ struct net_device *dev)
44513+{
44514+ struct net_device **array;
44515+ if (!vg)
44516+ return;
44517+ array = vg->vlan_devices;
44518+ array[vlan_id] = dev;
44519+}
44520+#endif
44521+
44522+#endif /* New NAPI backport */
44523+
44524+extern int be_netif_napi_add(struct net_device *netdev,
44525+ struct napi_struct *napi,
44526+ int (*poll) (struct napi_struct *, int), int weight);
44527+extern void be_netif_napi_del(struct net_device *netdev);
44528+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
44529+#define HAVE_SIMULATED_MULTI_NAPI
44530+#endif
44531+
44532+/************** Backport of Delayed work queues interface ****************/
44533+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
44534+#if (defined(RHEL_MINOR) && RHEL_MINOR < 6) || \
44535+ LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 16)
44536+struct delayed_work {
44537+ struct work_struct work;
44538+};
44539+#endif
44540+
44541+#define INIT_DELAYED_WORK(_work, _func) \
44542+ INIT_WORK(&(_work)->work, _func, &(_work)->work)
44543+
44544+static inline int backport_cancel_delayed_work_sync(struct delayed_work *work)
44545+{
44546+ cancel_rearming_delayed_work(&work->work);
44547+ return 0;
44548+}
44549+#define cancel_delayed_work_sync backport_cancel_delayed_work_sync
44550+
44551+static inline int backport_schedule_delayed_work(struct delayed_work *work,
44552+ unsigned long delay)
44553+{
44554+ if (unlikely(!delay))
44555+ return schedule_work(&work->work);
44556+ else
44557+ return schedule_delayed_work(&work->work, delay);
44558+}
44559+#define schedule_delayed_work backport_schedule_delayed_work
44560+#endif /* backport delayed workqueue */
44561+
44562+
44563+/************** Backport of INET_LRO **********************************/
44564+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
44565+
44566+#include <linux/inet_lro.h>
44567+
44568+#else
44569+
44570+#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)
44571+
44572+#if defined(RHEL_MINOR) && RHEL_MINOR < 6
44573+typedef __u16 __bitwise __sum16;
44574+typedef __u32 __bitwise __wsum;
44575+#endif
44576+
44577+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR <= 3)) || \
44578+ (!defined(RHEL_MINOR)))
44579+static inline __wsum csum_unfold(__sum16 n)
44580+{
44581+ return (__force __wsum)n;
44582+}
44583+#endif
44584+
44585+#endif
44586+
44587+#define lro_flush_all lro_flush_all_compat
44588+#define lro_vlan_hwaccel_receive_frags lro_vlan_hwaccel_receive_frags_compat
44589+#define lro_receive_frags lro_receive_frags_compat
44590+
44591+struct net_lro_stats {
44592+ unsigned long aggregated;
44593+ unsigned long flushed;
44594+ unsigned long no_desc;
44595+};
44596+
44597+struct net_lro_desc {
44598+ struct sk_buff *parent;
44599+ struct sk_buff *last_skb;
44600+ struct skb_frag_struct *next_frag;
44601+ struct iphdr *iph;
44602+ struct tcphdr *tcph;
44603+ struct vlan_group *vgrp;
44604+ __wsum data_csum;
44605+ u32 tcp_rcv_tsecr;
44606+ u32 tcp_rcv_tsval;
44607+ u32 tcp_ack;
44608+ u32 tcp_next_seq;
44609+ u32 skb_tot_frags_len;
44610+ u32 ack_cnt;
44611+ u16 ip_tot_len;
44612+ u16 tcp_saw_tstamp; /* timestamps enabled */
44613+ u16 tcp_window;
44614+ u16 vlan_tag;
44615+ int pkt_aggr_cnt; /* counts aggregated packets */
44616+ int vlan_packet;
44617+ int mss;
44618+ int active;
44619+};
44620+
44621+struct net_lro_mgr {
44622+ struct net_device *dev;
44623+ struct net_lro_stats stats;
44624+
44625+ /* LRO features */
44626+ unsigned long features;
44627+#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
44628+#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
44629+ from received packets and eth protocol
44630+ is still ETH_P_8021Q */
44631+
44632+ u32 ip_summed; /* Set in non generated SKBs in page mode */
44633+ u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
44634+ * or CHECKSUM_NONE */
44635+
44636+ int max_desc; /* Max number of LRO descriptors */
44637+ int max_aggr; /* Max number of LRO packets to be aggregated */
44638+
44639+ struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
44640+
44641+ /* Optimized driver functions
44642+ * get_skb_header: returns tcp and ip header for packet in SKB
44643+ */
44644+ int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
44645+ void **tcpudp_hdr, u64 *hdr_flags, void *priv);
44646+
44647+ /* hdr_flags: */
44648+#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
44649+#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
44650+
44651+ /*
44652+ * get_frag_header: returns mac, tcp and ip header for packet in SKB
44653+ *
44654+ * @hdr_flags: Indicate what kind of LRO has to be done
44655+ * (IPv4/IPv6/TCP/UDP)
44656+ */
44657+ int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
44658+ void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
44659+ void *priv);
44660+};
44661+
44662+extern void lro_receive_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
44663+ void *priv);
44664+
44665+extern void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
44666+ struct sk_buff *skb, struct vlan_group *vgrp,
44667+ u16 vlan_tag, void *priv);
44668+
44669+/* This functions aggregate fragments and generate SKBs do pass
44670+ * the packets to the stack.
44671+ *
44672+ * @lro_mgr: LRO manager to use
44673+ * @frags: Fragment to be processed. Must contain entire header in first
44674+ * element.
44675+ * @len: Length of received data
44676+ * @true_size: Actual size of memory the fragment is consuming
44677+ * @priv: Private data that may be used by driver functions
44678+ * (for example get_tcp_ip_hdr)
44679+ */
44680+extern void lro_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44681+ struct skb_frag_struct *frags, int len, int true_size,
44682+ void *priv, __wsum sum);
44683+
44684+extern void lro_vlan_hwaccel_receive_frags_compat(struct net_lro_mgr *lro_mgr,
44685+ struct skb_frag_struct *frags, int len, int true_size,
44686+ struct vlan_group *vgrp, u16 vlan_tag, void *priv,
44687+ __wsum sum);
44688+
44689+/* Forward all aggregated SKBs held by lro_mgr to network stack */
44690+extern void lro_flush_all_compat(struct net_lro_mgr *lro_mgr);
44691+
44692+extern void lro_flush_pkt(struct net_lro_mgr *lro_mgr, struct iphdr *iph,
44693+ struct tcphdr *tcph);
44694+#endif /* backport of inet_lro */
44695+
44696+#ifndef ETHTOOL_FLASH_MAX_FILENAME
44697+#define ETHTOOL_FLASH_MAX_FILENAME 128
44698+#endif
44699+
44700+#if defined(CONFIG_XEN) && !defined(NETIF_F_GRO)
44701+#define BE_INIT_FRAGS_PER_FRAME (u32) 1
44702+#else
44703+#define BE_INIT_FRAGS_PER_FRAME (min((u32) 16, (u32) MAX_SKB_FRAGS))
44704+#endif
44705+
44706+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
44707+#ifdef CONFIG_PCI_IOV
44708+#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR == 6)))
44709+#undef CONFIG_PCI_IOV
44710+#endif
44711+#endif
44712+#endif
44713+
44714+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
44715+#define dev_to_node(dev) -1
44716+#endif
44717+
44718+
44719+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
44720+#if (!(defined(RHEL_MAJOR) && (RHEL_MAJOR == 5) && (RHEL_MINOR > 6)))
44721+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
44722+ unsigned int length)
44723+{
44724+ struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
44725+
44726+ if (NET_IP_ALIGN && skb)
44727+ skb_reserve(skb, NET_IP_ALIGN);
44728+ return skb;
44729+}
44730+#endif
44731+#endif
44732+
44733+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
44734+#ifndef netif_set_gso_max_size
44735+#define netif_set_gso_max_size(netdev, size) do {} while (0)
44736+#endif
44737+#endif
44738+
44739+#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
44740+#if defined(RHEL_MINOR) && (RHEL_MINOR <= 4)
44741+static inline int skb_is_gso_v6(const struct sk_buff *skb)
44742+{
44743+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
44744+}
44745+#endif
44746+#endif
44747+
44748+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
44749+static inline int skb_is_gso_v6(const struct sk_buff *skb)
44750+{
44751+ return (ip_hdr(skb)->version == 6);
44752+}
44753+#endif
44754+
44755+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
44756+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
44757+#endif
44758+
44759+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
44760+#if ((defined(RHEL_MAJOR) && (RHEL_MAJOR == 6)))
44761+#define HAVE_SRIOV_CONFIG
44762+#endif
44763+#endif
44764+
44765+#ifndef NETIF_F_VLAN_SG
44766+#define NETIF_F_VLAN_SG NETIF_F_SG
44767+#endif
44768+
44769+#ifndef NETIF_F_VLAN_CSUM
44770+#define NETIF_F_VLAN_CSUM NETIF_F_HW_CSUM
44771+#endif
44772+
44773+#ifndef NETIF_F_VLAN_TSO
44774+#define NETIF_F_VLAN_TSO NETIF_F_TSO
44775+#endif
44776+
44777+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
44778+#define vlan_features features
44779+#endif
44780+
44781+#ifndef DEFINE_DMA_UNMAP_ADDR
44782+#define DEFINE_DMA_UNMAP_ADDR(bus) dma_addr_t bus
44783+#endif
44784+
44785+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
44786+
44787+#ifndef netdev_mc_count
44788+#define netdev_mc_count(nd) (nd->mc_count)
44789+#endif
44790+
44791+#ifndef netdev_hw_addr
44792+#define netdev_hw_addr dev_mc_list
44793+#endif
44794+
44795+#ifndef netdev_for_each_mc_addr
44796+#define netdev_for_each_mc_addr(ha, nd) \
44797+ for (ha = (nd)->mc_list; ha; ha = ha->next)
44798+#endif
44799+
44800+#define DMI_ADDR dmi_addr
44801+#else
44802+#define DMI_ADDR addr
44803+#endif
44804+
44805+#ifndef VLAN_GROUP_ARRAY_LEN
44806+#define VLAN_GROUP_ARRAY_LEN VLAN_N_VID
44807+#endif
44808+/**************************** Multi TXQ Support ******************************/
44809+
44810+/* Supported only in RHEL6 and SL11.1 (barring one execption) */
44811+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
44812+#define MQ_TX
44813+#endif
44814+
44815+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
44816+#define alloc_etherdev_mq(sz, cnt) alloc_etherdev(sz)
44817+#define skb_get_queue_mapping(skb) 0
44818+#define skb_tx_hash(dev, skb) 0
44819+#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
44820+#define netif_wake_subqueue(dev, idx) netif_wake_queue(dev)
44821+#define netif_stop_subqueue(dev, idx) netif_stop_queue(dev)
44822+#define __netif_subqueue_stopped(dev, idx) netif_queue_stopped(dev)
44823+#endif /* < 2.6.27 */
44824+
44825+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && \
44826+ (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)))
44827+#define skb_tx_hash(dev, skb) 0
44828+#define netif_set_real_num_tx_queues(dev, txq) do {} while(0)
44829+#endif
44830+
44831+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
44832+#define netif_set_real_num_tx_queues be_set_real_num_tx_queues
44833+static inline void be_set_real_num_tx_queues(struct net_device *dev,
44834+ unsigned int txq)
44835+{
44836+ dev->real_num_tx_queues = txq;
44837+}
44838+#endif
44839+
44840+#include <linux/if_vlan.h>
44841+static inline void be_reset_skb_tx_vlan(struct sk_buff *skb)
44842+{
44843+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
44844+ skb->vlan_tci = 0;
44845+#else
44846+ struct vlan_skb_tx_cookie *cookie;
44847+
44848+ cookie = VLAN_TX_SKB_CB(skb);
44849+ cookie->magic = 0;
44850+#endif
44851+}
44852+
44853+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
44854+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
44855+{
44856+ skb->nh.raw = skb->data + offset;
44857+}
44858+#endif
44859+
44860+static inline struct sk_buff *be_vlan_put_tag(struct sk_buff *skb,
44861+ unsigned short vlan_tag)
44862+{
44863+ struct sk_buff *new_skb = __vlan_put_tag(skb, vlan_tag);
44864+ /* On kernel versions < 2.6.27 the __vlan_put_tag() function
44865+ * distorts the network layer hdr pointer in the skb which
44866+ * affects the detection of UDP/TCP packets down the line in
44867+ * wrb_fill_hdr().This work-around sets it right.
44868+ */
44869+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27))
44870+ skb_set_network_header(new_skb, VLAN_ETH_HLEN);
44871+#endif
44872+ return new_skb;
44873+}
44874+
44875+#ifndef ACCESS_ONCE
44876+#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
44877+#endif
44878+
44879+#endif /* BE_COMPAT_H */
44880diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
44881index f0fd95b..37bad99 100644
44882--- a/drivers/net/benet/be_ethtool.c
44883+++ b/drivers/net/benet/be_ethtool.c
44884@@ -1,18 +1,18 @@
44885 /*
44886- * Copyright (C) 2005 - 2009 ServerEngines
44887+ * Copyright (C) 2005 - 2011 Emulex
44888 * All rights reserved.
44889 *
44890 * This program is free software; you can redistribute it and/or
44891 * modify it under the terms of the GNU General Public License version 2
44892- * as published by the Free Software Foundation. The full GNU General
44893+ * as published by the Free Software Foundation. The full GNU General
44894 * Public License is included in this distribution in the file called COPYING.
44895 *
44896 * Contact Information:
44897- * linux-drivers@serverengines.com
44898+ * linux-drivers@emulex.com
44899 *
44900- * ServerEngines
44901- * 209 N. Fair Oaks Ave
44902- * Sunnyvale, CA 94085
44903+ * Emulex
44904+ * 3333 Susan Street
44905+ * Costa Mesa, CA 92626
44906 */
44907
44908 #include "be.h"
44909@@ -26,21 +26,19 @@ struct be_ethtool_stat {
44910 int offset;
44911 };
44912
44913-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT, ERXSTAT};
44914+enum {NETSTAT, DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};
44915 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
44916 offsetof(_struct, field)
44917-#define NETSTAT_INFO(field) #field, NETSTAT,\
44918+#define NETSTAT_INFO(field) #field, NETSTAT,\
44919 FIELDINFO(struct net_device_stats,\
44920 field)
44921-#define DRVSTAT_INFO(field) #field, DRVSTAT,\
44922- FIELDINFO(struct be_drvr_stats, field)
44923-#define MISCSTAT_INFO(field) #field, MISCSTAT,\
44924- FIELDINFO(struct be_rxf_stats, field)
44925-#define PORTSTAT_INFO(field) #field, PORTSTAT,\
44926- FIELDINFO(struct be_port_rxf_stats, \
44927+#define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\
44928+ FIELDINFO(struct be_tx_stats, field)
44929+#define DRVSTAT_RX_INFO(field) #field, DRVSTAT_RX,\
44930+ FIELDINFO(struct be_rx_stats, field)
44931+#define DRVSTAT_INFO(field) #field, DRVSTAT,\
44932+ FIELDINFO(struct be_drv_stats, \
44933 field)
44934-#define ERXSTAT_INFO(field) #field, ERXSTAT,\
44935- FIELDINFO(struct be_erx_stats, field)
44936
44937 static const struct be_ethtool_stat et_stats[] = {
44938 {NETSTAT_INFO(rx_packets)},
44939@@ -51,70 +49,131 @@ static const struct be_ethtool_stat et_stats[] = {
44940 {NETSTAT_INFO(tx_errors)},
44941 {NETSTAT_INFO(rx_dropped)},
44942 {NETSTAT_INFO(tx_dropped)},
44943- {DRVSTAT_INFO(be_tx_reqs)},
44944- {DRVSTAT_INFO(be_tx_stops)},
44945- {DRVSTAT_INFO(be_fwd_reqs)},
44946- {DRVSTAT_INFO(be_tx_wrbs)},
44947- {DRVSTAT_INFO(be_polls)},
44948 {DRVSTAT_INFO(be_tx_events)},
44949- {DRVSTAT_INFO(be_rx_events)},
44950- {DRVSTAT_INFO(be_tx_compl)},
44951- {DRVSTAT_INFO(be_rx_compl)},
44952- {DRVSTAT_INFO(be_ethrx_post_fail)},
44953- {DRVSTAT_INFO(be_802_3_dropped_frames)},
44954- {DRVSTAT_INFO(be_802_3_malformed_frames)},
44955- {DRVSTAT_INFO(be_tx_rate)},
44956- {DRVSTAT_INFO(be_rx_rate)},
44957- {PORTSTAT_INFO(rx_unicast_frames)},
44958- {PORTSTAT_INFO(rx_multicast_frames)},
44959- {PORTSTAT_INFO(rx_broadcast_frames)},
44960- {PORTSTAT_INFO(rx_crc_errors)},
44961- {PORTSTAT_INFO(rx_alignment_symbol_errors)},
44962- {PORTSTAT_INFO(rx_pause_frames)},
44963- {PORTSTAT_INFO(rx_control_frames)},
44964- {PORTSTAT_INFO(rx_in_range_errors)},
44965- {PORTSTAT_INFO(rx_out_range_errors)},
44966- {PORTSTAT_INFO(rx_frame_too_long)},
44967- {PORTSTAT_INFO(rx_address_match_errors)},
44968- {PORTSTAT_INFO(rx_vlan_mismatch)},
44969- {PORTSTAT_INFO(rx_dropped_too_small)},
44970- {PORTSTAT_INFO(rx_dropped_too_short)},
44971- {PORTSTAT_INFO(rx_dropped_header_too_small)},
44972- {PORTSTAT_INFO(rx_dropped_tcp_length)},
44973- {PORTSTAT_INFO(rx_dropped_runt)},
44974- {PORTSTAT_INFO(rx_fifo_overflow)},
44975- {PORTSTAT_INFO(rx_input_fifo_overflow)},
44976- {PORTSTAT_INFO(rx_ip_checksum_errs)},
44977- {PORTSTAT_INFO(rx_tcp_checksum_errs)},
44978- {PORTSTAT_INFO(rx_udp_checksum_errs)},
44979- {PORTSTAT_INFO(rx_non_rss_packets)},
44980- {PORTSTAT_INFO(rx_ipv4_packets)},
44981- {PORTSTAT_INFO(rx_ipv6_packets)},
44982- {PORTSTAT_INFO(tx_unicastframes)},
44983- {PORTSTAT_INFO(tx_multicastframes)},
44984- {PORTSTAT_INFO(tx_broadcastframes)},
44985- {PORTSTAT_INFO(tx_pauseframes)},
44986- {PORTSTAT_INFO(tx_controlframes)},
44987- {MISCSTAT_INFO(rx_drops_no_pbuf)},
44988- {MISCSTAT_INFO(rx_drops_no_txpb)},
44989- {MISCSTAT_INFO(rx_drops_no_erx_descr)},
44990- {MISCSTAT_INFO(rx_drops_no_tpre_descr)},
44991- {MISCSTAT_INFO(rx_drops_too_many_frags)},
44992- {MISCSTAT_INFO(rx_drops_invalid_ring)},
44993- {MISCSTAT_INFO(forwarded_packets)},
44994- {MISCSTAT_INFO(rx_drops_mtu)},
44995- {ERXSTAT_INFO(rx_drops_no_fragments)},
44996+ {DRVSTAT_INFO(rx_crc_errors)},
44997+ {DRVSTAT_INFO(rx_alignment_symbol_errors)},
44998+ {DRVSTAT_INFO(rx_pause_frames)},
44999+ {DRVSTAT_INFO(rx_control_frames)},
45000+ {DRVSTAT_INFO(rx_in_range_errors)},
45001+ {DRVSTAT_INFO(rx_out_range_errors)},
45002+ {DRVSTAT_INFO(rx_frame_too_long)},
45003+ {DRVSTAT_INFO(rx_address_match_errors)},
45004+ {DRVSTAT_INFO(rx_dropped_too_small)},
45005+ {DRVSTAT_INFO(rx_dropped_too_short)},
45006+ {DRVSTAT_INFO(rx_dropped_header_too_small)},
45007+ {DRVSTAT_INFO(rx_dropped_tcp_length)},
45008+ {DRVSTAT_INFO(rx_dropped_runt)},
45009+ {DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
45010+ {DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
45011+ {DRVSTAT_INFO(rx_ip_checksum_errs)},
45012+ {DRVSTAT_INFO(rx_tcp_checksum_errs)},
45013+ {DRVSTAT_INFO(rx_udp_checksum_errs)},
45014+ {DRVSTAT_INFO(rx_switched_unicast_packets)},
45015+ {DRVSTAT_INFO(rx_switched_multicast_packets)},
45016+ {DRVSTAT_INFO(rx_switched_broadcast_packets)},
45017+ {DRVSTAT_INFO(tx_pauseframes)},
45018+ {DRVSTAT_INFO(tx_controlframes)},
45019+ {DRVSTAT_INFO(rx_priority_pause_frames)},
45020+ {DRVSTAT_INFO(pmem_fifo_overflow_drop)},
45021+ {DRVSTAT_INFO(jabber_events)},
45022+ {DRVSTAT_INFO(rx_drops_no_pbuf)},
45023+ {DRVSTAT_INFO(rx_drops_no_txpb)},
45024+ {DRVSTAT_INFO(rx_drops_no_erx_descr)},
45025+ {DRVSTAT_INFO(rx_drops_no_tpre_descr)},
45026+ {DRVSTAT_INFO(rx_drops_too_many_frags)},
45027+ {DRVSTAT_INFO(rx_drops_invalid_ring)},
45028+ {DRVSTAT_INFO(forwarded_packets)},
45029+ {DRVSTAT_INFO(rx_drops_mtu)},
45030+ {DRVSTAT_INFO(eth_red_drops)},
45031+ {DRVSTAT_INFO(be_on_die_temperature)}
45032 };
45033 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
45034
45035+/* Stats related to multi RX queues */
45036+static const struct be_ethtool_stat et_rx_stats[] = {
45037+ {DRVSTAT_RX_INFO(rx_bytes)},
45038+ {DRVSTAT_RX_INFO(rx_pkts)},
45039+ {DRVSTAT_RX_INFO(rx_rate)},
45040+ {DRVSTAT_RX_INFO(rx_polls)},
45041+ {DRVSTAT_RX_INFO(rx_events)},
45042+ {DRVSTAT_RX_INFO(rx_compl)},
45043+ {DRVSTAT_RX_INFO(rx_mcast_pkts)},
45044+ {DRVSTAT_RX_INFO(rx_post_fail)},
45045+ {DRVSTAT_RX_INFO(rx_drops_no_frags)}
45046+};
45047+#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))
45048+
45049+/* Stats related to multi TX queues */
45050+static const struct be_ethtool_stat et_tx_stats[] = {
45051+ {DRVSTAT_TX_INFO(be_tx_rate)},
45052+ {DRVSTAT_TX_INFO(be_tx_reqs)},
45053+ {DRVSTAT_TX_INFO(be_tx_wrbs)},
45054+ {DRVSTAT_TX_INFO(be_tx_stops)},
45055+ {DRVSTAT_TX_INFO(be_tx_compl)},
45056+ {DRVSTAT_TX_INFO(be_ipv6_ext_hdr_tx_drop)}
45057+};
45058+#define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
45059+
45060+static const char et_self_tests[][ETH_GSTRING_LEN] = {
45061+ "MAC Loopback test",
45062+ "PHY Loopback test",
45063+ "External Loopback test",
45064+ "DDR DMA test",
45065+ "Link test"
45066+};
45067+
45068+#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
45069+#define BE_MAC_LOOPBACK 0x0
45070+#define BE_PHY_LOOPBACK 0x1
45071+#define BE_ONE_PORT_EXT_LOOPBACK 0x2
45072+#define BE_NO_LOOPBACK 0xff
45073+
45074+/* MAC speed valid values */
45075+#define SPEED_DEFAULT 0x0
45076+#define SPEED_FORCED_10GB 0x1
45077+#define SPEED_FORCED_1GB 0x2
45078+#define SPEED_AUTONEG_10GB 0x3
45079+#define SPEED_AUTONEG_1GB 0x4
45080+#define SPEED_AUTONEG_100MB 0x5
45081+#define SPEED_AUTONEG_10GB_1GB 0x6
45082+#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
45083+#define SPEED_AUTONEG_1GB_100MB 0x8
45084+#define SPEED_AUTONEG_10MB 0x9
45085+#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
45086+#define SPEED_AUTONEG_100MB_10MB 0xb
45087+#define SPEED_FORCED_100MB 0xc
45088+#define SPEED_FORCED_10MB 0xd
45089+
45090+
45091+
45092 static void
45093 be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45094 {
45095 struct be_adapter *adapter = netdev_priv(netdev);
45096+ int len;
45097+ char fw_on_flash[FW_VER_LEN];
45098+
45099+ memset(fw_on_flash, 0 , sizeof(fw_on_flash));
45100+
45101+ be_cmd_get_fw_ver(adapter, adapter->fw_ver,
45102+ fw_on_flash);
45103
45104 strcpy(drvinfo->driver, DRV_NAME);
45105 strcpy(drvinfo->version, DRV_VER);
45106+
45107 strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
45108+ if (memcmp(adapter->fw_ver, fw_on_flash,
45109+ FW_VER_LEN) != 0) {
45110+ len = strlen(drvinfo->fw_version);
45111+ strncpy(drvinfo->fw_version+len, " [",
45112+ FW_VER_LEN-len-1);
45113+ len = strlen(drvinfo->fw_version);
45114+ strncpy(drvinfo->fw_version+len, fw_on_flash,
45115+ FW_VER_LEN-len-1);
45116+ len = strlen(drvinfo->fw_version);
45117+ strncpy(drvinfo->fw_version+len, "]", FW_VER_LEN-len-1);
45118+ }
45119+
45120 strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
45121 drvinfo->testinfo_len = 0;
45122 drvinfo->regdump_len = 0;
45123@@ -122,12 +181,37 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
45124 }
45125
45126 static int
45127+be_get_reg_len(struct net_device *netdev)
45128+{
45129+ struct be_adapter *adapter = netdev_priv(netdev);
45130+ u32 log_size = 0;
45131+
45132+ if (be_physfn(adapter))
45133+ be_cmd_get_reg_len(adapter, &log_size);
45134+
45135+ return log_size;
45136+}
45137+
45138+static void
45139+be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
45140+{
45141+ struct be_adapter *adapter = netdev_priv(netdev);
45142+
45143+ if (be_physfn(adapter)) {
45144+ memset(buf, 0, regs->len);
45145+ be_cmd_get_regs(adapter, regs->len, buf);
45146+ }
45147+}
45148+
45149+static int
45150 be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45151 {
45152 struct be_adapter *adapter = netdev_priv(netdev);
45153- struct be_eq_obj *rx_eq = &adapter->rx_eq;
45154+ struct be_eq_obj *rx_eq = &adapter->rx_obj[0].rx_eq;
45155 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45156
45157+ coalesce->rx_max_coalesced_frames = adapter->max_rx_coal;
45158+
45159 coalesce->rx_coalesce_usecs = rx_eq->cur_eqd;
45160 coalesce->rx_coalesce_usecs_high = rx_eq->max_eqd;
45161 coalesce->rx_coalesce_usecs_low = rx_eq->min_eqd;
45162@@ -149,25 +233,52 @@ static int
45163 be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45164 {
45165 struct be_adapter *adapter = netdev_priv(netdev);
45166- struct be_eq_obj *rx_eq = &adapter->rx_eq;
45167+ struct be_rx_obj *rxo;
45168+ struct be_eq_obj *rx_eq;
45169 struct be_eq_obj *tx_eq = &adapter->tx_eq;
45170 u32 tx_max, tx_min, tx_cur;
45171 u32 rx_max, rx_min, rx_cur;
45172- int status = 0;
45173+ int status = 0, i;
45174
45175 if (coalesce->use_adaptive_tx_coalesce == 1)
45176 return -EINVAL;
45177+ adapter->max_rx_coal = coalesce->rx_max_coalesced_frames;
45178+ if (adapter->max_rx_coal > BE_MAX_FRAGS_PER_FRAME)
45179+ adapter->max_rx_coal = BE_MAX_FRAGS_PER_FRAME;
45180
45181- /* if AIC is being turned on now, start with an EQD of 0 */
45182- if (rx_eq->enable_aic == 0 &&
45183- coalesce->use_adaptive_rx_coalesce == 1) {
45184- rx_eq->cur_eqd = 0;
45185+ for_all_rx_queues(adapter, rxo, i) {
45186+ rx_eq = &rxo->rx_eq;
45187+
45188+ if (!rx_eq->enable_aic && coalesce->use_adaptive_rx_coalesce)
45189+ rx_eq->cur_eqd = 0;
45190+ rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45191+
45192+ rx_max = coalesce->rx_coalesce_usecs_high;
45193+ rx_min = coalesce->rx_coalesce_usecs_low;
45194+ rx_cur = coalesce->rx_coalesce_usecs;
45195+
45196+ if (rx_eq->enable_aic) {
45197+ if (rx_max > BE_MAX_EQD)
45198+ rx_max = BE_MAX_EQD;
45199+ if (rx_min > rx_max)
45200+ rx_min = rx_max;
45201+ rx_eq->max_eqd = rx_max;
45202+ rx_eq->min_eqd = rx_min;
45203+ if (rx_eq->cur_eqd > rx_max)
45204+ rx_eq->cur_eqd = rx_max;
45205+ if (rx_eq->cur_eqd < rx_min)
45206+ rx_eq->cur_eqd = rx_min;
45207+ } else {
45208+ if (rx_cur > BE_MAX_EQD)
45209+ rx_cur = BE_MAX_EQD;
45210+ if (rx_eq->cur_eqd != rx_cur) {
45211+ status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45212+ rx_cur);
45213+ if (!status)
45214+ rx_eq->cur_eqd = rx_cur;
45215+ }
45216+ }
45217 }
45218- rx_eq->enable_aic = coalesce->use_adaptive_rx_coalesce;
45219-
45220- rx_max = coalesce->rx_coalesce_usecs_high;
45221- rx_min = coalesce->rx_coalesce_usecs_low;
45222- rx_cur = coalesce->rx_coalesce_usecs;
45223
45224 tx_max = coalesce->tx_coalesce_usecs_high;
45225 tx_min = coalesce->tx_coalesce_usecs_low;
45226@@ -181,27 +292,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
45227 tx_eq->cur_eqd = tx_cur;
45228 }
45229
45230- if (rx_eq->enable_aic) {
45231- if (rx_max > BE_MAX_EQD)
45232- rx_max = BE_MAX_EQD;
45233- if (rx_min > rx_max)
45234- rx_min = rx_max;
45235- rx_eq->max_eqd = rx_max;
45236- rx_eq->min_eqd = rx_min;
45237- if (rx_eq->cur_eqd > rx_max)
45238- rx_eq->cur_eqd = rx_max;
45239- if (rx_eq->cur_eqd < rx_min)
45240- rx_eq->cur_eqd = rx_min;
45241- } else {
45242- if (rx_cur > BE_MAX_EQD)
45243- rx_cur = BE_MAX_EQD;
45244- if (rx_eq->cur_eqd != rx_cur) {
45245- status = be_cmd_modify_eqd(adapter, rx_eq->q.id,
45246- rx_cur);
45247- if (!status)
45248- rx_eq->cur_eqd = rx_cur;
45249- }
45250- }
45251 return 0;
45252 }
45253
45254@@ -229,81 +319,294 @@ be_get_ethtool_stats(struct net_device *netdev,
45255 struct ethtool_stats *stats, uint64_t *data)
45256 {
45257 struct be_adapter *adapter = netdev_priv(netdev);
45258- struct be_drvr_stats *drvr_stats = &adapter->stats.drvr_stats;
45259- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
45260- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
45261- struct be_port_rxf_stats *port_stats =
45262- &rxf_stats->port[adapter->port_num];
45263- struct net_device_stats *net_stats = &adapter->stats.net_stats;
45264- struct be_erx_stats *erx_stats = &hw_stats->erx;
45265+ struct be_rx_obj *rxo;
45266+ struct be_tx_obj *txo;
45267 void *p = NULL;
45268- int i;
45269+ int i, j, base;
45270
45271 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45272 switch (et_stats[i].type) {
45273 case NETSTAT:
45274- p = net_stats;
45275+ p = &adapter->net_stats;
45276 break;
45277 case DRVSTAT:
45278- p = drvr_stats;
45279- break;
45280- case PORTSTAT:
45281- p = port_stats;
45282- break;
45283- case MISCSTAT:
45284- p = rxf_stats;
45285- break;
45286- case ERXSTAT: /* Currently only one ERX stat is provided */
45287- p = (u32 *)erx_stats + adapter->rx_obj.q.id;
45288+ p = &adapter->drv_stats;
45289 break;
45290 }
45291
45292 p = (u8 *)p + et_stats[i].offset;
45293 data[i] = (et_stats[i].size == sizeof(u64)) ?
45294- *(u64 *)p: *(u32 *)p;
45295+ *(u64 *)p:(*(u32 *)p);
45296 }
45297
45298- return;
45299+ base = ETHTOOL_STATS_NUM;
45300+ for_all_rx_queues(adapter, rxo, j) {
45301+ for (i = 0; i < ETHTOOL_RXSTATS_NUM; i++) {
45302+ p = (u8 *)&rxo->stats + et_rx_stats[i].offset;
45303+ data[base + j * ETHTOOL_RXSTATS_NUM + i] =
45304+ (et_rx_stats[i].size == sizeof(u64)) ?
45305+ *(u64 *)p: *(u32 *)p;
45306+ }
45307+ }
45308+
45309+ base = ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM;
45310+ for_all_tx_queues(adapter, txo, j) {
45311+ for (i = 0; i < ETHTOOL_TXSTATS_NUM; i++) {
45312+ p = (u8 *)&txo->stats + et_tx_stats[i].offset;
45313+ data[base + j * ETHTOOL_TXSTATS_NUM + i] =
45314+ (et_tx_stats[i].size == sizeof(u64)) ?
45315+ *(u64 *)p: *(u32 *)p;
45316+ }
45317+ }
45318 }
45319
45320 static void
45321 be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
45322 uint8_t *data)
45323 {
45324- int i;
45325+ struct be_adapter *adapter = netdev_priv(netdev);
45326+ int i, j;
45327+
45328 switch (stringset) {
45329 case ETH_SS_STATS:
45330 for (i = 0; i < ETHTOOL_STATS_NUM; i++) {
45331 memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN);
45332 data += ETH_GSTRING_LEN;
45333 }
45334+ for (i = 0; i < adapter->num_rx_qs; i++) {
45335+ for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) {
45336+ sprintf(data, "rxq%d: %s", i,
45337+ et_rx_stats[j].desc);
45338+ data += ETH_GSTRING_LEN;
45339+ }
45340+ }
45341+ for (i = 0; i < adapter->num_tx_qs; i++) {
45342+ for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) {
45343+ sprintf(data, "txq%d: %s", i,
45344+ et_tx_stats[j].desc);
45345+ data += ETH_GSTRING_LEN;
45346+ }
45347+ }
45348+ break;
45349+ case ETH_SS_TEST:
45350+ for (i = 0; i < ETHTOOL_TESTS_NUM; i++) {
45351+ memcpy(data, et_self_tests[i], ETH_GSTRING_LEN);
45352+ data += ETH_GSTRING_LEN;
45353+ }
45354 break;
45355 }
45356 }
45357
45358+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45359 static int be_get_stats_count(struct net_device *netdev)
45360 {
45361- return ETHTOOL_STATS_NUM;
45362+ struct be_adapter *adapter = netdev_priv(netdev);
45363+
45364+ return ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM
45365+ + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45366 }
45367+static int
45368+be_self_test_count(struct net_device *dev)
45369+{
45370+ return ETHTOOL_TESTS_NUM;
45371+}
45372+#else
45373+
45374+static int be_get_sset_count(struct net_device *netdev, int stringset)
45375+{
45376+ struct be_adapter *adapter = netdev_priv(netdev);
45377+
45378+ switch (stringset) {
45379+ case ETH_SS_TEST:
45380+ return ETHTOOL_TESTS_NUM;
45381+ case ETH_SS_STATS:
45382+ return ETHTOOL_STATS_NUM +
45383+ adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM +
45384+ adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM;
45385+ default:
45386+ return -EINVAL;
45387+ }
45388+}
45389+#endif
45390
45391 static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
45392 {
45393- ecmd->speed = SPEED_10000;
45394+ struct be_adapter *adapter = netdev_priv(netdev);
45395+ struct be_phy_info phy_info;
45396+ u8 mac_speed = 0;
45397+ u16 link_speed = 0;
45398+ int link_status = LINK_DOWN;
45399+ int status;
45400+
45401+ if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
45402+ status = be_cmd_link_status_query(adapter, &link_status,
45403+ &mac_speed, &link_speed, 0);
45404+
45405+ be_link_status_update(adapter, link_status);
45406+ /* link_speed is in units of 10 Mbps */
45407+ if (link_speed) {
45408+ ecmd->speed = link_speed*10;
45409+ } else {
45410+ switch (mac_speed) {
45411+ case PHY_LINK_SPEED_10MBPS:
45412+ ecmd->speed = SPEED_10;
45413+ break;
45414+ case PHY_LINK_SPEED_100MBPS:
45415+ ecmd->speed = SPEED_100;
45416+ break;
45417+ case PHY_LINK_SPEED_1GBPS:
45418+ ecmd->speed = SPEED_1000;
45419+ break;
45420+ case PHY_LINK_SPEED_10GBPS:
45421+ ecmd->speed = SPEED_10000;
45422+ break;
45423+ case PHY_LINK_SPEED_ZERO:
45424+ ecmd->speed = 0;
45425+ break;
45426+ }
45427+ }
45428+
45429+ status = be_cmd_get_phy_info(adapter, &phy_info);
45430+ if (!status) {
45431+ switch (phy_info.interface_type) {
45432+ case PHY_TYPE_XFP_10GB:
45433+ case PHY_TYPE_SFP_1GB:
45434+ case PHY_TYPE_SFP_PLUS_10GB:
45435+ ecmd->port = PORT_FIBRE;
45436+ break;
45437+ default:
45438+ ecmd->port = PORT_TP;
45439+ break;
45440+ }
45441+
45442+ switch (phy_info.interface_type) {
45443+ case PHY_TYPE_KR_10GB:
45444+ case PHY_TYPE_KX4_10GB:
45445+ ecmd->transceiver = XCVR_INTERNAL;
45446+ break;
45447+ default:
45448+ ecmd->transceiver = XCVR_EXTERNAL;
45449+ break;
45450+ }
45451+
45452+ if (phy_info.auto_speeds_supported) {
45453+ ecmd->supported |= SUPPORTED_Autoneg;
45454+ ecmd->autoneg = AUTONEG_ENABLE;
45455+ ecmd->advertising |= ADVERTISED_Autoneg;
45456+ }
45457+
45458+ if (phy_info.misc_params & BE_PAUSE_SYM_EN) {
45459+ ecmd->supported |= SUPPORTED_Pause;
45460+ ecmd->advertising |= ADVERTISED_Pause;
45461+ }
45462+
45463+ }
45464+
45465+ /* Save for future use */
45466+ adapter->link_speed = ecmd->speed;
45467+ adapter->port_type = ecmd->port;
45468+ adapter->transceiver = ecmd->transceiver;
45469+ adapter->autoneg = ecmd->autoneg;
45470+ } else {
45471+ ecmd->speed = adapter->link_speed;
45472+ ecmd->port = adapter->port_type;
45473+ ecmd->transceiver = adapter->transceiver;
45474+ ecmd->autoneg = adapter->autoneg;
45475+ }
45476+
45477 ecmd->duplex = DUPLEX_FULL;
45478- ecmd->autoneg = AUTONEG_DISABLE;
45479+ ecmd->phy_address = (adapter->hba_port_num << 4) |
45480+ (adapter->port_name[adapter->hba_port_num]);
45481+ switch (ecmd->port) {
45482+ case PORT_FIBRE:
45483+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
45484+ break;
45485+ case PORT_TP:
45486+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
45487+ break;
45488+ }
45489+
45490+ if (ecmd->autoneg) {
45491+ ecmd->supported |= SUPPORTED_1000baseT_Full;
45492+ ecmd->advertising |= (ADVERTISED_10000baseT_Full |
45493+ ADVERTISED_1000baseT_Full);
45494+ }
45495+
45496 return 0;
45497 }
45498
45499+static int be_set_settings(struct net_device *netdev,
45500+ struct ethtool_cmd *ecmd)
45501+{
45502+ struct be_adapter *adapter = netdev_priv(netdev);
45503+ struct be_phy_info phy_info;
45504+ u16 mac_speed=0;
45505+ u16 dac_cable_len=0;
45506+ u16 port_speed = 0;
45507+ int status;
45508+
45509+ status = be_cmd_get_phy_info(adapter, &phy_info);
45510+ if (status) {
45511+ dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
45512+ return status;
45513+ }
45514+
45515+ if (ecmd->autoneg == AUTONEG_ENABLE) {
45516+ switch(phy_info.interface_type) {
45517+ case PHY_TYPE_SFP_1GB:
45518+ case PHY_TYPE_BASET_1GB:
45519+ case PHY_TYPE_BASEX_1GB:
45520+ case PHY_TYPE_SGMII:
45521+ mac_speed = SPEED_AUTONEG_1GB_100MB_10MB;
45522+ break;
45523+ case PHY_TYPE_SFP_PLUS_10GB:
45524+ dev_warn(&adapter->pdev->dev,
45525+ "Autoneg not supported on this module. \n");
45526+ return -EINVAL;
45527+ case PHY_TYPE_KR_10GB:
45528+ case PHY_TYPE_KX4_10GB:
45529+ mac_speed = SPEED_AUTONEG_10GB_1GB;
45530+ break;
45531+ case PHY_TYPE_BASET_10GB:
45532+ mac_speed = SPEED_AUTONEG_10GB_1GB_100MB;
45533+ break;
45534+ }
45535+ } else if(ecmd->autoneg == AUTONEG_DISABLE) {
45536+ if(ecmd->speed == SPEED_10) {
45537+ mac_speed = SPEED_FORCED_10MB;
45538+ } else if(ecmd->speed == SPEED_100) {
45539+ mac_speed = SPEED_FORCED_100MB;
45540+ } else if(ecmd->speed == SPEED_1000) {
45541+ mac_speed = SPEED_FORCED_1GB;
45542+ } else if(ecmd->speed == SPEED_10000) {
45543+ mac_speed = SPEED_FORCED_10GB;
45544+ }
45545+ }
45546+
45547+ status = be_cmd_get_port_speed(adapter, adapter->hba_port_num,
45548+ &dac_cable_len, &port_speed);
45549+
45550+ if (!status && port_speed != mac_speed)
45551+ status = be_cmd_set_port_speed_v1(adapter,
45552+ adapter->hba_port_num, mac_speed,
45553+ dac_cable_len);
45554+ if (status)
45555+ dev_warn(&adapter->pdev->dev, "port speed set failed.\n");
45556+
45557+ return status;
45558+
45559+}
45560+
45561 static void
45562 be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
45563 {
45564 struct be_adapter *adapter = netdev_priv(netdev);
45565
45566- ring->rx_max_pending = adapter->rx_obj.q.len;
45567- ring->tx_max_pending = adapter->tx_obj.q.len;
45568+ ring->rx_max_pending = adapter->rx_obj[0].q.len;
45569+ ring->tx_max_pending = adapter->tx_obj[0].q.len;
45570
45571- ring->rx_pending = atomic_read(&adapter->rx_obj.q.used);
45572- ring->tx_pending = atomic_read(&adapter->tx_obj.q.used);
45573+ ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
45574+ ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
45575 }
45576
45577 static void
45578@@ -312,7 +615,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
45579 struct be_adapter *adapter = netdev_priv(netdev);
45580
45581 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
45582- ecmd->autoneg = 0;
45583+ ecmd->autoneg = adapter->autoneg;
45584 }
45585
45586 static int
45587@@ -334,6 +637,203 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
45588 return status;
45589 }
45590
45591+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45592+static int
45593+be_phys_id(struct net_device *netdev, u32 data)
45594+{
45595+ struct be_adapter *adapter = netdev_priv(netdev);
45596+ int status;
45597+ u32 cur;
45598+
45599+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
45600+
45601+ if (cur == BEACON_STATE_ENABLED)
45602+ return 0;
45603+
45604+ if (data < 2)
45605+ data = 2;
45606+
45607+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
45608+ BEACON_STATE_ENABLED);
45609+ set_current_state(TASK_INTERRUPTIBLE);
45610+ schedule_timeout(data*HZ);
45611+
45612+ status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
45613+ BEACON_STATE_DISABLED);
45614+
45615+ return status;
45616+}
45617+#else
45618+static int
45619+be_set_phys_id(struct net_device *netdev,
45620+ enum ethtool_phys_id_state state)
45621+{
45622+ struct be_adapter *adapter = netdev_priv(netdev);
45623+
45624+ switch (state) {
45625+ case ETHTOOL_ID_ACTIVE:
45626+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
45627+ &adapter->beacon_state);
45628+ return 1; /* cycle on/off once per second */
45629+
45630+ case ETHTOOL_ID_ON:
45631+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
45632+ BEACON_STATE_ENABLED);
45633+ break;
45634+
45635+ case ETHTOOL_ID_OFF:
45636+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
45637+ BEACON_STATE_DISABLED);
45638+ break;
45639+
45640+ case ETHTOOL_ID_INACTIVE:
45641+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
45642+ adapter->beacon_state);
45643+ }
45644+
45645+ return 0;
45646+}
45647+#endif
45648+
45649+static bool
45650+be_is_wol_supported(struct be_adapter *adapter)
45651+{
45652+ struct pci_dev *pdev = adapter->pdev;
45653+
45654+ if (!be_physfn(adapter))
45655+ return false;
45656+
45657+ switch (pdev->subsystem_device) {
45658+ case OC_SUBSYS_DEVICE_ID1:
45659+ case OC_SUBSYS_DEVICE_ID2:
45660+ case OC_SUBSYS_DEVICE_ID3:
45661+ case OC_SUBSYS_DEVICE_ID4:
45662+ return false;
45663+ default:
45664+ return true;
45665+ }
45666+}
45667+
45668+static void
45669+be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
45670+{
45671+ struct be_adapter *adapter = netdev_priv(netdev);
45672+
45673+ if (be_is_wol_supported(adapter))
45674+ wol->supported = WAKE_MAGIC;
45675+ if (adapter->wol)
45676+ wol->wolopts = WAKE_MAGIC;
45677+ else
45678+ wol->wolopts = 0;
45679+ memset(&wol->sopass, 0, sizeof(wol->sopass));
45680+}
45681+
45682+static int
45683+be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
45684+{
45685+ struct be_adapter *adapter = netdev_priv(netdev);
45686+
45687+ if (wol->wolopts & ~WAKE_MAGIC)
45688+ return -EOPNOTSUPP;
45689+
45690+ if (!be_is_wol_supported(adapter)) {
45691+ dev_warn(&adapter->pdev->dev,
45692+ "WOL not supported for this subsystemid: %x\n",
45693+ adapter->pdev->subsystem_device);
45694+ return -EOPNOTSUPP;
45695+ }
45696+
45697+ if (wol->wolopts & WAKE_MAGIC)
45698+ adapter->wol = true;
45699+ else
45700+ adapter->wol = false;
45701+
45702+ return 0;
45703+}
45704+
45705+static int
45706+be_test_ddr_dma(struct be_adapter *adapter)
45707+{
45708+ int ret, i;
45709+ struct be_dma_mem ddrdma_cmd;
45710+ u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
45711+
45712+ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
45713+ ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
45714+ &ddrdma_cmd.dma);
45715+ if (!ddrdma_cmd.va) {
45716+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
45717+ return -ENOMEM;
45718+ }
45719+
45720+ for (i = 0; i < 2; i++) {
45721+ ret = be_cmd_ddr_dma_test(adapter, pattern[i],
45722+ 4096, &ddrdma_cmd);
45723+ if (ret != 0)
45724+ goto err;
45725+ }
45726+
45727+err:
45728+ pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
45729+ ddrdma_cmd.va, ddrdma_cmd.dma);
45730+ return ret;
45731+}
45732+
45733+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
45734+ u64 *status)
45735+{
45736+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
45737+ loopback_type, 1);
45738+ *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
45739+ loopback_type, 1500,
45740+ 2, 0xabc);
45741+ be_cmd_set_loopback(adapter, adapter->hba_port_num,
45742+ BE_NO_LOOPBACK, 1);
45743+ return *status;
45744+}
45745+
45746+static void
45747+be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
45748+{
45749+ struct be_adapter *adapter = netdev_priv(netdev);
45750+ int link_status;
45751+ u8 mac_speed = 0;
45752+ u16 qos_link_speed = 0;
45753+
45754+ memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
45755+
45756+ if (test->flags & ETH_TEST_FL_OFFLINE) {
45757+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
45758+ &data[0]) != 0) {
45759+ test->flags |= ETH_TEST_FL_FAILED;
45760+ }
45761+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
45762+ &data[1]) != 0) {
45763+ test->flags |= ETH_TEST_FL_FAILED;
45764+ }
45765+ if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
45766+ &data[2]) != 0) {
45767+ test->flags |= ETH_TEST_FL_FAILED;
45768+ }
45769+ }
45770+
45771+ if (be_test_ddr_dma(adapter) != 0) {
45772+ data[3] = 1;
45773+ test->flags |= ETH_TEST_FL_FAILED;
45774+ }
45775+
45776+ if (be_cmd_link_status_query(adapter, &link_status, &mac_speed,
45777+ &qos_link_speed, 0) != 0) {
45778+ test->flags |= ETH_TEST_FL_FAILED;
45779+ data[4] = -1;
45780+ } else if (!mac_speed) {
45781+ test->flags |= ETH_TEST_FL_FAILED;
45782+ data[4] = 1;
45783+ }
45784+
45785+}
45786+
45787+#ifdef HAVE_ETHTOOL_FLASH
45788 static int
45789 be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
45790 {
45791@@ -347,11 +847,73 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
45792
45793 return be_load_fw(adapter, file_name);
45794 }
45795+#endif
45796
45797-const struct ethtool_ops be_ethtool_ops = {
45798+static int
45799+be_get_eeprom_len(struct net_device *netdev)
45800+{
45801+ return BE_READ_SEEPROM_LEN;
45802+}
45803+
45804+static int
45805+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
45806+ uint8_t *data)
45807+{
45808+ struct be_adapter *adapter = netdev_priv(netdev);
45809+ struct be_dma_mem eeprom_cmd;
45810+ struct be_cmd_resp_seeprom_read *resp;
45811+ int status;
45812+
45813+ if (!eeprom->len)
45814+ return -EINVAL;
45815+
45816+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
45817+
45818+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
45819+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
45820+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
45821+ &eeprom_cmd.dma);
45822+
45823+ if (!eeprom_cmd.va) {
45824+ dev_err(&adapter->pdev->dev,
45825+ "Memory allocation failure. Could not read eeprom\n");
45826+ return -ENOMEM;
45827+ }
45828+
45829+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
45830+
45831+ if (!status) {
45832+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
45833+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
45834+ }
45835+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
45836+ eeprom_cmd.dma);
45837+
45838+ return status;
45839+}
45840+
45841+static int be_set_tso(struct net_device *netdev, uint32_t data)
45842+{
45843+ if (data) {
45844+ netdev->features |= NETIF_F_TSO;
45845+ netdev->features |= NETIF_F_TSO6;
45846+ } else {
45847+ netdev->features &= ~NETIF_F_TSO;
45848+ netdev->features &= ~NETIF_F_TSO6;
45849+ }
45850+ return 0;
45851+}
45852+
45853+
45854+struct ethtool_ops be_ethtool_ops = {
45855 .get_settings = be_get_settings,
45856+ .set_settings = be_set_settings,
45857 .get_drvinfo = be_get_drvinfo,
45858+ .get_wol = be_get_wol,
45859+ .set_wol = be_set_wol,
45860 .get_link = ethtool_op_get_link,
45861+ .get_eeprom_len = be_get_eeprom_len,
45862+ .get_eeprom = be_read_eeprom,
45863 .get_coalesce = be_get_coalesce,
45864 .set_coalesce = be_set_coalesce,
45865 .get_ringparam = be_get_ringparam,
45866@@ -364,9 +926,21 @@ const struct ethtool_ops be_ethtool_ops = {
45867 .get_sg = ethtool_op_get_sg,
45868 .set_sg = ethtool_op_set_sg,
45869 .get_tso = ethtool_op_get_tso,
45870- .set_tso = ethtool_op_set_tso,
45871+ .set_tso = be_set_tso,
45872 .get_strings = be_get_stat_strings,
45873+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
45874+ .phys_id = be_phys_id,
45875 .get_stats_count = be_get_stats_count,
45876+ .self_test_count = be_self_test_count,
45877+#else
45878+ .set_phys_id = be_set_phys_id,
45879+ .get_sset_count = be_get_sset_count,
45880+#endif
45881 .get_ethtool_stats = be_get_ethtool_stats,
45882+ .get_regs_len = be_get_reg_len,
45883+ .get_regs = be_get_regs,
45884+#ifdef HAVE_ETHTOOL_FLASH
45885 .flash_device = be_do_flash,
45886+#endif
45887+ .self_test = be_self_test
45888 };
45889diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
45890index a3394b4..f871d8c 100644
45891--- a/drivers/net/benet/be_hw.h
45892+++ b/drivers/net/benet/be_hw.h
45893@@ -1,18 +1,18 @@
45894 /*
45895- * Copyright (C) 2005 - 2009 ServerEngines
45896+ * Copyright (C) 2005 - 2011 Emulex
45897 * All rights reserved.
45898 *
45899 * This program is free software; you can redistribute it and/or
45900 * modify it under the terms of the GNU General Public License version 2
45901- * as published by the Free Software Foundation. The full GNU General
45902+ * as published by the Free Software Foundation. The full GNU General
45903 * Public License is included in this distribution in the file called COPYING.
45904 *
45905 * Contact Information:
45906- * linux-drivers@serverengines.com
45907+ * linux-drivers@emulex.com
45908 *
45909- * ServerEngines
45910- * 209 N. Fair Oaks Ave
45911- * Sunnyvale, CA 94085
45912+ * Emulex
45913+ * 3333 Susan Street
45914+ * Costa Mesa, CA 92626
45915 */
45916
45917 /********* Mailbox door bell *************/
45918@@ -26,24 +26,34 @@
45919 * queue entry.
45920 */
45921 #define MPU_MAILBOX_DB_OFFSET 0x160
45922-#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
45923+#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
45924 #define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
45925
45926-#define MPU_EP_CONTROL 0
45927+#define MPU_EP_CONTROL 0
45928
45929 /********** MPU semphore ******************/
45930-#define MPU_EP_SEMAPHORE_OFFSET 0xac
45931+#define MPU_EP_SEMAPHORE_OFFSET 0xac
45932+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
45933 #define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
45934 #define EP_SEMAPHORE_POST_ERR_MASK 0x1
45935 #define EP_SEMAPHORE_POST_ERR_SHIFT 31
45936 /* MPU semphore POST stage values */
45937-#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
45938-#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
45939+#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
45940+#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
45941 #define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
45942 #define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
45943
45944+/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
45945+#define SLIPORT_STATUS_OFFSET 0x404
45946+#define SLIPORT_CONTROL_OFFSET 0x408
45947+
45948+#define SLIPORT_STATUS_ERR_MASK 0x80000000
45949+#define SLIPORT_STATUS_RN_MASK 0x01000000
45950+#define SLIPORT_STATUS_RDY_MASK 0x00800000
45951+#define SLI_PORT_CONTROL_IP_MASK 0x08000000
45952+
45953 /********* Memory BAR register ************/
45954-#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
45955+#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
45956 /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
45957 * Disable" may still globally block interrupts in addition to individual
45958 * interrupt masks; a mechanism for the device driver to block all interrupts
45959@@ -52,13 +62,70 @@
45960 */
45961 #define MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK (1 << 29) /* bit 29 */
45962
45963+/********* Link Status CSR ****************/
45964+#define PCICFG_PCIE_LINK_STATUS_OFFSET 0xd0
45965+#define PCIE_LINK_STATUS_SPEED_MASK 0xFF /* bits 16 - 19 */
45966+#define PCIE_LINK_STATUS_SPEED_SHIFT 16
45967+#define PCIE_LINK_STATUS_NEG_WIDTH_MASK 0x3F /* bits 20 - 25 */
45968+#define PCIE_LINK_STATUS_NEG_WIDTH_SHIFT 20
45969+
45970+/********* Link Capability CSR ************/
45971+#define PCICFG_PCIE_LINK_CAP_OFFSET 0xcc
45972+#define PCIE_LINK_CAP_MAX_SPEED_MASK 0xFF /* bits 0 - 3 */
45973+#define PCIE_LINK_CAP_MAX_SPEED_SHIFT 0
45974+#define PCIE_LINK_CAP_MAX_WIDTH_MASK 0x3F /* bits 4 - 9 */
45975+#define PCIE_LINK_CAP_MAX_WIDTH_SHIFT 4
45976+
45977+/********* PCI Function Capability ************/
45978+#define BE_FUNCTION_CAPS_UNCLASSIFIED_STATS 0x1
45979+#define BE_FUNCTION_CAPS_RSS 0x2
45980+#define BE_FUNCTION_CAPS_PROMISCUOUS 0x4
45981+#define BE_FUNCTION_CAPS_LEGACY_MODE 0x8
45982+
45983+/********* Power managment (WOL) **********/
45984+#define PCICFG_PM_CONTROL_OFFSET 0x44
45985+#define PCICFG_PM_CONTROL_MASK 0x108 /* bits 3 & 8 */
45986+
45987+/********* Online Control Registers *******/
45988+#define PCICFG_ONLINE0 0xB0
45989+#define PCICFG_ONLINE1 0xB4
45990+
45991+/********* UE Status and Mask Registers ***/
45992+#define PCICFG_UE_STATUS_LOW 0xA0
45993+#define PCICFG_UE_STATUS_HIGH 0xA4
45994+#define PCICFG_UE_STATUS_LOW_MASK 0xA8
45995+#define PCICFG_UE_STATUS_HI_MASK 0xAC
45996+
45997+/******** SLI_INTF ***********************/
45998+#define SLI_INTF_REG_OFFSET 0x58
45999+#define SLI_INTF_VALID_MASK 0xE0000000
46000+#define SLI_INTF_VALID 0xC0000000
46001+#define SLI_INTF_HINT2_MASK 0x1F000000
46002+#define SLI_INTF_HINT2_SHIFT 24
46003+#define SLI_INTF_HINT1_MASK 0x00FF0000
46004+#define SLI_INTF_HINT1_SHIFT 16
46005+#define SLI_INTF_FAMILY_MASK 0x00000F00
46006+#define SLI_INTF_FAMILY_SHIFT 8
46007+#define SLI_INTF_IF_TYPE_MASK 0x0000F000
46008+#define SLI_INTF_IF_TYPE_SHIFT 12
46009+#define SLI_INTF_REV_MASK 0x000000F0
46010+#define SLI_INTF_REV_SHIFT 4
46011+#define SLI_INTF_FT_MASK 0x00000001
46012+
46013+/* SLI family */
46014+#define BE_SLI_FAMILY 0x0
46015+#define LANCER_A0_SLI_FAMILY 0xA
46016+
46017 /********* ISR0 Register offset **********/
46018-#define CEV_ISR0_OFFSET 0xC18
46019+#define CEV_ISR0_OFFSET 0xC18
46020 #define CEV_ISR_SIZE 4
46021
46022 /********* Event Q door bell *************/
46023 #define DB_EQ_OFFSET DB_CQ_OFFSET
46024 #define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
46025+#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
46026+#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
46027+
46028 /* Clear the interrupt for this eq */
46029 #define DB_EQ_CLR_SHIFT (9) /* bit 9 */
46030 /* Must be 1 */
46031@@ -69,12 +136,16 @@
46032 #define DB_EQ_REARM_SHIFT (29) /* bit 29 */
46033
46034 /********* Compl Q door bell *************/
46035-#define DB_CQ_OFFSET 0x120
46036+#define DB_CQ_OFFSET 0x120
46037 #define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46038+#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
46039+#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
46040+ placing at 11-15 */
46041+
46042 /* Number of event entries processed */
46043-#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46044+#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
46045 /* Rearm bit */
46046-#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46047+#define DB_CQ_REARM_SHIFT (29) /* bit 29 */
46048
46049 /********** TX ULP door bell *************/
46050 #define DB_TXULP1_OFFSET 0x60
46051@@ -84,25 +155,103 @@
46052 #define DB_TXULP_NUM_POSTED_MASK 0x3FFF /* bits 16 - 29 */
46053
46054 /********** RQ(erx) door bell ************/
46055-#define DB_RQ_OFFSET 0x100
46056+#define DB_RQ_OFFSET 0x100
46057 #define DB_RQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
46058 /* Number of rx frags posted */
46059 #define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
46060
46061 /********** MCC door bell ************/
46062-#define DB_MCCQ_OFFSET 0x140
46063+#define DB_MCCQ_OFFSET 0x140
46064 #define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
46065 /* Number of entries posted */
46066 #define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
46067
46068+/********** SRIOV VF PCICFG OFFSET ********/
46069+#define SRIOV_VF_PCICFG_OFFSET (4096)
46070+
46071+/********** FAT TABLE ********/
46072+#define RETRIEVE_FAT 0
46073+#define QUERY_FAT 1
46074+
46075+/* Flashrom related descriptors */
46076+#define IMAGE_TYPE_FIRMWARE 160
46077+#define IMAGE_TYPE_BOOTCODE 224
46078+#define IMAGE_TYPE_OPTIONROM 32
46079+
46080+#define NUM_FLASHDIR_ENTRIES 32
46081+
46082+#define IMG_TYPE_ISCSI_ACTIVE 0
46083+#define IMG_TYPE_REDBOOT 1
46084+#define IMG_TYPE_BIOS 2
46085+#define IMG_TYPE_PXE_BIOS 3
46086+#define IMG_TYPE_FCOE_BIOS 8
46087+#define IMG_TYPE_ISCSI_BACKUP 9
46088+#define IMG_TYPE_FCOE_FW_ACTIVE 10
46089+#define IMG_TYPE_FCOE_FW_BACKUP 11
46090+#define IMG_TYPE_NCSI_FW 13
46091+#define IMG_TYPE_PHY_FW 99
46092+#define TN_8022 13
46093+
46094+#define ILLEGAL_IOCTL_REQ 2
46095+#define FLASHROM_OPER_PHY_FLASH 9
46096+#define FLASHROM_OPER_PHY_SAVE 10
46097+#define FLASHROM_OPER_FLASH 1
46098+#define FLASHROM_OPER_SAVE 2
46099+#define FLASHROM_OPER_REPORT 4
46100+
46101+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image size */
46102+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM image sz */
46103+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
46104+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max firmware image size */
46105+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM image sz */
46106+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
46107+#define FLASH_NCSI_IMAGE_MAX_SIZE_g3 (262144)
46108+#define FLASH_PHY_FW_IMAGE_MAX_SIZE_g3 (262144)
46109+
46110+#define FLASH_NCSI_MAGIC (0x16032009)
46111+#define FLASH_NCSI_DISABLED (0)
46112+#define FLASH_NCSI_ENABLED (1)
46113+
46114+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
46115+
46116+/* Offsets for components on Flash. */
46117+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
46118+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
46119+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
46120+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
46121+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
46122+#define FLASH_PXE_BIOS_START_g2 (7864320)
46123+#define FLASH_FCoE_BIOS_START_g2 (524288)
46124+#define FLASH_REDBOOT_START_g2 (0)
46125+
46126+#define FLASH_NCSI_START_g3 (15990784)
46127+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
46128+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
46129+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
46130+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
46131+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
46132+#define FLASH_PXE_BIOS_START_g3 (13107200)
46133+#define FLASH_FCoE_BIOS_START_g3 (13631488)
46134+#define FLASH_REDBOOT_START_g3 (262144)
46135+#define FLASH_PHY_FW_START_g3 (1310720)
46136+
46137+/************* Rx Packet Type Encoding **************/
46138+#define BE_UNICAST_PACKET 0
46139+#define BE_MULTICAST_PACKET 1
46140+#define BE_BROADCAST_PACKET 2
46141+#define BE_RSVD_PACKET 3
46142+
46143 /*
46144 * BE descriptors: host memory data structures whose formats
46145 * are hardwired in BE silicon.
46146 */
46147 /* Event Queue Descriptor */
46148-#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46149-#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46150-#define EQ_ENTRY_RES_ID_SHIFT 16
46151+#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
46152+#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
46153+#define EQ_ENTRY_RES_ID_SHIFT 16
46154+
46155+#define BE_MAC_PROMISCUOUS 62 /* Promiscuous mode */
46156+
46157 struct be_eq_entry {
46158 u32 evt;
46159 };
46160@@ -126,7 +275,7 @@ struct amap_eth_hdr_wrb {
46161 u8 event;
46162 u8 crc;
46163 u8 forward;
46164- u8 ipsec;
46165+ u8 lso6;
46166 u8 mgmt;
46167 u8 ipcs;
46168 u8 udpcs;
46169@@ -151,7 +300,7 @@ struct be_eth_hdr_wrb {
46170 * offset/shift/mask of each field */
46171 struct amap_eth_tx_compl {
46172 u8 wrb_index[16]; /* dword 0 */
46173- u8 ct[2]; /* dword 0 */
46174+ u8 ct[2]; /* dword 0 */
46175 u8 port[2]; /* dword 0 */
46176 u8 rsvd0[8]; /* dword 0 */
46177 u8 status[4]; /* dword 0 */
46178@@ -179,10 +328,10 @@ struct be_eth_rx_d {
46179
46180 /* RX Compl Queue Descriptor */
46181
46182-/* Pseudo amap definition for eth_rx_compl in which each bit of the
46183- * actual structure is defined as a byte: used to calculate
46184+/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which
46185+ * each bit of the actual structure is defined as a byte: used to calculate
46186 * offset/shift/mask of each field */
46187-struct amap_eth_rx_compl {
46188+struct amap_eth_rx_compl_v0 {
46189 u8 vlan_tag[16]; /* dword 0 */
46190 u8 pktsize[14]; /* dword 0 */
46191 u8 port; /* dword 0 */
46192@@ -213,39 +362,91 @@ struct amap_eth_rx_compl {
46193 u8 rsshash[32]; /* dword 3 */
46194 } __packed;
46195
46196+/* Pseudo amap definition for BE3 native mode eth_rx_compl in which
46197+ * each bit of the actual structure is defined as a byte: used to calculate
46198+ * offset/shift/mask of each field */
46199+struct amap_eth_rx_compl_v1 {
46200+ u8 vlan_tag[16]; /* dword 0 */
46201+ u8 pktsize[14]; /* dword 0 */
46202+ u8 vtp; /* dword 0 */
46203+ u8 ip_opt; /* dword 0 */
46204+ u8 err; /* dword 1 */
46205+ u8 rsshp; /* dword 1 */
46206+ u8 ipf; /* dword 1 */
46207+ u8 tcpf; /* dword 1 */
46208+ u8 udpf; /* dword 1 */
46209+ u8 ipcksm; /* dword 1 */
46210+ u8 l4_cksm; /* dword 1 */
46211+ u8 ip_version; /* dword 1 */
46212+ u8 macdst[7]; /* dword 1 */
46213+ u8 rsvd0; /* dword 1 */
46214+ u8 fragndx[10]; /* dword 1 */
46215+ u8 ct[2]; /* dword 1 */
46216+ u8 sw; /* dword 1 */
46217+ u8 numfrags[3]; /* dword 1 */
46218+ u8 rss_flush; /* dword 2 */
46219+ u8 cast_enc[2]; /* dword 2 */
46220+ u8 vtm; /* dword 2 */
46221+ u8 rss_bank; /* dword 2 */
46222+ u8 port[2]; /* dword 2 */
46223+ u8 vntagp; /* dword 2 */
46224+ u8 header_len[8]; /* dword 2 */
46225+ u8 header_split[2]; /* dword 2 */
46226+ u8 rsvd1[13]; /* dword 2 */
46227+ u8 valid; /* dword 2 */
46228+ u8 rsshash[32]; /* dword 3 */
46229+} __packed;
46230+
46231 struct be_eth_rx_compl {
46232 u32 dw[4];
46233 };
46234
46235-/* Flashrom related descriptors */
46236-#define IMAGE_TYPE_FIRMWARE 160
46237-#define IMAGE_TYPE_BOOTCODE 224
46238-#define IMAGE_TYPE_OPTIONROM 32
46239+struct mgmt_hba_attribs {
46240+ u8 flashrom_version_string[32];
46241+ u8 manufacturer_name[32];
46242+ u32 supported_modes;
46243+ u32 rsvd0[3];
46244+ u8 ncsi_ver_string[12];
46245+ u32 default_extended_timeout;
46246+ u8 controller_model_number[32];
46247+ u8 controller_description[64];
46248+ u8 controller_serial_number[32];
46249+ u8 ip_version_string[32];
46250+ u8 firmware_version_string[32];
46251+ u8 bios_version_string[32];
46252+ u8 redboot_version_string[32];
46253+ u8 driver_version_string[32];
46254+ u8 fw_on_flash_version_string[32];
46255+ u32 functionalities_supported;
46256+ u16 max_cdblength;
46257+ u8 asic_revision;
46258+ u8 generational_guid[16];
46259+ u8 hba_port_count;
46260+ u16 default_link_down_timeout;
46261+ u8 iscsi_ver_min_max;
46262+ u8 multifunction_device;
46263+ u8 cache_valid;
46264+ u8 hba_status;
46265+ u8 max_domains_supported;
46266+ u8 phy_port;
46267+ u32 firmware_post_status;
46268+ u32 hba_mtu[8];
46269+ u32 rsvd1[4];
46270+};
46271
46272-#define NUM_FLASHDIR_ENTRIES 32
46273-
46274-#define FLASHROM_TYPE_ISCSI_ACTIVE 0
46275-#define FLASHROM_TYPE_BIOS 2
46276-#define FLASHROM_TYPE_PXE_BIOS 3
46277-#define FLASHROM_TYPE_FCOE_BIOS 8
46278-#define FLASHROM_TYPE_ISCSI_BACKUP 9
46279-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
46280-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
46281-
46282-#define FLASHROM_OPER_FLASH 1
46283-#define FLASHROM_OPER_SAVE 2
46284-
46285-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
46286-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
46287-
46288-/* Offsets for components on Flash. */
46289-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
46290-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
46291-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
46292-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
46293-#define FLASH_iSCSI_BIOS_START (7340032)
46294-#define FLASH_PXE_BIOS_START (7864320)
46295-#define FLASH_FCoE_BIOS_START (524288)
46296+struct mgmt_controller_attrib {
46297+ struct mgmt_hba_attribs hba_attribs;
46298+ u16 pci_vendor_id;
46299+ u16 pci_device_id;
46300+ u16 pci_sub_vendor_id;
46301+ u16 pci_sub_system_id;
46302+ u8 pci_bus_number;
46303+ u8 pci_device_number;
46304+ u8 pci_function_number;
46305+ u8 interface_type;
46306+ u64 unique_identifier;
46307+ u32 rsvd0[5];
46308+};
46309
46310 struct controller_id {
46311 u32 vendor;
46312@@ -254,7 +455,20 @@ struct controller_id {
46313 u32 subdevice;
46314 };
46315
46316-struct flash_file_hdr {
46317+struct flash_comp {
46318+ unsigned long offset;
46319+ int optype;
46320+ int size;
46321+};
46322+
46323+struct image_hdr {
46324+ u32 imageid;
46325+ u32 imageoffset;
46326+ u32 imagelength;
46327+ u32 image_checksum;
46328+ u8 image_version[32];
46329+};
46330+struct flash_file_hdr_g2 {
46331 u8 sign[32];
46332 u32 cksum;
46333 u32 antidote;
46334@@ -266,6 +480,17 @@ struct flash_file_hdr {
46335 u8 build[24];
46336 };
46337
46338+struct flash_file_hdr_g3 {
46339+ u8 sign[52];
46340+ u8 ufi_version[4];
46341+ u32 file_len;
46342+ u32 cksum;
46343+ u32 antidote;
46344+ u32 num_imgs;
46345+ u8 build[24];
46346+ u8 rsvd[32];
46347+};
46348+
46349 struct flash_section_hdr {
46350 u32 format_rev;
46351 u32 cksum;
46352@@ -299,3 +524,19 @@ struct flash_section_info {
46353 struct flash_section_hdr fsec_hdr;
46354 struct flash_section_entry fsec_entry[32];
46355 };
46356+
46357+struct flash_ncsi_image_hdr {
46358+ u32 magic;
46359+ u8 hdr_len;
46360+ u8 type;
46361+ u16 hdr_ver;
46362+ u8 rsvd0[2];
46363+ u16 load_offset;
46364+ u32 len;
46365+ u32 flash_offset;
46366+ u8 ver[16];
46367+ u8 name[24];
46368+ u32 img_cksum;
46369+ u8 rsvd1[4];
46370+ u32 hdr_cksum;
46371+};
46372diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
46373index 000e377..f501aa3 100644
46374--- a/drivers/net/benet/be_main.c
46375+++ b/drivers/net/benet/be_main.c
46376@@ -1,18 +1,18 @@
46377 /*
46378- * Copyright (C) 2005 - 2009 ServerEngines
46379+ * Copyright (C) 2005 - 2011 Emulex
46380 * All rights reserved.
46381 *
46382 * This program is free software; you can redistribute it and/or
46383 * modify it under the terms of the GNU General Public License version 2
46384- * as published by the Free Software Foundation. The full GNU General
46385+ * as published by the Free Software Foundation. The full GNU General
46386 * Public License is included in this distribution in the file called COPYING.
46387 *
46388 * Contact Information:
46389- * linux-drivers@serverengines.com
46390+ * linux-drivers@emulex.com
46391 *
46392- * ServerEngines
46393- * 209 N. Fair Oaks Ave
46394- * Sunnyvale, CA 94085
46395+ * Emulex
46396+ * 3333 Susan Street
46397+ * Costa Mesa, CA 92626
46398 */
46399
46400 #include "be.h"
46401@@ -22,23 +22,119 @@
46402 MODULE_VERSION(DRV_VER);
46403 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46404 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
46405-MODULE_AUTHOR("ServerEngines Corporation");
46406+MODULE_AUTHOR("Emulex Corporation");
46407 MODULE_LICENSE("GPL");
46408+MODULE_INFO(supported, "external");
46409
46410-static unsigned int rx_frag_size = 2048;
46411-module_param(rx_frag_size, uint, S_IRUGO);
46412-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
46413+static ushort rx_frag_size = 2048;
46414+static unsigned int num_vfs;
46415+static unsigned int msix = 1;
46416+module_param(rx_frag_size, ushort, S_IRUGO);
46417+module_param(num_vfs, uint, S_IRUGO);
46418+module_param(msix, uint, S_IRUGO);
46419+MODULE_PARM_DESC(rx_frag_size, "Size of receive fragment buffer"
46420+ " - 2048 (default), 4096 or 8192");
46421+MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
46422+MODULE_PARM_DESC(msix, "Enable and disable the MSI"
46423+ "x (By default MSIx is enabled)");
46424+static unsigned int gro = 1;
46425+module_param(gro, uint, S_IRUGO);
46426+MODULE_PARM_DESC(gro, "Enable or Disable GRO. Enabled by default");
46427+
46428+static unsigned int multi_rxq = true;
46429+module_param(multi_rxq, uint, S_IRUGO);
46430+MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
46431
46432 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
46433 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
46434 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
46435 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46436 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
46437- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
46438+ /*
46439+ * Lancer is not part of Palau 4.0
46440+ * { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
46441+ */
46442 { 0 }
46443 };
46444 MODULE_DEVICE_TABLE(pci, be_dev_ids);
46445
46446+/* UE Status Low CSR */
46447+static char *ue_status_low_desc[] = {
46448+ "CEV",
46449+ "CTX",
46450+ "DBUF",
46451+ "ERX",
46452+ "Host",
46453+ "MPU",
46454+ "NDMA",
46455+ "PTC ",
46456+ "RDMA ",
46457+ "RXF ",
46458+ "RXIPS ",
46459+ "RXULP0 ",
46460+ "RXULP1 ",
46461+ "RXULP2 ",
46462+ "TIM ",
46463+ "TPOST ",
46464+ "TPRE ",
46465+ "TXIPS ",
46466+ "TXULP0 ",
46467+ "TXULP1 ",
46468+ "UC ",
46469+ "WDMA ",
46470+ "TXULP2 ",
46471+ "HOST1 ",
46472+ "P0_OB_LINK ",
46473+ "P1_OB_LINK ",
46474+ "HOST_GPIO ",
46475+ "MBOX ",
46476+ "AXGMAC0",
46477+ "AXGMAC1",
46478+ "JTAG",
46479+ "MPU_INTPEND"
46480+};
46481+
46482+/* UE Status High CSR */
46483+static char *ue_status_hi_desc[] = {
46484+ "LPCMEMHOST",
46485+ "MGMT_MAC",
46486+ "PCS0ONLINE",
46487+ "MPU_IRAM",
46488+ "PCS1ONLINE",
46489+ "PCTL0",
46490+ "PCTL1",
46491+ "PMEM",
46492+ "RR",
46493+ "TXPB",
46494+ "RXPP",
46495+ "XAUI",
46496+ "TXP",
46497+ "ARM",
46498+ "IPC",
46499+ "HOST2",
46500+ "HOST3",
46501+ "HOST4",
46502+ "HOST5",
46503+ "HOST6",
46504+ "HOST7",
46505+ "HOST8",
46506+ "HOST9",
46507+ "NETC",
46508+ "Unknown",
46509+ "Unknown",
46510+ "Unknown",
46511+ "Unknown",
46512+ "Unknown",
46513+ "Unknown",
46514+ "Unknown",
46515+ "Unknown"
46516+};
46517+
46518+static inline bool be_multi_rxq(struct be_adapter *adapter)
46519+{
46520+ return (adapter->num_rx_qs > 1);
46521+}
46522+
46523 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
46524 {
46525 struct be_dma_mem *mem = &q->dma_mem;
46526@@ -69,6 +165,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
46527 u32 reg = ioread32(addr);
46528 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
46529
46530+ if (adapter->eeh_err)
46531+ return;
46532+
46533 if (!enabled && enable)
46534 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
46535 else if (enabled && !enable)
46536@@ -84,6 +183,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
46537 u32 val = 0;
46538 val |= qid & DB_RQ_RING_ID_MASK;
46539 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
46540+
46541+ wmb();
46542 iowrite32(val, adapter->db + DB_RQ_OFFSET);
46543 }
46544
46545@@ -92,6 +193,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
46546 u32 val = 0;
46547 val |= qid & DB_TXULP_RING_ID_MASK;
46548 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
46549+
46550+ wmb();
46551 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
46552 }
46553
46554@@ -100,6 +203,12 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
46555 {
46556 u32 val = 0;
46557 val |= qid & DB_EQ_RING_ID_MASK;
46558+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
46559+ DB_EQ_RING_ID_EXT_MASK_SHIFT);
46560+
46561+ if (adapter->eeh_err)
46562+ return;
46563+
46564 if (arm)
46565 val |= 1 << DB_EQ_REARM_SHIFT;
46566 if (clear_int)
46567@@ -113,6 +222,12 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
46568 {
46569 u32 val = 0;
46570 val |= qid & DB_CQ_RING_ID_MASK;
46571+ val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
46572+ DB_CQ_RING_ID_EXT_MASK_SHIFT);
46573+
46574+ if (adapter->eeh_err)
46575+ return;
46576+
46577 if (arm)
46578 val |= 1 << DB_CQ_REARM_SHIFT;
46579 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
46580@@ -124,96 +239,250 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
46581 struct be_adapter *adapter = netdev_priv(netdev);
46582 struct sockaddr *addr = p;
46583 int status = 0;
46584+ u8 current_mac[ETH_ALEN];
46585+ u32 pmac_id = adapter->pmac_id;
46586
46587- status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
46588+ if (!is_valid_ether_addr(addr->sa_data))
46589+ return -EADDRNOTAVAIL;
46590+
46591+ status = be_cmd_mac_addr_query(adapter, current_mac,
46592+ MAC_ADDRESS_TYPE_NETWORK, false,
46593+ adapter->if_handle);
46594 if (status)
46595- return status;
46596+ goto err;
46597+
46598+ if (!memcmp(addr->sa_data, current_mac, ETH_ALEN))
46599+ goto done;
46600
46601 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
46602- adapter->if_handle, &adapter->pmac_id);
46603- if (!status)
46604+ adapter->if_handle, &adapter->pmac_id, 0);
46605+
46606+ if (!status) {
46607+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
46608+ pmac_id, 0);
46609 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
46610+ goto done;
46611+ }
46612
46613- return status;
46614+err:
46615+ if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
46616+ return -EPERM;
46617+ else
46618+ dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n",
46619+ addr->sa_data);
46620+done:
46621+ return status;
46622+}
46623+
46624+static void populate_be2_stats(struct be_adapter *adapter)
46625+{
46626+
46627+ struct be_drv_stats *drvs = &adapter->drv_stats;
46628+ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
46629+ struct be_port_rxf_stats_v0 *port_stats =
46630+ be_port_rxf_stats_from_cmd(adapter);
46631+ struct be_rxf_stats_v0 *rxf_stats =
46632+ be_rxf_stats_from_cmd(adapter);
46633+
46634+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
46635+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
46636+ drvs->rx_control_frames = port_stats->rx_control_frames;
46637+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
46638+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
46639+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
46640+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
46641+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
46642+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
46643+ drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
46644+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
46645+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
46646+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
46647+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
46648+ drvs->rx_input_fifo_overflow_drop =
46649+ port_stats->rx_input_fifo_overflow;
46650+ drvs->rx_dropped_header_too_small =
46651+ port_stats->rx_dropped_header_too_small;
46652+ drvs->rx_address_match_errors =
46653+ port_stats->rx_address_match_errors;
46654+ drvs->rx_alignment_symbol_errors =
46655+ port_stats->rx_alignment_symbol_errors;
46656+
46657+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
46658+ drvs->tx_controlframes = port_stats->tx_controlframes;
46659+
46660+ if (adapter->port_num)
46661+ drvs->jabber_events =
46662+ rxf_stats->port1_jabber_events;
46663+ else
46664+ drvs->jabber_events =
46665+ rxf_stats->port0_jabber_events;
46666+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
46667+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
46668+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
46669+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
46670+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
46671+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
46672+ drvs->rx_drops_no_tpre_descr =
46673+ rxf_stats->rx_drops_no_tpre_descr;
46674+ drvs->rx_drops_too_many_frags =
46675+ rxf_stats->rx_drops_too_many_frags;
46676+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
46677+}
46678+
46679+static void populate_be3_stats(struct be_adapter *adapter)
46680+{
46681+ struct be_drv_stats *drvs = &adapter->drv_stats;
46682+ struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
46683+
46684+ struct be_rxf_stats_v1 *rxf_stats =
46685+ be_rxf_stats_from_cmd(adapter);
46686+ struct be_port_rxf_stats_v1 *port_stats =
46687+ be_port_rxf_stats_from_cmd(adapter);
46688+
46689+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
46690+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
46691+ drvs->rx_pause_frames = port_stats->rx_pause_frames;
46692+ drvs->rx_crc_errors = port_stats->rx_crc_errors;
46693+ drvs->rx_control_frames = port_stats->rx_control_frames;
46694+ drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
46695+ drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
46696+ drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
46697+ drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
46698+ drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
46699+ drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
46700+ drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
46701+ drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
46702+ drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
46703+ drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
46704+ drvs->rx_dropped_header_too_small =
46705+ port_stats->rx_dropped_header_too_small;
46706+ drvs->rx_input_fifo_overflow_drop =
46707+ port_stats->rx_input_fifo_overflow_drop;
46708+ drvs->rx_address_match_errors =
46709+ port_stats->rx_address_match_errors;
46710+ drvs->rx_alignment_symbol_errors =
46711+ port_stats->rx_alignment_symbol_errors;
46712+ drvs->rxpp_fifo_overflow_drop =
46713+ port_stats->rxpp_fifo_overflow_drop;
46714+ drvs->tx_pauseframes = port_stats->tx_pauseframes;
46715+ drvs->tx_controlframes = port_stats->tx_controlframes;
46716+ drvs->jabber_events = port_stats->jabber_events;
46717+ drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
46718+ drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
46719+ drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
46720+ drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
46721+ drvs->forwarded_packets = rxf_stats->forwarded_packets;
46722+ drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
46723+ drvs->rx_drops_no_tpre_descr =
46724+ rxf_stats->rx_drops_no_tpre_descr;
46725+ drvs->rx_drops_too_many_frags =
46726+ rxf_stats->rx_drops_too_many_frags;
46727+ adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
46728+}
46729+
46730+
46731+static void accumulate_16bit_val(u32 *acc, u16 val)
46732+{
46733+#define lo(x) (x & 0xFFFF)
46734+#define hi(x) (x & 0xFFFF0000)
46735+ bool wrapped = val < lo(*acc);
46736+ u32 newacc = hi(*acc) + val;
46737+
46738+ if (wrapped)
46739+ newacc += 65536;
46740+ ACCESS_ONCE_RW(*acc) = newacc;
46741+}
46742+
46743+void be_parse_stats(struct be_adapter *adapter)
46744+{
46745+ struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
46746+ struct be_rx_obj *rxo;
46747+ int i;
46748+
46749+ if (adapter->generation == BE_GEN3) {
46750+ populate_be3_stats(adapter);
46751+ } else {
46752+ populate_be2_stats(adapter);
46753+ }
46754+
46755+ /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
46756+ for_all_rx_queues(adapter, rxo, i) {
46757+ /* below erx HW counter can actually wrap around after
46758+ * 65535. Driver accumulates a 32-bit value
46759+ */
46760+ accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
46761+ (u16)erx->rx_drops_no_fragments[rxo->q.id]);
46762+ }
46763 }
46764
46765 void netdev_stats_update(struct be_adapter *adapter)
46766 {
46767- struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
46768- struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
46769- struct be_port_rxf_stats *port_stats =
46770- &rxf_stats->port[adapter->port_num];
46771- struct net_device_stats *dev_stats = &adapter->stats.net_stats;
46772- struct be_erx_stats *erx_stats = &hw_stats->erx;
46773+ struct be_drv_stats *drvs = &adapter->drv_stats;
46774+ struct net_device_stats *dev_stats = &adapter->net_stats;
46775+ struct be_rx_obj *rxo;
46776+ struct be_tx_obj *txo;
46777+ unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
46778+ int i;
46779
46780- dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
46781- dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
46782- dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
46783- dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
46784+ for_all_rx_queues(adapter, rxo, i) {
46785+ pkts += rx_stats(rxo)->rx_pkts;
46786+ bytes += rx_stats(rxo)->rx_bytes;
46787+ mcast += rx_stats(rxo)->rx_mcast_pkts;
46788+ drops += rx_stats(rxo)->rx_drops_no_frags;
46789+ }
46790+ dev_stats->rx_packets = pkts;
46791+ dev_stats->rx_bytes = bytes;
46792+ dev_stats->multicast = mcast;
46793+ dev_stats->rx_dropped = drops;
46794+
46795+ pkts = bytes = 0;
46796+ for_all_tx_queues(adapter, txo, i) {
46797+ pkts += tx_stats(txo)->be_tx_pkts;
46798+ bytes += tx_stats(txo)->be_tx_bytes;
46799+ }
46800+ dev_stats->tx_packets = pkts;
46801+ dev_stats->tx_bytes = bytes;
46802
46803 /* bad pkts received */
46804- dev_stats->rx_errors = port_stats->rx_crc_errors +
46805- port_stats->rx_alignment_symbol_errors +
46806- port_stats->rx_in_range_errors +
46807- port_stats->rx_out_range_errors +
46808- port_stats->rx_frame_too_long +
46809- port_stats->rx_dropped_too_small +
46810- port_stats->rx_dropped_too_short +
46811- port_stats->rx_dropped_header_too_small +
46812- port_stats->rx_dropped_tcp_length +
46813- port_stats->rx_dropped_runt +
46814- port_stats->rx_tcp_checksum_errs +
46815- port_stats->rx_ip_checksum_errs +
46816- port_stats->rx_udp_checksum_errs;
46817-
46818- /* no space in linux buffers: best possible approximation */
46819- dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0];
46820+ dev_stats->rx_errors = drvs->rx_crc_errors +
46821+ drvs->rx_alignment_symbol_errors +
46822+ drvs->rx_in_range_errors +
46823+ drvs->rx_out_range_errors +
46824+ drvs->rx_frame_too_long +
46825+ drvs->rx_dropped_too_small +
46826+ drvs->rx_dropped_too_short +
46827+ drvs->rx_dropped_header_too_small +
46828+ drvs->rx_dropped_tcp_length +
46829+ drvs->rx_dropped_runt +
46830+ drvs->rx_tcp_checksum_errs +
46831+ drvs->rx_ip_checksum_errs +
46832+ drvs->rx_udp_checksum_errs;
46833
46834 /* detailed rx errors */
46835- dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
46836- port_stats->rx_out_range_errors +
46837- port_stats->rx_frame_too_long;
46838+ dev_stats->rx_length_errors = drvs->rx_in_range_errors +
46839+ drvs->rx_out_range_errors +
46840+ drvs->rx_frame_too_long;
46841
46842- /* receive ring buffer overflow */
46843- dev_stats->rx_over_errors = 0;
46844-
46845- dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
46846+ dev_stats->rx_crc_errors = drvs->rx_crc_errors;
46847
46848 /* frame alignment errors */
46849- dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
46850+ dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
46851
46852 /* receiver fifo overrun */
46853 /* drops_no_pbuf is no per i/f, it's per BE card */
46854- dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
46855- port_stats->rx_input_fifo_overflow +
46856- rxf_stats->rx_drops_no_pbuf;
46857- /* receiver missed packetd */
46858- dev_stats->rx_missed_errors = 0;
46859-
46860- /* packet transmit problems */
46861- dev_stats->tx_errors = 0;
46862-
46863- /* no space available in linux */
46864- dev_stats->tx_dropped = 0;
46865-
46866- dev_stats->multicast = port_stats->rx_multicast_frames;
46867- dev_stats->collisions = 0;
46868-
46869- /* detailed tx_errors */
46870- dev_stats->tx_aborted_errors = 0;
46871- dev_stats->tx_carrier_errors = 0;
46872- dev_stats->tx_fifo_errors = 0;
46873- dev_stats->tx_heartbeat_errors = 0;
46874- dev_stats->tx_window_errors = 0;
46875+ dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
46876+ drvs->rx_input_fifo_overflow_drop +
46877+ drvs->rx_drops_no_pbuf;
46878 }
46879
46880-void be_link_status_update(struct be_adapter *adapter, bool link_up)
46881+void be_link_status_update(struct be_adapter *adapter, int link_status)
46882 {
46883 struct net_device *netdev = adapter->netdev;
46884
46885 /* If link came up or went down */
46886- if (adapter->link_up != link_up) {
46887- if (link_up) {
46888+ if (adapter->link_status != link_status) {
46889+ adapter->link_speed = -1;
46890+ if (link_status == LINK_UP) {
46891 netif_start_queue(netdev);
46892 netif_carrier_on(netdev);
46893 printk(KERN_INFO "%s: Link up\n", netdev->name);
46894@@ -222,15 +491,15 @@ void be_link_status_update(struct be_adapter *adapter, bool link_up)
46895 netif_carrier_off(netdev);
46896 printk(KERN_INFO "%s: Link down\n", netdev->name);
46897 }
46898- adapter->link_up = link_up;
46899+ adapter->link_status = link_status;
46900 }
46901 }
46902
46903 /* Update the EQ delay n BE based on the RX frags consumed / sec */
46904-static void be_rx_eqd_update(struct be_adapter *adapter)
46905+static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
46906 {
46907- struct be_eq_obj *rx_eq = &adapter->rx_eq;
46908- struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
46909+ struct be_eq_obj *rx_eq = &rxo->rx_eq;
46910+ struct be_rx_stats *stats = &rxo->stats;
46911 ulong now = jiffies;
46912 u32 eqd;
46913
46914@@ -247,19 +516,17 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
46915 if ((now - stats->rx_fps_jiffies) < HZ)
46916 return;
46917
46918- stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
46919+ stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
46920 ((now - stats->rx_fps_jiffies) / HZ);
46921
46922 stats->rx_fps_jiffies = now;
46923- stats->be_prev_rx_frags = stats->be_rx_frags;
46924- eqd = stats->be_rx_fps / 110000;
46925+ stats->prev_rx_frags = stats->rx_frags;
46926+ eqd = stats->rx_fps / 110000;
46927 eqd = eqd << 3;
46928 if (eqd > rx_eq->max_eqd)
46929 eqd = rx_eq->max_eqd;
46930 if (eqd < rx_eq->min_eqd)
46931 eqd = rx_eq->min_eqd;
46932- if (eqd < 10)
46933- eqd = 0;
46934 if (eqd != rx_eq->cur_eqd)
46935 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
46936
46937@@ -270,7 +537,7 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
46938 {
46939 struct be_adapter *adapter = netdev_priv(dev);
46940
46941- return &adapter->stats.net_stats;
46942+ return &adapter->net_stats;
46943 }
46944
46945 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
46946@@ -284,9 +551,9 @@ static u32 be_calc_rate(u64 bytes, unsigned long ticks)
46947 return rate;
46948 }
46949
46950-static void be_tx_rate_update(struct be_adapter *adapter)
46951+static void be_tx_rate_update(struct be_tx_obj *txo)
46952 {
46953- struct be_drvr_stats *stats = drvr_stats(adapter);
46954+ struct be_tx_stats *stats = tx_stats(txo);
46955 ulong now = jiffies;
46956
46957 /* Wrapped around? */
46958@@ -305,10 +572,11 @@ static void be_tx_rate_update(struct be_adapter *adapter)
46959 }
46960 }
46961
46962-static void be_tx_stats_update(struct be_adapter *adapter,
46963+static void be_tx_stats_update(struct be_tx_obj *txo,
46964 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
46965 {
46966- struct be_drvr_stats *stats = drvr_stats(adapter);
46967+ struct be_tx_stats *stats = tx_stats(txo);
46968+
46969 stats->be_tx_reqs++;
46970 stats->be_tx_wrbs += wrb_cnt;
46971 stats->be_tx_bytes += copied;
46972@@ -318,7 +586,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
46973 }
46974
46975 /* Determine number of WRB entries needed to xmit data in an skb */
46976-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
46977+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
46978+ bool *dummy)
46979 {
46980 int cnt = (skb->len > skb->data_len);
46981
46982@@ -326,12 +595,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
46983
46984 /* to account for hdr wrb */
46985 cnt++;
46986- if (cnt & 1) {
46987+ if (lancer_chip(adapter) || !(cnt & 1)) {
46988+ *dummy = false;
46989+ } else {
46990 /* add a dummy to make it an even num */
46991 cnt++;
46992 *dummy = true;
46993- } else
46994- *dummy = false;
46995+ }
46996 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
46997 return cnt;
46998 }
46999@@ -343,17 +613,31 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
47000 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
47001 }
47002
47003-static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47004- bool vlan, u32 wrb_cnt, u32 len)
47005+static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
47006+ struct sk_buff *skb, u32 wrb_cnt, u32 len)
47007 {
47008+ u16 vlan_tag = 0;
47009+
47010 memset(hdr, 0, sizeof(*hdr));
47011
47012 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
47013
47014- if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
47015+ if (skb_is_gso(skb)) {
47016 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
47017 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
47018 hdr, skb_shinfo(skb)->gso_size);
47019+ if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
47020+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
47021+
47022+ if (lancer_A0_chip(adapter)) {
47023+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
47024+ if (is_tcp_pkt(skb))
47025+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47026+ tcpcs, hdr, 1);
47027+ else if (is_udp_pkt(skb))
47028+ AMAP_SET_BITS(struct amap_eth_hdr_wrb,
47029+ udpcs, hdr, 1);
47030+ }
47031 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
47032 if (is_tcp_pkt(skb))
47033 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
47034@@ -361,10 +645,10 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47035 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
47036 }
47037
47038- if (vlan && vlan_tx_tag_present(skb)) {
47039+ if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
47040 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
47041- AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
47042- hdr, vlan_tx_tag_get(skb));
47043+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
47044+ AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
47045 }
47046
47047 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
47048@@ -374,14 +658,13 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
47049 }
47050
47051
47052-static int make_tx_wrbs(struct be_adapter *adapter,
47053+static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
47054 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
47055 {
47056- u64 busaddr;
47057- u32 i, copied = 0;
47058+ dma_addr_t busaddr;
47059+ int i, copied = 0;
47060 struct pci_dev *pdev = adapter->pdev;
47061 struct sk_buff *first_skb = skb;
47062- struct be_queue_info *txq = &adapter->tx_obj.q;
47063 struct be_eth_wrb *wrb;
47064 struct be_eth_hdr_wrb *hdr;
47065
47066@@ -389,15 +672,11 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47067 atomic_add(wrb_cnt, &txq->used);
47068 queue_head_inc(txq);
47069
47070- if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
47071- dev_err(&pdev->dev, "TX DMA mapping failed\n");
47072- return 0;
47073- }
47074-
47075 if (skb->len > skb->data_len) {
47076- int len = skb->len - skb->data_len;
47077+ int len = skb_headlen(skb);
47078+ busaddr = pci_map_single(pdev, skb->data, len,
47079+ PCI_DMA_TODEVICE);
47080 wrb = queue_head_node(txq);
47081- busaddr = skb_shinfo(skb)->dma_head;
47082 wrb_fill(wrb, busaddr, len);
47083 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47084 queue_head_inc(txq);
47085@@ -407,8 +686,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47086 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
47087 struct skb_frag_struct *frag =
47088 &skb_shinfo(skb)->frags[i];
47089-
47090- busaddr = skb_shinfo(skb)->dma_maps[i];
47091+ busaddr = pci_map_page(pdev, frag->page,
47092+ frag->page_offset,
47093+ frag->size, PCI_DMA_TODEVICE);
47094 wrb = queue_head_node(txq);
47095 wrb_fill(wrb, busaddr, frag->size);
47096 be_dws_cpu_to_le(wrb, sizeof(*wrb));
47097@@ -423,8 +703,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
47098 queue_head_inc(txq);
47099 }
47100
47101- wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
47102- wrb_cnt, copied);
47103+ wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
47104 be_dws_cpu_to_le(hdr, sizeof(*hdr));
47105
47106 return copied;
47107@@ -434,19 +713,70 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47108 struct net_device *netdev)
47109 {
47110 struct be_adapter *adapter = netdev_priv(netdev);
47111- struct be_tx_obj *tx_obj = &adapter->tx_obj;
47112- struct be_queue_info *txq = &tx_obj->q;
47113+ struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
47114+ struct be_queue_info *txq = &txo->q;
47115 u32 wrb_cnt = 0, copied = 0;
47116 u32 start = txq->head;
47117 bool dummy_wrb, stopped = false;
47118
47119- wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
47120+ if (unlikely((skb_shinfo(skb)->gso_segs > 1) &&
47121+ skb_shinfo(skb)->gso_size && is_ipv6_ext_hdr(skb))) {
47122+ tx_stats(txo)->be_ipv6_ext_hdr_tx_drop++;
47123+ goto tx_drop;
47124+ }
47125
47126- copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
47127+ /* If the skb is a large pkt forwarded to this interface
47128+ * after being LRO'd on another interface, drop the pkt.
47129+ * HW cannot handle such pkts. LRO must be disabled when
47130+ * using the server as a router.
47131+ */
47132+ if (!skb_is_gso(skb)) {
47133+ int eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
47134+ VLAN_ETH_HLEN : ETH_HLEN;
47135+
47136+ if ((skb->len - eth_hdr_len) > adapter->netdev->mtu)
47137+ goto tx_drop;
47138+ }
47139+
47140+ /* The ASIC is calculating checksum for Vlan tagged pkts
47141+ * though CSO is disabled.
47142+ * To work around this, insert the Vlan tag in the driver
47143+ * and donot set the vlan bit, cso bit in the Tx WRB.
47144+ */
47145+ if (unlikely(vlan_tx_tag_present(skb) &&
47146+ ((skb->ip_summed != CHECKSUM_PARTIAL) || (skb->len <= 60)))) {
47147+ /* Bug 28694: Don't embed the host VLAN tag in SKB
47148+ * when UMC mode enabled on that interface
47149+ */
47150+ if (!(adapter->function_mode & UMC_ENABLED)) {
47151+ skb = skb_share_check(skb, GFP_ATOMIC);
47152+ if (unlikely(!skb))
47153+ goto tx_drop;
47154+
47155+ skb = be_vlan_put_tag(skb,
47156+ be_get_tx_vlan_tag(adapter, skb));
47157+ if (unlikely(!skb))
47158+ goto tx_drop;
47159+
47160+ be_reset_skb_tx_vlan(skb);
47161+ }
47162+ }
47163+
47164+ /* Bug 12422: the stack can send us skbs with length more than 65535
47165+ * BE cannot handle such requests. Hack the extra data out and drop it.
47166+ */
47167+ if (skb->len > 65535) {
47168+ int err = __pskb_trim(skb, 65535);
47169+ BUG_ON(err);
47170+ }
47171+
47172+ wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
47173+
47174+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
47175 if (copied) {
47176 /* record the sent skb in the sent_skb table */
47177- BUG_ON(tx_obj->sent_skb_list[start]);
47178- tx_obj->sent_skb_list[start] = skb;
47179+ BUG_ON(txo->sent_skb_list[start]);
47180+ txo->sent_skb_list[start] = skb;
47181
47182 /* Ensure txq has space for the next skb; Else stop the queue
47183 * *BEFORE* ringing the tx doorbell, so that we serialze the
47184@@ -454,16 +784,21 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
47185 */
47186 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
47187 txq->len) {
47188- netif_stop_queue(netdev);
47189+ netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
47190 stopped = true;
47191 }
47192
47193 be_txq_notify(adapter, txq->id, wrb_cnt);
47194
47195- be_tx_stats_update(adapter, wrb_cnt, copied,
47196+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
47197+ netdev->trans_start = jiffies;
47198+#endif
47199+
47200+ be_tx_stats_update(txo, wrb_cnt, copied,
47201 skb_shinfo(skb)->gso_segs, stopped);
47202 } else {
47203 txq->head = start;
47204+tx_drop:
47205 dev_kfree_skb_any(skb);
47206 }
47207 return NETDEV_TX_OK;
47208@@ -473,10 +808,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47209 {
47210 struct be_adapter *adapter = netdev_priv(netdev);
47211 if (new_mtu < BE_MIN_MTU ||
47212- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
47213+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
47214+ (ETH_HLEN + ETH_FCS_LEN))) {
47215 dev_info(&adapter->pdev->dev,
47216 "MTU must be between %d and %d bytes\n",
47217- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
47218+ BE_MIN_MTU,
47219+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
47220 return -EINVAL;
47221 }
47222 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
47223@@ -486,17 +823,19 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
47224 }
47225
47226 /*
47227- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
47228- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
47229- * set the BE in promiscuous VLAN mode.
47230+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
47231+ * If the user configures more, place BE in vlan promiscuous mode.
47232 */
47233-static int be_vid_config(struct be_adapter *adapter)
47234+static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
47235 {
47236 u16 vtag[BE_NUM_VLANS_SUPPORTED];
47237 u16 ntags = 0, i;
47238- int status;
47239+ int status = 0;
47240
47241- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
47242+ /* No need to change the VLAN state if the I/F is in promiscous */
47243+ if (adapter->promiscuous)
47244+ return 0;
47245+ if (adapter->vlans_added <= adapter->max_vlans) {
47246 /* Construct VLAN Table to give to HW */
47247 for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
47248 if (adapter->vlan_tag[i]) {
47249@@ -504,47 +843,46 @@ static int be_vid_config(struct be_adapter *adapter)
47250 ntags++;
47251 }
47252 }
47253- status = be_cmd_vlan_config(adapter, adapter->if_handle,
47254- vtag, ntags, 1, 0);
47255+ /* Send command only if there is something to be programmed */
47256+ if (ntags)
47257+ status = be_cmd_vlan_config(adapter, adapter->if_handle,
47258+ vtag, ntags, 1, 0);
47259 } else {
47260 status = be_cmd_vlan_config(adapter, adapter->if_handle,
47261- NULL, 0, 1, 1);
47262+ NULL, 0, 1, 1);
47263 }
47264+
47265 return status;
47266 }
47267
47268 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
47269 {
47270 struct be_adapter *adapter = netdev_priv(netdev);
47271- struct be_eq_obj *rx_eq = &adapter->rx_eq;
47272- struct be_eq_obj *tx_eq = &adapter->tx_eq;
47273
47274- be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
47275- be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
47276 adapter->vlan_grp = grp;
47277- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
47278- be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
47279 }
47280
47281 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
47282 {
47283 struct be_adapter *adapter = netdev_priv(netdev);
47284
47285- adapter->num_vlans++;
47286+ adapter->vlans_added++;
47287+
47288 adapter->vlan_tag[vid] = 1;
47289-
47290- be_vid_config(adapter);
47291+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
47292+ be_vid_config(adapter, false, 0);
47293 }
47294
47295 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
47296 {
47297 struct be_adapter *adapter = netdev_priv(netdev);
47298
47299- adapter->num_vlans--;
47300- adapter->vlan_tag[vid] = 0;
47301-
47302+ adapter->vlans_added--;
47303 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
47304- be_vid_config(adapter);
47305+
47306+ adapter->vlan_tag[vid] = 0;
47307+ if (adapter->vlans_added <= adapter->max_vlans)
47308+ be_vid_config(adapter, false, 0);
47309 }
47310
47311 static void be_set_multicast_list(struct net_device *netdev)
47312@@ -552,7 +890,7 @@ static void be_set_multicast_list(struct net_device *netdev)
47313 struct be_adapter *adapter = netdev_priv(netdev);
47314
47315 if (netdev->flags & IFF_PROMISC) {
47316- be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
47317+ be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
47318 adapter->promiscuous = true;
47319 goto done;
47320 }
47321@@ -560,81 +898,244 @@ static void be_set_multicast_list(struct net_device *netdev)
47322 /* BE was previously in promiscous mode; disable it */
47323 if (adapter->promiscuous) {
47324 adapter->promiscuous = false;
47325- be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
47326+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
47327+
47328+ if (adapter->vlans_added)
47329+ be_vid_config(adapter, false, 0);
47330 }
47331
47332- if (netdev->flags & IFF_ALLMULTI) {
47333- be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0);
47334+ /* Enable multicast promisc if num configured exceeds what we support */
47335+ if (netdev->flags & IFF_ALLMULTI ||
47336+ netdev_mc_count(netdev) > BE_MAX_MC) {
47337+ be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
47338 goto done;
47339 }
47340
47341- be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
47342- netdev->mc_count);
47343+ be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
47344 done:
47345 return;
47346 }
47347
47348-static void be_rx_rate_update(struct be_adapter *adapter)
47349+#ifdef HAVE_SRIOV_CONFIG
47350+static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
47351 {
47352- struct be_drvr_stats *stats = drvr_stats(adapter);
47353+ struct be_adapter *adapter = netdev_priv(netdev);
47354+ int status;
47355+
47356+ if (adapter->num_vfs == 0)
47357+ return -EPERM;
47358+
47359+ if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
47360+ return -EINVAL;
47361+
47362+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
47363+ status = be_cmd_pmac_del(adapter,
47364+ adapter->vf_cfg[vf].vf_if_handle,
47365+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47366+
47367+ status = be_cmd_pmac_add(adapter, mac,
47368+ adapter->vf_cfg[vf].vf_if_handle,
47369+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
47370+
47371+ if (status)
47372+ dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
47373+ mac, vf);
47374+ else
47375+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
47376+
47377+ return status;
47378+}
47379+
47380+static int be_get_vf_config(struct net_device *netdev, int vf,
47381+ struct ifla_vf_info *vi)
47382+{
47383+ struct be_adapter *adapter = netdev_priv(netdev);
47384+
47385+ if (adapter->num_vfs == 0)
47386+ return -EPERM;
47387+
47388+ if (vf >= adapter->num_vfs)
47389+ return -EINVAL;
47390+
47391+ vi->vf = vf;
47392+ vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
47393+ vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag & VLAN_VID_MASK;
47394+ vi->qos = adapter->vf_cfg[vf].vf_vlan_tag >> VLAN_PRIO_SHIFT;
47395+ memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
47396+
47397+ return 0;
47398+}
47399+
47400+/*
47401+ * Entry point to configure vlan behavior for a VF.
47402+ * 1. By default a VF is vlan Challenged.
47403+ * 2. It may or may not have Transparent Tagging enabled.
47404+ * 3. Vlan privilege for a VF can be toggled using special VID 4095.
47405+ * 4. When removing the Vlan privilege for a VF there is no need set default vid
47406+ * 5. Transparent Tagging configured for a VF resets its Vlan privilege
47407+ * 6. To disable the current Transparet Tagging for a VF:
47408+ * 6a. run the last iproute command with vlan set to 0.
47409+ * 6b. programing the default vid will disable Transparent Tagging in ARM/ASIC
47410+ */
47411+static int be_set_vf_vlan(struct net_device *netdev,
47412+ int vf, u16 vlan, u8 qos)
47413+{
47414+ struct be_adapter *adapter = netdev_priv(netdev);
47415+ int status = 0;
47416+ u32 en = 0;
47417+
47418+ if (adapter->num_vfs == 0)
47419+ return -EPERM;
47420+
47421+ if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
47422+ return -EINVAL;
47423+
47424+ status = be_cmd_get_fn_privileges(adapter, &en, vf + 1);
47425+ if (status)
47426+ goto sts;
47427+
47428+ if (vlan == 4095) {
47429+ if (en & BE_PRIV_FILTMGMT) {
47430+ /* Knock off filtering privileges */
47431+ en &= ~BE_PRIV_FILTMGMT;
47432+ } else {
47433+ en |= BE_PRIV_FILTMGMT;
47434+ /* Transparent Tagging is currently enabled, Reset it */
47435+ if (adapter->vf_cfg[vf].vf_vlan_tag) {
47436+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
47437+ vlan = adapter->vf_cfg[vf].vf_def_vid;
47438+ be_cmd_set_hsw_config(adapter, vlan, vf + 1,
47439+ adapter->vf_cfg[vf].vf_if_handle);
47440+ }
47441+ }
47442+
47443+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
47444+ status = be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
47445+
47446+ goto sts;
47447+ }
47448+
47449+ if (vlan || qos) {
47450+ if (en & BE_PRIV_FILTMGMT) {
47451+ /* Check privilege and reset it to default */
47452+ en &= ~BE_PRIV_FILTMGMT;
47453+ be_cmd_set_fn_privileges(adapter, en, NULL, vf + 1);
47454+ }
47455+
47456+ vlan |= qos << VLAN_PRIO_SHIFT;
47457+ if (adapter->vf_cfg[vf].vf_vlan_tag != vlan) {
47458+ /* If this is new value, program it. Else skip. */
47459+ adapter->vf_cfg[vf].vf_vlan_tag = vlan;
47460+
47461+ status = be_cmd_set_hsw_config(adapter, vlan,
47462+ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
47463+ }
47464+
47465+ } else {
47466+ /* Reset Transparent Vlan Tagging. */
47467+ adapter->vf_cfg[vf].vf_vlan_tag = 0;
47468+ vlan = adapter->vf_cfg[vf].vf_def_vid;
47469+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
47470+ adapter->vf_cfg[vf].vf_if_handle);
47471+ }
47472+
47473+sts:
47474+ if (status)
47475+ dev_info(&adapter->pdev->dev,
47476+ "VLAN %d config on VF %d failed\n", vlan, vf);
47477+ return status;
47478+}
47479+
47480+static int be_set_vf_tx_rate(struct net_device *netdev,
47481+ int vf, int rate)
47482+{
47483+ struct be_adapter *adapter = netdev_priv(netdev);
47484+ int status = 0;
47485+
47486+ if (adapter->num_vfs == 0)
47487+ return -EPERM;
47488+
47489+ if ((vf >= adapter->num_vfs) || (rate < 0))
47490+ return -EINVAL;
47491+
47492+ if (rate > 10000)
47493+ rate = 10000;
47494+
47495+ adapter->vf_cfg[vf].vf_tx_rate = rate;
47496+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
47497+
47498+ if (status)
47499+ dev_info(&adapter->pdev->dev,
47500+ "tx rate %d on VF %d failed\n", rate, vf);
47501+ return status;
47502+}
47503+#endif /* HAVE_SRIOV_CONFIG */
47504+
47505+static void be_rx_rate_update(struct be_rx_obj *rxo)
47506+{
47507+ struct be_rx_stats *stats = &rxo->stats;
47508 ulong now = jiffies;
47509
47510 /* Wrapped around */
47511- if (time_before(now, stats->be_rx_jiffies)) {
47512- stats->be_rx_jiffies = now;
47513+ if (time_before(now, stats->rx_jiffies)) {
47514+ stats->rx_jiffies = now;
47515 return;
47516 }
47517
47518 /* Update the rate once in two seconds */
47519- if ((now - stats->be_rx_jiffies) < 2 * HZ)
47520+ if ((now - stats->rx_jiffies) < 2 * HZ)
47521 return;
47522
47523- stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
47524- - stats->be_rx_bytes_prev,
47525- now - stats->be_rx_jiffies);
47526- stats->be_rx_jiffies = now;
47527- stats->be_rx_bytes_prev = stats->be_rx_bytes;
47528+ stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
47529+ now - stats->rx_jiffies);
47530+ stats->rx_jiffies = now;
47531+ stats->rx_bytes_prev = stats->rx_bytes;
47532 }
47533
47534-static void be_rx_stats_update(struct be_adapter *adapter,
47535- u32 pktsize, u16 numfrags)
47536+static void be_rx_stats_update(struct be_rx_obj *rxo,
47537+ struct be_rx_compl_info *rxcp)
47538 {
47539- struct be_drvr_stats *stats = drvr_stats(adapter);
47540+ struct be_rx_stats *stats = &rxo->stats;
47541
47542- stats->be_rx_compl++;
47543- stats->be_rx_frags += numfrags;
47544- stats->be_rx_bytes += pktsize;
47545- stats->be_rx_pkts++;
47546+ stats->rx_compl++;
47547+ stats->rx_frags += rxcp->num_rcvd;
47548+ stats->rx_bytes += rxcp->pkt_size;
47549+ stats->rx_pkts++;
47550+ if (rxcp->pkt_type == BE_MULTICAST_PACKET)
47551+ stats->rx_mcast_pkts++;
47552+ if (rxcp->err)
47553+ stats->rxcp_err++;
47554 }
47555
47556-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
47557+static inline bool csum_passed(struct be_rx_compl_info *rxcp)
47558 {
47559- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
47560-
47561- l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
47562- ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
47563- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
47564- if (ip_version) {
47565- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
47566- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
47567- }
47568- ipv6_chk = (ip_version && (tcpf || udpf));
47569-
47570- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
47571+ /* L4 checksum is not reliable for non TCP/UDP packets.
47572+ * Also ignore ipcksm for ipv6 pkts */
47573+ return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
47574+ (rxcp->ip_csum || rxcp->ipv6);
47575 }
47576
47577 static struct be_rx_page_info *
47578-get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
47579+get_rx_page_info(struct be_adapter *adapter, struct be_rx_obj *rxo,
47580+ u16 frag_idx)
47581 {
47582 struct be_rx_page_info *rx_page_info;
47583- struct be_queue_info *rxq = &adapter->rx_obj.q;
47584+ struct be_queue_info *rxq = &rxo->q;
47585
47586- rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
47587- BUG_ON(!rx_page_info->page);
47588+ rx_page_info = &rxo->page_info_tbl[frag_idx];
47589+ if (!rx_page_info->page) {
47590+ printk(KERN_EMERG "curr_idx=%d prev_dix=%d rxq->head=%d\n",
47591+ frag_idx, rxo->prev_frag_idx, rxq->head);
47592+ BUG_ON(!rx_page_info->page);
47593+ }
47594
47595- if (rx_page_info->last_page_user)
47596+ if (rx_page_info->last_page_user) {
47597 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
47598 adapter->big_page_size, PCI_DMA_FROMDEVICE);
47599+ rx_page_info->last_page_user = false;
47600+ }
47601+
47602+ rxo->prev_frag_idx = frag_idx;
47603
47604 atomic_dec(&rxq->used);
47605 return rx_page_info;
47606@@ -642,20 +1143,26 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
47607
47608 /* Throwaway the data in the Rx completion */
47609 static void be_rx_compl_discard(struct be_adapter *adapter,
47610- struct be_eth_rx_compl *rxcp)
47611+ struct be_rx_obj *rxo,
47612+ struct be_rx_compl_info *rxcp)
47613 {
47614- struct be_queue_info *rxq = &adapter->rx_obj.q;
47615+ struct be_queue_info *rxq = &rxo->q;
47616 struct be_rx_page_info *page_info;
47617- u16 rxq_idx, i, num_rcvd;
47618+ u16 i;
47619+ bool oob_error;
47620+ u16 num_rcvd = rxcp->num_rcvd;
47621
47622- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
47623- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
47624+ oob_error = lancer_A0_chip(adapter) && rxcp->err;
47625+
47626+ /* In case of OOB error num_rcvd will be 1 more than actual */
47627+ if (oob_error && num_rcvd)
47628+ num_rcvd -= 1;
47629
47630 for (i = 0; i < num_rcvd; i++) {
47631- page_info = get_rx_page_info(adapter, rxq_idx);
47632+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
47633 put_page(page_info->page);
47634 memset(page_info, 0, sizeof(*page_info));
47635- index_inc(&rxq_idx, rxq->len);
47636+ index_inc(&rxcp->rxq_idx, rxq->len);
47637 }
47638 }
47639
47640@@ -663,29 +1170,24 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
47641 * skb_fill_rx_data forms a complete skb for an ether frame
47642 * indicated by rxcp.
47643 */
47644-static void skb_fill_rx_data(struct be_adapter *adapter,
47645- struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
47646+static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
47647+ struct sk_buff *skb, struct be_rx_compl_info *rxcp)
47648 {
47649- struct be_queue_info *rxq = &adapter->rx_obj.q;
47650+ struct be_queue_info *rxq = &rxo->q;
47651 struct be_rx_page_info *page_info;
47652- u16 rxq_idx, i, num_rcvd, j;
47653- u32 pktsize, hdr_len, curr_frag_len, size;
47654+ u16 i, j;
47655+ u16 hdr_len, curr_frag_len, remaining;
47656 u8 *start;
47657
47658- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
47659- pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
47660- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
47661-
47662- page_info = get_rx_page_info(adapter, rxq_idx);
47663-
47664+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
47665 start = page_address(page_info->page) + page_info->page_offset;
47666 prefetch(start);
47667
47668 /* Copy data in the first descriptor of this completion */
47669- curr_frag_len = min(pktsize, rx_frag_size);
47670+ curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
47671
47672 /* Copy the header portion into skb_data */
47673- hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
47674+ hdr_len = min(BE_HDR_LEN, curr_frag_len);
47675 memcpy(skb->data, start, hdr_len);
47676 skb->len = curr_frag_len;
47677 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
47678@@ -702,21 +1204,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
47679 skb->data_len = curr_frag_len - hdr_len;
47680 skb->tail += hdr_len;
47681 }
47682- memset(page_info, 0, sizeof(*page_info));
47683+ page_info->page = NULL;
47684
47685- if (pktsize <= rx_frag_size) {
47686- BUG_ON(num_rcvd != 1);
47687- goto done;
47688+ if (rxcp->pkt_size <= rx_frag_size) {
47689+ BUG_ON(rxcp->num_rcvd != 1);
47690+ return;
47691 }
47692
47693 /* More frags present for this completion */
47694- size = pktsize;
47695- for (i = 1, j = 0; i < num_rcvd; i++) {
47696- size -= curr_frag_len;
47697- index_inc(&rxq_idx, rxq->len);
47698- page_info = get_rx_page_info(adapter, rxq_idx);
47699-
47700- curr_frag_len = min(size, rx_frag_size);
47701+ index_inc(&rxcp->rxq_idx, rxq->len);
47702+ remaining = rxcp->pkt_size - curr_frag_len;
47703+ for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
47704+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
47705+ curr_frag_len = min(remaining, rx_frag_size);
47706
47707 /* Coalesce all frags from the same physical page in one slot */
47708 if (page_info->page_offset == 0) {
47709@@ -735,99 +1235,122 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
47710 skb->len += curr_frag_len;
47711 skb->data_len += curr_frag_len;
47712
47713- memset(page_info, 0, sizeof(*page_info));
47714+ remaining -= curr_frag_len;
47715+ index_inc(&rxcp->rxq_idx, rxq->len);
47716+ page_info->page = NULL;
47717 }
47718 BUG_ON(j > MAX_SKB_FRAGS);
47719-
47720-done:
47721- be_rx_stats_update(adapter, pktsize, num_rcvd);
47722- return;
47723 }
47724
47725-/* Process the RX completion indicated by rxcp when GRO is disabled */
47726+/* Process the RX completion indicated by rxcp when LRO is disabled */
47727 static void be_rx_compl_process(struct be_adapter *adapter,
47728- struct be_eth_rx_compl *rxcp)
47729+ struct be_rx_obj *rxo,
47730+ struct be_rx_compl_info *rxcp)
47731 {
47732 struct sk_buff *skb;
47733- u32 vlanf, vid;
47734- u8 vtm;
47735
47736- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
47737- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
47738-
47739- /* vlanf could be wrongly set in some cards.
47740- * ignore if vtm is not set */
47741- if ((adapter->cap == 0x400) && !vtm)
47742- vlanf = 0;
47743-
47744- skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN);
47745- if (!skb) {
47746+ skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
47747+ if (unlikely(!skb)) {
47748 if (net_ratelimit())
47749 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
47750- be_rx_compl_discard(adapter, rxcp);
47751+ be_rx_compl_discard(adapter, rxo, rxcp);
47752 return;
47753 }
47754
47755- skb_reserve(skb, NET_IP_ALIGN);
47756+ skb_fill_rx_data(adapter, rxo, skb, rxcp);
47757
47758- skb_fill_rx_data(adapter, skb, rxcp);
47759-
47760- if (do_pkt_csum(rxcp, adapter->rx_csum))
47761- skb->ip_summed = CHECKSUM_NONE;
47762- else
47763+ if (likely(adapter->rx_csum && csum_passed(rxcp)))
47764 skb->ip_summed = CHECKSUM_UNNECESSARY;
47765+ else
47766+ skb->ip_summed = CHECKSUM_NONE;
47767
47768 skb->truesize = skb->len + sizeof(struct sk_buff);
47769+ if (unlikely(rxcp->vlanf) &&
47770+ unlikely(!vlan_configured(adapter))) {
47771+ __vlan_put_tag(skb, rxcp->vlan_tag);
47772+ }
47773 skb->protocol = eth_type_trans(skb, adapter->netdev);
47774 skb->dev = adapter->netdev;
47775
47776- if (vlanf) {
47777- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
47778- kfree_skb(skb);
47779- return;
47780- }
47781- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
47782- vid = be16_to_cpu(vid);
47783- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
47784- } else {
47785+ if (unlikely(rxcp->vlanf) &&
47786+ vlan_configured(adapter))
47787+ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
47788+ rxcp->vlan_tag);
47789+ else
47790 netif_receive_skb(skb);
47791+
47792+ return;
47793+}
47794+
47795+/* Process the RX completion indicated by rxcp when LRO is enabled */
47796+static void be_rx_compl_process_lro(struct be_adapter *adapter,
47797+ struct be_rx_obj *rxo,
47798+ struct be_rx_compl_info *rxcp)
47799+{
47800+ struct be_rx_page_info *page_info;
47801+ struct skb_frag_struct rx_frags[BE_MAX_FRAGS_PER_FRAME];
47802+ struct be_queue_info *rxq = &rxo->q;
47803+ u16 remaining, curr_frag_len;
47804+ u16 i, j;
47805+
47806+ remaining = rxcp->pkt_size;
47807+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
47808+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
47809+
47810+ curr_frag_len = min(remaining, rx_frag_size);
47811+
47812+ /* Coalesce all frags from the same physical page in one slot */
47813+ if (i == 0 || page_info->page_offset == 0) {
47814+ /* First frag or Fresh page */
47815+ j++;
47816+ rx_frags[j].page = page_info->page;
47817+ rx_frags[j].page_offset = page_info->page_offset;
47818+ rx_frags[j].size = 0;
47819+ } else {
47820+ put_page(page_info->page);
47821+ }
47822+ rx_frags[j].size += curr_frag_len;
47823+
47824+ remaining -= curr_frag_len;
47825+ index_inc(&rxcp->rxq_idx, rxq->len);
47826+ memset(page_info, 0, sizeof(*page_info));
47827+ }
47828+ BUG_ON(j > MAX_SKB_FRAGS);
47829+
47830+ if (likely(!rxcp->vlanf)) {
47831+ lro_receive_frags(&rxo->lro_mgr, rx_frags, rxcp->pkt_size,
47832+ rxcp->pkt_size, NULL, 0);
47833+ } else {
47834+ lro_vlan_hwaccel_receive_frags(&rxo->lro_mgr, rx_frags,
47835+ rxcp->pkt_size, rxcp->pkt_size, adapter->vlan_grp,
47836+ rxcp->vlan_tag, NULL, 0);
47837 }
47838
47839 return;
47840 }
47841
47842 /* Process the RX completion indicated by rxcp when GRO is enabled */
47843-static void be_rx_compl_process_gro(struct be_adapter *adapter,
47844- struct be_eth_rx_compl *rxcp)
47845+void be_rx_compl_process_gro(struct be_adapter *adapter,
47846+ struct be_rx_obj *rxo,
47847+ struct be_rx_compl_info *rxcp)
47848 {
47849+#ifdef NETIF_F_GRO
47850 struct be_rx_page_info *page_info;
47851 struct sk_buff *skb = NULL;
47852- struct be_queue_info *rxq = &adapter->rx_obj.q;
47853- struct be_eq_obj *eq_obj = &adapter->rx_eq;
47854- u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
47855- u16 i, rxq_idx = 0, vid, j;
47856- u8 vtm;
47857-
47858- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
47859- pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
47860- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
47861- rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
47862- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
47863-
47864- /* vlanf could be wrongly set in some cards.
47865- * ignore if vtm is not set */
47866- if ((adapter->cap == 0x400) && !vtm)
47867- vlanf = 0;
47868+ struct be_queue_info *rxq = &rxo->q;
47869+ struct be_eq_obj *eq_obj = &rxo->rx_eq;
47870+ u16 remaining, curr_frag_len;
47871+ u16 i, j;
47872
47873 skb = napi_get_frags(&eq_obj->napi);
47874 if (!skb) {
47875- be_rx_compl_discard(adapter, rxcp);
47876+ be_rx_compl_discard(adapter, rxo, rxcp);
47877 return;
47878 }
47879
47880- remaining = pkt_size;
47881- for (i = 0, j = -1; i < num_rcvd; i++) {
47882- page_info = get_rx_page_info(adapter, rxq_idx);
47883+ remaining = rxcp->pkt_size;
47884+ for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
47885+ page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
47886
47887 curr_frag_len = min(remaining, rx_frag_size);
47888
47889@@ -845,55 +1368,129 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
47890 skb_shinfo(skb)->frags[j].size += curr_frag_len;
47891
47892 remaining -= curr_frag_len;
47893- index_inc(&rxq_idx, rxq->len);
47894+ index_inc(&rxcp->rxq_idx, rxq->len);
47895 memset(page_info, 0, sizeof(*page_info));
47896 }
47897 BUG_ON(j > MAX_SKB_FRAGS);
47898
47899 skb_shinfo(skb)->nr_frags = j + 1;
47900- skb->len = pkt_size;
47901- skb->data_len = pkt_size;
47902- skb->truesize += pkt_size;
47903+ skb->len = rxcp->pkt_size;
47904+ skb->data_len = rxcp->pkt_size;
47905+ skb->truesize += rxcp->pkt_size;
47906 skb->ip_summed = CHECKSUM_UNNECESSARY;
47907
47908- if (likely(!vlanf)) {
47909+ if (likely(!rxcp->vlanf))
47910 napi_gro_frags(&eq_obj->napi);
47911- } else {
47912- vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
47913- vid = be16_to_cpu(vid);
47914+ else
47915+ vlan_gro_frags(&eq_obj->napi,
47916+ adapter->vlan_grp, rxcp->vlan_tag);
47917+#endif
47918
47919- if (!adapter->vlan_grp || adapter->num_vlans == 0)
47920- return;
47921-
47922- vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
47923- }
47924-
47925- be_rx_stats_update(adapter, pkt_size, num_rcvd);
47926 return;
47927 }
47928
47929-static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
47930+static void be_parse_rx_compl_v1(struct be_adapter *adapter,
47931+ struct be_eth_rx_compl *compl,
47932+ struct be_rx_compl_info *rxcp)
47933 {
47934- struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);
47935+ rxcp->pkt_size =
47936+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
47937+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
47938+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
47939+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
47940+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
47941+ rxcp->ip_csum =
47942+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
47943+ rxcp->l4_csum =
47944+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
47945+ rxcp->ipv6 =
47946+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
47947+ rxcp->rxq_idx =
47948+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
47949+ rxcp->num_rcvd =
47950+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
47951+ rxcp->pkt_type =
47952+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
47953+ if (rxcp->vlanf) {
47954+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
47955+ compl);
47956+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
47957+ vlan_tag, compl);
47958+ }
47959+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
47960+}
47961
47962- if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
47963+static void be_parse_rx_compl_v0(struct be_adapter *adapter,
47964+ struct be_eth_rx_compl *compl,
47965+ struct be_rx_compl_info *rxcp)
47966+{
47967+ rxcp->pkt_size =
47968+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
47969+ rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
47970+ rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
47971+ rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
47972+ rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
47973+ rxcp->ip_csum =
47974+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
47975+ rxcp->l4_csum =
47976+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
47977+ rxcp->ipv6 =
47978+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
47979+ rxcp->rxq_idx =
47980+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
47981+ rxcp->num_rcvd =
47982+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
47983+ rxcp->pkt_type =
47984+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
47985+ if (rxcp->vlanf) {
47986+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
47987+ compl);
47988+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
47989+ vlan_tag, compl);
47990+ }
47991+ rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
47992+}
47993+
47994+static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
47995+{
47996+ struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
47997+ struct be_rx_compl_info *rxcp = &rxo->rxcp;
47998+ struct be_adapter *adapter = rxo->adapter;
47999+
48000+ /* For checking the valid bit it is Ok to use either definition as the
48001+ * valid bit is at the same position in both v0 and v1 Rx compl */
48002+ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
48003 return NULL;
48004
48005- be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
48006+ rmb();
48007+ be_dws_le_to_cpu(compl, sizeof(*compl));
48008
48009- queue_tail_inc(&adapter->rx_obj.cq);
48010+ if (adapter->be3_native)
48011+ be_parse_rx_compl_v1(adapter, compl, rxcp);
48012+ else
48013+ be_parse_rx_compl_v0(adapter, compl, rxcp);
48014+
48015+ if (rxcp->vlanf) {
48016+ /* vlanf could be wrongly set in some cards.
48017+ * ignore if vtm is not set */
48018+ if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
48019+ rxcp->vlanf = 0;
48020+
48021+ if (!lancer_chip(adapter))
48022+ rxcp->vlan_tag = swab16(rxcp->vlan_tag);
48023+
48024+ if ((adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK)) &&
48025+ !adapter->vlan_tag[rxcp->vlan_tag])
48026+ rxcp->vlanf = 0;
48027+ }
48028+
48029+ /* As the compl has been parsed, reset it; we wont touch it again */
48030+ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
48031+
48032+ queue_tail_inc(&rxo->cq);
48033 return rxcp;
48034 }
48035
48036-/* To reset the valid bit, we need to reset the whole word as
48037- * when walking the queue the valid entries are little-endian
48038- * and invalid entries are host endian
48039- */
48040-static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
48041-{
48042- rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
48043-}
48044-
48045 static inline struct page *be_alloc_pages(u32 size)
48046 {
48047 gfp_t alloc_flags = GFP_ATOMIC;
48048@@ -907,11 +1504,12 @@ static inline struct page *be_alloc_pages(u32 size)
48049 * Allocate a page, split it to fragments of size rx_frag_size and post as
48050 * receive buffers to BE
48051 */
48052-static void be_post_rx_frags(struct be_adapter *adapter)
48053+static void be_post_rx_frags(struct be_rx_obj *rxo)
48054 {
48055- struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
48056- struct be_rx_page_info *page_info = NULL;
48057- struct be_queue_info *rxq = &adapter->rx_obj.q;
48058+ struct be_adapter *adapter = rxo->adapter;
48059+ struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
48060+ struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
48061+ struct be_queue_info *rxq = &rxo->q;
48062 struct page *pagep = NULL;
48063 struct be_eth_rx_d *rxd;
48064 u64 page_dmaaddr = 0, frag_dmaaddr;
48065@@ -922,7 +1520,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48066 if (!pagep) {
48067 pagep = be_alloc_pages(adapter->big_page_size);
48068 if (unlikely(!pagep)) {
48069- drvr_stats(adapter)->be_ethrx_post_fail++;
48070+ rxo->stats.rx_post_fail++;
48071 break;
48072 }
48073 page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
48074@@ -941,7 +1539,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48075 rxd = queue_head_node(rxq);
48076 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
48077 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
48078- queue_head_inc(rxq);
48079
48080 /* Any space left in the current big page for another frag? */
48081 if ((page_offset + rx_frag_size + rx_frag_size) >
48082@@ -949,17 +1546,24 @@ static void be_post_rx_frags(struct be_adapter *adapter)
48083 pagep = NULL;
48084 page_info->last_page_user = true;
48085 }
48086+
48087+ prev_page_info = page_info;
48088+ queue_head_inc(rxq);
48089 page_info = &page_info_tbl[rxq->head];
48090 }
48091 if (pagep)
48092- page_info->last_page_user = true;
48093+ prev_page_info->last_page_user = true;
48094
48095+ /* Ensure that posting buffers is the last thing done by this
48096+ * routine to avoid racing between rx bottom-half and
48097+ * be_worker (process) contexts.
48098+ */
48099 if (posted) {
48100 atomic_add(posted, &rxq->used);
48101 be_rxq_notify(adapter, rxq->id, posted);
48102 } else if (atomic_read(&rxq->used) == 0) {
48103 /* Let be_worker replenish when memory is available */
48104- adapter->rx_post_starved = true;
48105+ rxo->rx_post_starved = true;
48106 }
48107
48108 return;
48109@@ -972,6 +1576,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48110 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
48111 return NULL;
48112
48113+ rmb();
48114 be_dws_le_to_cpu(txcp, sizeof(*txcp));
48115
48116 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
48117@@ -980,11 +1585,14 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
48118 return txcp;
48119 }
48120
48121-static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48122+static u16 be_tx_compl_process(struct be_adapter *adapter,
48123+ struct be_tx_obj *txo, u16 last_index)
48124 {
48125- struct be_queue_info *txq = &adapter->tx_obj.q;
48126- struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
48127+ struct be_queue_info *txq = &txo->q;
48128+ struct be_eth_wrb *wrb;
48129+ struct sk_buff **sent_skbs = txo->sent_skb_list;
48130 struct sk_buff *sent_skb;
48131+ u64 busaddr;
48132 u16 cur_index, num_wrbs = 0;
48133
48134 cur_index = txq->tail;
48135@@ -992,15 +1600,31 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
48136 BUG_ON(!sent_skb);
48137 sent_skbs[cur_index] = NULL;
48138
48139- do {
48140+ wrb = queue_tail_node(txq);
48141+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
48142+ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48143+ if (busaddr != 0) {
48144+ pci_unmap_single(adapter->pdev, busaddr,
48145+ wrb->frag_len, PCI_DMA_TODEVICE);
48146+ }
48147+ num_wrbs++;
48148+ queue_tail_inc(txq);
48149+
48150+ while (cur_index != last_index) {
48151 cur_index = txq->tail;
48152+ wrb = queue_tail_node(txq);
48153+ be_dws_le_to_cpu(wrb, sizeof(*wrb));
48154+ busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
48155+ if (busaddr != 0) {
48156+ pci_unmap_page(adapter->pdev, busaddr,
48157+ wrb->frag_len, PCI_DMA_TODEVICE);
48158+ }
48159 num_wrbs++;
48160 queue_tail_inc(txq);
48161- } while (cur_index != last_index);
48162+ }
48163
48164- atomic_sub(num_wrbs, &txq->used);
48165- skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
48166 kfree_skb(sent_skb);
48167+ return num_wrbs;
48168 }
48169
48170 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48171@@ -1010,13 +1634,15 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
48172 if (!eqe->evt)
48173 return NULL;
48174
48175+ rmb();
48176 eqe->evt = le32_to_cpu(eqe->evt);
48177 queue_tail_inc(&eq_obj->q);
48178 return eqe;
48179 }
48180
48181 static int event_handle(struct be_adapter *adapter,
48182- struct be_eq_obj *eq_obj)
48183+ struct be_eq_obj *eq_obj,
48184+ bool rearm)
48185 {
48186 struct be_eq_entry *eqe;
48187 u16 num = 0;
48188@@ -1029,7 +1655,10 @@ static int event_handle(struct be_adapter *adapter,
48189 /* Deal with any spurious interrupts that come
48190 * without events
48191 */
48192- be_eq_notify(adapter, eq_obj->q.id, true, true, num);
48193+ if (!num)
48194+ rearm = true;
48195+
48196+ be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
48197 if (num)
48198 napi_schedule(&eq_obj->napi);
48199
48200@@ -1053,49 +1682,55 @@ static void be_eq_clean(struct be_adapter *adapter,
48201 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
48202 }
48203
48204-static void be_rx_q_clean(struct be_adapter *adapter)
48205+static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
48206 {
48207 struct be_rx_page_info *page_info;
48208- struct be_queue_info *rxq = &adapter->rx_obj.q;
48209- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
48210- struct be_eth_rx_compl *rxcp;
48211+ struct be_queue_info *rxq = &rxo->q;
48212+ struct be_queue_info *rx_cq = &rxo->cq;
48213+ struct be_rx_compl_info *rxcp;
48214 u16 tail;
48215
48216 /* First cleanup pending rx completions */
48217- while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
48218- be_rx_compl_discard(adapter, rxcp);
48219- be_rx_compl_reset(rxcp);
48220+ while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
48221+ be_rx_compl_discard(adapter, rxo, rxcp);
48222 be_cq_notify(adapter, rx_cq->id, true, 1);
48223 }
48224
48225 /* Then free posted rx buffer that were not used */
48226 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
48227 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
48228- page_info = get_rx_page_info(adapter, tail);
48229+ page_info = get_rx_page_info(adapter, rxo, tail);
48230 put_page(page_info->page);
48231 memset(page_info, 0, sizeof(*page_info));
48232 }
48233 BUG_ON(atomic_read(&rxq->used));
48234+ rxq->tail = rxq->head = 0;
48235 }
48236
48237-static void be_tx_compl_clean(struct be_adapter *adapter)
48238+static void be_tx_compl_clean(struct be_adapter *adapter,
48239+ struct be_tx_obj *txo)
48240 {
48241- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
48242- struct be_queue_info *txq = &adapter->tx_obj.q;
48243+ struct be_queue_info *tx_cq = &txo->cq;
48244+ struct be_queue_info *txq = &txo->q;
48245 struct be_eth_tx_compl *txcp;
48246- u16 end_idx, cmpl = 0, timeo = 0;
48247+ u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
48248+ struct sk_buff **sent_skbs = txo->sent_skb_list;
48249+ struct sk_buff *sent_skb;
48250+ bool dummy_wrb;
48251
48252 /* Wait for a max of 200ms for all the tx-completions to arrive. */
48253 do {
48254 while ((txcp = be_tx_compl_get(tx_cq))) {
48255 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
48256 wrb_index, txcp);
48257- be_tx_compl_process(adapter, end_idx);
48258+ num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
48259 cmpl++;
48260 }
48261 if (cmpl) {
48262 be_cq_notify(adapter, tx_cq->id, false, cmpl);
48263+ atomic_sub(num_wrbs, &txq->used);
48264 cmpl = 0;
48265+ num_wrbs = 0;
48266 }
48267
48268 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
48269@@ -1107,6 +1742,17 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
48270 if (atomic_read(&txq->used))
48271 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
48272 atomic_read(&txq->used));
48273+
48274+ /* free posted tx for which compls will never arrive */
48275+ while (atomic_read(&txq->used)) {
48276+ sent_skb = sent_skbs[txq->tail];
48277+ end_idx = txq->tail;
48278+ index_adv(&end_idx,
48279+ wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
48280+ txq->len);
48281+ num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
48282+ atomic_sub(num_wrbs, &txq->used);
48283+ }
48284 }
48285
48286 static void be_mcc_queues_destroy(struct be_adapter *adapter)
48287@@ -1145,8 +1791,9 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
48288 goto mcc_cq_destroy;
48289
48290 /* Ask BE to create MCC queue */
48291- if (be_cmd_mccq_create(adapter, q, cq))
48292+ if (be_cmd_mccq_create(adapter, q, cq)) {
48293 goto mcc_q_free;
48294+ }
48295
48296 return 0;
48297
48298@@ -1163,16 +1810,20 @@ err:
48299 static void be_tx_queues_destroy(struct be_adapter *adapter)
48300 {
48301 struct be_queue_info *q;
48302+ struct be_tx_obj *txo;
48303+ u8 i;
48304
48305- q = &adapter->tx_obj.q;
48306- if (q->created)
48307- be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48308- be_queue_free(adapter, q);
48309+ for_all_tx_queues(adapter, txo, i) {
48310+ q = &txo->q;
48311+ if (q->created)
48312+ be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
48313+ be_queue_free(adapter, q);
48314
48315- q = &adapter->tx_obj.cq;
48316- if (q->created)
48317- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48318- be_queue_free(adapter, q);
48319+ q = &txo->cq;
48320+ if (q->created)
48321+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48322+ be_queue_free(adapter, q);
48323+ }
48324
48325 /* Clear any residual events */
48326 be_eq_clean(adapter, &adapter->tx_eq);
48327@@ -1183,168 +1834,210 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
48328 be_queue_free(adapter, q);
48329 }
48330
48331+/* One TX event queue is shared by all TX compl qs */
48332 static int be_tx_queues_create(struct be_adapter *adapter)
48333 {
48334 struct be_queue_info *eq, *q, *cq;
48335+ struct be_tx_obj *txo;
48336+ u8 i, tc_id;
48337
48338 adapter->tx_eq.max_eqd = 0;
48339 adapter->tx_eq.min_eqd = 0;
48340 adapter->tx_eq.cur_eqd = 96;
48341 adapter->tx_eq.enable_aic = false;
48342- /* Alloc Tx Event queue */
48343+
48344 eq = &adapter->tx_eq.q;
48345- if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
48346+ if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
48347+ sizeof(struct be_eq_entry)))
48348 return -1;
48349
48350- /* Ask BE to create Tx Event queue */
48351 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
48352- goto tx_eq_free;
48353- /* Alloc TX eth compl queue */
48354- cq = &adapter->tx_obj.cq;
48355- if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48356+ goto err;
48357+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
48358+
48359+ for_all_tx_queues(adapter, txo, i) {
48360+ cq = &txo->cq;
48361+ if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
48362 sizeof(struct be_eth_tx_compl)))
48363- goto tx_eq_destroy;
48364+ goto err;
48365
48366- /* Ask BE to create Tx eth compl queue */
48367- if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48368- goto tx_cq_free;
48369+ if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
48370+ goto err;
48371
48372- /* Alloc TX eth queue */
48373- q = &adapter->tx_obj.q;
48374- if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
48375- goto tx_cq_destroy;
48376+ q = &txo->q;
48377+ if (be_queue_alloc(adapter, q, TX_Q_LEN,
48378+ sizeof(struct be_eth_wrb)))
48379+ goto err;
48380
48381- /* Ask BE to create Tx eth queue */
48382- if (be_cmd_txq_create(adapter, q, cq))
48383- goto tx_q_free;
48384+ if (be_cmd_txq_create(adapter, q, cq, &tc_id))
48385+ goto err;
48386+
48387+ if (adapter->flags & BE_FLAGS_DCBX)
48388+ adapter->tc_txq_map[tc_id] = i;
48389+ }
48390 return 0;
48391
48392-tx_q_free:
48393- be_queue_free(adapter, q);
48394-tx_cq_destroy:
48395- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
48396-tx_cq_free:
48397- be_queue_free(adapter, cq);
48398-tx_eq_destroy:
48399- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
48400-tx_eq_free:
48401- be_queue_free(adapter, eq);
48402+err:
48403+ be_tx_queues_destroy(adapter);
48404 return -1;
48405 }
48406
48407 static void be_rx_queues_destroy(struct be_adapter *adapter)
48408 {
48409 struct be_queue_info *q;
48410+ struct be_rx_obj *rxo;
48411+ int i;
48412
48413- q = &adapter->rx_obj.q;
48414- if (q->created) {
48415- be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
48416- be_rx_q_clean(adapter);
48417- }
48418- be_queue_free(adapter, q);
48419+ for_all_rx_queues(adapter, rxo, i) {
48420+ be_queue_free(adapter, &rxo->q);
48421+
48422+ q = &rxo->cq;
48423+ if (q->created)
48424+ be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48425+ be_queue_free(adapter, q);
48426
48427- q = &adapter->rx_obj.cq;
48428- if (q->created)
48429- be_cmd_q_destroy(adapter, q, QTYPE_CQ);
48430- be_queue_free(adapter, q);
48431+ q = &rxo->rx_eq.q;
48432+ if (q->created)
48433+ be_cmd_q_destroy(adapter, q, QTYPE_EQ);
48434+ be_queue_free(adapter, q);
48435
48436- /* Clear any residual events */
48437- be_eq_clean(adapter, &adapter->rx_eq);
48438+ kfree(rxo->page_info_tbl);
48439+ }
48440+}
48441
48442- q = &adapter->rx_eq.q;
48443- if (q->created)
48444- be_cmd_q_destroy(adapter, q, QTYPE_EQ);
48445- be_queue_free(adapter, q);
48446+/* Is BE in a multi-channel mode */
48447+static inline bool be_is_mc(struct be_adapter *adapter) {
48448+ return (adapter->function_mode & FLEX10_MODE ||
48449+ adapter->function_mode & VNIC_MODE ||
48450+ adapter->function_mode & UMC_ENABLED);
48451+}
48452+
48453+static u32 be_num_rxqs_want(struct be_adapter *adapter)
48454+{
48455+ if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
48456+ adapter->num_vfs == 0 && be_physfn(adapter) &&
48457+ !be_is_mc(adapter)) {
48458+ return 1 + MAX_RSS_QS; /* one default non-RSS queue */
48459+ } else {
48460+ dev_warn(&adapter->pdev->dev,
48461+ "No support for multiple RX queues\n");
48462+ return 1;
48463+ }
48464 }
48465
48466 static int be_rx_queues_create(struct be_adapter *adapter)
48467 {
48468 struct be_queue_info *eq, *q, *cq;
48469- int rc;
48470+ struct be_rx_obj *rxo;
48471+ int rc, i;
48472
48473+ adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
48474+ msix_enabled(adapter) ?
48475+ adapter->num_msix_vec - 1 : 1);
48476+ if (adapter->num_rx_qs != MAX_RX_QS)
48477+ dev_warn(&adapter->pdev->dev,
48478+ "Could create only %d receive queues",
48479+ adapter->num_rx_qs);
48480+
48481+ adapter->max_rx_coal = gro ? BE_INIT_FRAGS_PER_FRAME : 1;
48482 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
48483- adapter->rx_eq.max_eqd = BE_MAX_EQD;
48484- adapter->rx_eq.min_eqd = 0;
48485- adapter->rx_eq.cur_eqd = 0;
48486- adapter->rx_eq.enable_aic = true;
48487+ for_all_rx_queues(adapter, rxo, i) {
48488+ rxo->adapter = adapter;
48489+ rxo->rx_eq.max_eqd = BE_MAX_EQD;
48490+ rxo->rx_eq.enable_aic = true;
48491
48492- /* Alloc Rx Event queue */
48493- eq = &adapter->rx_eq.q;
48494- rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
48495- sizeof(struct be_eq_entry));
48496- if (rc)
48497- return rc;
48498+ /* EQ */
48499+ eq = &rxo->rx_eq.q;
48500+ rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
48501+ sizeof(struct be_eq_entry));
48502+ if (rc)
48503+ goto err;
48504
48505- /* Ask BE to create Rx Event queue */
48506- rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
48507- if (rc)
48508- goto rx_eq_free;
48509+ rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
48510+ if (rc)
48511+ goto err;
48512
48513- /* Alloc RX eth compl queue */
48514- cq = &adapter->rx_obj.cq;
48515- rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
48516- sizeof(struct be_eth_rx_compl));
48517- if (rc)
48518- goto rx_eq_destroy;
48519+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
48520
48521- /* Ask BE to create Rx eth compl queue */
48522- rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
48523- if (rc)
48524- goto rx_cq_free;
48525+ /* CQ */
48526+ cq = &rxo->cq;
48527+ rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
48528+ sizeof(struct be_eth_rx_compl));
48529+ if (rc)
48530+ goto err;
48531
48532- /* Alloc RX eth queue */
48533- q = &adapter->rx_obj.q;
48534- rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
48535- if (rc)
48536- goto rx_cq_destroy;
48537+ rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
48538+ if (rc)
48539+ goto err;
48540
48541- /* Ask BE to create Rx eth queue */
48542- rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
48543- BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
48544- if (rc)
48545- goto rx_q_free;
48546+ /* Rx Q - will be created in be_open() */
48547+ q = &rxo->q;
48548+ rc = be_queue_alloc(adapter, q, RX_Q_LEN,
48549+ sizeof(struct be_eth_rx_d));
48550+ if (rc)
48551+ goto err;
48552+
48553+ rxo->page_info_tbl = kzalloc(sizeof(struct be_rx_page_info) *
48554+ RX_Q_LEN, GFP_KERNEL);
48555+ if (!rxo->page_info_tbl)
48556+ goto err;
48557+ }
48558
48559 return 0;
48560-rx_q_free:
48561- be_queue_free(adapter, q);
48562-rx_cq_destroy:
48563- be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
48564-rx_cq_free:
48565- be_queue_free(adapter, cq);
48566-rx_eq_destroy:
48567- be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
48568-rx_eq_free:
48569- be_queue_free(adapter, eq);
48570- return rc;
48571+err:
48572+ be_rx_queues_destroy(adapter);
48573+ return -1;
48574 }
48575
48576-/* There are 8 evt ids per func. Retruns the evt id's bit number */
48577-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
48578+static bool event_peek(struct be_eq_obj *eq_obj)
48579 {
48580- return eq_id - 8 * be_pci_func(adapter);
48581+ struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
48582+ if (!eqe->evt)
48583+ return false;
48584+ else
48585+ return true;
48586 }
48587
48588 static irqreturn_t be_intx(int irq, void *dev)
48589 {
48590 struct be_adapter *adapter = dev;
48591- int isr;
48592+ struct be_rx_obj *rxo;
48593+ int isr, i, tx = 0 , rx = 0;
48594
48595- isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
48596- be_pci_func(adapter) * CEV_ISR_SIZE);
48597- if (!isr)
48598- return IRQ_NONE;
48599+ if (lancer_chip(adapter)) {
48600+ if (event_peek(&adapter->tx_eq))
48601+ tx = event_handle(adapter, &adapter->tx_eq, false);
48602+ for_all_rx_queues(adapter, rxo, i) {
48603+ if (event_peek(&rxo->rx_eq))
48604+ rx |= event_handle(adapter, &rxo->rx_eq, true);
48605+ }
48606
48607- event_handle(adapter, &adapter->tx_eq);
48608- event_handle(adapter, &adapter->rx_eq);
48609+ if (!(tx || rx))
48610+ return IRQ_NONE;
48611+ } else {
48612+ isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
48613+ (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
48614+ if (!isr)
48615+ return IRQ_NONE;
48616+
48617+ if ((1 << adapter->tx_eq.eq_idx & isr))
48618+ event_handle(adapter, &adapter->tx_eq, false);
48619+
48620+ for_all_rx_queues(adapter, rxo, i) {
48621+ if ((1 << rxo->rx_eq.eq_idx & isr))
48622+ event_handle(adapter, &rxo->rx_eq, true);
48623+ }
48624+ }
48625
48626 return IRQ_HANDLED;
48627 }
48628
48629 static irqreturn_t be_msix_rx(int irq, void *dev)
48630 {
48631- struct be_adapter *adapter = dev;
48632+ struct be_rx_obj *rxo = dev;
48633+ struct be_adapter *adapter = rxo->adapter;
48634
48635- event_handle(adapter, &adapter->rx_eq);
48636+ event_handle(adapter, &rxo->rx_eq, true);
48637
48638 return IRQ_HANDLED;
48639 }
48640@@ -1353,48 +2046,72 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
48641 {
48642 struct be_adapter *adapter = dev;
48643
48644- event_handle(adapter, &adapter->tx_eq);
48645+ event_handle(adapter, &adapter->tx_eq, false);
48646
48647 return IRQ_HANDLED;
48648 }
48649
48650 static inline bool do_gro(struct be_adapter *adapter,
48651- struct be_eth_rx_compl *rxcp)
48652+ struct be_rx_compl_info *rxcp)
48653 {
48654- int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
48655- int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
48656-
48657- if (err)
48658- drvr_stats(adapter)->be_rxcp_err++;
48659-
48660- return (tcp_frame && !err) ? true : false;
48661+ return (!rxcp->tcpf || rxcp->err || adapter->max_rx_coal <= 1 ||
48662+ (rxcp->vlanf && !vlan_configured(adapter))) ?
48663+ false : true;
48664 }
48665
48666 int be_poll_rx(struct napi_struct *napi, int budget)
48667 {
48668 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
48669- struct be_adapter *adapter =
48670- container_of(rx_eq, struct be_adapter, rx_eq);
48671- struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
48672- struct be_eth_rx_compl *rxcp;
48673+ struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
48674+ struct be_adapter *adapter = rxo->adapter;
48675+ struct be_queue_info *rx_cq = &rxo->cq;
48676+ struct be_rx_compl_info *rxcp;
48677 u32 work_done;
48678+ bool flush_lro = false;
48679
48680+ rxo->stats.rx_polls++;
48681 for (work_done = 0; work_done < budget; work_done++) {
48682- rxcp = be_rx_compl_get(adapter);
48683+ rxcp = be_rx_compl_get(rxo);
48684 if (!rxcp)
48685 break;
48686
48687- if (do_gro(adapter, rxcp))
48688- be_rx_compl_process_gro(adapter, rxcp);
48689- else
48690- be_rx_compl_process(adapter, rxcp);
48691+ /* Is it a flush compl that has no data */
48692+ if (unlikely(rxcp->num_rcvd == 0))
48693+ continue;
48694
48695- be_rx_compl_reset(rxcp);
48696+ if (unlikely(rxcp->port != adapter->port_num)) {
48697+ be_rx_compl_discard(adapter, rxo, rxcp);
48698+ be_rx_stats_update(rxo, rxcp);
48699+ continue;
48700+ }
48701+
48702+ if (likely((lancer_A0_chip(adapter) && !rxcp->err) ||
48703+ !lancer_A0_chip(adapter))) {
48704+ if (do_gro(adapter, rxcp)) {
48705+ if (adapter->gro_supported) {
48706+ be_rx_compl_process_gro(adapter, rxo,
48707+ rxcp);
48708+ } else {
48709+ be_rx_compl_process_lro(adapter, rxo,
48710+ rxcp);
48711+ flush_lro = true;
48712+ }
48713+ } else {
48714+ be_rx_compl_process(adapter, rxo, rxcp);
48715+ }
48716+ } else if (lancer_A0_chip(adapter) && rxcp->err) {
48717+ be_rx_compl_discard(adapter, rxo, rxcp);
48718+ }
48719+
48720+ be_rx_stats_update(rxo, rxcp);
48721 }
48722
48723+ if (flush_lro)
48724+ lro_flush_all(&rxo->lro_mgr);
48725+
48726 /* Refill the queue */
48727- if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
48728- be_post_rx_frags(adapter);
48729+ if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
48730+ be_post_rx_frags(rxo);
48731
48732 /* All consumed */
48733 if (work_done < budget) {
48734@@ -1404,40 +2121,13 @@ int be_poll_rx(struct napi_struct *napi, int budget)
48735 /* More to be consumed; continue with interrupts disabled */
48736 be_cq_notify(adapter, rx_cq->id, false, work_done);
48737 }
48738+
48739+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
48740+ adapter->netdev->last_rx = jiffies;
48741+#endif
48742 return work_done;
48743 }
48744
48745-void be_process_tx(struct be_adapter *adapter)
48746-{
48747- struct be_queue_info *txq = &adapter->tx_obj.q;
48748- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
48749- struct be_eth_tx_compl *txcp;
48750- u32 num_cmpl = 0;
48751- u16 end_idx;
48752-
48753- while ((txcp = be_tx_compl_get(tx_cq))) {
48754- end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
48755- wrb_index, txcp);
48756- be_tx_compl_process(adapter, end_idx);
48757- num_cmpl++;
48758- }
48759-
48760- if (num_cmpl) {
48761- be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
48762-
48763- /* As Tx wrbs have been freed up, wake up netdev queue if
48764- * it was stopped due to lack of tx wrbs.
48765- */
48766- if (netif_queue_stopped(adapter->netdev) &&
48767- atomic_read(&txq->used) < txq->len / 2) {
48768- netif_wake_queue(adapter->netdev);
48769- }
48770-
48771- drvr_stats(adapter)->be_tx_events++;
48772- drvr_stats(adapter)->be_tx_compl += num_cmpl;
48773- }
48774-}
48775-
48776 /* As TX and MCC share the same EQ check for both TX and MCC completions.
48777 * For TX/MCC we don't honour budget; consume everything
48778 */
48779@@ -1446,96 +2136,264 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
48780 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
48781 struct be_adapter *adapter =
48782 container_of(tx_eq, struct be_adapter, tx_eq);
48783+ struct be_tx_obj *txo;
48784+ struct be_eth_tx_compl *txcp;
48785+ int tx_compl, mcc_compl, status = 0;
48786+ u8 i;
48787+ u16 num_wrbs;
48788+
48789+ for_all_tx_queues(adapter, txo, i) {
48790+ tx_compl = 0;
48791+ num_wrbs = 0;
48792+ while ((txcp = be_tx_compl_get(&txo->cq))) {
48793+ num_wrbs += be_tx_compl_process(adapter, txo,
48794+ AMAP_GET_BITS(struct amap_eth_tx_compl,
48795+ wrb_index, txcp));
48796+ tx_compl++;
48797+ }
48798+ if (tx_compl) {
48799+ be_cq_notify(adapter, txo->cq.id, true, tx_compl);
48800+
48801+ atomic_sub(num_wrbs, &txo->q.used);
48802+
48803+ /* As Tx wrbs have been freed up, wake up netdev queue
48804+ * if it was stopped due to lack of tx wrbs. */
48805+ if (__netif_subqueue_stopped(adapter->netdev, i) &&
48806+ atomic_read(&txo->q.used) < txo->q.len / 2) {
48807+ netif_wake_subqueue(adapter->netdev, i);
48808+ }
48809+
48810+ adapter->drv_stats.be_tx_events++;
48811+ txo->stats.be_tx_compl += tx_compl;
48812+ }
48813+ }
48814+
48815+ mcc_compl = be_process_mcc(adapter, &status);
48816+
48817+ if (mcc_compl) {
48818+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
48819+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
48820+ }
48821
48822 napi_complete(napi);
48823
48824- be_process_tx(adapter);
48825-
48826- be_process_mcc(adapter);
48827-
48828+ be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
48829 return 1;
48830 }
48831
48832+void be_detect_dump_ue(struct be_adapter *adapter)
48833+{
48834+ u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
48835+ u32 i;
48836+
48837+ pci_read_config_dword(adapter->pdev,
48838+ PCICFG_UE_STATUS_LOW, &ue_status_lo);
48839+ pci_read_config_dword(adapter->pdev,
48840+ PCICFG_UE_STATUS_HIGH, &ue_status_hi);
48841+ pci_read_config_dword(adapter->pdev,
48842+ PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
48843+ pci_read_config_dword(adapter->pdev,
48844+ PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
48845+
48846+ ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
48847+ ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
48848+
48849+ if (ue_status_lo || ue_status_hi) {
48850+ adapter->ue_detected = true;
48851+ adapter->eeh_err = true;
48852+ dev_err(&adapter->pdev->dev, "UE Detected!!\n");
48853+ }
48854+
48855+ if (ue_status_lo) {
48856+ for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
48857+ if (ue_status_lo & 1)
48858+ dev_err(&adapter->pdev->dev,
48859+ "UE: %s bit set\n", ue_status_low_desc[i]);
48860+ }
48861+ }
48862+ if (ue_status_hi) {
48863+ for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
48864+ if (ue_status_hi & 1)
48865+ dev_err(&adapter->pdev->dev,
48866+ "UE: %s bit set\n", ue_status_hi_desc[i]);
48867+ }
48868+ }
48869+
48870+}
48871+
48872 static void be_worker(struct work_struct *work)
48873 {
48874 struct be_adapter *adapter =
48875 container_of(work, struct be_adapter, work.work);
48876+ struct be_rx_obj *rxo;
48877+ struct be_tx_obj *txo;
48878+ int i;
48879
48880- be_cmd_get_stats(adapter, &adapter->stats.cmd);
48881+ if (!adapter->ue_detected && !lancer_chip(adapter))
48882+ be_detect_dump_ue(adapter);
48883
48884- /* Set EQ delay */
48885- be_rx_eqd_update(adapter);
48886+ /* when interrupts are not yet enabled, just reap any pending
48887+ * mcc completions */
48888+ if (!netif_running(adapter->netdev)) {
48889+ int mcc_compl, status = 0;
48890
48891- be_tx_rate_update(adapter);
48892- be_rx_rate_update(adapter);
48893+ mcc_compl = be_process_mcc(adapter, &status);
48894
48895- if (adapter->rx_post_starved) {
48896- adapter->rx_post_starved = false;
48897- be_post_rx_frags(adapter);
48898+ if (mcc_compl) {
48899+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
48900+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
48901+ }
48902+
48903+ goto reschedule;
48904+ }
48905+
48906+ if (!adapter->stats_cmd_sent)
48907+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
48908+
48909+ for_all_tx_queues(adapter, txo, i)
48910+ be_tx_rate_update(txo);
48911+
48912+ for_all_rx_queues(adapter, rxo, i) {
48913+ be_rx_rate_update(rxo);
48914+ be_rx_eqd_update(adapter, rxo);
48915+
48916+ if (rxo->rx_post_starved) {
48917+ rxo->rx_post_starved = false;
48918+ be_post_rx_frags(rxo);
48919+ }
48920 }
48921
48922+reschedule:
48923+ adapter->work_counter++;
48924 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
48925 }
48926
48927+static void be_msix_disable(struct be_adapter *adapter)
48928+{
48929+ if (msix_enabled(adapter)) {
48930+ pci_disable_msix(adapter->pdev);
48931+ adapter->num_msix_vec = 0;
48932+ }
48933+}
48934+
48935 static void be_msix_enable(struct be_adapter *adapter)
48936 {
48937- int i, status;
48938+#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
48939+ int i, status, num_vec;
48940
48941- for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
48942+ num_vec = be_num_rxqs_want(adapter) + 1;
48943+
48944+ for (i = 0; i < num_vec; i++)
48945 adapter->msix_entries[i].entry = i;
48946
48947- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
48948- BE_NUM_MSIX_VECTORS);
48949- if (status == 0)
48950- adapter->msix_enabled = true;
48951+ status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
48952+ if (status == 0) {
48953+ goto done;
48954+ } else if (status >= BE_MIN_MSIX_VECTORS) {
48955+ num_vec = status;
48956+ if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
48957+ num_vec) == 0)
48958+ goto done;
48959+ }
48960 return;
48961+done:
48962+ adapter->num_msix_vec = num_vec;
48963+ return;
48964+}
48965+
48966+static void be_sriov_enable(struct be_adapter *adapter)
48967+{
48968+ be_check_sriov_fn_type(adapter);
48969+#ifdef CONFIG_PCI_IOV
48970+ if (be_physfn(adapter) && num_vfs) {
48971+ int status, pos;
48972+ u16 nvfs;
48973+
48974+ pos = pci_find_ext_capability(adapter->pdev,
48975+ PCI_EXT_CAP_ID_SRIOV);
48976+ pci_read_config_word(adapter->pdev,
48977+ pos + PCI_SRIOV_TOTAL_VF, &nvfs);
48978+ adapter->num_vfs = num_vfs;
48979+ if (num_vfs > nvfs) {
48980+ dev_info(&adapter->pdev->dev,
48981+ "Device supports %d VFs and not %d\n",
48982+ nvfs, num_vfs);
48983+ adapter->num_vfs = nvfs;
48984+ }
48985+
48986+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
48987+ if (status)
48988+ adapter->num_vfs = 0;
48989+ }
48990+#endif
48991+}
48992+
48993+static void be_sriov_disable(struct be_adapter *adapter)
48994+{
48995+#ifdef CONFIG_PCI_IOV
48996+ if (adapter->num_vfs > 0) {
48997+ pci_disable_sriov(adapter->pdev);
48998+ adapter->num_vfs = 0;
48999+ }
49000+#endif
49001 }
49002
49003-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
49004+static inline int be_msix_vec_get(struct be_adapter *adapter,
49005+ struct be_eq_obj *eq_obj)
49006 {
49007- return adapter->msix_entries[
49008- be_evt_bit_get(adapter, eq_id)].vector;
49009+ return adapter->msix_entries[eq_obj->eq_idx].vector;
49010 }
49011
49012 static int be_request_irq(struct be_adapter *adapter,
49013 struct be_eq_obj *eq_obj,
49014- void *handler, char *desc)
49015+ void *handler, char *desc, void *context)
49016 {
49017 struct net_device *netdev = adapter->netdev;
49018 int vec;
49019
49020 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
49021- vec = be_msix_vec_get(adapter, eq_obj->q.id);
49022- return request_irq(vec, handler, 0, eq_obj->desc, adapter);
49023+ vec = be_msix_vec_get(adapter, eq_obj);
49024+ return request_irq(vec, handler, 0, eq_obj->desc, context);
49025 }
49026
49027-static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
49028+static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
49029+ void *context)
49030 {
49031- int vec = be_msix_vec_get(adapter, eq_obj->q.id);
49032- free_irq(vec, adapter);
49033+ int vec = be_msix_vec_get(adapter, eq_obj);
49034+ free_irq(vec, context);
49035 }
49036
49037 static int be_msix_register(struct be_adapter *adapter)
49038 {
49039- int status;
49040+ struct be_rx_obj *rxo;
49041+ int status, i;
49042+ char qname[10];
49043
49044- status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
49045+ status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
49046+ adapter);
49047 if (status)
49048 goto err;
49049
49050- status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
49051- if (status)
49052- goto free_tx_irq;
49053+ for_all_rx_queues(adapter, rxo, i) {
49054+ sprintf(qname, "rxq%d", i);
49055+ status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
49056+ qname, rxo);
49057+ if (status)
49058+ goto err_msix;
49059+ }
49060
49061 return 0;
49062
49063-free_tx_irq:
49064- be_free_irq(adapter, &adapter->tx_eq);
49065+err_msix:
49066+ be_free_irq(adapter, &adapter->tx_eq, adapter);
49067+
49068+ for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
49069+ be_free_irq(adapter, &rxo->rx_eq, rxo);
49070+
49071 err:
49072 dev_warn(&adapter->pdev->dev,
49073 "MSIX Request IRQ failed - err %d\n", status);
49074- pci_disable_msix(adapter->pdev);
49075- adapter->msix_enabled = false;
49076+ be_msix_disable(adapter);
49077 return status;
49078 }
49079
49080@@ -1544,10 +2402,13 @@ static int be_irq_register(struct be_adapter *adapter)
49081 struct net_device *netdev = adapter->netdev;
49082 int status;
49083
49084- if (adapter->msix_enabled) {
49085+ if (msix_enabled(adapter)) {
49086 status = be_msix_register(adapter);
49087 if (status == 0)
49088 goto done;
49089+ /* INTx is not supported for VF */
49090+ if (!be_physfn(adapter))
49091+ return status;
49092 }
49093
49094 /* INTx */
49095@@ -1567,87 +2428,363 @@ done:
49096 static void be_irq_unregister(struct be_adapter *adapter)
49097 {
49098 struct net_device *netdev = adapter->netdev;
49099+ struct be_rx_obj *rxo;
49100+ int i;
49101
49102 if (!adapter->isr_registered)
49103 return;
49104
49105 /* INTx */
49106- if (!adapter->msix_enabled) {
49107+ if (!msix_enabled(adapter)) {
49108 free_irq(netdev->irq, adapter);
49109 goto done;
49110 }
49111
49112 /* MSIx */
49113- be_free_irq(adapter, &adapter->tx_eq);
49114- be_free_irq(adapter, &adapter->rx_eq);
49115+ be_free_irq(adapter, &adapter->tx_eq, adapter);
49116+
49117+ for_all_rx_queues(adapter, rxo, i)
49118+ be_free_irq(adapter, &rxo->rx_eq, rxo);
49119+
49120 done:
49121 adapter->isr_registered = false;
49122- return;
49123 }
49124
49125-static int be_open(struct net_device *netdev)
49126+static u16 be_select_queue(struct net_device *netdev,
49127+ struct sk_buff *skb)
49128 {
49129 struct be_adapter *adapter = netdev_priv(netdev);
49130- struct be_eq_obj *rx_eq = &adapter->rx_eq;
49131+ u8 prio;
49132+
49133+ if (adapter->num_tx_qs == 1)
49134+ return 0;
49135+
49136+ prio = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
49137+ return adapter->tc_txq_map[adapter->prio_tc_map[prio]];
49138+}
49139+
49140+static void be_rx_queues_clear(struct be_adapter *adapter)
49141+{
49142+ struct be_queue_info *q;
49143+ struct be_rx_obj *rxo;
49144+ int i;
49145+
49146+ for_all_rx_queues(adapter, rxo, i) {
49147+ q = &rxo->q;
49148+ if (q->created) {
49149+ be_cmd_rxq_destroy(adapter, q);
49150+ /* After the rxq is invalidated, wait for a grace time
49151+ * of 1ms for all dma to end and the flush compl to
49152+ * arrive
49153+ */
49154+ mdelay(1);
49155+ be_rx_q_clean(adapter, rxo);
49156+ }
49157+
49158+ /* Clear any residual events */
49159+ q = &rxo->rx_eq.q;
49160+ if (q->created)
49161+ be_eq_clean(adapter, &rxo->rx_eq);
49162+ }
49163+}
49164+
49165+static int be_close(struct net_device *netdev)
49166+{
49167+ struct be_adapter *adapter = netdev_priv(netdev);
49168+ struct be_rx_obj *rxo;
49169+ struct be_tx_obj *txo;
49170 struct be_eq_obj *tx_eq = &adapter->tx_eq;
49171- bool link_up;
49172- int status;
49173+ int vec, i;
49174+
49175+ be_async_mcc_disable(adapter);
49176+
49177+ netif_stop_queue(netdev);
49178+ netif_carrier_off(netdev);
49179+ adapter->link_status = LINK_DOWN;
49180+
49181+ if (!lancer_chip(adapter))
49182+ be_intr_set(adapter, false);
49183+
49184+ for_all_rx_queues(adapter, rxo, i)
49185+ napi_disable(&rxo->rx_eq.napi);
49186+
49187+ napi_disable(&tx_eq->napi);
49188+
49189+ if (lancer_chip(adapter)) {
49190+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
49191+ for_all_rx_queues(adapter, rxo, i)
49192+ be_cq_notify(adapter, rxo->cq.id, false, 0);
49193+ for_all_tx_queues(adapter, txo, i)
49194+ be_cq_notify(adapter, txo->cq.id, false, 0);
49195+ }
49196+
49197+ if (msix_enabled(adapter)) {
49198+ vec = be_msix_vec_get(adapter, tx_eq);
49199+ synchronize_irq(vec);
49200+
49201+ for_all_rx_queues(adapter, rxo, i) {
49202+ vec = be_msix_vec_get(adapter, &rxo->rx_eq);
49203+ synchronize_irq(vec);
49204+ }
49205+ } else {
49206+ synchronize_irq(netdev->irq);
49207+ }
49208+ be_irq_unregister(adapter);
49209+
49210+ /* Wait for all pending tx completions to arrive so that
49211+ * all tx skbs are freed.
49212+ */
49213+ for_all_tx_queues(adapter, txo, i)
49214+ be_tx_compl_clean(adapter, txo);
49215+
49216+ be_rx_queues_clear(adapter);
49217+ return 0;
49218+}
49219+
49220+static int be_rx_queues_setup(struct be_adapter *adapter)
49221+{
49222+ struct be_rx_obj *rxo;
49223+ int rc, i;
49224+ u8 rsstable[MAX_RSS_QS];
49225+
49226+ for_all_rx_queues(adapter, rxo, i) {
49227+ rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
49228+ rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
49229+ adapter->if_handle,
49230+ (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
49231+ if (rc)
49232+ return rc;
49233+ }
49234+
49235+ if (be_multi_rxq(adapter)) {
49236+ for_all_rss_queues(adapter, rxo, i)
49237+ rsstable[i] = rxo->rss_id;
49238+
49239+ rc = be_cmd_rss_config(adapter, rsstable,
49240+ adapter->num_rx_qs - 1);
49241+ if (rc)
49242+ return rc;
49243+ }
49244
49245 /* First time posting */
49246- be_post_rx_frags(adapter);
49247+ for_all_rx_queues(adapter, rxo, i) {
49248+ be_post_rx_frags(rxo);
49249+ napi_enable(&rxo->rx_eq.napi);
49250+ }
49251+ return 0;
49252+}
49253+
49254+static int be_open(struct net_device *netdev)
49255+{
49256+ struct be_adapter *adapter = netdev_priv(netdev);
49257+ struct be_eq_obj *tx_eq = &adapter->tx_eq;
49258+ struct be_rx_obj *rxo;
49259+ int link_status;
49260+ int status, i;
49261+ u8 mac_speed;
49262+ u16 link_speed;
49263+
49264+ status = be_rx_queues_setup(adapter);
49265+ if (status)
49266+ goto err;
49267
49268- napi_enable(&rx_eq->napi);
49269 napi_enable(&tx_eq->napi);
49270
49271 be_irq_register(adapter);
49272
49273- be_intr_set(adapter, true);
49274+ if (!lancer_chip(adapter))
49275+ be_intr_set(adapter, true);
49276
49277 /* The evt queues are created in unarmed state; arm them */
49278- be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
49279+ for_all_rx_queues(adapter, rxo, i) {
49280+ be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
49281+ be_cq_notify(adapter, rxo->cq.id, true, 0);
49282+ }
49283 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
49284
49285- /* Rx compl queue may be in unarmed state; rearm it */
49286- be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
49287+ /* Now that interrupts are on we can process async mcc */
49288+ be_async_mcc_enable(adapter);
49289
49290- status = be_cmd_link_status_query(adapter, &link_up);
49291+ status = be_cmd_link_status_query(adapter, &link_status, &mac_speed,
49292+ &link_speed, 0);
49293 if (status)
49294- goto ret_sts;
49295- be_link_status_update(adapter, link_up);
49296+ goto err;
49297+ be_link_status_update(adapter, link_status);
49298
49299- status = be_vid_config(adapter);
49300+ status = be_vid_config(adapter, false, 0);
49301 if (status)
49302- goto ret_sts;
49303+ goto err;
49304
49305- status = be_cmd_set_flow_control(adapter,
49306- adapter->tx_fc, adapter->rx_fc);
49307- if (status)
49308- goto ret_sts;
49309+ if (be_physfn(adapter)) {
49310+ status = be_cmd_set_flow_control(adapter,
49311+ adapter->tx_fc, adapter->rx_fc);
49312+ if (status)
49313+ goto err;
49314+ }
49315+
49316+ return 0;
49317+err:
49318+ be_close(adapter->netdev);
49319+ return -EIO;
49320+}
49321+
49322+static int be_setup_wol(struct be_adapter *adapter, bool enable)
49323+{
49324+ struct be_dma_mem cmd;
49325+ int status = 0;
49326+ u8 mac[ETH_ALEN];
49327+
49328+ memset(mac, 0, ETH_ALEN);
49329+
49330+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
49331+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
49332+ if (cmd.va == NULL)
49333+ return -1;
49334+ memset(cmd.va, 0, cmd.size);
49335+
49336+ if (enable) {
49337+ status = pci_write_config_dword(adapter->pdev,
49338+ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
49339+ if (status) {
49340+ dev_err(&adapter->pdev->dev,
49341+ "Could not enable Wake-on-lan\n");
49342+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
49343+ cmd.dma);
49344+ return status;
49345+ }
49346+ status = be_cmd_enable_magic_wol(adapter,
49347+ adapter->netdev->dev_addr, &cmd);
49348+ pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
49349+ pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
49350+ } else {
49351+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
49352+ pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
49353+ pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
49354+ }
49355+
49356+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
49357+ return status;
49358+}
49359+
49360+/*
49361+ * Generate a seed MAC address from the PF MAC Address using jhash.
49362+ * MAC Address for VFs are assigned incrementally starting from the seed.
49363+ * These addresses are programmed in the ASIC by the PF and the VF driver
49364+ * queries for the MAC address during its probe.
49365+ */
49366+static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
49367+{
49368+ u32 vf = 0;
49369+ int status = 0;
49370+ u8 mac[ETH_ALEN];
49371+
49372+ be_vf_eth_addr_generate(adapter, mac);
49373+
49374+ for (vf = 0; vf < adapter->num_vfs; vf++) {
49375+ status = be_cmd_pmac_add(adapter, mac,
49376+ adapter->vf_cfg[vf].vf_if_handle,
49377+ &adapter->vf_cfg[vf].vf_pmac_id,
49378+ vf + 1);
49379+ if (status)
49380+ dev_err(&adapter->pdev->dev,
49381+ "Mac address add failed for VF %d\n", vf);
49382+ else
49383+ memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
49384
49385- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
49386-ret_sts:
49387+ mac[5] += 1;
49388+ }
49389 return status;
49390 }
49391
49392+static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
49393+{
49394+ u32 vf;
49395+
49396+ for (vf = 0; vf < adapter->num_vfs; vf++) {
49397+ if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
49398+ be_cmd_pmac_del(adapter,
49399+ adapter->vf_cfg[vf].vf_if_handle,
49400+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
49401+ }
49402+}
49403+
49404+static int be_num_txqs_want(struct be_adapter *adapter)
49405+{
49406+ if (adapter->num_vfs > 0 || be_is_mc(adapter) ||
49407+ lancer_chip(adapter) || !be_physfn(adapter) ||
49408+ adapter->generation == BE_GEN2)
49409+ return 1;
49410+ else
49411+ return MAX_TX_QS;
49412+}
49413+
49414 static int be_setup(struct be_adapter *adapter)
49415 {
49416 struct net_device *netdev = adapter->netdev;
49417- u32 cap_flags, en_flags;
49418- int status;
49419-
49420- cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
49421- BE_IF_FLAGS_MCAST_PROMISCUOUS |
49422- BE_IF_FLAGS_PROMISCUOUS |
49423- BE_IF_FLAGS_PASS_L3L4_ERRORS;
49424- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
49425- BE_IF_FLAGS_PASS_L3L4_ERRORS;
49426+ int status, fw_num_txqs, num_txqs;
49427+ u32 cap_flags, en_flags, vf = 0;
49428+ u8 mac[ETH_ALEN];
49429+
49430+ num_txqs = be_num_txqs_want(adapter);
49431+ if (num_txqs > 1) {
49432+ be_cmd_req_pg_pfc(adapter, &fw_num_txqs);
49433+ num_txqs = min(num_txqs, fw_num_txqs);
49434+ }
49435+ adapter->num_tx_qs = num_txqs;
49436+ if (adapter->num_tx_qs != MAX_TX_QS)
49437+ netif_set_real_num_tx_queues(adapter->netdev,
49438+ adapter->num_tx_qs);
49439+
49440+ be_cmd_req_native_mode(adapter);
49441+
49442+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
49443+ BE_IF_FLAGS_BROADCAST |
49444+ BE_IF_FLAGS_MULTICAST;
49445+
49446+ if (be_physfn(adapter)) {
49447+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
49448+ cap_flags |= BE_IF_FLAGS_RSS;
49449+ en_flags |= BE_IF_FLAGS_RSS;
49450+ }
49451+ cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
49452+ BE_IF_FLAGS_PROMISCUOUS;
49453+ if (!lancer_A0_chip(adapter)) {
49454+ cap_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
49455+ en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
49456+ }
49457+ }
49458
49459 status = be_cmd_if_create(adapter, cap_flags, en_flags,
49460 netdev->dev_addr, false/* pmac_invalid */,
49461- &adapter->if_handle, &adapter->pmac_id);
49462+ &adapter->if_handle, &adapter->pmac_id, 0);
49463 if (status != 0)
49464 goto do_none;
49465
49466+ if (be_physfn(adapter)) {
49467+ while (vf < adapter->num_vfs) {
49468+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
49469+ BE_IF_FLAGS_BROADCAST;
49470+ status = be_cmd_if_create(adapter, cap_flags,
49471+ en_flags, mac, true,
49472+ &adapter->vf_cfg[vf].vf_if_handle,
49473+ NULL, vf+1);
49474+ if (status) {
49475+ dev_err(&adapter->pdev->dev,
49476+ "Interface Create failed for VF %d\n", vf);
49477+ goto if_destroy;
49478+ }
49479+ adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
49480+ vf++;
49481+ }
49482+ } else {
49483+ status = be_cmd_mac_addr_query(adapter, mac,
49484+ MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
49485+ if (!status) {
49486+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
49487+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
49488+ }
49489+ }
49490+
49491 status = be_tx_queues_create(adapter);
49492 if (status != 0)
49493 goto if_destroy;
49494@@ -1656,10 +2793,15 @@ static int be_setup(struct be_adapter *adapter)
49495 if (status != 0)
49496 goto tx_qs_destroy;
49497
49498+ /* Allow all priorities by default. A GRP5 evt may modify this */
49499+ adapter->vlan_prio_bmap = 0xff;
49500+
49501 status = be_mcc_queues_create(adapter);
49502 if (status != 0)
49503 goto rx_qs_destroy;
49504
49505+ adapter->link_speed = -1;
49506+
49507 return 0;
49508
49509 rx_qs_destroy:
49510@@ -1667,158 +2809,392 @@ rx_qs_destroy:
49511 tx_qs_destroy:
49512 be_tx_queues_destroy(adapter);
49513 if_destroy:
49514- be_cmd_if_destroy(adapter, adapter->if_handle);
49515+ if (be_physfn(adapter)) {
49516+ for (vf = 0; vf < adapter->num_vfs; vf++)
49517+ if (adapter->vf_cfg[vf].vf_if_handle)
49518+ be_cmd_if_destroy(adapter,
49519+ adapter->vf_cfg[vf].vf_if_handle,
49520+ vf + 1);
49521+ }
49522+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
49523 do_none:
49524 return status;
49525 }
49526
49527 static int be_clear(struct be_adapter *adapter)
49528 {
49529+ int vf;
49530+
49531+ if (be_physfn(adapter) && adapter->num_vfs)
49532+ be_vf_eth_addr_rem(adapter);
49533+
49534 be_mcc_queues_destroy(adapter);
49535 be_rx_queues_destroy(adapter);
49536 be_tx_queues_destroy(adapter);
49537+ adapter->eq_next_idx = 0;
49538
49539- be_cmd_if_destroy(adapter, adapter->if_handle);
49540+ if (be_physfn(adapter)) {
49541+ for (vf = 0; vf < adapter->num_vfs; vf++)
49542+ if (adapter->vf_cfg[vf].vf_if_handle)
49543+ be_cmd_if_destroy(adapter,
49544+ adapter->vf_cfg[vf].vf_if_handle, vf + 1);
49545+ }
49546+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
49547
49548+ /* tell fw we're done with firing cmds */
49549+ be_cmd_fw_clean(adapter);
49550 return 0;
49551 }
49552
49553-static int be_close(struct net_device *netdev)
49554+static void be_cpy_drv_ver(struct be_adapter *adapter, void *va)
49555+{
49556+ struct mgmt_controller_attrib *attrib =
49557+ (struct mgmt_controller_attrib *) ((u8*) va +
49558+ sizeof(struct be_cmd_resp_hdr));
49559+
49560+ memcpy(attrib->hba_attribs.driver_version_string,
49561+ DRV_VER, sizeof(DRV_VER));
49562+ attrib->pci_bus_number = adapter->pdev->bus->number;
49563+ attrib->pci_device_number = PCI_SLOT(adapter->pdev->devfn);
49564+ return;
49565+}
49566+
49567+#define IOCTL_COOKIE "SERVERENGINES CORP"
49568+static int be_do_ioctl(struct net_device *netdev,
49569+ struct ifreq *ifr, int cmd)
49570 {
49571 struct be_adapter *adapter = netdev_priv(netdev);
49572- struct be_eq_obj *rx_eq = &adapter->rx_eq;
49573- struct be_eq_obj *tx_eq = &adapter->tx_eq;
49574- int vec;
49575+ struct be_cmd_req_hdr req;
49576+ struct be_cmd_resp_hdr *resp;
49577+ void *data = ifr->ifr_data;
49578+ void *ioctl_ptr;
49579+ void *va;
49580+ dma_addr_t dma;
49581+ u32 req_size;
49582+ int status, ret = 0;
49583+ u8 cookie[32];
49584+
49585+ switch (cmd) {
49586+ case SIOCDEVPRIVATE:
49587+ if (copy_from_user(cookie, data, strlen(IOCTL_COOKIE)))
49588+ return -EFAULT;
49589+
49590+ if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
49591+ return -EINVAL;
49592
49593- cancel_delayed_work_sync(&adapter->work);
49594+ ioctl_ptr = (u8 *)data + strlen(IOCTL_COOKIE);
49595+ if (copy_from_user(&req, ioctl_ptr,
49596+ sizeof(struct be_cmd_req_hdr)))
49597+ return -EFAULT;
49598
49599- netif_stop_queue(netdev);
49600- netif_carrier_off(netdev);
49601- adapter->link_up = false;
49602+ req_size = le32_to_cpu(req.request_length);
49603+ if (req_size > 65536)
49604+ return -EINVAL;
49605
49606- be_intr_set(adapter, false);
49607+ req_size += sizeof(struct be_cmd_req_hdr);
49608+ va = pci_alloc_consistent(adapter->pdev, req_size, &dma);
49609+ if (!va)
49610+ return -ENOMEM;
49611+ if (copy_from_user(va, ioctl_ptr, req_size)) {
49612+ ret = -EFAULT;
49613+ break;
49614+ }
49615
49616- if (adapter->msix_enabled) {
49617- vec = be_msix_vec_get(adapter, tx_eq->q.id);
49618- synchronize_irq(vec);
49619- vec = be_msix_vec_get(adapter, rx_eq->q.id);
49620- synchronize_irq(vec);
49621- } else {
49622- synchronize_irq(netdev->irq);
49623+ status = be_cmd_pass_ext_ioctl(adapter, dma, req_size, va);
49624+ if (status == -1) {
49625+ ret = -EIO;
49626+ break;
49627+ }
49628+
49629+ resp = (struct be_cmd_resp_hdr *) va;
49630+ if (!status) {
49631+ if (req.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES)
49632+ be_cpy_drv_ver(adapter, va);
49633+ }
49634+
49635+ if (copy_to_user(ioctl_ptr, va, req_size)) {
49636+ ret = -EFAULT;
49637+ break;
49638+ }
49639+ break;
49640+ default:
49641+ return -EOPNOTSUPP;
49642 }
49643- be_irq_unregister(adapter);
49644
49645- napi_disable(&rx_eq->napi);
49646- napi_disable(&tx_eq->napi);
49647+ if (va)
49648+ pci_free_consistent(adapter->pdev, req_size, va, dma);
49649+
49650+ return ret;
49651+}
49652+
49653+#ifdef CONFIG_NET_POLL_CONTROLLER
49654+static void be_netpoll(struct net_device *netdev)
49655+{
49656+ struct be_adapter *adapter = netdev_priv(netdev);
49657+ struct be_rx_obj *rxo;
49658+ int i;
49659
49660- /* Wait for all pending tx completions to arrive so that
49661- * all tx skbs are freed.
49662- */
49663- be_tx_compl_clean(adapter);
49664+ event_handle(adapter, &adapter->tx_eq, false);
49665+ for_all_rx_queues(adapter, rxo, i)
49666+ event_handle(adapter, &rxo->rx_eq, true);
49667+
49668+ return;
49669+}
49670+#endif
49671+
49672+static int be_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
49673+ void **ip_hdr, void **tcpudp_hdr,
49674+ u64 *hdr_flags, void *priv)
49675+{
49676+ struct ethhdr *eh;
49677+ struct vlan_ethhdr *veh;
49678+ struct iphdr *iph;
49679+ u8 *va = page_address(frag->page) + frag->page_offset;
49680+ unsigned long ll_hlen;
49681+
49682+ prefetch(va);
49683+ eh = (struct ethhdr *)va;
49684+ *mac_hdr = eh;
49685+ ll_hlen = ETH_HLEN;
49686+ if (eh->h_proto != htons(ETH_P_IP)) {
49687+ if (eh->h_proto == htons(ETH_P_8021Q)) {
49688+ veh = (struct vlan_ethhdr *)va;
49689+ if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
49690+ return -1;
49691+
49692+ ll_hlen += VLAN_HLEN;
49693+ } else {
49694+ return -1;
49695+ }
49696+ }
49697+ *hdr_flags = LRO_IPV4;
49698+ iph = (struct iphdr *)(va + ll_hlen);
49699+ *ip_hdr = iph;
49700+ if (iph->protocol != IPPROTO_TCP)
49701+ return -1;
49702+ *hdr_flags |= LRO_TCP;
49703+ *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
49704
49705 return 0;
49706 }
49707
49708-#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
49709+static void be_lro_init(struct be_adapter *adapter, struct net_device *netdev)
49710+{
49711+ struct net_lro_mgr *lro_mgr;
49712+ struct be_rx_obj *rxo;
49713+ int i;
49714+
49715+ for_all_rx_queues(adapter, rxo, i) {
49716+ lro_mgr = &rxo->lro_mgr;
49717+ lro_mgr->dev = netdev;
49718+ lro_mgr->features = LRO_F_NAPI;
49719+ lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
49720+ lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
49721+ lro_mgr->max_desc = BE_MAX_LRO_DESCRIPTORS;
49722+ lro_mgr->lro_arr = rxo->lro_desc;
49723+ lro_mgr->get_frag_header = be_get_frag_header;
49724+ lro_mgr->max_aggr = BE_MAX_FRAGS_PER_FRAME;
49725+ }
49726+
49727+#ifdef NETIF_F_GRO
49728+ netdev->features |= NETIF_F_GRO;
49729+ adapter->gro_supported = true;
49730+#endif
49731+}
49732+
49733+#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
49734 char flash_cookie[2][16] = {"*** SE FLAS",
49735 "H DIRECTORY *** "};
49736-static int be_flash_image(struct be_adapter *adapter,
49737+
49738+static bool be_flash_redboot(struct be_adapter *adapter,
49739+ const u8 *p, u32 img_start, int image_size,
49740+ int hdr_size)
49741+{
49742+ u32 crc_offset;
49743+ u8 flashed_crc[4];
49744+ int status;
49745+
49746+ crc_offset = hdr_size + img_start + image_size - 4;
49747+
49748+ p += crc_offset;
49749+
49750+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
49751+ (image_size - 4));
49752+ if (status) {
49753+ dev_err(&adapter->pdev->dev,
49754+ "could not get crc from flash, not flashing redboot\n");
49755+ return false;
49756+ }
49757+
49758+ /*update redboot only if crc does not match*/
49759+ if (!memcmp(flashed_crc, p, 4))
49760+ return false;
49761+ else
49762+ return true;
49763+}
49764+
49765+static bool phy_flashing_required(struct be_adapter *adapter)
49766+{
49767+ int status = 0;
49768+ struct be_phy_info phy_info;
49769+
49770+ status = be_cmd_get_phy_info(adapter, &phy_info);
49771+ if (status)
49772+ return false;
49773+ if ((phy_info.phy_type == TN_8022) &&
49774+ (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
49775+ return true;
49776+ }
49777+ return false;
49778+}
49779+
49780+static int be_flash_data(struct be_adapter *adapter,
49781 const struct firmware *fw,
49782- struct be_dma_mem *flash_cmd, u32 flash_type)
49783+ struct be_dma_mem *flash_cmd, int num_of_images)
49784+
49785 {
49786- int status;
49787- u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
49788+ int status = 0, i, filehdr_size = 0;
49789+ u32 total_bytes = 0, flash_op;
49790 int num_bytes;
49791 const u8 *p = fw->data;
49792 struct be_cmd_write_flashrom *req = flash_cmd->va;
49793+ struct flash_comp *pflashcomp;
49794+ int num_comp;
49795
49796- switch (flash_type) {
49797- case FLASHROM_TYPE_ISCSI_ACTIVE:
49798- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
49799- image_size = FLASH_IMAGE_MAX_SIZE;
49800- break;
49801- case FLASHROM_TYPE_ISCSI_BACKUP:
49802- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
49803- image_size = FLASH_IMAGE_MAX_SIZE;
49804- break;
49805- case FLASHROM_TYPE_FCOE_FW_ACTIVE:
49806- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
49807- image_size = FLASH_IMAGE_MAX_SIZE;
49808- break;
49809- case FLASHROM_TYPE_FCOE_FW_BACKUP:
49810- image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
49811- image_size = FLASH_IMAGE_MAX_SIZE;
49812- break;
49813- case FLASHROM_TYPE_BIOS:
49814- image_offset = FLASH_iSCSI_BIOS_START;
49815- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
49816- break;
49817- case FLASHROM_TYPE_FCOE_BIOS:
49818- image_offset = FLASH_FCoE_BIOS_START;
49819- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
49820- break;
49821- case FLASHROM_TYPE_PXE_BIOS:
49822- image_offset = FLASH_PXE_BIOS_START;
49823- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
49824- break;
49825- default:
49826- return 0;
49827+ struct flash_comp gen3_flash_types[10] = {
49828+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
49829+ FLASH_IMAGE_MAX_SIZE_g3},
49830+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
49831+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
49832+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
49833+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
49834+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
49835+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
49836+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
49837+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
49838+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
49839+ FLASH_IMAGE_MAX_SIZE_g3},
49840+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
49841+ FLASH_IMAGE_MAX_SIZE_g3},
49842+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
49843+ FLASH_IMAGE_MAX_SIZE_g3},
49844+ { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
49845+ FLASH_NCSI_IMAGE_MAX_SIZE_g3},
49846+ { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
49847+ FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
49848+ };
49849+ struct flash_comp gen2_flash_types[8] = {
49850+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
49851+ FLASH_IMAGE_MAX_SIZE_g2},
49852+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
49853+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
49854+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
49855+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
49856+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
49857+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
49858+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
49859+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
49860+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
49861+ FLASH_IMAGE_MAX_SIZE_g2},
49862+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
49863+ FLASH_IMAGE_MAX_SIZE_g2},
49864+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
49865+ FLASH_IMAGE_MAX_SIZE_g2}
49866+ };
49867+ if (adapter->generation == BE_GEN3) {
49868+ pflashcomp = gen3_flash_types;
49869+ filehdr_size = sizeof(struct flash_file_hdr_g3);
49870+ num_comp = ARRAY_SIZE(gen3_flash_types);
49871+ } else {
49872+ pflashcomp = gen2_flash_types;
49873+ filehdr_size = sizeof(struct flash_file_hdr_g2);
49874+ num_comp = ARRAY_SIZE(gen2_flash_types);
49875 }
49876+ for (i = 0; i < num_comp; i++) {
49877+ if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
49878+ memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
49879+ continue;
49880+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
49881+ if (!phy_flashing_required(adapter))
49882+ continue;
49883+ }
49884+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
49885+ (!be_flash_redboot(adapter, fw->data,
49886+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
49887+ (num_of_images * sizeof(struct image_hdr)))))
49888+ continue;
49889
49890- p += sizeof(struct flash_file_hdr) + image_offset;
49891- if (p + image_size > fw->data + fw->size)
49892- return -1;
49893-
49894- total_bytes = image_size;
49895-
49896- while (total_bytes) {
49897- if (total_bytes > 32*1024)
49898- num_bytes = 32*1024;
49899- else
49900- num_bytes = total_bytes;
49901- total_bytes -= num_bytes;
49902-
49903- if (!total_bytes)
49904- flash_op = FLASHROM_OPER_FLASH;
49905- else
49906- flash_op = FLASHROM_OPER_SAVE;
49907- memcpy(req->params.data_buf, p, num_bytes);
49908- p += num_bytes;
49909- status = be_cmd_write_flashrom(adapter, flash_cmd,
49910- flash_type, flash_op, num_bytes);
49911- if (status) {
49912- dev_err(&adapter->pdev->dev,
49913- "cmd to write to flash rom failed. type/op %d/%d\n",
49914- flash_type, flash_op);
49915+ p = fw->data;
49916+ p += filehdr_size + pflashcomp[i].offset
49917+ + (num_of_images * sizeof(struct image_hdr));
49918+ if (p + pflashcomp[i].size > fw->data + fw->size)
49919 return -1;
49920+ total_bytes = pflashcomp[i].size;
49921+ while (total_bytes) {
49922+ if (total_bytes > 32*1024)
49923+ num_bytes = 32*1024;
49924+ else
49925+ num_bytes = total_bytes;
49926+ total_bytes -= num_bytes;
49927+ if (!total_bytes) {
49928+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
49929+ flash_op = FLASHROM_OPER_PHY_FLASH;
49930+ else
49931+ flash_op = FLASHROM_OPER_FLASH;
49932+ } else {
49933+ if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
49934+ flash_op = FLASHROM_OPER_PHY_SAVE;
49935+ else
49936+ flash_op = FLASHROM_OPER_SAVE;
49937+ }
49938+ memcpy(req->params.data_buf, p, num_bytes);
49939+ p += num_bytes;
49940+ status = be_cmd_write_flashrom(adapter, flash_cmd,
49941+ pflashcomp[i].optype, flash_op, num_bytes);
49942+ if (status) {
49943+ if ((status == ILLEGAL_IOCTL_REQ) &&
49944+ (pflashcomp[i].optype ==
49945+ IMG_TYPE_PHY_FW))
49946+ break;
49947+ dev_err(&adapter->pdev->dev,
49948+ "cmd to write to flash rom failed.\n");
49949+ return -1;
49950+ }
49951+ yield();
49952 }
49953- yield();
49954 }
49955-
49956 return 0;
49957 }
49958
49959+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
49960+{
49961+ if (fhdr == NULL)
49962+ return 0;
49963+ if (fhdr->build[0] == '3')
49964+ return BE_GEN3;
49965+ else if (fhdr->build[0] == '2')
49966+ return BE_GEN2;
49967+ else
49968+ return 0;
49969+}
49970+
49971 int be_load_fw(struct be_adapter *adapter, u8 *func)
49972 {
49973 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
49974 const struct firmware *fw;
49975- struct flash_file_hdr *fhdr;
49976- struct flash_section_info *fsec = NULL;
49977+ struct flash_file_hdr_g2 *fhdr;
49978+ struct flash_file_hdr_g3 *fhdr3;
49979+ struct image_hdr *img_hdr_ptr = NULL;
49980 struct be_dma_mem flash_cmd;
49981- int status;
49982+ int status, i = 0, num_imgs = 0;
49983 const u8 *p;
49984- bool entry_found = false;
49985- int flash_type;
49986- char fw_ver[FW_VER_LEN];
49987- char fw_cfg;
49988
49989- status = be_cmd_get_fw_ver(adapter, fw_ver);
49990- if (status)
49991- return status;
49992+ if (!netif_running(adapter->netdev)) {
49993+ dev_err(&adapter->pdev->dev,
49994+ "Firmware load not allowed (interface is down)\n");
49995+ return -1;
49996+ }
49997
49998- fw_cfg = *(fw_ver + 2);
49999- if (fw_cfg == '0')
50000- fw_cfg = '1';
50001 strcpy(fw_file, func);
50002
50003 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
50004@@ -1826,34 +3202,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50005 goto fw_exit;
50006
50007 p = fw->data;
50008- fhdr = (struct flash_file_hdr *) p;
50009- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
50010- dev_err(&adapter->pdev->dev,
50011- "Firmware(%s) load error (signature did not match)\n",
50012- fw_file);
50013- status = -1;
50014- goto fw_exit;
50015- }
50016-
50017+ fhdr = (struct flash_file_hdr_g2 *) p;
50018 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
50019
50020- p += sizeof(struct flash_file_hdr);
50021- while (p < (fw->data + fw->size)) {
50022- fsec = (struct flash_section_info *)p;
50023- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
50024- entry_found = true;
50025- break;
50026- }
50027- p += 32;
50028- }
50029-
50030- if (!entry_found) {
50031- status = -1;
50032- dev_err(&adapter->pdev->dev,
50033- "Flash cookie not found in firmware image\n");
50034- goto fw_exit;
50035- }
50036-
50037 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
50038 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
50039 &flash_cmd.dma);
50040@@ -1864,12 +3215,25 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50041 goto fw_exit;
50042 }
50043
50044- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
50045- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
50046- status = be_flash_image(adapter, fw, &flash_cmd,
50047- flash_type);
50048- if (status)
50049- break;
50050+ if ((adapter->generation == BE_GEN3) &&
50051+ (get_ufigen_type(fhdr) == BE_GEN3)) {
50052+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
50053+ num_imgs = le32_to_cpu(fhdr3->num_imgs);
50054+ for (i = 0; i < num_imgs; i++) {
50055+ img_hdr_ptr = (struct image_hdr *) (fw->data +
50056+ (sizeof(struct flash_file_hdr_g3) +
50057+ i * sizeof(struct image_hdr)));
50058+ if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
50059+ status = be_flash_data(adapter, fw, &flash_cmd,
50060+ num_imgs);
50061+ }
50062+ } else if ((adapter->generation == BE_GEN2) &&
50063+ (get_ufigen_type(fhdr) == BE_GEN2)) {
50064+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
50065+ } else {
50066+ dev_err(&adapter->pdev->dev,
50067+ "UFI and Interface are not compatible for flashing\n");
50068+ status = -1;
50069 }
50070
50071 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
50072@@ -1879,14 +3243,14 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
50073 goto fw_exit;
50074 }
50075
50076- dev_info(&adapter->pdev->dev, "Firmware flashed succesfully\n");
50077+ dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
50078
50079 fw_exit:
50080 release_firmware(fw);
50081 return status;
50082 }
50083
50084-static struct net_device_ops be_netdev_ops = {
50085+static net_device_ops_no_const be_netdev_ops = {
50086 .ndo_open = be_open,
50087 .ndo_stop = be_close,
50088 .ndo_start_xmit = be_xmit,
50089@@ -1898,15 +3262,32 @@ static struct net_device_ops be_netdev_ops = {
50090 .ndo_vlan_rx_register = be_vlan_register,
50091 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
50092 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
50093+#ifdef HAVE_SRIOV_CONFIG
50094+ .ndo_set_vf_mac = be_set_vf_mac,
50095+ .ndo_set_vf_vlan = be_set_vf_vlan,
50096+ .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
50097+ .ndo_get_vf_config = be_get_vf_config,
50098+#endif
50099+ .ndo_do_ioctl = be_do_ioctl,
50100+#ifdef CONFIG_NET_POLL_CONTROLLER
50101+ .ndo_poll_controller = be_netpoll,
50102+#endif
50103 };
50104
50105-static void be_netdev_init(struct net_device *netdev)
50106+static int be_netdev_init(struct net_device *netdev)
50107 {
50108 struct be_adapter *adapter = netdev_priv(netdev);
50109+ struct be_rx_obj *rxo;
50110+ int i, status = 0;
50111
50112 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
50113- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
50114- NETIF_F_GRO;
50115+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_CSUM | NETIF_F_TSO6;
50116+
50117+ netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
50118+ NETIF_F_HW_CSUM;
50119+
50120+ netdev->features |= NETIF_F_VLAN_SG | NETIF_F_VLAN_TSO |
50121+ NETIF_F_VLAN_CSUM;
50122
50123 netdev->flags |= IFF_MULTICAST;
50124
50125@@ -1918,17 +3299,30 @@ static void be_netdev_init(struct net_device *netdev)
50126
50127 netif_set_gso_max_size(netdev, 65535);
50128
50129+ if (adapter->flags & BE_FLAGS_DCBX)
50130+ be_netdev_ops.ndo_select_queue = be_select_queue;
50131 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
50132-
50133+
50134 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
50135
50136- netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
50137- BE_NAPI_WEIGHT);
50138- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50139+ be_lro_init(adapter, netdev);
50140+
50141+ for_all_rx_queues(adapter, rxo, i) {
50142+ status = be_netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
50143+ BE_NAPI_WEIGHT);
50144+ if (status) {
50145+ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50146+ "for rxo:%d\n", i);
50147+ return status;
50148+ }
50149+ }
50150+ status = be_netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
50151 BE_NAPI_WEIGHT);
50152+ if (status)
50153+ dev_err(&adapter->pdev->dev, "dummy netdev alloc fail"
50154+ "for tx\n");
50155
50156- netif_carrier_off(netdev);
50157- netif_stop_queue(netdev);
50158+ return status;
50159 }
50160
50161 static void be_unmap_pci_bars(struct be_adapter *adapter)
50162@@ -1937,37 +3331,62 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
50163 iounmap(adapter->csr);
50164 if (adapter->db)
50165 iounmap(adapter->db);
50166- if (adapter->pcicfg)
50167+ if (adapter->pcicfg && be_physfn(adapter))
50168 iounmap(adapter->pcicfg);
50169 }
50170
50171 static int be_map_pci_bars(struct be_adapter *adapter)
50172 {
50173+ struct pci_dev *pdev = adapter->pdev;
50174 u8 __iomem *addr;
50175- int pcicfg_reg;
50176+ int pcicfg_reg, db_reg;
50177
50178- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
50179- pci_resource_len(adapter->pdev, 2));
50180- if (addr == NULL)
50181- return -ENOMEM;
50182- adapter->csr = addr;
50183+ if (lancer_chip(adapter)) {
50184+ addr = ioremap_nocache(pci_resource_start(pdev, 0),
50185+ pci_resource_len(adapter->pdev, 0));
50186+ if (addr == NULL)
50187+ return -ENOMEM;
50188+ adapter->db = addr;
50189+ return 0;
50190+ }
50191
50192- addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
50193- 128 * 1024);
50194- if (addr == NULL)
50195- goto pci_map_err;
50196- adapter->db = addr;
50197+ if (be_physfn(adapter)) {
50198+ addr = ioremap_nocache(pci_resource_start(pdev, 2),
50199+ pci_resource_len(pdev, 2));
50200+ if (addr == NULL)
50201+ return -ENOMEM;
50202+ adapter->csr = addr;
50203+ adapter->netdev->mem_start = pci_resource_start(pdev, 2);
50204+ adapter->netdev->mem_end = pci_resource_start(pdev, 2) +
50205+ pci_resource_len(pdev, 2);
50206+ }
50207
50208- if (adapter->generation == BE_GEN2)
50209+ if (adapter->generation == BE_GEN2) {
50210 pcicfg_reg = 1;
50211- else
50212+ db_reg = 4;
50213+ } else {
50214 pcicfg_reg = 0;
50215+ if (be_physfn(adapter))
50216+ db_reg = 4;
50217+ else
50218+ db_reg = 0;
50219+ }
50220
50221- addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
50222- pci_resource_len(adapter->pdev, pcicfg_reg));
50223+ addr = ioremap_nocache(pci_resource_start(pdev, db_reg),
50224+ pci_resource_len(pdev, db_reg));
50225 if (addr == NULL)
50226 goto pci_map_err;
50227- adapter->pcicfg = addr;
50228+ adapter->db = addr;
50229+
50230+ if (be_physfn(adapter)) {
50231+ addr = ioremap_nocache(
50232+ pci_resource_start(pdev, pcicfg_reg),
50233+ pci_resource_len(pdev, pcicfg_reg));
50234+ if (addr == NULL)
50235+ goto pci_map_err;
50236+ adapter->pcicfg = addr;
50237+ } else
50238+ adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
50239
50240 return 0;
50241 pci_map_err:
50242@@ -1985,40 +3404,69 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
50243 if (mem->va)
50244 pci_free_consistent(adapter->pdev, mem->size,
50245 mem->va, mem->dma);
50246+
50247+ mem = &adapter->rx_filter;
50248+ if (mem->va)
50249+ pci_free_consistent(adapter->pdev, mem->size,
50250+ mem->va, mem->dma);
50251 }
50252
50253 static int be_ctrl_init(struct be_adapter *adapter)
50254 {
50255 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
50256 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
50257+ struct be_dma_mem *rx_filter = &adapter->rx_filter;
50258 int status;
50259
50260 status = be_map_pci_bars(adapter);
50261 if (status)
50262- return status;
50263+ goto done;
50264
50265 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
50266 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
50267 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
50268 if (!mbox_mem_alloc->va) {
50269- be_unmap_pci_bars(adapter);
50270- return -1;
50271+ status = -ENOMEM;
50272+ goto unmap_pci_bars;
50273 }
50274+
50275 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
50276 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
50277 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
50278 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
50279- spin_lock_init(&adapter->mbox_lock);
50280+
50281+ rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
50282+ rx_filter->va = pci_alloc_consistent(adapter->pdev, rx_filter->size,
50283+ &rx_filter->dma);
50284+ if (rx_filter->va == NULL) {
50285+ status = -ENOMEM;
50286+ goto free_mbox;
50287+ }
50288+ memset(rx_filter->va, 0, rx_filter->size);
50289+
50290+ mutex_init(&adapter->mbox_lock);
50291 spin_lock_init(&adapter->mcc_lock);
50292 spin_lock_init(&adapter->mcc_cq_lock);
50293
50294+ init_completion(&adapter->flash_compl);
50295+
50296+ PCI_SAVE_STATE(adapter->pdev);
50297 return 0;
50298+
50299+free_mbox:
50300+ pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
50301+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
50302+
50303+unmap_pci_bars:
50304+ be_unmap_pci_bars(adapter);
50305+
50306+done:
50307+ return status;
50308 }
50309
50310 static void be_stats_cleanup(struct be_adapter *adapter)
50311 {
50312- struct be_stats_obj *stats = &adapter->stats;
50313- struct be_dma_mem *cmd = &stats->cmd;
50314+ struct be_dma_mem *cmd = &adapter->stats_cmd;
50315
50316 if (cmd->va)
50317 pci_free_consistent(adapter->pdev, cmd->size,
50318@@ -2027,10 +3475,12 @@ static void be_stats_cleanup(struct be_adapter *adapter)
50319
50320 static int be_stats_init(struct be_adapter *adapter)
50321 {
50322- struct be_stats_obj *stats = &adapter->stats;
50323- struct be_dma_mem *cmd = &stats->cmd;
50324+ struct be_dma_mem *cmd = &adapter->stats_cmd;
50325
50326- cmd->size = sizeof(struct be_cmd_req_get_stats);
50327+ if (adapter->generation == BE_GEN2)
50328+ cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
50329+ else
50330+ cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
50331 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
50332 if (cmd->va == NULL)
50333 return -1;
50334@@ -2041,9 +3491,17 @@ static int be_stats_init(struct be_adapter *adapter)
50335 static void __devexit be_remove(struct pci_dev *pdev)
50336 {
50337 struct be_adapter *adapter = pci_get_drvdata(pdev);
50338+
50339 if (!adapter)
50340 return;
50341
50342+ cancel_delayed_work_sync(&adapter->work);
50343+
50344+#ifdef CONFIG_PALAU
50345+ be_sysfs_remove_group(adapter);
50346+#endif
50347+
50348+ /* be_close() gets called if the device is open by unregister */
50349 unregister_netdev(adapter->netdev);
50350
50351 be_clear(adapter);
50352@@ -2052,36 +3510,203 @@ static void __devexit be_remove(struct pci_dev *pdev)
50353
50354 be_ctrl_cleanup(adapter);
50355
50356- if (adapter->msix_enabled) {
50357- pci_disable_msix(adapter->pdev);
50358- adapter->msix_enabled = false;
50359- }
50360+ kfree(adapter->vf_cfg);
50361+ be_sriov_disable(adapter);
50362+
50363+ be_msix_disable(adapter);
50364
50365 pci_set_drvdata(pdev, NULL);
50366 pci_release_regions(pdev);
50367 pci_disable_device(pdev);
50368-
50369+ be_netif_napi_del(adapter->netdev);
50370 free_netdev(adapter->netdev);
50371 }
50372
50373-static int be_hw_up(struct be_adapter *adapter)
50374+static void be_pcie_slot_check(struct be_adapter *adapter)
50375+{
50376+ u32 curr, max, width, max_wd, speed, max_sp;
50377+
50378+ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_STATUS_OFFSET,
50379+ &curr);
50380+ width = (curr >> PCIE_LINK_STATUS_NEG_WIDTH_SHIFT) &
50381+ PCIE_LINK_STATUS_NEG_WIDTH_MASK;
50382+ speed = (curr >> PCIE_LINK_STATUS_SPEED_SHIFT) &
50383+ PCIE_LINK_STATUS_SPEED_MASK;
50384+
50385+ pci_read_config_dword(adapter->pdev, PCICFG_PCIE_LINK_CAP_OFFSET,
50386+ &max);
50387+ max_wd = (max >> PCIE_LINK_CAP_MAX_WIDTH_SHIFT) &
50388+ PCIE_LINK_CAP_MAX_WIDTH_MASK;
50389+ max_sp = (max >> PCIE_LINK_CAP_MAX_SPEED_SHIFT) &
50390+ PCIE_LINK_CAP_MAX_SPEED_MASK;
50391+
50392+ if (width < max_wd || speed < max_sp)
50393+ dev_warn(&adapter->pdev->dev,
50394+ "Found network device in a Gen%s x%d PCIe slot. It "
50395+ "should be in a Gen2 x%d slot for best performance\n",
50396+ speed < max_sp ? "1" : "2", width, max_wd);
50397+}
50398+
50399+static int be_get_ioctl_version(char *fw_version) {
50400+ char *str[4];
50401+ int i;
50402+ int val[4];
50403+ char *endptr;
50404+
50405+ if(!fw_version)
50406+ return 0;
50407+ for(i=0; i<3; i++) {
50408+ str[i] = strsep(&fw_version, ".");
50409+ val[i] = simple_strtol(str[i], &endptr, 10);
50410+ }
50411+
50412+ if (val[0]>4 || (val[0]>3 && val[2]>143))
50413+ return 1;
50414+ return 0;
50415+}
50416+
50417+static int be_get_port_names(struct be_adapter *adapter)
50418 {
50419 int status;
50420+ int ver;
50421
50422- status = be_cmd_POST(adapter);
50423+ status = be_cmd_get_fw_ver(adapter,
50424+ adapter->fw_ver, NULL);
50425 if (status)
50426 return status;
50427+ ver = be_get_ioctl_version(adapter->fw_ver);
50428+ if (ver && (adapter->generation == BE_GEN3))
50429+ status = be_cmd_query_port_names_v1(adapter,
50430+ adapter->port_name);
50431+ else
50432+ status = be_cmd_query_port_names_v0(adapter,
50433+ adapter->port_name);
50434+ return status;
50435+}
50436
50437- status = be_cmd_reset_function(adapter);
50438+static int be_get_config(struct be_adapter *adapter)
50439+{
50440+ int status;
50441+ u8 mac[ETH_ALEN];
50442+
50443+ status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
50444+ &adapter->function_mode,
50445+ &adapter->function_caps);
50446 if (status)
50447 return status;
50448
50449- status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
50450+ status = be_cmd_get_cntl_attributes(adapter);
50451 if (status)
50452 return status;
50453
50454- status = be_cmd_query_fw_cfg(adapter,
50455- &adapter->port_num, &adapter->cap);
50456+ memset(mac, 0, ETH_ALEN);
50457+ be_pcie_slot_check(adapter);
50458+
50459+ if (be_physfn(adapter)) {
50460+ status = be_cmd_mac_addr_query(adapter, mac,
50461+ MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
50462+
50463+ if (status)
50464+ return status;
50465+
50466+ if (!is_valid_ether_addr(mac))
50467+ return -EADDRNOTAVAIL;
50468+
50469+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
50470+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
50471+ }
50472+
50473+ if (adapter->function_mode & FLEX10_MODE)
50474+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
50475+ else
50476+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
50477+
50478+ return 0;
50479+}
50480+
50481+static int be_dev_family_check(struct be_adapter *adapter)
50482+{
50483+ struct pci_dev *pdev = adapter->pdev;
50484+ u32 sli_intf = 0, if_type;
50485+
50486+ switch (pdev->device) {
50487+ case BE_DEVICE_ID1:
50488+ case OC_DEVICE_ID1:
50489+ adapter->generation = BE_GEN2;
50490+ break;
50491+ case BE_DEVICE_ID2:
50492+ case OC_DEVICE_ID2:
50493+ adapter->generation = BE_GEN3;
50494+ break;
50495+ case OC_DEVICE_ID3:
50496+ pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
50497+ if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
50498+ SLI_INTF_IF_TYPE_SHIFT;
50499+
50500+ if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
50501+ if_type != 0x02) {
50502+ dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
50503+ return -EINVAL;
50504+ }
50505+ if (num_vfs > 0) {
50506+ dev_err(&pdev->dev, "VFs not supported\n");
50507+ return -EINVAL;
50508+ }
50509+ adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
50510+ SLI_INTF_FAMILY_SHIFT);
50511+ adapter->generation = BE_GEN3;
50512+ break;
50513+ default:
50514+ adapter->generation = 0;
50515+ }
50516+ return 0;
50517+}
50518+
50519+static int lancer_wait_ready(struct be_adapter *adapter)
50520+{
50521+#define SLIPORT_READY_TIMEOUT 500
50522+ u32 sliport_status;
50523+ int status = 0, i;
50524+
50525+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
50526+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
50527+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
50528+ break;
50529+
50530+ msleep(20);
50531+ }
50532+
50533+ if (i == SLIPORT_READY_TIMEOUT)
50534+ status = -1;
50535+
50536+ return status;
50537+}
50538+
50539+static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
50540+{
50541+ int status;
50542+ u32 sliport_status, err, reset_needed;
50543+ status = lancer_wait_ready(adapter);
50544+ if (!status) {
50545+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
50546+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
50547+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
50548+ if (err && reset_needed) {
50549+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
50550+ adapter->db + SLIPORT_CONTROL_OFFSET);
50551+
50552+ /* check adapter has corrected the error */
50553+ status = lancer_wait_ready(adapter);
50554+ sliport_status = ioread32(adapter->db +
50555+ SLIPORT_STATUS_OFFSET);
50556+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
50557+ SLIPORT_STATUS_RN_MASK);
50558+ if (status || sliport_status)
50559+ status = -1;
50560+ } else if (err || reset_needed) {
50561+ status = -1;
50562+ }
50563+ }
50564 return status;
50565 }
50566
50567@@ -2091,7 +3716,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
50568 int status = 0;
50569 struct be_adapter *adapter;
50570 struct net_device *netdev;
50571- u8 mac[ETH_ALEN];
50572+ u32 en;
50573
50574 status = pci_enable_device(pdev);
50575 if (status)
50576@@ -2102,31 +3727,22 @@ static int __devinit be_probe(struct pci_dev *pdev,
50577 goto disable_dev;
50578 pci_set_master(pdev);
50579
50580- netdev = alloc_etherdev(sizeof(struct be_adapter));
50581+ netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
50582 if (netdev == NULL) {
50583 status = -ENOMEM;
50584 goto rel_reg;
50585 }
50586 adapter = netdev_priv(netdev);
50587
50588- switch (pdev->device) {
50589- case BE_DEVICE_ID1:
50590- case OC_DEVICE_ID1:
50591- adapter->generation = BE_GEN2;
50592- break;
50593- case BE_DEVICE_ID2:
50594- case OC_DEVICE_ID2:
50595- adapter->generation = BE_GEN3;
50596- break;
50597- default:
50598- adapter->generation = 0;
50599- }
50600-
50601 adapter->pdev = pdev;
50602+
50603+ status = be_dev_family_check(adapter);
50604+ if (status)
50605+ goto free_netdev;
50606+
50607 pci_set_drvdata(pdev, adapter);
50608 adapter->netdev = netdev;
50609-
50610- be_msix_enable(adapter);
50611+ SET_NETDEV_DEV(netdev, &pdev->dev);
50612
50613 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
50614 if (!status) {
50615@@ -2139,46 +3755,150 @@ static int __devinit be_probe(struct pci_dev *pdev,
50616 }
50617 }
50618
50619+ be_sriov_enable(adapter);
50620+ if (adapter->num_vfs > 0) {
50621+ adapter->vf_cfg = kcalloc(adapter->num_vfs,
50622+ sizeof(struct be_vf_cfg), GFP_KERNEL);
50623+
50624+ if (!adapter->vf_cfg)
50625+ goto free_netdev;
50626+ }
50627+
50628 status = be_ctrl_init(adapter);
50629 if (status)
50630- goto free_netdev;
50631+ goto free_vf_cfg;
50632+
50633+ if (lancer_chip(adapter)) {
50634+ status = lancer_test_and_set_rdy_state(adapter);
50635+ if (status) {
50636+ dev_err(&pdev->dev, "Adapter in non recoverable error\n");
50637+ goto ctrl_clean;
50638+ }
50639+ }
50640+
50641+ /* sync up with fw's ready state */
50642+ if (be_physfn(adapter)) {
50643+ status = be_cmd_POST(adapter);
50644+ if (status)
50645+ goto ctrl_clean;
50646+ }
50647+
50648+ /* tell fw we're ready to fire cmds */
50649+ status = be_cmd_fw_init(adapter);
50650+ if (status)
50651+ goto ctrl_clean;
50652+
50653+ status = be_cmd_reset_function(adapter);
50654+ if (status)
50655+ goto ctrl_clean;
50656
50657 status = be_stats_init(adapter);
50658 if (status)
50659 goto ctrl_clean;
50660
50661- status = be_hw_up(adapter);
50662+ status = be_get_config(adapter);
50663 if (status)
50664 goto stats_clean;
50665
50666- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
50667- true /* permanent */, 0);
50668- if (status)
50669- goto stats_clean;
50670- memcpy(netdev->dev_addr, mac, ETH_ALEN);
50671+ /* This bit is zero in normal boot case, but in crash kernel case this
50672+ is not cleared. clear this bit here, until we are ready with the irqs
50673+ i.e in be_open call.*/
50674+ if (!lancer_chip(adapter))
50675+ be_intr_set(adapter, false);
50676+
50677+ if (msix)
50678+ be_msix_enable(adapter);
50679
50680 INIT_DELAYED_WORK(&adapter->work, be_worker);
50681- be_netdev_init(netdev);
50682- SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
50683
50684 status = be_setup(adapter);
50685 if (status)
50686- goto stats_clean;
50687+ goto msix_disable;
50688+
50689+ /* Initilize the link status to -1 */
50690+ adapter->link_status = -1;
50691+
50692+ status = be_netdev_init(netdev);
50693+ if (status)
50694+ goto unsetup;
50695+
50696 status = register_netdev(netdev);
50697 if (status != 0)
50698 goto unsetup;
50699
50700- dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
50701+ be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
50702+
50703+ if (be_physfn(adapter) && adapter->num_vfs) {
50704+ u8 mac_speed;
50705+ int link_status;
50706+ u16 def_vlan, vf, lnk_speed;
50707+
50708+ status = be_vf_eth_addr_config(adapter);
50709+ if (status)
50710+ goto unreg_netdev;
50711+
50712+ for (vf = 0; vf < adapter->num_vfs; vf++) {
50713+ status = be_cmd_get_hsw_config(adapter, &def_vlan,
50714+ vf + 1, adapter->vf_cfg[vf].vf_if_handle);
50715+ if (!status)
50716+ adapter->vf_cfg[vf].vf_def_vid = def_vlan;
50717+ else
50718+ goto unreg_netdev;
50719+
50720+ status = be_cmd_link_status_query(adapter, &link_status,
50721+ &mac_speed, &lnk_speed, vf + 1);
50722+ if (!status)
50723+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
50724+ else
50725+ goto unreg_netdev;
50726+ }
50727+ }
50728+ if (be_physfn(adapter)) {
50729+ /* Temp fix ofr bug# 23034. Till ARM
50730+ * f/w fixes privilege lvl */
50731+ be_get_port_names(adapter);
50732+ }
50733+
50734+ /* Enable Vlan capability based on privileges.
50735+ * PF will have Vlan capability anyway. */
50736+ be_cmd_get_fn_privileges(adapter, &en, 0);
50737+
50738+ if ((en & (BE_PRIV_FILTMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG)) ||
50739+ be_physfn(adapter))
50740+ netdev->features |= NETIF_F_HW_VLAN_FILTER;
50741+ else
50742+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
50743+
50744+ dev_info(&pdev->dev, "%s: numa node %d\n", netdev->name,
50745+ dev_to_node(&pdev->dev));
50746+ dev_info(&pdev->dev, "%s %s \"%s\" port %d\n", nic_name(pdev),
50747+ (adapter->port_num > 1 ? "1Gbps NIC" : "10Gbps NIC"),
50748+ adapter->model_number, adapter->hba_port_num);
50749+
50750+
50751+#ifdef CONFIG_PALAU
50752+ be_sysfs_create_group(adapter);
50753+#endif
50754+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
50755 return 0;
50756
50757+unreg_netdev:
50758+ unregister_netdev(netdev);
50759 unsetup:
50760 be_clear(adapter);
50761+msix_disable:
50762+ be_msix_disable(adapter);
50763 stats_clean:
50764 be_stats_cleanup(adapter);
50765 ctrl_clean:
50766 be_ctrl_cleanup(adapter);
50767+free_vf_cfg:
50768+ kfree(adapter->vf_cfg);
50769 free_netdev:
50770- free_netdev(adapter->netdev);
50771+ be_sriov_disable(adapter);
50772+ be_netif_napi_del(netdev);
50773+ free_netdev(netdev);
50774+ pci_set_drvdata(pdev, NULL);
50775 rel_reg:
50776 pci_release_regions(pdev);
50777 disable_dev:
50778@@ -2193,6 +3913,10 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
50779 struct be_adapter *adapter = pci_get_drvdata(pdev);
50780 struct net_device *netdev = adapter->netdev;
50781
50782+ cancel_delayed_work_sync(&adapter->work);
50783+ if (adapter->wol)
50784+ be_setup_wol(adapter, true);
50785+
50786 netif_device_detach(netdev);
50787 if (netif_running(netdev)) {
50788 rtnl_lock();
50789@@ -2202,6 +3926,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
50790 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
50791 be_clear(adapter);
50792
50793+ be_msix_disable(adapter);
50794 pci_save_state(pdev);
50795 pci_disable_device(pdev);
50796 pci_set_power_state(pdev, pci_choose_state(pdev, state));
50797@@ -2223,6 +3948,12 @@ static int be_resume(struct pci_dev *pdev)
50798 pci_set_power_state(pdev, 0);
50799 pci_restore_state(pdev);
50800
50801+ be_msix_enable(adapter);
50802+ /* tell fw we're ready to fire cmds */
50803+ status = be_cmd_fw_init(adapter);
50804+ if (status)
50805+ return status;
50806+
50807 be_setup(adapter);
50808 if (netif_running(netdev)) {
50809 rtnl_lock();
50810@@ -2230,28 +3961,152 @@ static int be_resume(struct pci_dev *pdev)
50811 rtnl_unlock();
50812 }
50813 netif_device_attach(netdev);
50814+
50815+ if (adapter->wol)
50816+ be_setup_wol(adapter, false);
50817+
50818+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
50819 return 0;
50820 }
50821
50822+/*
50823+ * An FLR will stop BE from DMAing any data.
50824+ */
50825+static void be_shutdown(struct pci_dev *pdev)
50826+{
50827+ struct be_adapter *adapter = pci_get_drvdata(pdev);
50828+
50829+ if (!adapter)
50830+ return;
50831+
50832+ cancel_delayed_work_sync(&adapter->work);
50833+
50834+ netif_device_detach(adapter->netdev);
50835+
50836+ if (adapter->wol)
50837+ be_setup_wol(adapter, true);
50838+
50839+ be_cmd_reset_function(adapter);
50840+
50841+ pci_disable_device(pdev);
50842+}
50843+
50844+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
50845+ pci_channel_state_t state)
50846+{
50847+ struct be_adapter *adapter = pci_get_drvdata(pdev);
50848+ struct net_device *netdev = adapter->netdev;
50849+
50850+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
50851+
50852+ adapter->eeh_err = true;
50853+
50854+ netif_device_detach(netdev);
50855+
50856+ if (netif_running(netdev)) {
50857+ rtnl_lock();
50858+ be_close(netdev);
50859+ rtnl_unlock();
50860+ }
50861+ be_clear(adapter);
50862+
50863+ if (state == pci_channel_io_perm_failure)
50864+ return PCI_ERS_RESULT_DISCONNECT;
50865+
50866+ pci_disable_device(pdev);
50867+
50868+ return PCI_ERS_RESULT_NEED_RESET;
50869+}
50870+
50871+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
50872+{
50873+ struct be_adapter *adapter = pci_get_drvdata(pdev);
50874+ int status;
50875+
50876+ dev_info(&adapter->pdev->dev, "EEH reset\n");
50877+ adapter->eeh_err = false;
50878+
50879+ status = pci_enable_device(pdev);
50880+ if (status)
50881+ return PCI_ERS_RESULT_DISCONNECT;
50882+
50883+ pci_set_master(pdev);
50884+ pci_set_power_state(pdev, 0);
50885+ pci_restore_state(pdev);
50886+
50887+ /* Check if card is ok and fw is ready */
50888+ status = be_cmd_POST(adapter);
50889+ if (status)
50890+ return PCI_ERS_RESULT_DISCONNECT;
50891+
50892+ return PCI_ERS_RESULT_RECOVERED;
50893+}
50894+
50895+static void be_eeh_resume(struct pci_dev *pdev)
50896+{
50897+ int status = 0;
50898+ struct be_adapter *adapter = pci_get_drvdata(pdev);
50899+ struct net_device *netdev = adapter->netdev;
50900+
50901+ dev_info(&adapter->pdev->dev, "EEH resume\n");
50902+
50903+ pci_save_state(pdev);
50904+
50905+ /* tell fw we're ready to fire cmds */
50906+ status = be_cmd_fw_init(adapter);
50907+ if (status)
50908+ goto err;
50909+
50910+ status = be_setup(adapter);
50911+ if (status)
50912+ goto err;
50913+
50914+ if (netif_running(netdev)) {
50915+ status = be_open(netdev);
50916+ if (status)
50917+ goto err;
50918+ }
50919+ netif_device_attach(netdev);
50920+ return;
50921+err:
50922+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
50923+ return;
50924+}
50925+
50926+static struct pci_error_handlers be_eeh_handlers = {
50927+ .error_detected = be_eeh_err_detected,
50928+ .slot_reset = be_eeh_reset,
50929+ .resume = be_eeh_resume,
50930+};
50931+
50932 static struct pci_driver be_driver = {
50933 .name = DRV_NAME,
50934 .id_table = be_dev_ids,
50935 .probe = be_probe,
50936 .remove = be_remove,
50937 .suspend = be_suspend,
50938- .resume = be_resume
50939+ .resume = be_resume,
50940+ .shutdown = be_shutdown,
50941+ .err_handler = &be_eeh_handlers
50942 };
50943
50944 static int __init be_init_module(void)
50945 {
50946- if (rx_frag_size != 8192 && rx_frag_size != 4096
50947- && rx_frag_size != 2048) {
50948+ if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
50949+ rx_frag_size != 2048) {
50950 printk(KERN_WARNING DRV_NAME
50951 " : Module param rx_frag_size must be 2048/4096/8192."
50952 " Using 2048\n");
50953 rx_frag_size = 2048;
50954 }
50955
50956+ if (!msix && num_vfs > 0) {
50957+ printk(KERN_WARNING DRV_NAME
50958+ " : MSIx required for num_vfs > 0. Ignoring msix=0\n");
50959+ msix = 1;
50960+ }
50961+
50962+
50963 return pci_register_driver(&be_driver);
50964 }
50965 module_init(be_init_module);
50966diff --git a/drivers/net/benet/be_misc.c b/drivers/net/benet/be_misc.c
50967new file mode 100644
50968index 0000000..4ab499f
50969--- /dev/null
50970+++ b/drivers/net/benet/be_misc.c
50971@@ -0,0 +1,106 @@
50972+/*
50973+ * Copyright (C) 2005 - 2011 Emulex
50974+ * All rights reserved.
50975+ *
50976+ * This program is free software; you can redistribute it and/or
50977+ * modify it under the terms of the GNU General Public License version 2
50978+ * as published by the Free Software Foundation. The full GNU General
50979+ * Public License is included in this distribution in the file called COPYING.
50980+ *
50981+ * Contact Information:
50982+ * linux-drivers@emulex.com
50983+ *
50984+ * Emulex
50985+ * 3333 Susan Street
50986+ * Costa Mesa, CA 92626
50987+ */
50988+#include "be.h"
50989+
50990+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
50991+static ssize_t
50992+flash_fw_store(struct class_device *cd, const char *buf, size_t len)
50993+{
50994+ struct be_adapter *adapter =
50995+ netdev_priv(container_of(cd, struct net_device, class_dev));
50996+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
50997+ int status;
50998+
50999+ if (!capable(CAP_NET_ADMIN))
51000+ return -EPERM;
51001+
51002+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51003+ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51004+
51005+ /* Removing new-line char given by sysfs */
51006+ file_name[strlen(file_name) - 1] = '\0';
51007+
51008+ status = be_load_fw(adapter, file_name);
51009+ if (!status)
51010+ return len;
51011+ else
51012+ return status;
51013+}
51014+
51015+static CLASS_DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51016+
51017+static struct attribute *benet_attrs[] = {
51018+ &class_device_attr_flash_fw.attr,
51019+ NULL,
51020+};
51021+#else
51022+
51023+static ssize_t
51024+flash_fw_store(struct device *dev, struct device_attribute *attr,
51025+ const char *buf, size_t len)
51026+{
51027+ struct be_adapter *adapter =
51028+ netdev_priv(container_of(dev, struct net_device, dev));
51029+ char file_name[ETHTOOL_FLASH_MAX_FILENAME];
51030+ int status;
51031+
51032+ if (!capable(CAP_NET_ADMIN))
51033+ return -EPERM;
51034+
51035+ file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
51036+ strncpy(file_name, buf, (ETHTOOL_FLASH_MAX_FILENAME - 1));
51037+
51038+ /* Removing new-line char given by sysfs */
51039+ file_name[strlen(file_name) - 1] = '\0';
51040+
51041+ status = be_load_fw(adapter, file_name);
51042+ if (!status)
51043+ return len;
51044+ else
51045+ return status;
51046+}
51047+
51048+static DEVICE_ATTR(flash_fw, S_IWUSR, NULL, flash_fw_store);
51049+
51050+static struct attribute *benet_attrs[] = {
51051+ &dev_attr_flash_fw.attr,
51052+ NULL,
51053+};
51054+#endif
51055+
51056+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
51057+#define CLASS_DEV class_dev
51058+#else
51059+#define CLASS_DEV dev
51060+#endif
51061+
51062+static struct attribute_group benet_attr_group = {.attrs = benet_attrs };
51063+
51064+void be_sysfs_create_group(struct be_adapter *adapter)
51065+{
51066+ int status;
51067+
51068+ status = sysfs_create_group(&adapter->netdev->CLASS_DEV.kobj,
51069+ &benet_attr_group);
51070+ if (status)
51071+ dev_err(&adapter->pdev->dev, "Could not create sysfs group\n");
51072+}
51073+
51074+void be_sysfs_remove_group(struct be_adapter *adapter)
51075+{
51076+ sysfs_remove_group(&adapter->netdev->CLASS_DEV.kobj, &benet_attr_group);
51077+}
51078diff --git a/drivers/net/benet/be_proc.c b/drivers/net/benet/be_proc.c
51079new file mode 100644
51080index 0000000..0bfdb3b
51081--- /dev/null
51082+++ b/drivers/net/benet/be_proc.c
51083@@ -0,0 +1,513 @@
51084+/*
51085+ * Copyright (C) 2005 - 2011 ServerEngines
51086+ * All rights reserved.
51087+ *
51088+ * This program is free software; you can redistribute it and/or
51089+ * modify it under the terms of the GNU General Public License version 2
51090+ * as published by the Free Software Foundation. The full GNU General
51091+ * Public License is included in this distribution in the file called COPYING.
51092+ *
51093+ * Contact Information:
51094+ * linux-drivers@serverengines.com
51095+ *
51096+ * ServerEngines
51097+ * 209 N. Fair Oaks Ave
51098+ * Sunnyvale, CA 94085
51099+ */
51100+#include <linux/proc_fs.h>
51101+#include "be.h"
51102+
51103+char *be_adpt_name[] = {
51104+ "driver/be2net0",
51105+ "driver/be2net1",
51106+ "driver/be2net2",
51107+ "driver/be2net3",
51108+ "driver/be2net4",
51109+ "driver/be2net5",
51110+ "driver/be2net6",
51111+ "driver/be2net7"
51112+};
51113+
51114+#define MAX_BE_DEVICES 8
51115+struct proc_dir_entry *be_proc_dir[MAX_BE_DEVICES];
51116+
51117+/*File to read Eth Ring Information */
51118+#define BE_ETH_RING_FILE "eth_ring"
51119+#define BE_DRVR_STAT_FILE "drvr_stat"
51120+
51121+/*
51122+ * this file enables user to read a 32 bit CSR register.
51123+ * to read 32 bit value of a register at offset 0x1234,
51124+ * first write the offset 0x1234 (echo "0x1234") in
51125+ * the file and then read the value from this file.
51126+ * the written offset is latched until another value is written
51127+ */
51128+#define BE_CSR_R_FILE "csrr"
51129+/*
51130+ * this file enables user to write to a 32 bit CSR register.
51131+ * to write a value 0xdeadbeef to a register at offset 0x1234,
51132+ * write 0x1234 0xdeadbeef (echo "0x1234 0xdeadbeeb") to
51133+ * the file.
51134+ */
51135+#define BE_CSR_W_FILE "csrw"
51136+
51137+#define BE_PROC_MODE 0600
51138+
51139+static char read_eth_ring_buf[4096];
51140+static int read_eth_ring_count;
51141+
51142+/*
51143+ * Get Various Eth Ring Properties
51144+ */
51145+static int proc_eth_read_ring(char *page, char **start,
51146+ off_t off, int count, int *eof, void *data)
51147+{
51148+ int i, n;
51149+ char *p = read_eth_ring_buf;
51150+ struct be_adapter *adapter = (struct be_adapter *) data;
51151+
51152+ if (off == 0) {
51153+ /* Reset read_eth_ring_count */
51154+ read_eth_ring_count = 0;
51155+
51156+ n = sprintf(p, " PhyAddr VirtAddr Size TotalEntries ProducerIndex ConsumerIndex NumUsed\n");
51157+ p += n;
51158+ read_eth_ring_count += n;
51159+
51160+ n = sprintf(p, " ------- -------- ---- ------------ ------------- ------------- -------\n");
51161+ p += n;
51162+ read_eth_ring_count += n;
51163+
51164+ n = sprintf(p, "%s", "EthSendRing");
51165+ p += n;
51166+ read_eth_ring_count += n;
51167+
51168+ n = sprintf(p, " %7lx %8p %4u %12u %13u %13u %7u \n",
51169+ (long) adapter->tx_obj.q.dma_mem.dma,
51170+ (void *)adapter->tx_obj.q.dma_mem.va,
51171+ (u32) (adapter->tx_obj.q.len *
51172+ sizeof(struct be_eth_wrb)),
51173+ adapter->tx_obj.q.len, adapter->tx_obj.q.head,
51174+ adapter->tx_obj.q.tail,
51175+ atomic_read(&adapter->tx_obj.q.used));
51176+
51177+ p += n;
51178+ read_eth_ring_count += n;
51179+
51180+ /* Get Eth Send Compl Queue Details */
51181+ n = sprintf(p, "%s", "EthSendCmplRing");
51182+ p += n;
51183+ read_eth_ring_count += n;
51184+
51185+ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51186+ (long)adapter->tx_obj.cq.dma_mem.dma,
51187+ (void *)adapter->tx_obj.cq.dma_mem.va,
51188+ (u32) (adapter->tx_obj.cq.len *
51189+ sizeof(struct be_eth_tx_compl)),
51190+ adapter->tx_obj.cq.len, "NA",
51191+ adapter->tx_obj.cq.tail, "NA");
51192+
51193+ p += n;
51194+ read_eth_ring_count += n;
51195+ /* Get Eth Rx Queue Details */
51196+ n = sprintf(p, "%s", "EthRxRing");
51197+ p += n;
51198+ read_eth_ring_count += n;
51199+
51200+ n = sprintf(p, " %7lx %8p %4u %12u %13u %13s %7u \n",
51201+ (long)adapter->rx_obj.q.dma_mem.dma,
51202+ (void *)adapter->rx_obj.q.dma_mem.va,
51203+ (u32) (adapter->rx_obj.q.len *
51204+ sizeof(struct be_eth_rx_d)),
51205+ adapter->rx_obj.q.len, adapter->rx_obj.q.head,"NA",
51206+ atomic_read(&adapter->rx_obj.q.used));
51207+ p += n;
51208+ read_eth_ring_count += n;
51209+
51210+ /* Get Eth Unicast Rx Compl Queue Details */
51211+ n = sprintf(p, "%s", "EthRxCmplRing");
51212+ p += n;
51213+ read_eth_ring_count += n;
51214+
51215+ n = sprintf(p, " %7lx %8p %4u %12u %13s %13u %7s\n",
51216+ (long)adapter->rx_obj.cq.dma_mem.dma,
51217+ (void *)adapter->rx_obj.cq.dma_mem.va,
51218+ (u32) (adapter->rx_obj.cq.len *
51219+ sizeof(struct be_eth_rx_compl)),
51220+ adapter->rx_obj.cq.len, "NA",
51221+ adapter->rx_obj.cq.tail, "NA");
51222+ p += n;
51223+ read_eth_ring_count += n;
51224+
51225+ /* Get Eth Event Queue Details */
51226+ n = sprintf(p, "%s", "EthTxEventRing");
51227+ p += n;
51228+ read_eth_ring_count += n;
51229+
51230+ n = sprintf(p,
51231+ " %7lx %8p %4u %12u %13s %13u %7s\n",
51232+ (long) adapter->tx_eq.q.dma_mem.dma,
51233+ (void *)adapter->tx_eq.q.dma_mem.va,
51234+ (u32) (adapter->tx_eq.q.len *
51235+ sizeof(struct be_eq_entry)),
51236+ adapter->tx_eq.q.len, "NA",
51237+ adapter->tx_eq.q.tail, "NA");
51238+
51239+ p += n;
51240+ read_eth_ring_count += n;
51241+
51242+ /* Get Eth Event Queue Details */
51243+ n = sprintf(p, "%s", "EthRxEventRing");
51244+ p += n;
51245+ read_eth_ring_count += n;
51246+
51247+ n = sprintf(p,
51248+ " %7lx %8p %4u %12u %13s %13u %7s\n",
51249+ (long) adapter->rx_eq.q.dma_mem.dma,
51250+ (void *)adapter->rx_eq.q.dma_mem.va,
51251+ (u32) (adapter->rx_eq.q.len *
51252+ sizeof(struct be_eq_entry)),
51253+ adapter->rx_eq.q.len, "NA",
51254+ adapter->rx_eq.q.tail, "NA");
51255+
51256+ p += n;
51257+ read_eth_ring_count += n;
51258+ }
51259+
51260+ *start = page;
51261+ /* copy whatever we can */
51262+ if (count < (read_eth_ring_count - off)) {
51263+ i = count;
51264+ *eof = 0; /* More bytes left */
51265+ } else {
51266+ i = read_eth_ring_count - off;
51267+ *eof = 1; /* Nothing left. indicate EOF */
51268+ }
51269+
51270+ memcpy(page, read_eth_ring_buf + off, i);
51271+ return (i);
51272+}
51273+
51274+static int proc_eth_write_ring(struct file *file,
51275+ const char *buffer, unsigned long count,
51276+ void *data)
51277+{
51278+ return (count); /* we do not support write */
51279+}
51280+
51281+/*
51282+ * read the driver stats.
51283+ */
51284+static int proc_read_drvr_stat(char *page, char **start,
51285+ off_t off, int count, int *eof, void *data)
51286+{
51287+ int n, lro_cp;
51288+ char *p = page;
51289+ struct be_adapter *adapter = (struct be_adapter *) data;
51290+ struct net_device *netdev = adapter->netdev;
51291+
51292+ if (off == 0) {
51293+ n = sprintf(p, "interface = %s\n", netdev->name);
51294+ p += n;
51295+ n = sprintf(p, "tx_reqs = %d\n",
51296+ drvr_stats(adapter)->be_tx_reqs);
51297+ p += n;
51298+ n = sprintf(p, "tx_stops = %d\n",
51299+ drvr_stats(adapter)->be_tx_stops);
51300+ p += n;
51301+ n = sprintf(p, "fwd_reqs = %d\n",
51302+ drvr_stats(adapter)->be_fwd_reqs);
51303+ p += n;
51304+ n = sprintf(p, "tx_wrbs = %d\n",
51305+ drvr_stats(adapter)->be_tx_wrbs);
51306+ p += n;
51307+ n = sprintf(p, "rx_poll = %d\n", drvr_stats(adapter)->be_rx_polls);
51308+ p += n;
51309+ n = sprintf(p, "tx_events = %d\n",
51310+ drvr_stats(adapter)->be_tx_events);
51311+ p += n;
51312+ n = sprintf(p, "rx_events = %d\n",
51313+ drvr_stats(adapter)->be_rx_events);
51314+ p += n;
51315+ n = sprintf(p, "tx_compl = %d\n",
51316+ drvr_stats(adapter)->be_tx_compl);
51317+ p += n;
51318+ n = sprintf(p, "rx_compl = %d\n",
51319+ drvr_stats(adapter)->be_rx_compl);
51320+ p += n;
51321+ n = sprintf(p, "ethrx_post_fail = %d\n",
51322+ drvr_stats(adapter)->be_ethrx_post_fail);
51323+ p += n;
51324+ n = sprintf(p, "802.3_dropped_frames = %d\n",
51325+ drvr_stats(adapter)->be_802_3_dropped_frames);
51326+ p += n;
51327+ n = sprintf(p, "802.3_malformed_frames = %d\n",
51328+ drvr_stats(adapter)->be_802_3_malformed_frames);
51329+ p += n;
51330+ n = sprintf(p, "eth_tx_rate = %d\n",
51331+ drvr_stats(adapter)->be_tx_rate);
51332+ p += n;
51333+ n = sprintf(p, "eth_rx_rate = %d\n",
51334+ drvr_stats(adapter)->be_rx_rate);
51335+ p += n;
51336+
51337+ lro_cp = (drvr_stats(adapter)->be_lro_hgram_data[0] +
51338+ drvr_stats(adapter)->be_lro_hgram_data[1] +
51339+ drvr_stats(adapter)->be_lro_hgram_data[2] +
51340+ drvr_stats(adapter)->be_lro_hgram_data[3] +
51341+ drvr_stats(adapter)->be_lro_hgram_data[4] +
51342+ drvr_stats(adapter)->be_lro_hgram_data[5] +
51343+ drvr_stats(adapter)->be_lro_hgram_data[6] +
51344+ drvr_stats(adapter)->be_lro_hgram_data[7])/100;
51345+ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51346+ n = sprintf(p,
51347+ "LRO data count %% histogram (1, 2-3, 4-5,..,>=16) = "
51348+ "%d, %d, %d, %d - %d, %d, %d, %d\n",
51349+ drvr_stats(adapter)->be_lro_hgram_data[0]/lro_cp,
51350+ drvr_stats(adapter)->be_lro_hgram_data[1]/lro_cp,
51351+ drvr_stats(adapter)->be_lro_hgram_data[2]/lro_cp,
51352+ drvr_stats(adapter)->be_lro_hgram_data[3]/lro_cp,
51353+ drvr_stats(adapter)->be_lro_hgram_data[4]/lro_cp,
51354+ drvr_stats(adapter)->be_lro_hgram_data[5]/lro_cp,
51355+ drvr_stats(adapter)->be_lro_hgram_data[6]/lro_cp,
51356+ drvr_stats(adapter)->be_lro_hgram_data[7]/lro_cp);
51357+ p += n;
51358+
51359+ lro_cp = (drvr_stats(adapter)->be_lro_hgram_ack[0] +
51360+ drvr_stats(adapter)->be_lro_hgram_ack[1] +
51361+ drvr_stats(adapter)->be_lro_hgram_ack[2] +
51362+ drvr_stats(adapter)->be_lro_hgram_ack[3] +
51363+ drvr_stats(adapter)->be_lro_hgram_ack[4] +
51364+ drvr_stats(adapter)->be_lro_hgram_ack[5] +
51365+ drvr_stats(adapter)->be_lro_hgram_ack[6] +
51366+ drvr_stats(adapter)->be_lro_hgram_ack[7])/100;
51367+ lro_cp = (lro_cp == 0) ? 1 : lro_cp; /* avoid divide by 0 */
51368+ n = sprintf(p,
51369+ "LRO ack count %% histogram (1, 2-3, 4-5,..,>=16) = "
51370+ "%d, %d, %d, %d - %d, %d, %d, %d\n",
51371+ drvr_stats(adapter)->be_lro_hgram_ack[0]/lro_cp,
51372+ drvr_stats(adapter)->be_lro_hgram_ack[1]/lro_cp,
51373+ drvr_stats(adapter)->be_lro_hgram_ack[2]/lro_cp,
51374+ drvr_stats(adapter)->be_lro_hgram_ack[3]/lro_cp,
51375+ drvr_stats(adapter)->be_lro_hgram_ack[4]/lro_cp,
51376+ drvr_stats(adapter)->be_lro_hgram_ack[5]/lro_cp,
51377+ drvr_stats(adapter)->be_lro_hgram_ack[6]/lro_cp,
51378+ drvr_stats(adapter)->be_lro_hgram_ack[7]/lro_cp);
51379+ p += n;
51380+ n = sprintf(p, "rx_eq_delay = %d \n", adapter->rx_eq.cur_eqd);
51381+ p += n;
51382+ n = sprintf(p, "rx frags per sec=%d \n",
51383+ drvr_stats(adapter)->be_rx_fps);
51384+ p += n;
51385+
51386+ }
51387+ *eof = 1;
51388+ return (p - page);
51389+}
51390+
51391+static int proc_write_drvr_stat(struct file *file,
51392+ const char *buffer, unsigned long count,
51393+ void *data)
51394+{
51395+ struct be_adapter *adapter = (struct be_adapter *) data;
51396+
51397+ memset(&(adapter->stats.drvr_stats), 0,
51398+ sizeof(adapter->stats.drvr_stats));
51399+ return (count); /* we do not support write */
51400+}
51401+
51402+#if 0
51403+/* the following are some of the functions that are needed here
51404+ * until all initializations are done by MPU.
51405+ */
51406+
51407+u32
51408+CsrReadDr(void* BaseAddress, u32 Offset)
51409+{
51410+ u32 *rp;
51411+
51412+ rp = (u32 *) (((u8 *) BaseAddress) + Offset);
51413+ return (*rp);
51414+}
51415+
51416+/*!
51417+
51418+@brief
51419+ This routine writes to a register located within the CSR
51420+ space for a given function object.
51421+
51422+@param
51423+ FuncObj - Pointer to the function object to read from.
51424+
51425+@param
51426+ Offset - The Offset (in bytes) to write to within the function's CSR space.
51427+
51428+@param
51429+ Value - The value to write to the register.
51430+
51431+@return
51432+
51433+@note
51434+ IRQL: any
51435+
51436+*/
51437+void
51438+CsrWriteDr(void* BaseAddress, u32 Offset, u32 Value)
51439+{
51440+ u32 *Register;
51441+
51442+ Register = (u32 *) (((u8 *) BaseAddress) + Offset);
51443+
51444+ //TRACE(DL_INFO, "CsrWrite[ %X ] <= %X", Register, Value);
51445+ *Register = Value;
51446+}
51447+u32 be_proc_csrr_offset = -1; /* to latch the offset of next CSR Read req. */
51448+
51449+/*
51450+ * read the csr_r file. return the 32 bit register value from
51451+ * CSR space at offset latched in the global location
51452+ * be_proc_csrr_offset
51453+ */
51454+static int proc_read_csr_r(char *page, char **start,
51455+ off_t off, int count, int *eof, void *data)
51456+{
51457+ struct be_adapter * adapter = (struct be_adapter *)data;
51458+ u32 val;
51459+ int n = 0;
51460+ if (be_proc_csrr_offset == -1)
51461+ return -EINVAL;
51462+
51463+ if (off == 0) {
51464+ /* read the CSR at offset be_proc_csrr_offset and return */
51465+ val = CsrReadDr(adapter->csr_va, be_proc_csrr_offset);
51466+ n = sprintf(page, "0x%x\n", val);
51467+ }
51468+ *eof = 1;
51469+ return n;
51470+}
51471+
51472+/*
51473+ * save the written value in be_proc_csrr_offset for next
51474+ * read from the file
51475+ */
51476+static int proc_write_csr_r(struct file *file,
51477+ const char *buffer, unsigned long count, void *data)
51478+{
51479+ char buf[64];
51480+ u32 n;
51481+
51482+ if (count > sizeof(buf) + 1)
51483+ return -EINVAL;
51484+ if (copy_from_user(buf, buffer, count))
51485+ return -EFAULT;
51486+ buf[count] = '\0';
51487+
51488+ n = simple_strtoul(buf, NULL, 16);
51489+ if (n < 0x50000)
51490+ be_proc_csrr_offset = n;
51491+ return (count);
51492+}
51493+
51494+/*
51495+ * return the latched offset for reading the csr_r file.
51496+ */
51497+static int proc_read_csr_w(char *page, char **start,
51498+ off_t off, int count, int *eof, void *data)
51499+{
51500+
51501+ *eof = 1;
51502+ return sprintf(page, "0x%x\n", be_proc_csrr_offset);
51503+}
51504+
51505+/*
51506+ * the incoming string is of the form "<offset> <value>"
51507+ * where the offset is the offset of the register to be written
51508+ * and value is the value to be written.
51509+ */
51510+static int proc_write_csr_w(struct file *file,
51511+ const char *buffer, unsigned long count,
51512+ void *data)
51513+{
51514+ char buf[64];
51515+ char *p;
51516+ u32 n, val;
51517+ struct be_adapter * adapter = (struct be_adapter *)data;
51518+
51519+ if (count > sizeof(buf) + 1)
51520+ return -EINVAL;
51521+ if (copy_from_user(buf, buffer, count))
51522+ return -EFAULT;
51523+ buf[count] = '\0';
51524+
51525+ n = simple_strtoul(buf, &p, 16);
51526+ if (n > 0x50000)
51527+ return -EINVAL;
51528+
51529+ /* now get the actual value to be written */
51530+ while (*p == ' ' || *p == '\t')
51531+ p++;
51532+ val = simple_strtoul(p, NULL, 16);
51533+ CsrWriteDr(adapter->csr_va, n, val);
51534+ return (count);
51535+}
51536+#endif
51537+
51538+void be_init_procfs(struct be_adapter *adapter, int adapt_num)
51539+{
51540+ static struct proc_dir_entry *pde;
51541+
51542+ if (adapt_num > MAX_BE_DEVICES - 1)
51543+ return;
51544+
51545+ /* create directory */
51546+ be_proc_dir[adapt_num] =
51547+ proc_mkdir(be_adpt_name[adapt_num], NULL);
51548+ if (be_proc_dir[adapt_num]) {
51549+ (be_proc_dir[adapt_num])->owner = THIS_MODULE;
51550+ }
51551+
51552+ pde = create_proc_entry(BE_ETH_RING_FILE, BE_PROC_MODE,
51553+ be_proc_dir[adapt_num]);
51554+ if (pde) {
51555+ pde->read_proc = proc_eth_read_ring;
51556+ pde->write_proc = proc_eth_write_ring;
51557+ pde->data = adapter;
51558+ pde->owner = THIS_MODULE;
51559+ }
51560+
51561+ pde = create_proc_entry(BE_DRVR_STAT_FILE, BE_PROC_MODE,
51562+ be_proc_dir[adapt_num]);
51563+ if (pde) {
51564+ pde->read_proc = proc_read_drvr_stat;
51565+ pde->write_proc = proc_write_drvr_stat;
51566+ pde->data = adapter;
51567+ pde->owner = THIS_MODULE;
51568+ }
51569+
51570+#if 0
51571+ if ((pde = create_proc_entry(BE_CSR_R_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
51572+ pde->read_proc = proc_read_csr_r;
51573+ pde->write_proc = proc_write_csr_r;
51574+ pde->data = adapter;
51575+ pde->owner = THIS_MODULE;
51576+ }
51577+
51578+ if ((pde = create_proc_entry(BE_CSR_W_FILE, BE_PROC_MODE, be_proc_dir[adapt_num]))) {
51579+ pde->read_proc = proc_read_csr_w;
51580+ pde->write_proc = proc_write_csr_w;
51581+ pde->data = adapter;
51582+ pde->owner = THIS_MODULE;
51583+ }
51584+#endif
51585+}
51586+
51587+void be_cleanup_procfs(struct be_adapter *adapter, int adapt_num)
51588+{
51589+ if (adapt_num > MAX_BE_DEVICES - 1)
51590+ return;
51591+ remove_proc_entry(BE_ETH_RING_FILE, be_proc_dir[adapt_num]);
51592+ remove_proc_entry(BE_DRVR_STAT_FILE, be_proc_dir[adapt_num]);
51593+ remove_proc_entry(BE_CSR_R_FILE, be_proc_dir[adapt_num]);
51594+ remove_proc_entry(BE_CSR_W_FILE, be_proc_dir[adapt_num]);
51595+ remove_proc_entry(be_adpt_name[adapt_num], NULL);
51596+}
51597diff --git a/drivers/net/benet/version.h b/drivers/net/benet/version.h
51598new file mode 100644
51599index 0000000..c7ed692
51600--- /dev/null
51601+++ b/drivers/net/benet/version.h
51602@@ -0,0 +1,51 @@
51603+#define STR_BE_BRANCH "0" \r
51604+#define STR_BE_BUILD "479" \r
51605+#define STR_BE_DOT "0"\r
51606+#define STR_BE_MINOR "0"\r
51607+#define STR_BE_MAJOR "4"\r
51608+\r
51609+#define BE_BRANCH 0 \r
51610+#define BE_BUILD 479 \r
51611+#define BE_DOT 0\r
51612+#define BE_MINOR 0\r
51613+#define BE_MAJOR 4\r
51614+\r
51615+#define MGMT_BRANCH 0\r
51616+#define MGMT_BUILDNUM 479\r
51617+#define MGMT_MINOR 0\r
51618+#define MGMT_MAJOR 4\r
51619+\r
51620+#define BE_REDBOOT_VERSION "2.0.5.0"\r
51621+\r
51622+//start-auto\r
51623+#define BUILD_MONTH "12"\r
51624+#define BUILD_MONTH_NAME "December"\r
51625+#define BUILD_DAY "6"\r
51626+#define BUILD_YEAR "2011"\r
51627+#define BUILD_24HOUR "21"\r
51628+#define BUILD_12HOUR "9"\r
51629+#define BUILD_AM_PM "PM"\r
51630+#define BUILD_MIN "48"\r
51631+#define BUILD_SEC "05"\r
51632+#define BUILD_MONTH_NUMBER 12\r
51633+#define BUILD_DAY_NUMBER 6\r
51634+#define BUILD_YEAR_NUMBER 2011\r
51635+#define BUILD_24HOUR_NUMBER 21\r
51636+#define BUILD_12HOUR_NUMBER 9\r
51637+#define BUILD_MIN_NUMBER 48\r
51638+#define BUILD_SEC_NUMBER 5\r
51639+#undef MAJOR_BUILD\r
51640+#undef MINOR_BUILD\r
51641+#undef DOT_BUILD\r
51642+#define NUMBERED_BUILD\r
51643+#undef BRANCH_BUILD\r
51644+//end-auto\r
51645+\r
51646+#define ELX_FCOE_XROM_BIOS_VER "7.03a1"\r
51647+#define ELX_FCoE_X86_VER "4.02a1"\r
51648+#define ELX_FCoE_EFI_VER "5.01a1"\r
51649+#define ELX_FCoE_FCODE_VER "4.01a0"\r
51650+#define ELX_PXE_BIOS_VER "3.00a5"\r
51651+#define ELX_UEFI_NIC_VER "2.10A10"\r
51652+#define ELX_UEFI_FCODE_VER "1.10A0"\r
51653+#define ELX_ISCSI_BIOS_VER "1.00A8"\r
51654diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
51655index 4874b2b..67f8526 100644
51656--- a/drivers/net/bnx2.c
51657+++ b/drivers/net/bnx2.c
51658@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
51659 int rc = 0;
51660 u32 magic, csum;
51661
51662+ pax_track_stack();
51663+
51664 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
51665 goto test_nvram_done;
51666
51667diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
51668index fd3eb07..8a6978d 100644
51669--- a/drivers/net/cxgb3/l2t.h
51670+++ b/drivers/net/cxgb3/l2t.h
51671@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
51672 */
51673 struct l2t_skb_cb {
51674 arp_failure_handler_func arp_failure_handler;
51675-};
51676+} __no_const;
51677
51678 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
51679
51680diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
51681index 032cfe0..411af379 100644
51682--- a/drivers/net/cxgb3/t3_hw.c
51683+++ b/drivers/net/cxgb3/t3_hw.c
51684@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
51685 int i, addr, ret;
51686 struct t3_vpd vpd;
51687
51688+ pax_track_stack();
51689+
51690 /*
51691 * Card information is normally at VPD_BASE but some early cards had
51692 * it at 0.
51693diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
51694index d1e0563..b9e129c 100644
51695--- a/drivers/net/e1000e/82571.c
51696+++ b/drivers/net/e1000e/82571.c
51697@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
51698 {
51699 struct e1000_hw *hw = &adapter->hw;
51700 struct e1000_mac_info *mac = &hw->mac;
51701- struct e1000_mac_operations *func = &mac->ops;
51702+ e1000_mac_operations_no_const *func = &mac->ops;
51703 u32 swsm = 0;
51704 u32 swsm2 = 0;
51705 bool force_clear_smbi = false;
51706@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
51707 temp = er32(ICRXDMTC);
51708 }
51709
51710-static struct e1000_mac_operations e82571_mac_ops = {
51711+static const struct e1000_mac_operations e82571_mac_ops = {
51712 /* .check_mng_mode: mac type dependent */
51713 /* .check_for_link: media type dependent */
51714 .id_led_init = e1000e_id_led_init,
51715@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
51716 .setup_led = e1000e_setup_led_generic,
51717 };
51718
51719-static struct e1000_phy_operations e82_phy_ops_igp = {
51720+static const struct e1000_phy_operations e82_phy_ops_igp = {
51721 .acquire_phy = e1000_get_hw_semaphore_82571,
51722 .check_reset_block = e1000e_check_reset_block_generic,
51723 .commit_phy = NULL,
51724@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
51725 .cfg_on_link_up = NULL,
51726 };
51727
51728-static struct e1000_phy_operations e82_phy_ops_m88 = {
51729+static const struct e1000_phy_operations e82_phy_ops_m88 = {
51730 .acquire_phy = e1000_get_hw_semaphore_82571,
51731 .check_reset_block = e1000e_check_reset_block_generic,
51732 .commit_phy = e1000e_phy_sw_reset,
51733@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
51734 .cfg_on_link_up = NULL,
51735 };
51736
51737-static struct e1000_phy_operations e82_phy_ops_bm = {
51738+static const struct e1000_phy_operations e82_phy_ops_bm = {
51739 .acquire_phy = e1000_get_hw_semaphore_82571,
51740 .check_reset_block = e1000e_check_reset_block_generic,
51741 .commit_phy = e1000e_phy_sw_reset,
51742@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
51743 .cfg_on_link_up = NULL,
51744 };
51745
51746-static struct e1000_nvm_operations e82571_nvm_ops = {
51747+static const struct e1000_nvm_operations e82571_nvm_ops = {
51748 .acquire_nvm = e1000_acquire_nvm_82571,
51749 .read_nvm = e1000e_read_nvm_eerd,
51750 .release_nvm = e1000_release_nvm_82571,
51751diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
51752index 47db9bd..fa58ccd 100644
51753--- a/drivers/net/e1000e/e1000.h
51754+++ b/drivers/net/e1000e/e1000.h
51755@@ -375,9 +375,9 @@ struct e1000_info {
51756 u32 pba;
51757 u32 max_hw_frame_size;
51758 s32 (*get_variants)(struct e1000_adapter *);
51759- struct e1000_mac_operations *mac_ops;
51760- struct e1000_phy_operations *phy_ops;
51761- struct e1000_nvm_operations *nvm_ops;
51762+ const struct e1000_mac_operations *mac_ops;
51763+ const struct e1000_phy_operations *phy_ops;
51764+ const struct e1000_nvm_operations *nvm_ops;
51765 };
51766
51767 /* hardware capability, feature, and workaround flags */
51768diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
51769index ae5d736..e9a93a1 100644
51770--- a/drivers/net/e1000e/es2lan.c
51771+++ b/drivers/net/e1000e/es2lan.c
51772@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
51773 {
51774 struct e1000_hw *hw = &adapter->hw;
51775 struct e1000_mac_info *mac = &hw->mac;
51776- struct e1000_mac_operations *func = &mac->ops;
51777+ e1000_mac_operations_no_const *func = &mac->ops;
51778
51779 /* Set media type */
51780 switch (adapter->pdev->device) {
51781@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
51782 temp = er32(ICRXDMTC);
51783 }
51784
51785-static struct e1000_mac_operations es2_mac_ops = {
51786+static const struct e1000_mac_operations es2_mac_ops = {
51787 .id_led_init = e1000e_id_led_init,
51788 .check_mng_mode = e1000e_check_mng_mode_generic,
51789 /* check_for_link dependent on media type */
51790@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
51791 .setup_led = e1000e_setup_led_generic,
51792 };
51793
51794-static struct e1000_phy_operations es2_phy_ops = {
51795+static const struct e1000_phy_operations es2_phy_ops = {
51796 .acquire_phy = e1000_acquire_phy_80003es2lan,
51797 .check_reset_block = e1000e_check_reset_block_generic,
51798 .commit_phy = e1000e_phy_sw_reset,
51799@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
51800 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
51801 };
51802
51803-static struct e1000_nvm_operations es2_nvm_ops = {
51804+static const struct e1000_nvm_operations es2_nvm_ops = {
51805 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
51806 .read_nvm = e1000e_read_nvm_eerd,
51807 .release_nvm = e1000_release_nvm_80003es2lan,
51808diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
51809index 11f3b7c..6381887 100644
51810--- a/drivers/net/e1000e/hw.h
51811+++ b/drivers/net/e1000e/hw.h
51812@@ -753,6 +753,7 @@ struct e1000_mac_operations {
51813 s32 (*setup_physical_interface)(struct e1000_hw *);
51814 s32 (*setup_led)(struct e1000_hw *);
51815 };
51816+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
51817
51818 /* Function pointers for the PHY. */
51819 struct e1000_phy_operations {
51820@@ -774,6 +775,7 @@ struct e1000_phy_operations {
51821 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
51822 s32 (*cfg_on_link_up)(struct e1000_hw *);
51823 };
51824+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
51825
51826 /* Function pointers for the NVM. */
51827 struct e1000_nvm_operations {
51828@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
51829 s32 (*validate_nvm)(struct e1000_hw *);
51830 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
51831 };
51832+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
51833
51834 struct e1000_mac_info {
51835- struct e1000_mac_operations ops;
51836+ e1000_mac_operations_no_const ops;
51837
51838 u8 addr[6];
51839 u8 perm_addr[6];
51840@@ -823,7 +826,7 @@ struct e1000_mac_info {
51841 };
51842
51843 struct e1000_phy_info {
51844- struct e1000_phy_operations ops;
51845+ e1000_phy_operations_no_const ops;
51846
51847 enum e1000_phy_type type;
51848
51849@@ -857,7 +860,7 @@ struct e1000_phy_info {
51850 };
51851
51852 struct e1000_nvm_info {
51853- struct e1000_nvm_operations ops;
51854+ e1000_nvm_operations_no_const ops;
51855
51856 enum e1000_nvm_type type;
51857 enum e1000_nvm_override override;
51858diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
51859index de39f9a..e28d3e0 100644
51860--- a/drivers/net/e1000e/ich8lan.c
51861+++ b/drivers/net/e1000e/ich8lan.c
51862@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
51863 }
51864 }
51865
51866-static struct e1000_mac_operations ich8_mac_ops = {
51867+static const struct e1000_mac_operations ich8_mac_ops = {
51868 .id_led_init = e1000e_id_led_init,
51869 .check_mng_mode = e1000_check_mng_mode_ich8lan,
51870 .check_for_link = e1000_check_for_copper_link_ich8lan,
51871@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
51872 /* id_led_init dependent on mac type */
51873 };
51874
51875-static struct e1000_phy_operations ich8_phy_ops = {
51876+static const struct e1000_phy_operations ich8_phy_ops = {
51877 .acquire_phy = e1000_acquire_swflag_ich8lan,
51878 .check_reset_block = e1000_check_reset_block_ich8lan,
51879 .commit_phy = NULL,
51880@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
51881 .write_phy_reg = e1000e_write_phy_reg_igp,
51882 };
51883
51884-static struct e1000_nvm_operations ich8_nvm_ops = {
51885+static const struct e1000_nvm_operations ich8_nvm_ops = {
51886 .acquire_nvm = e1000_acquire_nvm_ich8lan,
51887 .read_nvm = e1000_read_nvm_ich8lan,
51888 .release_nvm = e1000_release_nvm_ich8lan,
51889diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
51890index 18d5fbb..542d96d 100644
51891--- a/drivers/net/fealnx.c
51892+++ b/drivers/net/fealnx.c
51893@@ -151,7 +151,7 @@ struct chip_info {
51894 int flags;
51895 };
51896
51897-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
51898+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
51899 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
51900 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
51901 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
51902diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
51903index 0e5b54b..b503f82 100644
51904--- a/drivers/net/hamradio/6pack.c
51905+++ b/drivers/net/hamradio/6pack.c
51906@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
51907 unsigned char buf[512];
51908 int count1;
51909
51910+ pax_track_stack();
51911+
51912 if (!count)
51913 return;
51914
51915diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
51916index 5862282..7cce8cb 100644
51917--- a/drivers/net/ibmveth.c
51918+++ b/drivers/net/ibmveth.c
51919@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
51920 NULL,
51921 };
51922
51923-static struct sysfs_ops veth_pool_ops = {
51924+static const struct sysfs_ops veth_pool_ops = {
51925 .show = veth_pool_show,
51926 .store = veth_pool_store,
51927 };
51928diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
51929index d617f2d..57b5309 100644
51930--- a/drivers/net/igb/e1000_82575.c
51931+++ b/drivers/net/igb/e1000_82575.c
51932@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
51933 wr32(E1000_VT_CTL, vt_ctl);
51934 }
51935
51936-static struct e1000_mac_operations e1000_mac_ops_82575 = {
51937+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
51938 .reset_hw = igb_reset_hw_82575,
51939 .init_hw = igb_init_hw_82575,
51940 .check_for_link = igb_check_for_link_82575,
51941@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
51942 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
51943 };
51944
51945-static struct e1000_phy_operations e1000_phy_ops_82575 = {
51946+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
51947 .acquire = igb_acquire_phy_82575,
51948 .get_cfg_done = igb_get_cfg_done_82575,
51949 .release = igb_release_phy_82575,
51950 };
51951
51952-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
51953+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
51954 .acquire = igb_acquire_nvm_82575,
51955 .read = igb_read_nvm_eerd,
51956 .release = igb_release_nvm_82575,
51957diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
51958index 72081df..d855cf5 100644
51959--- a/drivers/net/igb/e1000_hw.h
51960+++ b/drivers/net/igb/e1000_hw.h
51961@@ -288,6 +288,7 @@ struct e1000_mac_operations {
51962 s32 (*read_mac_addr)(struct e1000_hw *);
51963 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
51964 };
51965+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
51966
51967 struct e1000_phy_operations {
51968 s32 (*acquire)(struct e1000_hw *);
51969@@ -303,6 +304,7 @@ struct e1000_phy_operations {
51970 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
51971 s32 (*write_reg)(struct e1000_hw *, u32, u16);
51972 };
51973+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
51974
51975 struct e1000_nvm_operations {
51976 s32 (*acquire)(struct e1000_hw *);
51977@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
51978 void (*release)(struct e1000_hw *);
51979 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
51980 };
51981+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
51982
51983 struct e1000_info {
51984 s32 (*get_invariants)(struct e1000_hw *);
51985@@ -321,7 +324,7 @@ struct e1000_info {
51986 extern const struct e1000_info e1000_82575_info;
51987
51988 struct e1000_mac_info {
51989- struct e1000_mac_operations ops;
51990+ e1000_mac_operations_no_const ops;
51991
51992 u8 addr[6];
51993 u8 perm_addr[6];
51994@@ -365,7 +368,7 @@ struct e1000_mac_info {
51995 };
51996
51997 struct e1000_phy_info {
51998- struct e1000_phy_operations ops;
51999+ e1000_phy_operations_no_const ops;
52000
52001 enum e1000_phy_type type;
52002
52003@@ -400,7 +403,7 @@ struct e1000_phy_info {
52004 };
52005
52006 struct e1000_nvm_info {
52007- struct e1000_nvm_operations ops;
52008+ e1000_nvm_operations_no_const ops;
52009
52010 enum e1000_nvm_type type;
52011 enum e1000_nvm_override override;
52012@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
52013 s32 (*check_for_ack)(struct e1000_hw *, u16);
52014 s32 (*check_for_rst)(struct e1000_hw *, u16);
52015 };
52016+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52017
52018 struct e1000_mbx_stats {
52019 u32 msgs_tx;
52020@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
52021 };
52022
52023 struct e1000_mbx_info {
52024- struct e1000_mbx_operations ops;
52025+ e1000_mbx_operations_no_const ops;
52026 struct e1000_mbx_stats stats;
52027 u32 timeout;
52028 u32 usec_delay;
52029diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
52030index 1e8ce37..549c453 100644
52031--- a/drivers/net/igbvf/vf.h
52032+++ b/drivers/net/igbvf/vf.h
52033@@ -187,9 +187,10 @@ struct e1000_mac_operations {
52034 s32 (*read_mac_addr)(struct e1000_hw *);
52035 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
52036 };
52037+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
52038
52039 struct e1000_mac_info {
52040- struct e1000_mac_operations ops;
52041+ e1000_mac_operations_no_const ops;
52042 u8 addr[6];
52043 u8 perm_addr[6];
52044
52045@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
52046 s32 (*check_for_ack)(struct e1000_hw *);
52047 s32 (*check_for_rst)(struct e1000_hw *);
52048 };
52049+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
52050
52051 struct e1000_mbx_stats {
52052 u32 msgs_tx;
52053@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
52054 };
52055
52056 struct e1000_mbx_info {
52057- struct e1000_mbx_operations ops;
52058+ e1000_mbx_operations_no_const ops;
52059 struct e1000_mbx_stats stats;
52060 u32 timeout;
52061 u32 usec_delay;
52062diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
52063index aa7286b..a61394f 100644
52064--- a/drivers/net/iseries_veth.c
52065+++ b/drivers/net/iseries_veth.c
52066@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
52067 NULL
52068 };
52069
52070-static struct sysfs_ops veth_cnx_sysfs_ops = {
52071+static const struct sysfs_ops veth_cnx_sysfs_ops = {
52072 .show = veth_cnx_attribute_show
52073 };
52074
52075@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
52076 NULL
52077 };
52078
52079-static struct sysfs_ops veth_port_sysfs_ops = {
52080+static const struct sysfs_ops veth_port_sysfs_ops = {
52081 .show = veth_port_attribute_show
52082 };
52083
52084diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
52085index 8aa44dc..fa1e797 100644
52086--- a/drivers/net/ixgb/ixgb_main.c
52087+++ b/drivers/net/ixgb/ixgb_main.c
52088@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
52089 u32 rctl;
52090 int i;
52091
52092+ pax_track_stack();
52093+
52094 /* Check for Promiscuous and All Multicast modes */
52095
52096 rctl = IXGB_READ_REG(hw, RCTL);
52097diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
52098index af35e1d..8781785 100644
52099--- a/drivers/net/ixgb/ixgb_param.c
52100+++ b/drivers/net/ixgb/ixgb_param.c
52101@@ -260,6 +260,9 @@ void __devinit
52102 ixgb_check_options(struct ixgb_adapter *adapter)
52103 {
52104 int bd = adapter->bd_number;
52105+
52106+ pax_track_stack();
52107+
52108 if (bd >= IXGB_MAX_NIC) {
52109 printk(KERN_NOTICE
52110 "Warning: no configuration for board #%i\n", bd);
52111diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
52112index b17aa73..ed74540 100644
52113--- a/drivers/net/ixgbe/ixgbe_type.h
52114+++ b/drivers/net/ixgbe/ixgbe_type.h
52115@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
52116 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
52117 s32 (*update_checksum)(struct ixgbe_hw *);
52118 };
52119+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
52120
52121 struct ixgbe_mac_operations {
52122 s32 (*init_hw)(struct ixgbe_hw *);
52123@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
52124 /* Flow Control */
52125 s32 (*fc_enable)(struct ixgbe_hw *, s32);
52126 };
52127+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
52128
52129 struct ixgbe_phy_operations {
52130 s32 (*identify)(struct ixgbe_hw *);
52131@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
52132 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
52133 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
52134 };
52135+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
52136
52137 struct ixgbe_eeprom_info {
52138- struct ixgbe_eeprom_operations ops;
52139+ ixgbe_eeprom_operations_no_const ops;
52140 enum ixgbe_eeprom_type type;
52141 u32 semaphore_delay;
52142 u16 word_size;
52143@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
52144 };
52145
52146 struct ixgbe_mac_info {
52147- struct ixgbe_mac_operations ops;
52148+ ixgbe_mac_operations_no_const ops;
52149 enum ixgbe_mac_type type;
52150 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52151 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
52152@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
52153 };
52154
52155 struct ixgbe_phy_info {
52156- struct ixgbe_phy_operations ops;
52157+ ixgbe_phy_operations_no_const ops;
52158 struct mdio_if_info mdio;
52159 enum ixgbe_phy_type type;
52160 u32 id;
52161diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
52162index 291a505..2543756 100644
52163--- a/drivers/net/mlx4/main.c
52164+++ b/drivers/net/mlx4/main.c
52165@@ -38,6 +38,7 @@
52166 #include <linux/errno.h>
52167 #include <linux/pci.h>
52168 #include <linux/dma-mapping.h>
52169+#include <linux/sched.h>
52170
52171 #include <linux/mlx4/device.h>
52172 #include <linux/mlx4/doorbell.h>
52173@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
52174 u64 icm_size;
52175 int err;
52176
52177+ pax_track_stack();
52178+
52179 err = mlx4_QUERY_FW(dev);
52180 if (err) {
52181 if (err == -EACCES)
52182diff --git a/drivers/net/niu.c b/drivers/net/niu.c
52183index 2dce134..fa5ce75 100644
52184--- a/drivers/net/niu.c
52185+++ b/drivers/net/niu.c
52186@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
52187 int i, num_irqs, err;
52188 u8 first_ldg;
52189
52190+ pax_track_stack();
52191+
52192 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
52193 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
52194 ldg_num_map[i] = first_ldg + i;
52195diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
52196index c1b3f09..97cd8c4 100644
52197--- a/drivers/net/pcnet32.c
52198+++ b/drivers/net/pcnet32.c
52199@@ -79,7 +79,7 @@ static int cards_found;
52200 /*
52201 * VLB I/O addresses
52202 */
52203-static unsigned int pcnet32_portlist[] __initdata =
52204+static unsigned int pcnet32_portlist[] __devinitdata =
52205 { 0x300, 0x320, 0x340, 0x360, 0 };
52206
52207 static int pcnet32_debug = 0;
52208@@ -267,7 +267,7 @@ struct pcnet32_private {
52209 struct sk_buff **rx_skbuff;
52210 dma_addr_t *tx_dma_addr;
52211 dma_addr_t *rx_dma_addr;
52212- struct pcnet32_access a;
52213+ struct pcnet32_access *a;
52214 spinlock_t lock; /* Guard lock */
52215 unsigned int cur_rx, cur_tx; /* The next free ring entry */
52216 unsigned int rx_ring_size; /* current rx ring size */
52217@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
52218 u16 val;
52219
52220 netif_wake_queue(dev);
52221- val = lp->a.read_csr(ioaddr, CSR3);
52222+ val = lp->a->read_csr(ioaddr, CSR3);
52223 val &= 0x00ff;
52224- lp->a.write_csr(ioaddr, CSR3, val);
52225+ lp->a->write_csr(ioaddr, CSR3, val);
52226 napi_enable(&lp->napi);
52227 }
52228
52229@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
52230 r = mii_link_ok(&lp->mii_if);
52231 } else if (lp->chip_version >= PCNET32_79C970A) {
52232 ulong ioaddr = dev->base_addr; /* card base I/O address */
52233- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
52234+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
52235 } else { /* can not detect link on really old chips */
52236 r = 1;
52237 }
52238@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
52239 pcnet32_netif_stop(dev);
52240
52241 spin_lock_irqsave(&lp->lock, flags);
52242- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52243+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52244
52245 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
52246
52247@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
52248 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52249 {
52250 struct pcnet32_private *lp = netdev_priv(dev);
52251- struct pcnet32_access *a = &lp->a; /* access to registers */
52252+ struct pcnet32_access *a = lp->a; /* access to registers */
52253 ulong ioaddr = dev->base_addr; /* card base I/O address */
52254 struct sk_buff *skb; /* sk buff */
52255 int x, i; /* counters */
52256@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52257 pcnet32_netif_stop(dev);
52258
52259 spin_lock_irqsave(&lp->lock, flags);
52260- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52261+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
52262
52263 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
52264
52265 /* Reset the PCNET32 */
52266- lp->a.reset(ioaddr);
52267- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52268+ lp->a->reset(ioaddr);
52269+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52270
52271 /* switch pcnet32 to 32bit mode */
52272- lp->a.write_bcr(ioaddr, 20, 2);
52273+ lp->a->write_bcr(ioaddr, 20, 2);
52274
52275 /* purge & init rings but don't actually restart */
52276 pcnet32_restart(dev, 0x0000);
52277
52278- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52279+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52280
52281 /* Initialize Transmit buffers. */
52282 size = data_len + 15;
52283@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52284
52285 /* set int loopback in CSR15 */
52286 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
52287- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
52288+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
52289
52290 teststatus = cpu_to_le16(0x8000);
52291- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52292+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
52293
52294 /* Check status of descriptors */
52295 for (x = 0; x < numbuffs; x++) {
52296@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52297 }
52298 }
52299
52300- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52301+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
52302 wmb();
52303 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
52304 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
52305@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52306 pcnet32_restart(dev, CSR0_NORMAL);
52307 } else {
52308 pcnet32_purge_rx_ring(dev);
52309- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52310+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
52311 }
52312 spin_unlock_irqrestore(&lp->lock, flags);
52313
52314@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
52315 static void pcnet32_led_blink_callback(struct net_device *dev)
52316 {
52317 struct pcnet32_private *lp = netdev_priv(dev);
52318- struct pcnet32_access *a = &lp->a;
52319+ struct pcnet32_access *a = lp->a;
52320 ulong ioaddr = dev->base_addr;
52321 unsigned long flags;
52322 int i;
52323@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
52324 static int pcnet32_phys_id(struct net_device *dev, u32 data)
52325 {
52326 struct pcnet32_private *lp = netdev_priv(dev);
52327- struct pcnet32_access *a = &lp->a;
52328+ struct pcnet32_access *a = lp->a;
52329 ulong ioaddr = dev->base_addr;
52330 unsigned long flags;
52331 int i, regs[4];
52332@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
52333 {
52334 int csr5;
52335 struct pcnet32_private *lp = netdev_priv(dev);
52336- struct pcnet32_access *a = &lp->a;
52337+ struct pcnet32_access *a = lp->a;
52338 ulong ioaddr = dev->base_addr;
52339 int ticks;
52340
52341@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52342 spin_lock_irqsave(&lp->lock, flags);
52343 if (pcnet32_tx(dev)) {
52344 /* reset the chip to clear the error condition, then restart */
52345- lp->a.reset(ioaddr);
52346- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52347+ lp->a->reset(ioaddr);
52348+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52349 pcnet32_restart(dev, CSR0_START);
52350 netif_wake_queue(dev);
52351 }
52352@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
52353 __napi_complete(napi);
52354
52355 /* clear interrupt masks */
52356- val = lp->a.read_csr(ioaddr, CSR3);
52357+ val = lp->a->read_csr(ioaddr, CSR3);
52358 val &= 0x00ff;
52359- lp->a.write_csr(ioaddr, CSR3, val);
52360+ lp->a->write_csr(ioaddr, CSR3, val);
52361
52362 /* Set interrupt enable. */
52363- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
52364+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
52365
52366 spin_unlock_irqrestore(&lp->lock, flags);
52367 }
52368@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52369 int i, csr0;
52370 u16 *buff = ptr;
52371 struct pcnet32_private *lp = netdev_priv(dev);
52372- struct pcnet32_access *a = &lp->a;
52373+ struct pcnet32_access *a = lp->a;
52374 ulong ioaddr = dev->base_addr;
52375 unsigned long flags;
52376
52377@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
52378 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
52379 if (lp->phymask & (1 << j)) {
52380 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
52381- lp->a.write_bcr(ioaddr, 33,
52382+ lp->a->write_bcr(ioaddr, 33,
52383 (j << 5) | i);
52384- *buff++ = lp->a.read_bcr(ioaddr, 34);
52385+ *buff++ = lp->a->read_bcr(ioaddr, 34);
52386 }
52387 }
52388 }
52389@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52390 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
52391 lp->options |= PCNET32_PORT_FD;
52392
52393- lp->a = *a;
52394+ lp->a = a;
52395
52396 /* prior to register_netdev, dev->name is not yet correct */
52397 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
52398@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52399 if (lp->mii) {
52400 /* lp->phycount and lp->phymask are set to 0 by memset above */
52401
52402- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52403+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
52404 /* scan for PHYs */
52405 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
52406 unsigned short id1, id2;
52407@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
52408 "Found PHY %04x:%04x at address %d.\n",
52409 id1, id2, i);
52410 }
52411- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52412+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
52413 if (lp->phycount > 1) {
52414 lp->options |= PCNET32_PORT_MII;
52415 }
52416@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
52417 }
52418
52419 /* Reset the PCNET32 */
52420- lp->a.reset(ioaddr);
52421+ lp->a->reset(ioaddr);
52422
52423 /* switch pcnet32 to 32bit mode */
52424- lp->a.write_bcr(ioaddr, 20, 2);
52425+ lp->a->write_bcr(ioaddr, 20, 2);
52426
52427 if (netif_msg_ifup(lp))
52428 printk(KERN_DEBUG
52429@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
52430 (u32) (lp->init_dma_addr));
52431
52432 /* set/reset autoselect bit */
52433- val = lp->a.read_bcr(ioaddr, 2) & ~2;
52434+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
52435 if (lp->options & PCNET32_PORT_ASEL)
52436 val |= 2;
52437- lp->a.write_bcr(ioaddr, 2, val);
52438+ lp->a->write_bcr(ioaddr, 2, val);
52439
52440 /* handle full duplex setting */
52441 if (lp->mii_if.full_duplex) {
52442- val = lp->a.read_bcr(ioaddr, 9) & ~3;
52443+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
52444 if (lp->options & PCNET32_PORT_FD) {
52445 val |= 1;
52446 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
52447@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
52448 if (lp->chip_version == 0x2627)
52449 val |= 3;
52450 }
52451- lp->a.write_bcr(ioaddr, 9, val);
52452+ lp->a->write_bcr(ioaddr, 9, val);
52453 }
52454
52455 /* set/reset GPSI bit in test register */
52456- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
52457+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
52458 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
52459 val |= 0x10;
52460- lp->a.write_csr(ioaddr, 124, val);
52461+ lp->a->write_csr(ioaddr, 124, val);
52462
52463 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
52464 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
52465@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
52466 * duplex, and/or enable auto negotiation, and clear DANAS
52467 */
52468 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
52469- lp->a.write_bcr(ioaddr, 32,
52470- lp->a.read_bcr(ioaddr, 32) | 0x0080);
52471+ lp->a->write_bcr(ioaddr, 32,
52472+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
52473 /* disable Auto Negotiation, set 10Mpbs, HD */
52474- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
52475+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
52476 if (lp->options & PCNET32_PORT_FD)
52477 val |= 0x10;
52478 if (lp->options & PCNET32_PORT_100)
52479 val |= 0x08;
52480- lp->a.write_bcr(ioaddr, 32, val);
52481+ lp->a->write_bcr(ioaddr, 32, val);
52482 } else {
52483 if (lp->options & PCNET32_PORT_ASEL) {
52484- lp->a.write_bcr(ioaddr, 32,
52485- lp->a.read_bcr(ioaddr,
52486+ lp->a->write_bcr(ioaddr, 32,
52487+ lp->a->read_bcr(ioaddr,
52488 32) | 0x0080);
52489 /* enable auto negotiate, setup, disable fd */
52490- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
52491+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
52492 val |= 0x20;
52493- lp->a.write_bcr(ioaddr, 32, val);
52494+ lp->a->write_bcr(ioaddr, 32, val);
52495 }
52496 }
52497 } else {
52498@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
52499 * There is really no good other way to handle multiple PHYs
52500 * other than turning off all automatics
52501 */
52502- val = lp->a.read_bcr(ioaddr, 2);
52503- lp->a.write_bcr(ioaddr, 2, val & ~2);
52504- val = lp->a.read_bcr(ioaddr, 32);
52505- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
52506+ val = lp->a->read_bcr(ioaddr, 2);
52507+ lp->a->write_bcr(ioaddr, 2, val & ~2);
52508+ val = lp->a->read_bcr(ioaddr, 32);
52509+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
52510
52511 if (!(lp->options & PCNET32_PORT_ASEL)) {
52512 /* setup ecmd */
52513@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
52514 ecmd.speed =
52515 lp->
52516 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
52517- bcr9 = lp->a.read_bcr(ioaddr, 9);
52518+ bcr9 = lp->a->read_bcr(ioaddr, 9);
52519
52520 if (lp->options & PCNET32_PORT_FD) {
52521 ecmd.duplex = DUPLEX_FULL;
52522@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
52523 ecmd.duplex = DUPLEX_HALF;
52524 bcr9 |= ~(1 << 0);
52525 }
52526- lp->a.write_bcr(ioaddr, 9, bcr9);
52527+ lp->a->write_bcr(ioaddr, 9, bcr9);
52528 }
52529
52530 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
52531@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
52532
52533 #ifdef DO_DXSUFLO
52534 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
52535- val = lp->a.read_csr(ioaddr, CSR3);
52536+ val = lp->a->read_csr(ioaddr, CSR3);
52537 val |= 0x40;
52538- lp->a.write_csr(ioaddr, CSR3, val);
52539+ lp->a->write_csr(ioaddr, CSR3, val);
52540 }
52541 #endif
52542
52543@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
52544 napi_enable(&lp->napi);
52545
52546 /* Re-initialize the PCNET32, and start it when done. */
52547- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
52548- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
52549+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
52550+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
52551
52552- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52553- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
52554+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
52555+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
52556
52557 netif_start_queue(dev);
52558
52559@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
52560
52561 i = 0;
52562 while (i++ < 100)
52563- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
52564+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
52565 break;
52566 /*
52567 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
52568 * reports that doing so triggers a bug in the '974.
52569 */
52570- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
52571+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
52572
52573 if (netif_msg_ifup(lp))
52574 printk(KERN_DEBUG
52575 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
52576 dev->name, i,
52577 (u32) (lp->init_dma_addr),
52578- lp->a.read_csr(ioaddr, CSR0));
52579+ lp->a->read_csr(ioaddr, CSR0));
52580
52581 spin_unlock_irqrestore(&lp->lock, flags);
52582
52583@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
52584 * Switch back to 16bit mode to avoid problems with dumb
52585 * DOS packet driver after a warm reboot
52586 */
52587- lp->a.write_bcr(ioaddr, 20, 4);
52588+ lp->a->write_bcr(ioaddr, 20, 4);
52589
52590 err_free_irq:
52591 spin_unlock_irqrestore(&lp->lock, flags);
52592@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
52593
52594 /* wait for stop */
52595 for (i = 0; i < 100; i++)
52596- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
52597+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
52598 break;
52599
52600 if (i >= 100 && netif_msg_drv(lp))
52601@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
52602 return;
52603
52604 /* ReInit Ring */
52605- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
52606+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
52607 i = 0;
52608 while (i++ < 1000)
52609- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
52610+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
52611 break;
52612
52613- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
52614+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
52615 }
52616
52617 static void pcnet32_tx_timeout(struct net_device *dev)
52618@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
52619 if (pcnet32_debug & NETIF_MSG_DRV)
52620 printk(KERN_ERR
52621 "%s: transmit timed out, status %4.4x, resetting.\n",
52622- dev->name, lp->a.read_csr(ioaddr, CSR0));
52623- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
52624+ dev->name, lp->a->read_csr(ioaddr, CSR0));
52625+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
52626 dev->stats.tx_errors++;
52627 if (netif_msg_tx_err(lp)) {
52628 int i;
52629@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
52630 if (netif_msg_tx_queued(lp)) {
52631 printk(KERN_DEBUG
52632 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
52633- dev->name, lp->a.read_csr(ioaddr, CSR0));
52634+ dev->name, lp->a->read_csr(ioaddr, CSR0));
52635 }
52636
52637 /* Default status -- will not enable Successful-TxDone
52638@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
52639 dev->stats.tx_bytes += skb->len;
52640
52641 /* Trigger an immediate send poll. */
52642- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
52643+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
52644
52645 dev->trans_start = jiffies;
52646
52647@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
52648
52649 spin_lock(&lp->lock);
52650
52651- csr0 = lp->a.read_csr(ioaddr, CSR0);
52652+ csr0 = lp->a->read_csr(ioaddr, CSR0);
52653 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
52654 if (csr0 == 0xffff) {
52655 break; /* PCMCIA remove happened */
52656 }
52657 /* Acknowledge all of the current interrupt sources ASAP. */
52658- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
52659+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
52660
52661 if (netif_msg_intr(lp))
52662 printk(KERN_DEBUG
52663 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
52664- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
52665+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
52666
52667 /* Log misc errors. */
52668 if (csr0 & 0x4000)
52669@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
52670 if (napi_schedule_prep(&lp->napi)) {
52671 u16 val;
52672 /* set interrupt masks */
52673- val = lp->a.read_csr(ioaddr, CSR3);
52674+ val = lp->a->read_csr(ioaddr, CSR3);
52675 val |= 0x5f00;
52676- lp->a.write_csr(ioaddr, CSR3, val);
52677+ lp->a->write_csr(ioaddr, CSR3, val);
52678
52679 __napi_schedule(&lp->napi);
52680 break;
52681 }
52682- csr0 = lp->a.read_csr(ioaddr, CSR0);
52683+ csr0 = lp->a->read_csr(ioaddr, CSR0);
52684 }
52685
52686 if (netif_msg_intr(lp))
52687 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
52688- dev->name, lp->a.read_csr(ioaddr, CSR0));
52689+ dev->name, lp->a->read_csr(ioaddr, CSR0));
52690
52691 spin_unlock(&lp->lock);
52692
52693@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
52694
52695 spin_lock_irqsave(&lp->lock, flags);
52696
52697- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
52698+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
52699
52700 if (netif_msg_ifdown(lp))
52701 printk(KERN_DEBUG
52702 "%s: Shutting down ethercard, status was %2.2x.\n",
52703- dev->name, lp->a.read_csr(ioaddr, CSR0));
52704+ dev->name, lp->a->read_csr(ioaddr, CSR0));
52705
52706 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
52707- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
52708+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
52709
52710 /*
52711 * Switch back to 16bit mode to avoid problems with dumb
52712 * DOS packet driver after a warm reboot
52713 */
52714- lp->a.write_bcr(ioaddr, 20, 4);
52715+ lp->a->write_bcr(ioaddr, 20, 4);
52716
52717 spin_unlock_irqrestore(&lp->lock, flags);
52718
52719@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
52720 unsigned long flags;
52721
52722 spin_lock_irqsave(&lp->lock, flags);
52723- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
52724+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
52725 spin_unlock_irqrestore(&lp->lock, flags);
52726
52727 return &dev->stats;
52728@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
52729 if (dev->flags & IFF_ALLMULTI) {
52730 ib->filter[0] = cpu_to_le32(~0U);
52731 ib->filter[1] = cpu_to_le32(~0U);
52732- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
52733- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
52734- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
52735- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
52736+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
52737+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
52738+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
52739+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
52740 return;
52741 }
52742 /* clear the multicast filter */
52743@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
52744 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
52745 }
52746 for (i = 0; i < 4; i++)
52747- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
52748+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
52749 le16_to_cpu(mcast_table[i]));
52750 return;
52751 }
52752@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
52753
52754 spin_lock_irqsave(&lp->lock, flags);
52755 suspended = pcnet32_suspend(dev, &flags, 0);
52756- csr15 = lp->a.read_csr(ioaddr, CSR15);
52757+ csr15 = lp->a->read_csr(ioaddr, CSR15);
52758 if (dev->flags & IFF_PROMISC) {
52759 /* Log any net taps. */
52760 if (netif_msg_hw(lp))
52761@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
52762 lp->init_block->mode =
52763 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
52764 7);
52765- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
52766+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
52767 } else {
52768 lp->init_block->mode =
52769 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
52770- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
52771+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
52772 pcnet32_load_multicast(dev);
52773 }
52774
52775 if (suspended) {
52776 int csr5;
52777 /* clear SUSPEND (SPND) - CSR5 bit 0 */
52778- csr5 = lp->a.read_csr(ioaddr, CSR5);
52779- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
52780+ csr5 = lp->a->read_csr(ioaddr, CSR5);
52781+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
52782 } else {
52783- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
52784+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
52785 pcnet32_restart(dev, CSR0_NORMAL);
52786 netif_wake_queue(dev);
52787 }
52788@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
52789 if (!lp->mii)
52790 return 0;
52791
52792- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
52793- val_out = lp->a.read_bcr(ioaddr, 34);
52794+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
52795+ val_out = lp->a->read_bcr(ioaddr, 34);
52796
52797 return val_out;
52798 }
52799@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
52800 if (!lp->mii)
52801 return;
52802
52803- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
52804- lp->a.write_bcr(ioaddr, 34, val);
52805+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
52806+ lp->a->write_bcr(ioaddr, 34, val);
52807 }
52808
52809 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
52810@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
52811 curr_link = mii_link_ok(&lp->mii_if);
52812 } else {
52813 ulong ioaddr = dev->base_addr; /* card base I/O address */
52814- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
52815+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
52816 }
52817 if (!curr_link) {
52818 if (prev_link || verbose) {
52819@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
52820 (ecmd.duplex ==
52821 DUPLEX_FULL) ? "full" : "half");
52822 }
52823- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
52824+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
52825 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
52826 if (lp->mii_if.full_duplex)
52827 bcr9 |= (1 << 0);
52828 else
52829 bcr9 &= ~(1 << 0);
52830- lp->a.write_bcr(dev->base_addr, 9, bcr9);
52831+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
52832 }
52833 } else {
52834 if (netif_msg_link(lp))
52835diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
52836index 7cc9898..6eb50d3 100644
52837--- a/drivers/net/sis190.c
52838+++ b/drivers/net/sis190.c
52839@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
52840 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
52841 struct net_device *dev)
52842 {
52843- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
52844+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
52845 struct sis190_private *tp = netdev_priv(dev);
52846 struct pci_dev *isa_bridge;
52847 u8 reg, tmp8;
52848diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
52849index e13685a..60c948c 100644
52850--- a/drivers/net/sundance.c
52851+++ b/drivers/net/sundance.c
52852@@ -225,7 +225,7 @@ enum {
52853 struct pci_id_info {
52854 const char *name;
52855 };
52856-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
52857+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
52858 {"D-Link DFE-550TX FAST Ethernet Adapter"},
52859 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
52860 {"D-Link DFE-580TX 4 port Server Adapter"},
52861diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
52862index 529f55a..cccaa18 100644
52863--- a/drivers/net/tg3.h
52864+++ b/drivers/net/tg3.h
52865@@ -95,6 +95,7 @@
52866 #define CHIPREV_ID_5750_A0 0x4000
52867 #define CHIPREV_ID_5750_A1 0x4001
52868 #define CHIPREV_ID_5750_A3 0x4003
52869+#define CHIPREV_ID_5750_C1 0x4201
52870 #define CHIPREV_ID_5750_C2 0x4202
52871 #define CHIPREV_ID_5752_A0_HW 0x5000
52872 #define CHIPREV_ID_5752_A0 0x6000
52873diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
52874index b9db1b5..720f9ce 100644
52875--- a/drivers/net/tokenring/abyss.c
52876+++ b/drivers/net/tokenring/abyss.c
52877@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
52878
52879 static int __init abyss_init (void)
52880 {
52881- abyss_netdev_ops = tms380tr_netdev_ops;
52882+ pax_open_kernel();
52883+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
52884
52885- abyss_netdev_ops.ndo_open = abyss_open;
52886- abyss_netdev_ops.ndo_stop = abyss_close;
52887+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
52888+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
52889+ pax_close_kernel();
52890
52891 return pci_register_driver(&abyss_driver);
52892 }
52893diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
52894index 456f8bf..373e56d 100644
52895--- a/drivers/net/tokenring/madgemc.c
52896+++ b/drivers/net/tokenring/madgemc.c
52897@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
52898
52899 static int __init madgemc_init (void)
52900 {
52901- madgemc_netdev_ops = tms380tr_netdev_ops;
52902- madgemc_netdev_ops.ndo_open = madgemc_open;
52903- madgemc_netdev_ops.ndo_stop = madgemc_close;
52904+ pax_open_kernel();
52905+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
52906+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
52907+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
52908+ pax_close_kernel();
52909
52910 return mca_register_driver (&madgemc_driver);
52911 }
52912diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
52913index 16e8783..925bd49 100644
52914--- a/drivers/net/tokenring/proteon.c
52915+++ b/drivers/net/tokenring/proteon.c
52916@@ -353,9 +353,11 @@ static int __init proteon_init(void)
52917 struct platform_device *pdev;
52918 int i, num = 0, err = 0;
52919
52920- proteon_netdev_ops = tms380tr_netdev_ops;
52921- proteon_netdev_ops.ndo_open = proteon_open;
52922- proteon_netdev_ops.ndo_stop = tms380tr_close;
52923+ pax_open_kernel();
52924+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
52925+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
52926+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
52927+ pax_close_kernel();
52928
52929 err = platform_driver_register(&proteon_driver);
52930 if (err)
52931diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
52932index 46db5c5..37c1536 100644
52933--- a/drivers/net/tokenring/skisa.c
52934+++ b/drivers/net/tokenring/skisa.c
52935@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
52936 struct platform_device *pdev;
52937 int i, num = 0, err = 0;
52938
52939- sk_isa_netdev_ops = tms380tr_netdev_ops;
52940- sk_isa_netdev_ops.ndo_open = sk_isa_open;
52941- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
52942+ pax_open_kernel();
52943+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
52944+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
52945+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
52946+ pax_close_kernel();
52947
52948 err = platform_driver_register(&sk_isa_driver);
52949 if (err)
52950diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
52951index 74e5ba4..5cf6bc9 100644
52952--- a/drivers/net/tulip/de2104x.c
52953+++ b/drivers/net/tulip/de2104x.c
52954@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
52955 struct de_srom_info_leaf *il;
52956 void *bufp;
52957
52958+ pax_track_stack();
52959+
52960 /* download entire eeprom */
52961 for (i = 0; i < DE_EEPROM_WORDS; i++)
52962 ((__le16 *)ee_data)[i] =
52963diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
52964index a8349b7..90f9dfe 100644
52965--- a/drivers/net/tulip/de4x5.c
52966+++ b/drivers/net/tulip/de4x5.c
52967@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
52968 for (i=0; i<ETH_ALEN; i++) {
52969 tmp.addr[i] = dev->dev_addr[i];
52970 }
52971- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
52972+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
52973 break;
52974
52975 case DE4X5_SET_HWADDR: /* Set the hardware address */
52976@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
52977 spin_lock_irqsave(&lp->lock, flags);
52978 memcpy(&statbuf, &lp->pktStats, ioc->len);
52979 spin_unlock_irqrestore(&lp->lock, flags);
52980- if (copy_to_user(ioc->data, &statbuf, ioc->len))
52981+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
52982 return -EFAULT;
52983 break;
52984 }
52985diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
52986index 391acd3..56d11cd 100644
52987--- a/drivers/net/tulip/eeprom.c
52988+++ b/drivers/net/tulip/eeprom.c
52989@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
52990 {NULL}};
52991
52992
52993-static const char *block_name[] __devinitdata = {
52994+static const char *block_name[] __devinitconst = {
52995 "21140 non-MII",
52996 "21140 MII PHY",
52997 "21142 Serial PHY",
52998diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
52999index b38d3b7..b1cff23 100644
53000--- a/drivers/net/tulip/winbond-840.c
53001+++ b/drivers/net/tulip/winbond-840.c
53002@@ -235,7 +235,7 @@ struct pci_id_info {
53003 int drv_flags; /* Driver use, intended as capability flags. */
53004 };
53005
53006-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
53007+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
53008 { /* Sometime a Level-One switch card. */
53009 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
53010 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
53011diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
53012index f450bc9..2b747c8 100644
53013--- a/drivers/net/usb/hso.c
53014+++ b/drivers/net/usb/hso.c
53015@@ -71,7 +71,7 @@
53016 #include <asm/byteorder.h>
53017 #include <linux/serial_core.h>
53018 #include <linux/serial.h>
53019-
53020+#include <asm/local.h>
53021
53022 #define DRIVER_VERSION "1.2"
53023 #define MOD_AUTHOR "Option Wireless"
53024@@ -258,7 +258,7 @@ struct hso_serial {
53025
53026 /* from usb_serial_port */
53027 struct tty_struct *tty;
53028- int open_count;
53029+ local_t open_count;
53030 spinlock_t serial_lock;
53031
53032 int (*write_data) (struct hso_serial *serial);
53033@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
53034 struct urb *urb;
53035
53036 urb = serial->rx_urb[0];
53037- if (serial->open_count > 0) {
53038+ if (local_read(&serial->open_count) > 0) {
53039 count = put_rxbuf_data(urb, serial);
53040 if (count == -1)
53041 return;
53042@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
53043 DUMP1(urb->transfer_buffer, urb->actual_length);
53044
53045 /* Anyone listening? */
53046- if (serial->open_count == 0)
53047+ if (local_read(&serial->open_count) == 0)
53048 return;
53049
53050 if (status == 0) {
53051@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53052 spin_unlock_irq(&serial->serial_lock);
53053
53054 /* check for port already opened, if not set the termios */
53055- serial->open_count++;
53056- if (serial->open_count == 1) {
53057+ if (local_inc_return(&serial->open_count) == 1) {
53058 tty->low_latency = 1;
53059 serial->rx_state = RX_IDLE;
53060 /* Force default termio settings */
53061@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
53062 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
53063 if (result) {
53064 hso_stop_serial_device(serial->parent);
53065- serial->open_count--;
53066+ local_dec(&serial->open_count);
53067 kref_put(&serial->parent->ref, hso_serial_ref_free);
53068 }
53069 } else {
53070@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
53071
53072 /* reset the rts and dtr */
53073 /* do the actual close */
53074- serial->open_count--;
53075+ local_dec(&serial->open_count);
53076
53077- if (serial->open_count <= 0) {
53078- serial->open_count = 0;
53079+ if (local_read(&serial->open_count) <= 0) {
53080+ local_set(&serial->open_count, 0);
53081 spin_lock_irq(&serial->serial_lock);
53082 if (serial->tty == tty) {
53083 serial->tty->driver_data = NULL;
53084@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
53085
53086 /* the actual setup */
53087 spin_lock_irqsave(&serial->serial_lock, flags);
53088- if (serial->open_count)
53089+ if (local_read(&serial->open_count))
53090 _hso_serial_set_termios(tty, old);
53091 else
53092 tty->termios = old;
53093@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
53094 /* Start all serial ports */
53095 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
53096 if (serial_table[i] && (serial_table[i]->interface == iface)) {
53097- if (dev2ser(serial_table[i])->open_count) {
53098+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
53099 result =
53100 hso_start_serial_device(serial_table[i], GFP_NOIO);
53101 hso_kick_transmit(dev2ser(serial_table[i]));
53102diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
53103index 3e94f0c..ffdd926 100644
53104--- a/drivers/net/vxge/vxge-config.h
53105+++ b/drivers/net/vxge/vxge-config.h
53106@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
53107 void (*link_down)(struct __vxge_hw_device *devh);
53108 void (*crit_err)(struct __vxge_hw_device *devh,
53109 enum vxge_hw_event type, u64 ext_data);
53110-};
53111+} __no_const;
53112
53113 /*
53114 * struct __vxge_hw_blockpool_entry - Block private data structure
53115diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
53116index 068d7a9..35293de 100644
53117--- a/drivers/net/vxge/vxge-main.c
53118+++ b/drivers/net/vxge/vxge-main.c
53119@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
53120 struct sk_buff *completed[NR_SKB_COMPLETED];
53121 int more;
53122
53123+ pax_track_stack();
53124+
53125 do {
53126 more = 0;
53127 skb_ptr = completed;
53128@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
53129 u8 mtable[256] = {0}; /* CPU to vpath mapping */
53130 int index;
53131
53132+ pax_track_stack();
53133+
53134 /*
53135 * Filling
53136 * - itable with bucket numbers
53137diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
53138index 461742b..81be42e 100644
53139--- a/drivers/net/vxge/vxge-traffic.h
53140+++ b/drivers/net/vxge/vxge-traffic.h
53141@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
53142 struct vxge_hw_mempool_dma *dma_object,
53143 u32 index,
53144 u32 is_last);
53145-};
53146+} __no_const;
53147
53148 void
53149 __vxge_hw_mempool_destroy(
53150diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
53151index cd8cb95..4153b79 100644
53152--- a/drivers/net/wan/cycx_x25.c
53153+++ b/drivers/net/wan/cycx_x25.c
53154@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
53155 unsigned char hex[1024],
53156 * phex = hex;
53157
53158+ pax_track_stack();
53159+
53160 if (len >= (sizeof(hex) / 2))
53161 len = (sizeof(hex) / 2) - 1;
53162
53163diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
53164index aa9248f..a4e3c3b 100644
53165--- a/drivers/net/wan/hdlc_x25.c
53166+++ b/drivers/net/wan/hdlc_x25.c
53167@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
53168
53169 static int x25_open(struct net_device *dev)
53170 {
53171- struct lapb_register_struct cb;
53172+ static struct lapb_register_struct cb = {
53173+ .connect_confirmation = x25_connected,
53174+ .connect_indication = x25_connected,
53175+ .disconnect_confirmation = x25_disconnected,
53176+ .disconnect_indication = x25_disconnected,
53177+ .data_indication = x25_data_indication,
53178+ .data_transmit = x25_data_transmit
53179+ };
53180 int result;
53181
53182- cb.connect_confirmation = x25_connected;
53183- cb.connect_indication = x25_connected;
53184- cb.disconnect_confirmation = x25_disconnected;
53185- cb.disconnect_indication = x25_disconnected;
53186- cb.data_indication = x25_data_indication;
53187- cb.data_transmit = x25_data_transmit;
53188-
53189 result = lapb_register(dev, &cb);
53190 if (result != LAPB_OK)
53191 return result;
53192diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
53193index 5ad287c..783b020 100644
53194--- a/drivers/net/wimax/i2400m/usb-fw.c
53195+++ b/drivers/net/wimax/i2400m/usb-fw.c
53196@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
53197 int do_autopm = 1;
53198 DECLARE_COMPLETION_ONSTACK(notif_completion);
53199
53200+ pax_track_stack();
53201+
53202 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
53203 i2400m, ack, ack_size);
53204 BUG_ON(_ack == i2400m->bm_ack_buf);
53205diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
53206index 6c26840..62c97c3 100644
53207--- a/drivers/net/wireless/airo.c
53208+++ b/drivers/net/wireless/airo.c
53209@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
53210 BSSListElement * loop_net;
53211 BSSListElement * tmp_net;
53212
53213+ pax_track_stack();
53214+
53215 /* Blow away current list of scan results */
53216 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
53217 list_move_tail (&loop_net->list, &ai->network_free_list);
53218@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
53219 WepKeyRid wkr;
53220 int rc;
53221
53222+ pax_track_stack();
53223+
53224 memset( &mySsid, 0, sizeof( mySsid ) );
53225 kfree (ai->flash);
53226 ai->flash = NULL;
53227@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
53228 __le32 *vals = stats.vals;
53229 int len;
53230
53231+ pax_track_stack();
53232+
53233 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53234 return -ENOMEM;
53235 data = (struct proc_data *)file->private_data;
53236@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
53237 /* If doLoseSync is not 1, we won't do a Lose Sync */
53238 int doLoseSync = -1;
53239
53240+ pax_track_stack();
53241+
53242 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
53243 return -ENOMEM;
53244 data = (struct proc_data *)file->private_data;
53245@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
53246 int i;
53247 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
53248
53249+ pax_track_stack();
53250+
53251 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
53252 if (!qual)
53253 return -ENOMEM;
53254@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
53255 CapabilityRid cap_rid;
53256 __le32 *vals = stats_rid.vals;
53257
53258+ pax_track_stack();
53259+
53260 /* Get stats out of the card */
53261 clear_bit(JOB_WSTATS, &local->jobs);
53262 if (local->power.event) {
53263diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
53264index 747508c..82e965d 100644
53265--- a/drivers/net/wireless/ath/ath5k/debug.c
53266+++ b/drivers/net/wireless/ath/ath5k/debug.c
53267@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
53268 unsigned int v;
53269 u64 tsf;
53270
53271+ pax_track_stack();
53272+
53273 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
53274 len += snprintf(buf+len, sizeof(buf)-len,
53275 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
53276@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
53277 unsigned int len = 0;
53278 unsigned int i;
53279
53280+ pax_track_stack();
53281+
53282 len += snprintf(buf+len, sizeof(buf)-len,
53283 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
53284
53285diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
53286index 2be4c22..593b1eb 100644
53287--- a/drivers/net/wireless/ath/ath9k/debug.c
53288+++ b/drivers/net/wireless/ath/ath9k/debug.c
53289@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
53290 char buf[512];
53291 unsigned int len = 0;
53292
53293+ pax_track_stack();
53294+
53295 len += snprintf(buf + len, sizeof(buf) - len,
53296 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
53297 len += snprintf(buf + len, sizeof(buf) - len,
53298@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
53299 int i;
53300 u8 addr[ETH_ALEN];
53301
53302+ pax_track_stack();
53303+
53304 len += snprintf(buf + len, sizeof(buf) - len,
53305 "primary: %s (%s chan=%d ht=%d)\n",
53306 wiphy_name(sc->pri_wiphy->hw->wiphy),
53307diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
53308index 80b19a4..dab3a45 100644
53309--- a/drivers/net/wireless/b43/debugfs.c
53310+++ b/drivers/net/wireless/b43/debugfs.c
53311@@ -43,7 +43,7 @@ static struct dentry *rootdir;
53312 struct b43_debugfs_fops {
53313 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
53314 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
53315- struct file_operations fops;
53316+ const struct file_operations fops;
53317 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
53318 size_t file_struct_offset;
53319 };
53320diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
53321index 1f85ac5..c99b4b4 100644
53322--- a/drivers/net/wireless/b43legacy/debugfs.c
53323+++ b/drivers/net/wireless/b43legacy/debugfs.c
53324@@ -44,7 +44,7 @@ static struct dentry *rootdir;
53325 struct b43legacy_debugfs_fops {
53326 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
53327 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
53328- struct file_operations fops;
53329+ const struct file_operations fops;
53330 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
53331 size_t file_struct_offset;
53332 /* Take wl->irq_lock before calling read/write? */
53333diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
53334index 43102bf..3b569c3 100644
53335--- a/drivers/net/wireless/ipw2x00/ipw2100.c
53336+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
53337@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
53338 int err;
53339 DECLARE_SSID_BUF(ssid);
53340
53341+ pax_track_stack();
53342+
53343 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
53344
53345 if (ssid_len)
53346@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
53347 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
53348 int err;
53349
53350+ pax_track_stack();
53351+
53352 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
53353 idx, keylen, len);
53354
53355diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
53356index 282b1f7..169f0cf 100644
53357--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
53358+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
53359@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
53360 unsigned long flags;
53361 DECLARE_SSID_BUF(ssid);
53362
53363+ pax_track_stack();
53364+
53365 LIBIPW_DEBUG_SCAN("'%s' (%pM"
53366 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
53367 print_ssid(ssid, info_element->data, info_element->len),
53368diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
53369index 950267a..80d5fd2 100644
53370--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
53371+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
53372@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
53373 },
53374 };
53375
53376-static struct iwl_ops iwl1000_ops = {
53377+static const struct iwl_ops iwl1000_ops = {
53378 .ucode = &iwl5000_ucode,
53379 .lib = &iwl1000_lib,
53380 .hcmd = &iwl5000_hcmd,
53381diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
53382index 56bfcc3..b348020 100644
53383--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
53384+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
53385@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
53386 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
53387 };
53388
53389-static struct iwl_ops iwl3945_ops = {
53390+static const struct iwl_ops iwl3945_ops = {
53391 .ucode = &iwl3945_ucode,
53392 .lib = &iwl3945_lib,
53393 .hcmd = &iwl3945_hcmd,
53394diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
53395index 585b8d4..e142963 100644
53396--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
53397+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
53398@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
53399 },
53400 };
53401
53402-static struct iwl_ops iwl4965_ops = {
53403+static const struct iwl_ops iwl4965_ops = {
53404 .ucode = &iwl4965_ucode,
53405 .lib = &iwl4965_lib,
53406 .hcmd = &iwl4965_hcmd,
53407diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
53408index 1f423f2..e37c192 100644
53409--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
53410+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
53411@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
53412 },
53413 };
53414
53415-struct iwl_ops iwl5000_ops = {
53416+const struct iwl_ops iwl5000_ops = {
53417 .ucode = &iwl5000_ucode,
53418 .lib = &iwl5000_lib,
53419 .hcmd = &iwl5000_hcmd,
53420 .utils = &iwl5000_hcmd_utils,
53421 };
53422
53423-static struct iwl_ops iwl5150_ops = {
53424+static const struct iwl_ops iwl5150_ops = {
53425 .ucode = &iwl5000_ucode,
53426 .lib = &iwl5150_lib,
53427 .hcmd = &iwl5000_hcmd,
53428diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
53429index 1473452..f07d5e1 100644
53430--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
53431+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
53432@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
53433 .calc_rssi = iwl5000_calc_rssi,
53434 };
53435
53436-static struct iwl_ops iwl6000_ops = {
53437+static const struct iwl_ops iwl6000_ops = {
53438 .ucode = &iwl5000_ucode,
53439 .lib = &iwl6000_lib,
53440 .hcmd = &iwl5000_hcmd,
53441diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
53442index 1a3dfa2..b3e0a61 100644
53443--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
53444+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
53445@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
53446 u8 active_index = 0;
53447 s32 tpt = 0;
53448
53449+ pax_track_stack();
53450+
53451 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
53452
53453 if (!ieee80211_is_data(hdr->frame_control) ||
53454@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
53455 u8 valid_tx_ant = 0;
53456 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
53457
53458+ pax_track_stack();
53459+
53460 /* Override starting rate (index 0) if needed for debug purposes */
53461 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
53462
53463diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
53464index 0e56d78..6a3c107 100644
53465--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
53466+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
53467@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
53468 if (iwl_debug_level & IWL_DL_INFO)
53469 dev_printk(KERN_DEBUG, &(pdev->dev),
53470 "Disabling hw_scan\n");
53471- iwl_hw_ops.hw_scan = NULL;
53472+ pax_open_kernel();
53473+ *(void **)&iwl_hw_ops.hw_scan = NULL;
53474+ pax_close_kernel();
53475 }
53476
53477 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
53478diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
53479index cbc6290..eb323d7 100644
53480--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
53481+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
53482@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
53483 #endif
53484
53485 #else
53486-#define IWL_DEBUG(__priv, level, fmt, args...)
53487-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
53488+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
53489+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
53490 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
53491 void *p, u32 len)
53492 {}
53493diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
53494index a198bcf..8e68233 100644
53495--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
53496+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
53497@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
53498 int pos = 0;
53499 const size_t bufsz = sizeof(buf);
53500
53501+ pax_track_stack();
53502+
53503 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
53504 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
53505 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
53506@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
53507 const size_t bufsz = sizeof(buf);
53508 ssize_t ret;
53509
53510+ pax_track_stack();
53511+
53512 for (i = 0; i < AC_NUM; i++) {
53513 pos += scnprintf(buf + pos, bufsz - pos,
53514 "\tcw_min\tcw_max\taifsn\ttxop\n");
53515diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
53516index 3539ea4..b174bfa 100644
53517--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
53518+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
53519@@ -68,7 +68,7 @@ struct iwl_tx_queue;
53520
53521 /* shared structures from iwl-5000.c */
53522 extern struct iwl_mod_params iwl50_mod_params;
53523-extern struct iwl_ops iwl5000_ops;
53524+extern const struct iwl_ops iwl5000_ops;
53525 extern struct iwl_ucode_ops iwl5000_ucode;
53526 extern struct iwl_lib_ops iwl5000_lib;
53527 extern struct iwl_hcmd_ops iwl5000_hcmd;
53528diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
53529index 619590d..69235ee 100644
53530--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
53531+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
53532@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
53533 */
53534 if (iwl3945_mod_params.disable_hw_scan) {
53535 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
53536- iwl3945_hw_ops.hw_scan = NULL;
53537+ pax_open_kernel();
53538+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
53539+ pax_close_kernel();
53540 }
53541
53542
53543diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
53544index 1465379..fe4d78b 100644
53545--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
53546+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
53547@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
53548 int buf_len = 512;
53549 size_t len = 0;
53550
53551+ pax_track_stack();
53552+
53553 if (*ppos != 0)
53554 return 0;
53555 if (count < sizeof(buf))
53556diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
53557index 893a55c..7f66a50 100644
53558--- a/drivers/net/wireless/libertas/debugfs.c
53559+++ b/drivers/net/wireless/libertas/debugfs.c
53560@@ -708,7 +708,7 @@ out_unlock:
53561 struct lbs_debugfs_files {
53562 const char *name;
53563 int perm;
53564- struct file_operations fops;
53565+ const struct file_operations fops;
53566 };
53567
53568 static const struct lbs_debugfs_files debugfs_files[] = {
53569diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
53570index 2ecbedb..42704f0 100644
53571--- a/drivers/net/wireless/rndis_wlan.c
53572+++ b/drivers/net/wireless/rndis_wlan.c
53573@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
53574
53575 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
53576
53577- if (rts_threshold < 0 || rts_threshold > 2347)
53578+ if (rts_threshold > 2347)
53579 rts_threshold = 2347;
53580
53581 tmp = cpu_to_le32(rts_threshold);
53582diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
53583index 334ccd6..47f8944 100644
53584--- a/drivers/oprofile/buffer_sync.c
53585+++ b/drivers/oprofile/buffer_sync.c
53586@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
53587 if (cookie == NO_COOKIE)
53588 offset = pc;
53589 if (cookie == INVALID_COOKIE) {
53590- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
53591+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
53592 offset = pc;
53593 }
53594 if (cookie != last_cookie) {
53595@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
53596 /* add userspace sample */
53597
53598 if (!mm) {
53599- atomic_inc(&oprofile_stats.sample_lost_no_mm);
53600+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
53601 return 0;
53602 }
53603
53604 cookie = lookup_dcookie(mm, s->eip, &offset);
53605
53606 if (cookie == INVALID_COOKIE) {
53607- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
53608+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
53609 return 0;
53610 }
53611
53612@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
53613 /* ignore backtraces if failed to add a sample */
53614 if (state == sb_bt_start) {
53615 state = sb_bt_ignore;
53616- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
53617+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
53618 }
53619 }
53620 release_mm(mm);
53621diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
53622index 5df60a6..72f5c1c 100644
53623--- a/drivers/oprofile/event_buffer.c
53624+++ b/drivers/oprofile/event_buffer.c
53625@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
53626 }
53627
53628 if (buffer_pos == buffer_size) {
53629- atomic_inc(&oprofile_stats.event_lost_overflow);
53630+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
53631 return;
53632 }
53633
53634diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
53635index dc8a042..fe5f315 100644
53636--- a/drivers/oprofile/oprof.c
53637+++ b/drivers/oprofile/oprof.c
53638@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
53639 if (oprofile_ops.switch_events())
53640 return;
53641
53642- atomic_inc(&oprofile_stats.multiplex_counter);
53643+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
53644 start_switch_worker();
53645 }
53646
53647diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
53648index 61689e8..387f7f8 100644
53649--- a/drivers/oprofile/oprofile_stats.c
53650+++ b/drivers/oprofile/oprofile_stats.c
53651@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
53652 cpu_buf->sample_invalid_eip = 0;
53653 }
53654
53655- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
53656- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
53657- atomic_set(&oprofile_stats.event_lost_overflow, 0);
53658- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
53659- atomic_set(&oprofile_stats.multiplex_counter, 0);
53660+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
53661+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
53662+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
53663+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
53664+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
53665 }
53666
53667
53668diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
53669index 0b54e46..a37c527 100644
53670--- a/drivers/oprofile/oprofile_stats.h
53671+++ b/drivers/oprofile/oprofile_stats.h
53672@@ -13,11 +13,11 @@
53673 #include <asm/atomic.h>
53674
53675 struct oprofile_stat_struct {
53676- atomic_t sample_lost_no_mm;
53677- atomic_t sample_lost_no_mapping;
53678- atomic_t bt_lost_no_mapping;
53679- atomic_t event_lost_overflow;
53680- atomic_t multiplex_counter;
53681+ atomic_unchecked_t sample_lost_no_mm;
53682+ atomic_unchecked_t sample_lost_no_mapping;
53683+ atomic_unchecked_t bt_lost_no_mapping;
53684+ atomic_unchecked_t event_lost_overflow;
53685+ atomic_unchecked_t multiplex_counter;
53686 };
53687
53688 extern struct oprofile_stat_struct oprofile_stats;
53689diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
53690index 2766a6d..80c77e2 100644
53691--- a/drivers/oprofile/oprofilefs.c
53692+++ b/drivers/oprofile/oprofilefs.c
53693@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
53694
53695
53696 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
53697- char const *name, atomic_t *val)
53698+ char const *name, atomic_unchecked_t *val)
53699 {
53700 struct dentry *d = __oprofilefs_create_file(sb, root, name,
53701 &atomic_ro_fops, 0444);
53702diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
53703index 13a64bc..ad62835 100644
53704--- a/drivers/parisc/pdc_stable.c
53705+++ b/drivers/parisc/pdc_stable.c
53706@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
53707 return ret;
53708 }
53709
53710-static struct sysfs_ops pdcspath_attr_ops = {
53711+static const struct sysfs_ops pdcspath_attr_ops = {
53712 .show = pdcspath_attr_show,
53713 .store = pdcspath_attr_store,
53714 };
53715diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
53716index 8eefe56..40751a7 100644
53717--- a/drivers/parport/procfs.c
53718+++ b/drivers/parport/procfs.c
53719@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
53720
53721 *ppos += len;
53722
53723- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
53724+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
53725 }
53726
53727 #ifdef CONFIG_PARPORT_1284
53728@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
53729
53730 *ppos += len;
53731
53732- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
53733+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
53734 }
53735 #endif /* IEEE1284.3 support. */
53736
53737diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
53738index 73e7d8e..c80f3d2 100644
53739--- a/drivers/pci/hotplug/acpiphp_glue.c
53740+++ b/drivers/pci/hotplug/acpiphp_glue.c
53741@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
53742 }
53743
53744
53745-static struct acpi_dock_ops acpiphp_dock_ops = {
53746+static const struct acpi_dock_ops acpiphp_dock_ops = {
53747 .handler = handle_hotplug_event_func,
53748 };
53749
53750diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
53751index 9fff878..ad0ad53 100644
53752--- a/drivers/pci/hotplug/cpci_hotplug.h
53753+++ b/drivers/pci/hotplug/cpci_hotplug.h
53754@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
53755 int (*hardware_test) (struct slot* slot, u32 value);
53756 u8 (*get_power) (struct slot* slot);
53757 int (*set_power) (struct slot* slot, int value);
53758-};
53759+} __no_const;
53760
53761 struct cpci_hp_controller {
53762 unsigned int irq;
53763diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
53764index 76ba8a1..20ca857 100644
53765--- a/drivers/pci/hotplug/cpqphp_nvram.c
53766+++ b/drivers/pci/hotplug/cpqphp_nvram.c
53767@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
53768
53769 void compaq_nvram_init (void __iomem *rom_start)
53770 {
53771+
53772+#ifndef CONFIG_PAX_KERNEXEC
53773 if (rom_start) {
53774 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
53775 }
53776+#endif
53777+
53778 dbg("int15 entry = %p\n", compaq_int15_entry_point);
53779
53780 /* initialize our int15 lock */
53781diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
53782index 6151389..0a894ef 100644
53783--- a/drivers/pci/hotplug/fakephp.c
53784+++ b/drivers/pci/hotplug/fakephp.c
53785@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
53786 }
53787
53788 static struct kobj_type legacy_ktype = {
53789- .sysfs_ops = &(struct sysfs_ops){
53790+ .sysfs_ops = &(const struct sysfs_ops){
53791 .store = legacy_store, .show = legacy_show
53792 },
53793 .release = &legacy_release,
53794diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
53795index 5b680df..fe05b7e 100644
53796--- a/drivers/pci/intel-iommu.c
53797+++ b/drivers/pci/intel-iommu.c
53798@@ -2643,7 +2643,7 @@ error:
53799 return 0;
53800 }
53801
53802-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
53803+dma_addr_t intel_map_page(struct device *dev, struct page *page,
53804 unsigned long offset, size_t size,
53805 enum dma_data_direction dir,
53806 struct dma_attrs *attrs)
53807@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
53808 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
53809 }
53810
53811-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
53812+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
53813 size_t size, enum dma_data_direction dir,
53814 struct dma_attrs *attrs)
53815 {
53816@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
53817 }
53818 }
53819
53820-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
53821+void *intel_alloc_coherent(struct device *hwdev, size_t size,
53822 dma_addr_t *dma_handle, gfp_t flags)
53823 {
53824 void *vaddr;
53825@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
53826 return NULL;
53827 }
53828
53829-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
53830+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
53831 dma_addr_t dma_handle)
53832 {
53833 int order;
53834@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
53835 free_pages((unsigned long)vaddr, order);
53836 }
53837
53838-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
53839+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
53840 int nelems, enum dma_data_direction dir,
53841 struct dma_attrs *attrs)
53842 {
53843@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
53844 return nelems;
53845 }
53846
53847-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
53848+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
53849 enum dma_data_direction dir, struct dma_attrs *attrs)
53850 {
53851 int i;
53852@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
53853 return nelems;
53854 }
53855
53856-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
53857+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
53858 {
53859 return !dma_addr;
53860 }
53861
53862-struct dma_map_ops intel_dma_ops = {
53863+const struct dma_map_ops intel_dma_ops = {
53864 .alloc_coherent = intel_alloc_coherent,
53865 .free_coherent = intel_free_coherent,
53866 .map_sg = intel_map_sg,
53867diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
53868index 5b7056c..607bc94 100644
53869--- a/drivers/pci/pcie/aspm.c
53870+++ b/drivers/pci/pcie/aspm.c
53871@@ -27,9 +27,9 @@
53872 #define MODULE_PARAM_PREFIX "pcie_aspm."
53873
53874 /* Note: those are not register definitions */
53875-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
53876-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
53877-#define ASPM_STATE_L1 (4) /* L1 state */
53878+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
53879+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
53880+#define ASPM_STATE_L1 (4U) /* L1 state */
53881 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
53882 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
53883
53884diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
53885index 8105e32..ca10419 100644
53886--- a/drivers/pci/probe.c
53887+++ b/drivers/pci/probe.c
53888@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
53889 return ret;
53890 }
53891
53892-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
53893+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
53894 struct device_attribute *attr,
53895 char *buf)
53896 {
53897 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
53898 }
53899
53900-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
53901+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
53902 struct device_attribute *attr,
53903 char *buf)
53904 {
53905diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
53906index a03ad8c..024b0da 100644
53907--- a/drivers/pci/proc.c
53908+++ b/drivers/pci/proc.c
53909@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
53910 static int __init pci_proc_init(void)
53911 {
53912 struct pci_dev *dev = NULL;
53913+
53914+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53915+#ifdef CONFIG_GRKERNSEC_PROC_USER
53916+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
53917+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53918+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53919+#endif
53920+#else
53921 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
53922+#endif
53923 proc_create("devices", 0, proc_bus_pci_dir,
53924 &proc_bus_pci_dev_operations);
53925 proc_initialized = 1;
53926diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
53927index 8c02b6c..5584d8e 100644
53928--- a/drivers/pci/slot.c
53929+++ b/drivers/pci/slot.c
53930@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
53931 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
53932 }
53933
53934-static struct sysfs_ops pci_slot_sysfs_ops = {
53935+static const struct sysfs_ops pci_slot_sysfs_ops = {
53936 .show = pci_slot_attr_show,
53937 .store = pci_slot_attr_store,
53938 };
53939diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
53940index 30cf71d2..50938f1 100644
53941--- a/drivers/pcmcia/pcmcia_ioctl.c
53942+++ b/drivers/pcmcia/pcmcia_ioctl.c
53943@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
53944 return -EFAULT;
53945 }
53946 }
53947- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
53948+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
53949 if (!buf)
53950 return -ENOMEM;
53951
53952diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
53953index 52183c4..b224c69 100644
53954--- a/drivers/platform/x86/acer-wmi.c
53955+++ b/drivers/platform/x86/acer-wmi.c
53956@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
53957 return 0;
53958 }
53959
53960-static struct backlight_ops acer_bl_ops = {
53961+static const struct backlight_ops acer_bl_ops = {
53962 .get_brightness = read_brightness,
53963 .update_status = update_bl_status,
53964 };
53965diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
53966index 767cb61..a87380b 100644
53967--- a/drivers/platform/x86/asus-laptop.c
53968+++ b/drivers/platform/x86/asus-laptop.c
53969@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
53970 */
53971 static int read_brightness(struct backlight_device *bd);
53972 static int update_bl_status(struct backlight_device *bd);
53973-static struct backlight_ops asusbl_ops = {
53974+static const struct backlight_ops asusbl_ops = {
53975 .get_brightness = read_brightness,
53976 .update_status = update_bl_status,
53977 };
53978diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
53979index d66c07a..a4abaac 100644
53980--- a/drivers/platform/x86/asus_acpi.c
53981+++ b/drivers/platform/x86/asus_acpi.c
53982@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
53983 return 0;
53984 }
53985
53986-static struct backlight_ops asus_backlight_data = {
53987+static const struct backlight_ops asus_backlight_data = {
53988 .get_brightness = read_brightness,
53989 .update_status = set_brightness_status,
53990 };
53991diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
53992index 11003bb..550ff1b 100644
53993--- a/drivers/platform/x86/compal-laptop.c
53994+++ b/drivers/platform/x86/compal-laptop.c
53995@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
53996 return set_lcd_level(b->props.brightness);
53997 }
53998
53999-static struct backlight_ops compalbl_ops = {
54000+static const struct backlight_ops compalbl_ops = {
54001 .get_brightness = bl_get_brightness,
54002 .update_status = bl_update_status,
54003 };
54004diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
54005index 07a74da..9dc99fa 100644
54006--- a/drivers/platform/x86/dell-laptop.c
54007+++ b/drivers/platform/x86/dell-laptop.c
54008@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
54009 return buffer.output[1];
54010 }
54011
54012-static struct backlight_ops dell_ops = {
54013+static const struct backlight_ops dell_ops = {
54014 .get_brightness = dell_get_intensity,
54015 .update_status = dell_send_intensity,
54016 };
54017diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
54018index c533b1c..5c81f22 100644
54019--- a/drivers/platform/x86/eeepc-laptop.c
54020+++ b/drivers/platform/x86/eeepc-laptop.c
54021@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
54022 */
54023 static int read_brightness(struct backlight_device *bd);
54024 static int update_bl_status(struct backlight_device *bd);
54025-static struct backlight_ops eeepcbl_ops = {
54026+static const struct backlight_ops eeepcbl_ops = {
54027 .get_brightness = read_brightness,
54028 .update_status = update_bl_status,
54029 };
54030diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
54031index bcd4ba8..a249b35 100644
54032--- a/drivers/platform/x86/fujitsu-laptop.c
54033+++ b/drivers/platform/x86/fujitsu-laptop.c
54034@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
54035 return ret;
54036 }
54037
54038-static struct backlight_ops fujitsubl_ops = {
54039+static const struct backlight_ops fujitsubl_ops = {
54040 .get_brightness = bl_get_brightness,
54041 .update_status = bl_update_status,
54042 };
54043diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
54044index 759763d..1093ba2 100644
54045--- a/drivers/platform/x86/msi-laptop.c
54046+++ b/drivers/platform/x86/msi-laptop.c
54047@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
54048 return set_lcd_level(b->props.brightness);
54049 }
54050
54051-static struct backlight_ops msibl_ops = {
54052+static const struct backlight_ops msibl_ops = {
54053 .get_brightness = bl_get_brightness,
54054 .update_status = bl_update_status,
54055 };
54056diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
54057index fe7cf01..9012d8d 100644
54058--- a/drivers/platform/x86/panasonic-laptop.c
54059+++ b/drivers/platform/x86/panasonic-laptop.c
54060@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
54061 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
54062 }
54063
54064-static struct backlight_ops pcc_backlight_ops = {
54065+static const struct backlight_ops pcc_backlight_ops = {
54066 .get_brightness = bl_get,
54067 .update_status = bl_set_status,
54068 };
54069diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
54070index a2a742c..b37e25e 100644
54071--- a/drivers/platform/x86/sony-laptop.c
54072+++ b/drivers/platform/x86/sony-laptop.c
54073@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
54074 }
54075
54076 static struct backlight_device *sony_backlight_device;
54077-static struct backlight_ops sony_backlight_ops = {
54078+static const struct backlight_ops sony_backlight_ops = {
54079 .update_status = sony_backlight_update_status,
54080 .get_brightness = sony_backlight_get_brightness,
54081 };
54082diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
54083index 68271ae..5e8fb10 100644
54084--- a/drivers/platform/x86/thinkpad_acpi.c
54085+++ b/drivers/platform/x86/thinkpad_acpi.c
54086@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
54087 return 0;
54088 }
54089
54090-void static hotkey_mask_warn_incomplete_mask(void)
54091+static void hotkey_mask_warn_incomplete_mask(void)
54092 {
54093 /* log only what the user can fix... */
54094 const u32 wantedmask = hotkey_driver_mask &
54095@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
54096 BACKLIGHT_UPDATE_HOTKEY);
54097 }
54098
54099-static struct backlight_ops ibm_backlight_data = {
54100+static const struct backlight_ops ibm_backlight_data = {
54101 .get_brightness = brightness_get,
54102 .update_status = brightness_update_status,
54103 };
54104diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
54105index 51c0a8b..0786629 100644
54106--- a/drivers/platform/x86/toshiba_acpi.c
54107+++ b/drivers/platform/x86/toshiba_acpi.c
54108@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
54109 return AE_OK;
54110 }
54111
54112-static struct backlight_ops toshiba_backlight_data = {
54113+static const struct backlight_ops toshiba_backlight_data = {
54114 .get_brightness = get_lcd,
54115 .update_status = set_lcd_status,
54116 };
54117diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
54118index fc83783c..cf370d7 100644
54119--- a/drivers/pnp/pnpbios/bioscalls.c
54120+++ b/drivers/pnp/pnpbios/bioscalls.c
54121@@ -60,7 +60,7 @@ do { \
54122 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
54123 } while(0)
54124
54125-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
54126+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
54127 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
54128
54129 /*
54130@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54131
54132 cpu = get_cpu();
54133 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
54134+
54135+ pax_open_kernel();
54136 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
54137+ pax_close_kernel();
54138
54139 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
54140 spin_lock_irqsave(&pnp_bios_lock, flags);
54141@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
54142 :"memory");
54143 spin_unlock_irqrestore(&pnp_bios_lock, flags);
54144
54145+ pax_open_kernel();
54146 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
54147+ pax_close_kernel();
54148+
54149 put_cpu();
54150
54151 /* If we get here and this is set then the PnP BIOS faulted on us. */
54152@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
54153 return status;
54154 }
54155
54156-void pnpbios_calls_init(union pnp_bios_install_struct *header)
54157+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
54158 {
54159 int i;
54160
54161@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54162 pnp_bios_callpoint.offset = header->fields.pm16offset;
54163 pnp_bios_callpoint.segment = PNP_CS16;
54164
54165+ pax_open_kernel();
54166+
54167 for_each_possible_cpu(i) {
54168 struct desc_struct *gdt = get_cpu_gdt_table(i);
54169 if (!gdt)
54170@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
54171 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
54172 (unsigned long)__va(header->fields.pm16dseg));
54173 }
54174+
54175+ pax_close_kernel();
54176 }
54177diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
54178index ba97654..66b99d4 100644
54179--- a/drivers/pnp/resource.c
54180+++ b/drivers/pnp/resource.c
54181@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
54182 return 1;
54183
54184 /* check if the resource is valid */
54185- if (*irq < 0 || *irq > 15)
54186+ if (*irq > 15)
54187 return 0;
54188
54189 /* check if the resource is reserved */
54190@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
54191 return 1;
54192
54193 /* check if the resource is valid */
54194- if (*dma < 0 || *dma == 4 || *dma > 7)
54195+ if (*dma == 4 || *dma > 7)
54196 return 0;
54197
54198 /* check if the resource is reserved */
54199diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
54200index 62bb981..24a2dc9 100644
54201--- a/drivers/power/bq27x00_battery.c
54202+++ b/drivers/power/bq27x00_battery.c
54203@@ -44,7 +44,7 @@ struct bq27x00_device_info;
54204 struct bq27x00_access_methods {
54205 int (*read)(u8 reg, int *rt_value, int b_single,
54206 struct bq27x00_device_info *di);
54207-};
54208+} __no_const;
54209
54210 struct bq27x00_device_info {
54211 struct device *dev;
54212diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
54213index 62227cd..b5b538b 100644
54214--- a/drivers/rtc/rtc-dev.c
54215+++ b/drivers/rtc/rtc-dev.c
54216@@ -14,6 +14,7 @@
54217 #include <linux/module.h>
54218 #include <linux/rtc.h>
54219 #include <linux/sched.h>
54220+#include <linux/grsecurity.h>
54221 #include "rtc-core.h"
54222
54223 static dev_t rtc_devt;
54224@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
54225 if (copy_from_user(&tm, uarg, sizeof(tm)))
54226 return -EFAULT;
54227
54228+ gr_log_timechange();
54229+
54230 return rtc_set_time(rtc, &tm);
54231
54232 case RTC_PIE_ON:
54233diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
54234index 968e3c7..fbc637a 100644
54235--- a/drivers/s390/cio/qdio_perf.c
54236+++ b/drivers/s390/cio/qdio_perf.c
54237@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
54238 static int qdio_perf_proc_show(struct seq_file *m, void *v)
54239 {
54240 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
54241- (long)atomic_long_read(&perf_stats.qdio_int));
54242+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
54243 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
54244- (long)atomic_long_read(&perf_stats.pci_int));
54245+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
54246 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
54247- (long)atomic_long_read(&perf_stats.thin_int));
54248+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
54249 seq_printf(m, "\n");
54250 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
54251- (long)atomic_long_read(&perf_stats.tasklet_inbound));
54252+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
54253 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
54254- (long)atomic_long_read(&perf_stats.tasklet_outbound));
54255+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
54256 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
54257- (long)atomic_long_read(&perf_stats.tasklet_thinint),
54258- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
54259+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
54260+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
54261 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
54262- (long)atomic_long_read(&perf_stats.thinint_inbound),
54263- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
54264+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
54265+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
54266 seq_printf(m, "\n");
54267 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
54268- (long)atomic_long_read(&perf_stats.siga_in));
54269+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
54270 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
54271- (long)atomic_long_read(&perf_stats.siga_out));
54272+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
54273 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
54274- (long)atomic_long_read(&perf_stats.siga_sync));
54275+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
54276 seq_printf(m, "\n");
54277 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
54278- (long)atomic_long_read(&perf_stats.inbound_handler));
54279+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
54280 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
54281- (long)atomic_long_read(&perf_stats.outbound_handler));
54282+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
54283 seq_printf(m, "\n");
54284 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
54285- (long)atomic_long_read(&perf_stats.fast_requeue));
54286+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
54287 seq_printf(m, "Number of outbound target full condition\t: %li\n",
54288- (long)atomic_long_read(&perf_stats.outbound_target_full));
54289+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
54290 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
54291- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
54292+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
54293 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
54294- (long)atomic_long_read(&perf_stats.debug_stop_polling));
54295+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
54296 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
54297- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
54298+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
54299 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
54300- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
54301- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
54302+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
54303+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
54304 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
54305- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
54306- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
54307+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
54308+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
54309 seq_printf(m, "\n");
54310 return 0;
54311 }
54312diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
54313index ff4504c..b3604c3 100644
54314--- a/drivers/s390/cio/qdio_perf.h
54315+++ b/drivers/s390/cio/qdio_perf.h
54316@@ -13,46 +13,46 @@
54317
54318 struct qdio_perf_stats {
54319 /* interrupt handler calls */
54320- atomic_long_t qdio_int;
54321- atomic_long_t pci_int;
54322- atomic_long_t thin_int;
54323+ atomic_long_unchecked_t qdio_int;
54324+ atomic_long_unchecked_t pci_int;
54325+ atomic_long_unchecked_t thin_int;
54326
54327 /* tasklet runs */
54328- atomic_long_t tasklet_inbound;
54329- atomic_long_t tasklet_outbound;
54330- atomic_long_t tasklet_thinint;
54331- atomic_long_t tasklet_thinint_loop;
54332- atomic_long_t thinint_inbound;
54333- atomic_long_t thinint_inbound_loop;
54334- atomic_long_t thinint_inbound_loop2;
54335+ atomic_long_unchecked_t tasklet_inbound;
54336+ atomic_long_unchecked_t tasklet_outbound;
54337+ atomic_long_unchecked_t tasklet_thinint;
54338+ atomic_long_unchecked_t tasklet_thinint_loop;
54339+ atomic_long_unchecked_t thinint_inbound;
54340+ atomic_long_unchecked_t thinint_inbound_loop;
54341+ atomic_long_unchecked_t thinint_inbound_loop2;
54342
54343 /* signal adapter calls */
54344- atomic_long_t siga_out;
54345- atomic_long_t siga_in;
54346- atomic_long_t siga_sync;
54347+ atomic_long_unchecked_t siga_out;
54348+ atomic_long_unchecked_t siga_in;
54349+ atomic_long_unchecked_t siga_sync;
54350
54351 /* misc */
54352- atomic_long_t inbound_handler;
54353- atomic_long_t outbound_handler;
54354- atomic_long_t fast_requeue;
54355- atomic_long_t outbound_target_full;
54356+ atomic_long_unchecked_t inbound_handler;
54357+ atomic_long_unchecked_t outbound_handler;
54358+ atomic_long_unchecked_t fast_requeue;
54359+ atomic_long_unchecked_t outbound_target_full;
54360
54361 /* for debugging */
54362- atomic_long_t debug_tl_out_timer;
54363- atomic_long_t debug_stop_polling;
54364- atomic_long_t debug_eqbs_all;
54365- atomic_long_t debug_eqbs_incomplete;
54366- atomic_long_t debug_sqbs_all;
54367- atomic_long_t debug_sqbs_incomplete;
54368+ atomic_long_unchecked_t debug_tl_out_timer;
54369+ atomic_long_unchecked_t debug_stop_polling;
54370+ atomic_long_unchecked_t debug_eqbs_all;
54371+ atomic_long_unchecked_t debug_eqbs_incomplete;
54372+ atomic_long_unchecked_t debug_sqbs_all;
54373+ atomic_long_unchecked_t debug_sqbs_incomplete;
54374 };
54375
54376 extern struct qdio_perf_stats perf_stats;
54377 extern int qdio_performance_stats;
54378
54379-static inline void qdio_perf_stat_inc(atomic_long_t *count)
54380+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
54381 {
54382 if (qdio_performance_stats)
54383- atomic_long_inc(count);
54384+ atomic_long_inc_unchecked(count);
54385 }
54386
54387 int qdio_setup_perf_stats(void);
54388diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
54389new file mode 100644
54390index 0000000..7d18a18
54391--- /dev/null
54392+++ b/drivers/scsi/3w-sas.c
54393@@ -0,0 +1,1933 @@
54394+/*
54395+ 3w-sas.c -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
54396+
54397+ Written By: Adam Radford <linuxraid@lsi.com>
54398+
54399+ Copyright (C) 2009 LSI Corporation.
54400+
54401+ This program is free software; you can redistribute it and/or modify
54402+ it under the terms of the GNU General Public License as published by
54403+ the Free Software Foundation; version 2 of the License.
54404+
54405+ This program is distributed in the hope that it will be useful,
54406+ but WITHOUT ANY WARRANTY; without even the implied warranty of
54407+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
54408+ GNU General Public License for more details.
54409+
54410+ NO WARRANTY
54411+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
54412+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
54413+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
54414+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
54415+ solely responsible for determining the appropriateness of using and
54416+ distributing the Program and assumes all risks associated with its
54417+ exercise of rights under this Agreement, including but not limited to
54418+ the risks and costs of program errors, damage to or loss of data,
54419+ programs or equipment, and unavailability or interruption of operations.
54420+
54421+ DISCLAIMER OF LIABILITY
54422+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
54423+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54424+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
54425+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
54426+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
54427+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
54428+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
54429+
54430+ You should have received a copy of the GNU General Public License
54431+ along with this program; if not, write to the Free Software
54432+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
54433+
54434+ Controllers supported by this driver:
54435+
54436+ LSI 3ware 9750 6Gb/s SAS/SATA-RAID
54437+
54438+ Bugs/Comments/Suggestions should be mailed to:
54439+ linuxraid@lsi.com
54440+
54441+ For more information, goto:
54442+ http://www.lsi.com
54443+
54444+ History
54445+ -------
54446+ 3.26.00.000 - Initial driver release.
54447+*/
54448+
54449+#include <linux/module.h>
54450+#include <linux/reboot.h>
54451+#include <linux/spinlock.h>
54452+#include <linux/interrupt.h>
54453+#include <linux/moduleparam.h>
54454+#include <linux/errno.h>
54455+#include <linux/types.h>
54456+#include <linux/delay.h>
54457+#include <linux/pci.h>
54458+#include <linux/time.h>
54459+#include <linux/mutex.h>
54460+#include <linux/smp_lock.h>
54461+#include <asm/io.h>
54462+#include <asm/irq.h>
54463+#include <asm/uaccess.h>
54464+#include <scsi/scsi.h>
54465+#include <scsi/scsi_host.h>
54466+#include <scsi/scsi_tcq.h>
54467+#include <scsi/scsi_cmnd.h>
54468+#include "3w-sas.h"
54469+
54470+/* Globals */
54471+#define TW_DRIVER_VERSION "3.26.00.028-2.6.32RH"
54472+static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT];
54473+static unsigned int twl_device_extension_count;
54474+static int twl_major = -1;
54475+extern struct timezone sys_tz;
54476+
54477+/* Module parameters */
54478+MODULE_AUTHOR ("LSI");
54479+MODULE_DESCRIPTION ("LSI 3ware SAS/SATA-RAID Linux Driver");
54480+MODULE_LICENSE("GPL");
54481+MODULE_VERSION(TW_DRIVER_VERSION);
54482+
54483+static int use_msi = 0;
54484+module_param(use_msi, int, S_IRUGO);
54485+MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
54486+
54487+/* Function prototypes */
54488+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset);
54489+
54490+/* Functions */
54491+
54492+/* This function returns AENs through sysfs */
54493+static ssize_t twl_sysfs_aen_read(struct file *filp, struct kobject *kobj,
54494+ struct bin_attribute *bin_attr,
54495+ char *outbuf, loff_t offset, size_t count)
54496+{
54497+ struct device *dev = container_of(kobj, struct device, kobj);
54498+ struct Scsi_Host *shost = class_to_shost(dev);
54499+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
54500+ unsigned long flags = 0;
54501+ ssize_t ret;
54502+
54503+ if (!capable(CAP_SYS_ADMIN))
54504+ return -EACCES;
54505+
54506+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
54507+ ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
54508+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
54509+
54510+ return ret;
54511+} /* End twl_sysfs_aen_read() */
54512+
54513+/* aen_read sysfs attribute initializer */
54514+static struct bin_attribute twl_sysfs_aen_read_attr = {
54515+ .attr = {
54516+ .name = "3ware_aen_read",
54517+ .mode = S_IRUSR,
54518+ },
54519+ .size = 0,
54520+ .read = twl_sysfs_aen_read
54521+};
54522+
54523+/* This function returns driver compatibility info through sysfs */
54524+static ssize_t twl_sysfs_compat_info(struct file *filp, struct kobject *kobj,
54525+ struct bin_attribute *bin_attr,
54526+ char *outbuf, loff_t offset, size_t count)
54527+{
54528+ struct device *dev = container_of(kobj, struct device, kobj);
54529+ struct Scsi_Host *shost = class_to_shost(dev);
54530+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)shost->hostdata;
54531+ unsigned long flags = 0;
54532+ ssize_t ret;
54533+
54534+ if (!capable(CAP_SYS_ADMIN))
54535+ return -EACCES;
54536+
54537+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
54538+ ret = memory_read_from_buffer(outbuf, count, &offset, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
54539+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
54540+
54541+ return ret;
54542+} /* End twl_sysfs_compat_info() */
54543+
54544+/* compat_info sysfs attribute initializer */
54545+static struct bin_attribute twl_sysfs_compat_info_attr = {
54546+ .attr = {
54547+ .name = "3ware_compat_info",
54548+ .mode = S_IRUSR,
54549+ },
54550+ .size = 0,
54551+ .read = twl_sysfs_compat_info
54552+};
54553+
54554+/* Show some statistics about the card */
54555+static ssize_t twl_show_stats(struct device *dev,
54556+ struct device_attribute *attr, char *buf)
54557+{
54558+ struct Scsi_Host *host = class_to_shost(dev);
54559+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
54560+ unsigned long flags = 0;
54561+ ssize_t len;
54562+
54563+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
54564+ len = snprintf(buf, PAGE_SIZE, "3w-sas Driver version: %s\n"
54565+ "Current commands posted: %4d\n"
54566+ "Max commands posted: %4d\n"
54567+ "Last sgl length: %4d\n"
54568+ "Max sgl length: %4d\n"
54569+ "Last sector count: %4d\n"
54570+ "Max sector count: %4d\n"
54571+ "SCSI Host Resets: %4d\n"
54572+ "AEN's: %4d\n",
54573+ TW_DRIVER_VERSION,
54574+ tw_dev->posted_request_count,
54575+ tw_dev->max_posted_request_count,
54576+ tw_dev->sgl_entries,
54577+ tw_dev->max_sgl_entries,
54578+ tw_dev->sector_count,
54579+ tw_dev->max_sector_count,
54580+ tw_dev->num_resets,
54581+ tw_dev->aen_count);
54582+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
54583+ return len;
54584+} /* End twl_show_stats() */
54585+
54586+/* This function will set a devices queue depth */
54587+static int twl_change_queue_depth(struct scsi_device *sdev, int queue_depth,
54588+ int reason)
54589+{
54590+ if (reason != SCSI_QDEPTH_DEFAULT)
54591+ return -EOPNOTSUPP;
54592+
54593+ if (queue_depth > TW_Q_LENGTH-2)
54594+ queue_depth = TW_Q_LENGTH-2;
54595+ scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
54596+ return queue_depth;
54597+} /* End twl_change_queue_depth() */
54598+
54599+/* stats sysfs attribute initializer */
54600+static struct device_attribute twl_host_stats_attr = {
54601+ .attr = {
54602+ .name = "3ware_stats",
54603+ .mode = S_IRUGO,
54604+ },
54605+ .show = twl_show_stats
54606+};
54607+
54608+/* Host attributes initializer */
54609+static struct device_attribute *twl_host_attrs[] = {
54610+ &twl_host_stats_attr,
54611+ NULL,
54612+};
54613+
54614+/* This function will look up an AEN severity string */
54615+static char *twl_aen_severity_lookup(unsigned char severity_code)
54616+{
54617+ char *retval = NULL;
54618+
54619+ if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
54620+ (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
54621+ goto out;
54622+
54623+ retval = twl_aen_severity_table[severity_code];
54624+out:
54625+ return retval;
54626+} /* End twl_aen_severity_lookup() */
54627+
54628+/* This function will queue an event */
54629+static void twl_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
54630+{
54631+ u32 local_time;
54632+ struct timeval time;
54633+ TW_Event *event;
54634+ unsigned short aen;
54635+ char host[16];
54636+ char *error_str;
54637+
54638+ tw_dev->aen_count++;
54639+
54640+ /* Fill out event info */
54641+ event = tw_dev->event_queue[tw_dev->error_index];
54642+
54643+ host[0] = '\0';
54644+ if (tw_dev->host)
54645+ sprintf(host, " scsi%d:", tw_dev->host->host_no);
54646+
54647+ aen = le16_to_cpu(header->status_block.error);
54648+ memset(event, 0, sizeof(TW_Event));
54649+
54650+ event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
54651+ do_gettimeofday(&time);
54652+ local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
54653+ event->time_stamp_sec = local_time;
54654+ event->aen_code = aen;
54655+ event->retrieved = TW_AEN_NOT_RETRIEVED;
54656+ event->sequence_id = tw_dev->error_sequence_id;
54657+ tw_dev->error_sequence_id++;
54658+
54659+ /* Check for embedded error string */
54660+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
54661+
54662+ header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
54663+ event->parameter_len = strlen(header->err_specific_desc);
54664+ memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + 1 + strlen(error_str));
54665+ if (event->severity != TW_AEN_SEVERITY_DEBUG)
54666+ printk(KERN_WARNING "3w-sas:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
54667+ host,
54668+ twl_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
54669+ TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, error_str,
54670+ header->err_specific_desc);
54671+ else
54672+ tw_dev->aen_count--;
54673+
54674+ tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
54675+} /* End twl_aen_queue_event() */
54676+
54677+/* This function will attempt to post a command packet to the board */
54678+static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
54679+{
54680+ dma_addr_t command_que_value;
54681+
54682+ command_que_value = tw_dev->command_packet_phys[request_id];
54683+ command_que_value += TW_COMMAND_OFFSET;
54684+
54685+ /* First write upper 4 bytes */
54686+ writel((u32)((u64)command_que_value >> 32), TWL_HIBQPH_REG_ADDR(tw_dev));
54687+ /* Then the lower 4 bytes */
54688+ writel((u32)(command_que_value | TWL_PULL_MODE), TWL_HIBQPL_REG_ADDR(tw_dev));
54689+
54690+ tw_dev->state[request_id] = TW_S_POSTED;
54691+ tw_dev->posted_request_count++;
54692+ if (tw_dev->posted_request_count > tw_dev->max_posted_request_count)
54693+ tw_dev->max_posted_request_count = tw_dev->posted_request_count;
54694+
54695+ return 0;
54696+} /* End twl_post_command_packet() */
54697+
54698+/* This function will perform a pci-dma mapping for a scatter gather list */
54699+static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
54700+{
54701+ int use_sg;
54702+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
54703+
54704+ use_sg = scsi_dma_map(cmd);
54705+ if (!use_sg)
54706+ return 0;
54707+ else if (use_sg < 0) {
54708+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
54709+ return 0;
54710+ }
54711+
54712+ cmd->SCp.phase = TW_PHASE_SGLIST;
54713+ cmd->SCp.have_data_in = use_sg;
54714+
54715+ return use_sg;
54716+} /* End twl_map_scsi_sg_data() */
54717+
54718+/* This function hands scsi cdb's to the firmware */
54719+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
54720+{
54721+ TW_Command_Full *full_command_packet;
54722+ TW_Command_Apache *command_packet;
54723+ int i, sg_count;
54724+ struct scsi_cmnd *srb = NULL;
54725+ struct scatterlist *sglist = NULL, *sg;
54726+ int retval = 1;
54727+
54728+ if (tw_dev->srb[request_id]) {
54729+ srb = tw_dev->srb[request_id];
54730+ if (scsi_sglist(srb))
54731+ sglist = scsi_sglist(srb);
54732+ }
54733+
54734+ /* Initialize command packet */
54735+ full_command_packet = tw_dev->command_packet_virt[request_id];
54736+ full_command_packet->header.header_desc.size_header = 128;
54737+ full_command_packet->header.status_block.error = 0;
54738+ full_command_packet->header.status_block.severity__reserved = 0;
54739+
54740+ command_packet = &full_command_packet->command.newcommand;
54741+ command_packet->status = 0;
54742+ command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
54743+
54744+ /* We forced 16 byte cdb use earlier */
54745+ if (!cdb)
54746+ memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
54747+ else
54748+ memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
54749+
54750+ if (srb) {
54751+ command_packet->unit = srb->device->id;
54752+ command_packet->request_id__lunl =
54753+ cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
54754+ } else {
54755+ command_packet->request_id__lunl =
54756+ cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
54757+ command_packet->unit = 0;
54758+ }
54759+
54760+ command_packet->sgl_offset = 16;
54761+
54762+ if (!sglistarg) {
54763+ /* Map sglist from scsi layer to cmd packet */
54764+ if (scsi_sg_count(srb)) {
54765+ sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
54766+ if (sg_count == 0)
54767+ goto out;
54768+
54769+ scsi_for_each_sg(srb, sg, sg_count, i) {
54770+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
54771+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sg_dma_len(sg));
54772+ }
54773+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
54774+ }
54775+ } else {
54776+ /* Internal cdb post */
54777+ for (i = 0; i < use_sg; i++) {
54778+ command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
54779+ command_packet->sg_list[i].length = TW_CPU_TO_SGL(sglistarg[i].length);
54780+ }
54781+ command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
54782+ }
54783+
54784+ /* Update some stats */
54785+ if (srb) {
54786+ tw_dev->sector_count = scsi_bufflen(srb) / 512;
54787+ if (tw_dev->sector_count > tw_dev->max_sector_count)
54788+ tw_dev->max_sector_count = tw_dev->sector_count;
54789+ tw_dev->sgl_entries = scsi_sg_count(srb);
54790+ if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
54791+ tw_dev->max_sgl_entries = tw_dev->sgl_entries;
54792+ }
54793+
54794+ /* Now post the command to the board */
54795+ retval = twl_post_command_packet(tw_dev, request_id);
54796+
54797+out:
54798+ return retval;
54799+} /* End twl_scsiop_execute_scsi() */
54800+
54801+/* This function will read the aen queue from the isr */
54802+static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
54803+{
54804+ char cdb[TW_MAX_CDB_LEN];
54805+ TW_SG_Entry_ISO sglist[1];
54806+ TW_Command_Full *full_command_packet;
54807+ int retval = 1;
54808+
54809+ full_command_packet = tw_dev->command_packet_virt[request_id];
54810+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
54811+
54812+ /* Initialize cdb */
54813+ memset(&cdb, 0, TW_MAX_CDB_LEN);
54814+ cdb[0] = REQUEST_SENSE; /* opcode */
54815+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
54816+
54817+ /* Initialize sglist */
54818+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
54819+ sglist[0].length = TW_SECTOR_SIZE;
54820+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
54821+
54822+ /* Mark internal command */
54823+ tw_dev->srb[request_id] = NULL;
54824+
54825+ /* Now post the command packet */
54826+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
54827+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Post failed while reading AEN queue");
54828+ goto out;
54829+ }
54830+ retval = 0;
54831+out:
54832+ return retval;
54833+} /* End twl_aen_read_queue() */
54834+
54835+/* This function will sync firmware time with the host time */
54836+static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
54837+{
54838+ u32 schedulertime;
54839+ struct timeval utc;
54840+ TW_Command_Full *full_command_packet;
54841+ TW_Command *command_packet;
54842+ TW_Param_Apache *param;
54843+ u32 local_time;
54844+
54845+ /* Fill out the command packet */
54846+ full_command_packet = tw_dev->command_packet_virt[request_id];
54847+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
54848+ command_packet = &full_command_packet->command.oldcommand;
54849+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
54850+ command_packet->request_id = request_id;
54851+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
54852+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
54853+ command_packet->size = TW_COMMAND_SIZE;
54854+ command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
54855+
54856+ /* Setup the param */
54857+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
54858+ memset(param, 0, TW_SECTOR_SIZE);
54859+ param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
54860+ param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
54861+ param->parameter_size_bytes = cpu_to_le16(4);
54862+
54863+ /* Convert system time in UTC to local time seconds since last
54864+ Sunday 12:00AM */
54865+ do_gettimeofday(&utc);
54866+ local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
54867+ schedulertime = local_time - (3 * 86400);
54868+ schedulertime = cpu_to_le32(schedulertime % 604800);
54869+
54870+ memcpy(param->data, &schedulertime, sizeof(u32));
54871+
54872+ /* Mark internal command */
54873+ tw_dev->srb[request_id] = NULL;
54874+
54875+ /* Now post the command */
54876+ twl_post_command_packet(tw_dev, request_id);
54877+} /* End twl_aen_sync_time() */
54878+
54879+/* This function will assign an available request id */
54880+static void twl_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
54881+{
54882+ *request_id = tw_dev->free_queue[tw_dev->free_head];
54883+ tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
54884+ tw_dev->state[*request_id] = TW_S_STARTED;
54885+} /* End twl_get_request_id() */
54886+
54887+/* This function will free a request id */
54888+static void twl_free_request_id(TW_Device_Extension *tw_dev, int request_id)
54889+{
54890+ tw_dev->free_queue[tw_dev->free_tail] = request_id;
54891+ tw_dev->state[request_id] = TW_S_FINISHED;
54892+ tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
54893+} /* End twl_free_request_id() */
54894+
54895+/* This function will complete an aen request from the isr */
54896+static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
54897+{
54898+ TW_Command_Full *full_command_packet;
54899+ TW_Command *command_packet;
54900+ TW_Command_Apache_Header *header;
54901+ unsigned short aen;
54902+ int retval = 1;
54903+
54904+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
54905+ tw_dev->posted_request_count--;
54906+ aen = le16_to_cpu(header->status_block.error);
54907+ full_command_packet = tw_dev->command_packet_virt[request_id];
54908+ command_packet = &full_command_packet->command.oldcommand;
54909+
54910+ /* First check for internal completion of set param for time sync */
54911+ if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
54912+ /* Keep reading the queue in case there are more aen's */
54913+ if (twl_aen_read_queue(tw_dev, request_id))
54914+ goto out2;
54915+ else {
54916+ retval = 0;
54917+ goto out;
54918+ }
54919+ }
54920+
54921+ switch (aen) {
54922+ case TW_AEN_QUEUE_EMPTY:
54923+ /* Quit reading the queue if this is the last one */
54924+ break;
54925+ case TW_AEN_SYNC_TIME_WITH_HOST:
54926+ twl_aen_sync_time(tw_dev, request_id);
54927+ retval = 0;
54928+ goto out;
54929+ default:
54930+ twl_aen_queue_event(tw_dev, header);
54931+
54932+ /* If there are more aen's, keep reading the queue */
54933+ if (twl_aen_read_queue(tw_dev, request_id))
54934+ goto out2;
54935+ else {
54936+ retval = 0;
54937+ goto out;
54938+ }
54939+ }
54940+ retval = 0;
54941+out2:
54942+ tw_dev->state[request_id] = TW_S_COMPLETED;
54943+ twl_free_request_id(tw_dev, request_id);
54944+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
54945+out:
54946+ return retval;
54947+} /* End twl_aen_complete() */
54948+
54949+/* This function will poll for a response */
54950+static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
54951+{
54952+ unsigned long before;
54953+ dma_addr_t mfa;
54954+ u32 regh, regl;
54955+ u32 response;
54956+ int retval = 1;
54957+ int found = 0;
54958+
54959+ before = jiffies;
54960+
54961+ while (!found) {
54962+ if (sizeof(dma_addr_t) > 4) {
54963+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
54964+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
54965+ mfa = ((u64)regh << 32) | regl;
54966+ } else
54967+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
54968+
54969+ response = (u32)mfa;
54970+
54971+ if (TW_RESID_OUT(response) == request_id)
54972+ found = 1;
54973+
54974+ if (time_after(jiffies, before + HZ * seconds))
54975+ goto out;
54976+
54977+ msleep(50);
54978+ }
54979+ retval = 0;
54980+out:
54981+ return retval;
54982+} /* End twl_poll_response() */
54983+
54984+/* This function will drain the aen queue */
54985+static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
54986+{
54987+ int request_id = 0;
54988+ char cdb[TW_MAX_CDB_LEN];
54989+ TW_SG_Entry_ISO sglist[1];
54990+ int finished = 0, count = 0;
54991+ TW_Command_Full *full_command_packet;
54992+ TW_Command_Apache_Header *header;
54993+ unsigned short aen;
54994+ int first_reset = 0, queue = 0, retval = 1;
54995+
54996+ if (no_check_reset)
54997+ first_reset = 0;
54998+ else
54999+ first_reset = 1;
55000+
55001+ full_command_packet = tw_dev->command_packet_virt[request_id];
55002+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55003+
55004+ /* Initialize cdb */
55005+ memset(&cdb, 0, TW_MAX_CDB_LEN);
55006+ cdb[0] = REQUEST_SENSE; /* opcode */
55007+ cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
55008+
55009+ /* Initialize sglist */
55010+ memset(&sglist, 0, sizeof(TW_SG_Entry_ISO));
55011+ sglist[0].length = TW_SECTOR_SIZE;
55012+ sglist[0].address = tw_dev->generic_buffer_phys[request_id];
55013+
55014+ /* Mark internal command */
55015+ tw_dev->srb[request_id] = NULL;
55016+
55017+ do {
55018+ /* Send command to the board */
55019+ if (twl_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
55020+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "Error posting request sense");
55021+ goto out;
55022+ }
55023+
55024+ /* Now poll for completion */
55025+ if (twl_poll_response(tw_dev, request_id, 30)) {
55026+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "No valid response while draining AEN queue");
55027+ tw_dev->posted_request_count--;
55028+ goto out;
55029+ }
55030+
55031+ tw_dev->posted_request_count--;
55032+ header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
55033+ aen = le16_to_cpu(header->status_block.error);
55034+ queue = 0;
55035+ count++;
55036+
55037+ switch (aen) {
55038+ case TW_AEN_QUEUE_EMPTY:
55039+ if (first_reset != 1)
55040+ goto out;
55041+ else
55042+ finished = 1;
55043+ break;
55044+ case TW_AEN_SOFT_RESET:
55045+ if (first_reset == 0)
55046+ first_reset = 1;
55047+ else
55048+ queue = 1;
55049+ break;
55050+ case TW_AEN_SYNC_TIME_WITH_HOST:
55051+ break;
55052+ default:
55053+ queue = 1;
55054+ }
55055+
55056+ /* Now queue an event info */
55057+ if (queue)
55058+ twl_aen_queue_event(tw_dev, header);
55059+ } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
55060+
55061+ if (count == TW_MAX_AEN_DRAIN)
55062+ goto out;
55063+
55064+ retval = 0;
55065+out:
55066+ tw_dev->state[request_id] = TW_S_INITIAL;
55067+ return retval;
55068+} /* End twl_aen_drain_queue() */
55069+
55070+/* This function will allocate memory and check if it is correctly aligned */
55071+static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
55072+{
55073+ int i;
55074+ dma_addr_t dma_handle;
55075+ unsigned long *cpu_addr;
55076+ int retval = 1;
55077+
55078+ cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
55079+ if (!cpu_addr) {
55080+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
55081+ goto out;
55082+ }
55083+
55084+ memset(cpu_addr, 0, size*TW_Q_LENGTH);
55085+
55086+ for (i = 0; i < TW_Q_LENGTH; i++) {
55087+ switch(which) {
55088+ case 0:
55089+ tw_dev->command_packet_phys[i] = dma_handle+(i*size);
55090+ tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
55091+ break;
55092+ case 1:
55093+ tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
55094+ tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
55095+ break;
55096+ case 2:
55097+ tw_dev->sense_buffer_phys[i] = dma_handle+(i*size);
55098+ tw_dev->sense_buffer_virt[i] = (TW_Command_Apache_Header *)((unsigned char *)cpu_addr + (i*size));
55099+ break;
55100+ }
55101+ }
55102+ retval = 0;
55103+out:
55104+ return retval;
55105+} /* End twl_allocate_memory() */
55106+
55107+/* This function will load the request id and various sgls for ioctls */
55108+static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
55109+{
55110+ TW_Command *oldcommand;
55111+ TW_Command_Apache *newcommand;
55112+ TW_SG_Entry_ISO *sgl;
55113+ unsigned int pae = 0;
55114+
55115+ if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
55116+ pae = 1;
55117+
55118+ if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
55119+ newcommand = &full_command_packet->command.newcommand;
55120+ newcommand->request_id__lunl =
55121+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
55122+ if (length) {
55123+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55124+ newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
55125+ }
55126+ newcommand->sgl_entries__lunh =
55127+ cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
55128+ } else {
55129+ oldcommand = &full_command_packet->command.oldcommand;
55130+ oldcommand->request_id = request_id;
55131+
55132+ if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
55133+ /* Load the sg list */
55134+ sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
55135+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
55136+ sgl->length = TW_CPU_TO_SGL(length);
55137+ oldcommand->size += pae;
55138+ oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
55139+ }
55140+ }
55141+} /* End twl_load_sgl() */
55142+
55143+/* This function handles ioctl for the character device
55144+ This interface is used by smartmontools open source software */
55145+static int twl_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
55146+{
55147+ long timeout;
55148+ unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
55149+ dma_addr_t dma_handle;
55150+ int request_id = 0;
55151+ TW_Ioctl_Driver_Command driver_command;
55152+ TW_Ioctl_Buf_Apache *tw_ioctl;
55153+ TW_Command_Full *full_command_packet;
55154+ TW_Device_Extension *tw_dev = twl_device_extension_list[iminor(inode)];
55155+ int retval = -EFAULT;
55156+ void __user *argp = (void __user *)arg;
55157+
55158+ /* Only let one of these through at a time */
55159+ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
55160+ retval = -EINTR;
55161+ goto out;
55162+ }
55163+
55164+ /* First copy down the driver command */
55165+ if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
55166+ goto out2;
55167+
55168+ /* Check data buffer size */
55169+ if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
55170+ retval = -EINVAL;
55171+ goto out2;
55172+ }
55173+
55174+ /* Hardware can only do multiple of 512 byte transfers */
55175+ data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
55176+
55177+ /* Now allocate ioctl buf memory */
55178+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
55179+ if (!cpu_addr) {
55180+ retval = -ENOMEM;
55181+ goto out2;
55182+ }
55183+
55184+ tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
55185+
55186+ /* Now copy down the entire ioctl */
55187+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
55188+ goto out3;
55189+
55190+ /* See which ioctl we are doing */
55191+ switch (cmd) {
55192+ case TW_IOCTL_FIRMWARE_PASS_THROUGH:
55193+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55194+ twl_get_request_id(tw_dev, &request_id);
55195+
55196+ /* Flag internal command */
55197+ tw_dev->srb[request_id] = NULL;
55198+
55199+ /* Flag chrdev ioctl */
55200+ tw_dev->chrdev_request_id = request_id;
55201+
55202+ full_command_packet = (TW_Command_Full *)&tw_ioctl->firmware_command;
55203+
55204+ /* Load request id and sglist for both command types */
55205+ twl_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
55206+
55207+ memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
55208+
55209+ /* Now post the command packet to the controller */
55210+ twl_post_command_packet(tw_dev, request_id);
55211+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55212+
55213+ timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
55214+
55215+ /* Now wait for command to complete */
55216+ timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
55217+
55218+ /* We timed out, and didn't get an interrupt */
55219+ if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
55220+ /* Now we need to reset the board */
55221+ printk(KERN_WARNING "3w-sas: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
55222+ tw_dev->host->host_no, TW_DRIVER, 0x6,
55223+ cmd);
55224+ retval = -EIO;
55225+ twl_reset_device_extension(tw_dev, 1);
55226+ goto out3;
55227+ }
55228+
55229+ /* Now copy in the command packet response */
55230+ memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
55231+
55232+ /* Now complete the io */
55233+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55234+ tw_dev->posted_request_count--;
55235+ tw_dev->state[request_id] = TW_S_COMPLETED;
55236+ twl_free_request_id(tw_dev, request_id);
55237+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55238+ break;
55239+ default:
55240+ retval = -ENOTTY;
55241+ goto out3;
55242+ }
55243+
55244+ /* Now copy the entire response to userspace */
55245+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
55246+ retval = 0;
55247+out3:
55248+ /* Now free ioctl buf memory */
55249+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
55250+out2:
55251+ mutex_unlock(&tw_dev->ioctl_lock);
55252+out:
55253+ return retval;
55254+} /* End twl_chrdev_ioctl() */
55255+
55256+/* This function handles open for the character device */
55257+static int twl_chrdev_open(struct inode *inode, struct file *file)
55258+{
55259+ unsigned int minor_number;
55260+ int retval = -ENODEV;
55261+
55262+ if (!capable(CAP_SYS_ADMIN)) {
55263+ retval = -EACCES;
55264+ goto out;
55265+ }
55266+
55267+ cycle_kernel_lock();
55268+ minor_number = iminor(inode);
55269+ if (minor_number >= twl_device_extension_count)
55270+ goto out;
55271+ retval = 0;
55272+out:
55273+ return retval;
55274+} /* End twl_chrdev_open() */
55275+
55276+/* File operations struct for character device */
55277+static const struct file_operations twl_fops = {
55278+ .owner = THIS_MODULE,
55279+ .ioctl = twl_chrdev_ioctl,
55280+ .open = twl_chrdev_open,
55281+ .release = NULL
55282+};
55283+
55284+/* This function passes sense data from firmware to scsi layer */
55285+static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, int copy_sense, int print_host)
55286+{
55287+ TW_Command_Apache_Header *header;
55288+ TW_Command_Full *full_command_packet;
55289+ unsigned short error;
55290+ char *error_str;
55291+ int retval = 1;
55292+
55293+ header = tw_dev->sense_buffer_virt[i];
55294+ full_command_packet = tw_dev->command_packet_virt[request_id];
55295+
55296+ /* Get embedded firmware error string */
55297+ error_str = &(header->err_specific_desc[strlen(header->err_specific_desc) + 1]);
55298+
55299+ /* Don't print error for Logical unit not supported during rollcall */
55300+ error = le16_to_cpu(header->status_block.error);
55301+ if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE) && (error != TW_ERROR_INVALID_FIELD_IN_CDB)) {
55302+ if (print_host)
55303+ printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55304+ tw_dev->host->host_no,
55305+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55306+ header->status_block.error,
55307+ error_str,
55308+ header->err_specific_desc);
55309+ else
55310+ printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
55311+ TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
55312+ header->status_block.error,
55313+ error_str,
55314+ header->err_specific_desc);
55315+ }
55316+
55317+ if (copy_sense) {
55318+ memcpy(tw_dev->srb[request_id]->sense_buffer, header->sense_data, TW_SENSE_DATA_LENGTH);
55319+ tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
55320+ goto out;
55321+ }
55322+out:
55323+ return retval;
55324+} /* End twl_fill_sense() */
55325+
55326+/* This function will free up device extension resources */
55327+static void twl_free_device_extension(TW_Device_Extension *tw_dev)
55328+{
55329+ if (tw_dev->command_packet_virt[0])
55330+ pci_free_consistent(tw_dev->tw_pci_dev,
55331+ sizeof(TW_Command_Full)*TW_Q_LENGTH,
55332+ tw_dev->command_packet_virt[0],
55333+ tw_dev->command_packet_phys[0]);
55334+
55335+ if (tw_dev->generic_buffer_virt[0])
55336+ pci_free_consistent(tw_dev->tw_pci_dev,
55337+ TW_SECTOR_SIZE*TW_Q_LENGTH,
55338+ tw_dev->generic_buffer_virt[0],
55339+ tw_dev->generic_buffer_phys[0]);
55340+
55341+ if (tw_dev->sense_buffer_virt[0])
55342+ pci_free_consistent(tw_dev->tw_pci_dev,
55343+ sizeof(TW_Command_Apache_Header)*
55344+ TW_Q_LENGTH,
55345+ tw_dev->sense_buffer_virt[0],
55346+ tw_dev->sense_buffer_phys[0]);
55347+
55348+ kfree(tw_dev->event_queue[0]);
55349+} /* End twl_free_device_extension() */
55350+
55351+/* This function will get parameter table entries from the firmware */
55352+static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
55353+{
55354+ TW_Command_Full *full_command_packet;
55355+ TW_Command *command_packet;
55356+ TW_Param_Apache *param;
55357+ void *retval = NULL;
55358+
55359+ /* Setup the command packet */
55360+ full_command_packet = tw_dev->command_packet_virt[request_id];
55361+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55362+ command_packet = &full_command_packet->command.oldcommand;
55363+
55364+ command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
55365+ command_packet->size = TW_COMMAND_SIZE;
55366+ command_packet->request_id = request_id;
55367+ command_packet->byte6_offset.block_count = cpu_to_le16(1);
55368+
55369+ /* Now setup the param */
55370+ param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
55371+ memset(param, 0, TW_SECTOR_SIZE);
55372+ param->table_id = cpu_to_le16(table_id | 0x8000);
55373+ param->parameter_id = cpu_to_le16(parameter_id);
55374+ param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
55375+
55376+ command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
55377+ command_packet->byte8_offset.param.sgl[0].length = TW_CPU_TO_SGL(TW_SECTOR_SIZE);
55378+
55379+ /* Post the command packet to the board */
55380+ twl_post_command_packet(tw_dev, request_id);
55381+
55382+ /* Poll for completion */
55383+ if (twl_poll_response(tw_dev, request_id, 30))
55384+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "No valid response during get param")
55385+ else
55386+ retval = (void *)&(param->data[0]);
55387+
55388+ tw_dev->posted_request_count--;
55389+ tw_dev->state[request_id] = TW_S_INITIAL;
55390+
55391+ return retval;
55392+} /* End twl_get_param() */
55393+
55394+/* This function will send an initconnection command to controller */
55395+static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
55396+ u32 set_features, unsigned short current_fw_srl,
55397+ unsigned short current_fw_arch_id,
55398+ unsigned short current_fw_branch,
55399+ unsigned short current_fw_build,
55400+ unsigned short *fw_on_ctlr_srl,
55401+ unsigned short *fw_on_ctlr_arch_id,
55402+ unsigned short *fw_on_ctlr_branch,
55403+ unsigned short *fw_on_ctlr_build,
55404+ u32 *init_connect_result)
55405+{
55406+ TW_Command_Full *full_command_packet;
55407+ TW_Initconnect *tw_initconnect;
55408+ int request_id = 0, retval = 1;
55409+
55410+ /* Initialize InitConnection command packet */
55411+ full_command_packet = tw_dev->command_packet_virt[request_id];
55412+ memset(full_command_packet, 0, sizeof(TW_Command_Full));
55413+ full_command_packet->header.header_desc.size_header = 128;
55414+
55415+ tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
55416+ tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
55417+ tw_initconnect->request_id = request_id;
55418+ tw_initconnect->message_credits = cpu_to_le16(message_credits);
55419+ tw_initconnect->features = set_features;
55420+
55421+ /* Turn on 64-bit sgl support if we need to */
55422+ tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
55423+
55424+ tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
55425+
55426+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
55427+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
55428+ tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
55429+ tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
55430+ tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
55431+ tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
55432+ } else
55433+ tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
55434+
55435+ /* Send command packet to the board */
55436+ twl_post_command_packet(tw_dev, request_id);
55437+
55438+ /* Poll for completion */
55439+ if (twl_poll_response(tw_dev, request_id, 30)) {
55440+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x8, "No valid response during init connection");
55441+ } else {
55442+ if (set_features & TW_EXTENDED_INIT_CONNECT) {
55443+ *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
55444+ *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
55445+ *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
55446+ *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
55447+ *init_connect_result = le32_to_cpu(tw_initconnect->result);
55448+ }
55449+ retval = 0;
55450+ }
55451+
55452+ tw_dev->posted_request_count--;
55453+ tw_dev->state[request_id] = TW_S_INITIAL;
55454+
55455+ return retval;
55456+} /* End twl_initconnection() */
55457+
55458+/* This function will initialize the fields of a device extension */
55459+static int twl_initialize_device_extension(TW_Device_Extension *tw_dev)
55460+{
55461+ int i, retval = 1;
55462+
55463+ /* Initialize command packet buffers */
55464+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
55465+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x9, "Command packet memory allocation failed");
55466+ goto out;
55467+ }
55468+
55469+ /* Initialize generic buffer */
55470+ if (twl_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
55471+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Generic memory allocation failed");
55472+ goto out;
55473+ }
55474+
55475+ /* Allocate sense buffers */
55476+ if (twl_allocate_memory(tw_dev, sizeof(TW_Command_Apache_Header), 2)) {
55477+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xb, "Sense buffer allocation failed");
55478+ goto out;
55479+ }
55480+
55481+ /* Allocate event info space */
55482+ tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
55483+ if (!tw_dev->event_queue[0]) {
55484+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "Event info memory allocation failed");
55485+ goto out;
55486+ }
55487+
55488+ for (i = 0; i < TW_Q_LENGTH; i++) {
55489+ tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
55490+ tw_dev->free_queue[i] = i;
55491+ tw_dev->state[i] = TW_S_INITIAL;
55492+ }
55493+
55494+ tw_dev->free_head = TW_Q_START;
55495+ tw_dev->free_tail = TW_Q_START;
55496+ tw_dev->error_sequence_id = 1;
55497+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
55498+
55499+ mutex_init(&tw_dev->ioctl_lock);
55500+ init_waitqueue_head(&tw_dev->ioctl_wqueue);
55501+
55502+ retval = 0;
55503+out:
55504+ return retval;
55505+} /* End twl_initialize_device_extension() */
55506+
55507+/* This function will perform a pci-dma unmap */
55508+static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
55509+{
55510+ struct scsi_cmnd *cmd = tw_dev->srb[request_id];
55511+
55512+ if (cmd->SCp.phase == TW_PHASE_SGLIST)
55513+ scsi_dma_unmap(cmd);
55514+} /* End twl_unmap_scsi_data() */
55515+
55516+/* This function will handle attention interrupts */
55517+static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
55518+{
55519+ int retval = 1;
55520+ u32 request_id, doorbell;
55521+
55522+ /* Read doorbell status */
55523+ doorbell = readl(TWL_HOBDB_REG_ADDR(tw_dev));
55524+
55525+ /* Check for controller errors */
55526+ if (doorbell & TWL_DOORBELL_CONTROLLER_ERROR) {
55527+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "Microcontroller Error: clearing");
55528+ goto out;
55529+ }
55530+
55531+ /* Check if we need to perform an AEN drain */
55532+ if (doorbell & TWL_DOORBELL_ATTENTION_INTERRUPT) {
55533+ if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
55534+ twl_get_request_id(tw_dev, &request_id);
55535+ if (twl_aen_read_queue(tw_dev, request_id)) {
55536+ tw_dev->state[request_id] = TW_S_COMPLETED;
55537+ twl_free_request_id(tw_dev, request_id);
55538+ clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
55539+ }
55540+ }
55541+ }
55542+
55543+ retval = 0;
55544+out:
55545+ /* Clear doorbell interrupt */
55546+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
55547+
55548+ /* Make sure the clear was flushed by reading it back */
55549+ readl(TWL_HOBDBC_REG_ADDR(tw_dev));
55550+
55551+ return retval;
55552+} /* End twl_handle_attention_interrupt() */
55553+
55554+/* Interrupt service routine */
55555+static irqreturn_t twl_interrupt(int irq, void *dev_instance)
55556+{
55557+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
55558+ int i, handled = 0, error = 0;
55559+ dma_addr_t mfa = 0;
55560+ u32 reg, regl, regh, response, request_id = 0;
55561+ struct scsi_cmnd *cmd;
55562+ TW_Command_Full *full_command_packet;
55563+
55564+ spin_lock(tw_dev->host->host_lock);
55565+
55566+ /* Read host interrupt status */
55567+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
55568+
55569+ /* Check if this is our interrupt, otherwise bail */
55570+ if (!(reg & TWL_HISTATUS_VALID_INTERRUPT))
55571+ goto twl_interrupt_bail;
55572+
55573+ handled = 1;
55574+
55575+ /* If we are resetting, bail */
55576+ if (test_bit(TW_IN_RESET, &tw_dev->flags))
55577+ goto twl_interrupt_bail;
55578+
55579+ /* Attention interrupt */
55580+ if (reg & TWL_HISTATUS_ATTENTION_INTERRUPT) {
55581+ if (twl_handle_attention_interrupt(tw_dev)) {
55582+ TWL_MASK_INTERRUPTS(tw_dev);
55583+ goto twl_interrupt_bail;
55584+ }
55585+ }
55586+
55587+ /* Response interrupt */
55588+ while (reg & TWL_HISTATUS_RESPONSE_INTERRUPT) {
55589+ if (sizeof(dma_addr_t) > 4) {
55590+ regh = readl(TWL_HOBQPH_REG_ADDR(tw_dev));
55591+ regl = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55592+ mfa = ((u64)regh << 32) | regl;
55593+ } else
55594+ mfa = readl(TWL_HOBQPL_REG_ADDR(tw_dev));
55595+
55596+ error = 0;
55597+ response = (u32)mfa;
55598+
55599+ /* Check for command packet error */
55600+ if (!TW_NOTMFA_OUT(response)) {
55601+ for (i=0;i<TW_Q_LENGTH;i++) {
55602+ if (tw_dev->sense_buffer_phys[i] == mfa) {
55603+ request_id = le16_to_cpu(tw_dev->sense_buffer_virt[i]->header_desc.request_id);
55604+ if (tw_dev->srb[request_id] != NULL)
55605+ error = twl_fill_sense(tw_dev, i, request_id, 1, 1);
55606+ else {
55607+ /* Skip ioctl error prints */
55608+ if (request_id != tw_dev->chrdev_request_id)
55609+ error = twl_fill_sense(tw_dev, i, request_id, 0, 1);
55610+ else
55611+ memcpy(tw_dev->command_packet_virt[request_id], tw_dev->sense_buffer_virt[i], sizeof(TW_Command_Apache_Header));
55612+ }
55613+
55614+ /* Now re-post the sense buffer */
55615+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
55616+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
55617+ break;
55618+ }
55619+ }
55620+ } else
55621+ request_id = TW_RESID_OUT(response);
55622+
55623+ full_command_packet = tw_dev->command_packet_virt[request_id];
55624+
55625+ /* Check for correct state */
55626+ if (tw_dev->state[request_id] != TW_S_POSTED) {
55627+ if (tw_dev->srb[request_id] != NULL) {
55628+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Received a request id that wasn't posted");
55629+ TWL_MASK_INTERRUPTS(tw_dev);
55630+ goto twl_interrupt_bail;
55631+ }
55632+ }
55633+
55634+ /* Check for internal command completion */
55635+ if (tw_dev->srb[request_id] == NULL) {
55636+ if (request_id != tw_dev->chrdev_request_id) {
55637+ if (twl_aen_complete(tw_dev, request_id))
55638+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "Error completing AEN during attention interrupt");
55639+ } else {
55640+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
55641+ wake_up(&tw_dev->ioctl_wqueue);
55642+ }
55643+ } else {
55644+ cmd = tw_dev->srb[request_id];
55645+
55646+ if (!error)
55647+ cmd->result = (DID_OK << 16);
55648+
55649+ /* Report residual bytes for single sgl */
55650+ if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
55651+ if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
55652+ scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
55653+ }
55654+
55655+ /* Now complete the io */
55656+ tw_dev->state[request_id] = TW_S_COMPLETED;
55657+ twl_free_request_id(tw_dev, request_id);
55658+ tw_dev->posted_request_count--;
55659+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
55660+ twl_unmap_scsi_data(tw_dev, request_id);
55661+ }
55662+
55663+ /* Check for another response interrupt */
55664+ reg = readl(TWL_HISTAT_REG_ADDR(tw_dev));
55665+ }
55666+
55667+twl_interrupt_bail:
55668+ spin_unlock(tw_dev->host->host_lock);
55669+ return IRQ_RETVAL(handled);
55670+} /* End twl_interrupt() */
55671+
55672+/* This function will poll for a register change */
55673+static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value, u32 result, int seconds)
55674+{
55675+ unsigned long before;
55676+ int retval = 1;
55677+ u32 reg_value;
55678+
55679+ reg_value = readl(reg);
55680+ before = jiffies;
55681+
55682+ while ((reg_value & value) != result) {
55683+ reg_value = readl(reg);
55684+ if (time_after(jiffies, before + HZ * seconds))
55685+ goto out;
55686+ msleep(50);
55687+ }
55688+ retval = 0;
55689+out:
55690+ return retval;
55691+} /* End twl_poll_register() */
55692+
55693+/* This function will reset a controller */
55694+static int twl_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
55695+{
55696+ int retval = 1;
55697+ int i = 0;
55698+ u32 status = 0;
55699+ unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
55700+ unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
55701+ u32 init_connect_result = 0;
55702+ int tries = 0;
55703+ int do_soft_reset = soft_reset;
55704+
55705+ while (tries < TW_MAX_RESET_TRIES) {
55706+ /* Do a soft reset if one is needed */
55707+ if (do_soft_reset) {
55708+ TWL_SOFT_RESET(tw_dev);
55709+
55710+ /* Make sure controller is in a good state */
55711+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, 0x0, 30)) {
55712+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Controller never went non-ready during reset sequence");
55713+ tries++;
55714+ continue;
55715+ }
55716+ if (twl_poll_register(tw_dev, TWL_SCRPD3_REG_ADDR(tw_dev), TWL_CONTROLLER_READY, TWL_CONTROLLER_READY, 60)) {
55717+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x11, "Controller not ready during reset sequence");
55718+ tries++;
55719+ continue;
55720+ }
55721+ }
55722+
55723+ /* Initconnect */
55724+ if (twl_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
55725+ TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
55726+ TW_9750_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
55727+ TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
55728+ &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
55729+ &fw_on_ctlr_build, &init_connect_result)) {
55730+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x12, "Initconnection failed while checking SRL");
55731+ do_soft_reset = 1;
55732+ tries++;
55733+ continue;
55734+ }
55735+
55736+ /* Load sense buffers */
55737+ while (i < TW_Q_LENGTH) {
55738+ writel((u32)((u64)tw_dev->sense_buffer_phys[i] >> 32), TWL_HOBQPH_REG_ADDR(tw_dev));
55739+ writel((u32)tw_dev->sense_buffer_phys[i], TWL_HOBQPL_REG_ADDR(tw_dev));
55740+
55741+ /* Check status for over-run after each write */
55742+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
55743+ if (!(status & TWL_STATUS_OVERRUN_SUBMIT))
55744+ i++;
55745+ }
55746+
55747+ /* Now check status */
55748+ status = readl(TWL_STATUS_REG_ADDR(tw_dev));
55749+ if (status) {
55750+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "Bad controller status after loading sense buffers");
55751+ do_soft_reset = 1;
55752+ tries++;
55753+ continue;
55754+ }
55755+
55756+ /* Drain the AEN queue */
55757+ if (twl_aen_drain_queue(tw_dev, soft_reset)) {
55758+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x14, "AEN drain failed during reset sequence");
55759+ do_soft_reset = 1;
55760+ tries++;
55761+ continue;
55762+ }
55763+
55764+ /* Load rest of compatibility struct */
55765+ strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
55766+ tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
55767+ tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
55768+ tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
55769+ tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
55770+ tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
55771+ tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
55772+ tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
55773+ tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
55774+ tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
55775+
55776+ /* If we got here, controller is in a good state */
55777+ retval = 0;
55778+ goto out;
55779+ }
55780+out:
55781+ return retval;
55782+} /* End twl_reset_sequence() */
55783+
55784+/* This function will reset a device extension */
55785+static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_reset)
55786+{
55787+ int i = 0, retval = 1;
55788+ unsigned long flags = 0;
55789+
55790+ /* Block SCSI requests while we are resetting */
55791+ if (ioctl_reset)
55792+ scsi_block_requests(tw_dev->host);
55793+
55794+ set_bit(TW_IN_RESET, &tw_dev->flags);
55795+ TWL_MASK_INTERRUPTS(tw_dev);
55796+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
55797+
55798+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
55799+
55800+ /* Abort all requests that are in progress */
55801+ for (i = 0; i < TW_Q_LENGTH; i++) {
55802+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
55803+ (tw_dev->state[i] != TW_S_INITIAL) &&
55804+ (tw_dev->state[i] != TW_S_COMPLETED)) {
55805+ if (tw_dev->srb[i]) {
55806+ tw_dev->srb[i]->result = (DID_RESET << 16);
55807+ tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
55808+ twl_unmap_scsi_data(tw_dev, i);
55809+ }
55810+ }
55811+ }
55812+
55813+ /* Reset queues and counts */
55814+ for (i = 0; i < TW_Q_LENGTH; i++) {
55815+ tw_dev->free_queue[i] = i;
55816+ tw_dev->state[i] = TW_S_INITIAL;
55817+ }
55818+ tw_dev->free_head = TW_Q_START;
55819+ tw_dev->free_tail = TW_Q_START;
55820+ tw_dev->posted_request_count = 0;
55821+
55822+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
55823+
55824+ if (twl_reset_sequence(tw_dev, 1))
55825+ goto out;
55826+
55827+ TWL_UNMASK_INTERRUPTS(tw_dev);
55828+
55829+ clear_bit(TW_IN_RESET, &tw_dev->flags);
55830+ tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
55831+
55832+ retval = 0;
55833+out:
55834+ if (ioctl_reset)
55835+ scsi_unblock_requests(tw_dev->host);
55836+ return retval;
55837+} /* End twl_reset_device_extension() */
55838+
55839+/* This funciton returns unit geometry in cylinders/heads/sectors */
55840+static int twl_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
55841+{
55842+ int heads, sectors, cylinders;
55843+ TW_Device_Extension *tw_dev;
55844+
55845+ tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
55846+
55847+ if (capacity >= 0x200000) {
55848+ heads = 255;
55849+ sectors = 63;
55850+ cylinders = sector_div(capacity, heads * sectors);
55851+ } else {
55852+ heads = 64;
55853+ sectors = 32;
55854+ cylinders = sector_div(capacity, heads * sectors);
55855+ }
55856+
55857+ geom[0] = heads;
55858+ geom[1] = sectors;
55859+ geom[2] = cylinders;
55860+
55861+ return 0;
55862+} /* End twl_scsi_biosparam() */
55863+
55864+/* This is the new scsi eh reset function */
55865+static int twl_scsi_eh_reset(struct scsi_cmnd *SCpnt)
55866+{
55867+ TW_Device_Extension *tw_dev = NULL;
55868+ int retval = FAILED;
55869+
55870+ tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
55871+
55872+ tw_dev->num_resets++;
55873+
55874+ sdev_printk(KERN_WARNING, SCpnt->device,
55875+ "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
55876+ TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
55877+
55878+ /* Make sure we are not issuing an ioctl or resetting from ioctl */
55879+ mutex_lock(&tw_dev->ioctl_lock);
55880+
55881+ /* Now reset the card and some of the device extension data */
55882+ if (twl_reset_device_extension(tw_dev, 0)) {
55883+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "Controller reset failed during scsi host reset");
55884+ goto out;
55885+ }
55886+
55887+ retval = SUCCESS;
55888+out:
55889+ mutex_unlock(&tw_dev->ioctl_lock);
55890+ return retval;
55891+} /* End twl_scsi_eh_reset() */
55892+
55893+/* This is the main scsi queue function to handle scsi opcodes */
55894+static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
55895+{
55896+ int request_id, retval;
55897+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
55898+
55899+ /* If we are resetting due to timed out ioctl, report as busy */
55900+ if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
55901+ retval = SCSI_MLQUEUE_HOST_BUSY;
55902+ goto out;
55903+ }
55904+
55905+ /* Save done function into scsi_cmnd struct */
55906+ SCpnt->scsi_done = done;
55907+
55908+ /* Get a free request id */
55909+ twl_get_request_id(tw_dev, &request_id);
55910+
55911+ /* Save the scsi command for use by the ISR */
55912+ tw_dev->srb[request_id] = SCpnt;
55913+
55914+ /* Initialize phase to zero */
55915+ SCpnt->SCp.phase = TW_PHASE_INITIAL;
55916+
55917+ retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
55918+ if (retval) {
55919+ tw_dev->state[request_id] = TW_S_COMPLETED;
55920+ twl_free_request_id(tw_dev, request_id);
55921+ SCpnt->result = (DID_ERROR << 16);
55922+ done(SCpnt);
55923+ retval = 0;
55924+ }
55925+out:
55926+ return retval;
55927+} /* End twl_scsi_queue() */
55928+
55929+/* This function tells the controller to shut down */
55930+static void __twl_shutdown(TW_Device_Extension *tw_dev)
55931+{
55932+ /* Disable interrupts */
55933+ TWL_MASK_INTERRUPTS(tw_dev);
55934+
55935+ /* Free up the IRQ */
55936+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
55937+
55938+ printk(KERN_WARNING "3w-sas: Shutting down host %d.\n", tw_dev->host->host_no);
55939+
55940+ /* Tell the card we are shutting down */
55941+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
55942+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Connection shutdown failed");
55943+ } else {
55944+ printk(KERN_WARNING "3w-sas: Shutdown complete.\n");
55945+ }
55946+
55947+ /* Clear doorbell interrupt just before exit */
55948+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
55949+} /* End __twl_shutdown() */
55950+
55951+/* Wrapper for __twl_shutdown */
55952+static void twl_shutdown(struct pci_dev *pdev)
55953+{
55954+ struct Scsi_Host *host = pci_get_drvdata(pdev);
55955+ TW_Device_Extension *tw_dev;
55956+
55957+ if (!host)
55958+ return;
55959+
55960+ tw_dev = (TW_Device_Extension *)host->hostdata;
55961+
55962+ if (tw_dev->online)
55963+ __twl_shutdown(tw_dev);
55964+} /* End twl_shutdown() */
55965+
55966+/* This function configures unit settings when a unit is coming on-line */
55967+static int twl_slave_configure(struct scsi_device *sdev)
55968+{
55969+ /* Force 60 second timeout */
55970+ blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
55971+
55972+ return 0;
55973+} /* End twl_slave_configure() */
55974+
55975+/* scsi_host_template initializer */
55976+static struct scsi_host_template driver_template = {
55977+ .module = THIS_MODULE,
55978+ .name = "3w-sas",
55979+ .queuecommand = twl_scsi_queue,
55980+ .eh_host_reset_handler = twl_scsi_eh_reset,
55981+ .bios_param = twl_scsi_biosparam,
55982+ .change_queue_depth = twl_change_queue_depth,
55983+ .can_queue = TW_Q_LENGTH-2,
55984+ .slave_configure = twl_slave_configure,
55985+ .this_id = -1,
55986+ .sg_tablesize = TW_LIBERATOR_MAX_SGL_LENGTH,
55987+ .max_sectors = TW_MAX_SECTORS,
55988+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
55989+ .use_clustering = ENABLE_CLUSTERING,
55990+ .shost_attrs = twl_host_attrs,
55991+ .emulated = 1
55992+};
55993+
55994+/* This function will probe and initialize a card */
55995+static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
55996+{
55997+ struct Scsi_Host *host = NULL;
55998+ TW_Device_Extension *tw_dev;
55999+ resource_size_t mem_addr, mem_len;
56000+ int retval = -ENODEV;
56001+ int *ptr_phycount, phycount=0;
56002+
56003+ retval = pci_enable_device(pdev);
56004+ if (retval) {
56005+ TW_PRINTK(host, TW_DRIVER, 0x17, "Failed to enable pci device");
56006+ goto out_disable_device;
56007+ }
56008+
56009+ pci_set_master(pdev);
56010+ pci_try_set_mwi(pdev);
56011+
56012+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56013+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56014+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56015+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56016+ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
56017+ retval = -ENODEV;
56018+ goto out_disable_device;
56019+ }
56020+
56021+ host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
56022+ if (!host) {
56023+ TW_PRINTK(host, TW_DRIVER, 0x19, "Failed to allocate memory for device extension");
56024+ retval = -ENOMEM;
56025+ goto out_disable_device;
56026+ }
56027+ tw_dev = (TW_Device_Extension *)host->hostdata;
56028+
56029+ /* Save values to device extension */
56030+ tw_dev->host = host;
56031+ tw_dev->tw_pci_dev = pdev;
56032+
56033+ if (twl_initialize_device_extension(tw_dev)) {
56034+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
56035+ goto out_free_device_extension;
56036+ }
56037+
56038+ /* Request IO regions */
56039+ retval = pci_request_regions(pdev, "3w-sas");
56040+ if (retval) {
56041+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Failed to get mem region");
56042+ goto out_free_device_extension;
56043+ }
56044+
56045+ /* Use region 1 */
56046+ mem_addr = pci_resource_start(pdev, 1);
56047+ mem_len = pci_resource_len(pdev, 1);
56048+
56049+ /* Save base address */
56050+ tw_dev->base_addr = ioremap(mem_addr, mem_len);
56051+
56052+ if (!tw_dev->base_addr) {
56053+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
56054+ goto out_release_mem_region;
56055+ }
56056+
56057+ /* Disable interrupts on the card */
56058+ TWL_MASK_INTERRUPTS(tw_dev);
56059+
56060+ /* Initialize the card */
56061+ if (twl_reset_sequence(tw_dev, 0)) {
56062+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
56063+ goto out_iounmap;
56064+ }
56065+
56066+ /* Set host specific parameters */
56067+ host->max_id = TW_MAX_UNITS;
56068+ host->max_cmd_len = TW_MAX_CDB_LEN;
56069+ host->max_lun = TW_MAX_LUNS;
56070+ host->max_channel = 0;
56071+
56072+ /* Register the card with the kernel SCSI layer */
56073+ retval = scsi_add_host(host, &pdev->dev);
56074+ if (retval) {
56075+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "scsi add host failed");
56076+ goto out_iounmap;
56077+ }
56078+
56079+ pci_set_drvdata(pdev, host);
56080+
56081+ printk(KERN_WARNING "3w-sas: scsi%d: Found an LSI 3ware %s Controller at 0x%llx, IRQ: %d.\n",
56082+ host->host_no,
56083+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56084+ TW_PARAM_MODEL, TW_PARAM_MODEL_LENGTH),
56085+ (u64)mem_addr, pdev->irq);
56086+
56087+ ptr_phycount = twl_get_param(tw_dev, 2, TW_PARAM_PHY_SUMMARY_TABLE,
56088+ TW_PARAM_PHYCOUNT, TW_PARAM_PHYCOUNT_LENGTH);
56089+ if (ptr_phycount)
56090+ phycount = le32_to_cpu(*(int *)ptr_phycount);
56091+
56092+ printk(KERN_WARNING "3w-sas: scsi%d: Firmware %s, BIOS %s, Phys: %d.\n",
56093+ host->host_no,
56094+ (char *)twl_get_param(tw_dev, 1, TW_VERSION_TABLE,
56095+ TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
56096+ (char *)twl_get_param(tw_dev, 2, TW_VERSION_TABLE,
56097+ TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
56098+ phycount);
56099+
56100+ /* Try to enable MSI */
56101+ if (use_msi && !pci_enable_msi(pdev))
56102+ set_bit(TW_USING_MSI, &tw_dev->flags);
56103+
56104+ /* Now setup the interrupt handler */
56105+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56106+ if (retval) {
56107+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Error requesting IRQ");
56108+ goto out_remove_host;
56109+ }
56110+
56111+ twl_device_extension_list[twl_device_extension_count] = tw_dev;
56112+ twl_device_extension_count++;
56113+
56114+ /* Re-enable interrupts on the card */
56115+ TWL_UNMASK_INTERRUPTS(tw_dev);
56116+
56117+ /* Finally, scan the host */
56118+ scsi_scan_host(host);
56119+
56120+ /* Add sysfs binary files */
56121+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr))
56122+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Failed to create sysfs binary file: 3ware_aen_read");
56123+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr))
56124+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Failed to create sysfs binary file: 3ware_compat_info");
56125+
56126+ if (twl_major == -1) {
56127+ if ((twl_major = register_chrdev (0, "twl", &twl_fops)) < 0)
56128+ TW_PRINTK(host, TW_DRIVER, 0x22, "Failed to register character device");
56129+ }
56130+ tw_dev->online = 1;
56131+ return 0;
56132+
56133+out_remove_host:
56134+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56135+ pci_disable_msi(pdev);
56136+ scsi_remove_host(host);
56137+out_iounmap:
56138+ iounmap(tw_dev->base_addr);
56139+out_release_mem_region:
56140+ pci_release_regions(pdev);
56141+out_free_device_extension:
56142+ twl_free_device_extension(tw_dev);
56143+ scsi_host_put(host);
56144+out_disable_device:
56145+ pci_disable_device(pdev);
56146+
56147+ return retval;
56148+} /* End twl_probe() */
56149+
56150+/* This function is called to remove a device */
56151+static void twl_remove(struct pci_dev *pdev)
56152+{
56153+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56154+ TW_Device_Extension *tw_dev;
56155+
56156+ if (!host)
56157+ return;
56158+
56159+ tw_dev = (TW_Device_Extension *)host->hostdata;
56160+
56161+ if (!tw_dev->online)
56162+ return;
56163+
56164+ /* Remove sysfs binary files */
56165+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_aen_read_attr);
56166+ sysfs_remove_bin_file(&host->shost_dev.kobj, &twl_sysfs_compat_info_attr);
56167+
56168+ scsi_remove_host(tw_dev->host);
56169+
56170+ /* Unregister character device */
56171+ if (twl_major >= 0) {
56172+ unregister_chrdev(twl_major, "twl");
56173+ twl_major = -1;
56174+ }
56175+
56176+ /* Shutdown the card */
56177+ __twl_shutdown(tw_dev);
56178+
56179+ /* Disable MSI if enabled */
56180+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56181+ pci_disable_msi(pdev);
56182+
56183+ /* Free IO remapping */
56184+ iounmap(tw_dev->base_addr);
56185+
56186+ /* Free up the mem region */
56187+ pci_release_regions(pdev);
56188+
56189+ /* Free up device extension resources */
56190+ twl_free_device_extension(tw_dev);
56191+
56192+ scsi_host_put(tw_dev->host);
56193+ pci_disable_device(pdev);
56194+ twl_device_extension_count--;
56195+} /* End twl_remove() */
56196+
56197+#ifdef CONFIG_PM
56198+/* This function is called on PCI suspend */
56199+static int twl_suspend(struct pci_dev *pdev, pm_message_t state)
56200+{
56201+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56202+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56203+
56204+ printk(KERN_WARNING "3w-sas: Suspending host %d.\n", tw_dev->host->host_no);
56205+ /* Disable interrupts */
56206+ TWL_MASK_INTERRUPTS(tw_dev);
56207+
56208+ free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
56209+
56210+ /* Tell the card we are shutting down */
56211+ if (twl_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
56212+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x23, "Connection shutdown failed during suspend");
56213+ } else {
56214+ printk(KERN_WARNING "3w-sas: Suspend complete.\n");
56215+ }
56216+
56217+ /* Clear doorbell interrupt */
56218+ TWL_CLEAR_DB_INTERRUPT(tw_dev);
56219+
56220+ pci_save_state(pdev);
56221+ pci_disable_device(pdev);
56222+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
56223+
56224+ return 0;
56225+} /* End twl_suspend() */
56226+
56227+/* This function is called on PCI resume */
56228+static int twl_resume(struct pci_dev *pdev)
56229+{
56230+ int retval = 0;
56231+ struct Scsi_Host *host = pci_get_drvdata(pdev);
56232+ TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
56233+
56234+ printk(KERN_WARNING "3w-sas: Resuming host %d.\n", tw_dev->host->host_no);
56235+ pci_set_power_state(pdev, PCI_D0);
56236+ pci_enable_wake(pdev, PCI_D0, 0);
56237+ pci_restore_state(pdev);
56238+
56239+ retval = pci_enable_device(pdev);
56240+ if (retval) {
56241+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x24, "Enable device failed during resume");
56242+ return retval;
56243+ }
56244+
56245+ pci_set_master(pdev);
56246+ pci_try_set_mwi(pdev);
56247+
56248+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
56249+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
56250+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
56251+ || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
56252+ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
56253+ retval = -ENODEV;
56254+ goto out_disable_device;
56255+ }
56256+
56257+ /* Initialize the card */
56258+ if (twl_reset_sequence(tw_dev, 0)) {
56259+ retval = -ENODEV;
56260+ goto out_disable_device;
56261+ }
56262+
56263+ /* Now setup the interrupt handler */
56264+ retval = request_irq(pdev->irq, twl_interrupt, IRQF_SHARED, "3w-sas", tw_dev);
56265+ if (retval) {
56266+ TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Error requesting IRQ during resume");
56267+ retval = -ENODEV;
56268+ goto out_disable_device;
56269+ }
56270+
56271+ /* Now enable MSI if enabled */
56272+ if (test_bit(TW_USING_MSI, &tw_dev->flags))
56273+ pci_enable_msi(pdev);
56274+
56275+ /* Re-enable interrupts on the card */
56276+ TWL_UNMASK_INTERRUPTS(tw_dev);
56277+
56278+ printk(KERN_WARNING "3w-sas: Resume complete.\n");
56279+ return 0;
56280+
56281+out_disable_device:
56282+ scsi_remove_host(host);
56283+ pci_disable_device(pdev);
56284+
56285+ return retval;
56286+} /* End twl_resume() */
56287+#endif
56288+
56289+/* PCI Devices supported by this driver */
56290+static struct pci_device_id twl_pci_tbl[] __devinitdata = {
56291+ { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9750,
56292+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
56293+ { }
56294+};
56295+MODULE_DEVICE_TABLE(pci, twl_pci_tbl);
56296+
56297+/* pci_driver initializer */
56298+static struct pci_driver twl_driver = {
56299+ .name = "3w-sas",
56300+ .id_table = twl_pci_tbl,
56301+ .probe = twl_probe,
56302+ .remove = twl_remove,
56303+#ifdef CONFIG_PM
56304+ .suspend = twl_suspend,
56305+ .resume = twl_resume,
56306+#endif
56307+ .shutdown = twl_shutdown
56308+};
56309+
56310+/* This function is called on driver initialization */
56311+static int __init twl_init(void)
56312+{
56313+ printk(KERN_WARNING "LSI 3ware SAS/SATA-RAID Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
56314+
56315+ return pci_register_driver(&twl_driver);
56316+} /* End twl_init() */
56317+
56318+/* This function is called on driver exit */
56319+static void __exit twl_exit(void)
56320+{
56321+ pci_unregister_driver(&twl_driver);
56322+} /* End twl_exit() */
56323+
56324+module_init(twl_init);
56325+module_exit(twl_exit);
56326+
56327diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
56328new file mode 100644
56329index 0000000..e620505
56330--- /dev/null
56331+++ b/drivers/scsi/3w-sas.h
56332@@ -0,0 +1,396 @@
56333+/*
56334+ 3w-sas.h -- LSI 3ware SAS/SATA-RAID Controller device driver for Linux.
56335+
56336+ Written By: Adam Radford <linuxraid@lsi.com>
56337+
56338+ Copyright (C) 2009 LSI Corporation.
56339+
56340+ This program is free software; you can redistribute it and/or modify
56341+ it under the terms of the GNU General Public License as published by
56342+ the Free Software Foundation; version 2 of the License.
56343+
56344+ This program is distributed in the hope that it will be useful,
56345+ but WITHOUT ANY WARRANTY; without even the implied warranty of
56346+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
56347+ GNU General Public License for more details.
56348+
56349+ NO WARRANTY
56350+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
56351+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
56352+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
56353+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
56354+ solely responsible for determining the appropriateness of using and
56355+ distributing the Program and assumes all risks associated with its
56356+ exercise of rights under this Agreement, including but not limited to
56357+ the risks and costs of program errors, damage to or loss of data,
56358+ programs or equipment, and unavailability or interruption of operations.
56359+
56360+ DISCLAIMER OF LIABILITY
56361+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56362+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56363+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
56364+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
56365+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
56366+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
56367+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
56368+
56369+ You should have received a copy of the GNU General Public License
56370+ along with this program; if not, write to the Free Software
56371+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
56372+
56373+ Bugs/Comments/Suggestions should be mailed to:
56374+ linuxraid@lsi.com
56375+
56376+ For more information, goto:
56377+ http://www.lsi.com
56378+*/
56379+
56380+#ifndef _3W_SAS_H
56381+#define _3W_SAS_H
56382+
56383+/* AEN severity table */
56384+static char *twl_aen_severity_table[] =
56385+{
56386+ "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0
56387+};
56388+
56389+/* Liberator register offsets */
56390+#define TWL_STATUS 0x0 /* Status */
56391+#define TWL_HIBDB 0x20 /* Inbound doorbell */
56392+#define TWL_HISTAT 0x30 /* Host interrupt status */
56393+#define TWL_HIMASK 0x34 /* Host interrupt mask */
56394+#define TWL_HOBDB 0x9C /* Outbound doorbell */
56395+#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
56396+#define TWL_SCRPD3 0xBC /* Scratchpad */
56397+#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
56398+#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
56399+#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
56400+#define TWL_HOBQPH 0xCC /* Host outbound Q high */
56401+#define TWL_HISTATUS_VALID_INTERRUPT 0xC
56402+#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
56403+#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
56404+#define TWL_STATUS_OVERRUN_SUBMIT 0x2000
56405+#define TWL_ISSUE_SOFT_RESET 0x100
56406+#define TWL_CONTROLLER_READY 0x2000
56407+#define TWL_DOORBELL_CONTROLLER_ERROR 0x200000
56408+#define TWL_DOORBELL_ATTENTION_INTERRUPT 0x40000
56409+#define TWL_PULL_MODE 0x1
56410+
56411+/* Command packet opcodes used by the driver */
56412+#define TW_OP_INIT_CONNECTION 0x1
56413+#define TW_OP_GET_PARAM 0x12
56414+#define TW_OP_SET_PARAM 0x13
56415+#define TW_OP_EXECUTE_SCSI 0x10
56416+
56417+/* Asynchronous Event Notification (AEN) codes used by the driver */
56418+#define TW_AEN_QUEUE_EMPTY 0x0000
56419+#define TW_AEN_SOFT_RESET 0x0001
56420+#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
56421+#define TW_AEN_SEVERITY_ERROR 0x1
56422+#define TW_AEN_SEVERITY_DEBUG 0x4
56423+#define TW_AEN_NOT_RETRIEVED 0x1
56424+
56425+/* Command state defines */
56426+#define TW_S_INITIAL 0x1 /* Initial state */
56427+#define TW_S_STARTED 0x2 /* Id in use */
56428+#define TW_S_POSTED 0x4 /* Posted to the controller */
56429+#define TW_S_COMPLETED 0x8 /* Completed by isr */
56430+#define TW_S_FINISHED 0x10 /* I/O completely done */
56431+
56432+/* Compatibility defines */
56433+#define TW_9750_ARCH_ID 10
56434+#define TW_CURRENT_DRIVER_SRL 40
56435+#define TW_CURRENT_DRIVER_BUILD 0
56436+#define TW_CURRENT_DRIVER_BRANCH 0
56437+
56438+/* Phase defines */
56439+#define TW_PHASE_INITIAL 0
56440+#define TW_PHASE_SGLIST 2
56441+
56442+/* Misc defines */
56443+#define TW_SECTOR_SIZE 512
56444+#define TW_MAX_UNITS 32
56445+#define TW_INIT_MESSAGE_CREDITS 0x100
56446+#define TW_INIT_COMMAND_PACKET_SIZE 0x3
56447+#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6
56448+#define TW_EXTENDED_INIT_CONNECT 0x2
56449+#define TW_BASE_FW_SRL 24
56450+#define TW_BASE_FW_BRANCH 0
56451+#define TW_BASE_FW_BUILD 1
56452+#define TW_Q_LENGTH 256
56453+#define TW_Q_START 0
56454+#define TW_MAX_SLOT 32
56455+#define TW_MAX_RESET_TRIES 2
56456+#define TW_MAX_CMDS_PER_LUN 254
56457+#define TW_MAX_AEN_DRAIN 255
56458+#define TW_IN_RESET 2
56459+#define TW_USING_MSI 3
56460+#define TW_IN_ATTENTION_LOOP 4
56461+#define TW_MAX_SECTORS 256
56462+#define TW_MAX_CDB_LEN 16
56463+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
56464+#define TW_IOCTL_CHRDEV_FREE -1
56465+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
56466+#define TW_VERSION_TABLE 0x0402
56467+#define TW_TIMEKEEP_TABLE 0x040A
56468+#define TW_INFORMATION_TABLE 0x0403
56469+#define TW_PARAM_FWVER 3
56470+#define TW_PARAM_FWVER_LENGTH 16
56471+#define TW_PARAM_BIOSVER 4
56472+#define TW_PARAM_BIOSVER_LENGTH 16
56473+#define TW_PARAM_MODEL 8
56474+#define TW_PARAM_MODEL_LENGTH 16
56475+#define TW_PARAM_PHY_SUMMARY_TABLE 1
56476+#define TW_PARAM_PHYCOUNT 2
56477+#define TW_PARAM_PHYCOUNT_LENGTH 1
56478+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
56479+#define TW_ALLOCATION_LENGTH 128
56480+#define TW_SENSE_DATA_LENGTH 18
56481+#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
56482+#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
56483+#define TW_ERROR_UNIT_OFFLINE 0x128
56484+#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
56485+#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
56486+#define TW_DRIVER 6
56487+#ifndef PCI_DEVICE_ID_3WARE_9750
56488+#define PCI_DEVICE_ID_3WARE_9750 0x1010
56489+#endif
56490+
56491+/* Bitmask macros to eliminate bitfields */
56492+
56493+/* opcode: 5, reserved: 3 */
56494+#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f))
56495+#define TW_OP_OUT(x) (x & 0x1f)
56496+
56497+/* opcode: 5, sgloffset: 3 */
56498+#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f))
56499+#define TW_SGL_OUT(x) ((x >> 5) & 0x7)
56500+
56501+/* severity: 3, reserved: 5 */
56502+#define TW_SEV_OUT(x) (x & 0x7)
56503+
56504+/* not_mfa: 1, reserved: 7, status: 8, request_id: 16 */
56505+#define TW_RESID_OUT(x) ((x >> 16) & 0xffff)
56506+#define TW_NOTMFA_OUT(x) (x & 0x1)
56507+
56508+/* request_id: 12, lun: 4 */
56509+#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
56510+#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
56511+
56512+/* Register access macros */
56513+#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
56514+#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
56515+#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
56516+#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
56517+#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
56518+#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
56519+#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
56520+#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
56521+#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
56522+#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
56523+#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
56524+#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
56525+#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
56526+#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
56527+#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
56528+
56529+/* Macros */
56530+#define TW_PRINTK(h,a,b,c) { \
56531+if (h) \
56532+printk(KERN_WARNING "3w-sas: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \
56533+else \
56534+printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
56535+}
56536+#define TW_MAX_LUNS 16
56537+#define TW_COMMAND_SIZE (sizeof(dma_addr_t) > 4 ? 6 : 4)
56538+#define TW_LIBERATOR_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 46 : 92)
56539+#define TW_LIBERATOR_MAX_SGL_LENGTH_OLD (sizeof(dma_addr_t) > 4 ? 47 : 94)
56540+#define TW_PADDING_LENGTH_LIBERATOR 136
56541+#define TW_PADDING_LENGTH_LIBERATOR_OLD 132
56542+#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
56543+
56544+#pragma pack(1)
56545+
56546+/* SGL entry */
56547+typedef struct TAG_TW_SG_Entry_ISO {
56548+ dma_addr_t address;
56549+ dma_addr_t length;
56550+} TW_SG_Entry_ISO;
56551+
56552+/* Old Command Packet with ISO SGL */
56553+typedef struct TW_Command {
56554+ unsigned char opcode__sgloffset;
56555+ unsigned char size;
56556+ unsigned char request_id;
56557+ unsigned char unit__hostid;
56558+ /* Second DWORD */
56559+ unsigned char status;
56560+ unsigned char flags;
56561+ union {
56562+ unsigned short block_count;
56563+ unsigned short parameter_count;
56564+ } byte6_offset;
56565+ union {
56566+ struct {
56567+ u32 lba;
56568+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
56569+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR_OLD];
56570+ } io;
56571+ struct {
56572+ TW_SG_Entry_ISO sgl[TW_LIBERATOR_MAX_SGL_LENGTH_OLD];
56573+ u32 padding;
56574+ unsigned char padding2[TW_PADDING_LENGTH_LIBERATOR_OLD];
56575+ } param;
56576+ } byte8_offset;
56577+} TW_Command;
56578+
56579+/* New Command Packet with ISO SGL */
56580+typedef struct TAG_TW_Command_Apache {
56581+ unsigned char opcode__reserved;
56582+ unsigned char unit;
56583+ unsigned short request_id__lunl;
56584+ unsigned char status;
56585+ unsigned char sgl_offset;
56586+ unsigned short sgl_entries__lunh;
56587+ unsigned char cdb[16];
56588+ TW_SG_Entry_ISO sg_list[TW_LIBERATOR_MAX_SGL_LENGTH];
56589+ unsigned char padding[TW_PADDING_LENGTH_LIBERATOR];
56590+} TW_Command_Apache;
56591+
56592+/* New command packet header */
56593+typedef struct TAG_TW_Command_Apache_Header {
56594+ unsigned char sense_data[TW_SENSE_DATA_LENGTH];
56595+ struct {
56596+ char reserved[4];
56597+ unsigned short error;
56598+ unsigned char padding;
56599+ unsigned char severity__reserved;
56600+ } status_block;
56601+ unsigned char err_specific_desc[98];
56602+ struct {
56603+ unsigned char size_header;
56604+ unsigned short request_id;
56605+ unsigned char size_sense;
56606+ } header_desc;
56607+} TW_Command_Apache_Header;
56608+
56609+/* This struct is a union of the 2 command packets */
56610+typedef struct TAG_TW_Command_Full {
56611+ TW_Command_Apache_Header header;
56612+ union {
56613+ TW_Command oldcommand;
56614+ TW_Command_Apache newcommand;
56615+ } command;
56616+} TW_Command_Full;
56617+
56618+/* Initconnection structure */
56619+typedef struct TAG_TW_Initconnect {
56620+ unsigned char opcode__reserved;
56621+ unsigned char size;
56622+ unsigned char request_id;
56623+ unsigned char res2;
56624+ unsigned char status;
56625+ unsigned char flags;
56626+ unsigned short message_credits;
56627+ u32 features;
56628+ unsigned short fw_srl;
56629+ unsigned short fw_arch_id;
56630+ unsigned short fw_branch;
56631+ unsigned short fw_build;
56632+ u32 result;
56633+} TW_Initconnect;
56634+
56635+/* Event info structure */
56636+typedef struct TAG_TW_Event
56637+{
56638+ unsigned int sequence_id;
56639+ unsigned int time_stamp_sec;
56640+ unsigned short aen_code;
56641+ unsigned char severity;
56642+ unsigned char retrieved;
56643+ unsigned char repeat_count;
56644+ unsigned char parameter_len;
56645+ unsigned char parameter_data[98];
56646+} TW_Event;
56647+
56648+typedef struct TAG_TW_Ioctl_Driver_Command {
56649+ unsigned int control_code;
56650+ unsigned int status;
56651+ unsigned int unique_id;
56652+ unsigned int sequence_id;
56653+ unsigned int os_specific;
56654+ unsigned int buffer_length;
56655+} TW_Ioctl_Driver_Command;
56656+
56657+typedef struct TAG_TW_Ioctl_Apache {
56658+ TW_Ioctl_Driver_Command driver_command;
56659+ char padding[488];
56660+ TW_Command_Full firmware_command;
56661+ char data_buffer[1];
56662+} TW_Ioctl_Buf_Apache;
56663+
56664+/* GetParam descriptor */
56665+typedef struct {
56666+ unsigned short table_id;
56667+ unsigned short parameter_id;
56668+ unsigned short parameter_size_bytes;
56669+ unsigned short actual_parameter_size_bytes;
56670+ unsigned char data[1];
56671+} TW_Param_Apache;
56672+
56673+/* Compatibility information structure */
56674+typedef struct TAG_TW_Compatibility_Info
56675+{
56676+ char driver_version[32];
56677+ unsigned short working_srl;
56678+ unsigned short working_branch;
56679+ unsigned short working_build;
56680+ unsigned short driver_srl_high;
56681+ unsigned short driver_branch_high;
56682+ unsigned short driver_build_high;
56683+ unsigned short driver_srl_low;
56684+ unsigned short driver_branch_low;
56685+ unsigned short driver_build_low;
56686+ unsigned short fw_on_ctlr_srl;
56687+ unsigned short fw_on_ctlr_branch;
56688+ unsigned short fw_on_ctlr_build;
56689+} TW_Compatibility_Info;
56690+
56691+#pragma pack()
56692+
56693+typedef struct TAG_TW_Device_Extension {
56694+ void __iomem *base_addr;
56695+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
56696+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
56697+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
56698+ dma_addr_t command_packet_phys[TW_Q_LENGTH];
56699+ TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
56700+ dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
56701+ struct pci_dev *tw_pci_dev;
56702+ struct scsi_cmnd *srb[TW_Q_LENGTH];
56703+ unsigned char free_queue[TW_Q_LENGTH];
56704+ unsigned char free_head;
56705+ unsigned char free_tail;
56706+ int state[TW_Q_LENGTH];
56707+ unsigned int posted_request_count;
56708+ unsigned int max_posted_request_count;
56709+ unsigned int max_sgl_entries;
56710+ unsigned int sgl_entries;
56711+ unsigned int num_resets;
56712+ unsigned int sector_count;
56713+ unsigned int max_sector_count;
56714+ unsigned int aen_count;
56715+ struct Scsi_Host *host;
56716+ long flags;
56717+ TW_Event *event_queue[TW_Q_LENGTH];
56718+ unsigned char error_index;
56719+ unsigned int error_sequence_id;
56720+ int chrdev_request_id;
56721+ wait_queue_head_t ioctl_wqueue;
56722+ struct mutex ioctl_lock;
56723+ TW_Compatibility_Info tw_compat_info;
56724+ char online;
56725+} TW_Device_Extension;
56726+
56727+#endif /* _3W_SAS_H */
56728+
56729diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
56730index 1ddcf40..a85f062 100644
56731--- a/drivers/scsi/BusLogic.c
56732+++ b/drivers/scsi/BusLogic.c
56733@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
56734 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
56735 *PrototypeHostAdapter)
56736 {
56737+ pax_track_stack();
56738+
56739 /*
56740 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
56741 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
56742diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
56743index e11cca4..4295679 100644
56744--- a/drivers/scsi/Kconfig
56745+++ b/drivers/scsi/Kconfig
56746@@ -399,6 +399,17 @@ config SCSI_3W_9XXX
56747 Please read the comments at the top of
56748 <file:drivers/scsi/3w-9xxx.c>.
56749
56750+config SCSI_3W_SAS
56751+ tristate "3ware 97xx SAS/SATA-RAID support"
56752+ depends on PCI && SCSI
56753+ help
56754+ This driver supports the LSI 3ware 9750 6Gb/s SAS/SATA-RAID cards.
56755+
56756+ <http://www.lsi.com>
56757+
56758+ Please read the comments at the top of
56759+ <file:drivers/scsi/3w-sas.c>.
56760+
56761 config SCSI_7000FASST
56762 tristate "7000FASST SCSI support"
56763 depends on ISA && SCSI && ISA_DMA_API
56764@@ -621,6 +632,14 @@ config SCSI_FLASHPOINT
56765 substantial, so users of MultiMaster Host Adapters may not
56766 wish to include it.
56767
56768+config VMWARE_PVSCSI
56769+ tristate "VMware PVSCSI driver support"
56770+ depends on PCI && SCSI && X86
56771+ help
56772+ This driver supports VMware's para virtualized SCSI HBA.
56773+ To compile this driver as a module, choose M here: the
56774+ module will be called vmw_pvscsi.
56775+
56776 config LIBFC
56777 tristate "LibFC module"
56778 select SCSI_FC_ATTRS
56779diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
56780index 3ad61db..c938975 100644
56781--- a/drivers/scsi/Makefile
56782+++ b/drivers/scsi/Makefile
56783@@ -113,6 +113,7 @@ obj-$(CONFIG_SCSI_MESH) += mesh.o
56784 obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
56785 obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
56786 obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
56787+obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
56788 obj-$(CONFIG_SCSI_PPA) += ppa.o
56789 obj-$(CONFIG_SCSI_IMM) += imm.o
56790 obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
56791@@ -133,6 +134,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
56792 obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
56793 obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
56794 obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
56795+obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
56796
56797 obj-$(CONFIG_ARM) += arm/
56798
56799diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
56800index cdbdec9..b7d560b 100644
56801--- a/drivers/scsi/aacraid/aacraid.h
56802+++ b/drivers/scsi/aacraid/aacraid.h
56803@@ -471,7 +471,7 @@ struct adapter_ops
56804 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
56805 /* Administrative operations */
56806 int (*adapter_comm)(struct aac_dev * dev, int comm);
56807-};
56808+} __no_const;
56809
56810 /*
56811 * Define which interrupt handler needs to be installed
56812diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
56813index a5b8e7b..a6a0e43 100644
56814--- a/drivers/scsi/aacraid/commctrl.c
56815+++ b/drivers/scsi/aacraid/commctrl.c
56816@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
56817 u32 actual_fibsize64, actual_fibsize = 0;
56818 int i;
56819
56820+ pax_track_stack();
56821
56822 if (dev->in_reset) {
56823 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
56824diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
56825index 9b97c3e..f099725 100644
56826--- a/drivers/scsi/aacraid/linit.c
56827+++ b/drivers/scsi/aacraid/linit.c
56828@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
56829 #elif defined(__devinitconst)
56830 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
56831 #else
56832-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
56833+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
56834 #endif
56835 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
56836 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
56837diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
56838index 996f722..9127845 100644
56839--- a/drivers/scsi/aic94xx/aic94xx_init.c
56840+++ b/drivers/scsi/aic94xx/aic94xx_init.c
56841@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
56842 flash_error_table[i].reason);
56843 }
56844
56845-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
56846+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
56847 asd_show_update_bios, asd_store_update_bios);
56848
56849 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
56850@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
56851 .lldd_control_phy = asd_control_phy,
56852 };
56853
56854-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
56855+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
56856 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
56857 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
56858 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
56859diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
56860index 58efd4b..cb48dc7 100644
56861--- a/drivers/scsi/bfa/bfa_ioc.h
56862+++ b/drivers/scsi/bfa/bfa_ioc.h
56863@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
56864 bfa_ioc_disable_cbfn_t disable_cbfn;
56865 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
56866 bfa_ioc_reset_cbfn_t reset_cbfn;
56867-};
56868+} __no_const;
56869
56870 /**
56871 * Heartbeat failure notification queue element.
56872diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
56873index 7ad177e..5503586 100644
56874--- a/drivers/scsi/bfa/bfa_iocfc.h
56875+++ b/drivers/scsi/bfa/bfa_iocfc.h
56876@@ -61,7 +61,7 @@ struct bfa_hwif_s {
56877 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
56878 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
56879 u32 *nvecs, u32 *maxvec);
56880-};
56881+} __no_const;
56882 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
56883
56884 struct bfa_iocfc_s {
56885diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
56886index 4967643..cbec06b 100644
56887--- a/drivers/scsi/dpt_i2o.c
56888+++ b/drivers/scsi/dpt_i2o.c
56889@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
56890 dma_addr_t addr;
56891 ulong flags = 0;
56892
56893+ pax_track_stack();
56894+
56895 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
56896 // get user msg size in u32s
56897 if(get_user(size, &user_msg[0])){
56898@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
56899 s32 rcode;
56900 dma_addr_t addr;
56901
56902+ pax_track_stack();
56903+
56904 memset(msg, 0 , sizeof(msg));
56905 len = scsi_bufflen(cmd);
56906 direction = 0x00000000;
56907diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
56908index c7076ce..e20c67c 100644
56909--- a/drivers/scsi/eata.c
56910+++ b/drivers/scsi/eata.c
56911@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
56912 struct hostdata *ha;
56913 char name[16];
56914
56915+ pax_track_stack();
56916+
56917 sprintf(name, "%s%d", driver_name, j);
56918
56919 if (!request_region(port_base, REGION_SIZE, driver_name)) {
56920diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
56921index 11ae5c9..891daec 100644
56922--- a/drivers/scsi/fcoe/libfcoe.c
56923+++ b/drivers/scsi/fcoe/libfcoe.c
56924@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
56925 size_t rlen;
56926 size_t dlen;
56927
56928+ pax_track_stack();
56929+
56930 fiph = (struct fip_header *)skb->data;
56931 sub = fiph->fip_subcode;
56932 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
56933diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
56934index 71c7bbe..e93088a 100644
56935--- a/drivers/scsi/fnic/fnic_main.c
56936+++ b/drivers/scsi/fnic/fnic_main.c
56937@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
56938 /* Start local port initiatialization */
56939
56940 lp->link_up = 0;
56941- lp->tt = fnic_transport_template;
56942+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
56943
56944 lp->max_retry_count = fnic->config.flogi_retries;
56945 lp->max_rport_retry_count = fnic->config.plogi_retries;
56946diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
56947index bb96d74..9ec3ce4 100644
56948--- a/drivers/scsi/gdth.c
56949+++ b/drivers/scsi/gdth.c
56950@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
56951 ulong flags;
56952 gdth_ha_str *ha;
56953
56954+ pax_track_stack();
56955+
56956 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
56957 return -EFAULT;
56958 ha = gdth_find_ha(ldrv.ionode);
56959@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
56960 gdth_ha_str *ha;
56961 int rval;
56962
56963+ pax_track_stack();
56964+
56965 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
56966 res.number >= MAX_HDRIVES)
56967 return -EFAULT;
56968@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
56969 gdth_ha_str *ha;
56970 int rval;
56971
56972+ pax_track_stack();
56973+
56974 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
56975 return -EFAULT;
56976 ha = gdth_find_ha(gen.ionode);
56977@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
56978 int i;
56979 gdth_cmd_str gdtcmd;
56980 char cmnd[MAX_COMMAND_SIZE];
56981+
56982+ pax_track_stack();
56983+
56984 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
56985
56986 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
56987diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
56988index 1258da3..20d8ae6 100644
56989--- a/drivers/scsi/gdth_proc.c
56990+++ b/drivers/scsi/gdth_proc.c
56991@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
56992 ulong64 paddr;
56993
56994 char cmnd[MAX_COMMAND_SIZE];
56995+
56996+ pax_track_stack();
56997+
56998 memset(cmnd, 0xff, 12);
56999 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
57000
57001@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
57002 gdth_hget_str *phg;
57003 char cmnd[MAX_COMMAND_SIZE];
57004
57005+ pax_track_stack();
57006+
57007 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
57008 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
57009 if (!gdtcmd || !estr)
57010diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
57011index d03a926..f324286 100644
57012--- a/drivers/scsi/hosts.c
57013+++ b/drivers/scsi/hosts.c
57014@@ -40,7 +40,7 @@
57015 #include "scsi_logging.h"
57016
57017
57018-static atomic_t scsi_host_next_hn; /* host_no for next new host */
57019+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
57020
57021
57022 static void scsi_host_cls_release(struct device *dev)
57023@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
57024 * subtract one because we increment first then return, but we need to
57025 * know what the next host number was before increment
57026 */
57027- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
57028+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
57029 shost->dma_channel = 0xff;
57030
57031 /* These three are default values which can be overridden */
57032diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
57033index a601159..55e19d2 100644
57034--- a/drivers/scsi/ipr.c
57035+++ b/drivers/scsi/ipr.c
57036@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
57037 return true;
57038 }
57039
57040-static struct ata_port_operations ipr_sata_ops = {
57041+static const struct ata_port_operations ipr_sata_ops = {
57042 .phy_reset = ipr_ata_phy_reset,
57043 .hardreset = ipr_sata_reset,
57044 .post_internal_cmd = ipr_ata_post_internal,
57045diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
57046index 4e49fbc..97907ff 100644
57047--- a/drivers/scsi/ips.h
57048+++ b/drivers/scsi/ips.h
57049@@ -1027,7 +1027,7 @@ typedef struct {
57050 int (*intr)(struct ips_ha *);
57051 void (*enableint)(struct ips_ha *);
57052 uint32_t (*statupd)(struct ips_ha *);
57053-} ips_hw_func_t;
57054+} __no_const ips_hw_func_t;
57055
57056 typedef struct ips_ha {
57057 uint8_t ha_id[IPS_MAX_CHANNELS+1];
57058diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
57059index c1c1574..a9c9348 100644
57060--- a/drivers/scsi/libfc/fc_exch.c
57061+++ b/drivers/scsi/libfc/fc_exch.c
57062@@ -86,12 +86,12 @@ struct fc_exch_mgr {
57063 * all together if not used XXX
57064 */
57065 struct {
57066- atomic_t no_free_exch;
57067- atomic_t no_free_exch_xid;
57068- atomic_t xid_not_found;
57069- atomic_t xid_busy;
57070- atomic_t seq_not_found;
57071- atomic_t non_bls_resp;
57072+ atomic_unchecked_t no_free_exch;
57073+ atomic_unchecked_t no_free_exch_xid;
57074+ atomic_unchecked_t xid_not_found;
57075+ atomic_unchecked_t xid_busy;
57076+ atomic_unchecked_t seq_not_found;
57077+ atomic_unchecked_t non_bls_resp;
57078 } stats;
57079 };
57080 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
57081@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
57082 /* allocate memory for exchange */
57083 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
57084 if (!ep) {
57085- atomic_inc(&mp->stats.no_free_exch);
57086+ atomic_inc_unchecked(&mp->stats.no_free_exch);
57087 goto out;
57088 }
57089 memset(ep, 0, sizeof(*ep));
57090@@ -557,7 +557,7 @@ out:
57091 return ep;
57092 err:
57093 spin_unlock_bh(&pool->lock);
57094- atomic_inc(&mp->stats.no_free_exch_xid);
57095+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
57096 mempool_free(ep, mp->ep_pool);
57097 return NULL;
57098 }
57099@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57100 xid = ntohs(fh->fh_ox_id); /* we originated exch */
57101 ep = fc_exch_find(mp, xid);
57102 if (!ep) {
57103- atomic_inc(&mp->stats.xid_not_found);
57104+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57105 reject = FC_RJT_OX_ID;
57106 goto out;
57107 }
57108@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57109 ep = fc_exch_find(mp, xid);
57110 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
57111 if (ep) {
57112- atomic_inc(&mp->stats.xid_busy);
57113+ atomic_inc_unchecked(&mp->stats.xid_busy);
57114 reject = FC_RJT_RX_ID;
57115 goto rel;
57116 }
57117@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57118 }
57119 xid = ep->xid; /* get our XID */
57120 } else if (!ep) {
57121- atomic_inc(&mp->stats.xid_not_found);
57122+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57123 reject = FC_RJT_RX_ID; /* XID not found */
57124 goto out;
57125 }
57126@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
57127 } else {
57128 sp = &ep->seq;
57129 if (sp->id != fh->fh_seq_id) {
57130- atomic_inc(&mp->stats.seq_not_found);
57131+ atomic_inc_unchecked(&mp->stats.seq_not_found);
57132 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
57133 goto rel;
57134 }
57135@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57136
57137 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
57138 if (!ep) {
57139- atomic_inc(&mp->stats.xid_not_found);
57140+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57141 goto out;
57142 }
57143 if (ep->esb_stat & ESB_ST_COMPLETE) {
57144- atomic_inc(&mp->stats.xid_not_found);
57145+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57146 goto out;
57147 }
57148 if (ep->rxid == FC_XID_UNKNOWN)
57149 ep->rxid = ntohs(fh->fh_rx_id);
57150 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
57151- atomic_inc(&mp->stats.xid_not_found);
57152+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57153 goto rel;
57154 }
57155 if (ep->did != ntoh24(fh->fh_s_id) &&
57156 ep->did != FC_FID_FLOGI) {
57157- atomic_inc(&mp->stats.xid_not_found);
57158+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57159 goto rel;
57160 }
57161 sof = fr_sof(fp);
57162@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57163 } else {
57164 sp = &ep->seq;
57165 if (sp->id != fh->fh_seq_id) {
57166- atomic_inc(&mp->stats.seq_not_found);
57167+ atomic_inc_unchecked(&mp->stats.seq_not_found);
57168 goto rel;
57169 }
57170 }
57171@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57172 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
57173
57174 if (!sp)
57175- atomic_inc(&mp->stats.xid_not_found);
57176+ atomic_inc_unchecked(&mp->stats.xid_not_found);
57177 else
57178- atomic_inc(&mp->stats.non_bls_resp);
57179+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
57180
57181 fc_frame_free(fp);
57182 }
57183diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
57184index 0ee989f..a582241 100644
57185--- a/drivers/scsi/libsas/sas_ata.c
57186+++ b/drivers/scsi/libsas/sas_ata.c
57187@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
57188 }
57189 }
57190
57191-static struct ata_port_operations sas_sata_ops = {
57192+static const struct ata_port_operations sas_sata_ops = {
57193 .phy_reset = sas_ata_phy_reset,
57194 .post_internal_cmd = sas_ata_post_internal,
57195 .qc_defer = ata_std_qc_defer,
57196diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
57197index aa10f79..5cc79e4 100644
57198--- a/drivers/scsi/lpfc/lpfc.h
57199+++ b/drivers/scsi/lpfc/lpfc.h
57200@@ -400,7 +400,7 @@ struct lpfc_vport {
57201 struct dentry *debug_nodelist;
57202 struct dentry *vport_debugfs_root;
57203 struct lpfc_debugfs_trc *disc_trc;
57204- atomic_t disc_trc_cnt;
57205+ atomic_unchecked_t disc_trc_cnt;
57206 #endif
57207 uint8_t stat_data_enabled;
57208 uint8_t stat_data_blocked;
57209@@ -725,8 +725,8 @@ struct lpfc_hba {
57210 struct timer_list fabric_block_timer;
57211 unsigned long bit_flags;
57212 #define FABRIC_COMANDS_BLOCKED 0
57213- atomic_t num_rsrc_err;
57214- atomic_t num_cmd_success;
57215+ atomic_unchecked_t num_rsrc_err;
57216+ atomic_unchecked_t num_cmd_success;
57217 unsigned long last_rsrc_error_time;
57218 unsigned long last_ramp_down_time;
57219 unsigned long last_ramp_up_time;
57220@@ -740,7 +740,7 @@ struct lpfc_hba {
57221 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
57222 struct dentry *debug_slow_ring_trc;
57223 struct lpfc_debugfs_trc *slow_ring_trc;
57224- atomic_t slow_ring_trc_cnt;
57225+ atomic_unchecked_t slow_ring_trc_cnt;
57226 #endif
57227
57228 /* Used for deferred freeing of ELS data buffers */
57229diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
57230index 8d0f0de..7c77a62 100644
57231--- a/drivers/scsi/lpfc/lpfc_debugfs.c
57232+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
57233@@ -124,7 +124,7 @@ struct lpfc_debug {
57234 int len;
57235 };
57236
57237-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57238+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
57239 static unsigned long lpfc_debugfs_start_time = 0L;
57240
57241 /**
57242@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
57243 lpfc_debugfs_enable = 0;
57244
57245 len = 0;
57246- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
57247+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
57248 (lpfc_debugfs_max_disc_trc - 1);
57249 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
57250 dtp = vport->disc_trc + i;
57251@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
57252 lpfc_debugfs_enable = 0;
57253
57254 len = 0;
57255- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
57256+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
57257 (lpfc_debugfs_max_slow_ring_trc - 1);
57258 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
57259 dtp = phba->slow_ring_trc + i;
57260@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
57261 uint32_t *ptr;
57262 char buffer[1024];
57263
57264+ pax_track_stack();
57265+
57266 off = 0;
57267 spin_lock_irq(&phba->hbalock);
57268
57269@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
57270 !vport || !vport->disc_trc)
57271 return;
57272
57273- index = atomic_inc_return(&vport->disc_trc_cnt) &
57274+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
57275 (lpfc_debugfs_max_disc_trc - 1);
57276 dtp = vport->disc_trc + index;
57277 dtp->fmt = fmt;
57278 dtp->data1 = data1;
57279 dtp->data2 = data2;
57280 dtp->data3 = data3;
57281- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57282+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57283 dtp->jif = jiffies;
57284 #endif
57285 return;
57286@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
57287 !phba || !phba->slow_ring_trc)
57288 return;
57289
57290- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
57291+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
57292 (lpfc_debugfs_max_slow_ring_trc - 1);
57293 dtp = phba->slow_ring_trc + index;
57294 dtp->fmt = fmt;
57295 dtp->data1 = data1;
57296 dtp->data2 = data2;
57297 dtp->data3 = data3;
57298- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
57299+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
57300 dtp->jif = jiffies;
57301 #endif
57302 return;
57303@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57304 "slow_ring buffer\n");
57305 goto debug_failed;
57306 }
57307- atomic_set(&phba->slow_ring_trc_cnt, 0);
57308+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
57309 memset(phba->slow_ring_trc, 0,
57310 (sizeof(struct lpfc_debugfs_trc) *
57311 lpfc_debugfs_max_slow_ring_trc));
57312@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
57313 "buffer\n");
57314 goto debug_failed;
57315 }
57316- atomic_set(&vport->disc_trc_cnt, 0);
57317+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
57318
57319 snprintf(name, sizeof(name), "discovery_trace");
57320 vport->debug_disc_trc =
57321diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
57322index 549bc7d..8189dbb 100644
57323--- a/drivers/scsi/lpfc/lpfc_init.c
57324+++ b/drivers/scsi/lpfc/lpfc_init.c
57325@@ -8021,8 +8021,10 @@ lpfc_init(void)
57326 printk(LPFC_COPYRIGHT "\n");
57327
57328 if (lpfc_enable_npiv) {
57329- lpfc_transport_functions.vport_create = lpfc_vport_create;
57330- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57331+ pax_open_kernel();
57332+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
57333+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
57334+ pax_close_kernel();
57335 }
57336 lpfc_transport_template =
57337 fc_attach_transport(&lpfc_transport_functions);
57338diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
57339index c88f59f..ff2a42f 100644
57340--- a/drivers/scsi/lpfc/lpfc_scsi.c
57341+++ b/drivers/scsi/lpfc/lpfc_scsi.c
57342@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
57343 uint32_t evt_posted;
57344
57345 spin_lock_irqsave(&phba->hbalock, flags);
57346- atomic_inc(&phba->num_rsrc_err);
57347+ atomic_inc_unchecked(&phba->num_rsrc_err);
57348 phba->last_rsrc_error_time = jiffies;
57349
57350 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
57351@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
57352 unsigned long flags;
57353 struct lpfc_hba *phba = vport->phba;
57354 uint32_t evt_posted;
57355- atomic_inc(&phba->num_cmd_success);
57356+ atomic_inc_unchecked(&phba->num_cmd_success);
57357
57358 if (vport->cfg_lun_queue_depth <= queue_depth)
57359 return;
57360@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
57361 int i;
57362 struct lpfc_rport_data *rdata;
57363
57364- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
57365- num_cmd_success = atomic_read(&phba->num_cmd_success);
57366+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
57367+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
57368
57369 vports = lpfc_create_vport_work_array(phba);
57370 if (vports != NULL)
57371@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
57372 }
57373 }
57374 lpfc_destroy_vport_work_array(phba, vports);
57375- atomic_set(&phba->num_rsrc_err, 0);
57376- atomic_set(&phba->num_cmd_success, 0);
57377+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
57378+ atomic_set_unchecked(&phba->num_cmd_success, 0);
57379 }
57380
57381 /**
57382@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
57383 }
57384 }
57385 lpfc_destroy_vport_work_array(phba, vports);
57386- atomic_set(&phba->num_rsrc_err, 0);
57387- atomic_set(&phba->num_cmd_success, 0);
57388+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
57389+ atomic_set_unchecked(&phba->num_cmd_success, 0);
57390 }
57391
57392 /**
57393diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
57394index 234f0b7..3020aea 100644
57395--- a/drivers/scsi/megaraid/megaraid_mbox.c
57396+++ b/drivers/scsi/megaraid/megaraid_mbox.c
57397@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
57398 int rval;
57399 int i;
57400
57401+ pax_track_stack();
57402+
57403 // Allocate memory for the base list of scb for management module.
57404 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
57405
57406diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
57407index 7a117c1..ee01e9e 100644
57408--- a/drivers/scsi/osd/osd_initiator.c
57409+++ b/drivers/scsi/osd/osd_initiator.c
57410@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
57411 int nelem = ARRAY_SIZE(get_attrs), a = 0;
57412 int ret;
57413
57414+ pax_track_stack();
57415+
57416 or = osd_start_request(od, GFP_KERNEL);
57417 if (!or)
57418 return -ENOMEM;
57419diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
57420index 9ab8c86..9425ad3 100644
57421--- a/drivers/scsi/pmcraid.c
57422+++ b/drivers/scsi/pmcraid.c
57423@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
57424 res->scsi_dev = scsi_dev;
57425 scsi_dev->hostdata = res;
57426 res->change_detected = 0;
57427- atomic_set(&res->read_failures, 0);
57428- atomic_set(&res->write_failures, 0);
57429+ atomic_set_unchecked(&res->read_failures, 0);
57430+ atomic_set_unchecked(&res->write_failures, 0);
57431 rc = 0;
57432 }
57433 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
57434@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
57435
57436 /* If this was a SCSI read/write command keep count of errors */
57437 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
57438- atomic_inc(&res->read_failures);
57439+ atomic_inc_unchecked(&res->read_failures);
57440 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
57441- atomic_inc(&res->write_failures);
57442+ atomic_inc_unchecked(&res->write_failures);
57443
57444 if (!RES_IS_GSCSI(res->cfg_entry) &&
57445 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
57446@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
57447
57448 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
57449 /* add resources only after host is added into system */
57450- if (!atomic_read(&pinstance->expose_resources))
57451+ if (!atomic_read_unchecked(&pinstance->expose_resources))
57452 return;
57453
57454 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
57455@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
57456 init_waitqueue_head(&pinstance->reset_wait_q);
57457
57458 atomic_set(&pinstance->outstanding_cmds, 0);
57459- atomic_set(&pinstance->expose_resources, 0);
57460+ atomic_set_unchecked(&pinstance->expose_resources, 0);
57461
57462 INIT_LIST_HEAD(&pinstance->free_res_q);
57463 INIT_LIST_HEAD(&pinstance->used_res_q);
57464@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
57465 /* Schedule worker thread to handle CCN and take care of adding and
57466 * removing devices to OS
57467 */
57468- atomic_set(&pinstance->expose_resources, 1);
57469+ atomic_set_unchecked(&pinstance->expose_resources, 1);
57470 schedule_work(&pinstance->worker_q);
57471 return rc;
57472
57473diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
57474index 3441b3f..6cbe8f7 100644
57475--- a/drivers/scsi/pmcraid.h
57476+++ b/drivers/scsi/pmcraid.h
57477@@ -690,7 +690,7 @@ struct pmcraid_instance {
57478 atomic_t outstanding_cmds;
57479
57480 /* should add/delete resources to mid-layer now ?*/
57481- atomic_t expose_resources;
57482+ atomic_unchecked_t expose_resources;
57483
57484 /* Tasklet to handle deferred processing */
57485 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
57486@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
57487 struct list_head queue; /* link to "to be exposed" resources */
57488 struct pmcraid_config_table_entry cfg_entry;
57489 struct scsi_device *scsi_dev; /* Link scsi_device structure */
57490- atomic_t read_failures; /* count of failed READ commands */
57491- atomic_t write_failures; /* count of failed WRITE commands */
57492+ atomic_unchecked_t read_failures; /* count of failed READ commands */
57493+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
57494
57495 /* To indicate add/delete/modify during CCN */
57496 u8 change_detected;
57497diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
57498index 2150618..7034215 100644
57499--- a/drivers/scsi/qla2xxx/qla_def.h
57500+++ b/drivers/scsi/qla2xxx/qla_def.h
57501@@ -2089,7 +2089,7 @@ struct isp_operations {
57502
57503 int (*get_flash_version) (struct scsi_qla_host *, void *);
57504 int (*start_scsi) (srb_t *);
57505-};
57506+} __no_const;
57507
57508 /* MSI-X Support *************************************************************/
57509
57510diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
57511index 81b5f29..2ae1fad 100644
57512--- a/drivers/scsi/qla4xxx/ql4_def.h
57513+++ b/drivers/scsi/qla4xxx/ql4_def.h
57514@@ -240,7 +240,7 @@ struct ddb_entry {
57515 atomic_t retry_relogin_timer; /* Min Time between relogins
57516 * (4000 only) */
57517 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
57518- atomic_t relogin_retry_count; /* Num of times relogin has been
57519+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
57520 * retried */
57521
57522 uint16_t port;
57523diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
57524index af8c323..515dd51 100644
57525--- a/drivers/scsi/qla4xxx/ql4_init.c
57526+++ b/drivers/scsi/qla4xxx/ql4_init.c
57527@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
57528 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
57529 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
57530 atomic_set(&ddb_entry->relogin_timer, 0);
57531- atomic_set(&ddb_entry->relogin_retry_count, 0);
57532+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
57533 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
57534 list_add_tail(&ddb_entry->list, &ha->ddb_list);
57535 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
57536@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
57537 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
57538 atomic_set(&ddb_entry->port_down_timer,
57539 ha->port_down_retry_count);
57540- atomic_set(&ddb_entry->relogin_retry_count, 0);
57541+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
57542 atomic_set(&ddb_entry->relogin_timer, 0);
57543 clear_bit(DF_RELOGIN, &ddb_entry->flags);
57544 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
57545diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
57546index 83c8b5e..a82b348 100644
57547--- a/drivers/scsi/qla4xxx/ql4_os.c
57548+++ b/drivers/scsi/qla4xxx/ql4_os.c
57549@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
57550 ddb_entry->fw_ddb_device_state ==
57551 DDB_DS_SESSION_FAILED) {
57552 /* Reset retry relogin timer */
57553- atomic_inc(&ddb_entry->relogin_retry_count);
57554+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
57555 DEBUG2(printk("scsi%ld: index[%d] relogin"
57556 " timed out-retrying"
57557 " relogin (%d)\n",
57558 ha->host_no,
57559 ddb_entry->fw_ddb_index,
57560- atomic_read(&ddb_entry->
57561+ atomic_read_unchecked(&ddb_entry->
57562 relogin_retry_count))
57563 );
57564 start_dpc++;
57565diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
57566index dd098ca..686ce01 100644
57567--- a/drivers/scsi/scsi.c
57568+++ b/drivers/scsi/scsi.c
57569@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
57570 unsigned long timeout;
57571 int rtn = 0;
57572
57573- atomic_inc(&cmd->device->iorequest_cnt);
57574+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
57575
57576 /* check if the device is still usable */
57577 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
57578diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
57579index bc3e363..e1a8e50 100644
57580--- a/drivers/scsi/scsi_debug.c
57581+++ b/drivers/scsi/scsi_debug.c
57582@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
57583 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
57584 unsigned char *cmd = (unsigned char *)scp->cmnd;
57585
57586+ pax_track_stack();
57587+
57588 if ((errsts = check_readiness(scp, 1, devip)))
57589 return errsts;
57590 memset(arr, 0, sizeof(arr));
57591@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
57592 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
57593 unsigned char *cmd = (unsigned char *)scp->cmnd;
57594
57595+ pax_track_stack();
57596+
57597 if ((errsts = check_readiness(scp, 1, devip)))
57598 return errsts;
57599 memset(arr, 0, sizeof(arr));
57600diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
57601index 8df12522..c4c1472 100644
57602--- a/drivers/scsi/scsi_lib.c
57603+++ b/drivers/scsi/scsi_lib.c
57604@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
57605 shost = sdev->host;
57606 scsi_init_cmd_errh(cmd);
57607 cmd->result = DID_NO_CONNECT << 16;
57608- atomic_inc(&cmd->device->iorequest_cnt);
57609+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
57610
57611 /*
57612 * SCSI request completion path will do scsi_device_unbusy(),
57613@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
57614 */
57615 cmd->serial_number = 0;
57616
57617- atomic_inc(&cmd->device->iodone_cnt);
57618+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
57619 if (cmd->result)
57620- atomic_inc(&cmd->device->ioerr_cnt);
57621+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
57622
57623 disposition = scsi_decide_disposition(cmd);
57624 if (disposition != SUCCESS &&
57625diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
57626index 91a93e0..eae0fe3 100644
57627--- a/drivers/scsi/scsi_sysfs.c
57628+++ b/drivers/scsi/scsi_sysfs.c
57629@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
57630 char *buf) \
57631 { \
57632 struct scsi_device *sdev = to_scsi_device(dev); \
57633- unsigned long long count = atomic_read(&sdev->field); \
57634+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
57635 return snprintf(buf, 20, "0x%llx\n", count); \
57636 } \
57637 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
57638diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
57639index 1030327..f91fd30 100644
57640--- a/drivers/scsi/scsi_tgt_lib.c
57641+++ b/drivers/scsi/scsi_tgt_lib.c
57642@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
57643 int err;
57644
57645 dprintk("%lx %u\n", uaddr, len);
57646- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
57647+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
57648 if (err) {
57649 /*
57650 * TODO: need to fixup sg_tablesize, max_segment_size,
57651diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
57652index db02e31..1b42ea9 100644
57653--- a/drivers/scsi/scsi_transport_fc.c
57654+++ b/drivers/scsi/scsi_transport_fc.c
57655@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
57656 * Netlink Infrastructure
57657 */
57658
57659-static atomic_t fc_event_seq;
57660+static atomic_unchecked_t fc_event_seq;
57661
57662 /**
57663 * fc_get_event_number - Obtain the next sequential FC event number
57664@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
57665 u32
57666 fc_get_event_number(void)
57667 {
57668- return atomic_add_return(1, &fc_event_seq);
57669+ return atomic_add_return_unchecked(1, &fc_event_seq);
57670 }
57671 EXPORT_SYMBOL(fc_get_event_number);
57672
57673@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
57674 {
57675 int error;
57676
57677- atomic_set(&fc_event_seq, 0);
57678+ atomic_set_unchecked(&fc_event_seq, 0);
57679
57680 error = transport_class_register(&fc_host_class);
57681 if (error)
57682diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
57683index de2f8c4..63c5278 100644
57684--- a/drivers/scsi/scsi_transport_iscsi.c
57685+++ b/drivers/scsi/scsi_transport_iscsi.c
57686@@ -81,7 +81,7 @@ struct iscsi_internal {
57687 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
57688 };
57689
57690-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
57691+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
57692 static struct workqueue_struct *iscsi_eh_timer_workq;
57693
57694 /*
57695@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
57696 int err;
57697
57698 ihost = shost->shost_data;
57699- session->sid = atomic_add_return(1, &iscsi_session_nr);
57700+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
57701
57702 if (id == ISCSI_MAX_TARGET) {
57703 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
57704@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
57705 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
57706 ISCSI_TRANSPORT_VERSION);
57707
57708- atomic_set(&iscsi_session_nr, 0);
57709+ atomic_set_unchecked(&iscsi_session_nr, 0);
57710
57711 err = class_register(&iscsi_transport_class);
57712 if (err)
57713diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
57714index 21a045e..ec89e03 100644
57715--- a/drivers/scsi/scsi_transport_srp.c
57716+++ b/drivers/scsi/scsi_transport_srp.c
57717@@ -33,7 +33,7 @@
57718 #include "scsi_transport_srp_internal.h"
57719
57720 struct srp_host_attrs {
57721- atomic_t next_port_id;
57722+ atomic_unchecked_t next_port_id;
57723 };
57724 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
57725
57726@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
57727 struct Scsi_Host *shost = dev_to_shost(dev);
57728 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
57729
57730- atomic_set(&srp_host->next_port_id, 0);
57731+ atomic_set_unchecked(&srp_host->next_port_id, 0);
57732 return 0;
57733 }
57734
57735@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
57736 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
57737 rport->roles = ids->roles;
57738
57739- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
57740+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
57741 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
57742
57743 transport_setup_device(&rport->dev);
57744diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
57745index 040f751..98a5ed2 100644
57746--- a/drivers/scsi/sg.c
57747+++ b/drivers/scsi/sg.c
57748@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
57749 sdp->disk->disk_name,
57750 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
57751 NULL,
57752- (char *)arg);
57753+ (char __user *)arg);
57754 case BLKTRACESTART:
57755 return blk_trace_startstop(sdp->device->request_queue, 1);
57756 case BLKTRACESTOP:
57757@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
57758 const struct file_operations * fops;
57759 };
57760
57761-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
57762+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
57763 {"allow_dio", &adio_fops},
57764 {"debug", &debug_fops},
57765 {"def_reserved_size", &dressz_fops},
57766@@ -2307,7 +2307,7 @@ sg_proc_init(void)
57767 {
57768 int k, mask;
57769 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
57770- struct sg_proc_leaf * leaf;
57771+ const struct sg_proc_leaf * leaf;
57772
57773 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
57774 if (!sg_proc_sgp)
57775diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
57776index c19ca5e..3eb5959 100644
57777--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
57778+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
57779@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
57780 int do_iounmap = 0;
57781 int do_disable_device = 1;
57782
57783+ pax_track_stack();
57784+
57785 memset(&sym_dev, 0, sizeof(sym_dev));
57786 memset(&nvram, 0, sizeof(nvram));
57787 sym_dev.pdev = pdev;
57788diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
57789new file mode 100644
57790index 0000000..eabb432
57791--- /dev/null
57792+++ b/drivers/scsi/vmw_pvscsi.c
57793@@ -0,0 +1,1401 @@
57794+/*
57795+ * Linux driver for VMware's para-virtualized SCSI HBA.
57796+ *
57797+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
57798+ *
57799+ * This program is free software; you can redistribute it and/or modify it
57800+ * under the terms of the GNU General Public License as published by the
57801+ * Free Software Foundation; version 2 of the License and no later version.
57802+ *
57803+ * This program is distributed in the hope that it will be useful, but
57804+ * WITHOUT ANY WARRANTY; without even the implied warranty of
57805+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
57806+ * NON INFRINGEMENT. See the GNU General Public License for more
57807+ * details.
57808+ *
57809+ * You should have received a copy of the GNU General Public License
57810+ * along with this program; if not, write to the Free Software
57811+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
57812+ *
57813+ * Maintained by: Alok N Kataria <akataria@vmware.com>
57814+ *
57815+ */
57816+
57817+#include <linux/kernel.h>
57818+#include <linux/module.h>
57819+#include <linux/moduleparam.h>
57820+#include <linux/types.h>
57821+#include <linux/interrupt.h>
57822+#include <linux/workqueue.h>
57823+#include <linux/pci.h>
57824+
57825+#include <scsi/scsi.h>
57826+#include <scsi/scsi_host.h>
57827+#include <scsi/scsi_cmnd.h>
57828+#include <scsi/scsi_device.h>
57829+
57830+#include "vmw_pvscsi.h"
57831+
57832+#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"
57833+
57834+MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
57835+MODULE_AUTHOR("VMware, Inc.");
57836+MODULE_LICENSE("GPL");
57837+MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);
57838+
57839+#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8
57840+#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1
57841+#define PVSCSI_DEFAULT_QUEUE_DEPTH 64
57842+#define SGL_SIZE PAGE_SIZE
57843+
57844+#define pvscsi_dev(adapter) (&(adapter->dev->dev))
57845+
57846+struct pvscsi_sg_list {
57847+ struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
57848+};
57849+
57850+struct pvscsi_ctx {
57851+ /*
57852+ * The index of the context in cmd_map serves as the context ID for a
57853+ * 1-to-1 mapping completions back to requests.
57854+ */
57855+ struct scsi_cmnd *cmd;
57856+ struct pvscsi_sg_list *sgl;
57857+ struct list_head list;
57858+ dma_addr_t dataPA;
57859+ dma_addr_t sensePA;
57860+ dma_addr_t sglPA;
57861+};
57862+
57863+struct pvscsi_adapter {
57864+ char *mmioBase;
57865+ unsigned int irq;
57866+ u8 rev;
57867+ bool use_msi;
57868+ bool use_msix;
57869+ bool use_msg;
57870+
57871+ spinlock_t hw_lock;
57872+
57873+ struct workqueue_struct *workqueue;
57874+ struct work_struct work;
57875+
57876+ struct PVSCSIRingReqDesc *req_ring;
57877+ unsigned req_pages;
57878+ unsigned req_depth;
57879+ dma_addr_t reqRingPA;
57880+
57881+ struct PVSCSIRingCmpDesc *cmp_ring;
57882+ unsigned cmp_pages;
57883+ dma_addr_t cmpRingPA;
57884+
57885+ struct PVSCSIRingMsgDesc *msg_ring;
57886+ unsigned msg_pages;
57887+ dma_addr_t msgRingPA;
57888+
57889+ struct PVSCSIRingsState *rings_state;
57890+ dma_addr_t ringStatePA;
57891+
57892+ struct pci_dev *dev;
57893+ struct Scsi_Host *host;
57894+
57895+ struct list_head cmd_pool;
57896+ struct pvscsi_ctx *cmd_map;
57897+};
57898+
57899+
57900+/* Command line parameters */
57901+static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
57902+static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
57903+static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
57904+static bool pvscsi_disable_msi;
57905+static bool pvscsi_disable_msix;
57906+static bool pvscsi_use_msg = true;
57907+
57908+#define PVSCSI_RW (S_IRUSR | S_IWUSR)
57909+
57910+module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
57911+MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
57912+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");
57913+
57914+module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
57915+MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
57916+ __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");
57917+
57918+module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
57919+MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
57920+ __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")");
57921+
57922+module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
57923+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
57924+
57925+module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
57926+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
57927+
57928+module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
57929+MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
57930+
57931+static const struct pci_device_id pvscsi_pci_tbl[] = {
57932+ { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
57933+ { 0 }
57934+};
57935+
57936+MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
57937+
57938+static struct pvscsi_ctx *
57939+pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
57940+{
57941+ struct pvscsi_ctx *ctx, *end;
57942+
57943+ end = &adapter->cmd_map[adapter->req_depth];
57944+ for (ctx = adapter->cmd_map; ctx < end; ctx++)
57945+ if (ctx->cmd == cmd)
57946+ return ctx;
57947+
57948+ return NULL;
57949+}
57950+
57951+static struct pvscsi_ctx *
57952+pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
57953+{
57954+ struct pvscsi_ctx *ctx;
57955+
57956+ if (list_empty(&adapter->cmd_pool))
57957+ return NULL;
57958+
57959+ ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
57960+ ctx->cmd = cmd;
57961+ list_del(&ctx->list);
57962+
57963+ return ctx;
57964+}
57965+
57966+static void pvscsi_release_context(struct pvscsi_adapter *adapter,
57967+ struct pvscsi_ctx *ctx)
57968+{
57969+ ctx->cmd = NULL;
57970+ list_add(&ctx->list, &adapter->cmd_pool);
57971+}
57972+
57973+/*
57974+ * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
57975+ * non-zero integer. ctx always points to an entry in cmd_map array, hence
57976+ * the return value is always >=1.
57977+ */
57978+static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
57979+ const struct pvscsi_ctx *ctx)
57980+{
57981+ return ctx - adapter->cmd_map + 1;
57982+}
57983+
57984+static struct pvscsi_ctx *
57985+pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
57986+{
57987+ return &adapter->cmd_map[context - 1];
57988+}
57989+
57990+static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
57991+ u32 offset, u32 val)
57992+{
57993+ writel(val, adapter->mmioBase + offset);
57994+}
57995+
57996+static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
57997+{
57998+ return readl(adapter->mmioBase + offset);
57999+}
58000+
58001+static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
58002+{
58003+ return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
58004+}
58005+
58006+static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
58007+ u32 val)
58008+{
58009+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
58010+}
58011+
58012+static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
58013+{
58014+ u32 intr_bits;
58015+
58016+ intr_bits = PVSCSI_INTR_CMPL_MASK;
58017+ if (adapter->use_msg)
58018+ intr_bits |= PVSCSI_INTR_MSG_MASK;
58019+
58020+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
58021+}
58022+
58023+static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
58024+{
58025+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
58026+}
58027+
58028+static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
58029+ u32 cmd, const void *desc, size_t len)
58030+{
58031+ const u32 *ptr = desc;
58032+ size_t i;
58033+
58034+ len /= sizeof(*ptr);
58035+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
58036+ for (i = 0; i < len; i++)
58037+ pvscsi_reg_write(adapter,
58038+ PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
58039+}
58040+
58041+static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
58042+ const struct pvscsi_ctx *ctx)
58043+{
58044+ struct PVSCSICmdDescAbortCmd cmd = { 0 };
58045+
58046+ cmd.target = ctx->cmd->device->id;
58047+ cmd.context = pvscsi_map_context(adapter, ctx);
58048+
58049+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
58050+}
58051+
58052+static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
58053+{
58054+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
58055+}
58056+
58057+static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
58058+{
58059+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
58060+}
58061+
58062+static int scsi_is_rw(unsigned char op)
58063+{
58064+ return op == READ_6 || op == WRITE_6 ||
58065+ op == READ_10 || op == WRITE_10 ||
58066+ op == READ_12 || op == WRITE_12 ||
58067+ op == READ_16 || op == WRITE_16;
58068+}
58069+
58070+static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
58071+ unsigned char op)
58072+{
58073+ if (scsi_is_rw(op))
58074+ pvscsi_kick_rw_io(adapter);
58075+ else
58076+ pvscsi_process_request_ring(adapter);
58077+}
58078+
58079+static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
58080+{
58081+ dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
58082+
58083+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
58084+}
58085+
58086+static void ll_bus_reset(const struct pvscsi_adapter *adapter)
58087+{
58088+ dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter);
58089+
58090+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
58091+}
58092+
58093+static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
58094+{
58095+ struct PVSCSICmdDescResetDevice cmd = { 0 };
58096+
58097+ dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target);
58098+
58099+ cmd.target = target;
58100+
58101+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
58102+ &cmd, sizeof(cmd));
58103+}
58104+
58105+static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
58106+ struct scatterlist *sg, unsigned count)
58107+{
58108+ unsigned i;
58109+ struct PVSCSISGElement *sge;
58110+
58111+ BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
58112+
58113+ sge = &ctx->sgl->sge[0];
58114+ for (i = 0; i < count; i++, sg++) {
58115+ sge[i].addr = sg_dma_address(sg);
58116+ sge[i].length = sg_dma_len(sg);
58117+ sge[i].flags = 0;
58118+ }
58119+}
58120+
58121+/*
58122+ * Map all data buffers for a command into PCI space and
58123+ * setup the scatter/gather list if needed.
58124+ */
58125+static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
58126+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
58127+ struct PVSCSIRingReqDesc *e)
58128+{
58129+ unsigned count;
58130+ unsigned bufflen = scsi_bufflen(cmd);
58131+ struct scatterlist *sg;
58132+
58133+ e->dataLen = bufflen;
58134+ e->dataAddr = 0;
58135+ if (bufflen == 0)
58136+ return;
58137+
58138+ sg = scsi_sglist(cmd);
58139+ count = scsi_sg_count(cmd);
58140+ if (count != 0) {
58141+ int segs = scsi_dma_map(cmd);
58142+ if (segs > 1) {
58143+ pvscsi_create_sg(ctx, sg, segs);
58144+
58145+ e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
58146+ ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
58147+ SGL_SIZE, PCI_DMA_TODEVICE);
58148+ e->dataAddr = ctx->sglPA;
58149+ } else
58150+ e->dataAddr = sg_dma_address(sg);
58151+ } else {
58152+ /*
58153+ * In case there is no S/G list, scsi_sglist points
58154+ * directly to the buffer.
58155+ */
58156+ ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
58157+ cmd->sc_data_direction);
58158+ e->dataAddr = ctx->dataPA;
58159+ }
58160+}
58161+
58162+static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
58163+ struct pvscsi_ctx *ctx)
58164+{
58165+ struct scsi_cmnd *cmd;
58166+ unsigned bufflen;
58167+
58168+ cmd = ctx->cmd;
58169+ bufflen = scsi_bufflen(cmd);
58170+
58171+ if (bufflen != 0) {
58172+ unsigned count = scsi_sg_count(cmd);
58173+
58174+ if (count != 0) {
58175+ scsi_dma_unmap(cmd);
58176+ if (ctx->sglPA) {
58177+ pci_unmap_single(adapter->dev, ctx->sglPA,
58178+ SGL_SIZE, PCI_DMA_TODEVICE);
58179+ ctx->sglPA = 0;
58180+ }
58181+ } else
58182+ pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
58183+ cmd->sc_data_direction);
58184+ }
58185+ if (cmd->sense_buffer)
58186+ pci_unmap_single(adapter->dev, ctx->sensePA,
58187+ SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
58188+}
58189+
58190+static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
58191+{
58192+ adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
58193+ &adapter->ringStatePA);
58194+ if (!adapter->rings_state)
58195+ return -ENOMEM;
58196+
58197+ adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
58198+ pvscsi_ring_pages);
58199+ adapter->req_depth = adapter->req_pages
58200+ * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
58201+ adapter->req_ring = pci_alloc_consistent(adapter->dev,
58202+ adapter->req_pages * PAGE_SIZE,
58203+ &adapter->reqRingPA);
58204+ if (!adapter->req_ring)
58205+ return -ENOMEM;
58206+
58207+ adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
58208+ pvscsi_ring_pages);
58209+ adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
58210+ adapter->cmp_pages * PAGE_SIZE,
58211+ &adapter->cmpRingPA);
58212+ if (!adapter->cmp_ring)
58213+ return -ENOMEM;
58214+
58215+ BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
58216+ BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
58217+ BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
58218+
58219+ if (!adapter->use_msg)
58220+ return 0;
58221+
58222+ adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
58223+ pvscsi_msg_ring_pages);
58224+ adapter->msg_ring = pci_alloc_consistent(adapter->dev,
58225+ adapter->msg_pages * PAGE_SIZE,
58226+ &adapter->msgRingPA);
58227+ if (!adapter->msg_ring)
58228+ return -ENOMEM;
58229+ BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
58230+
58231+ return 0;
58232+}
58233+
58234+static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
58235+{
58236+ struct PVSCSICmdDescSetupRings cmd = { 0 };
58237+ dma_addr_t base;
58238+ unsigned i;
58239+
58240+ cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
58241+ cmd.reqRingNumPages = adapter->req_pages;
58242+ cmd.cmpRingNumPages = adapter->cmp_pages;
58243+
58244+ base = adapter->reqRingPA;
58245+ for (i = 0; i < adapter->req_pages; i++) {
58246+ cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
58247+ base += PAGE_SIZE;
58248+ }
58249+
58250+ base = adapter->cmpRingPA;
58251+ for (i = 0; i < adapter->cmp_pages; i++) {
58252+ cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
58253+ base += PAGE_SIZE;
58254+ }
58255+
58256+ memset(adapter->rings_state, 0, PAGE_SIZE);
58257+ memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
58258+ memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
58259+
58260+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
58261+ &cmd, sizeof(cmd));
58262+
58263+ if (adapter->use_msg) {
58264+ struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
58265+
58266+ cmd_msg.numPages = adapter->msg_pages;
58267+
58268+ base = adapter->msgRingPA;
58269+ for (i = 0; i < adapter->msg_pages; i++) {
58270+ cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
58271+ base += PAGE_SIZE;
58272+ }
58273+ memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
58274+
58275+ pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
58276+ &cmd_msg, sizeof(cmd_msg));
58277+ }
58278+}
58279+
58280+/*
58281+ * Pull a completion descriptor off and pass the completion back
58282+ * to the SCSI mid layer.
58283+ */
58284+static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
58285+ const struct PVSCSIRingCmpDesc *e)
58286+{
58287+ struct pvscsi_ctx *ctx;
58288+ struct scsi_cmnd *cmd;
58289+ u32 btstat = e->hostStatus;
58290+ u32 sdstat = e->scsiStatus;
58291+
58292+ ctx = pvscsi_get_context(adapter, e->context);
58293+ cmd = ctx->cmd;
58294+ pvscsi_unmap_buffers(adapter, ctx);
58295+ pvscsi_release_context(adapter, ctx);
58296+ cmd->result = 0;
58297+
58298+ if (sdstat != SAM_STAT_GOOD &&
58299+ (btstat == BTSTAT_SUCCESS ||
58300+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
58301+ btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
58302+ cmd->result = (DID_OK << 16) | sdstat;
58303+ if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
58304+ cmd->result |= (DRIVER_SENSE << 24);
58305+ } else
58306+ switch (btstat) {
58307+ case BTSTAT_SUCCESS:
58308+ case BTSTAT_LINKED_COMMAND_COMPLETED:
58309+ case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
58310+ /* If everything went fine, let's move on.. */
58311+ cmd->result = (DID_OK << 16);
58312+ break;
58313+
58314+ case BTSTAT_DATARUN:
58315+ case BTSTAT_DATA_UNDERRUN:
58316+ /* Report residual data in underruns */
58317+ scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
58318+ cmd->result = (DID_ERROR << 16);
58319+ break;
58320+
58321+ case BTSTAT_SELTIMEO:
58322+ /* Our emulation returns this for non-connected devs */
58323+ cmd->result = (DID_BAD_TARGET << 16);
58324+ break;
58325+
58326+ case BTSTAT_LUNMISMATCH:
58327+ case BTSTAT_TAGREJECT:
58328+ case BTSTAT_BADMSG:
58329+ cmd->result = (DRIVER_INVALID << 24);
58330+ /* fall through */
58331+
58332+ case BTSTAT_HAHARDWARE:
58333+ case BTSTAT_INVPHASE:
58334+ case BTSTAT_HATIMEOUT:
58335+ case BTSTAT_NORESPONSE:
58336+ case BTSTAT_DISCONNECT:
58337+ case BTSTAT_HASOFTWARE:
58338+ case BTSTAT_BUSFREE:
58339+ case BTSTAT_SENSFAILED:
58340+ cmd->result |= (DID_ERROR << 16);
58341+ break;
58342+
58343+ case BTSTAT_SENTRST:
58344+ case BTSTAT_RECVRST:
58345+ case BTSTAT_BUSRESET:
58346+ cmd->result = (DID_RESET << 16);
58347+ break;
58348+
58349+ case BTSTAT_ABORTQUEUE:
58350+ cmd->result = (DID_ABORT << 16);
58351+ break;
58352+
58353+ case BTSTAT_SCSIPARITY:
58354+ cmd->result = (DID_PARITY << 16);
58355+ break;
58356+
58357+ default:
58358+ cmd->result = (DID_ERROR << 16);
58359+ scmd_printk(KERN_DEBUG, cmd,
58360+ "Unknown completion status: 0x%x\n",
58361+ btstat);
58362+ }
58363+
58364+ dev_dbg(&cmd->device->sdev_gendev,
58365+ "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
58366+ cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
58367+
58368+ cmd->scsi_done(cmd);
58369+}
58370+
58371+/*
58372+ * barrier usage : Since the PVSCSI device is emulated, there could be cases
58373+ * where we may want to serialize some accesses between the driver and the
58374+ * emulation layer. We use compiler barriers instead of the more expensive
58375+ * memory barriers because PVSCSI is only supported on X86 which has strong
58376+ * memory access ordering.
58377+ */
58378+static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
58379+{
58380+ struct PVSCSIRingsState *s = adapter->rings_state;
58381+ struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
58382+ u32 cmp_entries = s->cmpNumEntriesLog2;
58383+
58384+ while (s->cmpConsIdx != s->cmpProdIdx) {
58385+ struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
58386+ MASK(cmp_entries));
58387+ /*
58388+ * This barrier() ensures that *e is not dereferenced while
58389+ * the device emulation still writes data into the slot.
58390+ * Since the device emulation advances s->cmpProdIdx only after
58391+ * updating the slot we want to check it first.
58392+ */
58393+ barrier();
58394+ pvscsi_complete_request(adapter, e);
58395+ /*
58396+ * This barrier() ensures that compiler doesn't reorder write
58397+ * to s->cmpConsIdx before the read of (*e) inside
58398+ * pvscsi_complete_request. Otherwise, device emulation may
58399+ * overwrite *e before we had a chance to read it.
58400+ */
58401+ barrier();
58402+ s->cmpConsIdx++;
58403+ }
58404+}
58405+
58406+/*
58407+ * Translate a Linux SCSI request into a request ring entry.
58408+ */
58409+static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
58410+ struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
58411+{
58412+ struct PVSCSIRingsState *s;
58413+ struct PVSCSIRingReqDesc *e;
58414+ struct scsi_device *sdev;
58415+ u32 req_entries;
58416+
58417+ s = adapter->rings_state;
58418+ sdev = cmd->device;
58419+ req_entries = s->reqNumEntriesLog2;
58420+
58421+ /*
58422+ * If this condition holds, we might have room on the request ring, but
58423+ * we might not have room on the completion ring for the response.
58424+ * However, we have already ruled out this possibility - we would not
58425+ * have successfully allocated a context if it were true, since we only
58426+ * have one context per request entry. Check for it anyway, since it
58427+ * would be a serious bug.
58428+ */
58429+ if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
58430+ scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
58431+ "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
58432+ s->reqProdIdx, s->cmpConsIdx);
58433+ return -1;
58434+ }
58435+
58436+ e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
58437+
58438+ e->bus = sdev->channel;
58439+ e->target = sdev->id;
58440+ memset(e->lun, 0, sizeof(e->lun));
58441+ e->lun[1] = sdev->lun;
58442+
58443+ if (cmd->sense_buffer) {
58444+ ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
58445+ SCSI_SENSE_BUFFERSIZE,
58446+ PCI_DMA_FROMDEVICE);
58447+ e->senseAddr = ctx->sensePA;
58448+ e->senseLen = SCSI_SENSE_BUFFERSIZE;
58449+ } else {
58450+ e->senseLen = 0;
58451+ e->senseAddr = 0;
58452+ }
58453+ e->cdbLen = cmd->cmd_len;
58454+ e->vcpuHint = smp_processor_id();
58455+ memcpy(e->cdb, cmd->cmnd, e->cdbLen);
58456+
58457+ e->tag = SIMPLE_QUEUE_TAG;
58458+ if (sdev->tagged_supported &&
58459+ (cmd->tag == HEAD_OF_QUEUE_TAG ||
58460+ cmd->tag == ORDERED_QUEUE_TAG))
58461+ e->tag = cmd->tag;
58462+
58463+ if (cmd->sc_data_direction == DMA_FROM_DEVICE)
58464+ e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
58465+ else if (cmd->sc_data_direction == DMA_TO_DEVICE)
58466+ e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
58467+ else if (cmd->sc_data_direction == DMA_NONE)
58468+ e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
58469+ else
58470+ e->flags = 0;
58471+
58472+ pvscsi_map_buffers(adapter, ctx, cmd, e);
58473+
58474+ e->context = pvscsi_map_context(adapter, ctx);
58475+
58476+ barrier();
58477+
58478+ s->reqProdIdx++;
58479+
58480+ return 0;
58481+}
58482+
58483+static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
58484+{
58485+ struct Scsi_Host *host = cmd->device->host;
58486+ struct pvscsi_adapter *adapter = shost_priv(host);
58487+ struct pvscsi_ctx *ctx;
58488+ unsigned long flags;
58489+
58490+ spin_lock_irqsave(&adapter->hw_lock, flags);
58491+
58492+ ctx = pvscsi_acquire_context(adapter, cmd);
58493+ if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
58494+ if (ctx)
58495+ pvscsi_release_context(adapter, ctx);
58496+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
58497+ return SCSI_MLQUEUE_HOST_BUSY;
58498+ }
58499+
58500+ cmd->scsi_done = done;
58501+
58502+ dev_dbg(&cmd->device->sdev_gendev,
58503+ "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
58504+
58505+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
58506+
58507+ pvscsi_kick_io(adapter, cmd->cmnd[0]);
58508+
58509+ return 0;
58510+}
58511+
58512+static int pvscsi_abort(struct scsi_cmnd *cmd)
58513+{
58514+ struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
58515+ struct pvscsi_ctx *ctx;
58516+ unsigned long flags;
58517+
58518+ scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
58519+ adapter->host->host_no, cmd);
58520+
58521+ spin_lock_irqsave(&adapter->hw_lock, flags);
58522+
58523+ /*
58524+ * Poll the completion ring first - we might be trying to abort
58525+ * a command that is waiting to be dispatched in the completion ring.
58526+ */
58527+ pvscsi_process_completion_ring(adapter);
58528+
58529+ /*
58530+ * If there is no context for the command, it either already succeeded
58531+ * or else was never properly issued. Not our problem.
58532+ */
58533+ ctx = pvscsi_find_context(adapter, cmd);
58534+ if (!ctx) {
58535+ scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
58536+ goto out;
58537+ }
58538+
58539+ pvscsi_abort_cmd(adapter, ctx);
58540+
58541+ pvscsi_process_completion_ring(adapter);
58542+
58543+out:
58544+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
58545+ return SUCCESS;
58546+}
58547+
58548+/*
58549+ * Abort all outstanding requests. This is only safe to use if the completion
58550+ * ring will never be walked again or the device has been reset, because it
58551+ * destroys the 1-1 mapping between context field passed to emulation and our
58552+ * request structure.
58553+ */
58554+static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
58555+{
58556+ unsigned i;
58557+
58558+ for (i = 0; i < adapter->req_depth; i++) {
58559+ struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
58560+ struct scsi_cmnd *cmd = ctx->cmd;
58561+ if (cmd) {
58562+ scmd_printk(KERN_ERR, cmd,
58563+ "Forced reset on cmd %p\n", cmd);
58564+ pvscsi_unmap_buffers(adapter, ctx);
58565+ pvscsi_release_context(adapter, ctx);
58566+ cmd->result = (DID_RESET << 16);
58567+ cmd->scsi_done(cmd);
58568+ }
58569+ }
58570+}
58571+
58572+static int pvscsi_host_reset(struct scsi_cmnd *cmd)
58573+{
58574+ struct Scsi_Host *host = cmd->device->host;
58575+ struct pvscsi_adapter *adapter = shost_priv(host);
58576+ unsigned long flags;
58577+ bool use_msg;
58578+
58579+ scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
58580+
58581+ spin_lock_irqsave(&adapter->hw_lock, flags);
58582+
58583+ use_msg = adapter->use_msg;
58584+
58585+ if (use_msg) {
58586+ adapter->use_msg = 0;
58587+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
58588+
58589+ /*
58590+ * Now that we know that the ISR won't add more work on the
58591+ * workqueue we can safely flush any outstanding work.
58592+ */
58593+ flush_workqueue(adapter->workqueue);
58594+ spin_lock_irqsave(&adapter->hw_lock, flags);
58595+ }
58596+
58597+ /*
58598+ * We're going to tear down the entire ring structure and set it back
58599+ * up, so stalling new requests until all completions are flushed and
58600+ * the rings are back in place.
58601+ */
58602+
58603+ pvscsi_process_request_ring(adapter);
58604+
58605+ ll_adapter_reset(adapter);
58606+
58607+ /*
58608+ * Now process any completions. Note we do this AFTER adapter reset,
58609+ * which is strange, but stops races where completions get posted
58610+ * between processing the ring and issuing the reset. The backend will
58611+ * not touch the ring memory after reset, so the immediately pre-reset
58612+ * completion ring state is still valid.
58613+ */
58614+ pvscsi_process_completion_ring(adapter);
58615+
58616+ pvscsi_reset_all(adapter);
58617+ adapter->use_msg = use_msg;
58618+ pvscsi_setup_all_rings(adapter);
58619+ pvscsi_unmask_intr(adapter);
58620+
58621+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
58622+
58623+ return SUCCESS;
58624+}
58625+
58626+static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
58627+{
58628+ struct Scsi_Host *host = cmd->device->host;
58629+ struct pvscsi_adapter *adapter = shost_priv(host);
58630+ unsigned long flags;
58631+
58632+ scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
58633+
58634+ /*
58635+ * We don't want to queue new requests for this bus after
58636+ * flushing all pending requests to emulation, since new
58637+ * requests could then sneak in during this bus reset phase,
58638+ * so take the lock now.
58639+ */
58640+ spin_lock_irqsave(&adapter->hw_lock, flags);
58641+
58642+ pvscsi_process_request_ring(adapter);
58643+ ll_bus_reset(adapter);
58644+ pvscsi_process_completion_ring(adapter);
58645+
58646+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
58647+
58648+ return SUCCESS;
58649+}
58650+
58651+static int pvscsi_device_reset(struct scsi_cmnd *cmd)
58652+{
58653+ struct Scsi_Host *host = cmd->device->host;
58654+ struct pvscsi_adapter *adapter = shost_priv(host);
58655+ unsigned long flags;
58656+
58657+ scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
58658+ host->host_no, cmd->device->id);
58659+
58660+ /*
58661+ * We don't want to queue new requests for this device after flushing
58662+ * all pending requests to emulation, since new requests could then
58663+ * sneak in during this device reset phase, so take the lock now.
58664+ */
58665+ spin_lock_irqsave(&adapter->hw_lock, flags);
58666+
58667+ pvscsi_process_request_ring(adapter);
58668+ ll_device_reset(adapter, cmd->device->id);
58669+ pvscsi_process_completion_ring(adapter);
58670+
58671+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
58672+
58673+ return SUCCESS;
58674+}
58675+
58676+static struct scsi_host_template pvscsi_template;
58677+
58678+static const char *pvscsi_info(struct Scsi_Host *host)
58679+{
58680+ struct pvscsi_adapter *adapter = shost_priv(host);
58681+ static char buf[256];
58682+
58683+ sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
58684+ "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
58685+ adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
58686+ pvscsi_template.cmd_per_lun);
58687+
58688+ return buf;
58689+}
58690+
58691+static struct scsi_host_template pvscsi_template = {
58692+ .module = THIS_MODULE,
58693+ .name = "VMware PVSCSI Host Adapter",
58694+ .proc_name = "vmw_pvscsi",
58695+ .info = pvscsi_info,
58696+ .queuecommand = pvscsi_queue,
58697+ .this_id = -1,
58698+ .sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
58699+ .dma_boundary = UINT_MAX,
58700+ .max_sectors = 0xffff,
58701+ .use_clustering = ENABLE_CLUSTERING,
58702+ .eh_abort_handler = pvscsi_abort,
58703+ .eh_device_reset_handler = pvscsi_device_reset,
58704+ .eh_bus_reset_handler = pvscsi_bus_reset,
58705+ .eh_host_reset_handler = pvscsi_host_reset,
58706+};
58707+
58708+static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
58709+ const struct PVSCSIRingMsgDesc *e)
58710+{
58711+ struct PVSCSIRingsState *s = adapter->rings_state;
58712+ struct Scsi_Host *host = adapter->host;
58713+ struct scsi_device *sdev;
58714+
58715+ printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
58716+ e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
58717+
58718+ BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
58719+
58720+ if (e->type == PVSCSI_MSG_DEV_ADDED) {
58721+ struct PVSCSIMsgDescDevStatusChanged *desc;
58722+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
58723+
58724+ printk(KERN_INFO
58725+ "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
58726+ desc->bus, desc->target, desc->lun[1]);
58727+
58728+ if (!scsi_host_get(host))
58729+ return;
58730+
58731+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
58732+ desc->lun[1]);
58733+ if (sdev) {
58734+ printk(KERN_INFO "vmw_pvscsi: device already exists\n");
58735+ scsi_device_put(sdev);
58736+ } else
58737+ scsi_add_device(adapter->host, desc->bus,
58738+ desc->target, desc->lun[1]);
58739+
58740+ scsi_host_put(host);
58741+ } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
58742+ struct PVSCSIMsgDescDevStatusChanged *desc;
58743+ desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
58744+
58745+ printk(KERN_INFO
58746+ "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
58747+ desc->bus, desc->target, desc->lun[1]);
58748+
58749+ if (!scsi_host_get(host))
58750+ return;
58751+
58752+ sdev = scsi_device_lookup(host, desc->bus, desc->target,
58753+ desc->lun[1]);
58754+ if (sdev) {
58755+ scsi_remove_device(sdev);
58756+ scsi_device_put(sdev);
58757+ } else
58758+ printk(KERN_INFO
58759+ "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
58760+ desc->bus, desc->target, desc->lun[1]);
58761+
58762+ scsi_host_put(host);
58763+ }
58764+}
58765+
58766+static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
58767+{
58768+ struct PVSCSIRingsState *s = adapter->rings_state;
58769+
58770+ return s->msgProdIdx != s->msgConsIdx;
58771+}
58772+
58773+static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
58774+{
58775+ struct PVSCSIRingsState *s = adapter->rings_state;
58776+ struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
58777+ u32 msg_entries = s->msgNumEntriesLog2;
58778+
58779+ while (pvscsi_msg_pending(adapter)) {
58780+ struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
58781+ MASK(msg_entries));
58782+
58783+ barrier();
58784+ pvscsi_process_msg(adapter, e);
58785+ barrier();
58786+ s->msgConsIdx++;
58787+ }
58788+}
58789+
58790+static void pvscsi_msg_workqueue_handler(struct work_struct *data)
58791+{
58792+ struct pvscsi_adapter *adapter;
58793+
58794+ adapter = container_of(data, struct pvscsi_adapter, work);
58795+
58796+ pvscsi_process_msg_ring(adapter);
58797+}
58798+
58799+static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
58800+{
58801+ char name[32];
58802+
58803+ if (!pvscsi_use_msg)
58804+ return 0;
58805+
58806+ pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
58807+ PVSCSI_CMD_SETUP_MSG_RING);
58808+
58809+ if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
58810+ return 0;
58811+
58812+ snprintf(name, sizeof(name),
58813+ "vmw_pvscsi_wq_%u", adapter->host->host_no);
58814+
58815+ adapter->workqueue = create_singlethread_workqueue(name);
58816+ if (!adapter->workqueue) {
58817+ printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
58818+ return 0;
58819+ }
58820+ INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
58821+
58822+ return 1;
58823+}
58824+
58825+static irqreturn_t pvscsi_isr(int irq, void *devp)
58826+{
58827+ struct pvscsi_adapter *adapter = devp;
58828+ int handled;
58829+
58830+ if (adapter->use_msi || adapter->use_msix)
58831+ handled = true;
58832+ else {
58833+ u32 val = pvscsi_read_intr_status(adapter);
58834+ handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
58835+ if (handled)
58836+ pvscsi_write_intr_status(devp, val);
58837+ }
58838+
58839+ if (handled) {
58840+ unsigned long flags;
58841+
58842+ spin_lock_irqsave(&adapter->hw_lock, flags);
58843+
58844+ pvscsi_process_completion_ring(adapter);
58845+ if (adapter->use_msg && pvscsi_msg_pending(adapter))
58846+ queue_work(adapter->workqueue, &adapter->work);
58847+
58848+ spin_unlock_irqrestore(&adapter->hw_lock, flags);
58849+ }
58850+
58851+ return IRQ_RETVAL(handled);
58852+}
58853+
58854+static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
58855+{
58856+ struct pvscsi_ctx *ctx = adapter->cmd_map;
58857+ unsigned i;
58858+
58859+ for (i = 0; i < adapter->req_depth; ++i, ++ctx)
58860+ kfree(ctx->sgl);
58861+}
58862+
58863+static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, int *irq)
58864+{
58865+ struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
58866+ int ret;
58867+
58868+ ret = pci_enable_msix(adapter->dev, &entry, 1);
58869+ if (ret)
58870+ return ret;
58871+
58872+ *irq = entry.vector;
58873+
58874+ return 0;
58875+}
58876+
58877+static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
58878+{
58879+ if (adapter->irq) {
58880+ free_irq(adapter->irq, adapter);
58881+ adapter->irq = 0;
58882+ }
58883+ if (adapter->use_msi) {
58884+ pci_disable_msi(adapter->dev);
58885+ adapter->use_msi = 0;
58886+ } else if (adapter->use_msix) {
58887+ pci_disable_msix(adapter->dev);
58888+ adapter->use_msix = 0;
58889+ }
58890+}
58891+
58892+static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
58893+{
58894+ pvscsi_shutdown_intr(adapter);
58895+
58896+ if (adapter->workqueue)
58897+ destroy_workqueue(adapter->workqueue);
58898+
58899+ if (adapter->mmioBase)
58900+ pci_iounmap(adapter->dev, adapter->mmioBase);
58901+
58902+ pci_release_regions(adapter->dev);
58903+
58904+ if (adapter->cmd_map) {
58905+ pvscsi_free_sgls(adapter);
58906+ kfree(adapter->cmd_map);
58907+ }
58908+
58909+ if (adapter->rings_state)
58910+ pci_free_consistent(adapter->dev, PAGE_SIZE,
58911+ adapter->rings_state, adapter->ringStatePA);
58912+
58913+ if (adapter->req_ring)
58914+ pci_free_consistent(adapter->dev,
58915+ adapter->req_pages * PAGE_SIZE,
58916+ adapter->req_ring, adapter->reqRingPA);
58917+
58918+ if (adapter->cmp_ring)
58919+ pci_free_consistent(adapter->dev,
58920+ adapter->cmp_pages * PAGE_SIZE,
58921+ adapter->cmp_ring, adapter->cmpRingPA);
58922+
58923+ if (adapter->msg_ring)
58924+ pci_free_consistent(adapter->dev,
58925+ adapter->msg_pages * PAGE_SIZE,
58926+ adapter->msg_ring, adapter->msgRingPA);
58927+}
58928+
58929+/*
58930+ * Allocate scatter gather lists.
58931+ *
58932+ * These are statically allocated. Trying to be clever was not worth it.
58933+ *
58934+ * Dynamic allocation can fail, and we can't go deeep into the memory
58935+ * allocator, since we're a SCSI driver, and trying too hard to allocate
58936+ * memory might generate disk I/O. We also don't want to fail disk I/O
58937+ * in that case because we can't get an allocation - the I/O could be
58938+ * trying to swap out data to free memory. Since that is pathological,
58939+ * just use a statically allocated scatter list.
58940+ *
58941+ */
58942+static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
58943+{
58944+ struct pvscsi_ctx *ctx;
58945+ int i;
58946+
58947+ ctx = adapter->cmd_map;
58948+ BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
58949+
58950+ for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
58951+ ctx->sgl = kmalloc(SGL_SIZE, GFP_KERNEL);
58952+ ctx->sglPA = 0;
58953+ BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
58954+ if (!ctx->sgl) {
58955+ for (; i >= 0; --i, --ctx) {
58956+ kfree(ctx->sgl);
58957+ ctx->sgl = NULL;
58958+ }
58959+ return -ENOMEM;
58960+ }
58961+ }
58962+
58963+ return 0;
58964+}
58965+
58966+static int __devinit pvscsi_probe(struct pci_dev *pdev,
58967+ const struct pci_device_id *id)
58968+{
58969+ struct pvscsi_adapter *adapter;
58970+ struct Scsi_Host *host;
58971+ unsigned int i;
58972+ int error;
58973+
58974+ error = -ENODEV;
58975+
58976+ if (pci_enable_device(pdev))
58977+ return error;
58978+
58979+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
58980+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
58981+ printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
58982+ } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
58983+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
58984+ printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
58985+ } else {
58986+ printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
58987+ goto out_disable_device;
58988+ }
58989+
58990+ pvscsi_template.can_queue =
58991+ min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
58992+ PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
58993+ pvscsi_template.cmd_per_lun =
58994+ min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
58995+ host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
58996+ if (!host) {
58997+ printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
58998+ goto out_disable_device;
58999+ }
59000+
59001+ adapter = shost_priv(host);
59002+ memset(adapter, 0, sizeof(*adapter));
59003+ adapter->dev = pdev;
59004+ adapter->host = host;
59005+
59006+ spin_lock_init(&adapter->hw_lock);
59007+
59008+ host->max_channel = 0;
59009+ host->max_id = 16;
59010+ host->max_lun = 1;
59011+ host->max_cmd_len = 16;
59012+
59013+ adapter->rev = pdev->revision;
59014+
59015+ if (pci_request_regions(pdev, "vmw_pvscsi")) {
59016+ printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
59017+ goto out_free_host;
59018+ }
59019+
59020+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
59021+ if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
59022+ continue;
59023+
59024+ if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
59025+ continue;
59026+
59027+ break;
59028+ }
59029+
59030+ if (i == DEVICE_COUNT_RESOURCE) {
59031+ printk(KERN_ERR
59032+ "vmw_pvscsi: adapter has no suitable MMIO region\n");
59033+ goto out_release_resources;
59034+ }
59035+
59036+ adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
59037+
59038+ if (!adapter->mmioBase) {
59039+ printk(KERN_ERR
59040+ "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
59041+ i, PVSCSI_MEM_SPACE_SIZE);
59042+ goto out_release_resources;
59043+ }
59044+
59045+ pci_set_master(pdev);
59046+ pci_set_drvdata(pdev, host);
59047+
59048+ ll_adapter_reset(adapter);
59049+
59050+ adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
59051+
59052+ error = pvscsi_allocate_rings(adapter);
59053+ if (error) {
59054+ printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
59055+ goto out_release_resources;
59056+ }
59057+
59058+ /*
59059+ * From this point on we should reset the adapter if anything goes
59060+ * wrong.
59061+ */
59062+ pvscsi_setup_all_rings(adapter);
59063+
59064+ adapter->cmd_map = kcalloc(adapter->req_depth,
59065+ sizeof(struct pvscsi_ctx), GFP_KERNEL);
59066+ if (!adapter->cmd_map) {
59067+ printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
59068+ error = -ENOMEM;
59069+ goto out_reset_adapter;
59070+ }
59071+
59072+ INIT_LIST_HEAD(&adapter->cmd_pool);
59073+ for (i = 0; i < adapter->req_depth; i++) {
59074+ struct pvscsi_ctx *ctx = adapter->cmd_map + i;
59075+ list_add(&ctx->list, &adapter->cmd_pool);
59076+ }
59077+
59078+ error = pvscsi_allocate_sg(adapter);
59079+ if (error) {
59080+ printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
59081+ goto out_reset_adapter;
59082+ }
59083+
59084+ if (!pvscsi_disable_msix &&
59085+ pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
59086+ printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
59087+ adapter->use_msix = 1;
59088+ } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
59089+ printk(KERN_INFO "vmw_pvscsi: using MSI\n");
59090+ adapter->use_msi = 1;
59091+ adapter->irq = pdev->irq;
59092+ } else {
59093+ printk(KERN_INFO "vmw_pvscsi: using INTx\n");
59094+ adapter->irq = pdev->irq;
59095+ }
59096+
59097+ error = request_irq(adapter->irq, pvscsi_isr, IRQF_SHARED,
59098+ "vmw_pvscsi", adapter);
59099+ if (error) {
59100+ printk(KERN_ERR
59101+ "vmw_pvscsi: unable to request IRQ: %d\n", error);
59102+ adapter->irq = 0;
59103+ goto out_reset_adapter;
59104+ }
59105+
59106+ error = scsi_add_host(host, &pdev->dev);
59107+ if (error) {
59108+ printk(KERN_ERR
59109+ "vmw_pvscsi: scsi_add_host failed: %d\n", error);
59110+ goto out_reset_adapter;
59111+ }
59112+
59113+ dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
59114+ adapter->rev, host->host_no);
59115+
59116+ pvscsi_unmask_intr(adapter);
59117+
59118+ scsi_scan_host(host);
59119+
59120+ return 0;
59121+
59122+out_reset_adapter:
59123+ ll_adapter_reset(adapter);
59124+out_release_resources:
59125+ pvscsi_release_resources(adapter);
59126+out_free_host:
59127+ scsi_host_put(host);
59128+out_disable_device:
59129+ pci_set_drvdata(pdev, NULL);
59130+ pci_disable_device(pdev);
59131+
59132+ return error;
59133+}
59134+
59135+static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
59136+{
59137+ pvscsi_mask_intr(adapter);
59138+
59139+ if (adapter->workqueue)
59140+ flush_workqueue(adapter->workqueue);
59141+
59142+ pvscsi_shutdown_intr(adapter);
59143+
59144+ pvscsi_process_request_ring(adapter);
59145+ pvscsi_process_completion_ring(adapter);
59146+ ll_adapter_reset(adapter);
59147+}
59148+
59149+static void pvscsi_shutdown(struct pci_dev *dev)
59150+{
59151+ struct Scsi_Host *host = pci_get_drvdata(dev);
59152+ struct pvscsi_adapter *adapter = shost_priv(host);
59153+
59154+ __pvscsi_shutdown(adapter);
59155+}
59156+
59157+static void pvscsi_remove(struct pci_dev *pdev)
59158+{
59159+ struct Scsi_Host *host = pci_get_drvdata(pdev);
59160+ struct pvscsi_adapter *adapter = shost_priv(host);
59161+
59162+ scsi_remove_host(host);
59163+
59164+ __pvscsi_shutdown(adapter);
59165+ pvscsi_release_resources(adapter);
59166+
59167+ scsi_host_put(host);
59168+
59169+ pci_set_drvdata(pdev, NULL);
59170+ pci_disable_device(pdev);
59171+}
59172+
59173+static struct pci_driver pvscsi_pci_driver = {
59174+ .name = "vmw_pvscsi",
59175+ .id_table = pvscsi_pci_tbl,
59176+ .probe = pvscsi_probe,
59177+ .remove = __devexit_p(pvscsi_remove),
59178+ .shutdown = pvscsi_shutdown,
59179+};
59180+
59181+static int __init pvscsi_init(void)
59182+{
59183+ pr_info("%s - version %s\n",
59184+ PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
59185+ return pci_register_driver(&pvscsi_pci_driver);
59186+}
59187+
59188+static void __exit pvscsi_exit(void)
59189+{
59190+ pci_unregister_driver(&pvscsi_pci_driver);
59191+}
59192+
59193+module_init(pvscsi_init);
59194+module_exit(pvscsi_exit);
59195diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
59196new file mode 100644
59197index 0000000..62e36e7
59198--- /dev/null
59199+++ b/drivers/scsi/vmw_pvscsi.h
59200@@ -0,0 +1,397 @@
59201+/*
59202+ * VMware PVSCSI header file
59203+ *
59204+ * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
59205+ *
59206+ * This program is free software; you can redistribute it and/or modify it
59207+ * under the terms of the GNU General Public License as published by the
59208+ * Free Software Foundation; version 2 of the License and no later version.
59209+ *
59210+ * This program is distributed in the hope that it will be useful, but
59211+ * WITHOUT ANY WARRANTY; without even the implied warranty of
59212+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
59213+ * NON INFRINGEMENT. See the GNU General Public License for more
59214+ * details.
59215+ *
59216+ * You should have received a copy of the GNU General Public License
59217+ * along with this program; if not, write to the Free Software
59218+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
59219+ *
59220+ * Maintained by: Alok N Kataria <akataria@vmware.com>
59221+ *
59222+ */
59223+
59224+#ifndef _VMW_PVSCSI_H_
59225+#define _VMW_PVSCSI_H_
59226+
59227+#include <linux/types.h>
59228+
59229+#define PVSCSI_DRIVER_VERSION_STRING "1.0.1.0-k"
59230+
59231+#define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
59232+
59233+#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
59234+
59235+#define PCI_VENDOR_ID_VMWARE 0x15AD
59236+#define PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0
59237+
59238+/*
59239+ * host adapter status/error codes
59240+ */
59241+enum HostBusAdapterStatus {
59242+ BTSTAT_SUCCESS = 0x00, /* CCB complete normally with no errors */
59243+ BTSTAT_LINKED_COMMAND_COMPLETED = 0x0a,
59244+ BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG = 0x0b,
59245+ BTSTAT_DATA_UNDERRUN = 0x0c,
59246+ BTSTAT_SELTIMEO = 0x11, /* SCSI selection timeout */
59247+ BTSTAT_DATARUN = 0x12, /* data overrun/underrun */
59248+ BTSTAT_BUSFREE = 0x13, /* unexpected bus free */
59249+ BTSTAT_INVPHASE = 0x14, /* invalid bus phase or sequence requested by target */
59250+ BTSTAT_LUNMISMATCH = 0x17, /* linked CCB has different LUN from first CCB */
59251+ BTSTAT_SENSFAILED = 0x1b, /* auto request sense failed */
59252+ BTSTAT_TAGREJECT = 0x1c, /* SCSI II tagged queueing message rejected by target */
59253+ BTSTAT_BADMSG = 0x1d, /* unsupported message received by the host adapter */
59254+ BTSTAT_HAHARDWARE = 0x20, /* host adapter hardware failed */
59255+ BTSTAT_NORESPONSE = 0x21, /* target did not respond to SCSI ATN, sent a SCSI RST */
59256+ BTSTAT_SENTRST = 0x22, /* host adapter asserted a SCSI RST */
59257+ BTSTAT_RECVRST = 0x23, /* other SCSI devices asserted a SCSI RST */
59258+ BTSTAT_DISCONNECT = 0x24, /* target device reconnected improperly (w/o tag) */
59259+ BTSTAT_BUSRESET = 0x25, /* host adapter issued BUS device reset */
59260+ BTSTAT_ABORTQUEUE = 0x26, /* abort queue generated */
59261+ BTSTAT_HASOFTWARE = 0x27, /* host adapter software error */
59262+ BTSTAT_HATIMEOUT = 0x30, /* host adapter hardware timeout error */
59263+ BTSTAT_SCSIPARITY = 0x34, /* SCSI parity error detected */
59264+};
59265+
59266+/*
59267+ * Register offsets.
59268+ *
59269+ * These registers are accessible both via i/o space and mm i/o.
59270+ */
59271+
59272+enum PVSCSIRegOffset {
59273+ PVSCSI_REG_OFFSET_COMMAND = 0x0,
59274+ PVSCSI_REG_OFFSET_COMMAND_DATA = 0x4,
59275+ PVSCSI_REG_OFFSET_COMMAND_STATUS = 0x8,
59276+ PVSCSI_REG_OFFSET_LAST_STS_0 = 0x100,
59277+ PVSCSI_REG_OFFSET_LAST_STS_1 = 0x104,
59278+ PVSCSI_REG_OFFSET_LAST_STS_2 = 0x108,
59279+ PVSCSI_REG_OFFSET_LAST_STS_3 = 0x10c,
59280+ PVSCSI_REG_OFFSET_INTR_STATUS = 0x100c,
59281+ PVSCSI_REG_OFFSET_INTR_MASK = 0x2010,
59282+ PVSCSI_REG_OFFSET_KICK_NON_RW_IO = 0x3014,
59283+ PVSCSI_REG_OFFSET_DEBUG = 0x3018,
59284+ PVSCSI_REG_OFFSET_KICK_RW_IO = 0x4018,
59285+};
59286+
59287+/*
59288+ * Virtual h/w commands.
59289+ */
59290+
59291+enum PVSCSICommands {
59292+ PVSCSI_CMD_FIRST = 0, /* has to be first */
59293+
59294+ PVSCSI_CMD_ADAPTER_RESET = 1,
59295+ PVSCSI_CMD_ISSUE_SCSI = 2,
59296+ PVSCSI_CMD_SETUP_RINGS = 3,
59297+ PVSCSI_CMD_RESET_BUS = 4,
59298+ PVSCSI_CMD_RESET_DEVICE = 5,
59299+ PVSCSI_CMD_ABORT_CMD = 6,
59300+ PVSCSI_CMD_CONFIG = 7,
59301+ PVSCSI_CMD_SETUP_MSG_RING = 8,
59302+ PVSCSI_CMD_DEVICE_UNPLUG = 9,
59303+
59304+ PVSCSI_CMD_LAST = 10 /* has to be last */
59305+};
59306+
59307+/*
59308+ * Command descriptor for PVSCSI_CMD_RESET_DEVICE --
59309+ */
59310+
59311+struct PVSCSICmdDescResetDevice {
59312+ u32 target;
59313+ u8 lun[8];
59314+} __packed;
59315+
59316+/*
59317+ * Command descriptor for PVSCSI_CMD_ABORT_CMD --
59318+ *
59319+ * - currently does not support specifying the LUN.
59320+ * - _pad should be 0.
59321+ */
59322+
59323+struct PVSCSICmdDescAbortCmd {
59324+ u64 context;
59325+ u32 target;
59326+ u32 _pad;
59327+} __packed;
59328+
59329+/*
59330+ * Command descriptor for PVSCSI_CMD_SETUP_RINGS --
59331+ *
59332+ * Notes:
59333+ * - reqRingNumPages and cmpRingNumPages need to be power of two.
59334+ * - reqRingNumPages and cmpRingNumPages need to be different from 0,
59335+ * - reqRingNumPages and cmpRingNumPages need to be inferior to
59336+ * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES.
59337+ */
59338+
59339+#define PVSCSI_SETUP_RINGS_MAX_NUM_PAGES 32
59340+struct PVSCSICmdDescSetupRings {
59341+ u32 reqRingNumPages;
59342+ u32 cmpRingNumPages;
59343+ u64 ringsStatePPN;
59344+ u64 reqRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59345+ u64 cmpRingPPNs[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
59346+} __packed;
59347+
59348+/*
59349+ * Command descriptor for PVSCSI_CMD_SETUP_MSG_RING --
59350+ *
59351+ * Notes:
59352+ * - this command was not supported in the initial revision of the h/w
59353+ * interface. Before using it, you need to check that it is supported by
59354+ * writing PVSCSI_CMD_SETUP_MSG_RING to the 'command' register, then
59355+ * immediately after read the 'command status' register:
59356+ * * a value of -1 means that the cmd is NOT supported,
59357+ * * a value != -1 means that the cmd IS supported.
59358+ * If it's supported the 'command status' register should return:
59359+ * sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(u32).
59360+ * - this command should be issued _after_ the usual SETUP_RINGS so that the
59361+ * RingsState page is already setup. If not, the command is a nop.
59362+ * - numPages needs to be a power of two,
59363+ * - numPages needs to be different from 0,
59364+ * - _pad should be zero.
59365+ */
59366+
59367+#define PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES 16
59368+
59369+struct PVSCSICmdDescSetupMsgRing {
59370+ u32 numPages;
59371+ u32 _pad;
59372+ u64 ringPPNs[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
59373+} __packed;
59374+
59375+enum PVSCSIMsgType {
59376+ PVSCSI_MSG_DEV_ADDED = 0,
59377+ PVSCSI_MSG_DEV_REMOVED = 1,
59378+ PVSCSI_MSG_LAST = 2,
59379+};
59380+
59381+/*
59382+ * Msg descriptor.
59383+ *
59384+ * sizeof(struct PVSCSIRingMsgDesc) == 128.
59385+ *
59386+ * - type is of type enum PVSCSIMsgType.
59387+ * - the content of args depend on the type of event being delivered.
59388+ */
59389+
59390+struct PVSCSIRingMsgDesc {
59391+ u32 type;
59392+ u32 args[31];
59393+} __packed;
59394+
59395+struct PVSCSIMsgDescDevStatusChanged {
59396+ u32 type; /* PVSCSI_MSG_DEV _ADDED / _REMOVED */
59397+ u32 bus;
59398+ u32 target;
59399+ u8 lun[8];
59400+ u32 pad[27];
59401+} __packed;
59402+
59403+/*
59404+ * Rings state.
59405+ *
59406+ * - the fields:
59407+ * . msgProdIdx,
59408+ * . msgConsIdx,
59409+ * . msgNumEntriesLog2,
59410+ * .. are only used once the SETUP_MSG_RING cmd has been issued.
59411+ * - '_pad' helps to ensure that the msg related fields are on their own
59412+ * cache-line.
59413+ */
59414+
59415+struct PVSCSIRingsState {
59416+ u32 reqProdIdx;
59417+ u32 reqConsIdx;
59418+ u32 reqNumEntriesLog2;
59419+
59420+ u32 cmpProdIdx;
59421+ u32 cmpConsIdx;
59422+ u32 cmpNumEntriesLog2;
59423+
59424+ u8 _pad[104];
59425+
59426+ u32 msgProdIdx;
59427+ u32 msgConsIdx;
59428+ u32 msgNumEntriesLog2;
59429+} __packed;
59430+
59431+/*
59432+ * Request descriptor.
59433+ *
59434+ * sizeof(RingReqDesc) = 128
59435+ *
59436+ * - context: is a unique identifier of a command. It could normally be any
59437+ * 64bit value, however we currently store it in the serialNumber variable
59438+ * of struct SCSI_Command, so we have the following restrictions due to the
59439+ * way this field is handled in the vmkernel storage stack:
59440+ * * this value can't be 0,
59441+ * * the upper 32bit need to be 0 since serialNumber is as a u32.
59442+ * Currently tracked as PR 292060.
59443+ * - dataLen: contains the total number of bytes that need to be transferred.
59444+ * - dataAddr:
59445+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is set: dataAddr is the PA of the first
59446+ * s/g table segment, each s/g segment is entirely contained on a single
59447+ * page of physical memory,
59448+ * * if PVSCSI_FLAG_CMD_WITH_SG_LIST is NOT set, then dataAddr is the PA of
59449+ * the buffer used for the DMA transfer,
59450+ * - flags:
59451+ * * PVSCSI_FLAG_CMD_WITH_SG_LIST: see dataAddr above,
59452+ * * PVSCSI_FLAG_CMD_DIR_NONE: no DMA involved,
59453+ * * PVSCSI_FLAG_CMD_DIR_TOHOST: transfer from device to main memory,
59454+ * * PVSCSI_FLAG_CMD_DIR_TODEVICE: transfer from main memory to device,
59455+ * * PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB: reserved to handle CDBs larger than
59456+ * 16bytes. To be specified.
59457+ * - vcpuHint: vcpuId of the processor that will be most likely waiting for the
59458+ * completion of the i/o. For guest OSes that use lowest priority message
59459+ * delivery mode (such as windows), we use this "hint" to deliver the
59460+ * completion action to the proper vcpu. For now, we can use the vcpuId of
59461+ * the processor that initiated the i/o as a likely candidate for the vcpu
59462+ * that will be waiting for the completion..
59463+ * - bus should be 0: we currently only support bus 0 for now.
59464+ * - unused should be zero'd.
59465+ */
59466+
59467+#define PVSCSI_FLAG_CMD_WITH_SG_LIST (1 << 0)
59468+#define PVSCSI_FLAG_CMD_OUT_OF_BAND_CDB (1 << 1)
59469+#define PVSCSI_FLAG_CMD_DIR_NONE (1 << 2)
59470+#define PVSCSI_FLAG_CMD_DIR_TOHOST (1 << 3)
59471+#define PVSCSI_FLAG_CMD_DIR_TODEVICE (1 << 4)
59472+
59473+struct PVSCSIRingReqDesc {
59474+ u64 context;
59475+ u64 dataAddr;
59476+ u64 dataLen;
59477+ u64 senseAddr;
59478+ u32 senseLen;
59479+ u32 flags;
59480+ u8 cdb[16];
59481+ u8 cdbLen;
59482+ u8 lun[8];
59483+ u8 tag;
59484+ u8 bus;
59485+ u8 target;
59486+ u8 vcpuHint;
59487+ u8 unused[59];
59488+} __packed;
59489+
59490+/*
59491+ * Scatter-gather list management.
59492+ *
59493+ * As described above, when PVSCSI_FLAG_CMD_WITH_SG_LIST is set in the
59494+ * RingReqDesc.flags, then RingReqDesc.dataAddr is the PA of the first s/g
59495+ * table segment.
59496+ *
59497+ * - each segment of the s/g table contain a succession of struct
59498+ * PVSCSISGElement.
59499+ * - each segment is entirely contained on a single physical page of memory.
59500+ * - a "chain" s/g element has the flag PVSCSI_SGE_FLAG_CHAIN_ELEMENT set in
59501+ * PVSCSISGElement.flags and in this case:
59502+ * * addr is the PA of the next s/g segment,
59503+ * * length is undefined, assumed to be 0.
59504+ */
59505+
59506+struct PVSCSISGElement {
59507+ u64 addr;
59508+ u32 length;
59509+ u32 flags;
59510+} __packed;
59511+
59512+/*
59513+ * Completion descriptor.
59514+ *
59515+ * sizeof(RingCmpDesc) = 32
59516+ *
59517+ * - context: identifier of the command. The same thing that was specified
59518+ * under "context" as part of struct RingReqDesc at initiation time,
59519+ * - dataLen: number of bytes transferred for the actual i/o operation,
59520+ * - senseLen: number of bytes written into the sense buffer,
59521+ * - hostStatus: adapter status,
59522+ * - scsiStatus: device status,
59523+ * - _pad should be zero.
59524+ */
59525+
59526+struct PVSCSIRingCmpDesc {
59527+ u64 context;
59528+ u64 dataLen;
59529+ u32 senseLen;
59530+ u16 hostStatus;
59531+ u16 scsiStatus;
59532+ u32 _pad[2];
59533+} __packed;
59534+
59535+/*
59536+ * Interrupt status / IRQ bits.
59537+ */
59538+
59539+#define PVSCSI_INTR_CMPL_0 (1 << 0)
59540+#define PVSCSI_INTR_CMPL_1 (1 << 1)
59541+#define PVSCSI_INTR_CMPL_MASK MASK(2)
59542+
59543+#define PVSCSI_INTR_MSG_0 (1 << 2)
59544+#define PVSCSI_INTR_MSG_1 (1 << 3)
59545+#define PVSCSI_INTR_MSG_MASK (MASK(2) << 2)
59546+
59547+#define PVSCSI_INTR_ALL_SUPPORTED MASK(4)
59548+
59549+/*
59550+ * Number of MSI-X vectors supported.
59551+ */
59552+#define PVSCSI_MAX_INTRS 24
59553+
59554+/*
59555+ * Enumeration of supported MSI-X vectors
59556+ */
59557+#define PVSCSI_VECTOR_COMPLETION 0
59558+
59559+/*
59560+ * Misc constants for the rings.
59561+ */
59562+
59563+#define PVSCSI_MAX_NUM_PAGES_REQ_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
59564+#define PVSCSI_MAX_NUM_PAGES_CMP_RING PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
59565+#define PVSCSI_MAX_NUM_PAGES_MSG_RING PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES
59566+
59567+#define PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE \
59568+ (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc))
59569+
59570+#define PVSCSI_MAX_REQ_QUEUE_DEPTH \
59571+ (PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE)
59572+
59573+#define PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES 1
59574+#define PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES 1
59575+#define PVSCSI_MEM_SPACE_MISC_NUM_PAGES 2
59576+#define PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES 2
59577+#define PVSCSI_MEM_SPACE_MSIX_NUM_PAGES 2
59578+
59579+enum PVSCSIMemSpace {
59580+ PVSCSI_MEM_SPACE_COMMAND_PAGE = 0,
59581+ PVSCSI_MEM_SPACE_INTR_STATUS_PAGE = 1,
59582+ PVSCSI_MEM_SPACE_MISC_PAGE = 2,
59583+ PVSCSI_MEM_SPACE_KICK_IO_PAGE = 4,
59584+ PVSCSI_MEM_SPACE_MSIX_TABLE_PAGE = 6,
59585+ PVSCSI_MEM_SPACE_MSIX_PBA_PAGE = 7,
59586+};
59587+
59588+#define PVSCSI_MEM_SPACE_NUM_PAGES \
59589+ (PVSCSI_MEM_SPACE_COMMAND_NUM_PAGES + \
59590+ PVSCSI_MEM_SPACE_INTR_STATUS_NUM_PAGES + \
59591+ PVSCSI_MEM_SPACE_MISC_NUM_PAGES + \
59592+ PVSCSI_MEM_SPACE_KICK_IO_NUM_PAGES + \
59593+ PVSCSI_MEM_SPACE_MSIX_NUM_PAGES)
59594+
59595+#define PVSCSI_MEM_SPACE_SIZE (PVSCSI_MEM_SPACE_NUM_PAGES * PAGE_SIZE)
59596+
59597+#endif /* _VMW_PVSCSI_H_ */
59598diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
59599index eadc1ab..2d81457 100644
59600--- a/drivers/serial/kgdboc.c
59601+++ b/drivers/serial/kgdboc.c
59602@@ -18,7 +18,7 @@
59603
59604 #define MAX_CONFIG_LEN 40
59605
59606-static struct kgdb_io kgdboc_io_ops;
59607+static const struct kgdb_io kgdboc_io_ops;
59608
59609 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
59610 static int configured = -1;
59611@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
59612 module_put(THIS_MODULE);
59613 }
59614
59615-static struct kgdb_io kgdboc_io_ops = {
59616+static const struct kgdb_io kgdboc_io_ops = {
59617 .name = "kgdboc",
59618 .read_char = kgdboc_get_char,
59619 .write_char = kgdboc_put_char,
59620diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
59621index b76f246..7f41af7 100644
59622--- a/drivers/spi/spi.c
59623+++ b/drivers/spi/spi.c
59624@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
59625 EXPORT_SYMBOL_GPL(spi_sync);
59626
59627 /* portable code must never pass more than 32 bytes */
59628-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
59629+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
59630
59631 static u8 *buf;
59632
59633diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
59634index b9b37ff..19dfa23 100644
59635--- a/drivers/staging/android/binder.c
59636+++ b/drivers/staging/android/binder.c
59637@@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
59638 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
59639 }
59640
59641-static struct vm_operations_struct binder_vm_ops = {
59642+static const struct vm_operations_struct binder_vm_ops = {
59643 .open = binder_vma_open,
59644 .close = binder_vma_close,
59645 };
59646diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
59647index cda26bb..39fed3f 100644
59648--- a/drivers/staging/b3dfg/b3dfg.c
59649+++ b/drivers/staging/b3dfg/b3dfg.c
59650@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
59651 return VM_FAULT_NOPAGE;
59652 }
59653
59654-static struct vm_operations_struct b3dfg_vm_ops = {
59655+static const struct vm_operations_struct b3dfg_vm_ops = {
59656 .fault = b3dfg_vma_fault,
59657 };
59658
59659@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
59660 return r;
59661 }
59662
59663-static struct file_operations b3dfg_fops = {
59664+static const struct file_operations b3dfg_fops = {
59665 .owner = THIS_MODULE,
59666 .open = b3dfg_open,
59667 .release = b3dfg_release,
59668diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
59669index 908f25a..c9a579b 100644
59670--- a/drivers/staging/comedi/comedi_fops.c
59671+++ b/drivers/staging/comedi/comedi_fops.c
59672@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
59673 mutex_unlock(&dev->mutex);
59674 }
59675
59676-static struct vm_operations_struct comedi_vm_ops = {
59677+static const struct vm_operations_struct comedi_vm_ops = {
59678 .close = comedi_unmap,
59679 };
59680
59681diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
59682index e55a0db..577b776 100644
59683--- a/drivers/staging/dream/qdsp5/adsp_driver.c
59684+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
59685@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
59686 static dev_t adsp_devno;
59687 static struct class *adsp_class;
59688
59689-static struct file_operations adsp_fops = {
59690+static const struct file_operations adsp_fops = {
59691 .owner = THIS_MODULE,
59692 .open = adsp_open,
59693 .unlocked_ioctl = adsp_ioctl,
59694diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
59695index ad2390f..4116ee8 100644
59696--- a/drivers/staging/dream/qdsp5/audio_aac.c
59697+++ b/drivers/staging/dream/qdsp5/audio_aac.c
59698@@ -1022,7 +1022,7 @@ done:
59699 return rc;
59700 }
59701
59702-static struct file_operations audio_aac_fops = {
59703+static const struct file_operations audio_aac_fops = {
59704 .owner = THIS_MODULE,
59705 .open = audio_open,
59706 .release = audio_release,
59707diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
59708index cd818a5..870b37b 100644
59709--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
59710+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
59711@@ -833,7 +833,7 @@ done:
59712 return rc;
59713 }
59714
59715-static struct file_operations audio_amrnb_fops = {
59716+static const struct file_operations audio_amrnb_fops = {
59717 .owner = THIS_MODULE,
59718 .open = audamrnb_open,
59719 .release = audamrnb_release,
59720diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
59721index 4b43e18..cedafda 100644
59722--- a/drivers/staging/dream/qdsp5/audio_evrc.c
59723+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
59724@@ -805,7 +805,7 @@ dma_fail:
59725 return rc;
59726 }
59727
59728-static struct file_operations audio_evrc_fops = {
59729+static const struct file_operations audio_evrc_fops = {
59730 .owner = THIS_MODULE,
59731 .open = audevrc_open,
59732 .release = audevrc_release,
59733diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
59734index 3d950a2..9431118 100644
59735--- a/drivers/staging/dream/qdsp5/audio_in.c
59736+++ b/drivers/staging/dream/qdsp5/audio_in.c
59737@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
59738 return 0;
59739 }
59740
59741-static struct file_operations audio_fops = {
59742+static const struct file_operations audio_fops = {
59743 .owner = THIS_MODULE,
59744 .open = audio_in_open,
59745 .release = audio_in_release,
59746@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
59747 .unlocked_ioctl = audio_in_ioctl,
59748 };
59749
59750-static struct file_operations audpre_fops = {
59751+static const struct file_operations audpre_fops = {
59752 .owner = THIS_MODULE,
59753 .open = audpre_open,
59754 .unlocked_ioctl = audpre_ioctl,
59755diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
59756index b95574f..286c2f4 100644
59757--- a/drivers/staging/dream/qdsp5/audio_mp3.c
59758+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
59759@@ -941,7 +941,7 @@ done:
59760 return rc;
59761 }
59762
59763-static struct file_operations audio_mp3_fops = {
59764+static const struct file_operations audio_mp3_fops = {
59765 .owner = THIS_MODULE,
59766 .open = audio_open,
59767 .release = audio_release,
59768diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
59769index d1adcf6..f8f9833 100644
59770--- a/drivers/staging/dream/qdsp5/audio_out.c
59771+++ b/drivers/staging/dream/qdsp5/audio_out.c
59772@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
59773 return 0;
59774 }
59775
59776-static struct file_operations audio_fops = {
59777+static const struct file_operations audio_fops = {
59778 .owner = THIS_MODULE,
59779 .open = audio_open,
59780 .release = audio_release,
59781@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
59782 .unlocked_ioctl = audio_ioctl,
59783 };
59784
59785-static struct file_operations audpp_fops = {
59786+static const struct file_operations audpp_fops = {
59787 .owner = THIS_MODULE,
59788 .open = audpp_open,
59789 .unlocked_ioctl = audpp_ioctl,
59790diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
59791index f0f50e3..f6b9dbc 100644
59792--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
59793+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
59794@@ -816,7 +816,7 @@ err:
59795 return rc;
59796 }
59797
59798-static struct file_operations audio_qcelp_fops = {
59799+static const struct file_operations audio_qcelp_fops = {
59800 .owner = THIS_MODULE,
59801 .open = audqcelp_open,
59802 .release = audqcelp_release,
59803diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
59804index 037d7ff..5469ec3 100644
59805--- a/drivers/staging/dream/qdsp5/snd.c
59806+++ b/drivers/staging/dream/qdsp5/snd.c
59807@@ -242,7 +242,7 @@ err:
59808 return rc;
59809 }
59810
59811-static struct file_operations snd_fops = {
59812+static const struct file_operations snd_fops = {
59813 .owner = THIS_MODULE,
59814 .open = snd_open,
59815 .release = snd_release,
59816diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
59817index d4e7d88..0ea632a 100644
59818--- a/drivers/staging/dream/smd/smd_qmi.c
59819+++ b/drivers/staging/dream/smd/smd_qmi.c
59820@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
59821 return 0;
59822 }
59823
59824-static struct file_operations qmi_fops = {
59825+static const struct file_operations qmi_fops = {
59826 .owner = THIS_MODULE,
59827 .read = qmi_read,
59828 .write = qmi_write,
59829diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
59830index cd3910b..ff053d3 100644
59831--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
59832+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
59833@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
59834 return rc;
59835 }
59836
59837-static struct file_operations rpcrouter_server_fops = {
59838+static const struct file_operations rpcrouter_server_fops = {
59839 .owner = THIS_MODULE,
59840 .open = rpcrouter_open,
59841 .release = rpcrouter_release,
59842@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
59843 .unlocked_ioctl = rpcrouter_ioctl,
59844 };
59845
59846-static struct file_operations rpcrouter_router_fops = {
59847+static const struct file_operations rpcrouter_router_fops = {
59848 .owner = THIS_MODULE,
59849 .open = rpcrouter_open,
59850 .release = rpcrouter_release,
59851diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
59852index c24e4e0..07665be 100644
59853--- a/drivers/staging/dst/dcore.c
59854+++ b/drivers/staging/dst/dcore.c
59855@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
59856 return 0;
59857 }
59858
59859-static struct block_device_operations dst_blk_ops = {
59860+static const struct block_device_operations dst_blk_ops = {
59861 .open = dst_bdev_open,
59862 .release = dst_bdev_release,
59863 .owner = THIS_MODULE,
59864@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
59865 n->size = ctl->size;
59866
59867 atomic_set(&n->refcnt, 1);
59868- atomic_long_set(&n->gen, 0);
59869+ atomic_long_set_unchecked(&n->gen, 0);
59870 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
59871
59872 err = dst_node_sysfs_init(n);
59873diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
59874index 557d372..8d84422 100644
59875--- a/drivers/staging/dst/trans.c
59876+++ b/drivers/staging/dst/trans.c
59877@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
59878 t->error = 0;
59879 t->retries = 0;
59880 atomic_set(&t->refcnt, 1);
59881- t->gen = atomic_long_inc_return(&n->gen);
59882+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
59883
59884 t->enc = bio_data_dir(bio);
59885 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
59886diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
59887index 94f7752..d051514 100644
59888--- a/drivers/staging/et131x/et1310_tx.c
59889+++ b/drivers/staging/et131x/et1310_tx.c
59890@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
59891 struct net_device_stats *stats = &etdev->net_stats;
59892
59893 if (pMpTcb->Flags & fMP_DEST_BROAD)
59894- atomic_inc(&etdev->Stats.brdcstxmt);
59895+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
59896 else if (pMpTcb->Flags & fMP_DEST_MULTI)
59897- atomic_inc(&etdev->Stats.multixmt);
59898+ atomic_inc_unchecked(&etdev->Stats.multixmt);
59899 else
59900- atomic_inc(&etdev->Stats.unixmt);
59901+ atomic_inc_unchecked(&etdev->Stats.unixmt);
59902
59903 if (pMpTcb->Packet) {
59904 stats->tx_bytes += pMpTcb->Packet->len;
59905diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
59906index 1dfe06f..f469b4d 100644
59907--- a/drivers/staging/et131x/et131x_adapter.h
59908+++ b/drivers/staging/et131x/et131x_adapter.h
59909@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
59910 * operations
59911 */
59912 u32 unircv; /* # multicast packets received */
59913- atomic_t unixmt; /* # multicast packets for Tx */
59914+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
59915 u32 multircv; /* # multicast packets received */
59916- atomic_t multixmt; /* # multicast packets for Tx */
59917+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
59918 u32 brdcstrcv; /* # broadcast packets received */
59919- atomic_t brdcstxmt; /* # broadcast packets for Tx */
59920+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
59921 u32 norcvbuf; /* # Rx packets discarded */
59922 u32 noxmtbuf; /* # Tx packets discarded */
59923
59924diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
59925index 4bd353a..e28f455 100644
59926--- a/drivers/staging/go7007/go7007-v4l2.c
59927+++ b/drivers/staging/go7007/go7007-v4l2.c
59928@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
59929 return 0;
59930 }
59931
59932-static struct vm_operations_struct go7007_vm_ops = {
59933+static const struct vm_operations_struct go7007_vm_ops = {
59934 .open = go7007_vm_open,
59935 .close = go7007_vm_close,
59936 .fault = go7007_vm_fault,
59937diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
59938index 366dc95..b974d87 100644
59939--- a/drivers/staging/hv/Channel.c
59940+++ b/drivers/staging/hv/Channel.c
59941@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
59942
59943 DPRINT_ENTER(VMBUS);
59944
59945- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
59946- atomic_inc(&gVmbusConnection.NextGpadlHandle);
59947+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
59948+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
59949
59950 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
59951 ASSERT(msgInfo != NULL);
59952diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
59953index b12237f..01ae28a 100644
59954--- a/drivers/staging/hv/Hv.c
59955+++ b/drivers/staging/hv/Hv.c
59956@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
59957 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
59958 u32 outputAddressHi = outputAddress >> 32;
59959 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
59960- volatile void *hypercallPage = gHvContext.HypercallPage;
59961+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
59962
59963 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
59964 Control, Input, Output);
59965diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
59966index d089bb1..2ebc158 100644
59967--- a/drivers/staging/hv/VmbusApi.h
59968+++ b/drivers/staging/hv/VmbusApi.h
59969@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
59970 u32 *GpadlHandle);
59971 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
59972 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
59973-};
59974+} __no_const;
59975
59976 /* Base driver object */
59977 struct hv_driver {
59978diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
59979index 5a37cce..6ecc88c 100644
59980--- a/drivers/staging/hv/VmbusPrivate.h
59981+++ b/drivers/staging/hv/VmbusPrivate.h
59982@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
59983 struct VMBUS_CONNECTION {
59984 enum VMBUS_CONNECT_STATE ConnectState;
59985
59986- atomic_t NextGpadlHandle;
59987+ atomic_unchecked_t NextGpadlHandle;
59988
59989 /*
59990 * Represents channel interrupts. Each bit position represents a
59991diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
59992index 871a202..ca50ddf 100644
59993--- a/drivers/staging/hv/blkvsc_drv.c
59994+++ b/drivers/staging/hv/blkvsc_drv.c
59995@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
59996 /* The one and only one */
59997 static struct blkvsc_driver_context g_blkvsc_drv;
59998
59999-static struct block_device_operations block_ops = {
60000+static const struct block_device_operations block_ops = {
60001 .owner = THIS_MODULE,
60002 .open = blkvsc_open,
60003 .release = blkvsc_release,
60004diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
60005index 6acc49a..fbc8d46 100644
60006--- a/drivers/staging/hv/vmbus_drv.c
60007+++ b/drivers/staging/hv/vmbus_drv.c
60008@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60009 to_device_context(root_device_obj);
60010 struct device_context *child_device_ctx =
60011 to_device_context(child_device_obj);
60012- static atomic_t device_num = ATOMIC_INIT(0);
60013+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
60014
60015 DPRINT_ENTER(VMBUS_DRV);
60016
60017@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
60018
60019 /* Set the device name. Otherwise, device_register() will fail. */
60020 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
60021- atomic_inc_return(&device_num));
60022+ atomic_inc_return_unchecked(&device_num));
60023
60024 /* The new device belongs to this bus */
60025 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
60026diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
60027index d926189..17b19fd 100644
60028--- a/drivers/staging/iio/ring_generic.h
60029+++ b/drivers/staging/iio/ring_generic.h
60030@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
60031
60032 int (*is_enabled)(struct iio_ring_buffer *ring);
60033 int (*enable)(struct iio_ring_buffer *ring);
60034-};
60035+} __no_const;
60036
60037 /**
60038 * struct iio_ring_buffer - general ring buffer structure
60039diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
60040index 1b237b7..88c624e 100644
60041--- a/drivers/staging/octeon/ethernet-rx.c
60042+++ b/drivers/staging/octeon/ethernet-rx.c
60043@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60044 /* Increment RX stats for virtual ports */
60045 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
60046 #ifdef CONFIG_64BIT
60047- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
60048- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
60049+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
60050+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
60051 #else
60052- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
60053- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
60054+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
60055+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
60056 #endif
60057 }
60058 netif_receive_skb(skb);
60059@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
60060 dev->name);
60061 */
60062 #ifdef CONFIG_64BIT
60063- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
60064+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
60065 #else
60066- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
60067+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
60068 #endif
60069 dev_kfree_skb_irq(skb);
60070 }
60071diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
60072index 492c502..d9909f1 100644
60073--- a/drivers/staging/octeon/ethernet.c
60074+++ b/drivers/staging/octeon/ethernet.c
60075@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
60076 * since the RX tasklet also increments it.
60077 */
60078 #ifdef CONFIG_64BIT
60079- atomic64_add(rx_status.dropped_packets,
60080- (atomic64_t *)&priv->stats.rx_dropped);
60081+ atomic64_add_unchecked(rx_status.dropped_packets,
60082+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
60083 #else
60084- atomic_add(rx_status.dropped_packets,
60085- (atomic_t *)&priv->stats.rx_dropped);
60086+ atomic_add_unchecked(rx_status.dropped_packets,
60087+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
60088 #endif
60089 }
60090
60091diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
60092index a35bd5d..28fff45 100644
60093--- a/drivers/staging/otus/80211core/pub_zfi.h
60094+++ b/drivers/staging/otus/80211core/pub_zfi.h
60095@@ -531,7 +531,7 @@ struct zsCbFuncTbl
60096 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
60097
60098 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
60099-};
60100+} __no_const;
60101
60102 extern void zfZeroMemory(u8_t* va, u16_t length);
60103 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
60104diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
60105index c39a25f..696f5aa 100644
60106--- a/drivers/staging/panel/panel.c
60107+++ b/drivers/staging/panel/panel.c
60108@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
60109 return 0;
60110 }
60111
60112-static struct file_operations lcd_fops = {
60113+static const struct file_operations lcd_fops = {
60114 .write = lcd_write,
60115 .open = lcd_open,
60116 .release = lcd_release,
60117@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
60118 return 0;
60119 }
60120
60121-static struct file_operations keypad_fops = {
60122+static const struct file_operations keypad_fops = {
60123 .read = keypad_read, /* read */
60124 .open = keypad_open, /* open */
60125 .release = keypad_release, /* close */
60126diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
60127index 270ebcb..37e46af 100644
60128--- a/drivers/staging/phison/phison.c
60129+++ b/drivers/staging/phison/phison.c
60130@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
60131 ATA_BMDMA_SHT(DRV_NAME),
60132 };
60133
60134-static struct ata_port_operations phison_ops = {
60135+static const struct ata_port_operations phison_ops = {
60136 .inherits = &ata_bmdma_port_ops,
60137 .prereset = phison_pre_reset,
60138 };
60139diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
60140index 2eb8e3d..57616a7 100644
60141--- a/drivers/staging/poch/poch.c
60142+++ b/drivers/staging/poch/poch.c
60143@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
60144 return 0;
60145 }
60146
60147-static struct file_operations poch_fops = {
60148+static const struct file_operations poch_fops = {
60149 .owner = THIS_MODULE,
60150 .open = poch_open,
60151 .release = poch_release,
60152diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
60153index c94de31..19402bc 100644
60154--- a/drivers/staging/pohmelfs/inode.c
60155+++ b/drivers/staging/pohmelfs/inode.c
60156@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60157 mutex_init(&psb->mcache_lock);
60158 psb->mcache_root = RB_ROOT;
60159 psb->mcache_timeout = msecs_to_jiffies(5000);
60160- atomic_long_set(&psb->mcache_gen, 0);
60161+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
60162
60163 psb->trans_max_pages = 100;
60164
60165@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
60166 INIT_LIST_HEAD(&psb->crypto_ready_list);
60167 INIT_LIST_HEAD(&psb->crypto_active_list);
60168
60169- atomic_set(&psb->trans_gen, 1);
60170+ atomic_set_unchecked(&psb->trans_gen, 1);
60171 atomic_long_set(&psb->total_inodes, 0);
60172
60173 mutex_init(&psb->state_lock);
60174diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
60175index e22665c..a2a9390 100644
60176--- a/drivers/staging/pohmelfs/mcache.c
60177+++ b/drivers/staging/pohmelfs/mcache.c
60178@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
60179 m->data = data;
60180 m->start = start;
60181 m->size = size;
60182- m->gen = atomic_long_inc_return(&psb->mcache_gen);
60183+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
60184
60185 mutex_lock(&psb->mcache_lock);
60186 err = pohmelfs_mcache_insert(psb, m);
60187diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
60188index 623a07d..4035c19 100644
60189--- a/drivers/staging/pohmelfs/netfs.h
60190+++ b/drivers/staging/pohmelfs/netfs.h
60191@@ -570,14 +570,14 @@ struct pohmelfs_config;
60192 struct pohmelfs_sb {
60193 struct rb_root mcache_root;
60194 struct mutex mcache_lock;
60195- atomic_long_t mcache_gen;
60196+ atomic_long_unchecked_t mcache_gen;
60197 unsigned long mcache_timeout;
60198
60199 unsigned int idx;
60200
60201 unsigned int trans_retries;
60202
60203- atomic_t trans_gen;
60204+ atomic_unchecked_t trans_gen;
60205
60206 unsigned int crypto_attached_size;
60207 unsigned int crypto_align_size;
60208diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
60209index 36a2535..0591bf4 100644
60210--- a/drivers/staging/pohmelfs/trans.c
60211+++ b/drivers/staging/pohmelfs/trans.c
60212@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
60213 int err;
60214 struct netfs_cmd *cmd = t->iovec.iov_base;
60215
60216- t->gen = atomic_inc_return(&psb->trans_gen);
60217+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
60218
60219 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
60220 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
60221diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
60222index f890a16..509ece8 100644
60223--- a/drivers/staging/sep/sep_driver.c
60224+++ b/drivers/staging/sep/sep_driver.c
60225@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
60226 static dev_t sep_devno;
60227
60228 /* the files operations structure of the driver */
60229-static struct file_operations sep_file_operations = {
60230+static const struct file_operations sep_file_operations = {
60231 .owner = THIS_MODULE,
60232 .ioctl = sep_ioctl,
60233 .poll = sep_poll,
60234diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
60235index 5e16bc3..7655b10 100644
60236--- a/drivers/staging/usbip/usbip_common.h
60237+++ b/drivers/staging/usbip/usbip_common.h
60238@@ -374,7 +374,7 @@ struct usbip_device {
60239 void (*shutdown)(struct usbip_device *);
60240 void (*reset)(struct usbip_device *);
60241 void (*unusable)(struct usbip_device *);
60242- } eh_ops;
60243+ } __no_const eh_ops;
60244 };
60245
60246
60247diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
60248index 57f7946..d9df23d 100644
60249--- a/drivers/staging/usbip/vhci.h
60250+++ b/drivers/staging/usbip/vhci.h
60251@@ -92,7 +92,7 @@ struct vhci_hcd {
60252 unsigned resuming:1;
60253 unsigned long re_timeout;
60254
60255- atomic_t seqnum;
60256+ atomic_unchecked_t seqnum;
60257
60258 /*
60259 * NOTE:
60260diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
60261index 20cd7db..c2693ff 100644
60262--- a/drivers/staging/usbip/vhci_hcd.c
60263+++ b/drivers/staging/usbip/vhci_hcd.c
60264@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
60265 return;
60266 }
60267
60268- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
60269+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60270 if (priv->seqnum == 0xffff)
60271 usbip_uinfo("seqnum max\n");
60272
60273@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
60274 return -ENOMEM;
60275 }
60276
60277- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
60278+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
60279 if (unlink->seqnum == 0xffff)
60280 usbip_uinfo("seqnum max\n");
60281
60282@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
60283 vdev->rhport = rhport;
60284 }
60285
60286- atomic_set(&vhci->seqnum, 0);
60287+ atomic_set_unchecked(&vhci->seqnum, 0);
60288 spin_lock_init(&vhci->lock);
60289
60290
60291diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
60292index 7fd76fe..673695a 100644
60293--- a/drivers/staging/usbip/vhci_rx.c
60294+++ b/drivers/staging/usbip/vhci_rx.c
60295@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
60296 usbip_uerr("cannot find a urb of seqnum %u\n",
60297 pdu->base.seqnum);
60298 usbip_uinfo("max seqnum %d\n",
60299- atomic_read(&the_controller->seqnum));
60300+ atomic_read_unchecked(&the_controller->seqnum));
60301 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
60302 return;
60303 }
60304diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
60305index 7891288..8e31300 100644
60306--- a/drivers/staging/vme/devices/vme_user.c
60307+++ b/drivers/staging/vme/devices/vme_user.c
60308@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
60309 static int __init vme_user_probe(struct device *, int, int);
60310 static int __exit vme_user_remove(struct device *, int, int);
60311
60312-static struct file_operations vme_user_fops = {
60313+static const struct file_operations vme_user_fops = {
60314 .open = vme_user_open,
60315 .release = vme_user_release,
60316 .read = vme_user_read,
60317diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
60318index 58abf44..00c1fc8 100644
60319--- a/drivers/staging/vt6655/hostap.c
60320+++ b/drivers/staging/vt6655/hostap.c
60321@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60322 PSDevice apdev_priv;
60323 struct net_device *dev = pDevice->dev;
60324 int ret;
60325- const struct net_device_ops apdev_netdev_ops = {
60326+ net_device_ops_no_const apdev_netdev_ops = {
60327 .ndo_start_xmit = pDevice->tx_80211,
60328 };
60329
60330diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
60331index 0c8267a..db1f363 100644
60332--- a/drivers/staging/vt6656/hostap.c
60333+++ b/drivers/staging/vt6656/hostap.c
60334@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
60335 PSDevice apdev_priv;
60336 struct net_device *dev = pDevice->dev;
60337 int ret;
60338- const struct net_device_ops apdev_netdev_ops = {
60339+ net_device_ops_no_const apdev_netdev_ops = {
60340 .ndo_start_xmit = pDevice->tx_80211,
60341 };
60342
60343diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
60344index 925678b..da7f5ed 100644
60345--- a/drivers/staging/wlan-ng/hfa384x_usb.c
60346+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
60347@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
60348
60349 struct usbctlx_completor {
60350 int (*complete) (struct usbctlx_completor *);
60351-};
60352+} __no_const;
60353 typedef struct usbctlx_completor usbctlx_completor_t;
60354
60355 static int
60356diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
60357index 40de151..924f268 100644
60358--- a/drivers/telephony/ixj.c
60359+++ b/drivers/telephony/ixj.c
60360@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
60361 bool mContinue;
60362 char *pIn, *pOut;
60363
60364+ pax_track_stack();
60365+
60366 if (!SCI_Prepare(j))
60367 return 0;
60368
60369diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
60370index e941367..b631f5a 100644
60371--- a/drivers/uio/uio.c
60372+++ b/drivers/uio/uio.c
60373@@ -23,6 +23,7 @@
60374 #include <linux/string.h>
60375 #include <linux/kobject.h>
60376 #include <linux/uio_driver.h>
60377+#include <asm/local.h>
60378
60379 #define UIO_MAX_DEVICES 255
60380
60381@@ -30,10 +31,10 @@ struct uio_device {
60382 struct module *owner;
60383 struct device *dev;
60384 int minor;
60385- atomic_t event;
60386+ atomic_unchecked_t event;
60387 struct fasync_struct *async_queue;
60388 wait_queue_head_t wait;
60389- int vma_count;
60390+ local_t vma_count;
60391 struct uio_info *info;
60392 struct kobject *map_dir;
60393 struct kobject *portio_dir;
60394@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
60395 return entry->show(mem, buf);
60396 }
60397
60398-static struct sysfs_ops map_sysfs_ops = {
60399+static const struct sysfs_ops map_sysfs_ops = {
60400 .show = map_type_show,
60401 };
60402
60403@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
60404 return entry->show(port, buf);
60405 }
60406
60407-static struct sysfs_ops portio_sysfs_ops = {
60408+static const struct sysfs_ops portio_sysfs_ops = {
60409 .show = portio_type_show,
60410 };
60411
60412@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
60413 struct uio_device *idev = dev_get_drvdata(dev);
60414 if (idev)
60415 return sprintf(buf, "%u\n",
60416- (unsigned int)atomic_read(&idev->event));
60417+ (unsigned int)atomic_read_unchecked(&idev->event));
60418 else
60419 return -ENODEV;
60420 }
60421@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
60422 {
60423 struct uio_device *idev = info->uio_dev;
60424
60425- atomic_inc(&idev->event);
60426+ atomic_inc_unchecked(&idev->event);
60427 wake_up_interruptible(&idev->wait);
60428 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
60429 }
60430@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
60431 }
60432
60433 listener->dev = idev;
60434- listener->event_count = atomic_read(&idev->event);
60435+ listener->event_count = atomic_read_unchecked(&idev->event);
60436 filep->private_data = listener;
60437
60438 if (idev->info->open) {
60439@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
60440 return -EIO;
60441
60442 poll_wait(filep, &idev->wait, wait);
60443- if (listener->event_count != atomic_read(&idev->event))
60444+ if (listener->event_count != atomic_read_unchecked(&idev->event))
60445 return POLLIN | POLLRDNORM;
60446 return 0;
60447 }
60448@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
60449 do {
60450 set_current_state(TASK_INTERRUPTIBLE);
60451
60452- event_count = atomic_read(&idev->event);
60453+ event_count = atomic_read_unchecked(&idev->event);
60454 if (event_count != listener->event_count) {
60455 if (copy_to_user(buf, &event_count, count))
60456 retval = -EFAULT;
60457@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
60458 static void uio_vma_open(struct vm_area_struct *vma)
60459 {
60460 struct uio_device *idev = vma->vm_private_data;
60461- idev->vma_count++;
60462+ local_inc(&idev->vma_count);
60463 }
60464
60465 static void uio_vma_close(struct vm_area_struct *vma)
60466 {
60467 struct uio_device *idev = vma->vm_private_data;
60468- idev->vma_count--;
60469+ local_dec(&idev->vma_count);
60470 }
60471
60472 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
60473@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
60474 idev->owner = owner;
60475 idev->info = info;
60476 init_waitqueue_head(&idev->wait);
60477- atomic_set(&idev->event, 0);
60478+ atomic_set_unchecked(&idev->event, 0);
60479
60480 ret = uio_get_minor(idev);
60481 if (ret)
60482diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
60483index fbea856..06efea6 100644
60484--- a/drivers/usb/atm/usbatm.c
60485+++ b/drivers/usb/atm/usbatm.c
60486@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
60487 if (printk_ratelimit())
60488 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
60489 __func__, vpi, vci);
60490- atomic_inc(&vcc->stats->rx_err);
60491+ atomic_inc_unchecked(&vcc->stats->rx_err);
60492 return;
60493 }
60494
60495@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
60496 if (length > ATM_MAX_AAL5_PDU) {
60497 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
60498 __func__, length, vcc);
60499- atomic_inc(&vcc->stats->rx_err);
60500+ atomic_inc_unchecked(&vcc->stats->rx_err);
60501 goto out;
60502 }
60503
60504@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
60505 if (sarb->len < pdu_length) {
60506 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
60507 __func__, pdu_length, sarb->len, vcc);
60508- atomic_inc(&vcc->stats->rx_err);
60509+ atomic_inc_unchecked(&vcc->stats->rx_err);
60510 goto out;
60511 }
60512
60513 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
60514 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
60515 __func__, vcc);
60516- atomic_inc(&vcc->stats->rx_err);
60517+ atomic_inc_unchecked(&vcc->stats->rx_err);
60518 goto out;
60519 }
60520
60521@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
60522 if (printk_ratelimit())
60523 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
60524 __func__, length);
60525- atomic_inc(&vcc->stats->rx_drop);
60526+ atomic_inc_unchecked(&vcc->stats->rx_drop);
60527 goto out;
60528 }
60529
60530@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
60531
60532 vcc->push(vcc, skb);
60533
60534- atomic_inc(&vcc->stats->rx);
60535+ atomic_inc_unchecked(&vcc->stats->rx);
60536 out:
60537 skb_trim(sarb, 0);
60538 }
60539@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
60540 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
60541
60542 usbatm_pop(vcc, skb);
60543- atomic_inc(&vcc->stats->tx);
60544+ atomic_inc_unchecked(&vcc->stats->tx);
60545
60546 skb = skb_dequeue(&instance->sndqueue);
60547 }
60548@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
60549 if (!left--)
60550 return sprintf(page,
60551 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
60552- atomic_read(&atm_dev->stats.aal5.tx),
60553- atomic_read(&atm_dev->stats.aal5.tx_err),
60554- atomic_read(&atm_dev->stats.aal5.rx),
60555- atomic_read(&atm_dev->stats.aal5.rx_err),
60556- atomic_read(&atm_dev->stats.aal5.rx_drop));
60557+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
60558+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
60559+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
60560+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
60561+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
60562
60563 if (!left--) {
60564 if (instance->disconnected)
60565diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
60566index 24e6205..fe5a5d4 100644
60567--- a/drivers/usb/core/hcd.c
60568+++ b/drivers/usb/core/hcd.c
60569@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
60570
60571 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
60572
60573-struct usb_mon_operations *mon_ops;
60574+const struct usb_mon_operations *mon_ops;
60575
60576 /*
60577 * The registration is unlocked.
60578@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
60579 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
60580 */
60581
60582-int usb_mon_register (struct usb_mon_operations *ops)
60583+int usb_mon_register (const struct usb_mon_operations *ops)
60584 {
60585
60586 if (mon_ops)
60587diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
60588index bcbe104..9cfd1c6 100644
60589--- a/drivers/usb/core/hcd.h
60590+++ b/drivers/usb/core/hcd.h
60591@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
60592 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
60593
60594 struct usb_mon_operations {
60595- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
60596- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
60597- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
60598+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
60599+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
60600+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
60601 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
60602 };
60603
60604-extern struct usb_mon_operations *mon_ops;
60605+extern const struct usb_mon_operations *mon_ops;
60606
60607 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
60608 {
60609@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
60610 (*mon_ops->urb_complete)(bus, urb, status);
60611 }
60612
60613-int usb_mon_register(struct usb_mon_operations *ops);
60614+int usb_mon_register(const struct usb_mon_operations *ops);
60615 void usb_mon_deregister(void);
60616
60617 #else
60618diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
60619index 409cc94..a673bad 100644
60620--- a/drivers/usb/core/message.c
60621+++ b/drivers/usb/core/message.c
60622@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
60623 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
60624 if (buf) {
60625 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
60626- if (len > 0) {
60627- smallbuf = kmalloc(++len, GFP_NOIO);
60628+ if (len++ > 0) {
60629+ smallbuf = kmalloc(len, GFP_NOIO);
60630 if (!smallbuf)
60631 return buf;
60632 memcpy(smallbuf, buf, len);
60633diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
60634index 62ff5e7..530b74e 100644
60635--- a/drivers/usb/misc/appledisplay.c
60636+++ b/drivers/usb/misc/appledisplay.c
60637@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
60638 return pdata->msgdata[1];
60639 }
60640
60641-static struct backlight_ops appledisplay_bl_data = {
60642+static const struct backlight_ops appledisplay_bl_data = {
60643 .get_brightness = appledisplay_bl_get_brightness,
60644 .update_status = appledisplay_bl_update_status,
60645 };
60646diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
60647index e0c2db3..bd8cb66 100644
60648--- a/drivers/usb/mon/mon_main.c
60649+++ b/drivers/usb/mon/mon_main.c
60650@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
60651 /*
60652 * Ops
60653 */
60654-static struct usb_mon_operations mon_ops_0 = {
60655+static const struct usb_mon_operations mon_ops_0 = {
60656 .urb_submit = mon_submit,
60657 .urb_submit_error = mon_submit_error,
60658 .urb_complete = mon_complete,
60659diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
60660index d6bea3e..60b250e 100644
60661--- a/drivers/usb/wusbcore/wa-hc.h
60662+++ b/drivers/usb/wusbcore/wa-hc.h
60663@@ -192,7 +192,7 @@ struct wahc {
60664 struct list_head xfer_delayed_list;
60665 spinlock_t xfer_list_lock;
60666 struct work_struct xfer_work;
60667- atomic_t xfer_id_count;
60668+ atomic_unchecked_t xfer_id_count;
60669 };
60670
60671
60672@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
60673 INIT_LIST_HEAD(&wa->xfer_delayed_list);
60674 spin_lock_init(&wa->xfer_list_lock);
60675 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
60676- atomic_set(&wa->xfer_id_count, 1);
60677+ atomic_set_unchecked(&wa->xfer_id_count, 1);
60678 }
60679
60680 /**
60681diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
60682index 613a5fc..3174865 100644
60683--- a/drivers/usb/wusbcore/wa-xfer.c
60684+++ b/drivers/usb/wusbcore/wa-xfer.c
60685@@ -293,7 +293,7 @@ out:
60686 */
60687 static void wa_xfer_id_init(struct wa_xfer *xfer)
60688 {
60689- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
60690+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
60691 }
60692
60693 /*
60694diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
60695index aa42fce..f8a828c 100644
60696--- a/drivers/uwb/wlp/messages.c
60697+++ b/drivers/uwb/wlp/messages.c
60698@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
60699 size_t len = skb->len;
60700 size_t used;
60701 ssize_t result;
60702- struct wlp_nonce enonce, rnonce;
60703+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
60704 enum wlp_assc_error assc_err;
60705 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
60706 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
60707diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
60708index 0370399..6627c94 100644
60709--- a/drivers/uwb/wlp/sysfs.c
60710+++ b/drivers/uwb/wlp/sysfs.c
60711@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
60712 return ret;
60713 }
60714
60715-static
60716-struct sysfs_ops wss_sysfs_ops = {
60717+static const struct sysfs_ops wss_sysfs_ops = {
60718 .show = wlp_wss_attr_show,
60719 .store = wlp_wss_attr_store,
60720 };
60721diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
60722index 8c5e432..5ee90ea 100644
60723--- a/drivers/video/atmel_lcdfb.c
60724+++ b/drivers/video/atmel_lcdfb.c
60725@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
60726 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
60727 }
60728
60729-static struct backlight_ops atmel_lcdc_bl_ops = {
60730+static const struct backlight_ops atmel_lcdc_bl_ops = {
60731 .update_status = atmel_bl_update_status,
60732 .get_brightness = atmel_bl_get_brightness,
60733 };
60734diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
60735index e4e4d43..66bcbcc 100644
60736--- a/drivers/video/aty/aty128fb.c
60737+++ b/drivers/video/aty/aty128fb.c
60738@@ -149,7 +149,7 @@ enum {
60739 };
60740
60741 /* Must match above enum */
60742-static const char *r128_family[] __devinitdata = {
60743+static const char *r128_family[] __devinitconst = {
60744 "AGP",
60745 "PCI",
60746 "PRO AGP",
60747@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
60748 return bd->props.brightness;
60749 }
60750
60751-static struct backlight_ops aty128_bl_data = {
60752+static const struct backlight_ops aty128_bl_data = {
60753 .get_brightness = aty128_bl_get_brightness,
60754 .update_status = aty128_bl_update_status,
60755 };
60756diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
60757index 913b4a4..9295a38 100644
60758--- a/drivers/video/aty/atyfb_base.c
60759+++ b/drivers/video/aty/atyfb_base.c
60760@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
60761 return bd->props.brightness;
60762 }
60763
60764-static struct backlight_ops aty_bl_data = {
60765+static const struct backlight_ops aty_bl_data = {
60766 .get_brightness = aty_bl_get_brightness,
60767 .update_status = aty_bl_update_status,
60768 };
60769diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
60770index 1a056ad..221bd6a 100644
60771--- a/drivers/video/aty/radeon_backlight.c
60772+++ b/drivers/video/aty/radeon_backlight.c
60773@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
60774 return bd->props.brightness;
60775 }
60776
60777-static struct backlight_ops radeon_bl_data = {
60778+static const struct backlight_ops radeon_bl_data = {
60779 .get_brightness = radeon_bl_get_brightness,
60780 .update_status = radeon_bl_update_status,
60781 };
60782diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
60783index ad05da5..3cb2cb9 100644
60784--- a/drivers/video/backlight/adp5520_bl.c
60785+++ b/drivers/video/backlight/adp5520_bl.c
60786@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
60787 return error ? data->current_brightness : reg_val;
60788 }
60789
60790-static struct backlight_ops adp5520_bl_ops = {
60791+static const struct backlight_ops adp5520_bl_ops = {
60792 .update_status = adp5520_bl_update_status,
60793 .get_brightness = adp5520_bl_get_brightness,
60794 };
60795diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
60796index 2c3bdfc..d769b0b 100644
60797--- a/drivers/video/backlight/adx_bl.c
60798+++ b/drivers/video/backlight/adx_bl.c
60799@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
60800 return 1;
60801 }
60802
60803-static struct backlight_ops adx_backlight_ops = {
60804+static const struct backlight_ops adx_backlight_ops = {
60805 .options = 0,
60806 .update_status = adx_backlight_update_status,
60807 .get_brightness = adx_backlight_get_brightness,
60808diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
60809index 505c082..6b6b3cc 100644
60810--- a/drivers/video/backlight/atmel-pwm-bl.c
60811+++ b/drivers/video/backlight/atmel-pwm-bl.c
60812@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
60813 return pwm_channel_enable(&pwmbl->pwmc);
60814 }
60815
60816-static struct backlight_ops atmel_pwm_bl_ops = {
60817+static const struct backlight_ops atmel_pwm_bl_ops = {
60818 .get_brightness = atmel_pwm_bl_get_intensity,
60819 .update_status = atmel_pwm_bl_set_intensity,
60820 };
60821diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
60822index 5e20e6e..89025e6 100644
60823--- a/drivers/video/backlight/backlight.c
60824+++ b/drivers/video/backlight/backlight.c
60825@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
60826 * ERR_PTR() or a pointer to the newly allocated device.
60827 */
60828 struct backlight_device *backlight_device_register(const char *name,
60829- struct device *parent, void *devdata, struct backlight_ops *ops)
60830+ struct device *parent, void *devdata, const struct backlight_ops *ops)
60831 {
60832 struct backlight_device *new_bd;
60833 int rc;
60834diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
60835index 9677494..b4bcf80 100644
60836--- a/drivers/video/backlight/corgi_lcd.c
60837+++ b/drivers/video/backlight/corgi_lcd.c
60838@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
60839 }
60840 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
60841
60842-static struct backlight_ops corgi_bl_ops = {
60843+static const struct backlight_ops corgi_bl_ops = {
60844 .get_brightness = corgi_bl_get_intensity,
60845 .update_status = corgi_bl_update_status,
60846 };
60847diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
60848index b9fe62b..2914bf1 100644
60849--- a/drivers/video/backlight/cr_bllcd.c
60850+++ b/drivers/video/backlight/cr_bllcd.c
60851@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
60852 return intensity;
60853 }
60854
60855-static struct backlight_ops cr_backlight_ops = {
60856+static const struct backlight_ops cr_backlight_ops = {
60857 .get_brightness = cr_backlight_get_intensity,
60858 .update_status = cr_backlight_set_intensity,
60859 };
60860diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
60861index 701a108..feacfd5 100644
60862--- a/drivers/video/backlight/da903x_bl.c
60863+++ b/drivers/video/backlight/da903x_bl.c
60864@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
60865 return data->current_brightness;
60866 }
60867
60868-static struct backlight_ops da903x_backlight_ops = {
60869+static const struct backlight_ops da903x_backlight_ops = {
60870 .update_status = da903x_backlight_update_status,
60871 .get_brightness = da903x_backlight_get_brightness,
60872 };
60873diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
60874index 6d27f62..e6d348e 100644
60875--- a/drivers/video/backlight/generic_bl.c
60876+++ b/drivers/video/backlight/generic_bl.c
60877@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
60878 }
60879 EXPORT_SYMBOL(corgibl_limit_intensity);
60880
60881-static struct backlight_ops genericbl_ops = {
60882+static const struct backlight_ops genericbl_ops = {
60883 .options = BL_CORE_SUSPENDRESUME,
60884 .get_brightness = genericbl_get_intensity,
60885 .update_status = genericbl_send_intensity,
60886diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
60887index 7fb4eef..f7cc528 100644
60888--- a/drivers/video/backlight/hp680_bl.c
60889+++ b/drivers/video/backlight/hp680_bl.c
60890@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
60891 return current_intensity;
60892 }
60893
60894-static struct backlight_ops hp680bl_ops = {
60895+static const struct backlight_ops hp680bl_ops = {
60896 .get_brightness = hp680bl_get_intensity,
60897 .update_status = hp680bl_set_intensity,
60898 };
60899diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
60900index 7aed256..db9071f 100644
60901--- a/drivers/video/backlight/jornada720_bl.c
60902+++ b/drivers/video/backlight/jornada720_bl.c
60903@@ -93,7 +93,7 @@ out:
60904 return ret;
60905 }
60906
60907-static struct backlight_ops jornada_bl_ops = {
60908+static const struct backlight_ops jornada_bl_ops = {
60909 .get_brightness = jornada_bl_get_brightness,
60910 .update_status = jornada_bl_update_status,
60911 .options = BL_CORE_SUSPENDRESUME,
60912diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
60913index a38fda1..939e7b8 100644
60914--- a/drivers/video/backlight/kb3886_bl.c
60915+++ b/drivers/video/backlight/kb3886_bl.c
60916@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
60917 return kb3886bl_intensity;
60918 }
60919
60920-static struct backlight_ops kb3886bl_ops = {
60921+static const struct backlight_ops kb3886bl_ops = {
60922 .get_brightness = kb3886bl_get_intensity,
60923 .update_status = kb3886bl_send_intensity,
60924 };
60925diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
60926index 6b488b8..00a9591 100644
60927--- a/drivers/video/backlight/locomolcd.c
60928+++ b/drivers/video/backlight/locomolcd.c
60929@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
60930 return current_intensity;
60931 }
60932
60933-static struct backlight_ops locomobl_data = {
60934+static const struct backlight_ops locomobl_data = {
60935 .get_brightness = locomolcd_get_intensity,
60936 .update_status = locomolcd_set_intensity,
60937 };
60938diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
60939index 99bdfa8..3dac448 100644
60940--- a/drivers/video/backlight/mbp_nvidia_bl.c
60941+++ b/drivers/video/backlight/mbp_nvidia_bl.c
60942@@ -33,7 +33,7 @@ struct dmi_match_data {
60943 unsigned long iostart;
60944 unsigned long iolen;
60945 /* Backlight operations structure. */
60946- struct backlight_ops backlight_ops;
60947+ const struct backlight_ops backlight_ops;
60948 };
60949
60950 /* Module parameters. */
60951diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
60952index cbad67e..3cf900e 100644
60953--- a/drivers/video/backlight/omap1_bl.c
60954+++ b/drivers/video/backlight/omap1_bl.c
60955@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
60956 return bl->current_intensity;
60957 }
60958
60959-static struct backlight_ops omapbl_ops = {
60960+static const struct backlight_ops omapbl_ops = {
60961 .get_brightness = omapbl_get_intensity,
60962 .update_status = omapbl_update_status,
60963 };
60964diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
60965index 9edaf24..075786e 100644
60966--- a/drivers/video/backlight/progear_bl.c
60967+++ b/drivers/video/backlight/progear_bl.c
60968@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
60969 return intensity - HW_LEVEL_MIN;
60970 }
60971
60972-static struct backlight_ops progearbl_ops = {
60973+static const struct backlight_ops progearbl_ops = {
60974 .get_brightness = progearbl_get_intensity,
60975 .update_status = progearbl_set_intensity,
60976 };
60977diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
60978index 8871662..df9e0b3 100644
60979--- a/drivers/video/backlight/pwm_bl.c
60980+++ b/drivers/video/backlight/pwm_bl.c
60981@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
60982 return bl->props.brightness;
60983 }
60984
60985-static struct backlight_ops pwm_backlight_ops = {
60986+static const struct backlight_ops pwm_backlight_ops = {
60987 .update_status = pwm_backlight_update_status,
60988 .get_brightness = pwm_backlight_get_brightness,
60989 };
60990diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
60991index 43edbad..e14ce4d 100644
60992--- a/drivers/video/backlight/tosa_bl.c
60993+++ b/drivers/video/backlight/tosa_bl.c
60994@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
60995 return props->brightness;
60996 }
60997
60998-static struct backlight_ops bl_ops = {
60999+static const struct backlight_ops bl_ops = {
61000 .get_brightness = tosa_bl_get_brightness,
61001 .update_status = tosa_bl_update_status,
61002 };
61003diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
61004index 467bdb7..e32add3 100644
61005--- a/drivers/video/backlight/wm831x_bl.c
61006+++ b/drivers/video/backlight/wm831x_bl.c
61007@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
61008 return data->current_brightness;
61009 }
61010
61011-static struct backlight_ops wm831x_backlight_ops = {
61012+static const struct backlight_ops wm831x_backlight_ops = {
61013 .options = BL_CORE_SUSPENDRESUME,
61014 .update_status = wm831x_backlight_update_status,
61015 .get_brightness = wm831x_backlight_get_brightness,
61016diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
61017index e49ae5e..db4e6f7 100644
61018--- a/drivers/video/bf54x-lq043fb.c
61019+++ b/drivers/video/bf54x-lq043fb.c
61020@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61021 return 0;
61022 }
61023
61024-static struct backlight_ops bfin_lq043fb_bl_ops = {
61025+static const struct backlight_ops bfin_lq043fb_bl_ops = {
61026 .get_brightness = bl_get_brightness,
61027 };
61028
61029diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
61030index 2c72a7c..d523e52 100644
61031--- a/drivers/video/bfin-t350mcqb-fb.c
61032+++ b/drivers/video/bfin-t350mcqb-fb.c
61033@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
61034 return 0;
61035 }
61036
61037-static struct backlight_ops bfin_lq043fb_bl_ops = {
61038+static const struct backlight_ops bfin_lq043fb_bl_ops = {
61039 .get_brightness = bl_get_brightness,
61040 };
61041
61042diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
61043index f53b9f1..958bf4e 100644
61044--- a/drivers/video/fbcmap.c
61045+++ b/drivers/video/fbcmap.c
61046@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
61047 rc = -ENODEV;
61048 goto out;
61049 }
61050- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
61051- !info->fbops->fb_setcmap)) {
61052+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
61053 rc = -EINVAL;
61054 goto out1;
61055 }
61056diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
61057index 99bbd28..ad3829e 100644
61058--- a/drivers/video/fbmem.c
61059+++ b/drivers/video/fbmem.c
61060@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61061 image->dx += image->width + 8;
61062 }
61063 } else if (rotate == FB_ROTATE_UD) {
61064- for (x = 0; x < num && image->dx >= 0; x++) {
61065+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
61066 info->fbops->fb_imageblit(info, image);
61067 image->dx -= image->width + 8;
61068 }
61069@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
61070 image->dy += image->height + 8;
61071 }
61072 } else if (rotate == FB_ROTATE_CCW) {
61073- for (x = 0; x < num && image->dy >= 0; x++) {
61074+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
61075 info->fbops->fb_imageblit(info, image);
61076 image->dy -= image->height + 8;
61077 }
61078@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
61079 int flags = info->flags;
61080 int ret = 0;
61081
61082+ pax_track_stack();
61083+
61084 if (var->activate & FB_ACTIVATE_INV_MODE) {
61085 struct fb_videomode mode1, mode2;
61086
61087@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61088 void __user *argp = (void __user *)arg;
61089 long ret = 0;
61090
61091+ pax_track_stack();
61092+
61093 switch (cmd) {
61094 case FBIOGET_VSCREENINFO:
61095 if (!lock_fb_info(info))
61096@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
61097 return -EFAULT;
61098 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
61099 return -EINVAL;
61100- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
61101+ if (con2fb.framebuffer >= FB_MAX)
61102 return -EINVAL;
61103 if (!registered_fb[con2fb.framebuffer])
61104 request_module("fb%d", con2fb.framebuffer);
61105diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
61106index f20eff8..3e4f622 100644
61107--- a/drivers/video/geode/gx1fb_core.c
61108+++ b/drivers/video/geode/gx1fb_core.c
61109@@ -30,7 +30,7 @@ static int crt_option = 1;
61110 static char panel_option[32] = "";
61111
61112 /* Modes relevant to the GX1 (taken from modedb.c) */
61113-static const struct fb_videomode __initdata gx1_modedb[] = {
61114+static const struct fb_videomode __initconst gx1_modedb[] = {
61115 /* 640x480-60 VESA */
61116 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
61117 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
61118diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
61119index 896e53d..4d87d0b 100644
61120--- a/drivers/video/gxt4500.c
61121+++ b/drivers/video/gxt4500.c
61122@@ -156,7 +156,7 @@ struct gxt4500_par {
61123 static char *mode_option;
61124
61125 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
61126-static const struct fb_videomode defaultmode __devinitdata = {
61127+static const struct fb_videomode defaultmode __devinitconst = {
61128 .refresh = 60,
61129 .xres = 1280,
61130 .yres = 1024,
61131@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
61132 return 0;
61133 }
61134
61135-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
61136+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
61137 .id = "IBM GXT4500P",
61138 .type = FB_TYPE_PACKED_PIXELS,
61139 .visual = FB_VISUAL_PSEUDOCOLOR,
61140diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
61141index f5bedee..28c6028 100644
61142--- a/drivers/video/i810/i810_accel.c
61143+++ b/drivers/video/i810/i810_accel.c
61144@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
61145 }
61146 }
61147 printk("ringbuffer lockup!!!\n");
61148+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
61149 i810_report_error(mmio);
61150 par->dev_flags |= LOCKUP;
61151 info->pixmap.scan_align = 1;
61152diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
61153index 5743ea2..457f82c 100644
61154--- a/drivers/video/i810/i810_main.c
61155+++ b/drivers/video/i810/i810_main.c
61156@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
61157 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
61158
61159 /* PCI */
61160-static const char *i810_pci_list[] __devinitdata = {
61161+static const char *i810_pci_list[] __devinitconst = {
61162 "Intel(R) 810 Framebuffer Device" ,
61163 "Intel(R) 810-DC100 Framebuffer Device" ,
61164 "Intel(R) 810E Framebuffer Device" ,
61165diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
61166index 3c14e43..eafa544 100644
61167--- a/drivers/video/logo/logo_linux_clut224.ppm
61168+++ b/drivers/video/logo/logo_linux_clut224.ppm
61169@@ -1,1604 +1,1123 @@
61170 P3
61171-# Standard 224-color Linux logo
61172 80 80
61173 255
61174- 0 0 0 0 0 0 0 0 0 0 0 0
61175- 0 0 0 0 0 0 0 0 0 0 0 0
61176- 0 0 0 0 0 0 0 0 0 0 0 0
61177- 0 0 0 0 0 0 0 0 0 0 0 0
61178- 0 0 0 0 0 0 0 0 0 0 0 0
61179- 0 0 0 0 0 0 0 0 0 0 0 0
61180- 0 0 0 0 0 0 0 0 0 0 0 0
61181- 0 0 0 0 0 0 0 0 0 0 0 0
61182- 0 0 0 0 0 0 0 0 0 0 0 0
61183- 6 6 6 6 6 6 10 10 10 10 10 10
61184- 10 10 10 6 6 6 6 6 6 6 6 6
61185- 0 0 0 0 0 0 0 0 0 0 0 0
61186- 0 0 0 0 0 0 0 0 0 0 0 0
61187- 0 0 0 0 0 0 0 0 0 0 0 0
61188- 0 0 0 0 0 0 0 0 0 0 0 0
61189- 0 0 0 0 0 0 0 0 0 0 0 0
61190- 0 0 0 0 0 0 0 0 0 0 0 0
61191- 0 0 0 0 0 0 0 0 0 0 0 0
61192- 0 0 0 0 0 0 0 0 0 0 0 0
61193- 0 0 0 0 0 0 0 0 0 0 0 0
61194- 0 0 0 0 0 0 0 0 0 0 0 0
61195- 0 0 0 0 0 0 0 0 0 0 0 0
61196- 0 0 0 0 0 0 0 0 0 0 0 0
61197- 0 0 0 0 0 0 0 0 0 0 0 0
61198- 0 0 0 0 0 0 0 0 0 0 0 0
61199- 0 0 0 0 0 0 0 0 0 0 0 0
61200- 0 0 0 0 0 0 0 0 0 0 0 0
61201- 0 0 0 0 0 0 0 0 0 0 0 0
61202- 0 0 0 6 6 6 10 10 10 14 14 14
61203- 22 22 22 26 26 26 30 30 30 34 34 34
61204- 30 30 30 30 30 30 26 26 26 18 18 18
61205- 14 14 14 10 10 10 6 6 6 0 0 0
61206- 0 0 0 0 0 0 0 0 0 0 0 0
61207- 0 0 0 0 0 0 0 0 0 0 0 0
61208- 0 0 0 0 0 0 0 0 0 0 0 0
61209- 0 0 0 0 0 0 0 0 0 0 0 0
61210- 0 0 0 0 0 0 0 0 0 0 0 0
61211- 0 0 0 0 0 0 0 0 0 0 0 0
61212- 0 0 0 0 0 0 0 0 0 0 0 0
61213- 0 0 0 0 0 0 0 0 0 0 0 0
61214- 0 0 0 0 0 0 0 0 0 0 0 0
61215- 0 0 0 0 0 1 0 0 1 0 0 0
61216- 0 0 0 0 0 0 0 0 0 0 0 0
61217- 0 0 0 0 0 0 0 0 0 0 0 0
61218- 0 0 0 0 0 0 0 0 0 0 0 0
61219- 0 0 0 0 0 0 0 0 0 0 0 0
61220- 0 0 0 0 0 0 0 0 0 0 0 0
61221- 0 0 0 0 0 0 0 0 0 0 0 0
61222- 6 6 6 14 14 14 26 26 26 42 42 42
61223- 54 54 54 66 66 66 78 78 78 78 78 78
61224- 78 78 78 74 74 74 66 66 66 54 54 54
61225- 42 42 42 26 26 26 18 18 18 10 10 10
61226- 6 6 6 0 0 0 0 0 0 0 0 0
61227- 0 0 0 0 0 0 0 0 0 0 0 0
61228- 0 0 0 0 0 0 0 0 0 0 0 0
61229- 0 0 0 0 0 0 0 0 0 0 0 0
61230- 0 0 0 0 0 0 0 0 0 0 0 0
61231- 0 0 0 0 0 0 0 0 0 0 0 0
61232- 0 0 0 0 0 0 0 0 0 0 0 0
61233- 0 0 0 0 0 0 0 0 0 0 0 0
61234- 0 0 0 0 0 0 0 0 0 0 0 0
61235- 0 0 1 0 0 0 0 0 0 0 0 0
61236- 0 0 0 0 0 0 0 0 0 0 0 0
61237- 0 0 0 0 0 0 0 0 0 0 0 0
61238- 0 0 0 0 0 0 0 0 0 0 0 0
61239- 0 0 0 0 0 0 0 0 0 0 0 0
61240- 0 0 0 0 0 0 0 0 0 0 0 0
61241- 0 0 0 0 0 0 0 0 0 10 10 10
61242- 22 22 22 42 42 42 66 66 66 86 86 86
61243- 66 66 66 38 38 38 38 38 38 22 22 22
61244- 26 26 26 34 34 34 54 54 54 66 66 66
61245- 86 86 86 70 70 70 46 46 46 26 26 26
61246- 14 14 14 6 6 6 0 0 0 0 0 0
61247- 0 0 0 0 0 0 0 0 0 0 0 0
61248- 0 0 0 0 0 0 0 0 0 0 0 0
61249- 0 0 0 0 0 0 0 0 0 0 0 0
61250- 0 0 0 0 0 0 0 0 0 0 0 0
61251- 0 0 0 0 0 0 0 0 0 0 0 0
61252- 0 0 0 0 0 0 0 0 0 0 0 0
61253- 0 0 0 0 0 0 0 0 0 0 0 0
61254- 0 0 0 0 0 0 0 0 0 0 0 0
61255- 0 0 1 0 0 1 0 0 1 0 0 0
61256- 0 0 0 0 0 0 0 0 0 0 0 0
61257- 0 0 0 0 0 0 0 0 0 0 0 0
61258- 0 0 0 0 0 0 0 0 0 0 0 0
61259- 0 0 0 0 0 0 0 0 0 0 0 0
61260- 0 0 0 0 0 0 0 0 0 0 0 0
61261- 0 0 0 0 0 0 10 10 10 26 26 26
61262- 50 50 50 82 82 82 58 58 58 6 6 6
61263- 2 2 6 2 2 6 2 2 6 2 2 6
61264- 2 2 6 2 2 6 2 2 6 2 2 6
61265- 6 6 6 54 54 54 86 86 86 66 66 66
61266- 38 38 38 18 18 18 6 6 6 0 0 0
61267- 0 0 0 0 0 0 0 0 0 0 0 0
61268- 0 0 0 0 0 0 0 0 0 0 0 0
61269- 0 0 0 0 0 0 0 0 0 0 0 0
61270- 0 0 0 0 0 0 0 0 0 0 0 0
61271- 0 0 0 0 0 0 0 0 0 0 0 0
61272- 0 0 0 0 0 0 0 0 0 0 0 0
61273- 0 0 0 0 0 0 0 0 0 0 0 0
61274- 0 0 0 0 0 0 0 0 0 0 0 0
61275- 0 0 0 0 0 0 0 0 0 0 0 0
61276- 0 0 0 0 0 0 0 0 0 0 0 0
61277- 0 0 0 0 0 0 0 0 0 0 0 0
61278- 0 0 0 0 0 0 0 0 0 0 0 0
61279- 0 0 0 0 0 0 0 0 0 0 0 0
61280- 0 0 0 0 0 0 0 0 0 0 0 0
61281- 0 0 0 6 6 6 22 22 22 50 50 50
61282- 78 78 78 34 34 34 2 2 6 2 2 6
61283- 2 2 6 2 2 6 2 2 6 2 2 6
61284- 2 2 6 2 2 6 2 2 6 2 2 6
61285- 2 2 6 2 2 6 6 6 6 70 70 70
61286- 78 78 78 46 46 46 22 22 22 6 6 6
61287- 0 0 0 0 0 0 0 0 0 0 0 0
61288- 0 0 0 0 0 0 0 0 0 0 0 0
61289- 0 0 0 0 0 0 0 0 0 0 0 0
61290- 0 0 0 0 0 0 0 0 0 0 0 0
61291- 0 0 0 0 0 0 0 0 0 0 0 0
61292- 0 0 0 0 0 0 0 0 0 0 0 0
61293- 0 0 0 0 0 0 0 0 0 0 0 0
61294- 0 0 0 0 0 0 0 0 0 0 0 0
61295- 0 0 1 0 0 1 0 0 1 0 0 0
61296- 0 0 0 0 0 0 0 0 0 0 0 0
61297- 0 0 0 0 0 0 0 0 0 0 0 0
61298- 0 0 0 0 0 0 0 0 0 0 0 0
61299- 0 0 0 0 0 0 0 0 0 0 0 0
61300- 0 0 0 0 0 0 0 0 0 0 0 0
61301- 6 6 6 18 18 18 42 42 42 82 82 82
61302- 26 26 26 2 2 6 2 2 6 2 2 6
61303- 2 2 6 2 2 6 2 2 6 2 2 6
61304- 2 2 6 2 2 6 2 2 6 14 14 14
61305- 46 46 46 34 34 34 6 6 6 2 2 6
61306- 42 42 42 78 78 78 42 42 42 18 18 18
61307- 6 6 6 0 0 0 0 0 0 0 0 0
61308- 0 0 0 0 0 0 0 0 0 0 0 0
61309- 0 0 0 0 0 0 0 0 0 0 0 0
61310- 0 0 0 0 0 0 0 0 0 0 0 0
61311- 0 0 0 0 0 0 0 0 0 0 0 0
61312- 0 0 0 0 0 0 0 0 0 0 0 0
61313- 0 0 0 0 0 0 0 0 0 0 0 0
61314- 0 0 0 0 0 0 0 0 0 0 0 0
61315- 0 0 1 0 0 0 0 0 1 0 0 0
61316- 0 0 0 0 0 0 0 0 0 0 0 0
61317- 0 0 0 0 0 0 0 0 0 0 0 0
61318- 0 0 0 0 0 0 0 0 0 0 0 0
61319- 0 0 0 0 0 0 0 0 0 0 0 0
61320- 0 0 0 0 0 0 0 0 0 0 0 0
61321- 10 10 10 30 30 30 66 66 66 58 58 58
61322- 2 2 6 2 2 6 2 2 6 2 2 6
61323- 2 2 6 2 2 6 2 2 6 2 2 6
61324- 2 2 6 2 2 6 2 2 6 26 26 26
61325- 86 86 86 101 101 101 46 46 46 10 10 10
61326- 2 2 6 58 58 58 70 70 70 34 34 34
61327- 10 10 10 0 0 0 0 0 0 0 0 0
61328- 0 0 0 0 0 0 0 0 0 0 0 0
61329- 0 0 0 0 0 0 0 0 0 0 0 0
61330- 0 0 0 0 0 0 0 0 0 0 0 0
61331- 0 0 0 0 0 0 0 0 0 0 0 0
61332- 0 0 0 0 0 0 0 0 0 0 0 0
61333- 0 0 0 0 0 0 0 0 0 0 0 0
61334- 0 0 0 0 0 0 0 0 0 0 0 0
61335- 0 0 1 0 0 1 0 0 1 0 0 0
61336- 0 0 0 0 0 0 0 0 0 0 0 0
61337- 0 0 0 0 0 0 0 0 0 0 0 0
61338- 0 0 0 0 0 0 0 0 0 0 0 0
61339- 0 0 0 0 0 0 0 0 0 0 0 0
61340- 0 0 0 0 0 0 0 0 0 0 0 0
61341- 14 14 14 42 42 42 86 86 86 10 10 10
61342- 2 2 6 2 2 6 2 2 6 2 2 6
61343- 2 2 6 2 2 6 2 2 6 2 2 6
61344- 2 2 6 2 2 6 2 2 6 30 30 30
61345- 94 94 94 94 94 94 58 58 58 26 26 26
61346- 2 2 6 6 6 6 78 78 78 54 54 54
61347- 22 22 22 6 6 6 0 0 0 0 0 0
61348- 0 0 0 0 0 0 0 0 0 0 0 0
61349- 0 0 0 0 0 0 0 0 0 0 0 0
61350- 0 0 0 0 0 0 0 0 0 0 0 0
61351- 0 0 0 0 0 0 0 0 0 0 0 0
61352- 0 0 0 0 0 0 0 0 0 0 0 0
61353- 0 0 0 0 0 0 0 0 0 0 0 0
61354- 0 0 0 0 0 0 0 0 0 0 0 0
61355- 0 0 0 0 0 0 0 0 0 0 0 0
61356- 0 0 0 0 0 0 0 0 0 0 0 0
61357- 0 0 0 0 0 0 0 0 0 0 0 0
61358- 0 0 0 0 0 0 0 0 0 0 0 0
61359- 0 0 0 0 0 0 0 0 0 0 0 0
61360- 0 0 0 0 0 0 0 0 0 6 6 6
61361- 22 22 22 62 62 62 62 62 62 2 2 6
61362- 2 2 6 2 2 6 2 2 6 2 2 6
61363- 2 2 6 2 2 6 2 2 6 2 2 6
61364- 2 2 6 2 2 6 2 2 6 26 26 26
61365- 54 54 54 38 38 38 18 18 18 10 10 10
61366- 2 2 6 2 2 6 34 34 34 82 82 82
61367- 38 38 38 14 14 14 0 0 0 0 0 0
61368- 0 0 0 0 0 0 0 0 0 0 0 0
61369- 0 0 0 0 0 0 0 0 0 0 0 0
61370- 0 0 0 0 0 0 0 0 0 0 0 0
61371- 0 0 0 0 0 0 0 0 0 0 0 0
61372- 0 0 0 0 0 0 0 0 0 0 0 0
61373- 0 0 0 0 0 0 0 0 0 0 0 0
61374- 0 0 0 0 0 0 0 0 0 0 0 0
61375- 0 0 0 0 0 1 0 0 1 0 0 0
61376- 0 0 0 0 0 0 0 0 0 0 0 0
61377- 0 0 0 0 0 0 0 0 0 0 0 0
61378- 0 0 0 0 0 0 0 0 0 0 0 0
61379- 0 0 0 0 0 0 0 0 0 0 0 0
61380- 0 0 0 0 0 0 0 0 0 6 6 6
61381- 30 30 30 78 78 78 30 30 30 2 2 6
61382- 2 2 6 2 2 6 2 2 6 2 2 6
61383- 2 2 6 2 2 6 2 2 6 2 2 6
61384- 2 2 6 2 2 6 2 2 6 10 10 10
61385- 10 10 10 2 2 6 2 2 6 2 2 6
61386- 2 2 6 2 2 6 2 2 6 78 78 78
61387- 50 50 50 18 18 18 6 6 6 0 0 0
61388- 0 0 0 0 0 0 0 0 0 0 0 0
61389- 0 0 0 0 0 0 0 0 0 0 0 0
61390- 0 0 0 0 0 0 0 0 0 0 0 0
61391- 0 0 0 0 0 0 0 0 0 0 0 0
61392- 0 0 0 0 0 0 0 0 0 0 0 0
61393- 0 0 0 0 0 0 0 0 0 0 0 0
61394- 0 0 0 0 0 0 0 0 0 0 0 0
61395- 0 0 1 0 0 0 0 0 0 0 0 0
61396- 0 0 0 0 0 0 0 0 0 0 0 0
61397- 0 0 0 0 0 0 0 0 0 0 0 0
61398- 0 0 0 0 0 0 0 0 0 0 0 0
61399- 0 0 0 0 0 0 0 0 0 0 0 0
61400- 0 0 0 0 0 0 0 0 0 10 10 10
61401- 38 38 38 86 86 86 14 14 14 2 2 6
61402- 2 2 6 2 2 6 2 2 6 2 2 6
61403- 2 2 6 2 2 6 2 2 6 2 2 6
61404- 2 2 6 2 2 6 2 2 6 2 2 6
61405- 2 2 6 2 2 6 2 2 6 2 2 6
61406- 2 2 6 2 2 6 2 2 6 54 54 54
61407- 66 66 66 26 26 26 6 6 6 0 0 0
61408- 0 0 0 0 0 0 0 0 0 0 0 0
61409- 0 0 0 0 0 0 0 0 0 0 0 0
61410- 0 0 0 0 0 0 0 0 0 0 0 0
61411- 0 0 0 0 0 0 0 0 0 0 0 0
61412- 0 0 0 0 0 0 0 0 0 0 0 0
61413- 0 0 0 0 0 0 0 0 0 0 0 0
61414- 0 0 0 0 0 0 0 0 0 0 0 0
61415- 0 0 0 0 0 1 0 0 1 0 0 0
61416- 0 0 0 0 0 0 0 0 0 0 0 0
61417- 0 0 0 0 0 0 0 0 0 0 0 0
61418- 0 0 0 0 0 0 0 0 0 0 0 0
61419- 0 0 0 0 0 0 0 0 0 0 0 0
61420- 0 0 0 0 0 0 0 0 0 14 14 14
61421- 42 42 42 82 82 82 2 2 6 2 2 6
61422- 2 2 6 6 6 6 10 10 10 2 2 6
61423- 2 2 6 2 2 6 2 2 6 2 2 6
61424- 2 2 6 2 2 6 2 2 6 6 6 6
61425- 14 14 14 10 10 10 2 2 6 2 2 6
61426- 2 2 6 2 2 6 2 2 6 18 18 18
61427- 82 82 82 34 34 34 10 10 10 0 0 0
61428- 0 0 0 0 0 0 0 0 0 0 0 0
61429- 0 0 0 0 0 0 0 0 0 0 0 0
61430- 0 0 0 0 0 0 0 0 0 0 0 0
61431- 0 0 0 0 0 0 0 0 0 0 0 0
61432- 0 0 0 0 0 0 0 0 0 0 0 0
61433- 0 0 0 0 0 0 0 0 0 0 0 0
61434- 0 0 0 0 0 0 0 0 0 0 0 0
61435- 0 0 1 0 0 0 0 0 0 0 0 0
61436- 0 0 0 0 0 0 0 0 0 0 0 0
61437- 0 0 0 0 0 0 0 0 0 0 0 0
61438- 0 0 0 0 0 0 0 0 0 0 0 0
61439- 0 0 0 0 0 0 0 0 0 0 0 0
61440- 0 0 0 0 0 0 0 0 0 14 14 14
61441- 46 46 46 86 86 86 2 2 6 2 2 6
61442- 6 6 6 6 6 6 22 22 22 34 34 34
61443- 6 6 6 2 2 6 2 2 6 2 2 6
61444- 2 2 6 2 2 6 18 18 18 34 34 34
61445- 10 10 10 50 50 50 22 22 22 2 2 6
61446- 2 2 6 2 2 6 2 2 6 10 10 10
61447- 86 86 86 42 42 42 14 14 14 0 0 0
61448- 0 0 0 0 0 0 0 0 0 0 0 0
61449- 0 0 0 0 0 0 0 0 0 0 0 0
61450- 0 0 0 0 0 0 0 0 0 0 0 0
61451- 0 0 0 0 0 0 0 0 0 0 0 0
61452- 0 0 0 0 0 0 0 0 0 0 0 0
61453- 0 0 0 0 0 0 0 0 0 0 0 0
61454- 0 0 0 0 0 0 0 0 0 0 0 0
61455- 0 0 1 0 0 1 0 0 1 0 0 0
61456- 0 0 0 0 0 0 0 0 0 0 0 0
61457- 0 0 0 0 0 0 0 0 0 0 0 0
61458- 0 0 0 0 0 0 0 0 0 0 0 0
61459- 0 0 0 0 0 0 0 0 0 0 0 0
61460- 0 0 0 0 0 0 0 0 0 14 14 14
61461- 46 46 46 86 86 86 2 2 6 2 2 6
61462- 38 38 38 116 116 116 94 94 94 22 22 22
61463- 22 22 22 2 2 6 2 2 6 2 2 6
61464- 14 14 14 86 86 86 138 138 138 162 162 162
61465-154 154 154 38 38 38 26 26 26 6 6 6
61466- 2 2 6 2 2 6 2 2 6 2 2 6
61467- 86 86 86 46 46 46 14 14 14 0 0 0
61468- 0 0 0 0 0 0 0 0 0 0 0 0
61469- 0 0 0 0 0 0 0 0 0 0 0 0
61470- 0 0 0 0 0 0 0 0 0 0 0 0
61471- 0 0 0 0 0 0 0 0 0 0 0 0
61472- 0 0 0 0 0 0 0 0 0 0 0 0
61473- 0 0 0 0 0 0 0 0 0 0 0 0
61474- 0 0 0 0 0 0 0 0 0 0 0 0
61475- 0 0 0 0 0 0 0 0 0 0 0 0
61476- 0 0 0 0 0 0 0 0 0 0 0 0
61477- 0 0 0 0 0 0 0 0 0 0 0 0
61478- 0 0 0 0 0 0 0 0 0 0 0 0
61479- 0 0 0 0 0 0 0 0 0 0 0 0
61480- 0 0 0 0 0 0 0 0 0 14 14 14
61481- 46 46 46 86 86 86 2 2 6 14 14 14
61482-134 134 134 198 198 198 195 195 195 116 116 116
61483- 10 10 10 2 2 6 2 2 6 6 6 6
61484-101 98 89 187 187 187 210 210 210 218 218 218
61485-214 214 214 134 134 134 14 14 14 6 6 6
61486- 2 2 6 2 2 6 2 2 6 2 2 6
61487- 86 86 86 50 50 50 18 18 18 6 6 6
61488- 0 0 0 0 0 0 0 0 0 0 0 0
61489- 0 0 0 0 0 0 0 0 0 0 0 0
61490- 0 0 0 0 0 0 0 0 0 0 0 0
61491- 0 0 0 0 0 0 0 0 0 0 0 0
61492- 0 0 0 0 0 0 0 0 0 0 0 0
61493- 0 0 0 0 0 0 0 0 0 0 0 0
61494- 0 0 0 0 0 0 0 0 1 0 0 0
61495- 0 0 1 0 0 1 0 0 1 0 0 0
61496- 0 0 0 0 0 0 0 0 0 0 0 0
61497- 0 0 0 0 0 0 0 0 0 0 0 0
61498- 0 0 0 0 0 0 0 0 0 0 0 0
61499- 0 0 0 0 0 0 0 0 0 0 0 0
61500- 0 0 0 0 0 0 0 0 0 14 14 14
61501- 46 46 46 86 86 86 2 2 6 54 54 54
61502-218 218 218 195 195 195 226 226 226 246 246 246
61503- 58 58 58 2 2 6 2 2 6 30 30 30
61504-210 210 210 253 253 253 174 174 174 123 123 123
61505-221 221 221 234 234 234 74 74 74 2 2 6
61506- 2 2 6 2 2 6 2 2 6 2 2 6
61507- 70 70 70 58 58 58 22 22 22 6 6 6
61508- 0 0 0 0 0 0 0 0 0 0 0 0
61509- 0 0 0 0 0 0 0 0 0 0 0 0
61510- 0 0 0 0 0 0 0 0 0 0 0 0
61511- 0 0 0 0 0 0 0 0 0 0 0 0
61512- 0 0 0 0 0 0 0 0 0 0 0 0
61513- 0 0 0 0 0 0 0 0 0 0 0 0
61514- 0 0 0 0 0 0 0 0 0 0 0 0
61515- 0 0 0 0 0 0 0 0 0 0 0 0
61516- 0 0 0 0 0 0 0 0 0 0 0 0
61517- 0 0 0 0 0 0 0 0 0 0 0 0
61518- 0 0 0 0 0 0 0 0 0 0 0 0
61519- 0 0 0 0 0 0 0 0 0 0 0 0
61520- 0 0 0 0 0 0 0 0 0 14 14 14
61521- 46 46 46 82 82 82 2 2 6 106 106 106
61522-170 170 170 26 26 26 86 86 86 226 226 226
61523-123 123 123 10 10 10 14 14 14 46 46 46
61524-231 231 231 190 190 190 6 6 6 70 70 70
61525- 90 90 90 238 238 238 158 158 158 2 2 6
61526- 2 2 6 2 2 6 2 2 6 2 2 6
61527- 70 70 70 58 58 58 22 22 22 6 6 6
61528- 0 0 0 0 0 0 0 0 0 0 0 0
61529- 0 0 0 0 0 0 0 0 0 0 0 0
61530- 0 0 0 0 0 0 0 0 0 0 0 0
61531- 0 0 0 0 0 0 0 0 0 0 0 0
61532- 0 0 0 0 0 0 0 0 0 0 0 0
61533- 0 0 0 0 0 0 0 0 0 0 0 0
61534- 0 0 0 0 0 0 0 0 1 0 0 0
61535- 0 0 1 0 0 1 0 0 1 0 0 0
61536- 0 0 0 0 0 0 0 0 0 0 0 0
61537- 0 0 0 0 0 0 0 0 0 0 0 0
61538- 0 0 0 0 0 0 0 0 0 0 0 0
61539- 0 0 0 0 0 0 0 0 0 0 0 0
61540- 0 0 0 0 0 0 0 0 0 14 14 14
61541- 42 42 42 86 86 86 6 6 6 116 116 116
61542-106 106 106 6 6 6 70 70 70 149 149 149
61543-128 128 128 18 18 18 38 38 38 54 54 54
61544-221 221 221 106 106 106 2 2 6 14 14 14
61545- 46 46 46 190 190 190 198 198 198 2 2 6
61546- 2 2 6 2 2 6 2 2 6 2 2 6
61547- 74 74 74 62 62 62 22 22 22 6 6 6
61548- 0 0 0 0 0 0 0 0 0 0 0 0
61549- 0 0 0 0 0 0 0 0 0 0 0 0
61550- 0 0 0 0 0 0 0 0 0 0 0 0
61551- 0 0 0 0 0 0 0 0 0 0 0 0
61552- 0 0 0 0 0 0 0 0 0 0 0 0
61553- 0 0 0 0 0 0 0 0 0 0 0 0
61554- 0 0 0 0 0 0 0 0 1 0 0 0
61555- 0 0 1 0 0 0 0 0 1 0 0 0
61556- 0 0 0 0 0 0 0 0 0 0 0 0
61557- 0 0 0 0 0 0 0 0 0 0 0 0
61558- 0 0 0 0 0 0 0 0 0 0 0 0
61559- 0 0 0 0 0 0 0 0 0 0 0 0
61560- 0 0 0 0 0 0 0 0 0 14 14 14
61561- 42 42 42 94 94 94 14 14 14 101 101 101
61562-128 128 128 2 2 6 18 18 18 116 116 116
61563-118 98 46 121 92 8 121 92 8 98 78 10
61564-162 162 162 106 106 106 2 2 6 2 2 6
61565- 2 2 6 195 195 195 195 195 195 6 6 6
61566- 2 2 6 2 2 6 2 2 6 2 2 6
61567- 74 74 74 62 62 62 22 22 22 6 6 6
61568- 0 0 0 0 0 0 0 0 0 0 0 0
61569- 0 0 0 0 0 0 0 0 0 0 0 0
61570- 0 0 0 0 0 0 0 0 0 0 0 0
61571- 0 0 0 0 0 0 0 0 0 0 0 0
61572- 0 0 0 0 0 0 0 0 0 0 0 0
61573- 0 0 0 0 0 0 0 0 0 0 0 0
61574- 0 0 0 0 0 0 0 0 1 0 0 1
61575- 0 0 1 0 0 0 0 0 1 0 0 0
61576- 0 0 0 0 0 0 0 0 0 0 0 0
61577- 0 0 0 0 0 0 0 0 0 0 0 0
61578- 0 0 0 0 0 0 0 0 0 0 0 0
61579- 0 0 0 0 0 0 0 0 0 0 0 0
61580- 0 0 0 0 0 0 0 0 0 10 10 10
61581- 38 38 38 90 90 90 14 14 14 58 58 58
61582-210 210 210 26 26 26 54 38 6 154 114 10
61583-226 170 11 236 186 11 225 175 15 184 144 12
61584-215 174 15 175 146 61 37 26 9 2 2 6
61585- 70 70 70 246 246 246 138 138 138 2 2 6
61586- 2 2 6 2 2 6 2 2 6 2 2 6
61587- 70 70 70 66 66 66 26 26 26 6 6 6
61588- 0 0 0 0 0 0 0 0 0 0 0 0
61589- 0 0 0 0 0 0 0 0 0 0 0 0
61590- 0 0 0 0 0 0 0 0 0 0 0 0
61591- 0 0 0 0 0 0 0 0 0 0 0 0
61592- 0 0 0 0 0 0 0 0 0 0 0 0
61593- 0 0 0 0 0 0 0 0 0 0 0 0
61594- 0 0 0 0 0 0 0 0 0 0 0 0
61595- 0 0 0 0 0 0 0 0 0 0 0 0
61596- 0 0 0 0 0 0 0 0 0 0 0 0
61597- 0 0 0 0 0 0 0 0 0 0 0 0
61598- 0 0 0 0 0 0 0 0 0 0 0 0
61599- 0 0 0 0 0 0 0 0 0 0 0 0
61600- 0 0 0 0 0 0 0 0 0 10 10 10
61601- 38 38 38 86 86 86 14 14 14 10 10 10
61602-195 195 195 188 164 115 192 133 9 225 175 15
61603-239 182 13 234 190 10 232 195 16 232 200 30
61604-245 207 45 241 208 19 232 195 16 184 144 12
61605-218 194 134 211 206 186 42 42 42 2 2 6
61606- 2 2 6 2 2 6 2 2 6 2 2 6
61607- 50 50 50 74 74 74 30 30 30 6 6 6
61608- 0 0 0 0 0 0 0 0 0 0 0 0
61609- 0 0 0 0 0 0 0 0 0 0 0 0
61610- 0 0 0 0 0 0 0 0 0 0 0 0
61611- 0 0 0 0 0 0 0 0 0 0 0 0
61612- 0 0 0 0 0 0 0 0 0 0 0 0
61613- 0 0 0 0 0 0 0 0 0 0 0 0
61614- 0 0 0 0 0 0 0 0 0 0 0 0
61615- 0 0 0 0 0 0 0 0 0 0 0 0
61616- 0 0 0 0 0 0 0 0 0 0 0 0
61617- 0 0 0 0 0 0 0 0 0 0 0 0
61618- 0 0 0 0 0 0 0 0 0 0 0 0
61619- 0 0 0 0 0 0 0 0 0 0 0 0
61620- 0 0 0 0 0 0 0 0 0 10 10 10
61621- 34 34 34 86 86 86 14 14 14 2 2 6
61622-121 87 25 192 133 9 219 162 10 239 182 13
61623-236 186 11 232 195 16 241 208 19 244 214 54
61624-246 218 60 246 218 38 246 215 20 241 208 19
61625-241 208 19 226 184 13 121 87 25 2 2 6
61626- 2 2 6 2 2 6 2 2 6 2 2 6
61627- 50 50 50 82 82 82 34 34 34 10 10 10
61628- 0 0 0 0 0 0 0 0 0 0 0 0
61629- 0 0 0 0 0 0 0 0 0 0 0 0
61630- 0 0 0 0 0 0 0 0 0 0 0 0
61631- 0 0 0 0 0 0 0 0 0 0 0 0
61632- 0 0 0 0 0 0 0 0 0 0 0 0
61633- 0 0 0 0 0 0 0 0 0 0 0 0
61634- 0 0 0 0 0 0 0 0 0 0 0 0
61635- 0 0 0 0 0 0 0 0 0 0 0 0
61636- 0 0 0 0 0 0 0 0 0 0 0 0
61637- 0 0 0 0 0 0 0 0 0 0 0 0
61638- 0 0 0 0 0 0 0 0 0 0 0 0
61639- 0 0 0 0 0 0 0 0 0 0 0 0
61640- 0 0 0 0 0 0 0 0 0 10 10 10
61641- 34 34 34 82 82 82 30 30 30 61 42 6
61642-180 123 7 206 145 10 230 174 11 239 182 13
61643-234 190 10 238 202 15 241 208 19 246 218 74
61644-246 218 38 246 215 20 246 215 20 246 215 20
61645-226 184 13 215 174 15 184 144 12 6 6 6
61646- 2 2 6 2 2 6 2 2 6 2 2 6
61647- 26 26 26 94 94 94 42 42 42 14 14 14
61648- 0 0 0 0 0 0 0 0 0 0 0 0
61649- 0 0 0 0 0 0 0 0 0 0 0 0
61650- 0 0 0 0 0 0 0 0 0 0 0 0
61651- 0 0 0 0 0 0 0 0 0 0 0 0
61652- 0 0 0 0 0 0 0 0 0 0 0 0
61653- 0 0 0 0 0 0 0 0 0 0 0 0
61654- 0 0 0 0 0 0 0 0 0 0 0 0
61655- 0 0 0 0 0 0 0 0 0 0 0 0
61656- 0 0 0 0 0 0 0 0 0 0 0 0
61657- 0 0 0 0 0 0 0 0 0 0 0 0
61658- 0 0 0 0 0 0 0 0 0 0 0 0
61659- 0 0 0 0 0 0 0 0 0 0 0 0
61660- 0 0 0 0 0 0 0 0 0 10 10 10
61661- 30 30 30 78 78 78 50 50 50 104 69 6
61662-192 133 9 216 158 10 236 178 12 236 186 11
61663-232 195 16 241 208 19 244 214 54 245 215 43
61664-246 215 20 246 215 20 241 208 19 198 155 10
61665-200 144 11 216 158 10 156 118 10 2 2 6
61666- 2 2 6 2 2 6 2 2 6 2 2 6
61667- 6 6 6 90 90 90 54 54 54 18 18 18
61668- 6 6 6 0 0 0 0 0 0 0 0 0
61669- 0 0 0 0 0 0 0 0 0 0 0 0
61670- 0 0 0 0 0 0 0 0 0 0 0 0
61671- 0 0 0 0 0 0 0 0 0 0 0 0
61672- 0 0 0 0 0 0 0 0 0 0 0 0
61673- 0 0 0 0 0 0 0 0 0 0 0 0
61674- 0 0 0 0 0 0 0 0 0 0 0 0
61675- 0 0 0 0 0 0 0 0 0 0 0 0
61676- 0 0 0 0 0 0 0 0 0 0 0 0
61677- 0 0 0 0 0 0 0 0 0 0 0 0
61678- 0 0 0 0 0 0 0 0 0 0 0 0
61679- 0 0 0 0 0 0 0 0 0 0 0 0
61680- 0 0 0 0 0 0 0 0 0 10 10 10
61681- 30 30 30 78 78 78 46 46 46 22 22 22
61682-137 92 6 210 162 10 239 182 13 238 190 10
61683-238 202 15 241 208 19 246 215 20 246 215 20
61684-241 208 19 203 166 17 185 133 11 210 150 10
61685-216 158 10 210 150 10 102 78 10 2 2 6
61686- 6 6 6 54 54 54 14 14 14 2 2 6
61687- 2 2 6 62 62 62 74 74 74 30 30 30
61688- 10 10 10 0 0 0 0 0 0 0 0 0
61689- 0 0 0 0 0 0 0 0 0 0 0 0
61690- 0 0 0 0 0 0 0 0 0 0 0 0
61691- 0 0 0 0 0 0 0 0 0 0 0 0
61692- 0 0 0 0 0 0 0 0 0 0 0 0
61693- 0 0 0 0 0 0 0 0 0 0 0 0
61694- 0 0 0 0 0 0 0 0 0 0 0 0
61695- 0 0 0 0 0 0 0 0 0 0 0 0
61696- 0 0 0 0 0 0 0 0 0 0 0 0
61697- 0 0 0 0 0 0 0 0 0 0 0 0
61698- 0 0 0 0 0 0 0 0 0 0 0 0
61699- 0 0 0 0 0 0 0 0 0 0 0 0
61700- 0 0 0 0 0 0 0 0 0 10 10 10
61701- 34 34 34 78 78 78 50 50 50 6 6 6
61702- 94 70 30 139 102 15 190 146 13 226 184 13
61703-232 200 30 232 195 16 215 174 15 190 146 13
61704-168 122 10 192 133 9 210 150 10 213 154 11
61705-202 150 34 182 157 106 101 98 89 2 2 6
61706- 2 2 6 78 78 78 116 116 116 58 58 58
61707- 2 2 6 22 22 22 90 90 90 46 46 46
61708- 18 18 18 6 6 6 0 0 0 0 0 0
61709- 0 0 0 0 0 0 0 0 0 0 0 0
61710- 0 0 0 0 0 0 0 0 0 0 0 0
61711- 0 0 0 0 0 0 0 0 0 0 0 0
61712- 0 0 0 0 0 0 0 0 0 0 0 0
61713- 0 0 0 0 0 0 0 0 0 0 0 0
61714- 0 0 0 0 0 0 0 0 0 0 0 0
61715- 0 0 0 0 0 0 0 0 0 0 0 0
61716- 0 0 0 0 0 0 0 0 0 0 0 0
61717- 0 0 0 0 0 0 0 0 0 0 0 0
61718- 0 0 0 0 0 0 0 0 0 0 0 0
61719- 0 0 0 0 0 0 0 0 0 0 0 0
61720- 0 0 0 0 0 0 0 0 0 10 10 10
61721- 38 38 38 86 86 86 50 50 50 6 6 6
61722-128 128 128 174 154 114 156 107 11 168 122 10
61723-198 155 10 184 144 12 197 138 11 200 144 11
61724-206 145 10 206 145 10 197 138 11 188 164 115
61725-195 195 195 198 198 198 174 174 174 14 14 14
61726- 2 2 6 22 22 22 116 116 116 116 116 116
61727- 22 22 22 2 2 6 74 74 74 70 70 70
61728- 30 30 30 10 10 10 0 0 0 0 0 0
61729- 0 0 0 0 0 0 0 0 0 0 0 0
61730- 0 0 0 0 0 0 0 0 0 0 0 0
61731- 0 0 0 0 0 0 0 0 0 0 0 0
61732- 0 0 0 0 0 0 0 0 0 0 0 0
61733- 0 0 0 0 0 0 0 0 0 0 0 0
61734- 0 0 0 0 0 0 0 0 0 0 0 0
61735- 0 0 0 0 0 0 0 0 0 0 0 0
61736- 0 0 0 0 0 0 0 0 0 0 0 0
61737- 0 0 0 0 0 0 0 0 0 0 0 0
61738- 0 0 0 0 0 0 0 0 0 0 0 0
61739- 0 0 0 0 0 0 0 0 0 0 0 0
61740- 0 0 0 0 0 0 6 6 6 18 18 18
61741- 50 50 50 101 101 101 26 26 26 10 10 10
61742-138 138 138 190 190 190 174 154 114 156 107 11
61743-197 138 11 200 144 11 197 138 11 192 133 9
61744-180 123 7 190 142 34 190 178 144 187 187 187
61745-202 202 202 221 221 221 214 214 214 66 66 66
61746- 2 2 6 2 2 6 50 50 50 62 62 62
61747- 6 6 6 2 2 6 10 10 10 90 90 90
61748- 50 50 50 18 18 18 6 6 6 0 0 0
61749- 0 0 0 0 0 0 0 0 0 0 0 0
61750- 0 0 0 0 0 0 0 0 0 0 0 0
61751- 0 0 0 0 0 0 0 0 0 0 0 0
61752- 0 0 0 0 0 0 0 0 0 0 0 0
61753- 0 0 0 0 0 0 0 0 0 0 0 0
61754- 0 0 0 0 0 0 0 0 0 0 0 0
61755- 0 0 0 0 0 0 0 0 0 0 0 0
61756- 0 0 0 0 0 0 0 0 0 0 0 0
61757- 0 0 0 0 0 0 0 0 0 0 0 0
61758- 0 0 0 0 0 0 0 0 0 0 0 0
61759- 0 0 0 0 0 0 0 0 0 0 0 0
61760- 0 0 0 0 0 0 10 10 10 34 34 34
61761- 74 74 74 74 74 74 2 2 6 6 6 6
61762-144 144 144 198 198 198 190 190 190 178 166 146
61763-154 121 60 156 107 11 156 107 11 168 124 44
61764-174 154 114 187 187 187 190 190 190 210 210 210
61765-246 246 246 253 253 253 253 253 253 182 182 182
61766- 6 6 6 2 2 6 2 2 6 2 2 6
61767- 2 2 6 2 2 6 2 2 6 62 62 62
61768- 74 74 74 34 34 34 14 14 14 0 0 0
61769- 0 0 0 0 0 0 0 0 0 0 0 0
61770- 0 0 0 0 0 0 0 0 0 0 0 0
61771- 0 0 0 0 0 0 0 0 0 0 0 0
61772- 0 0 0 0 0 0 0 0 0 0 0 0
61773- 0 0 0 0 0 0 0 0 0 0 0 0
61774- 0 0 0 0 0 0 0 0 0 0 0 0
61775- 0 0 0 0 0 0 0 0 0 0 0 0
61776- 0 0 0 0 0 0 0 0 0 0 0 0
61777- 0 0 0 0 0 0 0 0 0 0 0 0
61778- 0 0 0 0 0 0 0 0 0 0 0 0
61779- 0 0 0 0 0 0 0 0 0 0 0 0
61780- 0 0 0 10 10 10 22 22 22 54 54 54
61781- 94 94 94 18 18 18 2 2 6 46 46 46
61782-234 234 234 221 221 221 190 190 190 190 190 190
61783-190 190 190 187 187 187 187 187 187 190 190 190
61784-190 190 190 195 195 195 214 214 214 242 242 242
61785-253 253 253 253 253 253 253 253 253 253 253 253
61786- 82 82 82 2 2 6 2 2 6 2 2 6
61787- 2 2 6 2 2 6 2 2 6 14 14 14
61788- 86 86 86 54 54 54 22 22 22 6 6 6
61789- 0 0 0 0 0 0 0 0 0 0 0 0
61790- 0 0 0 0 0 0 0 0 0 0 0 0
61791- 0 0 0 0 0 0 0 0 0 0 0 0
61792- 0 0 0 0 0 0 0 0 0 0 0 0
61793- 0 0 0 0 0 0 0 0 0 0 0 0
61794- 0 0 0 0 0 0 0 0 0 0 0 0
61795- 0 0 0 0 0 0 0 0 0 0 0 0
61796- 0 0 0 0 0 0 0 0 0 0 0 0
61797- 0 0 0 0 0 0 0 0 0 0 0 0
61798- 0 0 0 0 0 0 0 0 0 0 0 0
61799- 0 0 0 0 0 0 0 0 0 0 0 0
61800- 6 6 6 18 18 18 46 46 46 90 90 90
61801- 46 46 46 18 18 18 6 6 6 182 182 182
61802-253 253 253 246 246 246 206 206 206 190 190 190
61803-190 190 190 190 190 190 190 190 190 190 190 190
61804-206 206 206 231 231 231 250 250 250 253 253 253
61805-253 253 253 253 253 253 253 253 253 253 253 253
61806-202 202 202 14 14 14 2 2 6 2 2 6
61807- 2 2 6 2 2 6 2 2 6 2 2 6
61808- 42 42 42 86 86 86 42 42 42 18 18 18
61809- 6 6 6 0 0 0 0 0 0 0 0 0
61810- 0 0 0 0 0 0 0 0 0 0 0 0
61811- 0 0 0 0 0 0 0 0 0 0 0 0
61812- 0 0 0 0 0 0 0 0 0 0 0 0
61813- 0 0 0 0 0 0 0 0 0 0 0 0
61814- 0 0 0 0 0 0 0 0 0 0 0 0
61815- 0 0 0 0 0 0 0 0 0 0 0 0
61816- 0 0 0 0 0 0 0 0 0 0 0 0
61817- 0 0 0 0 0 0 0 0 0 0 0 0
61818- 0 0 0 0 0 0 0 0 0 0 0 0
61819- 0 0 0 0 0 0 0 0 0 6 6 6
61820- 14 14 14 38 38 38 74 74 74 66 66 66
61821- 2 2 6 6 6 6 90 90 90 250 250 250
61822-253 253 253 253 253 253 238 238 238 198 198 198
61823-190 190 190 190 190 190 195 195 195 221 221 221
61824-246 246 246 253 253 253 253 253 253 253 253 253
61825-253 253 253 253 253 253 253 253 253 253 253 253
61826-253 253 253 82 82 82 2 2 6 2 2 6
61827- 2 2 6 2 2 6 2 2 6 2 2 6
61828- 2 2 6 78 78 78 70 70 70 34 34 34
61829- 14 14 14 6 6 6 0 0 0 0 0 0
61830- 0 0 0 0 0 0 0 0 0 0 0 0
61831- 0 0 0 0 0 0 0 0 0 0 0 0
61832- 0 0 0 0 0 0 0 0 0 0 0 0
61833- 0 0 0 0 0 0 0 0 0 0 0 0
61834- 0 0 0 0 0 0 0 0 0 0 0 0
61835- 0 0 0 0 0 0 0 0 0 0 0 0
61836- 0 0 0 0 0 0 0 0 0 0 0 0
61837- 0 0 0 0 0 0 0 0 0 0 0 0
61838- 0 0 0 0 0 0 0 0 0 0 0 0
61839- 0 0 0 0 0 0 0 0 0 14 14 14
61840- 34 34 34 66 66 66 78 78 78 6 6 6
61841- 2 2 6 18 18 18 218 218 218 253 253 253
61842-253 253 253 253 253 253 253 253 253 246 246 246
61843-226 226 226 231 231 231 246 246 246 253 253 253
61844-253 253 253 253 253 253 253 253 253 253 253 253
61845-253 253 253 253 253 253 253 253 253 253 253 253
61846-253 253 253 178 178 178 2 2 6 2 2 6
61847- 2 2 6 2 2 6 2 2 6 2 2 6
61848- 2 2 6 18 18 18 90 90 90 62 62 62
61849- 30 30 30 10 10 10 0 0 0 0 0 0
61850- 0 0 0 0 0 0 0 0 0 0 0 0
61851- 0 0 0 0 0 0 0 0 0 0 0 0
61852- 0 0 0 0 0 0 0 0 0 0 0 0
61853- 0 0 0 0 0 0 0 0 0 0 0 0
61854- 0 0 0 0 0 0 0 0 0 0 0 0
61855- 0 0 0 0 0 0 0 0 0 0 0 0
61856- 0 0 0 0 0 0 0 0 0 0 0 0
61857- 0 0 0 0 0 0 0 0 0 0 0 0
61858- 0 0 0 0 0 0 0 0 0 0 0 0
61859- 0 0 0 0 0 0 10 10 10 26 26 26
61860- 58 58 58 90 90 90 18 18 18 2 2 6
61861- 2 2 6 110 110 110 253 253 253 253 253 253
61862-253 253 253 253 253 253 253 253 253 253 253 253
61863-250 250 250 253 253 253 253 253 253 253 253 253
61864-253 253 253 253 253 253 253 253 253 253 253 253
61865-253 253 253 253 253 253 253 253 253 253 253 253
61866-253 253 253 231 231 231 18 18 18 2 2 6
61867- 2 2 6 2 2 6 2 2 6 2 2 6
61868- 2 2 6 2 2 6 18 18 18 94 94 94
61869- 54 54 54 26 26 26 10 10 10 0 0 0
61870- 0 0 0 0 0 0 0 0 0 0 0 0
61871- 0 0 0 0 0 0 0 0 0 0 0 0
61872- 0 0 0 0 0 0 0 0 0 0 0 0
61873- 0 0 0 0 0 0 0 0 0 0 0 0
61874- 0 0 0 0 0 0 0 0 0 0 0 0
61875- 0 0 0 0 0 0 0 0 0 0 0 0
61876- 0 0 0 0 0 0 0 0 0 0 0 0
61877- 0 0 0 0 0 0 0 0 0 0 0 0
61878- 0 0 0 0 0 0 0 0 0 0 0 0
61879- 0 0 0 6 6 6 22 22 22 50 50 50
61880- 90 90 90 26 26 26 2 2 6 2 2 6
61881- 14 14 14 195 195 195 250 250 250 253 253 253
61882-253 253 253 253 253 253 253 253 253 253 253 253
61883-253 253 253 253 253 253 253 253 253 253 253 253
61884-253 253 253 253 253 253 253 253 253 253 253 253
61885-253 253 253 253 253 253 253 253 253 253 253 253
61886-250 250 250 242 242 242 54 54 54 2 2 6
61887- 2 2 6 2 2 6 2 2 6 2 2 6
61888- 2 2 6 2 2 6 2 2 6 38 38 38
61889- 86 86 86 50 50 50 22 22 22 6 6 6
61890- 0 0 0 0 0 0 0 0 0 0 0 0
61891- 0 0 0 0 0 0 0 0 0 0 0 0
61892- 0 0 0 0 0 0 0 0 0 0 0 0
61893- 0 0 0 0 0 0 0 0 0 0 0 0
61894- 0 0 0 0 0 0 0 0 0 0 0 0
61895- 0 0 0 0 0 0 0 0 0 0 0 0
61896- 0 0 0 0 0 0 0 0 0 0 0 0
61897- 0 0 0 0 0 0 0 0 0 0 0 0
61898- 0 0 0 0 0 0 0 0 0 0 0 0
61899- 6 6 6 14 14 14 38 38 38 82 82 82
61900- 34 34 34 2 2 6 2 2 6 2 2 6
61901- 42 42 42 195 195 195 246 246 246 253 253 253
61902-253 253 253 253 253 253 253 253 253 250 250 250
61903-242 242 242 242 242 242 250 250 250 253 253 253
61904-253 253 253 253 253 253 253 253 253 253 253 253
61905-253 253 253 250 250 250 246 246 246 238 238 238
61906-226 226 226 231 231 231 101 101 101 6 6 6
61907- 2 2 6 2 2 6 2 2 6 2 2 6
61908- 2 2 6 2 2 6 2 2 6 2 2 6
61909- 38 38 38 82 82 82 42 42 42 14 14 14
61910- 6 6 6 0 0 0 0 0 0 0 0 0
61911- 0 0 0 0 0 0 0 0 0 0 0 0
61912- 0 0 0 0 0 0 0 0 0 0 0 0
61913- 0 0 0 0 0 0 0 0 0 0 0 0
61914- 0 0 0 0 0 0 0 0 0 0 0 0
61915- 0 0 0 0 0 0 0 0 0 0 0 0
61916- 0 0 0 0 0 0 0 0 0 0 0 0
61917- 0 0 0 0 0 0 0 0 0 0 0 0
61918- 0 0 0 0 0 0 0 0 0 0 0 0
61919- 10 10 10 26 26 26 62 62 62 66 66 66
61920- 2 2 6 2 2 6 2 2 6 6 6 6
61921- 70 70 70 170 170 170 206 206 206 234 234 234
61922-246 246 246 250 250 250 250 250 250 238 238 238
61923-226 226 226 231 231 231 238 238 238 250 250 250
61924-250 250 250 250 250 250 246 246 246 231 231 231
61925-214 214 214 206 206 206 202 202 202 202 202 202
61926-198 198 198 202 202 202 182 182 182 18 18 18
61927- 2 2 6 2 2 6 2 2 6 2 2 6
61928- 2 2 6 2 2 6 2 2 6 2 2 6
61929- 2 2 6 62 62 62 66 66 66 30 30 30
61930- 10 10 10 0 0 0 0 0 0 0 0 0
61931- 0 0 0 0 0 0 0 0 0 0 0 0
61932- 0 0 0 0 0 0 0 0 0 0 0 0
61933- 0 0 0 0 0 0 0 0 0 0 0 0
61934- 0 0 0 0 0 0 0 0 0 0 0 0
61935- 0 0 0 0 0 0 0 0 0 0 0 0
61936- 0 0 0 0 0 0 0 0 0 0 0 0
61937- 0 0 0 0 0 0 0 0 0 0 0 0
61938- 0 0 0 0 0 0 0 0 0 0 0 0
61939- 14 14 14 42 42 42 82 82 82 18 18 18
61940- 2 2 6 2 2 6 2 2 6 10 10 10
61941- 94 94 94 182 182 182 218 218 218 242 242 242
61942-250 250 250 253 253 253 253 253 253 250 250 250
61943-234 234 234 253 253 253 253 253 253 253 253 253
61944-253 253 253 253 253 253 253 253 253 246 246 246
61945-238 238 238 226 226 226 210 210 210 202 202 202
61946-195 195 195 195 195 195 210 210 210 158 158 158
61947- 6 6 6 14 14 14 50 50 50 14 14 14
61948- 2 2 6 2 2 6 2 2 6 2 2 6
61949- 2 2 6 6 6 6 86 86 86 46 46 46
61950- 18 18 18 6 6 6 0 0 0 0 0 0
61951- 0 0 0 0 0 0 0 0 0 0 0 0
61952- 0 0 0 0 0 0 0 0 0 0 0 0
61953- 0 0 0 0 0 0 0 0 0 0 0 0
61954- 0 0 0 0 0 0 0 0 0 0 0 0
61955- 0 0 0 0 0 0 0 0 0 0 0 0
61956- 0 0 0 0 0 0 0 0 0 0 0 0
61957- 0 0 0 0 0 0 0 0 0 0 0 0
61958- 0 0 0 0 0 0 0 0 0 6 6 6
61959- 22 22 22 54 54 54 70 70 70 2 2 6
61960- 2 2 6 10 10 10 2 2 6 22 22 22
61961-166 166 166 231 231 231 250 250 250 253 253 253
61962-253 253 253 253 253 253 253 253 253 250 250 250
61963-242 242 242 253 253 253 253 253 253 253 253 253
61964-253 253 253 253 253 253 253 253 253 253 253 253
61965-253 253 253 253 253 253 253 253 253 246 246 246
61966-231 231 231 206 206 206 198 198 198 226 226 226
61967- 94 94 94 2 2 6 6 6 6 38 38 38
61968- 30 30 30 2 2 6 2 2 6 2 2 6
61969- 2 2 6 2 2 6 62 62 62 66 66 66
61970- 26 26 26 10 10 10 0 0 0 0 0 0
61971- 0 0 0 0 0 0 0 0 0 0 0 0
61972- 0 0 0 0 0 0 0 0 0 0 0 0
61973- 0 0 0 0 0 0 0 0 0 0 0 0
61974- 0 0 0 0 0 0 0 0 0 0 0 0
61975- 0 0 0 0 0 0 0 0 0 0 0 0
61976- 0 0 0 0 0 0 0 0 0 0 0 0
61977- 0 0 0 0 0 0 0 0 0 0 0 0
61978- 0 0 0 0 0 0 0 0 0 10 10 10
61979- 30 30 30 74 74 74 50 50 50 2 2 6
61980- 26 26 26 26 26 26 2 2 6 106 106 106
61981-238 238 238 253 253 253 253 253 253 253 253 253
61982-253 253 253 253 253 253 253 253 253 253 253 253
61983-253 253 253 253 253 253 253 253 253 253 253 253
61984-253 253 253 253 253 253 253 253 253 253 253 253
61985-253 253 253 253 253 253 253 253 253 253 253 253
61986-253 253 253 246 246 246 218 218 218 202 202 202
61987-210 210 210 14 14 14 2 2 6 2 2 6
61988- 30 30 30 22 22 22 2 2 6 2 2 6
61989- 2 2 6 2 2 6 18 18 18 86 86 86
61990- 42 42 42 14 14 14 0 0 0 0 0 0
61991- 0 0 0 0 0 0 0 0 0 0 0 0
61992- 0 0 0 0 0 0 0 0 0 0 0 0
61993- 0 0 0 0 0 0 0 0 0 0 0 0
61994- 0 0 0 0 0 0 0 0 0 0 0 0
61995- 0 0 0 0 0 0 0 0 0 0 0 0
61996- 0 0 0 0 0 0 0 0 0 0 0 0
61997- 0 0 0 0 0 0 0 0 0 0 0 0
61998- 0 0 0 0 0 0 0 0 0 14 14 14
61999- 42 42 42 90 90 90 22 22 22 2 2 6
62000- 42 42 42 2 2 6 18 18 18 218 218 218
62001-253 253 253 253 253 253 253 253 253 253 253 253
62002-253 253 253 253 253 253 253 253 253 253 253 253
62003-253 253 253 253 253 253 253 253 253 253 253 253
62004-253 253 253 253 253 253 253 253 253 253 253 253
62005-253 253 253 253 253 253 253 253 253 253 253 253
62006-253 253 253 253 253 253 250 250 250 221 221 221
62007-218 218 218 101 101 101 2 2 6 14 14 14
62008- 18 18 18 38 38 38 10 10 10 2 2 6
62009- 2 2 6 2 2 6 2 2 6 78 78 78
62010- 58 58 58 22 22 22 6 6 6 0 0 0
62011- 0 0 0 0 0 0 0 0 0 0 0 0
62012- 0 0 0 0 0 0 0 0 0 0 0 0
62013- 0 0 0 0 0 0 0 0 0 0 0 0
62014- 0 0 0 0 0 0 0 0 0 0 0 0
62015- 0 0 0 0 0 0 0 0 0 0 0 0
62016- 0 0 0 0 0 0 0 0 0 0 0 0
62017- 0 0 0 0 0 0 0 0 0 0 0 0
62018- 0 0 0 0 0 0 6 6 6 18 18 18
62019- 54 54 54 82 82 82 2 2 6 26 26 26
62020- 22 22 22 2 2 6 123 123 123 253 253 253
62021-253 253 253 253 253 253 253 253 253 253 253 253
62022-253 253 253 253 253 253 253 253 253 253 253 253
62023-253 253 253 253 253 253 253 253 253 253 253 253
62024-253 253 253 253 253 253 253 253 253 253 253 253
62025-253 253 253 253 253 253 253 253 253 253 253 253
62026-253 253 253 253 253 253 253 253 253 250 250 250
62027-238 238 238 198 198 198 6 6 6 38 38 38
62028- 58 58 58 26 26 26 38 38 38 2 2 6
62029- 2 2 6 2 2 6 2 2 6 46 46 46
62030- 78 78 78 30 30 30 10 10 10 0 0 0
62031- 0 0 0 0 0 0 0 0 0 0 0 0
62032- 0 0 0 0 0 0 0 0 0 0 0 0
62033- 0 0 0 0 0 0 0 0 0 0 0 0
62034- 0 0 0 0 0 0 0 0 0 0 0 0
62035- 0 0 0 0 0 0 0 0 0 0 0 0
62036- 0 0 0 0 0 0 0 0 0 0 0 0
62037- 0 0 0 0 0 0 0 0 0 0 0 0
62038- 0 0 0 0 0 0 10 10 10 30 30 30
62039- 74 74 74 58 58 58 2 2 6 42 42 42
62040- 2 2 6 22 22 22 231 231 231 253 253 253
62041-253 253 253 253 253 253 253 253 253 253 253 253
62042-253 253 253 253 253 253 253 253 253 250 250 250
62043-253 253 253 253 253 253 253 253 253 253 253 253
62044-253 253 253 253 253 253 253 253 253 253 253 253
62045-253 253 253 253 253 253 253 253 253 253 253 253
62046-253 253 253 253 253 253 253 253 253 253 253 253
62047-253 253 253 246 246 246 46 46 46 38 38 38
62048- 42 42 42 14 14 14 38 38 38 14 14 14
62049- 2 2 6 2 2 6 2 2 6 6 6 6
62050- 86 86 86 46 46 46 14 14 14 0 0 0
62051- 0 0 0 0 0 0 0 0 0 0 0 0
62052- 0 0 0 0 0 0 0 0 0 0 0 0
62053- 0 0 0 0 0 0 0 0 0 0 0 0
62054- 0 0 0 0 0 0 0 0 0 0 0 0
62055- 0 0 0 0 0 0 0 0 0 0 0 0
62056- 0 0 0 0 0 0 0 0 0 0 0 0
62057- 0 0 0 0 0 0 0 0 0 0 0 0
62058- 0 0 0 6 6 6 14 14 14 42 42 42
62059- 90 90 90 18 18 18 18 18 18 26 26 26
62060- 2 2 6 116 116 116 253 253 253 253 253 253
62061-253 253 253 253 253 253 253 253 253 253 253 253
62062-253 253 253 253 253 253 250 250 250 238 238 238
62063-253 253 253 253 253 253 253 253 253 253 253 253
62064-253 253 253 253 253 253 253 253 253 253 253 253
62065-253 253 253 253 253 253 253 253 253 253 253 253
62066-253 253 253 253 253 253 253 253 253 253 253 253
62067-253 253 253 253 253 253 94 94 94 6 6 6
62068- 2 2 6 2 2 6 10 10 10 34 34 34
62069- 2 2 6 2 2 6 2 2 6 2 2 6
62070- 74 74 74 58 58 58 22 22 22 6 6 6
62071- 0 0 0 0 0 0 0 0 0 0 0 0
62072- 0 0 0 0 0 0 0 0 0 0 0 0
62073- 0 0 0 0 0 0 0 0 0 0 0 0
62074- 0 0 0 0 0 0 0 0 0 0 0 0
62075- 0 0 0 0 0 0 0 0 0 0 0 0
62076- 0 0 0 0 0 0 0 0 0 0 0 0
62077- 0 0 0 0 0 0 0 0 0 0 0 0
62078- 0 0 0 10 10 10 26 26 26 66 66 66
62079- 82 82 82 2 2 6 38 38 38 6 6 6
62080- 14 14 14 210 210 210 253 253 253 253 253 253
62081-253 253 253 253 253 253 253 253 253 253 253 253
62082-253 253 253 253 253 253 246 246 246 242 242 242
62083-253 253 253 253 253 253 253 253 253 253 253 253
62084-253 253 253 253 253 253 253 253 253 253 253 253
62085-253 253 253 253 253 253 253 253 253 253 253 253
62086-253 253 253 253 253 253 253 253 253 253 253 253
62087-253 253 253 253 253 253 144 144 144 2 2 6
62088- 2 2 6 2 2 6 2 2 6 46 46 46
62089- 2 2 6 2 2 6 2 2 6 2 2 6
62090- 42 42 42 74 74 74 30 30 30 10 10 10
62091- 0 0 0 0 0 0 0 0 0 0 0 0
62092- 0 0 0 0 0 0 0 0 0 0 0 0
62093- 0 0 0 0 0 0 0 0 0 0 0 0
62094- 0 0 0 0 0 0 0 0 0 0 0 0
62095- 0 0 0 0 0 0 0 0 0 0 0 0
62096- 0 0 0 0 0 0 0 0 0 0 0 0
62097- 0 0 0 0 0 0 0 0 0 0 0 0
62098- 6 6 6 14 14 14 42 42 42 90 90 90
62099- 26 26 26 6 6 6 42 42 42 2 2 6
62100- 74 74 74 250 250 250 253 253 253 253 253 253
62101-253 253 253 253 253 253 253 253 253 253 253 253
62102-253 253 253 253 253 253 242 242 242 242 242 242
62103-253 253 253 253 253 253 253 253 253 253 253 253
62104-253 253 253 253 253 253 253 253 253 253 253 253
62105-253 253 253 253 253 253 253 253 253 253 253 253
62106-253 253 253 253 253 253 253 253 253 253 253 253
62107-253 253 253 253 253 253 182 182 182 2 2 6
62108- 2 2 6 2 2 6 2 2 6 46 46 46
62109- 2 2 6 2 2 6 2 2 6 2 2 6
62110- 10 10 10 86 86 86 38 38 38 10 10 10
62111- 0 0 0 0 0 0 0 0 0 0 0 0
62112- 0 0 0 0 0 0 0 0 0 0 0 0
62113- 0 0 0 0 0 0 0 0 0 0 0 0
62114- 0 0 0 0 0 0 0 0 0 0 0 0
62115- 0 0 0 0 0 0 0 0 0 0 0 0
62116- 0 0 0 0 0 0 0 0 0 0 0 0
62117- 0 0 0 0 0 0 0 0 0 0 0 0
62118- 10 10 10 26 26 26 66 66 66 82 82 82
62119- 2 2 6 22 22 22 18 18 18 2 2 6
62120-149 149 149 253 253 253 253 253 253 253 253 253
62121-253 253 253 253 253 253 253 253 253 253 253 253
62122-253 253 253 253 253 253 234 234 234 242 242 242
62123-253 253 253 253 253 253 253 253 253 253 253 253
62124-253 253 253 253 253 253 253 253 253 253 253 253
62125-253 253 253 253 253 253 253 253 253 253 253 253
62126-253 253 253 253 253 253 253 253 253 253 253 253
62127-253 253 253 253 253 253 206 206 206 2 2 6
62128- 2 2 6 2 2 6 2 2 6 38 38 38
62129- 2 2 6 2 2 6 2 2 6 2 2 6
62130- 6 6 6 86 86 86 46 46 46 14 14 14
62131- 0 0 0 0 0 0 0 0 0 0 0 0
62132- 0 0 0 0 0 0 0 0 0 0 0 0
62133- 0 0 0 0 0 0 0 0 0 0 0 0
62134- 0 0 0 0 0 0 0 0 0 0 0 0
62135- 0 0 0 0 0 0 0 0 0 0 0 0
62136- 0 0 0 0 0 0 0 0 0 0 0 0
62137- 0 0 0 0 0 0 0 0 0 6 6 6
62138- 18 18 18 46 46 46 86 86 86 18 18 18
62139- 2 2 6 34 34 34 10 10 10 6 6 6
62140-210 210 210 253 253 253 253 253 253 253 253 253
62141-253 253 253 253 253 253 253 253 253 253 253 253
62142-253 253 253 253 253 253 234 234 234 242 242 242
62143-253 253 253 253 253 253 253 253 253 253 253 253
62144-253 253 253 253 253 253 253 253 253 253 253 253
62145-253 253 253 253 253 253 253 253 253 253 253 253
62146-253 253 253 253 253 253 253 253 253 253 253 253
62147-253 253 253 253 253 253 221 221 221 6 6 6
62148- 2 2 6 2 2 6 6 6 6 30 30 30
62149- 2 2 6 2 2 6 2 2 6 2 2 6
62150- 2 2 6 82 82 82 54 54 54 18 18 18
62151- 6 6 6 0 0 0 0 0 0 0 0 0
62152- 0 0 0 0 0 0 0 0 0 0 0 0
62153- 0 0 0 0 0 0 0 0 0 0 0 0
62154- 0 0 0 0 0 0 0 0 0 0 0 0
62155- 0 0 0 0 0 0 0 0 0 0 0 0
62156- 0 0 0 0 0 0 0 0 0 0 0 0
62157- 0 0 0 0 0 0 0 0 0 10 10 10
62158- 26 26 26 66 66 66 62 62 62 2 2 6
62159- 2 2 6 38 38 38 10 10 10 26 26 26
62160-238 238 238 253 253 253 253 253 253 253 253 253
62161-253 253 253 253 253 253 253 253 253 253 253 253
62162-253 253 253 253 253 253 231 231 231 238 238 238
62163-253 253 253 253 253 253 253 253 253 253 253 253
62164-253 253 253 253 253 253 253 253 253 253 253 253
62165-253 253 253 253 253 253 253 253 253 253 253 253
62166-253 253 253 253 253 253 253 253 253 253 253 253
62167-253 253 253 253 253 253 231 231 231 6 6 6
62168- 2 2 6 2 2 6 10 10 10 30 30 30
62169- 2 2 6 2 2 6 2 2 6 2 2 6
62170- 2 2 6 66 66 66 58 58 58 22 22 22
62171- 6 6 6 0 0 0 0 0 0 0 0 0
62172- 0 0 0 0 0 0 0 0 0 0 0 0
62173- 0 0 0 0 0 0 0 0 0 0 0 0
62174- 0 0 0 0 0 0 0 0 0 0 0 0
62175- 0 0 0 0 0 0 0 0 0 0 0 0
62176- 0 0 0 0 0 0 0 0 0 0 0 0
62177- 0 0 0 0 0 0 0 0 0 10 10 10
62178- 38 38 38 78 78 78 6 6 6 2 2 6
62179- 2 2 6 46 46 46 14 14 14 42 42 42
62180-246 246 246 253 253 253 253 253 253 253 253 253
62181-253 253 253 253 253 253 253 253 253 253 253 253
62182-253 253 253 253 253 253 231 231 231 242 242 242
62183-253 253 253 253 253 253 253 253 253 253 253 253
62184-253 253 253 253 253 253 253 253 253 253 253 253
62185-253 253 253 253 253 253 253 253 253 253 253 253
62186-253 253 253 253 253 253 253 253 253 253 253 253
62187-253 253 253 253 253 253 234 234 234 10 10 10
62188- 2 2 6 2 2 6 22 22 22 14 14 14
62189- 2 2 6 2 2 6 2 2 6 2 2 6
62190- 2 2 6 66 66 66 62 62 62 22 22 22
62191- 6 6 6 0 0 0 0 0 0 0 0 0
62192- 0 0 0 0 0 0 0 0 0 0 0 0
62193- 0 0 0 0 0 0 0 0 0 0 0 0
62194- 0 0 0 0 0 0 0 0 0 0 0 0
62195- 0 0 0 0 0 0 0 0 0 0 0 0
62196- 0 0 0 0 0 0 0 0 0 0 0 0
62197- 0 0 0 0 0 0 6 6 6 18 18 18
62198- 50 50 50 74 74 74 2 2 6 2 2 6
62199- 14 14 14 70 70 70 34 34 34 62 62 62
62200-250 250 250 253 253 253 253 253 253 253 253 253
62201-253 253 253 253 253 253 253 253 253 253 253 253
62202-253 253 253 253 253 253 231 231 231 246 246 246
62203-253 253 253 253 253 253 253 253 253 253 253 253
62204-253 253 253 253 253 253 253 253 253 253 253 253
62205-253 253 253 253 253 253 253 253 253 253 253 253
62206-253 253 253 253 253 253 253 253 253 253 253 253
62207-253 253 253 253 253 253 234 234 234 14 14 14
62208- 2 2 6 2 2 6 30 30 30 2 2 6
62209- 2 2 6 2 2 6 2 2 6 2 2 6
62210- 2 2 6 66 66 66 62 62 62 22 22 22
62211- 6 6 6 0 0 0 0 0 0 0 0 0
62212- 0 0 0 0 0 0 0 0 0 0 0 0
62213- 0 0 0 0 0 0 0 0 0 0 0 0
62214- 0 0 0 0 0 0 0 0 0 0 0 0
62215- 0 0 0 0 0 0 0 0 0 0 0 0
62216- 0 0 0 0 0 0 0 0 0 0 0 0
62217- 0 0 0 0 0 0 6 6 6 18 18 18
62218- 54 54 54 62 62 62 2 2 6 2 2 6
62219- 2 2 6 30 30 30 46 46 46 70 70 70
62220-250 250 250 253 253 253 253 253 253 253 253 253
62221-253 253 253 253 253 253 253 253 253 253 253 253
62222-253 253 253 253 253 253 231 231 231 246 246 246
62223-253 253 253 253 253 253 253 253 253 253 253 253
62224-253 253 253 253 253 253 253 253 253 253 253 253
62225-253 253 253 253 253 253 253 253 253 253 253 253
62226-253 253 253 253 253 253 253 253 253 253 253 253
62227-253 253 253 253 253 253 226 226 226 10 10 10
62228- 2 2 6 6 6 6 30 30 30 2 2 6
62229- 2 2 6 2 2 6 2 2 6 2 2 6
62230- 2 2 6 66 66 66 58 58 58 22 22 22
62231- 6 6 6 0 0 0 0 0 0 0 0 0
62232- 0 0 0 0 0 0 0 0 0 0 0 0
62233- 0 0 0 0 0 0 0 0 0 0 0 0
62234- 0 0 0 0 0 0 0 0 0 0 0 0
62235- 0 0 0 0 0 0 0 0 0 0 0 0
62236- 0 0 0 0 0 0 0 0 0 0 0 0
62237- 0 0 0 0 0 0 6 6 6 22 22 22
62238- 58 58 58 62 62 62 2 2 6 2 2 6
62239- 2 2 6 2 2 6 30 30 30 78 78 78
62240-250 250 250 253 253 253 253 253 253 253 253 253
62241-253 253 253 253 253 253 253 253 253 253 253 253
62242-253 253 253 253 253 253 231 231 231 246 246 246
62243-253 253 253 253 253 253 253 253 253 253 253 253
62244-253 253 253 253 253 253 253 253 253 253 253 253
62245-253 253 253 253 253 253 253 253 253 253 253 253
62246-253 253 253 253 253 253 253 253 253 253 253 253
62247-253 253 253 253 253 253 206 206 206 2 2 6
62248- 22 22 22 34 34 34 18 14 6 22 22 22
62249- 26 26 26 18 18 18 6 6 6 2 2 6
62250- 2 2 6 82 82 82 54 54 54 18 18 18
62251- 6 6 6 0 0 0 0 0 0 0 0 0
62252- 0 0 0 0 0 0 0 0 0 0 0 0
62253- 0 0 0 0 0 0 0 0 0 0 0 0
62254- 0 0 0 0 0 0 0 0 0 0 0 0
62255- 0 0 0 0 0 0 0 0 0 0 0 0
62256- 0 0 0 0 0 0 0 0 0 0 0 0
62257- 0 0 0 0 0 0 6 6 6 26 26 26
62258- 62 62 62 106 106 106 74 54 14 185 133 11
62259-210 162 10 121 92 8 6 6 6 62 62 62
62260-238 238 238 253 253 253 253 253 253 253 253 253
62261-253 253 253 253 253 253 253 253 253 253 253 253
62262-253 253 253 253 253 253 231 231 231 246 246 246
62263-253 253 253 253 253 253 253 253 253 253 253 253
62264-253 253 253 253 253 253 253 253 253 253 253 253
62265-253 253 253 253 253 253 253 253 253 253 253 253
62266-253 253 253 253 253 253 253 253 253 253 253 253
62267-253 253 253 253 253 253 158 158 158 18 18 18
62268- 14 14 14 2 2 6 2 2 6 2 2 6
62269- 6 6 6 18 18 18 66 66 66 38 38 38
62270- 6 6 6 94 94 94 50 50 50 18 18 18
62271- 6 6 6 0 0 0 0 0 0 0 0 0
62272- 0 0 0 0 0 0 0 0 0 0 0 0
62273- 0 0 0 0 0 0 0 0 0 0 0 0
62274- 0 0 0 0 0 0 0 0 0 0 0 0
62275- 0 0 0 0 0 0 0 0 0 0 0 0
62276- 0 0 0 0 0 0 0 0 0 6 6 6
62277- 10 10 10 10 10 10 18 18 18 38 38 38
62278- 78 78 78 142 134 106 216 158 10 242 186 14
62279-246 190 14 246 190 14 156 118 10 10 10 10
62280- 90 90 90 238 238 238 253 253 253 253 253 253
62281-253 253 253 253 253 253 253 253 253 253 253 253
62282-253 253 253 253 253 253 231 231 231 250 250 250
62283-253 253 253 253 253 253 253 253 253 253 253 253
62284-253 253 253 253 253 253 253 253 253 253 253 253
62285-253 253 253 253 253 253 253 253 253 253 253 253
62286-253 253 253 253 253 253 253 253 253 246 230 190
62287-238 204 91 238 204 91 181 142 44 37 26 9
62288- 2 2 6 2 2 6 2 2 6 2 2 6
62289- 2 2 6 2 2 6 38 38 38 46 46 46
62290- 26 26 26 106 106 106 54 54 54 18 18 18
62291- 6 6 6 0 0 0 0 0 0 0 0 0
62292- 0 0 0 0 0 0 0 0 0 0 0 0
62293- 0 0 0 0 0 0 0 0 0 0 0 0
62294- 0 0 0 0 0 0 0 0 0 0 0 0
62295- 0 0 0 0 0 0 0 0 0 0 0 0
62296- 0 0 0 6 6 6 14 14 14 22 22 22
62297- 30 30 30 38 38 38 50 50 50 70 70 70
62298-106 106 106 190 142 34 226 170 11 242 186 14
62299-246 190 14 246 190 14 246 190 14 154 114 10
62300- 6 6 6 74 74 74 226 226 226 253 253 253
62301-253 253 253 253 253 253 253 253 253 253 253 253
62302-253 253 253 253 253 253 231 231 231 250 250 250
62303-253 253 253 253 253 253 253 253 253 253 253 253
62304-253 253 253 253 253 253 253 253 253 253 253 253
62305-253 253 253 253 253 253 253 253 253 253 253 253
62306-253 253 253 253 253 253 253 253 253 228 184 62
62307-241 196 14 241 208 19 232 195 16 38 30 10
62308- 2 2 6 2 2 6 2 2 6 2 2 6
62309- 2 2 6 6 6 6 30 30 30 26 26 26
62310-203 166 17 154 142 90 66 66 66 26 26 26
62311- 6 6 6 0 0 0 0 0 0 0 0 0
62312- 0 0 0 0 0 0 0 0 0 0 0 0
62313- 0 0 0 0 0 0 0 0 0 0 0 0
62314- 0 0 0 0 0 0 0 0 0 0 0 0
62315- 0 0 0 0 0 0 0 0 0 0 0 0
62316- 6 6 6 18 18 18 38 38 38 58 58 58
62317- 78 78 78 86 86 86 101 101 101 123 123 123
62318-175 146 61 210 150 10 234 174 13 246 186 14
62319-246 190 14 246 190 14 246 190 14 238 190 10
62320-102 78 10 2 2 6 46 46 46 198 198 198
62321-253 253 253 253 253 253 253 253 253 253 253 253
62322-253 253 253 253 253 253 234 234 234 242 242 242
62323-253 253 253 253 253 253 253 253 253 253 253 253
62324-253 253 253 253 253 253 253 253 253 253 253 253
62325-253 253 253 253 253 253 253 253 253 253 253 253
62326-253 253 253 253 253 253 253 253 253 224 178 62
62327-242 186 14 241 196 14 210 166 10 22 18 6
62328- 2 2 6 2 2 6 2 2 6 2 2 6
62329- 2 2 6 2 2 6 6 6 6 121 92 8
62330-238 202 15 232 195 16 82 82 82 34 34 34
62331- 10 10 10 0 0 0 0 0 0 0 0 0
62332- 0 0 0 0 0 0 0 0 0 0 0 0
62333- 0 0 0 0 0 0 0 0 0 0 0 0
62334- 0 0 0 0 0 0 0 0 0 0 0 0
62335- 0 0 0 0 0 0 0 0 0 0 0 0
62336- 14 14 14 38 38 38 70 70 70 154 122 46
62337-190 142 34 200 144 11 197 138 11 197 138 11
62338-213 154 11 226 170 11 242 186 14 246 190 14
62339-246 190 14 246 190 14 246 190 14 246 190 14
62340-225 175 15 46 32 6 2 2 6 22 22 22
62341-158 158 158 250 250 250 253 253 253 253 253 253
62342-253 253 253 253 253 253 253 253 253 253 253 253
62343-253 253 253 253 253 253 253 253 253 253 253 253
62344-253 253 253 253 253 253 253 253 253 253 253 253
62345-253 253 253 253 253 253 253 253 253 253 253 253
62346-253 253 253 250 250 250 242 242 242 224 178 62
62347-239 182 13 236 186 11 213 154 11 46 32 6
62348- 2 2 6 2 2 6 2 2 6 2 2 6
62349- 2 2 6 2 2 6 61 42 6 225 175 15
62350-238 190 10 236 186 11 112 100 78 42 42 42
62351- 14 14 14 0 0 0 0 0 0 0 0 0
62352- 0 0 0 0 0 0 0 0 0 0 0 0
62353- 0 0 0 0 0 0 0 0 0 0 0 0
62354- 0 0 0 0 0 0 0 0 0 0 0 0
62355- 0 0 0 0 0 0 0 0 0 6 6 6
62356- 22 22 22 54 54 54 154 122 46 213 154 11
62357-226 170 11 230 174 11 226 170 11 226 170 11
62358-236 178 12 242 186 14 246 190 14 246 190 14
62359-246 190 14 246 190 14 246 190 14 246 190 14
62360-241 196 14 184 144 12 10 10 10 2 2 6
62361- 6 6 6 116 116 116 242 242 242 253 253 253
62362-253 253 253 253 253 253 253 253 253 253 253 253
62363-253 253 253 253 253 253 253 253 253 253 253 253
62364-253 253 253 253 253 253 253 253 253 253 253 253
62365-253 253 253 253 253 253 253 253 253 253 253 253
62366-253 253 253 231 231 231 198 198 198 214 170 54
62367-236 178 12 236 178 12 210 150 10 137 92 6
62368- 18 14 6 2 2 6 2 2 6 2 2 6
62369- 6 6 6 70 47 6 200 144 11 236 178 12
62370-239 182 13 239 182 13 124 112 88 58 58 58
62371- 22 22 22 6 6 6 0 0 0 0 0 0
62372- 0 0 0 0 0 0 0 0 0 0 0 0
62373- 0 0 0 0 0 0 0 0 0 0 0 0
62374- 0 0 0 0 0 0 0 0 0 0 0 0
62375- 0 0 0 0 0 0 0 0 0 10 10 10
62376- 30 30 30 70 70 70 180 133 36 226 170 11
62377-239 182 13 242 186 14 242 186 14 246 186 14
62378-246 190 14 246 190 14 246 190 14 246 190 14
62379-246 190 14 246 190 14 246 190 14 246 190 14
62380-246 190 14 232 195 16 98 70 6 2 2 6
62381- 2 2 6 2 2 6 66 66 66 221 221 221
62382-253 253 253 253 253 253 253 253 253 253 253 253
62383-253 253 253 253 253 253 253 253 253 253 253 253
62384-253 253 253 253 253 253 253 253 253 253 253 253
62385-253 253 253 253 253 253 253 253 253 253 253 253
62386-253 253 253 206 206 206 198 198 198 214 166 58
62387-230 174 11 230 174 11 216 158 10 192 133 9
62388-163 110 8 116 81 8 102 78 10 116 81 8
62389-167 114 7 197 138 11 226 170 11 239 182 13
62390-242 186 14 242 186 14 162 146 94 78 78 78
62391- 34 34 34 14 14 14 6 6 6 0 0 0
62392- 0 0 0 0 0 0 0 0 0 0 0 0
62393- 0 0 0 0 0 0 0 0 0 0 0 0
62394- 0 0 0 0 0 0 0 0 0 0 0 0
62395- 0 0 0 0 0 0 0 0 0 6 6 6
62396- 30 30 30 78 78 78 190 142 34 226 170 11
62397-239 182 13 246 190 14 246 190 14 246 190 14
62398-246 190 14 246 190 14 246 190 14 246 190 14
62399-246 190 14 246 190 14 246 190 14 246 190 14
62400-246 190 14 241 196 14 203 166 17 22 18 6
62401- 2 2 6 2 2 6 2 2 6 38 38 38
62402-218 218 218 253 253 253 253 253 253 253 253 253
62403-253 253 253 253 253 253 253 253 253 253 253 253
62404-253 253 253 253 253 253 253 253 253 253 253 253
62405-253 253 253 253 253 253 253 253 253 253 253 253
62406-250 250 250 206 206 206 198 198 198 202 162 69
62407-226 170 11 236 178 12 224 166 10 210 150 10
62408-200 144 11 197 138 11 192 133 9 197 138 11
62409-210 150 10 226 170 11 242 186 14 246 190 14
62410-246 190 14 246 186 14 225 175 15 124 112 88
62411- 62 62 62 30 30 30 14 14 14 6 6 6
62412- 0 0 0 0 0 0 0 0 0 0 0 0
62413- 0 0 0 0 0 0 0 0 0 0 0 0
62414- 0 0 0 0 0 0 0 0 0 0 0 0
62415- 0 0 0 0 0 0 0 0 0 10 10 10
62416- 30 30 30 78 78 78 174 135 50 224 166 10
62417-239 182 13 246 190 14 246 190 14 246 190 14
62418-246 190 14 246 190 14 246 190 14 246 190 14
62419-246 190 14 246 190 14 246 190 14 246 190 14
62420-246 190 14 246 190 14 241 196 14 139 102 15
62421- 2 2 6 2 2 6 2 2 6 2 2 6
62422- 78 78 78 250 250 250 253 253 253 253 253 253
62423-253 253 253 253 253 253 253 253 253 253 253 253
62424-253 253 253 253 253 253 253 253 253 253 253 253
62425-253 253 253 253 253 253 253 253 253 253 253 253
62426-250 250 250 214 214 214 198 198 198 190 150 46
62427-219 162 10 236 178 12 234 174 13 224 166 10
62428-216 158 10 213 154 11 213 154 11 216 158 10
62429-226 170 11 239 182 13 246 190 14 246 190 14
62430-246 190 14 246 190 14 242 186 14 206 162 42
62431-101 101 101 58 58 58 30 30 30 14 14 14
62432- 6 6 6 0 0 0 0 0 0 0 0 0
62433- 0 0 0 0 0 0 0 0 0 0 0 0
62434- 0 0 0 0 0 0 0 0 0 0 0 0
62435- 0 0 0 0 0 0 0 0 0 10 10 10
62436- 30 30 30 74 74 74 174 135 50 216 158 10
62437-236 178 12 246 190 14 246 190 14 246 190 14
62438-246 190 14 246 190 14 246 190 14 246 190 14
62439-246 190 14 246 190 14 246 190 14 246 190 14
62440-246 190 14 246 190 14 241 196 14 226 184 13
62441- 61 42 6 2 2 6 2 2 6 2 2 6
62442- 22 22 22 238 238 238 253 253 253 253 253 253
62443-253 253 253 253 253 253 253 253 253 253 253 253
62444-253 253 253 253 253 253 253 253 253 253 253 253
62445-253 253 253 253 253 253 253 253 253 253 253 253
62446-253 253 253 226 226 226 187 187 187 180 133 36
62447-216 158 10 236 178 12 239 182 13 236 178 12
62448-230 174 11 226 170 11 226 170 11 230 174 11
62449-236 178 12 242 186 14 246 190 14 246 190 14
62450-246 190 14 246 190 14 246 186 14 239 182 13
62451-206 162 42 106 106 106 66 66 66 34 34 34
62452- 14 14 14 6 6 6 0 0 0 0 0 0
62453- 0 0 0 0 0 0 0 0 0 0 0 0
62454- 0 0 0 0 0 0 0 0 0 0 0 0
62455- 0 0 0 0 0 0 0 0 0 6 6 6
62456- 26 26 26 70 70 70 163 133 67 213 154 11
62457-236 178 12 246 190 14 246 190 14 246 190 14
62458-246 190 14 246 190 14 246 190 14 246 190 14
62459-246 190 14 246 190 14 246 190 14 246 190 14
62460-246 190 14 246 190 14 246 190 14 241 196 14
62461-190 146 13 18 14 6 2 2 6 2 2 6
62462- 46 46 46 246 246 246 253 253 253 253 253 253
62463-253 253 253 253 253 253 253 253 253 253 253 253
62464-253 253 253 253 253 253 253 253 253 253 253 253
62465-253 253 253 253 253 253 253 253 253 253 253 253
62466-253 253 253 221 221 221 86 86 86 156 107 11
62467-216 158 10 236 178 12 242 186 14 246 186 14
62468-242 186 14 239 182 13 239 182 13 242 186 14
62469-242 186 14 246 186 14 246 190 14 246 190 14
62470-246 190 14 246 190 14 246 190 14 246 190 14
62471-242 186 14 225 175 15 142 122 72 66 66 66
62472- 30 30 30 10 10 10 0 0 0 0 0 0
62473- 0 0 0 0 0 0 0 0 0 0 0 0
62474- 0 0 0 0 0 0 0 0 0 0 0 0
62475- 0 0 0 0 0 0 0 0 0 6 6 6
62476- 26 26 26 70 70 70 163 133 67 210 150 10
62477-236 178 12 246 190 14 246 190 14 246 190 14
62478-246 190 14 246 190 14 246 190 14 246 190 14
62479-246 190 14 246 190 14 246 190 14 246 190 14
62480-246 190 14 246 190 14 246 190 14 246 190 14
62481-232 195 16 121 92 8 34 34 34 106 106 106
62482-221 221 221 253 253 253 253 253 253 253 253 253
62483-253 253 253 253 253 253 253 253 253 253 253 253
62484-253 253 253 253 253 253 253 253 253 253 253 253
62485-253 253 253 253 253 253 253 253 253 253 253 253
62486-242 242 242 82 82 82 18 14 6 163 110 8
62487-216 158 10 236 178 12 242 186 14 246 190 14
62488-246 190 14 246 190 14 246 190 14 246 190 14
62489-246 190 14 246 190 14 246 190 14 246 190 14
62490-246 190 14 246 190 14 246 190 14 246 190 14
62491-246 190 14 246 190 14 242 186 14 163 133 67
62492- 46 46 46 18 18 18 6 6 6 0 0 0
62493- 0 0 0 0 0 0 0 0 0 0 0 0
62494- 0 0 0 0 0 0 0 0 0 0 0 0
62495- 0 0 0 0 0 0 0 0 0 10 10 10
62496- 30 30 30 78 78 78 163 133 67 210 150 10
62497-236 178 12 246 186 14 246 190 14 246 190 14
62498-246 190 14 246 190 14 246 190 14 246 190 14
62499-246 190 14 246 190 14 246 190 14 246 190 14
62500-246 190 14 246 190 14 246 190 14 246 190 14
62501-241 196 14 215 174 15 190 178 144 253 253 253
62502-253 253 253 253 253 253 253 253 253 253 253 253
62503-253 253 253 253 253 253 253 253 253 253 253 253
62504-253 253 253 253 253 253 253 253 253 253 253 253
62505-253 253 253 253 253 253 253 253 253 218 218 218
62506- 58 58 58 2 2 6 22 18 6 167 114 7
62507-216 158 10 236 178 12 246 186 14 246 190 14
62508-246 190 14 246 190 14 246 190 14 246 190 14
62509-246 190 14 246 190 14 246 190 14 246 190 14
62510-246 190 14 246 190 14 246 190 14 246 190 14
62511-246 190 14 246 186 14 242 186 14 190 150 46
62512- 54 54 54 22 22 22 6 6 6 0 0 0
62513- 0 0 0 0 0 0 0 0 0 0 0 0
62514- 0 0 0 0 0 0 0 0 0 0 0 0
62515- 0 0 0 0 0 0 0 0 0 14 14 14
62516- 38 38 38 86 86 86 180 133 36 213 154 11
62517-236 178 12 246 186 14 246 190 14 246 190 14
62518-246 190 14 246 190 14 246 190 14 246 190 14
62519-246 190 14 246 190 14 246 190 14 246 190 14
62520-246 190 14 246 190 14 246 190 14 246 190 14
62521-246 190 14 232 195 16 190 146 13 214 214 214
62522-253 253 253 253 253 253 253 253 253 253 253 253
62523-253 253 253 253 253 253 253 253 253 253 253 253
62524-253 253 253 253 253 253 253 253 253 253 253 253
62525-253 253 253 250 250 250 170 170 170 26 26 26
62526- 2 2 6 2 2 6 37 26 9 163 110 8
62527-219 162 10 239 182 13 246 186 14 246 190 14
62528-246 190 14 246 190 14 246 190 14 246 190 14
62529-246 190 14 246 190 14 246 190 14 246 190 14
62530-246 190 14 246 190 14 246 190 14 246 190 14
62531-246 186 14 236 178 12 224 166 10 142 122 72
62532- 46 46 46 18 18 18 6 6 6 0 0 0
62533- 0 0 0 0 0 0 0 0 0 0 0 0
62534- 0 0 0 0 0 0 0 0 0 0 0 0
62535- 0 0 0 0 0 0 6 6 6 18 18 18
62536- 50 50 50 109 106 95 192 133 9 224 166 10
62537-242 186 14 246 190 14 246 190 14 246 190 14
62538-246 190 14 246 190 14 246 190 14 246 190 14
62539-246 190 14 246 190 14 246 190 14 246 190 14
62540-246 190 14 246 190 14 246 190 14 246 190 14
62541-242 186 14 226 184 13 210 162 10 142 110 46
62542-226 226 226 253 253 253 253 253 253 253 253 253
62543-253 253 253 253 253 253 253 253 253 253 253 253
62544-253 253 253 253 253 253 253 253 253 253 253 253
62545-198 198 198 66 66 66 2 2 6 2 2 6
62546- 2 2 6 2 2 6 50 34 6 156 107 11
62547-219 162 10 239 182 13 246 186 14 246 190 14
62548-246 190 14 246 190 14 246 190 14 246 190 14
62549-246 190 14 246 190 14 246 190 14 246 190 14
62550-246 190 14 246 190 14 246 190 14 242 186 14
62551-234 174 13 213 154 11 154 122 46 66 66 66
62552- 30 30 30 10 10 10 0 0 0 0 0 0
62553- 0 0 0 0 0 0 0 0 0 0 0 0
62554- 0 0 0 0 0 0 0 0 0 0 0 0
62555- 0 0 0 0 0 0 6 6 6 22 22 22
62556- 58 58 58 154 121 60 206 145 10 234 174 13
62557-242 186 14 246 186 14 246 190 14 246 190 14
62558-246 190 14 246 190 14 246 190 14 246 190 14
62559-246 190 14 246 190 14 246 190 14 246 190 14
62560-246 190 14 246 190 14 246 190 14 246 190 14
62561-246 186 14 236 178 12 210 162 10 163 110 8
62562- 61 42 6 138 138 138 218 218 218 250 250 250
62563-253 253 253 253 253 253 253 253 253 250 250 250
62564-242 242 242 210 210 210 144 144 144 66 66 66
62565- 6 6 6 2 2 6 2 2 6 2 2 6
62566- 2 2 6 2 2 6 61 42 6 163 110 8
62567-216 158 10 236 178 12 246 190 14 246 190 14
62568-246 190 14 246 190 14 246 190 14 246 190 14
62569-246 190 14 246 190 14 246 190 14 246 190 14
62570-246 190 14 239 182 13 230 174 11 216 158 10
62571-190 142 34 124 112 88 70 70 70 38 38 38
62572- 18 18 18 6 6 6 0 0 0 0 0 0
62573- 0 0 0 0 0 0 0 0 0 0 0 0
62574- 0 0 0 0 0 0 0 0 0 0 0 0
62575- 0 0 0 0 0 0 6 6 6 22 22 22
62576- 62 62 62 168 124 44 206 145 10 224 166 10
62577-236 178 12 239 182 13 242 186 14 242 186 14
62578-246 186 14 246 190 14 246 190 14 246 190 14
62579-246 190 14 246 190 14 246 190 14 246 190 14
62580-246 190 14 246 190 14 246 190 14 246 190 14
62581-246 190 14 236 178 12 216 158 10 175 118 6
62582- 80 54 7 2 2 6 6 6 6 30 30 30
62583- 54 54 54 62 62 62 50 50 50 38 38 38
62584- 14 14 14 2 2 6 2 2 6 2 2 6
62585- 2 2 6 2 2 6 2 2 6 2 2 6
62586- 2 2 6 6 6 6 80 54 7 167 114 7
62587-213 154 11 236 178 12 246 190 14 246 190 14
62588-246 190 14 246 190 14 246 190 14 246 190 14
62589-246 190 14 242 186 14 239 182 13 239 182 13
62590-230 174 11 210 150 10 174 135 50 124 112 88
62591- 82 82 82 54 54 54 34 34 34 18 18 18
62592- 6 6 6 0 0 0 0 0 0 0 0 0
62593- 0 0 0 0 0 0 0 0 0 0 0 0
62594- 0 0 0 0 0 0 0 0 0 0 0 0
62595- 0 0 0 0 0 0 6 6 6 18 18 18
62596- 50 50 50 158 118 36 192 133 9 200 144 11
62597-216 158 10 219 162 10 224 166 10 226 170 11
62598-230 174 11 236 178 12 239 182 13 239 182 13
62599-242 186 14 246 186 14 246 190 14 246 190 14
62600-246 190 14 246 190 14 246 190 14 246 190 14
62601-246 186 14 230 174 11 210 150 10 163 110 8
62602-104 69 6 10 10 10 2 2 6 2 2 6
62603- 2 2 6 2 2 6 2 2 6 2 2 6
62604- 2 2 6 2 2 6 2 2 6 2 2 6
62605- 2 2 6 2 2 6 2 2 6 2 2 6
62606- 2 2 6 6 6 6 91 60 6 167 114 7
62607-206 145 10 230 174 11 242 186 14 246 190 14
62608-246 190 14 246 190 14 246 186 14 242 186 14
62609-239 182 13 230 174 11 224 166 10 213 154 11
62610-180 133 36 124 112 88 86 86 86 58 58 58
62611- 38 38 38 22 22 22 10 10 10 6 6 6
62612- 0 0 0 0 0 0 0 0 0 0 0 0
62613- 0 0 0 0 0 0 0 0 0 0 0 0
62614- 0 0 0 0 0 0 0 0 0 0 0 0
62615- 0 0 0 0 0 0 0 0 0 14 14 14
62616- 34 34 34 70 70 70 138 110 50 158 118 36
62617-167 114 7 180 123 7 192 133 9 197 138 11
62618-200 144 11 206 145 10 213 154 11 219 162 10
62619-224 166 10 230 174 11 239 182 13 242 186 14
62620-246 186 14 246 186 14 246 186 14 246 186 14
62621-239 182 13 216 158 10 185 133 11 152 99 6
62622-104 69 6 18 14 6 2 2 6 2 2 6
62623- 2 2 6 2 2 6 2 2 6 2 2 6
62624- 2 2 6 2 2 6 2 2 6 2 2 6
62625- 2 2 6 2 2 6 2 2 6 2 2 6
62626- 2 2 6 6 6 6 80 54 7 152 99 6
62627-192 133 9 219 162 10 236 178 12 239 182 13
62628-246 186 14 242 186 14 239 182 13 236 178 12
62629-224 166 10 206 145 10 192 133 9 154 121 60
62630- 94 94 94 62 62 62 42 42 42 22 22 22
62631- 14 14 14 6 6 6 0 0 0 0 0 0
62632- 0 0 0 0 0 0 0 0 0 0 0 0
62633- 0 0 0 0 0 0 0 0 0 0 0 0
62634- 0 0 0 0 0 0 0 0 0 0 0 0
62635- 0 0 0 0 0 0 0 0 0 6 6 6
62636- 18 18 18 34 34 34 58 58 58 78 78 78
62637-101 98 89 124 112 88 142 110 46 156 107 11
62638-163 110 8 167 114 7 175 118 6 180 123 7
62639-185 133 11 197 138 11 210 150 10 219 162 10
62640-226 170 11 236 178 12 236 178 12 234 174 13
62641-219 162 10 197 138 11 163 110 8 130 83 6
62642- 91 60 6 10 10 10 2 2 6 2 2 6
62643- 18 18 18 38 38 38 38 38 38 38 38 38
62644- 38 38 38 38 38 38 38 38 38 38 38 38
62645- 38 38 38 38 38 38 26 26 26 2 2 6
62646- 2 2 6 6 6 6 70 47 6 137 92 6
62647-175 118 6 200 144 11 219 162 10 230 174 11
62648-234 174 13 230 174 11 219 162 10 210 150 10
62649-192 133 9 163 110 8 124 112 88 82 82 82
62650- 50 50 50 30 30 30 14 14 14 6 6 6
62651- 0 0 0 0 0 0 0 0 0 0 0 0
62652- 0 0 0 0 0 0 0 0 0 0 0 0
62653- 0 0 0 0 0 0 0 0 0 0 0 0
62654- 0 0 0 0 0 0 0 0 0 0 0 0
62655- 0 0 0 0 0 0 0 0 0 0 0 0
62656- 6 6 6 14 14 14 22 22 22 34 34 34
62657- 42 42 42 58 58 58 74 74 74 86 86 86
62658-101 98 89 122 102 70 130 98 46 121 87 25
62659-137 92 6 152 99 6 163 110 8 180 123 7
62660-185 133 11 197 138 11 206 145 10 200 144 11
62661-180 123 7 156 107 11 130 83 6 104 69 6
62662- 50 34 6 54 54 54 110 110 110 101 98 89
62663- 86 86 86 82 82 82 78 78 78 78 78 78
62664- 78 78 78 78 78 78 78 78 78 78 78 78
62665- 78 78 78 82 82 82 86 86 86 94 94 94
62666-106 106 106 101 101 101 86 66 34 124 80 6
62667-156 107 11 180 123 7 192 133 9 200 144 11
62668-206 145 10 200 144 11 192 133 9 175 118 6
62669-139 102 15 109 106 95 70 70 70 42 42 42
62670- 22 22 22 10 10 10 0 0 0 0 0 0
62671- 0 0 0 0 0 0 0 0 0 0 0 0
62672- 0 0 0 0 0 0 0 0 0 0 0 0
62673- 0 0 0 0 0 0 0 0 0 0 0 0
62674- 0 0 0 0 0 0 0 0 0 0 0 0
62675- 0 0 0 0 0 0 0 0 0 0 0 0
62676- 0 0 0 0 0 0 6 6 6 10 10 10
62677- 14 14 14 22 22 22 30 30 30 38 38 38
62678- 50 50 50 62 62 62 74 74 74 90 90 90
62679-101 98 89 112 100 78 121 87 25 124 80 6
62680-137 92 6 152 99 6 152 99 6 152 99 6
62681-138 86 6 124 80 6 98 70 6 86 66 30
62682-101 98 89 82 82 82 58 58 58 46 46 46
62683- 38 38 38 34 34 34 34 34 34 34 34 34
62684- 34 34 34 34 34 34 34 34 34 34 34 34
62685- 34 34 34 34 34 34 38 38 38 42 42 42
62686- 54 54 54 82 82 82 94 86 76 91 60 6
62687-134 86 6 156 107 11 167 114 7 175 118 6
62688-175 118 6 167 114 7 152 99 6 121 87 25
62689-101 98 89 62 62 62 34 34 34 18 18 18
62690- 6 6 6 0 0 0 0 0 0 0 0 0
62691- 0 0 0 0 0 0 0 0 0 0 0 0
62692- 0 0 0 0 0 0 0 0 0 0 0 0
62693- 0 0 0 0 0 0 0 0 0 0 0 0
62694- 0 0 0 0 0 0 0 0 0 0 0 0
62695- 0 0 0 0 0 0 0 0 0 0 0 0
62696- 0 0 0 0 0 0 0 0 0 0 0 0
62697- 0 0 0 6 6 6 6 6 6 10 10 10
62698- 18 18 18 22 22 22 30 30 30 42 42 42
62699- 50 50 50 66 66 66 86 86 86 101 98 89
62700-106 86 58 98 70 6 104 69 6 104 69 6
62701-104 69 6 91 60 6 82 62 34 90 90 90
62702- 62 62 62 38 38 38 22 22 22 14 14 14
62703- 10 10 10 10 10 10 10 10 10 10 10 10
62704- 10 10 10 10 10 10 6 6 6 10 10 10
62705- 10 10 10 10 10 10 10 10 10 14 14 14
62706- 22 22 22 42 42 42 70 70 70 89 81 66
62707- 80 54 7 104 69 6 124 80 6 137 92 6
62708-134 86 6 116 81 8 100 82 52 86 86 86
62709- 58 58 58 30 30 30 14 14 14 6 6 6
62710- 0 0 0 0 0 0 0 0 0 0 0 0
62711- 0 0 0 0 0 0 0 0 0 0 0 0
62712- 0 0 0 0 0 0 0 0 0 0 0 0
62713- 0 0 0 0 0 0 0 0 0 0 0 0
62714- 0 0 0 0 0 0 0 0 0 0 0 0
62715- 0 0 0 0 0 0 0 0 0 0 0 0
62716- 0 0 0 0 0 0 0 0 0 0 0 0
62717- 0 0 0 0 0 0 0 0 0 0 0 0
62718- 0 0 0 6 6 6 10 10 10 14 14 14
62719- 18 18 18 26 26 26 38 38 38 54 54 54
62720- 70 70 70 86 86 86 94 86 76 89 81 66
62721- 89 81 66 86 86 86 74 74 74 50 50 50
62722- 30 30 30 14 14 14 6 6 6 0 0 0
62723- 0 0 0 0 0 0 0 0 0 0 0 0
62724- 0 0 0 0 0 0 0 0 0 0 0 0
62725- 0 0 0 0 0 0 0 0 0 0 0 0
62726- 6 6 6 18 18 18 34 34 34 58 58 58
62727- 82 82 82 89 81 66 89 81 66 89 81 66
62728- 94 86 66 94 86 76 74 74 74 50 50 50
62729- 26 26 26 14 14 14 6 6 6 0 0 0
62730- 0 0 0 0 0 0 0 0 0 0 0 0
62731- 0 0 0 0 0 0 0 0 0 0 0 0
62732- 0 0 0 0 0 0 0 0 0 0 0 0
62733- 0 0 0 0 0 0 0 0 0 0 0 0
62734- 0 0 0 0 0 0 0 0 0 0 0 0
62735- 0 0 0 0 0 0 0 0 0 0 0 0
62736- 0 0 0 0 0 0 0 0 0 0 0 0
62737- 0 0 0 0 0 0 0 0 0 0 0 0
62738- 0 0 0 0 0 0 0 0 0 0 0 0
62739- 6 6 6 6 6 6 14 14 14 18 18 18
62740- 30 30 30 38 38 38 46 46 46 54 54 54
62741- 50 50 50 42 42 42 30 30 30 18 18 18
62742- 10 10 10 0 0 0 0 0 0 0 0 0
62743- 0 0 0 0 0 0 0 0 0 0 0 0
62744- 0 0 0 0 0 0 0 0 0 0 0 0
62745- 0 0 0 0 0 0 0 0 0 0 0 0
62746- 0 0 0 6 6 6 14 14 14 26 26 26
62747- 38 38 38 50 50 50 58 58 58 58 58 58
62748- 54 54 54 42 42 42 30 30 30 18 18 18
62749- 10 10 10 0 0 0 0 0 0 0 0 0
62750- 0 0 0 0 0 0 0 0 0 0 0 0
62751- 0 0 0 0 0 0 0 0 0 0 0 0
62752- 0 0 0 0 0 0 0 0 0 0 0 0
62753- 0 0 0 0 0 0 0 0 0 0 0 0
62754- 0 0 0 0 0 0 0 0 0 0 0 0
62755- 0 0 0 0 0 0 0 0 0 0 0 0
62756- 0 0 0 0 0 0 0 0 0 0 0 0
62757- 0 0 0 0 0 0 0 0 0 0 0 0
62758- 0 0 0 0 0 0 0 0 0 0 0 0
62759- 0 0 0 0 0 0 0 0 0 6 6 6
62760- 6 6 6 10 10 10 14 14 14 18 18 18
62761- 18 18 18 14 14 14 10 10 10 6 6 6
62762- 0 0 0 0 0 0 0 0 0 0 0 0
62763- 0 0 0 0 0 0 0 0 0 0 0 0
62764- 0 0 0 0 0 0 0 0 0 0 0 0
62765- 0 0 0 0 0 0 0 0 0 0 0 0
62766- 0 0 0 0 0 0 0 0 0 6 6 6
62767- 14 14 14 18 18 18 22 22 22 22 22 22
62768- 18 18 18 14 14 14 10 10 10 6 6 6
62769- 0 0 0 0 0 0 0 0 0 0 0 0
62770- 0 0 0 0 0 0 0 0 0 0 0 0
62771- 0 0 0 0 0 0 0 0 0 0 0 0
62772- 0 0 0 0 0 0 0 0 0 0 0 0
62773- 0 0 0 0 0 0 0 0 0 0 0 0
62774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62787+4 4 4 4 4 4
62788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62801+4 4 4 4 4 4
62802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62815+4 4 4 4 4 4
62816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62829+4 4 4 4 4 4
62830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62843+4 4 4 4 4 4
62844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62857+4 4 4 4 4 4
62858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62862+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
62863+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
62864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62867+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
62868+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
62869+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
62870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62871+4 4 4 4 4 4
62872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62876+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
62877+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
62878+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62881+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
62882+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
62883+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
62884+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62885+4 4 4 4 4 4
62886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62890+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
62891+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
62892+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
62893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62895+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
62896+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
62897+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
62898+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
62899+4 4 4 4 4 4
62900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62903+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
62904+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
62905+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
62906+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
62907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62908+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
62909+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
62910+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
62911+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
62912+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
62913+4 4 4 4 4 4
62914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62916+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62917+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
62918+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
62919+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
62920+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
62921+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
62922+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
62923+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
62924+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
62925+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
62926+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
62927+4 4 4 4 4 4
62928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62930+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
62931+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
62932+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
62933+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
62934+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
62935+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
62936+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
62937+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
62938+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
62939+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
62940+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
62941+4 4 4 4 4 4
62942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62943+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62944+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
62945+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
62946+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
62947+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
62948+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
62949+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
62950+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
62951+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
62952+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
62953+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
62954+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
62955+4 4 4 4 4 4
62956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62957+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62958+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
62959+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
62960+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
62961+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
62962+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
62963+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
62964+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
62965+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
62966+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
62967+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
62968+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
62969+4 4 4 4 4 4
62970+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62971+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62972+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
62973+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
62974+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
62975+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
62976+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
62977+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
62978+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
62979+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
62980+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
62981+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
62982+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
62983+4 4 4 4 4 4
62984+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62985+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62986+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
62987+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
62988+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
62989+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
62990+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
62991+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
62992+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
62993+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
62994+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
62995+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
62996+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
62997+4 4 4 4 4 4
62998+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
62999+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
63000+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
63001+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
63002+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
63003+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
63004+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
63005+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
63006+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
63007+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
63008+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
63009+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
63010+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
63011+4 4 4 4 4 4
63012+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63013+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
63014+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
63015+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
63016+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
63017+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
63018+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
63019+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
63020+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
63021+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
63022+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
63023+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
63024+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
63025+0 0 0 4 4 4
63026+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
63027+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
63028+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
63029+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
63030+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
63031+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
63032+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
63033+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
63034+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
63035+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
63036+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
63037+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
63038+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
63039+2 0 0 0 0 0
63040+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
63041+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
63042+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
63043+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
63044+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
63045+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
63046+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
63047+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
63048+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
63049+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
63050+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
63051+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
63052+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
63053+37 38 37 0 0 0
63054+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63055+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
63056+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
63057+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
63058+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
63059+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
63060+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
63061+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
63062+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
63063+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
63064+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
63065+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
63066+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
63067+85 115 134 4 0 0
63068+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
63069+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
63070+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
63071+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
63072+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
63073+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
63074+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
63075+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
63076+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
63077+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
63078+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
63079+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
63080+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
63081+60 73 81 4 0 0
63082+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
63083+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
63084+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
63085+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
63086+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
63087+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
63088+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
63089+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
63090+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
63091+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
63092+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
63093+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
63094+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
63095+16 19 21 4 0 0
63096+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
63097+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
63098+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
63099+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
63100+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
63101+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
63102+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
63103+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
63104+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
63105+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
63106+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
63107+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
63108+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
63109+4 0 0 4 3 3
63110+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
63111+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
63112+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
63113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
63114+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
63115+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
63116+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
63117+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
63118+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
63119+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
63120+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
63121+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
63122+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
63123+3 2 2 4 4 4
63124+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
63125+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
63126+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
63127+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
63128+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
63129+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
63130+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
63131+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
63132+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
63133+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
63134+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
63135+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
63136+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
63137+4 4 4 4 4 4
63138+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
63139+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
63140+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
63141+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
63142+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
63143+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
63144+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
63145+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
63146+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
63147+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
63148+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
63149+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
63150+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
63151+4 4 4 4 4 4
63152+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
63153+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
63154+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
63155+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
63156+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
63157+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63158+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
63159+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
63160+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
63161+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
63162+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
63163+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
63164+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
63165+5 5 5 5 5 5
63166+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
63167+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
63168+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
63169+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
63170+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
63171+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63172+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
63173+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
63174+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
63175+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
63176+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
63177+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
63178+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
63179+5 5 5 4 4 4
63180+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
63181+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
63182+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
63183+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
63184+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63185+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
63186+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
63187+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
63188+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
63189+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
63190+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
63191+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
63192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63193+4 4 4 4 4 4
63194+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
63195+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
63196+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
63197+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
63198+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
63199+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63200+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63201+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
63202+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
63203+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
63204+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
63205+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
63206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63207+4 4 4 4 4 4
63208+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
63209+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
63210+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
63211+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
63212+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63213+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
63214+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
63215+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
63216+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
63217+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
63218+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
63219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63221+4 4 4 4 4 4
63222+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
63223+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
63224+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
63225+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
63226+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63227+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63228+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
63229+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
63230+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
63231+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
63232+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
63233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63235+4 4 4 4 4 4
63236+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
63237+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
63238+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
63239+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
63240+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63241+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
63242+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
63243+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
63244+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
63245+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
63246+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63249+4 4 4 4 4 4
63250+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
63251+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
63252+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
63253+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
63254+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
63255+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
63256+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
63257+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
63258+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
63259+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
63260+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
63261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63263+4 4 4 4 4 4
63264+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
63265+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
63266+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
63267+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
63268+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
63269+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
63270+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
63271+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
63272+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
63273+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
63274+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
63275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63277+4 4 4 4 4 4
63278+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
63279+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
63280+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
63281+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63282+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
63283+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
63284+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
63285+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
63286+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
63287+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
63288+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63291+4 4 4 4 4 4
63292+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
63293+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
63294+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
63295+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63296+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63297+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
63298+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
63299+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
63300+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
63301+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
63302+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63305+4 4 4 4 4 4
63306+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
63307+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
63308+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63309+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
63310+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63311+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
63312+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
63313+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
63314+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
63315+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
63316+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63319+4 4 4 4 4 4
63320+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
63321+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
63322+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63323+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
63324+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63325+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
63326+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
63327+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
63328+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
63329+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63330+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63333+4 4 4 4 4 4
63334+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63335+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
63336+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
63337+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
63338+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
63339+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
63340+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
63341+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
63342+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63343+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63344+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63347+4 4 4 4 4 4
63348+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
63349+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
63350+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
63351+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
63352+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63353+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
63354+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
63355+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
63356+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
63357+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63358+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63361+4 4 4 4 4 4
63362+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
63363+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
63364+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
63365+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
63366+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
63367+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
63368+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
63369+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
63370+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63371+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63372+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63375+4 4 4 4 4 4
63376+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
63377+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
63378+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
63379+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
63380+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
63381+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
63382+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
63383+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
63384+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
63385+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63386+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63389+4 4 4 4 4 4
63390+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
63391+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
63392+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
63393+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
63394+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
63395+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
63396+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
63397+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
63398+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63399+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63400+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63403+4 4 4 4 4 4
63404+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
63405+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
63406+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
63407+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
63408+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
63409+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
63410+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
63411+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
63412+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
63413+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63414+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63417+4 4 4 4 4 4
63418+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
63419+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
63420+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
63421+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
63422+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
63423+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
63424+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
63425+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
63426+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63427+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63428+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63431+4 4 4 4 4 4
63432+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
63433+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
63434+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
63435+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
63436+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
63437+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
63438+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
63439+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
63440+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
63441+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63442+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63445+4 4 4 4 4 4
63446+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
63447+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
63448+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
63449+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
63450+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
63451+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
63452+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63453+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
63454+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
63455+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63456+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63459+4 4 4 4 4 4
63460+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
63461+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
63462+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
63463+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
63464+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
63465+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
63466+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
63467+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
63468+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
63469+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63470+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63473+4 4 4 4 4 4
63474+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
63475+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
63476+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
63477+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
63478+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
63479+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
63480+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
63481+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
63482+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
63483+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63484+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63487+4 4 4 4 4 4
63488+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
63489+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
63490+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
63491+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
63492+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
63493+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
63494+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
63495+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
63496+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
63497+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63498+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63501+4 4 4 4 4 4
63502+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
63503+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
63504+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
63505+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
63506+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
63507+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
63508+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
63509+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
63510+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
63511+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63512+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63515+4 4 4 4 4 4
63516+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
63517+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
63518+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
63519+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
63520+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
63521+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
63522+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
63523+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
63524+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
63525+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
63526+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63529+4 4 4 4 4 4
63530+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
63531+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
63532+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
63533+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
63534+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
63535+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
63536+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
63537+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
63538+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
63539+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
63540+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63543+4 4 4 4 4 4
63544+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
63545+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
63546+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
63547+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
63548+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
63549+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
63550+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
63551+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
63552+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
63553+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
63554+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63557+4 4 4 4 4 4
63558+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
63559+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
63560+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
63561+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
63562+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
63563+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
63564+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63565+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
63566+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
63567+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
63568+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
63569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63571+4 4 4 4 4 4
63572+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
63573+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
63574+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
63575+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
63576+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
63577+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
63578+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
63579+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
63580+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
63581+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
63582+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63585+4 4 4 4 4 4
63586+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
63587+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
63588+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
63589+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
63590+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
63591+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
63592+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
63593+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
63594+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
63595+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
63596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63599+4 4 4 4 4 4
63600+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
63601+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
63602+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
63603+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
63604+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
63605+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
63606+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
63607+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
63608+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
63609+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
63610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63613+4 4 4 4 4 4
63614+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
63615+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
63616+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
63617+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
63618+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
63619+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
63620+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
63621+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
63622+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
63623+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
63624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63627+4 4 4 4 4 4
63628+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
63629+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
63630+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
63631+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
63632+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
63633+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
63634+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
63635+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
63636+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
63637+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
63638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63641+4 4 4 4 4 4
63642+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
63643+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
63644+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
63645+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
63646+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
63647+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
63648+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
63649+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
63650+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
63651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63655+4 4 4 4 4 4
63656+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
63657+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
63658+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
63659+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
63660+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
63661+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
63662+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
63663+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
63664+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
63665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63669+4 4 4 4 4 4
63670+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
63671+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
63672+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
63673+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
63674+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
63675+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
63676+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
63677+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
63678+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63683+4 4 4 4 4 4
63684+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
63685+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
63686+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
63687+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
63688+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
63689+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
63690+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
63691+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
63692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63697+4 4 4 4 4 4
63698+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
63699+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
63700+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
63701+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
63702+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
63703+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
63704+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
63705+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
63706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63711+4 4 4 4 4 4
63712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
63713+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
63714+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
63715+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
63716+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
63717+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
63718+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
63719+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
63720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63725+4 4 4 4 4 4
63726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63727+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
63728+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
63729+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
63730+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
63731+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
63732+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
63733+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
63734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63739+4 4 4 4 4 4
63740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63741+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
63742+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
63743+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
63744+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
63745+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
63746+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
63747+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
63748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63753+4 4 4 4 4 4
63754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63756+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
63757+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
63758+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
63759+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
63760+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
63761+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
63762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63767+4 4 4 4 4 4
63768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
63771+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
63772+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
63773+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
63774+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
63775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63781+4 4 4 4 4 4
63782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63785+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
63786+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
63787+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
63788+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
63789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63795+4 4 4 4 4 4
63796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63799+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
63800+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
63801+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
63802+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
63803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63809+4 4 4 4 4 4
63810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63813+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
63814+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
63815+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
63816+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
63817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63823+4 4 4 4 4 4
63824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
63828+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
63829+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
63830+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
63831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63837+4 4 4 4 4 4
63838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63842+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
63843+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
63844+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
63845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63851+4 4 4 4 4 4
63852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63856+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
63857+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
63858+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63865+4 4 4 4 4 4
63866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63867+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63868+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63870+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
63871+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
63872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63875+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63879+4 4 4 4 4 4
63880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63881+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63884+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
63885+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
63886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63889+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
63893+4 4 4 4 4 4
63894diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
63895index 443e3c8..c443d6a 100644
63896--- a/drivers/video/nvidia/nv_backlight.c
63897+++ b/drivers/video/nvidia/nv_backlight.c
63898@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
63899 return bd->props.brightness;
63900 }
63901
63902-static struct backlight_ops nvidia_bl_ops = {
63903+static const struct backlight_ops nvidia_bl_ops = {
63904 .get_brightness = nvidia_bl_get_brightness,
63905 .update_status = nvidia_bl_update_status,
63906 };
63907diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
63908index d94c57f..912984c 100644
63909--- a/drivers/video/riva/fbdev.c
63910+++ b/drivers/video/riva/fbdev.c
63911@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
63912 return bd->props.brightness;
63913 }
63914
63915-static struct backlight_ops riva_bl_ops = {
63916+static const struct backlight_ops riva_bl_ops = {
63917 .get_brightness = riva_bl_get_brightness,
63918 .update_status = riva_bl_update_status,
63919 };
63920diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
63921index 54fbb29..2c108fc 100644
63922--- a/drivers/video/uvesafb.c
63923+++ b/drivers/video/uvesafb.c
63924@@ -18,6 +18,7 @@
63925 #include <linux/fb.h>
63926 #include <linux/io.h>
63927 #include <linux/mutex.h>
63928+#include <linux/moduleloader.h>
63929 #include <video/edid.h>
63930 #include <video/uvesafb.h>
63931 #ifdef CONFIG_X86
63932@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
63933 NULL,
63934 };
63935
63936- return call_usermodehelper(v86d_path, argv, envp, 1);
63937+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
63938 }
63939
63940 /*
63941@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
63942 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
63943 par->pmi_setpal = par->ypan = 0;
63944 } else {
63945+
63946+#ifdef CONFIG_PAX_KERNEXEC
63947+#ifdef CONFIG_MODULES
63948+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
63949+#endif
63950+ if (!par->pmi_code) {
63951+ par->pmi_setpal = par->ypan = 0;
63952+ return 0;
63953+ }
63954+#endif
63955+
63956 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
63957 + task->t.regs.edi);
63958+
63959+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
63960+ pax_open_kernel();
63961+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
63962+ pax_close_kernel();
63963+
63964+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
63965+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
63966+#else
63967 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
63968 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
63969+#endif
63970+
63971 printk(KERN_INFO "uvesafb: protected mode interface info at "
63972 "%04x:%04x\n",
63973 (u16)task->t.regs.es, (u16)task->t.regs.edi);
63974@@ -1799,6 +1822,11 @@ out:
63975 if (par->vbe_modes)
63976 kfree(par->vbe_modes);
63977
63978+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
63979+ if (par->pmi_code)
63980+ module_free_exec(NULL, par->pmi_code);
63981+#endif
63982+
63983 framebuffer_release(info);
63984 return err;
63985 }
63986@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
63987 kfree(par->vbe_state_orig);
63988 if (par->vbe_state_saved)
63989 kfree(par->vbe_state_saved);
63990+
63991+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
63992+ if (par->pmi_code)
63993+ module_free_exec(NULL, par->pmi_code);
63994+#endif
63995+
63996 }
63997
63998 framebuffer_release(info);
63999diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
64000index bd37ee1..cb827e8 100644
64001--- a/drivers/video/vesafb.c
64002+++ b/drivers/video/vesafb.c
64003@@ -9,6 +9,7 @@
64004 */
64005
64006 #include <linux/module.h>
64007+#include <linux/moduleloader.h>
64008 #include <linux/kernel.h>
64009 #include <linux/errno.h>
64010 #include <linux/string.h>
64011@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
64012 static int vram_total __initdata; /* Set total amount of memory */
64013 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
64014 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
64015-static void (*pmi_start)(void) __read_mostly;
64016-static void (*pmi_pal) (void) __read_mostly;
64017+static void (*pmi_start)(void) __read_only;
64018+static void (*pmi_pal) (void) __read_only;
64019 static int depth __read_mostly;
64020 static int vga_compat __read_mostly;
64021 /* --------------------------------------------------------------------- */
64022@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
64023 unsigned int size_vmode;
64024 unsigned int size_remap;
64025 unsigned int size_total;
64026+ void *pmi_code = NULL;
64027
64028 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
64029 return -ENODEV;
64030@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
64031 size_remap = size_total;
64032 vesafb_fix.smem_len = size_remap;
64033
64034-#ifndef __i386__
64035- screen_info.vesapm_seg = 0;
64036-#endif
64037-
64038 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
64039 printk(KERN_WARNING
64040 "vesafb: cannot reserve video memory at 0x%lx\n",
64041@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
64042 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
64043 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
64044
64045+#ifdef __i386__
64046+
64047+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64048+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
64049+ if (!pmi_code)
64050+#elif !defined(CONFIG_PAX_KERNEXEC)
64051+ if (0)
64052+#endif
64053+
64054+#endif
64055+ screen_info.vesapm_seg = 0;
64056+
64057 if (screen_info.vesapm_seg) {
64058- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
64059- screen_info.vesapm_seg,screen_info.vesapm_off);
64060+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
64061+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
64062 }
64063
64064 if (screen_info.vesapm_seg < 0xc000)
64065@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
64066
64067 if (ypan || pmi_setpal) {
64068 unsigned short *pmi_base;
64069+
64070 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
64071- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
64072- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
64073+
64074+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64075+ pax_open_kernel();
64076+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
64077+#else
64078+ pmi_code = pmi_base;
64079+#endif
64080+
64081+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
64082+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
64083+
64084+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64085+ pmi_start = ktva_ktla(pmi_start);
64086+ pmi_pal = ktva_ktla(pmi_pal);
64087+ pax_close_kernel();
64088+#endif
64089+
64090 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
64091 if (pmi_base[3]) {
64092 printk(KERN_INFO "vesafb: pmi: ports = ");
64093@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
64094 info->node, info->fix.id);
64095 return 0;
64096 err:
64097+
64098+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
64099+ module_free_exec(NULL, pmi_code);
64100+#endif
64101+
64102 if (info->screen_base)
64103 iounmap(info->screen_base);
64104 framebuffer_release(info);
64105diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
64106index 88a60e0..6783cc2 100644
64107--- a/drivers/xen/sys-hypervisor.c
64108+++ b/drivers/xen/sys-hypervisor.c
64109@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
64110 return 0;
64111 }
64112
64113-static struct sysfs_ops hyp_sysfs_ops = {
64114+static const struct sysfs_ops hyp_sysfs_ops = {
64115 .show = hyp_sysfs_show,
64116 .store = hyp_sysfs_store,
64117 };
64118diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
64119index 18f74ec..3227009 100644
64120--- a/fs/9p/vfs_inode.c
64121+++ b/fs/9p/vfs_inode.c
64122@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
64123 static void
64124 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64125 {
64126- char *s = nd_get_link(nd);
64127+ const char *s = nd_get_link(nd);
64128
64129 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
64130 IS_ERR(s) ? "<error>" : s);
64131diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
64132index bb4cc5b..df5eaa0 100644
64133--- a/fs/Kconfig.binfmt
64134+++ b/fs/Kconfig.binfmt
64135@@ -86,7 +86,7 @@ config HAVE_AOUT
64136
64137 config BINFMT_AOUT
64138 tristate "Kernel support for a.out and ECOFF binaries"
64139- depends on HAVE_AOUT
64140+ depends on HAVE_AOUT && BROKEN
64141 ---help---
64142 A.out (Assembler.OUTput) is a set of formats for libraries and
64143 executables used in the earliest versions of UNIX. Linux used
64144diff --git a/fs/aio.c b/fs/aio.c
64145index 22a19ad..d484e5b 100644
64146--- a/fs/aio.c
64147+++ b/fs/aio.c
64148@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
64149 size += sizeof(struct io_event) * nr_events;
64150 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
64151
64152- if (nr_pages < 0)
64153+ if (nr_pages <= 0)
64154 return -EINVAL;
64155
64156 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
64157@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
64158 struct aio_timeout to;
64159 int retry = 0;
64160
64161+ pax_track_stack();
64162+
64163 /* needed to zero any padding within an entry (there shouldn't be
64164 * any, but C is fun!
64165 */
64166@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
64167 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
64168 {
64169 ssize_t ret;
64170+ struct iovec iovstack;
64171
64172 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
64173 kiocb->ki_nbytes, 1,
64174- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
64175+ &iovstack, &kiocb->ki_iovec);
64176 if (ret < 0)
64177 goto out;
64178
64179+ if (kiocb->ki_iovec == &iovstack) {
64180+ kiocb->ki_inline_vec = iovstack;
64181+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
64182+ }
64183 kiocb->ki_nr_segs = kiocb->ki_nbytes;
64184 kiocb->ki_cur_seg = 0;
64185 /* ki_nbytes/left now reflect bytes instead of segs */
64186diff --git a/fs/attr.c b/fs/attr.c
64187index 96d394b..33cf5b4 100644
64188--- a/fs/attr.c
64189+++ b/fs/attr.c
64190@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
64191 unsigned long limit;
64192
64193 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64194+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
64195 if (limit != RLIM_INFINITY && offset > limit)
64196 goto out_sig;
64197 if (offset > inode->i_sb->s_maxbytes)
64198diff --git a/fs/autofs/root.c b/fs/autofs/root.c
64199index 4a1401c..05eb5ca 100644
64200--- a/fs/autofs/root.c
64201+++ b/fs/autofs/root.c
64202@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
64203 set_bit(n,sbi->symlink_bitmap);
64204 sl = &sbi->symlink[n];
64205 sl->len = strlen(symname);
64206- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
64207+ slsize = sl->len+1;
64208+ sl->data = kmalloc(slsize, GFP_KERNEL);
64209 if (!sl->data) {
64210 clear_bit(n,sbi->symlink_bitmap);
64211 unlock_kernel();
64212diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
64213index b4ea829..e63ef18 100644
64214--- a/fs/autofs4/symlink.c
64215+++ b/fs/autofs4/symlink.c
64216@@ -15,7 +15,7 @@
64217 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
64218 {
64219 struct autofs_info *ino = autofs4_dentry_ino(dentry);
64220- nd_set_link(nd, (char *)ino->u.symlink);
64221+ nd_set_link(nd, ino->u.symlink);
64222 return NULL;
64223 }
64224
64225diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
64226index 136a0d6..a287331 100644
64227--- a/fs/autofs4/waitq.c
64228+++ b/fs/autofs4/waitq.c
64229@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
64230 {
64231 unsigned long sigpipe, flags;
64232 mm_segment_t fs;
64233- const char *data = (const char *)addr;
64234+ const char __user *data = (const char __force_user *)addr;
64235 ssize_t wr = 0;
64236
64237 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
64238diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
64239index 9158c07..3f06659 100644
64240--- a/fs/befs/linuxvfs.c
64241+++ b/fs/befs/linuxvfs.c
64242@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
64243 {
64244 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
64245 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
64246- char *link = nd_get_link(nd);
64247+ const char *link = nd_get_link(nd);
64248 if (!IS_ERR(link))
64249 kfree(link);
64250 }
64251diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
64252index 0133b5a..3710d09 100644
64253--- a/fs/binfmt_aout.c
64254+++ b/fs/binfmt_aout.c
64255@@ -16,6 +16,7 @@
64256 #include <linux/string.h>
64257 #include <linux/fs.h>
64258 #include <linux/file.h>
64259+#include <linux/security.h>
64260 #include <linux/stat.h>
64261 #include <linux/fcntl.h>
64262 #include <linux/ptrace.h>
64263@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64264 #endif
64265 # define START_STACK(u) (u.start_stack)
64266
64267+ memset(&dump, 0, sizeof(dump));
64268+
64269 fs = get_fs();
64270 set_fs(KERNEL_DS);
64271 has_dumped = 1;
64272@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64273
64274 /* If the size of the dump file exceeds the rlimit, then see what would happen
64275 if we wrote the stack, but not the data area. */
64276+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
64277 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
64278 dump.u_dsize = 0;
64279
64280 /* Make sure we have enough room to write the stack and data areas. */
64281+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
64282 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
64283 dump.u_ssize = 0;
64284
64285@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
64286 dump_size = dump.u_ssize << PAGE_SHIFT;
64287 DUMP_WRITE(dump_start,dump_size);
64288 }
64289-/* Finally dump the task struct. Not be used by gdb, but could be useful */
64290- set_fs(KERNEL_DS);
64291- DUMP_WRITE(current,sizeof(*current));
64292+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
64293 end_coredump:
64294 set_fs(fs);
64295 return has_dumped;
64296@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64297 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64298 if (rlim >= RLIM_INFINITY)
64299 rlim = ~0;
64300+
64301+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
64302 if (ex.a_data + ex.a_bss > rlim)
64303 return -ENOMEM;
64304
64305@@ -274,9 +279,37 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64306 current->mm->free_area_cache = current->mm->mmap_base;
64307 current->mm->cached_hole_size = 0;
64308
64309+ retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64310+ if (retval < 0) {
64311+ /* Someone check-me: is this error path enough? */
64312+ send_sig(SIGKILL, current, 0);
64313+ return retval;
64314+ }
64315+
64316 install_exec_creds(bprm);
64317 current->flags &= ~PF_FORKNOEXEC;
64318
64319+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64320+ current->mm->pax_flags = 0UL;
64321+#endif
64322+
64323+#ifdef CONFIG_PAX_PAGEEXEC
64324+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
64325+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
64326+
64327+#ifdef CONFIG_PAX_EMUTRAMP
64328+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
64329+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
64330+#endif
64331+
64332+#ifdef CONFIG_PAX_MPROTECT
64333+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
64334+ current->mm->pax_flags |= MF_PAX_MPROTECT;
64335+#endif
64336+
64337+ }
64338+#endif
64339+
64340 if (N_MAGIC(ex) == OMAGIC) {
64341 unsigned long text_addr, map_size;
64342 loff_t pos;
64343@@ -349,7 +382,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
64344
64345 down_write(&current->mm->mmap_sem);
64346 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
64347- PROT_READ | PROT_WRITE | PROT_EXEC,
64348+ PROT_READ | PROT_WRITE,
64349 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
64350 fd_offset + ex.a_text);
64351 up_write(&current->mm->mmap_sem);
64352@@ -367,13 +400,6 @@ beyond_if:
64353 return retval;
64354 }
64355
64356- retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
64357- if (retval < 0) {
64358- /* Someone check-me: is this error path enough? */
64359- send_sig(SIGKILL, current, 0);
64360- return retval;
64361- }
64362-
64363 current->mm->start_stack =
64364 (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
64365 #ifdef __alpha__
64366diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
64367index 1ed37ba..66794b9 100644
64368--- a/fs/binfmt_elf.c
64369+++ b/fs/binfmt_elf.c
64370@@ -31,6 +31,7 @@
64371 #include <linux/random.h>
64372 #include <linux/elf.h>
64373 #include <linux/utsname.h>
64374+#include <linux/xattr.h>
64375 #include <asm/uaccess.h>
64376 #include <asm/param.h>
64377 #include <asm/page.h>
64378@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
64379 #define elf_core_dump NULL
64380 #endif
64381
64382+#ifdef CONFIG_PAX_MPROTECT
64383+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
64384+#endif
64385+
64386 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
64387 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
64388 #else
64389@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
64390 .load_binary = load_elf_binary,
64391 .load_shlib = load_elf_library,
64392 .core_dump = elf_core_dump,
64393+
64394+#ifdef CONFIG_PAX_MPROTECT
64395+ .handle_mprotect= elf_handle_mprotect,
64396+#endif
64397+
64398 .min_coredump = ELF_EXEC_PAGESIZE,
64399 .hasvdso = 1
64400 };
64401@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
64402
64403 static int set_brk(unsigned long start, unsigned long end)
64404 {
64405+ unsigned long e = end;
64406+
64407 start = ELF_PAGEALIGN(start);
64408 end = ELF_PAGEALIGN(end);
64409 if (end > start) {
64410@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
64411 if (BAD_ADDR(addr))
64412 return addr;
64413 }
64414- current->mm->start_brk = current->mm->brk = end;
64415+ current->mm->start_brk = current->mm->brk = e;
64416 return 0;
64417 }
64418
64419@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
64420 elf_addr_t __user *u_rand_bytes;
64421 const char *k_platform = ELF_PLATFORM;
64422 const char *k_base_platform = ELF_BASE_PLATFORM;
64423- unsigned char k_rand_bytes[16];
64424+ u32 k_rand_bytes[4];
64425 int items;
64426 elf_addr_t *elf_info;
64427 int ei_index = 0;
64428 const struct cred *cred = current_cred();
64429 struct vm_area_struct *vma;
64430+ unsigned long saved_auxv[AT_VECTOR_SIZE];
64431+
64432+ pax_track_stack();
64433
64434 /*
64435 * In some cases (e.g. Hyper-Threading), we want to avoid L1
64436@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
64437 * Generate 16 random bytes for userspace PRNG seeding.
64438 */
64439 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
64440- u_rand_bytes = (elf_addr_t __user *)
64441- STACK_ALLOC(p, sizeof(k_rand_bytes));
64442+ srandom32(k_rand_bytes[0] ^ random32());
64443+ srandom32(k_rand_bytes[1] ^ random32());
64444+ srandom32(k_rand_bytes[2] ^ random32());
64445+ srandom32(k_rand_bytes[3] ^ random32());
64446+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
64447+ u_rand_bytes = (elf_addr_t __user *) p;
64448 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
64449 return -EFAULT;
64450
64451@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
64452 return -EFAULT;
64453 current->mm->env_end = p;
64454
64455+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
64456+
64457 /* Put the elf_info on the stack in the right place. */
64458 sp = (elf_addr_t __user *)envp + 1;
64459- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
64460+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
64461 return -EFAULT;
64462 return 0;
64463 }
64464@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
64465 {
64466 struct elf_phdr *elf_phdata;
64467 struct elf_phdr *eppnt;
64468- unsigned long load_addr = 0;
64469+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
64470 int load_addr_set = 0;
64471 unsigned long last_bss = 0, elf_bss = 0;
64472- unsigned long error = ~0UL;
64473+ unsigned long error = -EINVAL;
64474 unsigned long total_size;
64475 int retval, i, size;
64476
64477@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
64478 goto out_close;
64479 }
64480
64481+#ifdef CONFIG_PAX_SEGMEXEC
64482+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
64483+ pax_task_size = SEGMEXEC_TASK_SIZE;
64484+#endif
64485+
64486 eppnt = elf_phdata;
64487 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
64488 if (eppnt->p_type == PT_LOAD) {
64489@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
64490 k = load_addr + eppnt->p_vaddr;
64491 if (BAD_ADDR(k) ||
64492 eppnt->p_filesz > eppnt->p_memsz ||
64493- eppnt->p_memsz > TASK_SIZE ||
64494- TASK_SIZE - eppnt->p_memsz < k) {
64495+ eppnt->p_memsz > pax_task_size ||
64496+ pax_task_size - eppnt->p_memsz < k) {
64497 error = -ENOMEM;
64498 goto out_close;
64499 }
64500@@ -532,6 +558,351 @@ out:
64501 return error;
64502 }
64503
64504+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
64505+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
64506+{
64507+ unsigned long pax_flags = 0UL;
64508+
64509+#ifdef CONFIG_PAX_PT_PAX_FLAGS
64510+
64511+#ifdef CONFIG_PAX_PAGEEXEC
64512+ if (elf_phdata->p_flags & PF_PAGEEXEC)
64513+ pax_flags |= MF_PAX_PAGEEXEC;
64514+#endif
64515+
64516+#ifdef CONFIG_PAX_SEGMEXEC
64517+ if (elf_phdata->p_flags & PF_SEGMEXEC)
64518+ pax_flags |= MF_PAX_SEGMEXEC;
64519+#endif
64520+
64521+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
64522+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
64523+ if (nx_enabled)
64524+ pax_flags &= ~MF_PAX_SEGMEXEC;
64525+ else
64526+ pax_flags &= ~MF_PAX_PAGEEXEC;
64527+ }
64528+#endif
64529+
64530+#ifdef CONFIG_PAX_EMUTRAMP
64531+ if (elf_phdata->p_flags & PF_EMUTRAMP)
64532+ pax_flags |= MF_PAX_EMUTRAMP;
64533+#endif
64534+
64535+#ifdef CONFIG_PAX_MPROTECT
64536+ if (elf_phdata->p_flags & PF_MPROTECT)
64537+ pax_flags |= MF_PAX_MPROTECT;
64538+#endif
64539+
64540+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
64541+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
64542+ pax_flags |= MF_PAX_RANDMMAP;
64543+#endif
64544+
64545+#endif
64546+
64547+ return pax_flags;
64548+}
64549+
64550+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
64551+{
64552+ unsigned long pax_flags = 0UL;
64553+
64554+#ifdef CONFIG_PAX_PT_PAX_FLAGS
64555+
64556+#ifdef CONFIG_PAX_PAGEEXEC
64557+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
64558+ pax_flags |= MF_PAX_PAGEEXEC;
64559+#endif
64560+
64561+#ifdef CONFIG_PAX_SEGMEXEC
64562+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
64563+ pax_flags |= MF_PAX_SEGMEXEC;
64564+#endif
64565+
64566+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
64567+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
64568+ if (nx_enabled)
64569+ pax_flags &= ~MF_PAX_SEGMEXEC;
64570+ else
64571+ pax_flags &= ~MF_PAX_PAGEEXEC;
64572+ }
64573+#endif
64574+
64575+#ifdef CONFIG_PAX_EMUTRAMP
64576+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
64577+ pax_flags |= MF_PAX_EMUTRAMP;
64578+#endif
64579+
64580+#ifdef CONFIG_PAX_MPROTECT
64581+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
64582+ pax_flags |= MF_PAX_MPROTECT;
64583+#endif
64584+
64585+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
64586+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
64587+ pax_flags |= MF_PAX_RANDMMAP;
64588+#endif
64589+
64590+#endif
64591+
64592+ return pax_flags;
64593+}
64594+
64595+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
64596+{
64597+ unsigned long pax_flags = 0UL;
64598+
64599+#ifdef CONFIG_PAX_EI_PAX
64600+
64601+#ifdef CONFIG_PAX_PAGEEXEC
64602+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
64603+ pax_flags |= MF_PAX_PAGEEXEC;
64604+#endif
64605+
64606+#ifdef CONFIG_PAX_SEGMEXEC
64607+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
64608+ pax_flags |= MF_PAX_SEGMEXEC;
64609+#endif
64610+
64611+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
64612+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
64613+ if (nx_enabled)
64614+ pax_flags &= ~MF_PAX_SEGMEXEC;
64615+ else
64616+ pax_flags &= ~MF_PAX_PAGEEXEC;
64617+ }
64618+#endif
64619+
64620+#ifdef CONFIG_PAX_EMUTRAMP
64621+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
64622+ pax_flags |= MF_PAX_EMUTRAMP;
64623+#endif
64624+
64625+#ifdef CONFIG_PAX_MPROTECT
64626+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
64627+ pax_flags |= MF_PAX_MPROTECT;
64628+#endif
64629+
64630+#ifdef CONFIG_PAX_ASLR
64631+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
64632+ pax_flags |= MF_PAX_RANDMMAP;
64633+#endif
64634+
64635+#else
64636+
64637+#ifdef CONFIG_PAX_PAGEEXEC
64638+ pax_flags |= MF_PAX_PAGEEXEC;
64639+#endif
64640+
64641+#ifdef CONFIG_PAX_MPROTECT
64642+ pax_flags |= MF_PAX_MPROTECT;
64643+#endif
64644+
64645+#ifdef CONFIG_PAX_RANDMMAP
64646+ pax_flags |= MF_PAX_RANDMMAP;
64647+#endif
64648+
64649+#ifdef CONFIG_PAX_SEGMEXEC
64650+ if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
64651+ pax_flags &= ~MF_PAX_PAGEEXEC;
64652+ pax_flags |= MF_PAX_SEGMEXEC;
64653+ }
64654+#endif
64655+
64656+#endif
64657+
64658+ return pax_flags;
64659+}
64660+
64661+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
64662+{
64663+
64664+#ifdef CONFIG_PAX_PT_PAX_FLAGS
64665+ unsigned long i;
64666+
64667+ for (i = 0UL; i < elf_ex->e_phnum; i++)
64668+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
64669+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
64670+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
64671+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
64672+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
64673+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
64674+ return ~0UL;
64675+
64676+#ifdef CONFIG_PAX_SOFTMODE
64677+ if (pax_softmode)
64678+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
64679+ else
64680+#endif
64681+
64682+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
64683+ break;
64684+ }
64685+#endif
64686+
64687+ return ~0UL;
64688+}
64689+
64690+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
64691+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
64692+{
64693+ unsigned long pax_flags = 0UL;
64694+
64695+#ifdef CONFIG_PAX_PAGEEXEC
64696+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
64697+ pax_flags |= MF_PAX_PAGEEXEC;
64698+#endif
64699+
64700+#ifdef CONFIG_PAX_SEGMEXEC
64701+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
64702+ pax_flags |= MF_PAX_SEGMEXEC;
64703+#endif
64704+
64705+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
64706+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
64707+ if ((__supported_pte_mask & _PAGE_NX))
64708+ pax_flags &= ~MF_PAX_SEGMEXEC;
64709+ else
64710+ pax_flags &= ~MF_PAX_PAGEEXEC;
64711+ }
64712+#endif
64713+
64714+#ifdef CONFIG_PAX_EMUTRAMP
64715+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
64716+ pax_flags |= MF_PAX_EMUTRAMP;
64717+#endif
64718+
64719+#ifdef CONFIG_PAX_MPROTECT
64720+ if (pax_flags_softmode & MF_PAX_MPROTECT)
64721+ pax_flags |= MF_PAX_MPROTECT;
64722+#endif
64723+
64724+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
64725+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
64726+ pax_flags |= MF_PAX_RANDMMAP;
64727+#endif
64728+
64729+ return pax_flags;
64730+}
64731+
64732+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
64733+{
64734+ unsigned long pax_flags = 0UL;
64735+
64736+#ifdef CONFIG_PAX_PAGEEXEC
64737+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
64738+ pax_flags |= MF_PAX_PAGEEXEC;
64739+#endif
64740+
64741+#ifdef CONFIG_PAX_SEGMEXEC
64742+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
64743+ pax_flags |= MF_PAX_SEGMEXEC;
64744+#endif
64745+
64746+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
64747+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
64748+ if ((__supported_pte_mask & _PAGE_NX))
64749+ pax_flags &= ~MF_PAX_SEGMEXEC;
64750+ else
64751+ pax_flags &= ~MF_PAX_PAGEEXEC;
64752+ }
64753+#endif
64754+
64755+#ifdef CONFIG_PAX_EMUTRAMP
64756+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
64757+ pax_flags |= MF_PAX_EMUTRAMP;
64758+#endif
64759+
64760+#ifdef CONFIG_PAX_MPROTECT
64761+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
64762+ pax_flags |= MF_PAX_MPROTECT;
64763+#endif
64764+
64765+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
64766+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
64767+ pax_flags |= MF_PAX_RANDMMAP;
64768+#endif
64769+
64770+ return pax_flags;
64771+}
64772+#endif
64773+
64774+static unsigned long pax_parse_xattr_pax(struct file * const file)
64775+{
64776+
64777+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
64778+ ssize_t xattr_size, i;
64779+ unsigned char xattr_value[5];
64780+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
64781+
64782+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
64783+ if (xattr_size <= 0)
64784+ return ~0UL;
64785+
64786+ for (i = 0; i < xattr_size; i++)
64787+ switch (xattr_value[i]) {
64788+ default:
64789+ return ~0UL;
64790+
64791+#define parse_flag(option1, option2, flag) \
64792+ case option1: \
64793+ pax_flags_hardmode |= MF_PAX_##flag; \
64794+ break; \
64795+ case option2: \
64796+ pax_flags_softmode |= MF_PAX_##flag; \
64797+ break;
64798+
64799+ parse_flag('p', 'P', PAGEEXEC);
64800+ parse_flag('e', 'E', EMUTRAMP);
64801+ parse_flag('m', 'M', MPROTECT);
64802+ parse_flag('r', 'R', RANDMMAP);
64803+ parse_flag('s', 'S', SEGMEXEC);
64804+
64805+#undef parse_flag
64806+ }
64807+
64808+ if (pax_flags_hardmode & pax_flags_softmode)
64809+ return ~0UL;
64810+
64811+#ifdef CONFIG_PAX_SOFTMODE
64812+ if (pax_softmode)
64813+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
64814+ else
64815+#endif
64816+
64817+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
64818+#else
64819+ return ~0UL;
64820+#endif
64821+
64822+}
64823+
64824+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
64825+{
64826+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
64827+
64828+ pax_flags = pax_parse_ei_pax(elf_ex);
64829+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
64830+ xattr_pax_flags = pax_parse_xattr_pax(file);
64831+
64832+ if (pt_pax_flags == ~0UL)
64833+ pt_pax_flags = xattr_pax_flags;
64834+ else if (xattr_pax_flags == ~0UL)
64835+ xattr_pax_flags = pt_pax_flags;
64836+ if (pt_pax_flags != xattr_pax_flags)
64837+ return -EINVAL;
64838+ if (pt_pax_flags != ~0UL)
64839+ pax_flags = pt_pax_flags;
64840+
64841+ if (0 > pax_check_flags(&pax_flags))
64842+ return -EINVAL;
64843+
64844+ current->mm->pax_flags = pax_flags;
64845+ return 0;
64846+}
64847+#endif
64848+
64849 /*
64850 * These are the functions used to load ELF style executables and shared
64851 * libraries. There is no binary dependent code anywhere else.
64852@@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
64853 {
64854 unsigned int random_variable = 0;
64855
64856+#ifdef CONFIG_PAX_RANDUSTACK
64857+ if (randomize_va_space)
64858+ return stack_top - current->mm->delta_stack;
64859+#endif
64860+
64861 if ((current->flags & PF_RANDOMIZE) &&
64862 !(current->personality & ADDR_NO_RANDOMIZE)) {
64863 random_variable = get_random_int() & STACK_RND_MASK;
64864@@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
64865 unsigned long load_addr = 0, load_bias = 0;
64866 int load_addr_set = 0;
64867 char * elf_interpreter = NULL;
64868- unsigned long error;
64869+ unsigned long error = 0;
64870 struct elf_phdr *elf_ppnt, *elf_phdata;
64871 unsigned long elf_bss, elf_brk;
64872 int retval, i;
64873@@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
64874 unsigned long start_code, end_code, start_data, end_data;
64875 unsigned long reloc_func_desc = 0;
64876 int executable_stack = EXSTACK_DEFAULT;
64877- unsigned long def_flags = 0;
64878 struct {
64879 struct elfhdr elf_ex;
64880 struct elfhdr interp_elf_ex;
64881 } *loc;
64882+ unsigned long pax_task_size = TASK_SIZE;
64883
64884 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
64885 if (!loc) {
64886@@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
64887
64888 /* OK, This is the point of no return */
64889 current->flags &= ~PF_FORKNOEXEC;
64890- current->mm->def_flags = def_flags;
64891+
64892+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
64893+ current->mm->pax_flags = 0UL;
64894+#endif
64895+
64896+#ifdef CONFIG_PAX_DLRESOLVE
64897+ current->mm->call_dl_resolve = 0UL;
64898+#endif
64899+
64900+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
64901+ current->mm->call_syscall = 0UL;
64902+#endif
64903+
64904+#ifdef CONFIG_PAX_ASLR
64905+ current->mm->delta_mmap = 0UL;
64906+ current->mm->delta_stack = 0UL;
64907+#endif
64908+
64909+ current->mm->def_flags = 0;
64910+
64911+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
64912+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
64913+ send_sig(SIGKILL, current, 0);
64914+ goto out_free_dentry;
64915+ }
64916+#endif
64917+
64918+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64919+ pax_set_initial_flags(bprm);
64920+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
64921+ if (pax_set_initial_flags_func)
64922+ (pax_set_initial_flags_func)(bprm);
64923+#endif
64924+
64925+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
64926+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
64927+ current->mm->context.user_cs_limit = PAGE_SIZE;
64928+ current->mm->def_flags |= VM_PAGEEXEC;
64929+ }
64930+#endif
64931+
64932+#ifdef CONFIG_PAX_SEGMEXEC
64933+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
64934+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
64935+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
64936+ pax_task_size = SEGMEXEC_TASK_SIZE;
64937+ }
64938+#endif
64939+
64940+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
64941+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
64942+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
64943+ put_cpu();
64944+ }
64945+#endif
64946
64947 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
64948 may depend on the personality. */
64949 SET_PERSONALITY(loc->elf_ex);
64950+
64951+#ifdef CONFIG_PAX_ASLR
64952+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
64953+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
64954+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
64955+ }
64956+#endif
64957+
64958+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
64959+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
64960+ executable_stack = EXSTACK_DISABLE_X;
64961+ current->personality &= ~READ_IMPLIES_EXEC;
64962+ } else
64963+#endif
64964+
64965 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
64966 current->personality |= READ_IMPLIES_EXEC;
64967
64968@@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
64969 * might try to exec. This is because the brk will
64970 * follow the loader, and is not movable. */
64971 #ifdef CONFIG_X86
64972- load_bias = 0;
64973+ if (current->flags & PF_RANDOMIZE)
64974+ load_bias = 0;
64975+ else
64976+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
64977 #else
64978 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
64979 #endif
64980+
64981+#ifdef CONFIG_PAX_RANDMMAP
64982+ /* PaX: randomize base address at the default exe base if requested */
64983+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
64984+#ifdef CONFIG_SPARC64
64985+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
64986+#else
64987+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
64988+#endif
64989+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
64990+ elf_flags |= MAP_FIXED;
64991+ }
64992+#endif
64993+
64994 }
64995
64996 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
64997@@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
64998 * allowed task size. Note that p_filesz must always be
64999 * <= p_memsz so it is only necessary to check p_memsz.
65000 */
65001- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65002- elf_ppnt->p_memsz > TASK_SIZE ||
65003- TASK_SIZE - elf_ppnt->p_memsz < k) {
65004+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
65005+ elf_ppnt->p_memsz > pax_task_size ||
65006+ pax_task_size - elf_ppnt->p_memsz < k) {
65007 /* set_brk can never work. Avoid overflows. */
65008 send_sig(SIGKILL, current, 0);
65009 retval = -EINVAL;
65010@@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65011 start_data += load_bias;
65012 end_data += load_bias;
65013
65014+#ifdef CONFIG_PAX_RANDMMAP
65015+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
65016+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
65017+#endif
65018+
65019 /* Calling set_brk effectively mmaps the pages that we need
65020 * for the bss and break sections. We must do this before
65021 * mapping in the interpreter, to make sure it doesn't wind
65022@@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
65023 goto out_free_dentry;
65024 }
65025 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
65026- send_sig(SIGSEGV, current, 0);
65027- retval = -EFAULT; /* Nobody gets to see this, but.. */
65028- goto out_free_dentry;
65029+ /*
65030+ * This bss-zeroing can fail if the ELF
65031+ * file specifies odd protections. So
65032+ * we don't check the return value
65033+ */
65034 }
65035
65036 if (elf_interpreter) {
65037@@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
65038 unsigned long n = off;
65039 if (n > PAGE_SIZE)
65040 n = PAGE_SIZE;
65041- if (!dump_write(file, buf, n))
65042+ if (!dump_write(file, buf, n)) {
65043+ free_page((unsigned long)buf);
65044 return 0;
65045+ }
65046 off -= n;
65047 }
65048 free_page((unsigned long)buf);
65049@@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
65050 * Decide what to dump of a segment, part, all or none.
65051 */
65052 static unsigned long vma_dump_size(struct vm_area_struct *vma,
65053- unsigned long mm_flags)
65054+ unsigned long mm_flags, long signr)
65055 {
65056 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
65057
65058@@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
65059 if (vma->vm_file == NULL)
65060 return 0;
65061
65062- if (FILTER(MAPPED_PRIVATE))
65063+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
65064 goto whole;
65065
65066 /*
65067@@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
65068 #undef DUMP_WRITE
65069
65070 #define DUMP_WRITE(addr, nr) \
65071+ do { \
65072+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
65073 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
65074- goto end_coredump;
65075+ goto end_coredump; \
65076+ } while (0);
65077
65078 static void fill_elf_header(struct elfhdr *elf, int segs,
65079 u16 machine, u32 flags, u8 osabi)
65080@@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
65081 {
65082 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
65083 int i = 0;
65084- do
65085+ do {
65086 i += 2;
65087- while (auxv[i - 2] != AT_NULL);
65088+ } while (auxv[i - 2] != AT_NULL);
65089 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
65090 }
65091
65092@@ -1452,7 +1926,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
65093 for (i = 1; i < view->n; ++i) {
65094 const struct user_regset *regset = &view->regsets[i];
65095 do_thread_regset_writeback(t->task, regset);
65096- if (regset->core_note_type &&
65097+ if (regset->core_note_type && regset->get &&
65098 (!regset->active || regset->active(t->task, regset))) {
65099 int ret;
65100 size_t size = regset->n * regset->size;
65101@@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65102 phdr.p_offset = offset;
65103 phdr.p_vaddr = vma->vm_start;
65104 phdr.p_paddr = 0;
65105- phdr.p_filesz = vma_dump_size(vma, mm_flags);
65106+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
65107 phdr.p_memsz = vma->vm_end - vma->vm_start;
65108 offset += phdr.p_filesz;
65109 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
65110@@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65111 unsigned long addr;
65112 unsigned long end;
65113
65114- end = vma->vm_start + vma_dump_size(vma, mm_flags);
65115+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
65116
65117 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
65118 struct page *page;
65119@@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
65120 page = get_dump_page(addr);
65121 if (page) {
65122 void *kaddr = kmap(page);
65123+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
65124 stop = ((size += PAGE_SIZE) > limit) ||
65125 !dump_write(file, kaddr, PAGE_SIZE);
65126 kunmap(page);
65127@@ -2042,6 +2517,97 @@ out:
65128
65129 #endif /* USE_ELF_CORE_DUMP */
65130
65131+#ifdef CONFIG_PAX_MPROTECT
65132+/* PaX: non-PIC ELF libraries need relocations on their executable segments
65133+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
65134+ * we'll remove VM_MAYWRITE for good on RELRO segments.
65135+ *
65136+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
65137+ * basis because we want to allow the common case and not the special ones.
65138+ */
65139+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
65140+{
65141+ struct elfhdr elf_h;
65142+ struct elf_phdr elf_p;
65143+ unsigned long i;
65144+ unsigned long oldflags;
65145+ bool is_textrel_rw, is_textrel_rx, is_relro;
65146+
65147+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
65148+ return;
65149+
65150+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
65151+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
65152+
65153+#ifdef CONFIG_PAX_ELFRELOCS
65154+ /* possible TEXTREL */
65155+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
65156+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
65157+#else
65158+ is_textrel_rw = false;
65159+ is_textrel_rx = false;
65160+#endif
65161+
65162+ /* possible RELRO */
65163+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
65164+
65165+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
65166+ return;
65167+
65168+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
65169+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
65170+
65171+#ifdef CONFIG_PAX_ETEXECRELOCS
65172+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65173+#else
65174+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
65175+#endif
65176+
65177+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
65178+ !elf_check_arch(&elf_h) ||
65179+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
65180+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
65181+ return;
65182+
65183+ for (i = 0UL; i < elf_h.e_phnum; i++) {
65184+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
65185+ return;
65186+ switch (elf_p.p_type) {
65187+ case PT_DYNAMIC:
65188+ if (!is_textrel_rw && !is_textrel_rx)
65189+ continue;
65190+ i = 0UL;
65191+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
65192+ elf_dyn dyn;
65193+
65194+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
65195+ return;
65196+ if (dyn.d_tag == DT_NULL)
65197+ return;
65198+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
65199+ gr_log_textrel(vma);
65200+ if (is_textrel_rw)
65201+ vma->vm_flags |= VM_MAYWRITE;
65202+ else
65203+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
65204+ vma->vm_flags &= ~VM_MAYWRITE;
65205+ return;
65206+ }
65207+ i++;
65208+ }
65209+ return;
65210+
65211+ case PT_GNU_RELRO:
65212+ if (!is_relro)
65213+ continue;
65214+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
65215+ vma->vm_flags &= ~VM_MAYWRITE;
65216+ return;
65217+ }
65218+ }
65219+}
65220+#endif
65221+
65222 static int __init init_elf_binfmt(void)
65223 {
65224 return register_binfmt(&elf_format);
65225diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
65226index ca88c46..f155a60 100644
65227--- a/fs/binfmt_flat.c
65228+++ b/fs/binfmt_flat.c
65229@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
65230 realdatastart = (unsigned long) -ENOMEM;
65231 printk("Unable to allocate RAM for process data, errno %d\n",
65232 (int)-realdatastart);
65233+ down_write(&current->mm->mmap_sem);
65234 do_munmap(current->mm, textpos, text_len);
65235+ up_write(&current->mm->mmap_sem);
65236 ret = realdatastart;
65237 goto err;
65238 }
65239@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65240 }
65241 if (IS_ERR_VALUE(result)) {
65242 printk("Unable to read data+bss, errno %d\n", (int)-result);
65243+ down_write(&current->mm->mmap_sem);
65244 do_munmap(current->mm, textpos, text_len);
65245 do_munmap(current->mm, realdatastart, data_len + extra);
65246+ up_write(&current->mm->mmap_sem);
65247 ret = result;
65248 goto err;
65249 }
65250@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
65251 }
65252 if (IS_ERR_VALUE(result)) {
65253 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
65254+ down_write(&current->mm->mmap_sem);
65255 do_munmap(current->mm, textpos, text_len + data_len + extra +
65256 MAX_SHARED_LIBS * sizeof(unsigned long));
65257+ up_write(&current->mm->mmap_sem);
65258 ret = result;
65259 goto err;
65260 }
65261diff --git a/fs/bio.c b/fs/bio.c
65262index e696713..83de133 100644
65263--- a/fs/bio.c
65264+++ b/fs/bio.c
65265@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
65266
65267 i = 0;
65268 while (i < bio_slab_nr) {
65269- struct bio_slab *bslab = &bio_slabs[i];
65270+ bslab = &bio_slabs[i];
65271
65272 if (!bslab->slab && entry == -1)
65273 entry = i;
65274@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
65275 const int read = bio_data_dir(bio) == READ;
65276 struct bio_map_data *bmd = bio->bi_private;
65277 int i;
65278- char *p = bmd->sgvecs[0].iov_base;
65279+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
65280
65281 __bio_for_each_segment(bvec, bio, i, 0) {
65282 char *addr = page_address(bvec->bv_page);
65283diff --git a/fs/block_dev.c b/fs/block_dev.c
65284index e65efa2..04fae57 100644
65285--- a/fs/block_dev.c
65286+++ b/fs/block_dev.c
65287@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
65288 else if (bdev->bd_contains == bdev)
65289 res = 0; /* is a whole device which isn't held */
65290
65291- else if (bdev->bd_contains->bd_holder == bd_claim)
65292+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
65293 res = 0; /* is a partition of a device that is being partitioned */
65294 else if (bdev->bd_contains->bd_holder != NULL)
65295 res = -EBUSY; /* is a partition of a held device */
65296diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
65297index c4bc570..42acd8d 100644
65298--- a/fs/btrfs/ctree.c
65299+++ b/fs/btrfs/ctree.c
65300@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
65301 free_extent_buffer(buf);
65302 add_root_to_dirty_list(root);
65303 } else {
65304- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
65305- parent_start = parent->start;
65306- else
65307+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
65308+ if (parent)
65309+ parent_start = parent->start;
65310+ else
65311+ parent_start = 0;
65312+ } else
65313 parent_start = 0;
65314
65315 WARN_ON(trans->transid != btrfs_header_generation(parent));
65316@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
65317
65318 ret = 0;
65319 if (slot == 0) {
65320- struct btrfs_disk_key disk_key;
65321 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
65322 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
65323 }
65324diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
65325index f447188..59c17c5 100644
65326--- a/fs/btrfs/disk-io.c
65327+++ b/fs/btrfs/disk-io.c
65328@@ -39,7 +39,7 @@
65329 #include "tree-log.h"
65330 #include "free-space-cache.h"
65331
65332-static struct extent_io_ops btree_extent_io_ops;
65333+static const struct extent_io_ops btree_extent_io_ops;
65334 static void end_workqueue_fn(struct btrfs_work *work);
65335 static void free_fs_root(struct btrfs_root *root);
65336
65337@@ -2607,7 +2607,7 @@ out:
65338 return 0;
65339 }
65340
65341-static struct extent_io_ops btree_extent_io_ops = {
65342+static const struct extent_io_ops btree_extent_io_ops = {
65343 .write_cache_pages_lock_hook = btree_lock_page_hook,
65344 .readpage_end_io_hook = btree_readpage_end_io_hook,
65345 .submit_bio_hook = btree_submit_bio_hook,
65346diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
65347index 559f724..a026171 100644
65348--- a/fs/btrfs/extent-tree.c
65349+++ b/fs/btrfs/extent-tree.c
65350@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
65351 u64 group_start = group->key.objectid;
65352 new_extents = kmalloc(sizeof(*new_extents),
65353 GFP_NOFS);
65354+ if (!new_extents) {
65355+ ret = -ENOMEM;
65356+ goto out;
65357+ }
65358 nr_extents = 1;
65359 ret = get_new_locations(reloc_inode,
65360 extent_key,
65361diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
65362index 36de250..7ec75c7 100644
65363--- a/fs/btrfs/extent_io.h
65364+++ b/fs/btrfs/extent_io.h
65365@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
65366 struct bio *bio, int mirror_num,
65367 unsigned long bio_flags);
65368 struct extent_io_ops {
65369- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
65370+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
65371 u64 start, u64 end, int *page_started,
65372 unsigned long *nr_written);
65373- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
65374- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
65375+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
65376+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
65377 extent_submit_bio_hook_t *submit_bio_hook;
65378- int (*merge_bio_hook)(struct page *page, unsigned long offset,
65379+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
65380 size_t size, struct bio *bio,
65381 unsigned long bio_flags);
65382- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
65383- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
65384+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
65385+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
65386 u64 start, u64 end,
65387 struct extent_state *state);
65388- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
65389+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
65390 u64 start, u64 end,
65391 struct extent_state *state);
65392- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65393+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
65394 struct extent_state *state);
65395- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
65396+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
65397 struct extent_state *state, int uptodate);
65398- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
65399+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
65400 unsigned long old, unsigned long bits);
65401- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
65402+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
65403 unsigned long bits);
65404- int (*merge_extent_hook)(struct inode *inode,
65405+ int (* const merge_extent_hook)(struct inode *inode,
65406 struct extent_state *new,
65407 struct extent_state *other);
65408- int (*split_extent_hook)(struct inode *inode,
65409+ int (* const split_extent_hook)(struct inode *inode,
65410 struct extent_state *orig, u64 split);
65411- int (*write_cache_pages_lock_hook)(struct page *page);
65412+ int (* const write_cache_pages_lock_hook)(struct page *page);
65413 };
65414
65415 struct extent_io_tree {
65416@@ -88,7 +88,7 @@ struct extent_io_tree {
65417 u64 dirty_bytes;
65418 spinlock_t lock;
65419 spinlock_t buffer_lock;
65420- struct extent_io_ops *ops;
65421+ const struct extent_io_ops *ops;
65422 };
65423
65424 struct extent_state {
65425diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
65426index cb2849f..3718fb4 100644
65427--- a/fs/btrfs/free-space-cache.c
65428+++ b/fs/btrfs/free-space-cache.c
65429@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
65430
65431 while(1) {
65432 if (entry->bytes < bytes || entry->offset < min_start) {
65433- struct rb_node *node;
65434-
65435 node = rb_next(&entry->offset_index);
65436 if (!node)
65437 break;
65438@@ -1226,7 +1224,7 @@ again:
65439 */
65440 while (entry->bitmap || found_bitmap ||
65441 (!entry->bitmap && entry->bytes < min_bytes)) {
65442- struct rb_node *node = rb_next(&entry->offset_index);
65443+ node = rb_next(&entry->offset_index);
65444
65445 if (entry->bitmap && entry->bytes > bytes + empty_size) {
65446 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
65447diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
65448index e03a836..323837e 100644
65449--- a/fs/btrfs/inode.c
65450+++ b/fs/btrfs/inode.c
65451@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
65452 static const struct address_space_operations btrfs_aops;
65453 static const struct address_space_operations btrfs_symlink_aops;
65454 static const struct file_operations btrfs_dir_file_operations;
65455-static struct extent_io_ops btrfs_extent_io_ops;
65456+static const struct extent_io_ops btrfs_extent_io_ops;
65457
65458 static struct kmem_cache *btrfs_inode_cachep;
65459 struct kmem_cache *btrfs_trans_handle_cachep;
65460@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
65461 1, 0, NULL, GFP_NOFS);
65462 while (start < end) {
65463 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
65464+ BUG_ON(!async_cow);
65465 async_cow->inode = inode;
65466 async_cow->root = root;
65467 async_cow->locked_page = locked_page;
65468@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
65469 inline_size = btrfs_file_extent_inline_item_len(leaf,
65470 btrfs_item_nr(leaf, path->slots[0]));
65471 tmp = kmalloc(inline_size, GFP_NOFS);
65472+ if (!tmp)
65473+ return -ENOMEM;
65474 ptr = btrfs_file_extent_inline_start(item);
65475
65476 read_extent_buffer(leaf, tmp, ptr, inline_size);
65477@@ -5410,7 +5413,7 @@ fail:
65478 return -ENOMEM;
65479 }
65480
65481-static int btrfs_getattr(struct vfsmount *mnt,
65482+int btrfs_getattr(struct vfsmount *mnt,
65483 struct dentry *dentry, struct kstat *stat)
65484 {
65485 struct inode *inode = dentry->d_inode;
65486@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
65487 return 0;
65488 }
65489
65490+EXPORT_SYMBOL(btrfs_getattr);
65491+
65492+dev_t get_btrfs_dev_from_inode(struct inode *inode)
65493+{
65494+ return BTRFS_I(inode)->root->anon_super.s_dev;
65495+}
65496+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
65497+
65498 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
65499 struct inode *new_dir, struct dentry *new_dentry)
65500 {
65501@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
65502 .fsync = btrfs_sync_file,
65503 };
65504
65505-static struct extent_io_ops btrfs_extent_io_ops = {
65506+static const struct extent_io_ops btrfs_extent_io_ops = {
65507 .fill_delalloc = run_delalloc_range,
65508 .submit_bio_hook = btrfs_submit_bio_hook,
65509 .merge_bio_hook = btrfs_merge_bio_hook,
65510diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
65511index ab7ab53..94e0781 100644
65512--- a/fs/btrfs/relocation.c
65513+++ b/fs/btrfs/relocation.c
65514@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
65515 }
65516 spin_unlock(&rc->reloc_root_tree.lock);
65517
65518- BUG_ON((struct btrfs_root *)node->data != root);
65519+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
65520
65521 if (!del) {
65522 spin_lock(&rc->reloc_root_tree.lock);
65523diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
65524index a240b6f..4ce16ef 100644
65525--- a/fs/btrfs/sysfs.c
65526+++ b/fs/btrfs/sysfs.c
65527@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
65528 complete(&root->kobj_unregister);
65529 }
65530
65531-static struct sysfs_ops btrfs_super_attr_ops = {
65532+static const struct sysfs_ops btrfs_super_attr_ops = {
65533 .show = btrfs_super_attr_show,
65534 .store = btrfs_super_attr_store,
65535 };
65536
65537-static struct sysfs_ops btrfs_root_attr_ops = {
65538+static const struct sysfs_ops btrfs_root_attr_ops = {
65539 .show = btrfs_root_attr_show,
65540 .store = btrfs_root_attr_store,
65541 };
65542diff --git a/fs/buffer.c b/fs/buffer.c
65543index 6fa5302..395d9f6 100644
65544--- a/fs/buffer.c
65545+++ b/fs/buffer.c
65546@@ -25,6 +25,7 @@
65547 #include <linux/percpu.h>
65548 #include <linux/slab.h>
65549 #include <linux/capability.h>
65550+#include <linux/security.h>
65551 #include <linux/blkdev.h>
65552 #include <linux/file.h>
65553 #include <linux/quotaops.h>
65554diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
65555index 3797e00..ce776f6 100644
65556--- a/fs/cachefiles/bind.c
65557+++ b/fs/cachefiles/bind.c
65558@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
65559 args);
65560
65561 /* start by checking things over */
65562- ASSERT(cache->fstop_percent >= 0 &&
65563- cache->fstop_percent < cache->fcull_percent &&
65564+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
65565 cache->fcull_percent < cache->frun_percent &&
65566 cache->frun_percent < 100);
65567
65568- ASSERT(cache->bstop_percent >= 0 &&
65569- cache->bstop_percent < cache->bcull_percent &&
65570+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
65571 cache->bcull_percent < cache->brun_percent &&
65572 cache->brun_percent < 100);
65573
65574diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
65575index 4618516..bb30d01 100644
65576--- a/fs/cachefiles/daemon.c
65577+++ b/fs/cachefiles/daemon.c
65578@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
65579 if (test_bit(CACHEFILES_DEAD, &cache->flags))
65580 return -EIO;
65581
65582- if (datalen < 0 || datalen > PAGE_SIZE - 1)
65583+ if (datalen > PAGE_SIZE - 1)
65584 return -EOPNOTSUPP;
65585
65586 /* drag the command string into the kernel so we can parse it */
65587@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
65588 if (args[0] != '%' || args[1] != '\0')
65589 return -EINVAL;
65590
65591- if (fstop < 0 || fstop >= cache->fcull_percent)
65592+ if (fstop >= cache->fcull_percent)
65593 return cachefiles_daemon_range_error(cache, args);
65594
65595 cache->fstop_percent = fstop;
65596@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
65597 if (args[0] != '%' || args[1] != '\0')
65598 return -EINVAL;
65599
65600- if (bstop < 0 || bstop >= cache->bcull_percent)
65601+ if (bstop >= cache->bcull_percent)
65602 return cachefiles_daemon_range_error(cache, args);
65603
65604 cache->bstop_percent = bstop;
65605diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
65606index f7c255f..fcd61de 100644
65607--- a/fs/cachefiles/internal.h
65608+++ b/fs/cachefiles/internal.h
65609@@ -56,7 +56,7 @@ struct cachefiles_cache {
65610 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
65611 struct rb_root active_nodes; /* active nodes (can't be culled) */
65612 rwlock_t active_lock; /* lock for active_nodes */
65613- atomic_t gravecounter; /* graveyard uniquifier */
65614+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
65615 unsigned frun_percent; /* when to stop culling (% files) */
65616 unsigned fcull_percent; /* when to start culling (% files) */
65617 unsigned fstop_percent; /* when to stop allocating (% files) */
65618@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
65619 * proc.c
65620 */
65621 #ifdef CONFIG_CACHEFILES_HISTOGRAM
65622-extern atomic_t cachefiles_lookup_histogram[HZ];
65623-extern atomic_t cachefiles_mkdir_histogram[HZ];
65624-extern atomic_t cachefiles_create_histogram[HZ];
65625+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
65626+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
65627+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
65628
65629 extern int __init cachefiles_proc_init(void);
65630 extern void cachefiles_proc_cleanup(void);
65631 static inline
65632-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
65633+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
65634 {
65635 unsigned long jif = jiffies - start_jif;
65636 if (jif >= HZ)
65637 jif = HZ - 1;
65638- atomic_inc(&histogram[jif]);
65639+ atomic_inc_unchecked(&histogram[jif]);
65640 }
65641
65642 #else
65643diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
65644index 14ac480..a62766c 100644
65645--- a/fs/cachefiles/namei.c
65646+++ b/fs/cachefiles/namei.c
65647@@ -250,7 +250,7 @@ try_again:
65648 /* first step is to make up a grave dentry in the graveyard */
65649 sprintf(nbuffer, "%08x%08x",
65650 (uint32_t) get_seconds(),
65651- (uint32_t) atomic_inc_return(&cache->gravecounter));
65652+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
65653
65654 /* do the multiway lock magic */
65655 trap = lock_rename(cache->graveyard, dir);
65656diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
65657index eccd339..4c1d995 100644
65658--- a/fs/cachefiles/proc.c
65659+++ b/fs/cachefiles/proc.c
65660@@ -14,9 +14,9 @@
65661 #include <linux/seq_file.h>
65662 #include "internal.h"
65663
65664-atomic_t cachefiles_lookup_histogram[HZ];
65665-atomic_t cachefiles_mkdir_histogram[HZ];
65666-atomic_t cachefiles_create_histogram[HZ];
65667+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
65668+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
65669+atomic_unchecked_t cachefiles_create_histogram[HZ];
65670
65671 /*
65672 * display the latency histogram
65673@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
65674 return 0;
65675 default:
65676 index = (unsigned long) v - 3;
65677- x = atomic_read(&cachefiles_lookup_histogram[index]);
65678- y = atomic_read(&cachefiles_mkdir_histogram[index]);
65679- z = atomic_read(&cachefiles_create_histogram[index]);
65680+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
65681+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
65682+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
65683 if (x == 0 && y == 0 && z == 0)
65684 return 0;
65685
65686diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
65687index a6c8c6f..5cf8517 100644
65688--- a/fs/cachefiles/rdwr.c
65689+++ b/fs/cachefiles/rdwr.c
65690@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
65691 old_fs = get_fs();
65692 set_fs(KERNEL_DS);
65693 ret = file->f_op->write(
65694- file, (const void __user *) data, len, &pos);
65695+ file, (const void __force_user *) data, len, &pos);
65696 set_fs(old_fs);
65697 kunmap(page);
65698 if (ret != len)
65699diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
65700index 42cec2a..2aba466 100644
65701--- a/fs/cifs/cifs_debug.c
65702+++ b/fs/cifs/cifs_debug.c
65703@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
65704 tcon = list_entry(tmp3,
65705 struct cifsTconInfo,
65706 tcon_list);
65707- atomic_set(&tcon->num_smbs_sent, 0);
65708- atomic_set(&tcon->num_writes, 0);
65709- atomic_set(&tcon->num_reads, 0);
65710- atomic_set(&tcon->num_oplock_brks, 0);
65711- atomic_set(&tcon->num_opens, 0);
65712- atomic_set(&tcon->num_posixopens, 0);
65713- atomic_set(&tcon->num_posixmkdirs, 0);
65714- atomic_set(&tcon->num_closes, 0);
65715- atomic_set(&tcon->num_deletes, 0);
65716- atomic_set(&tcon->num_mkdirs, 0);
65717- atomic_set(&tcon->num_rmdirs, 0);
65718- atomic_set(&tcon->num_renames, 0);
65719- atomic_set(&tcon->num_t2renames, 0);
65720- atomic_set(&tcon->num_ffirst, 0);
65721- atomic_set(&tcon->num_fnext, 0);
65722- atomic_set(&tcon->num_fclose, 0);
65723- atomic_set(&tcon->num_hardlinks, 0);
65724- atomic_set(&tcon->num_symlinks, 0);
65725- atomic_set(&tcon->num_locks, 0);
65726+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
65727+ atomic_set_unchecked(&tcon->num_writes, 0);
65728+ atomic_set_unchecked(&tcon->num_reads, 0);
65729+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
65730+ atomic_set_unchecked(&tcon->num_opens, 0);
65731+ atomic_set_unchecked(&tcon->num_posixopens, 0);
65732+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
65733+ atomic_set_unchecked(&tcon->num_closes, 0);
65734+ atomic_set_unchecked(&tcon->num_deletes, 0);
65735+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
65736+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
65737+ atomic_set_unchecked(&tcon->num_renames, 0);
65738+ atomic_set_unchecked(&tcon->num_t2renames, 0);
65739+ atomic_set_unchecked(&tcon->num_ffirst, 0);
65740+ atomic_set_unchecked(&tcon->num_fnext, 0);
65741+ atomic_set_unchecked(&tcon->num_fclose, 0);
65742+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
65743+ atomic_set_unchecked(&tcon->num_symlinks, 0);
65744+ atomic_set_unchecked(&tcon->num_locks, 0);
65745 }
65746 }
65747 }
65748@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
65749 if (tcon->need_reconnect)
65750 seq_puts(m, "\tDISCONNECTED ");
65751 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
65752- atomic_read(&tcon->num_smbs_sent),
65753- atomic_read(&tcon->num_oplock_brks));
65754+ atomic_read_unchecked(&tcon->num_smbs_sent),
65755+ atomic_read_unchecked(&tcon->num_oplock_brks));
65756 seq_printf(m, "\nReads: %d Bytes: %lld",
65757- atomic_read(&tcon->num_reads),
65758+ atomic_read_unchecked(&tcon->num_reads),
65759 (long long)(tcon->bytes_read));
65760 seq_printf(m, "\nWrites: %d Bytes: %lld",
65761- atomic_read(&tcon->num_writes),
65762+ atomic_read_unchecked(&tcon->num_writes),
65763 (long long)(tcon->bytes_written));
65764 seq_printf(m, "\nFlushes: %d",
65765- atomic_read(&tcon->num_flushes));
65766+ atomic_read_unchecked(&tcon->num_flushes));
65767 seq_printf(m, "\nLocks: %d HardLinks: %d "
65768 "Symlinks: %d",
65769- atomic_read(&tcon->num_locks),
65770- atomic_read(&tcon->num_hardlinks),
65771- atomic_read(&tcon->num_symlinks));
65772+ atomic_read_unchecked(&tcon->num_locks),
65773+ atomic_read_unchecked(&tcon->num_hardlinks),
65774+ atomic_read_unchecked(&tcon->num_symlinks));
65775 seq_printf(m, "\nOpens: %d Closes: %d "
65776 "Deletes: %d",
65777- atomic_read(&tcon->num_opens),
65778- atomic_read(&tcon->num_closes),
65779- atomic_read(&tcon->num_deletes));
65780+ atomic_read_unchecked(&tcon->num_opens),
65781+ atomic_read_unchecked(&tcon->num_closes),
65782+ atomic_read_unchecked(&tcon->num_deletes));
65783 seq_printf(m, "\nPosix Opens: %d "
65784 "Posix Mkdirs: %d",
65785- atomic_read(&tcon->num_posixopens),
65786- atomic_read(&tcon->num_posixmkdirs));
65787+ atomic_read_unchecked(&tcon->num_posixopens),
65788+ atomic_read_unchecked(&tcon->num_posixmkdirs));
65789 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
65790- atomic_read(&tcon->num_mkdirs),
65791- atomic_read(&tcon->num_rmdirs));
65792+ atomic_read_unchecked(&tcon->num_mkdirs),
65793+ atomic_read_unchecked(&tcon->num_rmdirs));
65794 seq_printf(m, "\nRenames: %d T2 Renames %d",
65795- atomic_read(&tcon->num_renames),
65796- atomic_read(&tcon->num_t2renames));
65797+ atomic_read_unchecked(&tcon->num_renames),
65798+ atomic_read_unchecked(&tcon->num_t2renames));
65799 seq_printf(m, "\nFindFirst: %d FNext %d "
65800 "FClose %d",
65801- atomic_read(&tcon->num_ffirst),
65802- atomic_read(&tcon->num_fnext),
65803- atomic_read(&tcon->num_fclose));
65804+ atomic_read_unchecked(&tcon->num_ffirst),
65805+ atomic_read_unchecked(&tcon->num_fnext),
65806+ atomic_read_unchecked(&tcon->num_fclose));
65807 }
65808 }
65809 }
65810diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
65811index 1445407..68cb0dc 100644
65812--- a/fs/cifs/cifsfs.c
65813+++ b/fs/cifs/cifsfs.c
65814@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
65815 cifs_req_cachep = kmem_cache_create("cifs_request",
65816 CIFSMaxBufSize +
65817 MAX_CIFS_HDR_SIZE, 0,
65818- SLAB_HWCACHE_ALIGN, NULL);
65819+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
65820 if (cifs_req_cachep == NULL)
65821 return -ENOMEM;
65822
65823@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
65824 efficient to alloc 1 per page off the slab compared to 17K (5page)
65825 alloc of large cifs buffers even when page debugging is on */
65826 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
65827- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
65828+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
65829 NULL);
65830 if (cifs_sm_req_cachep == NULL) {
65831 mempool_destroy(cifs_req_poolp);
65832@@ -991,8 +991,8 @@ init_cifs(void)
65833 atomic_set(&bufAllocCount, 0);
65834 atomic_set(&smBufAllocCount, 0);
65835 #ifdef CONFIG_CIFS_STATS2
65836- atomic_set(&totBufAllocCount, 0);
65837- atomic_set(&totSmBufAllocCount, 0);
65838+ atomic_set_unchecked(&totBufAllocCount, 0);
65839+ atomic_set_unchecked(&totSmBufAllocCount, 0);
65840 #endif /* CONFIG_CIFS_STATS2 */
65841
65842 atomic_set(&midCount, 0);
65843diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
65844index e29581e..1c22bab 100644
65845--- a/fs/cifs/cifsglob.h
65846+++ b/fs/cifs/cifsglob.h
65847@@ -252,28 +252,28 @@ struct cifsTconInfo {
65848 __u16 Flags; /* optional support bits */
65849 enum statusEnum tidStatus;
65850 #ifdef CONFIG_CIFS_STATS
65851- atomic_t num_smbs_sent;
65852- atomic_t num_writes;
65853- atomic_t num_reads;
65854- atomic_t num_flushes;
65855- atomic_t num_oplock_brks;
65856- atomic_t num_opens;
65857- atomic_t num_closes;
65858- atomic_t num_deletes;
65859- atomic_t num_mkdirs;
65860- atomic_t num_posixopens;
65861- atomic_t num_posixmkdirs;
65862- atomic_t num_rmdirs;
65863- atomic_t num_renames;
65864- atomic_t num_t2renames;
65865- atomic_t num_ffirst;
65866- atomic_t num_fnext;
65867- atomic_t num_fclose;
65868- atomic_t num_hardlinks;
65869- atomic_t num_symlinks;
65870- atomic_t num_locks;
65871- atomic_t num_acl_get;
65872- atomic_t num_acl_set;
65873+ atomic_unchecked_t num_smbs_sent;
65874+ atomic_unchecked_t num_writes;
65875+ atomic_unchecked_t num_reads;
65876+ atomic_unchecked_t num_flushes;
65877+ atomic_unchecked_t num_oplock_brks;
65878+ atomic_unchecked_t num_opens;
65879+ atomic_unchecked_t num_closes;
65880+ atomic_unchecked_t num_deletes;
65881+ atomic_unchecked_t num_mkdirs;
65882+ atomic_unchecked_t num_posixopens;
65883+ atomic_unchecked_t num_posixmkdirs;
65884+ atomic_unchecked_t num_rmdirs;
65885+ atomic_unchecked_t num_renames;
65886+ atomic_unchecked_t num_t2renames;
65887+ atomic_unchecked_t num_ffirst;
65888+ atomic_unchecked_t num_fnext;
65889+ atomic_unchecked_t num_fclose;
65890+ atomic_unchecked_t num_hardlinks;
65891+ atomic_unchecked_t num_symlinks;
65892+ atomic_unchecked_t num_locks;
65893+ atomic_unchecked_t num_acl_get;
65894+ atomic_unchecked_t num_acl_set;
65895 #ifdef CONFIG_CIFS_STATS2
65896 unsigned long long time_writes;
65897 unsigned long long time_reads;
65898@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
65899 }
65900
65901 #ifdef CONFIG_CIFS_STATS
65902-#define cifs_stats_inc atomic_inc
65903+#define cifs_stats_inc atomic_inc_unchecked
65904
65905 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
65906 unsigned int bytes)
65907@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
65908 /* Various Debug counters */
65909 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
65910 #ifdef CONFIG_CIFS_STATS2
65911-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
65912-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
65913+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
65914+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
65915 #endif
65916 GLOBAL_EXTERN atomic_t smBufAllocCount;
65917 GLOBAL_EXTERN atomic_t midCount;
65918diff --git a/fs/cifs/link.c b/fs/cifs/link.c
65919index fc1e048..28b3441 100644
65920--- a/fs/cifs/link.c
65921+++ b/fs/cifs/link.c
65922@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
65923
65924 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
65925 {
65926- char *p = nd_get_link(nd);
65927+ const char *p = nd_get_link(nd);
65928 if (!IS_ERR(p))
65929 kfree(p);
65930 }
65931diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
65932index 95b82e8..12a538d 100644
65933--- a/fs/cifs/misc.c
65934+++ b/fs/cifs/misc.c
65935@@ -155,7 +155,7 @@ cifs_buf_get(void)
65936 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
65937 atomic_inc(&bufAllocCount);
65938 #ifdef CONFIG_CIFS_STATS2
65939- atomic_inc(&totBufAllocCount);
65940+ atomic_inc_unchecked(&totBufAllocCount);
65941 #endif /* CONFIG_CIFS_STATS2 */
65942 }
65943
65944@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
65945 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
65946 atomic_inc(&smBufAllocCount);
65947 #ifdef CONFIG_CIFS_STATS2
65948- atomic_inc(&totSmBufAllocCount);
65949+ atomic_inc_unchecked(&totSmBufAllocCount);
65950 #endif /* CONFIG_CIFS_STATS2 */
65951
65952 }
65953diff --git a/fs/coda/cache.c b/fs/coda/cache.c
65954index a5bf577..6d19845 100644
65955--- a/fs/coda/cache.c
65956+++ b/fs/coda/cache.c
65957@@ -24,14 +24,14 @@
65958 #include <linux/coda_fs_i.h>
65959 #include <linux/coda_cache.h>
65960
65961-static atomic_t permission_epoch = ATOMIC_INIT(0);
65962+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
65963
65964 /* replace or extend an acl cache hit */
65965 void coda_cache_enter(struct inode *inode, int mask)
65966 {
65967 struct coda_inode_info *cii = ITOC(inode);
65968
65969- cii->c_cached_epoch = atomic_read(&permission_epoch);
65970+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
65971 if (cii->c_uid != current_fsuid()) {
65972 cii->c_uid = current_fsuid();
65973 cii->c_cached_perm = mask;
65974@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
65975 void coda_cache_clear_inode(struct inode *inode)
65976 {
65977 struct coda_inode_info *cii = ITOC(inode);
65978- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
65979+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
65980 }
65981
65982 /* remove all acl caches */
65983 void coda_cache_clear_all(struct super_block *sb)
65984 {
65985- atomic_inc(&permission_epoch);
65986+ atomic_inc_unchecked(&permission_epoch);
65987 }
65988
65989
65990@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
65991
65992 hit = (mask & cii->c_cached_perm) == mask &&
65993 cii->c_uid == current_fsuid() &&
65994- cii->c_cached_epoch == atomic_read(&permission_epoch);
65995+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
65996
65997 return hit;
65998 }
65999diff --git a/fs/compat.c b/fs/compat.c
66000index d1e2411..9a958d2 100644
66001--- a/fs/compat.c
66002+++ b/fs/compat.c
66003@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
66004 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
66005 {
66006 compat_ino_t ino = stat->ino;
66007- typeof(ubuf->st_uid) uid = 0;
66008- typeof(ubuf->st_gid) gid = 0;
66009+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
66010+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
66011 int err;
66012
66013 SET_UID(uid, stat->uid);
66014@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
66015
66016 set_fs(KERNEL_DS);
66017 /* The __user pointer cast is valid because of the set_fs() */
66018- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
66019+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
66020 set_fs(oldfs);
66021 /* truncating is ok because it's a user address */
66022 if (!ret)
66023@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
66024
66025 struct compat_readdir_callback {
66026 struct compat_old_linux_dirent __user *dirent;
66027+ struct file * file;
66028 int result;
66029 };
66030
66031@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
66032 buf->result = -EOVERFLOW;
66033 return -EOVERFLOW;
66034 }
66035+
66036+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66037+ return 0;
66038+
66039 buf->result++;
66040 dirent = buf->dirent;
66041 if (!access_ok(VERIFY_WRITE, dirent,
66042@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
66043
66044 buf.result = 0;
66045 buf.dirent = dirent;
66046+ buf.file = file;
66047
66048 error = vfs_readdir(file, compat_fillonedir, &buf);
66049 if (buf.result)
66050@@ -899,6 +905,7 @@ struct compat_linux_dirent {
66051 struct compat_getdents_callback {
66052 struct compat_linux_dirent __user *current_dir;
66053 struct compat_linux_dirent __user *previous;
66054+ struct file * file;
66055 int count;
66056 int error;
66057 };
66058@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
66059 buf->error = -EOVERFLOW;
66060 return -EOVERFLOW;
66061 }
66062+
66063+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66064+ return 0;
66065+
66066 dirent = buf->previous;
66067 if (dirent) {
66068 if (__put_user(offset, &dirent->d_off))
66069@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
66070 buf.previous = NULL;
66071 buf.count = count;
66072 buf.error = 0;
66073+ buf.file = file;
66074
66075 error = vfs_readdir(file, compat_filldir, &buf);
66076 if (error >= 0)
66077@@ -987,6 +999,7 @@ out:
66078 struct compat_getdents_callback64 {
66079 struct linux_dirent64 __user *current_dir;
66080 struct linux_dirent64 __user *previous;
66081+ struct file * file;
66082 int count;
66083 int error;
66084 };
66085@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
66086 buf->error = -EINVAL; /* only used if we fail.. */
66087 if (reclen > buf->count)
66088 return -EINVAL;
66089+
66090+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
66091+ return 0;
66092+
66093 dirent = buf->previous;
66094
66095 if (dirent) {
66096@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
66097 buf.previous = NULL;
66098 buf.count = count;
66099 buf.error = 0;
66100+ buf.file = file;
66101
66102 error = vfs_readdir(file, compat_filldir64, &buf);
66103 if (error >= 0)
66104 error = buf.error;
66105 lastdirent = buf.previous;
66106 if (lastdirent) {
66107- typeof(lastdirent->d_off) d_off = file->f_pos;
66108+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
66109 if (__put_user_unaligned(d_off, &lastdirent->d_off))
66110 error = -EFAULT;
66111 else
66112@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
66113 * verify all the pointers
66114 */
66115 ret = -EINVAL;
66116- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
66117+ if (nr_segs > UIO_MAXIOV)
66118 goto out;
66119 if (!file->f_op)
66120 goto out;
66121@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
66122 compat_uptr_t __user *envp,
66123 struct pt_regs * regs)
66124 {
66125+#ifdef CONFIG_GRKERNSEC
66126+ struct file *old_exec_file;
66127+ struct acl_subject_label *old_acl;
66128+ struct rlimit old_rlim[RLIM_NLIMITS];
66129+#endif
66130 struct linux_binprm *bprm;
66131 struct file *file;
66132 struct files_struct *displaced;
66133 bool clear_in_exec;
66134 int retval;
66135+ const struct cred *cred = current_cred();
66136+
66137+ /*
66138+ * We move the actual failure in case of RLIMIT_NPROC excess from
66139+ * set*uid() to execve() because too many poorly written programs
66140+ * don't check setuid() return code. Here we additionally recheck
66141+ * whether NPROC limit is still exceeded.
66142+ */
66143+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
66144+
66145+ if ((current->flags & PF_NPROC_EXCEEDED) &&
66146+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
66147+ retval = -EAGAIN;
66148+ goto out_ret;
66149+ }
66150+
66151+ /* We're below the limit (still or again), so we don't want to make
66152+ * further execve() calls fail. */
66153+ current->flags &= ~PF_NPROC_EXCEEDED;
66154
66155 retval = unshare_files(&displaced);
66156 if (retval)
66157@@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
66158 if (IS_ERR(file))
66159 goto out_unmark;
66160
66161+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
66162+ retval = -EPERM;
66163+ goto out_file;
66164+ }
66165+
66166 sched_exec();
66167
66168 bprm->file = file;
66169 bprm->filename = filename;
66170 bprm->interp = filename;
66171
66172+ if (gr_process_user_ban()) {
66173+ retval = -EPERM;
66174+ goto out_file;
66175+ }
66176+
66177+ retval = -EACCES;
66178+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
66179+ goto out_file;
66180+
66181 retval = bprm_mm_init(bprm);
66182 if (retval)
66183 goto out_file;
66184@@ -1515,24 +1571,63 @@ int compat_do_execve(char * filename,
66185 if (retval < 0)
66186 goto out;
66187
66188+#ifdef CONFIG_GRKERNSEC
66189+ old_acl = current->acl;
66190+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
66191+ old_exec_file = current->exec_file;
66192+ get_file(file);
66193+ current->exec_file = file;
66194+#endif
66195+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66196+ /* limit suid stack to 8MB
66197+ we saved the old limits above and will restore them if this exec fails
66198+ */
66199+ if ((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid()))
66200+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
66201+#endif
66202+
66203+ if (!gr_tpe_allow(file)) {
66204+ retval = -EACCES;
66205+ goto out_fail;
66206+ }
66207+
66208+ if (gr_check_crash_exec(file)) {
66209+ retval = -EACCES;
66210+ goto out_fail;
66211+ }
66212+
66213+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
66214+ bprm->unsafe);
66215+ if (retval < 0)
66216+ goto out_fail;
66217+
66218 retval = copy_strings_kernel(1, &bprm->filename, bprm);
66219 if (retval < 0)
66220- goto out;
66221+ goto out_fail;
66222
66223 bprm->exec = bprm->p;
66224 retval = compat_copy_strings(bprm->envc, envp, bprm);
66225 if (retval < 0)
66226- goto out;
66227+ goto out_fail;
66228
66229 retval = compat_copy_strings(bprm->argc, argv, bprm);
66230 if (retval < 0)
66231- goto out;
66232+ goto out_fail;
66233+
66234+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
66235+
66236+ gr_handle_exec_args_compat(bprm, argv);
66237
66238 retval = search_binary_handler(bprm, regs);
66239 if (retval < 0)
66240- goto out;
66241+ goto out_fail;
66242+#ifdef CONFIG_GRKERNSEC
66243+ if (old_exec_file)
66244+ fput(old_exec_file);
66245+#endif
66246
66247 /* execve succeeded */
66248+ increment_exec_counter();
66249 current->fs->in_exec = 0;
66250 current->in_execve = 0;
66251 acct_update_integrals(current);
66252@@ -1541,6 +1636,14 @@ int compat_do_execve(char * filename,
66253 put_files_struct(displaced);
66254 return retval;
66255
66256+out_fail:
66257+#ifdef CONFIG_GRKERNSEC
66258+ current->acl = old_acl;
66259+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
66260+ fput(current->exec_file);
66261+ current->exec_file = old_exec_file;
66262+#endif
66263+
66264 out:
66265 if (bprm->mm) {
66266 acct_arg_size(bprm, 0);
66267@@ -1711,6 +1814,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
66268 struct fdtable *fdt;
66269 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
66270
66271+ pax_track_stack();
66272+
66273 if (n < 0)
66274 goto out_nofds;
66275
66276@@ -2151,7 +2256,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
66277 oldfs = get_fs();
66278 set_fs(KERNEL_DS);
66279 /* The __user pointer casts are valid because of the set_fs() */
66280- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
66281+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
66282 set_fs(oldfs);
66283
66284 if (err)
66285diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
66286index 0adced2..bbb1b0d 100644
66287--- a/fs/compat_binfmt_elf.c
66288+++ b/fs/compat_binfmt_elf.c
66289@@ -29,10 +29,12 @@
66290 #undef elfhdr
66291 #undef elf_phdr
66292 #undef elf_note
66293+#undef elf_dyn
66294 #undef elf_addr_t
66295 #define elfhdr elf32_hdr
66296 #define elf_phdr elf32_phdr
66297 #define elf_note elf32_note
66298+#define elf_dyn Elf32_Dyn
66299 #define elf_addr_t Elf32_Addr
66300
66301 /*
66302diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
66303index d84e705..d8c364c 100644
66304--- a/fs/compat_ioctl.c
66305+++ b/fs/compat_ioctl.c
66306@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
66307 up = (struct compat_video_spu_palette __user *) arg;
66308 err = get_user(palp, &up->palette);
66309 err |= get_user(length, &up->length);
66310+ if (err)
66311+ return -EFAULT;
66312
66313 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
66314 err = put_user(compat_ptr(palp), &up_native->palette);
66315@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
66316 return -EFAULT;
66317 if (__get_user(udata, &ss32->iomem_base))
66318 return -EFAULT;
66319- ss.iomem_base = compat_ptr(udata);
66320+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
66321 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
66322 __get_user(ss.port_high, &ss32->port_high))
66323 return -EFAULT;
66324@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
66325 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
66326 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
66327 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
66328- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66329+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
66330 return -EFAULT;
66331
66332 return ioctl_preallocate(file, p);
66333diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
66334index 8e48b52..f01ed91 100644
66335--- a/fs/configfs/dir.c
66336+++ b/fs/configfs/dir.c
66337@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66338 }
66339 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
66340 struct configfs_dirent *next;
66341- const char * name;
66342+ const unsigned char * name;
66343+ char d_name[sizeof(next->s_dentry->d_iname)];
66344 int len;
66345
66346 next = list_entry(p, struct configfs_dirent,
66347@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
66348 continue;
66349
66350 name = configfs_get_name(next);
66351- len = strlen(name);
66352+ if (next->s_dentry && name == next->s_dentry->d_iname) {
66353+ len = next->s_dentry->d_name.len;
66354+ memcpy(d_name, name, len);
66355+ name = d_name;
66356+ } else
66357+ len = strlen(name);
66358 if (next->s_dentry)
66359 ino = next->s_dentry->d_inode->i_ino;
66360 else
66361diff --git a/fs/dcache.c b/fs/dcache.c
66362index 44c0aea..2529092 100644
66363--- a/fs/dcache.c
66364+++ b/fs/dcache.c
66365@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
66366
66367 static struct kmem_cache *dentry_cache __read_mostly;
66368
66369-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66370-
66371 /*
66372 * This is the single most critical data structure when it comes
66373 * to the dcache: the hashtable for lookups. Somebody should try
66374@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
66375 mempages -= reserve;
66376
66377 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
66378- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
66379+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
66380
66381 dcache_init();
66382 inode_init();
66383diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
66384index 39c6ee8..dcee0f1 100644
66385--- a/fs/debugfs/inode.c
66386+++ b/fs/debugfs/inode.c
66387@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
66388 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
66389 {
66390 return debugfs_create_file(name,
66391+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
66392+ S_IFDIR | S_IRWXU,
66393+#else
66394 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
66395+#endif
66396 parent, NULL, NULL);
66397 }
66398 EXPORT_SYMBOL_GPL(debugfs_create_dir);
66399diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
66400index c010ecf..a8d8c59 100644
66401--- a/fs/dlm/lockspace.c
66402+++ b/fs/dlm/lockspace.c
66403@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
66404 kfree(ls);
66405 }
66406
66407-static struct sysfs_ops dlm_attr_ops = {
66408+static const struct sysfs_ops dlm_attr_ops = {
66409 .show = dlm_attr_show,
66410 .store = dlm_attr_store,
66411 };
66412diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
66413index 7a5f1ac..62fa913 100644
66414--- a/fs/ecryptfs/crypto.c
66415+++ b/fs/ecryptfs/crypto.c
66416@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
66417 rc);
66418 goto out;
66419 }
66420- if (unlikely(ecryptfs_verbosity > 0)) {
66421- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
66422- "with iv:\n");
66423- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
66424- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
66425- "encryption:\n");
66426- ecryptfs_dump_hex((char *)
66427- (page_address(page)
66428- + (extent_offset * crypt_stat->extent_size)),
66429- 8);
66430- }
66431 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
66432 page, (extent_offset
66433 * crypt_stat->extent_size),
66434@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
66435 goto out;
66436 }
66437 rc = 0;
66438- if (unlikely(ecryptfs_verbosity > 0)) {
66439- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
66440- "rc = [%d]\n", (extent_base + extent_offset),
66441- rc);
66442- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
66443- "encryption:\n");
66444- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
66445- }
66446 out:
66447 return rc;
66448 }
66449@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
66450 rc);
66451 goto out;
66452 }
66453- if (unlikely(ecryptfs_verbosity > 0)) {
66454- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
66455- "with iv:\n");
66456- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
66457- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
66458- "decryption:\n");
66459- ecryptfs_dump_hex((char *)
66460- (page_address(enc_extent_page)
66461- + (extent_offset * crypt_stat->extent_size)),
66462- 8);
66463- }
66464 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
66465 (extent_offset
66466 * crypt_stat->extent_size),
66467@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
66468 goto out;
66469 }
66470 rc = 0;
66471- if (unlikely(ecryptfs_verbosity > 0)) {
66472- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
66473- "rc = [%d]\n", (extent_base + extent_offset),
66474- rc);
66475- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
66476- "decryption:\n");
66477- ecryptfs_dump_hex((char *)(page_address(page)
66478- + (extent_offset
66479- * crypt_stat->extent_size)), 8);
66480- }
66481 out:
66482 return rc;
66483 }
66484@@ -1455,6 +1415,25 @@ static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
66485 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
66486 }
66487
66488+void ecryptfs_i_size_init(const char *page_virt, struct inode *inode)
66489+{
66490+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
66491+ struct ecryptfs_crypt_stat *crypt_stat;
66492+ u64 file_size;
66493+
66494+ crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
66495+ mount_crypt_stat =
66496+ &ecryptfs_superblock_to_private(inode->i_sb)->mount_crypt_stat;
66497+ if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
66498+ file_size = i_size_read(ecryptfs_inode_to_lower(inode));
66499+ if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
66500+ file_size += crypt_stat->num_header_bytes_at_front;
66501+ } else
66502+ file_size = get_unaligned_be64(page_virt);
66503+ i_size_write(inode, (loff_t)file_size);
66504+ crypt_stat->flags |= ECRYPTFS_I_SIZE_INITIALIZED;
66505+}
66506+
66507 /**
66508 * ecryptfs_read_headers_virt
66509 * @page_virt: The virtual address into which to read the headers
66510@@ -1485,6 +1464,8 @@ static int ecryptfs_read_headers_virt(char *page_virt,
66511 rc = -EINVAL;
66512 goto out;
66513 }
66514+ if (!(crypt_stat->flags & ECRYPTFS_I_SIZE_INITIALIZED))
66515+ ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
66516 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
66517 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
66518 &bytes_read);
66519diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
66520index 542f625..9685315 100644
66521--- a/fs/ecryptfs/ecryptfs_kernel.h
66522+++ b/fs/ecryptfs/ecryptfs_kernel.h
66523@@ -270,6 +270,7 @@ struct ecryptfs_crypt_stat {
66524 #define ECRYPTFS_ENCFN_USE_MOUNT_FNEK 0x00001000
66525 #define ECRYPTFS_ENCFN_USE_FEK 0x00002000
66526 #define ECRYPTFS_UNLINK_SIGS 0x00004000
66527+#define ECRYPTFS_I_SIZE_INITIALIZED 0x00008000
66528 u32 flags;
66529 unsigned int file_version;
66530 size_t iv_bytes;
66531@@ -619,6 +620,7 @@ struct ecryptfs_open_req {
66532 int ecryptfs_interpose(struct dentry *hidden_dentry,
66533 struct dentry *this_dentry, struct super_block *sb,
66534 u32 flags);
66535+void ecryptfs_i_size_init(const char *page_virt, struct inode *inode);
66536 int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
66537 struct dentry *lower_dentry,
66538 struct inode *ecryptfs_dir_inode,
66539diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
66540index 3015389..49129f4 100644
66541--- a/fs/ecryptfs/file.c
66542+++ b/fs/ecryptfs/file.c
66543@@ -237,7 +237,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
66544 goto out_free;
66545 }
66546 rc = 0;
66547- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
66548+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
66549+ | ECRYPTFS_ENCRYPTED);
66550 mutex_unlock(&crypt_stat->cs_mutex);
66551 goto out;
66552 }
66553@@ -347,7 +348,6 @@ const struct file_operations ecryptfs_main_fops = {
66554 #ifdef CONFIG_COMPAT
66555 .compat_ioctl = ecryptfs_compat_ioctl,
66556 #endif
66557- .mmap = generic_file_mmap,
66558 .open = ecryptfs_open,
66559 .flush = ecryptfs_flush,
66560 .release = ecryptfs_release,
66561diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
66562index 4434e8f..fa05803 100644
66563--- a/fs/ecryptfs/inode.c
66564+++ b/fs/ecryptfs/inode.c
66565@@ -256,10 +256,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
66566 struct dentry *lower_dir_dentry;
66567 struct vfsmount *lower_mnt;
66568 struct inode *lower_inode;
66569- struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
66570 struct ecryptfs_crypt_stat *crypt_stat;
66571 char *page_virt = NULL;
66572- u64 file_size;
66573 int rc = 0;
66574
66575 lower_dir_dentry = lower_dentry->d_parent;
66576@@ -334,18 +332,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
66577 }
66578 crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
66579 }
66580- mount_crypt_stat = &ecryptfs_superblock_to_private(
66581- ecryptfs_dentry->d_sb)->mount_crypt_stat;
66582- if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
66583- if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
66584- file_size = (crypt_stat->num_header_bytes_at_front
66585- + i_size_read(lower_dentry->d_inode));
66586- else
66587- file_size = i_size_read(lower_dentry->d_inode);
66588- } else {
66589- file_size = get_unaligned_be64(page_virt);
66590- }
66591- i_size_write(ecryptfs_dentry->d_inode, (loff_t)file_size);
66592+ ecryptfs_i_size_init(page_virt, ecryptfs_dentry->d_inode);
66593 out_free_kmem:
66594 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
66595 goto out;
66596@@ -660,7 +647,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
66597 old_fs = get_fs();
66598 set_fs(get_ds());
66599 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
66600- (char __user *)lower_buf,
66601+ (char __force_user *)lower_buf,
66602 lower_bufsiz);
66603 set_fs(old_fs);
66604 if (rc < 0)
66605@@ -706,7 +693,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
66606 }
66607 old_fs = get_fs();
66608 set_fs(get_ds());
66609- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
66610+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
66611 set_fs(old_fs);
66612 if (rc < 0)
66613 goto out_free;
66614@@ -964,7 +951,8 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
66615 goto out;
66616 }
66617 rc = 0;
66618- crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
66619+ crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
66620+ | ECRYPTFS_ENCRYPTED);
66621 }
66622 }
66623 mutex_unlock(&crypt_stat->cs_mutex);
66624diff --git a/fs/exec.c b/fs/exec.c
66625index 86fafc6..6a041a8 100644
66626--- a/fs/exec.c
66627+++ b/fs/exec.c
66628@@ -56,12 +56,28 @@
66629 #include <linux/fsnotify.h>
66630 #include <linux/fs_struct.h>
66631 #include <linux/pipe_fs_i.h>
66632+#include <linux/random.h>
66633+#include <linux/seq_file.h>
66634+
66635+#ifdef CONFIG_PAX_REFCOUNT
66636+#include <linux/kallsyms.h>
66637+#include <linux/kdebug.h>
66638+#endif
66639
66640 #include <asm/uaccess.h>
66641 #include <asm/mmu_context.h>
66642 #include <asm/tlb.h>
66643 #include "internal.h"
66644
66645+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
66646+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
66647+#endif
66648+
66649+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
66650+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
66651+EXPORT_SYMBOL(pax_set_initial_flags_func);
66652+#endif
66653+
66654 int core_uses_pid;
66655 char core_pattern[CORENAME_MAX_SIZE] = "core";
66656 unsigned int core_pipe_limit;
66657@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
66658 int write)
66659 {
66660 struct page *page;
66661- int ret;
66662
66663-#ifdef CONFIG_STACK_GROWSUP
66664- if (write) {
66665- ret = expand_stack_downwards(bprm->vma, pos);
66666- if (ret < 0)
66667- return NULL;
66668- }
66669-#endif
66670- ret = get_user_pages(current, bprm->mm, pos,
66671- 1, write, 1, &page, NULL);
66672- if (ret <= 0)
66673+ if (0 > expand_stack_downwards(bprm->vma, pos))
66674+ return NULL;
66675+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
66676 return NULL;
66677
66678 if (write) {
66679@@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
66680 if (size <= ARG_MAX)
66681 return page;
66682
66683+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66684+ // only allow 512KB for argv+env on suid/sgid binaries
66685+ // to prevent easy ASLR exhaustion
66686+ if (((bprm->cred->euid != current_euid()) ||
66687+ (bprm->cred->egid != current_egid())) &&
66688+ (size > (512 * 1024))) {
66689+ put_page(page);
66690+ return NULL;
66691+ }
66692+#endif
66693+
66694 /*
66695 * Limit to 1/4-th the stack size for the argv+env strings.
66696 * This ensures that:
66697@@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
66698 vma->vm_end = STACK_TOP_MAX;
66699 vma->vm_start = vma->vm_end - PAGE_SIZE;
66700 vma->vm_flags = VM_STACK_FLAGS;
66701+
66702+#ifdef CONFIG_PAX_SEGMEXEC
66703+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66704+#endif
66705+
66706 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
66707
66708 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
66709@@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
66710 mm->stack_vm = mm->total_vm = 1;
66711 up_write(&mm->mmap_sem);
66712 bprm->p = vma->vm_end - sizeof(void *);
66713+
66714+#ifdef CONFIG_PAX_RANDUSTACK
66715+ if (randomize_va_space)
66716+ bprm->p ^= random32() & ~PAGE_MASK;
66717+#endif
66718+
66719 return 0;
66720 err:
66721 up_write(&mm->mmap_sem);
66722@@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
66723 int r;
66724 mm_segment_t oldfs = get_fs();
66725 set_fs(KERNEL_DS);
66726- r = copy_strings(argc, (char __user * __user *)argv, bprm);
66727+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
66728 set_fs(oldfs);
66729 return r;
66730 }
66731@@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
66732 unsigned long new_end = old_end - shift;
66733 struct mmu_gather *tlb;
66734
66735- BUG_ON(new_start > new_end);
66736+ if (new_start >= new_end || new_start < mmap_min_addr)
66737+ return -ENOMEM;
66738
66739 /*
66740 * ensure there are no vmas between where we want to go
66741@@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
66742 if (vma != find_vma(mm, new_start))
66743 return -EFAULT;
66744
66745+#ifdef CONFIG_PAX_SEGMEXEC
66746+ BUG_ON(pax_find_mirror_vma(vma));
66747+#endif
66748+
66749 /*
66750 * cover the whole range: [new_start, old_end)
66751 */
66752@@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
66753 stack_top = arch_align_stack(stack_top);
66754 stack_top = PAGE_ALIGN(stack_top);
66755
66756- if (unlikely(stack_top < mmap_min_addr) ||
66757- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
66758- return -ENOMEM;
66759-
66760 stack_shift = vma->vm_end - stack_top;
66761
66762 bprm->p -= stack_shift;
66763@@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
66764 bprm->exec -= stack_shift;
66765
66766 down_write(&mm->mmap_sem);
66767+
66768+ /* Move stack pages down in memory. */
66769+ if (stack_shift) {
66770+ ret = shift_arg_pages(vma, stack_shift);
66771+ if (ret)
66772+ goto out_unlock;
66773+ }
66774+
66775 vm_flags = VM_STACK_FLAGS;
66776
66777 /*
66778@@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
66779 vm_flags &= ~VM_EXEC;
66780 vm_flags |= mm->def_flags;
66781
66782+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66783+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66784+ vm_flags &= ~VM_EXEC;
66785+
66786+#ifdef CONFIG_PAX_MPROTECT
66787+ if (mm->pax_flags & MF_PAX_MPROTECT)
66788+ vm_flags &= ~VM_MAYEXEC;
66789+#endif
66790+
66791+ }
66792+#endif
66793+
66794 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
66795 vm_flags);
66796 if (ret)
66797 goto out_unlock;
66798 BUG_ON(prev != vma);
66799
66800- /* Move stack pages down in memory. */
66801- if (stack_shift) {
66802- ret = shift_arg_pages(vma, stack_shift);
66803- if (ret)
66804- goto out_unlock;
66805- }
66806-
66807 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
66808 stack_size = vma->vm_end - vma->vm_start;
66809 /*
66810@@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
66811 old_fs = get_fs();
66812 set_fs(get_ds());
66813 /* The cast to a user pointer is valid due to the set_fs() */
66814- result = vfs_read(file, (void __user *)addr, count, &pos);
66815+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
66816 set_fs(old_fs);
66817 return result;
66818 }
66819@@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
66820 perf_event_comm(tsk);
66821 }
66822
66823+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
66824+{
66825+ int i, ch;
66826+
66827+ /* Copies the binary name from after last slash */
66828+ for (i = 0; (ch = *(fn++)) != '\0';) {
66829+ if (ch == '/')
66830+ i = 0; /* overwrite what we wrote */
66831+ else
66832+ if (i < len - 1)
66833+ tcomm[i++] = ch;
66834+ }
66835+ tcomm[i] = '\0';
66836+}
66837+
66838 int flush_old_exec(struct linux_binprm * bprm)
66839 {
66840 int retval;
66841@@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
66842
66843 set_mm_exe_file(bprm->mm, bprm->file);
66844
66845+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
66846 /*
66847 * Release all of the old mmap stuff
66848 */
66849@@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
66850
66851 void setup_new_exec(struct linux_binprm * bprm)
66852 {
66853- int i, ch;
66854- char * name;
66855- char tcomm[sizeof(current->comm)];
66856-
66857 arch_pick_mmap_layout(current->mm);
66858
66859 /* This is the point of no return */
66860@@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
66861 else
66862 set_dumpable(current->mm, suid_dumpable);
66863
66864- name = bprm->filename;
66865-
66866- /* Copies the binary name from after last slash */
66867- for (i=0; (ch = *(name++)) != '\0';) {
66868- if (ch == '/')
66869- i = 0; /* overwrite what we wrote */
66870- else
66871- if (i < (sizeof(tcomm) - 1))
66872- tcomm[i++] = ch;
66873- }
66874- tcomm[i] = '\0';
66875- set_task_comm(current, tcomm);
66876+ set_task_comm(current, bprm->tcomm);
66877
66878 /* Set the new mm task size. We have to do that late because it may
66879 * depend on TIF_32BIT which is only updated in flush_thread() on
66880@@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
66881 }
66882 rcu_read_unlock();
66883
66884- if (p->fs->users > n_fs) {
66885+ if (atomic_read(&p->fs->users) > n_fs) {
66886 bprm->unsafe |= LSM_UNSAFE_SHARE;
66887 } else {
66888 res = -EAGAIN;
66889@@ -1339,6 +1384,21 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
66890
66891 EXPORT_SYMBOL(search_binary_handler);
66892
66893+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66894+DEFINE_PER_CPU(u64, exec_counter);
66895+static int __init init_exec_counters(void)
66896+{
66897+ unsigned int cpu;
66898+
66899+ for_each_possible_cpu(cpu) {
66900+ per_cpu(exec_counter, cpu) = (u64)cpu;
66901+ }
66902+
66903+ return 0;
66904+}
66905+early_initcall(init_exec_counters);
66906+#endif
66907+
66908 /*
66909 * sys_execve() executes a new program.
66910 */
66911@@ -1347,11 +1407,35 @@ int do_execve(char * filename,
66912 char __user *__user *envp,
66913 struct pt_regs * regs)
66914 {
66915+#ifdef CONFIG_GRKERNSEC
66916+ struct file *old_exec_file;
66917+ struct acl_subject_label *old_acl;
66918+ struct rlimit old_rlim[RLIM_NLIMITS];
66919+#endif
66920 struct linux_binprm *bprm;
66921 struct file *file;
66922 struct files_struct *displaced;
66923 bool clear_in_exec;
66924 int retval;
66925+ const struct cred *cred = current_cred();
66926+
66927+ /*
66928+ * We move the actual failure in case of RLIMIT_NPROC excess from
66929+ * set*uid() to execve() because too many poorly written programs
66930+ * don't check setuid() return code. Here we additionally recheck
66931+ * whether NPROC limit is still exceeded.
66932+ */
66933+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
66934+
66935+ if ((current->flags & PF_NPROC_EXCEEDED) &&
66936+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
66937+ retval = -EAGAIN;
66938+ goto out_ret;
66939+ }
66940+
66941+ /* We're below the limit (still or again), so we don't want to make
66942+ * further execve() calls fail. */
66943+ current->flags &= ~PF_NPROC_EXCEEDED;
66944
66945 retval = unshare_files(&displaced);
66946 if (retval)
66947@@ -1377,12 +1461,27 @@ int do_execve(char * filename,
66948 if (IS_ERR(file))
66949 goto out_unmark;
66950
66951+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
66952+ retval = -EPERM;
66953+ goto out_file;
66954+ }
66955+
66956 sched_exec();
66957
66958 bprm->file = file;
66959 bprm->filename = filename;
66960 bprm->interp = filename;
66961
66962+ if (gr_process_user_ban()) {
66963+ retval = -EPERM;
66964+ goto out_file;
66965+ }
66966+
66967+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
66968+ retval = -EACCES;
66969+ goto out_file;
66970+ }
66971+
66972 retval = bprm_mm_init(bprm);
66973 if (retval)
66974 goto out_file;
66975@@ -1399,25 +1498,66 @@ int do_execve(char * filename,
66976 if (retval < 0)
66977 goto out;
66978
66979+#ifdef CONFIG_GRKERNSEC
66980+ old_acl = current->acl;
66981+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
66982+ old_exec_file = current->exec_file;
66983+ get_file(file);
66984+ current->exec_file = file;
66985+#endif
66986+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66987+ /* limit suid stack to 8MB
66988+ we saved the old limits above and will restore them if this exec fails
66989+ */
66990+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
66991+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
66992+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
66993+#endif
66994+
66995+ if (!gr_tpe_allow(file)) {
66996+ retval = -EACCES;
66997+ goto out_fail;
66998+ }
66999+
67000+ if (gr_check_crash_exec(file)) {
67001+ retval = -EACCES;
67002+ goto out_fail;
67003+ }
67004+
67005+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
67006+ bprm->unsafe);
67007+ if (retval < 0)
67008+ goto out_fail;
67009+
67010 retval = copy_strings_kernel(1, &bprm->filename, bprm);
67011 if (retval < 0)
67012- goto out;
67013+ goto out_fail;
67014
67015 bprm->exec = bprm->p;
67016 retval = copy_strings(bprm->envc, envp, bprm);
67017 if (retval < 0)
67018- goto out;
67019+ goto out_fail;
67020
67021 retval = copy_strings(bprm->argc, argv, bprm);
67022 if (retval < 0)
67023- goto out;
67024+ goto out_fail;
67025+
67026+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
67027+
67028+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
67029
67030 current->flags &= ~PF_KTHREAD;
67031 retval = search_binary_handler(bprm,regs);
67032 if (retval < 0)
67033- goto out;
67034+ goto out_fail;
67035+#ifdef CONFIG_GRKERNSEC
67036+ if (old_exec_file)
67037+ fput(old_exec_file);
67038+#endif
67039
67040 /* execve succeeded */
67041+
67042+ increment_exec_counter();
67043 current->fs->in_exec = 0;
67044 current->in_execve = 0;
67045 acct_update_integrals(current);
67046@@ -1426,6 +1566,14 @@ int do_execve(char * filename,
67047 put_files_struct(displaced);
67048 return retval;
67049
67050+out_fail:
67051+#ifdef CONFIG_GRKERNSEC
67052+ current->acl = old_acl;
67053+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
67054+ fput(current->exec_file);
67055+ current->exec_file = old_exec_file;
67056+#endif
67057+
67058 out:
67059 if (bprm->mm) {
67060 acct_arg_size(bprm, 0);
67061@@ -1591,6 +1739,219 @@ out:
67062 return ispipe;
67063 }
67064
67065+int pax_check_flags(unsigned long *flags)
67066+{
67067+ int retval = 0;
67068+
67069+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
67070+ if (*flags & MF_PAX_SEGMEXEC)
67071+ {
67072+ *flags &= ~MF_PAX_SEGMEXEC;
67073+ retval = -EINVAL;
67074+ }
67075+#endif
67076+
67077+ if ((*flags & MF_PAX_PAGEEXEC)
67078+
67079+#ifdef CONFIG_PAX_PAGEEXEC
67080+ && (*flags & MF_PAX_SEGMEXEC)
67081+#endif
67082+
67083+ )
67084+ {
67085+ *flags &= ~MF_PAX_PAGEEXEC;
67086+ retval = -EINVAL;
67087+ }
67088+
67089+ if ((*flags & MF_PAX_MPROTECT)
67090+
67091+#ifdef CONFIG_PAX_MPROTECT
67092+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67093+#endif
67094+
67095+ )
67096+ {
67097+ *flags &= ~MF_PAX_MPROTECT;
67098+ retval = -EINVAL;
67099+ }
67100+
67101+ if ((*flags & MF_PAX_EMUTRAMP)
67102+
67103+#ifdef CONFIG_PAX_EMUTRAMP
67104+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
67105+#endif
67106+
67107+ )
67108+ {
67109+ *flags &= ~MF_PAX_EMUTRAMP;
67110+ retval = -EINVAL;
67111+ }
67112+
67113+ return retval;
67114+}
67115+
67116+EXPORT_SYMBOL(pax_check_flags);
67117+
67118+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
67119+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
67120+{
67121+ struct task_struct *tsk = current;
67122+ struct mm_struct *mm = current->mm;
67123+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
67124+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
67125+ char *path_exec = NULL;
67126+ char *path_fault = NULL;
67127+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
67128+
67129+ if (buffer_exec && buffer_fault) {
67130+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
67131+
67132+ down_read(&mm->mmap_sem);
67133+ vma = mm->mmap;
67134+ while (vma && (!vma_exec || !vma_fault)) {
67135+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
67136+ vma_exec = vma;
67137+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
67138+ vma_fault = vma;
67139+ vma = vma->vm_next;
67140+ }
67141+ if (vma_exec) {
67142+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
67143+ if (IS_ERR(path_exec))
67144+ path_exec = "<path too long>";
67145+ else {
67146+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
67147+ if (path_exec) {
67148+ *path_exec = 0;
67149+ path_exec = buffer_exec;
67150+ } else
67151+ path_exec = "<path too long>";
67152+ }
67153+ }
67154+ if (vma_fault) {
67155+ start = vma_fault->vm_start;
67156+ end = vma_fault->vm_end;
67157+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
67158+ if (vma_fault->vm_file) {
67159+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
67160+ if (IS_ERR(path_fault))
67161+ path_fault = "<path too long>";
67162+ else {
67163+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
67164+ if (path_fault) {
67165+ *path_fault = 0;
67166+ path_fault = buffer_fault;
67167+ } else
67168+ path_fault = "<path too long>";
67169+ }
67170+ } else
67171+ path_fault = "<anonymous mapping>";
67172+ }
67173+ up_read(&mm->mmap_sem);
67174+ }
67175+ if (tsk->signal->curr_ip)
67176+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
67177+ else
67178+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
67179+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
67180+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
67181+ task_uid(tsk), task_euid(tsk), pc, sp);
67182+ free_page((unsigned long)buffer_exec);
67183+ free_page((unsigned long)buffer_fault);
67184+ pax_report_insns(regs, pc, sp);
67185+ do_coredump(SIGKILL, SIGKILL, regs);
67186+}
67187+#endif
67188+
67189+#ifdef CONFIG_PAX_REFCOUNT
67190+void pax_report_refcount_overflow(struct pt_regs *regs)
67191+{
67192+ if (current->signal->curr_ip)
67193+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67194+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
67195+ else
67196+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
67197+ current->comm, task_pid_nr(current), current_uid(), current_euid());
67198+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
67199+ show_regs(regs);
67200+ force_sig_specific(SIGKILL, current);
67201+}
67202+#endif
67203+
67204+#ifdef CONFIG_PAX_USERCOPY
67205+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
67206+int object_is_on_stack(const void *obj, unsigned long len)
67207+{
67208+ const void * const stack = task_stack_page(current);
67209+ const void * const stackend = stack + THREAD_SIZE;
67210+
67211+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67212+ const void *frame = NULL;
67213+ const void *oldframe;
67214+#endif
67215+
67216+ if (obj + len < obj)
67217+ return -1;
67218+
67219+ if (obj + len <= stack || stackend <= obj)
67220+ return 0;
67221+
67222+ if (obj < stack || stackend < obj + len)
67223+ return -1;
67224+
67225+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
67226+ oldframe = __builtin_frame_address(1);
67227+ if (oldframe)
67228+ frame = __builtin_frame_address(2);
67229+ /*
67230+ low ----------------------------------------------> high
67231+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
67232+ ^----------------^
67233+ allow copies only within here
67234+ */
67235+ while (stack <= frame && frame < stackend) {
67236+ /* if obj + len extends past the last frame, this
67237+ check won't pass and the next frame will be 0,
67238+ causing us to bail out and correctly report
67239+ the copy as invalid
67240+ */
67241+ if (obj + len <= frame)
67242+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
67243+ oldframe = frame;
67244+ frame = *(const void * const *)frame;
67245+ }
67246+ return -1;
67247+#else
67248+ return 1;
67249+#endif
67250+}
67251+
67252+__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
67253+{
67254+ if (current->signal->curr_ip)
67255+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67256+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67257+ else
67258+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
67259+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
67260+
67261+ dump_stack();
67262+ gr_handle_kernel_exploit();
67263+ do_group_exit(SIGKILL);
67264+}
67265+#endif
67266+
67267+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
67268+void pax_track_stack(void)
67269+{
67270+ unsigned long sp = (unsigned long)&sp;
67271+ if (sp < current_thread_info()->lowest_stack &&
67272+ sp > (unsigned long)task_stack_page(current))
67273+ current_thread_info()->lowest_stack = sp;
67274+}
67275+EXPORT_SYMBOL(pax_track_stack);
67276+#endif
67277+
67278 static int zap_process(struct task_struct *start)
67279 {
67280 struct task_struct *t;
67281@@ -1793,17 +2154,17 @@ static void wait_for_dump_helpers(struct file *file)
67282 pipe = file->f_path.dentry->d_inode->i_pipe;
67283
67284 pipe_lock(pipe);
67285- pipe->readers++;
67286- pipe->writers--;
67287+ atomic_inc(&pipe->readers);
67288+ atomic_dec(&pipe->writers);
67289
67290- while ((pipe->readers > 1) && (!signal_pending(current))) {
67291+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
67292 wake_up_interruptible_sync(&pipe->wait);
67293 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
67294 pipe_wait(pipe);
67295 }
67296
67297- pipe->readers--;
67298- pipe->writers++;
67299+ atomic_dec(&pipe->readers);
67300+ atomic_inc(&pipe->writers);
67301 pipe_unlock(pipe);
67302
67303 }
67304@@ -1826,10 +2187,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67305 char **helper_argv = NULL;
67306 int helper_argc = 0;
67307 int dump_count = 0;
67308- static atomic_t core_dump_count = ATOMIC_INIT(0);
67309+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
67310
67311 audit_core_dumps(signr);
67312
67313+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
67314+ gr_handle_brute_attach(current, mm->flags);
67315+
67316 binfmt = mm->binfmt;
67317 if (!binfmt || !binfmt->core_dump)
67318 goto fail;
67319@@ -1874,6 +2238,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67320 */
67321 clear_thread_flag(TIF_SIGPENDING);
67322
67323+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
67324+
67325 /*
67326 * lock_kernel() because format_corename() is controlled by sysctl, which
67327 * uses lock_kernel()
67328@@ -1908,7 +2274,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
67329 goto fail_unlock;
67330 }
67331
67332- dump_count = atomic_inc_return(&core_dump_count);
67333+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
67334 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
67335 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
67336 task_tgid_vnr(current), current->comm);
67337@@ -1972,7 +2338,7 @@ close_fail:
67338 filp_close(file, NULL);
67339 fail_dropcount:
67340 if (dump_count)
67341- atomic_dec(&core_dump_count);
67342+ atomic_dec_unchecked(&core_dump_count);
67343 fail_unlock:
67344 if (helper_argv)
67345 argv_free(helper_argv);
67346diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
67347index 7f8d2e5..a1abdbb 100644
67348--- a/fs/ext2/balloc.c
67349+++ b/fs/ext2/balloc.c
67350@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
67351
67352 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67353 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67354- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67355+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67356 sbi->s_resuid != current_fsuid() &&
67357 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67358 return 0;
67359diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
67360index 27967f9..9f2a5fb 100644
67361--- a/fs/ext3/balloc.c
67362+++ b/fs/ext3/balloc.c
67363@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
67364
67365 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
67366 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
67367- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
67368+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
67369 sbi->s_resuid != current_fsuid() &&
67370 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
67371 return 0;
67372diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
67373index e85b63c..80398e6 100644
67374--- a/fs/ext4/balloc.c
67375+++ b/fs/ext4/balloc.c
67376@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
67377 /* Hm, nope. Are (enough) root reserved blocks available? */
67378 if (sbi->s_resuid == current_fsuid() ||
67379 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
67380- capable(CAP_SYS_RESOURCE)) {
67381+ capable_nolog(CAP_SYS_RESOURCE)) {
67382 if (free_blocks >= (nblocks + dirty_blocks))
67383 return 1;
67384 }
67385diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
67386index 67c46ed..1f237e5 100644
67387--- a/fs/ext4/ext4.h
67388+++ b/fs/ext4/ext4.h
67389@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
67390
67391 /* stats for buddy allocator */
67392 spinlock_t s_mb_pa_lock;
67393- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
67394- atomic_t s_bal_success; /* we found long enough chunks */
67395- atomic_t s_bal_allocated; /* in blocks */
67396- atomic_t s_bal_ex_scanned; /* total extents scanned */
67397- atomic_t s_bal_goals; /* goal hits */
67398- atomic_t s_bal_breaks; /* too long searches */
67399- atomic_t s_bal_2orders; /* 2^order hits */
67400+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
67401+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
67402+ atomic_unchecked_t s_bal_allocated; /* in blocks */
67403+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
67404+ atomic_unchecked_t s_bal_goals; /* goal hits */
67405+ atomic_unchecked_t s_bal_breaks; /* too long searches */
67406+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
67407 spinlock_t s_bal_lock;
67408 unsigned long s_mb_buddies_generated;
67409 unsigned long long s_mb_generation_time;
67410- atomic_t s_mb_lost_chunks;
67411- atomic_t s_mb_preallocated;
67412- atomic_t s_mb_discarded;
67413+ atomic_unchecked_t s_mb_lost_chunks;
67414+ atomic_unchecked_t s_mb_preallocated;
67415+ atomic_unchecked_t s_mb_discarded;
67416 atomic_t s_lock_busy;
67417
67418 /* locality groups */
67419diff --git a/fs/ext4/file.c b/fs/ext4/file.c
67420index 2a60541..7439d61 100644
67421--- a/fs/ext4/file.c
67422+++ b/fs/ext4/file.c
67423@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
67424 cp = d_path(&path, buf, sizeof(buf));
67425 path_put(&path);
67426 if (!IS_ERR(cp)) {
67427- memcpy(sbi->s_es->s_last_mounted, cp,
67428- sizeof(sbi->s_es->s_last_mounted));
67429+ strlcpy(sbi->s_es->s_last_mounted, cp,
67430+ sizeof(sbi->s_es->s_last_mounted));
67431 sb->s_dirt = 1;
67432 }
67433 }
67434diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
67435index 42bac1b..0aab9d8 100644
67436--- a/fs/ext4/mballoc.c
67437+++ b/fs/ext4/mballoc.c
67438@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
67439 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
67440
67441 if (EXT4_SB(sb)->s_mb_stats)
67442- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
67443+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
67444
67445 break;
67446 }
67447@@ -2131,7 +2131,7 @@ repeat:
67448 ac->ac_status = AC_STATUS_CONTINUE;
67449 ac->ac_flags |= EXT4_MB_HINT_FIRST;
67450 cr = 3;
67451- atomic_inc(&sbi->s_mb_lost_chunks);
67452+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
67453 goto repeat;
67454 }
67455 }
67456@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
67457 ext4_grpblk_t counters[16];
67458 } sg;
67459
67460+ pax_track_stack();
67461+
67462 group--;
67463 if (group == 0)
67464 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
67465@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
67466 if (sbi->s_mb_stats) {
67467 printk(KERN_INFO
67468 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
67469- atomic_read(&sbi->s_bal_allocated),
67470- atomic_read(&sbi->s_bal_reqs),
67471- atomic_read(&sbi->s_bal_success));
67472+ atomic_read_unchecked(&sbi->s_bal_allocated),
67473+ atomic_read_unchecked(&sbi->s_bal_reqs),
67474+ atomic_read_unchecked(&sbi->s_bal_success));
67475 printk(KERN_INFO
67476 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
67477 "%u 2^N hits, %u breaks, %u lost\n",
67478- atomic_read(&sbi->s_bal_ex_scanned),
67479- atomic_read(&sbi->s_bal_goals),
67480- atomic_read(&sbi->s_bal_2orders),
67481- atomic_read(&sbi->s_bal_breaks),
67482- atomic_read(&sbi->s_mb_lost_chunks));
67483+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
67484+ atomic_read_unchecked(&sbi->s_bal_goals),
67485+ atomic_read_unchecked(&sbi->s_bal_2orders),
67486+ atomic_read_unchecked(&sbi->s_bal_breaks),
67487+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
67488 printk(KERN_INFO
67489 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
67490 sbi->s_mb_buddies_generated++,
67491 sbi->s_mb_generation_time);
67492 printk(KERN_INFO
67493 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
67494- atomic_read(&sbi->s_mb_preallocated),
67495- atomic_read(&sbi->s_mb_discarded));
67496+ atomic_read_unchecked(&sbi->s_mb_preallocated),
67497+ atomic_read_unchecked(&sbi->s_mb_discarded));
67498 }
67499
67500 free_percpu(sbi->s_locality_groups);
67501@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
67502 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
67503
67504 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
67505- atomic_inc(&sbi->s_bal_reqs);
67506- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
67507+ atomic_inc_unchecked(&sbi->s_bal_reqs);
67508+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
67509 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
67510- atomic_inc(&sbi->s_bal_success);
67511- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
67512+ atomic_inc_unchecked(&sbi->s_bal_success);
67513+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
67514 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
67515 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
67516- atomic_inc(&sbi->s_bal_goals);
67517+ atomic_inc_unchecked(&sbi->s_bal_goals);
67518 if (ac->ac_found > sbi->s_mb_max_to_scan)
67519- atomic_inc(&sbi->s_bal_breaks);
67520+ atomic_inc_unchecked(&sbi->s_bal_breaks);
67521 }
67522
67523 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
67524@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
67525 trace_ext4_mb_new_inode_pa(ac, pa);
67526
67527 ext4_mb_use_inode_pa(ac, pa);
67528- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
67529+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
67530
67531 ei = EXT4_I(ac->ac_inode);
67532 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
67533@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
67534 trace_ext4_mb_new_group_pa(ac, pa);
67535
67536 ext4_mb_use_group_pa(ac, pa);
67537- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
67538+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
67539
67540 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
67541 lg = ac->ac_lg;
67542@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
67543 * from the bitmap and continue.
67544 */
67545 }
67546- atomic_add(free, &sbi->s_mb_discarded);
67547+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
67548
67549 return err;
67550 }
67551@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
67552 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
67553 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
67554 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
67555- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
67556+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
67557
67558 if (ac) {
67559 ac->ac_sb = sb;
67560diff --git a/fs/ext4/super.c b/fs/ext4/super.c
67561index f1e7077..edd86b2 100644
67562--- a/fs/ext4/super.c
67563+++ b/fs/ext4/super.c
67564@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
67565 }
67566
67567
67568-static struct sysfs_ops ext4_attr_ops = {
67569+static const struct sysfs_ops ext4_attr_ops = {
67570 .show = ext4_attr_show,
67571 .store = ext4_attr_store,
67572 };
67573diff --git a/fs/fcntl.c b/fs/fcntl.c
67574index 97e01dc..e9aab2d 100644
67575--- a/fs/fcntl.c
67576+++ b/fs/fcntl.c
67577@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
67578 if (err)
67579 return err;
67580
67581+ if (gr_handle_chroot_fowner(pid, type))
67582+ return -ENOENT;
67583+ if (gr_check_protected_task_fowner(pid, type))
67584+ return -EACCES;
67585+
67586 f_modown(filp, pid, type, force);
67587 return 0;
67588 }
67589@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
67590
67591 static int f_setown_ex(struct file *filp, unsigned long arg)
67592 {
67593- struct f_owner_ex * __user owner_p = (void * __user)arg;
67594+ struct f_owner_ex __user *owner_p = (void __user *)arg;
67595 struct f_owner_ex owner;
67596 struct pid *pid;
67597 int type;
67598@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
67599
67600 static int f_getown_ex(struct file *filp, unsigned long arg)
67601 {
67602- struct f_owner_ex * __user owner_p = (void * __user)arg;
67603+ struct f_owner_ex __user *owner_p = (void __user *)arg;
67604 struct f_owner_ex owner;
67605 int ret = 0;
67606
67607@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
67608 switch (cmd) {
67609 case F_DUPFD:
67610 case F_DUPFD_CLOEXEC:
67611+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
67612 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
67613 break;
67614 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
67615diff --git a/fs/fifo.c b/fs/fifo.c
67616index f8f97b8..b1f2259 100644
67617--- a/fs/fifo.c
67618+++ b/fs/fifo.c
67619@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
67620 */
67621 filp->f_op = &read_pipefifo_fops;
67622 pipe->r_counter++;
67623- if (pipe->readers++ == 0)
67624+ if (atomic_inc_return(&pipe->readers) == 1)
67625 wake_up_partner(inode);
67626
67627- if (!pipe->writers) {
67628+ if (!atomic_read(&pipe->writers)) {
67629 if ((filp->f_flags & O_NONBLOCK)) {
67630 /* suppress POLLHUP until we have
67631 * seen a writer */
67632@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
67633 * errno=ENXIO when there is no process reading the FIFO.
67634 */
67635 ret = -ENXIO;
67636- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
67637+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
67638 goto err;
67639
67640 filp->f_op = &write_pipefifo_fops;
67641 pipe->w_counter++;
67642- if (!pipe->writers++)
67643+ if (atomic_inc_return(&pipe->writers) == 1)
67644 wake_up_partner(inode);
67645
67646- if (!pipe->readers) {
67647+ if (!atomic_read(&pipe->readers)) {
67648 wait_for_partner(inode, &pipe->r_counter);
67649 if (signal_pending(current))
67650 goto err_wr;
67651@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
67652 */
67653 filp->f_op = &rdwr_pipefifo_fops;
67654
67655- pipe->readers++;
67656- pipe->writers++;
67657+ atomic_inc(&pipe->readers);
67658+ atomic_inc(&pipe->writers);
67659 pipe->r_counter++;
67660 pipe->w_counter++;
67661- if (pipe->readers == 1 || pipe->writers == 1)
67662+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
67663 wake_up_partner(inode);
67664 break;
67665
67666@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
67667 return 0;
67668
67669 err_rd:
67670- if (!--pipe->readers)
67671+ if (atomic_dec_and_test(&pipe->readers))
67672 wake_up_interruptible(&pipe->wait);
67673 ret = -ERESTARTSYS;
67674 goto err;
67675
67676 err_wr:
67677- if (!--pipe->writers)
67678+ if (atomic_dec_and_test(&pipe->writers))
67679 wake_up_interruptible(&pipe->wait);
67680 ret = -ERESTARTSYS;
67681 goto err;
67682
67683 err:
67684- if (!pipe->readers && !pipe->writers)
67685+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
67686 free_pipe_info(inode);
67687
67688 err_nocleanup:
67689diff --git a/fs/file.c b/fs/file.c
67690index 87e1290..a930cc4 100644
67691--- a/fs/file.c
67692+++ b/fs/file.c
67693@@ -14,6 +14,7 @@
67694 #include <linux/slab.h>
67695 #include <linux/vmalloc.h>
67696 #include <linux/file.h>
67697+#include <linux/security.h>
67698 #include <linux/fdtable.h>
67699 #include <linux/bitops.h>
67700 #include <linux/interrupt.h>
67701@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
67702 * N.B. For clone tasks sharing a files structure, this test
67703 * will limit the total number of files that can be opened.
67704 */
67705+
67706+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
67707 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
67708 return -EMFILE;
67709
67710diff --git a/fs/filesystems.c b/fs/filesystems.c
67711index a24c58e..53f91ee 100644
67712--- a/fs/filesystems.c
67713+++ b/fs/filesystems.c
67714@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
67715 int len = dot ? dot - name : strlen(name);
67716
67717 fs = __get_fs_type(name, len);
67718+
67719+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67720+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
67721+#else
67722 if (!fs && (request_module("%.*s", len, name) == 0))
67723+#endif
67724 fs = __get_fs_type(name, len);
67725
67726 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
67727diff --git a/fs/fs_struct.c b/fs/fs_struct.c
67728index eee0590..1181166 100644
67729--- a/fs/fs_struct.c
67730+++ b/fs/fs_struct.c
67731@@ -4,6 +4,7 @@
67732 #include <linux/path.h>
67733 #include <linux/slab.h>
67734 #include <linux/fs_struct.h>
67735+#include <linux/grsecurity.h>
67736
67737 /*
67738 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
67739@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
67740 old_root = fs->root;
67741 fs->root = *path;
67742 path_get(path);
67743+ gr_set_chroot_entries(current, path);
67744 write_unlock(&fs->lock);
67745 if (old_root.dentry)
67746 path_put(&old_root);
67747@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
67748 && fs->root.mnt == old_root->mnt) {
67749 path_get(new_root);
67750 fs->root = *new_root;
67751+ gr_set_chroot_entries(p, new_root);
67752 count++;
67753 }
67754 if (fs->pwd.dentry == old_root->dentry
67755@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
67756 task_lock(tsk);
67757 write_lock(&fs->lock);
67758 tsk->fs = NULL;
67759- kill = !--fs->users;
67760+ gr_clear_chroot_entries(tsk);
67761+ kill = !atomic_dec_return(&fs->users);
67762 write_unlock(&fs->lock);
67763 task_unlock(tsk);
67764 if (kill)
67765@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
67766 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
67767 /* We don't need to lock fs - think why ;-) */
67768 if (fs) {
67769- fs->users = 1;
67770+ atomic_set(&fs->users, 1);
67771 fs->in_exec = 0;
67772 rwlock_init(&fs->lock);
67773 fs->umask = old->umask;
67774@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
67775
67776 task_lock(current);
67777 write_lock(&fs->lock);
67778- kill = !--fs->users;
67779+ kill = !atomic_dec_return(&fs->users);
67780 current->fs = new_fs;
67781+ gr_set_chroot_entries(current, &new_fs->root);
67782 write_unlock(&fs->lock);
67783 task_unlock(current);
67784
67785@@ -141,13 +146,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
67786
67787 int current_umask(void)
67788 {
67789- return current->fs->umask;
67790+ return current->fs->umask | gr_acl_umask();
67791 }
67792 EXPORT_SYMBOL(current_umask);
67793
67794 /* to be mentioned only in INIT_TASK */
67795 struct fs_struct init_fs = {
67796- .users = 1,
67797+ .users = ATOMIC_INIT(1),
67798 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
67799 .umask = 0022,
67800 };
67801@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
67802 task_lock(current);
67803
67804 write_lock(&init_fs.lock);
67805- init_fs.users++;
67806+ atomic_inc(&init_fs.users);
67807 write_unlock(&init_fs.lock);
67808
67809 write_lock(&fs->lock);
67810 current->fs = &init_fs;
67811- kill = !--fs->users;
67812+ gr_set_chroot_entries(current, &current->fs->root);
67813+ kill = !atomic_dec_return(&fs->users);
67814 write_unlock(&fs->lock);
67815
67816 task_unlock(current);
67817diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
67818index 9905350..02eaec4 100644
67819--- a/fs/fscache/cookie.c
67820+++ b/fs/fscache/cookie.c
67821@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
67822 parent ? (char *) parent->def->name : "<no-parent>",
67823 def->name, netfs_data);
67824
67825- fscache_stat(&fscache_n_acquires);
67826+ fscache_stat_unchecked(&fscache_n_acquires);
67827
67828 /* if there's no parent cookie, then we don't create one here either */
67829 if (!parent) {
67830- fscache_stat(&fscache_n_acquires_null);
67831+ fscache_stat_unchecked(&fscache_n_acquires_null);
67832 _leave(" [no parent]");
67833 return NULL;
67834 }
67835@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
67836 /* allocate and initialise a cookie */
67837 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
67838 if (!cookie) {
67839- fscache_stat(&fscache_n_acquires_oom);
67840+ fscache_stat_unchecked(&fscache_n_acquires_oom);
67841 _leave(" [ENOMEM]");
67842 return NULL;
67843 }
67844@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
67845
67846 switch (cookie->def->type) {
67847 case FSCACHE_COOKIE_TYPE_INDEX:
67848- fscache_stat(&fscache_n_cookie_index);
67849+ fscache_stat_unchecked(&fscache_n_cookie_index);
67850 break;
67851 case FSCACHE_COOKIE_TYPE_DATAFILE:
67852- fscache_stat(&fscache_n_cookie_data);
67853+ fscache_stat_unchecked(&fscache_n_cookie_data);
67854 break;
67855 default:
67856- fscache_stat(&fscache_n_cookie_special);
67857+ fscache_stat_unchecked(&fscache_n_cookie_special);
67858 break;
67859 }
67860
67861@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
67862 if (fscache_acquire_non_index_cookie(cookie) < 0) {
67863 atomic_dec(&parent->n_children);
67864 __fscache_cookie_put(cookie);
67865- fscache_stat(&fscache_n_acquires_nobufs);
67866+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
67867 _leave(" = NULL");
67868 return NULL;
67869 }
67870 }
67871
67872- fscache_stat(&fscache_n_acquires_ok);
67873+ fscache_stat_unchecked(&fscache_n_acquires_ok);
67874 _leave(" = %p", cookie);
67875 return cookie;
67876 }
67877@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
67878 cache = fscache_select_cache_for_object(cookie->parent);
67879 if (!cache) {
67880 up_read(&fscache_addremove_sem);
67881- fscache_stat(&fscache_n_acquires_no_cache);
67882+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
67883 _leave(" = -ENOMEDIUM [no cache]");
67884 return -ENOMEDIUM;
67885 }
67886@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
67887 object = cache->ops->alloc_object(cache, cookie);
67888 fscache_stat_d(&fscache_n_cop_alloc_object);
67889 if (IS_ERR(object)) {
67890- fscache_stat(&fscache_n_object_no_alloc);
67891+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
67892 ret = PTR_ERR(object);
67893 goto error;
67894 }
67895
67896- fscache_stat(&fscache_n_object_alloc);
67897+ fscache_stat_unchecked(&fscache_n_object_alloc);
67898
67899 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
67900
67901@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
67902 struct fscache_object *object;
67903 struct hlist_node *_p;
67904
67905- fscache_stat(&fscache_n_updates);
67906+ fscache_stat_unchecked(&fscache_n_updates);
67907
67908 if (!cookie) {
67909- fscache_stat(&fscache_n_updates_null);
67910+ fscache_stat_unchecked(&fscache_n_updates_null);
67911 _leave(" [no cookie]");
67912 return;
67913 }
67914@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
67915 struct fscache_object *object;
67916 unsigned long event;
67917
67918- fscache_stat(&fscache_n_relinquishes);
67919+ fscache_stat_unchecked(&fscache_n_relinquishes);
67920 if (retire)
67921- fscache_stat(&fscache_n_relinquishes_retire);
67922+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
67923
67924 if (!cookie) {
67925- fscache_stat(&fscache_n_relinquishes_null);
67926+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
67927 _leave(" [no cookie]");
67928 return;
67929 }
67930@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
67931
67932 /* wait for the cookie to finish being instantiated (or to fail) */
67933 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
67934- fscache_stat(&fscache_n_relinquishes_waitcrt);
67935+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
67936 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
67937 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
67938 }
67939diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
67940index edd7434..0725e66 100644
67941--- a/fs/fscache/internal.h
67942+++ b/fs/fscache/internal.h
67943@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
67944 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
67945 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
67946
67947-extern atomic_t fscache_n_op_pend;
67948-extern atomic_t fscache_n_op_run;
67949-extern atomic_t fscache_n_op_enqueue;
67950-extern atomic_t fscache_n_op_deferred_release;
67951-extern atomic_t fscache_n_op_release;
67952-extern atomic_t fscache_n_op_gc;
67953-extern atomic_t fscache_n_op_cancelled;
67954-extern atomic_t fscache_n_op_rejected;
67955+extern atomic_unchecked_t fscache_n_op_pend;
67956+extern atomic_unchecked_t fscache_n_op_run;
67957+extern atomic_unchecked_t fscache_n_op_enqueue;
67958+extern atomic_unchecked_t fscache_n_op_deferred_release;
67959+extern atomic_unchecked_t fscache_n_op_release;
67960+extern atomic_unchecked_t fscache_n_op_gc;
67961+extern atomic_unchecked_t fscache_n_op_cancelled;
67962+extern atomic_unchecked_t fscache_n_op_rejected;
67963
67964-extern atomic_t fscache_n_attr_changed;
67965-extern atomic_t fscache_n_attr_changed_ok;
67966-extern atomic_t fscache_n_attr_changed_nobufs;
67967-extern atomic_t fscache_n_attr_changed_nomem;
67968-extern atomic_t fscache_n_attr_changed_calls;
67969+extern atomic_unchecked_t fscache_n_attr_changed;
67970+extern atomic_unchecked_t fscache_n_attr_changed_ok;
67971+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
67972+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
67973+extern atomic_unchecked_t fscache_n_attr_changed_calls;
67974
67975-extern atomic_t fscache_n_allocs;
67976-extern atomic_t fscache_n_allocs_ok;
67977-extern atomic_t fscache_n_allocs_wait;
67978-extern atomic_t fscache_n_allocs_nobufs;
67979-extern atomic_t fscache_n_allocs_intr;
67980-extern atomic_t fscache_n_allocs_object_dead;
67981-extern atomic_t fscache_n_alloc_ops;
67982-extern atomic_t fscache_n_alloc_op_waits;
67983+extern atomic_unchecked_t fscache_n_allocs;
67984+extern atomic_unchecked_t fscache_n_allocs_ok;
67985+extern atomic_unchecked_t fscache_n_allocs_wait;
67986+extern atomic_unchecked_t fscache_n_allocs_nobufs;
67987+extern atomic_unchecked_t fscache_n_allocs_intr;
67988+extern atomic_unchecked_t fscache_n_allocs_object_dead;
67989+extern atomic_unchecked_t fscache_n_alloc_ops;
67990+extern atomic_unchecked_t fscache_n_alloc_op_waits;
67991
67992-extern atomic_t fscache_n_retrievals;
67993-extern atomic_t fscache_n_retrievals_ok;
67994-extern atomic_t fscache_n_retrievals_wait;
67995-extern atomic_t fscache_n_retrievals_nodata;
67996-extern atomic_t fscache_n_retrievals_nobufs;
67997-extern atomic_t fscache_n_retrievals_intr;
67998-extern atomic_t fscache_n_retrievals_nomem;
67999-extern atomic_t fscache_n_retrievals_object_dead;
68000-extern atomic_t fscache_n_retrieval_ops;
68001-extern atomic_t fscache_n_retrieval_op_waits;
68002+extern atomic_unchecked_t fscache_n_retrievals;
68003+extern atomic_unchecked_t fscache_n_retrievals_ok;
68004+extern atomic_unchecked_t fscache_n_retrievals_wait;
68005+extern atomic_unchecked_t fscache_n_retrievals_nodata;
68006+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
68007+extern atomic_unchecked_t fscache_n_retrievals_intr;
68008+extern atomic_unchecked_t fscache_n_retrievals_nomem;
68009+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
68010+extern atomic_unchecked_t fscache_n_retrieval_ops;
68011+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
68012
68013-extern atomic_t fscache_n_stores;
68014-extern atomic_t fscache_n_stores_ok;
68015-extern atomic_t fscache_n_stores_again;
68016-extern atomic_t fscache_n_stores_nobufs;
68017-extern atomic_t fscache_n_stores_oom;
68018-extern atomic_t fscache_n_store_ops;
68019-extern atomic_t fscache_n_store_calls;
68020-extern atomic_t fscache_n_store_pages;
68021-extern atomic_t fscache_n_store_radix_deletes;
68022-extern atomic_t fscache_n_store_pages_over_limit;
68023+extern atomic_unchecked_t fscache_n_stores;
68024+extern atomic_unchecked_t fscache_n_stores_ok;
68025+extern atomic_unchecked_t fscache_n_stores_again;
68026+extern atomic_unchecked_t fscache_n_stores_nobufs;
68027+extern atomic_unchecked_t fscache_n_stores_oom;
68028+extern atomic_unchecked_t fscache_n_store_ops;
68029+extern atomic_unchecked_t fscache_n_store_calls;
68030+extern atomic_unchecked_t fscache_n_store_pages;
68031+extern atomic_unchecked_t fscache_n_store_radix_deletes;
68032+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
68033
68034-extern atomic_t fscache_n_store_vmscan_not_storing;
68035-extern atomic_t fscache_n_store_vmscan_gone;
68036-extern atomic_t fscache_n_store_vmscan_busy;
68037-extern atomic_t fscache_n_store_vmscan_cancelled;
68038+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
68039+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
68040+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
68041+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
68042
68043-extern atomic_t fscache_n_marks;
68044-extern atomic_t fscache_n_uncaches;
68045+extern atomic_unchecked_t fscache_n_marks;
68046+extern atomic_unchecked_t fscache_n_uncaches;
68047
68048-extern atomic_t fscache_n_acquires;
68049-extern atomic_t fscache_n_acquires_null;
68050-extern atomic_t fscache_n_acquires_no_cache;
68051-extern atomic_t fscache_n_acquires_ok;
68052-extern atomic_t fscache_n_acquires_nobufs;
68053-extern atomic_t fscache_n_acquires_oom;
68054+extern atomic_unchecked_t fscache_n_acquires;
68055+extern atomic_unchecked_t fscache_n_acquires_null;
68056+extern atomic_unchecked_t fscache_n_acquires_no_cache;
68057+extern atomic_unchecked_t fscache_n_acquires_ok;
68058+extern atomic_unchecked_t fscache_n_acquires_nobufs;
68059+extern atomic_unchecked_t fscache_n_acquires_oom;
68060
68061-extern atomic_t fscache_n_updates;
68062-extern atomic_t fscache_n_updates_null;
68063-extern atomic_t fscache_n_updates_run;
68064+extern atomic_unchecked_t fscache_n_updates;
68065+extern atomic_unchecked_t fscache_n_updates_null;
68066+extern atomic_unchecked_t fscache_n_updates_run;
68067
68068-extern atomic_t fscache_n_relinquishes;
68069-extern atomic_t fscache_n_relinquishes_null;
68070-extern atomic_t fscache_n_relinquishes_waitcrt;
68071-extern atomic_t fscache_n_relinquishes_retire;
68072+extern atomic_unchecked_t fscache_n_relinquishes;
68073+extern atomic_unchecked_t fscache_n_relinquishes_null;
68074+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
68075+extern atomic_unchecked_t fscache_n_relinquishes_retire;
68076
68077-extern atomic_t fscache_n_cookie_index;
68078-extern atomic_t fscache_n_cookie_data;
68079-extern atomic_t fscache_n_cookie_special;
68080+extern atomic_unchecked_t fscache_n_cookie_index;
68081+extern atomic_unchecked_t fscache_n_cookie_data;
68082+extern atomic_unchecked_t fscache_n_cookie_special;
68083
68084-extern atomic_t fscache_n_object_alloc;
68085-extern atomic_t fscache_n_object_no_alloc;
68086-extern atomic_t fscache_n_object_lookups;
68087-extern atomic_t fscache_n_object_lookups_negative;
68088-extern atomic_t fscache_n_object_lookups_positive;
68089-extern atomic_t fscache_n_object_lookups_timed_out;
68090-extern atomic_t fscache_n_object_created;
68091-extern atomic_t fscache_n_object_avail;
68092-extern atomic_t fscache_n_object_dead;
68093+extern atomic_unchecked_t fscache_n_object_alloc;
68094+extern atomic_unchecked_t fscache_n_object_no_alloc;
68095+extern atomic_unchecked_t fscache_n_object_lookups;
68096+extern atomic_unchecked_t fscache_n_object_lookups_negative;
68097+extern atomic_unchecked_t fscache_n_object_lookups_positive;
68098+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
68099+extern atomic_unchecked_t fscache_n_object_created;
68100+extern atomic_unchecked_t fscache_n_object_avail;
68101+extern atomic_unchecked_t fscache_n_object_dead;
68102
68103-extern atomic_t fscache_n_checkaux_none;
68104-extern atomic_t fscache_n_checkaux_okay;
68105-extern atomic_t fscache_n_checkaux_update;
68106-extern atomic_t fscache_n_checkaux_obsolete;
68107+extern atomic_unchecked_t fscache_n_checkaux_none;
68108+extern atomic_unchecked_t fscache_n_checkaux_okay;
68109+extern atomic_unchecked_t fscache_n_checkaux_update;
68110+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
68111
68112 extern atomic_t fscache_n_cop_alloc_object;
68113 extern atomic_t fscache_n_cop_lookup_object;
68114@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
68115 atomic_inc(stat);
68116 }
68117
68118+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
68119+{
68120+ atomic_inc_unchecked(stat);
68121+}
68122+
68123 static inline void fscache_stat_d(atomic_t *stat)
68124 {
68125 atomic_dec(stat);
68126@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
68127
68128 #define __fscache_stat(stat) (NULL)
68129 #define fscache_stat(stat) do {} while (0)
68130+#define fscache_stat_unchecked(stat) do {} while (0)
68131 #define fscache_stat_d(stat) do {} while (0)
68132 #endif
68133
68134diff --git a/fs/fscache/object.c b/fs/fscache/object.c
68135index e513ac5..e888d34 100644
68136--- a/fs/fscache/object.c
68137+++ b/fs/fscache/object.c
68138@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68139 /* update the object metadata on disk */
68140 case FSCACHE_OBJECT_UPDATING:
68141 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
68142- fscache_stat(&fscache_n_updates_run);
68143+ fscache_stat_unchecked(&fscache_n_updates_run);
68144 fscache_stat(&fscache_n_cop_update_object);
68145 object->cache->ops->update_object(object);
68146 fscache_stat_d(&fscache_n_cop_update_object);
68147@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68148 spin_lock(&object->lock);
68149 object->state = FSCACHE_OBJECT_DEAD;
68150 spin_unlock(&object->lock);
68151- fscache_stat(&fscache_n_object_dead);
68152+ fscache_stat_unchecked(&fscache_n_object_dead);
68153 goto terminal_transit;
68154
68155 /* handle the parent cache of this object being withdrawn from
68156@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
68157 spin_lock(&object->lock);
68158 object->state = FSCACHE_OBJECT_DEAD;
68159 spin_unlock(&object->lock);
68160- fscache_stat(&fscache_n_object_dead);
68161+ fscache_stat_unchecked(&fscache_n_object_dead);
68162 goto terminal_transit;
68163
68164 /* complain about the object being woken up once it is
68165@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68166 parent->cookie->def->name, cookie->def->name,
68167 object->cache->tag->name);
68168
68169- fscache_stat(&fscache_n_object_lookups);
68170+ fscache_stat_unchecked(&fscache_n_object_lookups);
68171 fscache_stat(&fscache_n_cop_lookup_object);
68172 ret = object->cache->ops->lookup_object(object);
68173 fscache_stat_d(&fscache_n_cop_lookup_object);
68174@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
68175 if (ret == -ETIMEDOUT) {
68176 /* probably stuck behind another object, so move this one to
68177 * the back of the queue */
68178- fscache_stat(&fscache_n_object_lookups_timed_out);
68179+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
68180 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68181 }
68182
68183@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
68184
68185 spin_lock(&object->lock);
68186 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68187- fscache_stat(&fscache_n_object_lookups_negative);
68188+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
68189
68190 /* transit here to allow write requests to begin stacking up
68191 * and read requests to begin returning ENODATA */
68192@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
68193 * result, in which case there may be data available */
68194 spin_lock(&object->lock);
68195 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
68196- fscache_stat(&fscache_n_object_lookups_positive);
68197+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
68198
68199 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
68200
68201@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
68202 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
68203 } else {
68204 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
68205- fscache_stat(&fscache_n_object_created);
68206+ fscache_stat_unchecked(&fscache_n_object_created);
68207
68208 object->state = FSCACHE_OBJECT_AVAILABLE;
68209 spin_unlock(&object->lock);
68210@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
68211 fscache_enqueue_dependents(object);
68212
68213 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
68214- fscache_stat(&fscache_n_object_avail);
68215+ fscache_stat_unchecked(&fscache_n_object_avail);
68216
68217 _leave("");
68218 }
68219@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68220 enum fscache_checkaux result;
68221
68222 if (!object->cookie->def->check_aux) {
68223- fscache_stat(&fscache_n_checkaux_none);
68224+ fscache_stat_unchecked(&fscache_n_checkaux_none);
68225 return FSCACHE_CHECKAUX_OKAY;
68226 }
68227
68228@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
68229 switch (result) {
68230 /* entry okay as is */
68231 case FSCACHE_CHECKAUX_OKAY:
68232- fscache_stat(&fscache_n_checkaux_okay);
68233+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
68234 break;
68235
68236 /* entry requires update */
68237 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
68238- fscache_stat(&fscache_n_checkaux_update);
68239+ fscache_stat_unchecked(&fscache_n_checkaux_update);
68240 break;
68241
68242 /* entry requires deletion */
68243 case FSCACHE_CHECKAUX_OBSOLETE:
68244- fscache_stat(&fscache_n_checkaux_obsolete);
68245+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
68246 break;
68247
68248 default:
68249diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
68250index 313e79a..775240f 100644
68251--- a/fs/fscache/operation.c
68252+++ b/fs/fscache/operation.c
68253@@ -16,7 +16,7 @@
68254 #include <linux/seq_file.h>
68255 #include "internal.h"
68256
68257-atomic_t fscache_op_debug_id;
68258+atomic_unchecked_t fscache_op_debug_id;
68259 EXPORT_SYMBOL(fscache_op_debug_id);
68260
68261 /**
68262@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
68263 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
68264 ASSERTCMP(atomic_read(&op->usage), >, 0);
68265
68266- fscache_stat(&fscache_n_op_enqueue);
68267+ fscache_stat_unchecked(&fscache_n_op_enqueue);
68268 switch (op->flags & FSCACHE_OP_TYPE) {
68269 case FSCACHE_OP_FAST:
68270 _debug("queue fast");
68271@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
68272 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
68273 if (op->processor)
68274 fscache_enqueue_operation(op);
68275- fscache_stat(&fscache_n_op_run);
68276+ fscache_stat_unchecked(&fscache_n_op_run);
68277 }
68278
68279 /*
68280@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68281 if (object->n_ops > 0) {
68282 atomic_inc(&op->usage);
68283 list_add_tail(&op->pend_link, &object->pending_ops);
68284- fscache_stat(&fscache_n_op_pend);
68285+ fscache_stat_unchecked(&fscache_n_op_pend);
68286 } else if (!list_empty(&object->pending_ops)) {
68287 atomic_inc(&op->usage);
68288 list_add_tail(&op->pend_link, &object->pending_ops);
68289- fscache_stat(&fscache_n_op_pend);
68290+ fscache_stat_unchecked(&fscache_n_op_pend);
68291 fscache_start_operations(object);
68292 } else {
68293 ASSERTCMP(object->n_in_progress, ==, 0);
68294@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
68295 object->n_exclusive++; /* reads and writes must wait */
68296 atomic_inc(&op->usage);
68297 list_add_tail(&op->pend_link, &object->pending_ops);
68298- fscache_stat(&fscache_n_op_pend);
68299+ fscache_stat_unchecked(&fscache_n_op_pend);
68300 ret = 0;
68301 } else {
68302 /* not allowed to submit ops in any other state */
68303@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
68304 if (object->n_exclusive > 0) {
68305 atomic_inc(&op->usage);
68306 list_add_tail(&op->pend_link, &object->pending_ops);
68307- fscache_stat(&fscache_n_op_pend);
68308+ fscache_stat_unchecked(&fscache_n_op_pend);
68309 } else if (!list_empty(&object->pending_ops)) {
68310 atomic_inc(&op->usage);
68311 list_add_tail(&op->pend_link, &object->pending_ops);
68312- fscache_stat(&fscache_n_op_pend);
68313+ fscache_stat_unchecked(&fscache_n_op_pend);
68314 fscache_start_operations(object);
68315 } else {
68316 ASSERTCMP(object->n_exclusive, ==, 0);
68317@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
68318 object->n_ops++;
68319 atomic_inc(&op->usage);
68320 list_add_tail(&op->pend_link, &object->pending_ops);
68321- fscache_stat(&fscache_n_op_pend);
68322+ fscache_stat_unchecked(&fscache_n_op_pend);
68323 ret = 0;
68324 } else if (object->state == FSCACHE_OBJECT_DYING ||
68325 object->state == FSCACHE_OBJECT_LC_DYING ||
68326 object->state == FSCACHE_OBJECT_WITHDRAWING) {
68327- fscache_stat(&fscache_n_op_rejected);
68328+ fscache_stat_unchecked(&fscache_n_op_rejected);
68329 ret = -ENOBUFS;
68330 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
68331 fscache_report_unexpected_submission(object, op, ostate);
68332@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
68333
68334 ret = -EBUSY;
68335 if (!list_empty(&op->pend_link)) {
68336- fscache_stat(&fscache_n_op_cancelled);
68337+ fscache_stat_unchecked(&fscache_n_op_cancelled);
68338 list_del_init(&op->pend_link);
68339 object->n_ops--;
68340 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
68341@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
68342 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
68343 BUG();
68344
68345- fscache_stat(&fscache_n_op_release);
68346+ fscache_stat_unchecked(&fscache_n_op_release);
68347
68348 if (op->release) {
68349 op->release(op);
68350@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
68351 * lock, and defer it otherwise */
68352 if (!spin_trylock(&object->lock)) {
68353 _debug("defer put");
68354- fscache_stat(&fscache_n_op_deferred_release);
68355+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
68356
68357 cache = object->cache;
68358 spin_lock(&cache->op_gc_list_lock);
68359@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
68360
68361 _debug("GC DEFERRED REL OBJ%x OP%x",
68362 object->debug_id, op->debug_id);
68363- fscache_stat(&fscache_n_op_gc);
68364+ fscache_stat_unchecked(&fscache_n_op_gc);
68365
68366 ASSERTCMP(atomic_read(&op->usage), ==, 0);
68367
68368diff --git a/fs/fscache/page.c b/fs/fscache/page.c
68369index c598ea4..6aac13e 100644
68370--- a/fs/fscache/page.c
68371+++ b/fs/fscache/page.c
68372@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68373 val = radix_tree_lookup(&cookie->stores, page->index);
68374 if (!val) {
68375 rcu_read_unlock();
68376- fscache_stat(&fscache_n_store_vmscan_not_storing);
68377+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
68378 __fscache_uncache_page(cookie, page);
68379 return true;
68380 }
68381@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
68382 spin_unlock(&cookie->stores_lock);
68383
68384 if (xpage) {
68385- fscache_stat(&fscache_n_store_vmscan_cancelled);
68386- fscache_stat(&fscache_n_store_radix_deletes);
68387+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
68388+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
68389 ASSERTCMP(xpage, ==, page);
68390 } else {
68391- fscache_stat(&fscache_n_store_vmscan_gone);
68392+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
68393 }
68394
68395 wake_up_bit(&cookie->flags, 0);
68396@@ -106,7 +106,7 @@ page_busy:
68397 /* we might want to wait here, but that could deadlock the allocator as
68398 * the slow-work threads writing to the cache may all end up sleeping
68399 * on memory allocation */
68400- fscache_stat(&fscache_n_store_vmscan_busy);
68401+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
68402 return false;
68403 }
68404 EXPORT_SYMBOL(__fscache_maybe_release_page);
68405@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
68406 FSCACHE_COOKIE_STORING_TAG);
68407 if (!radix_tree_tag_get(&cookie->stores, page->index,
68408 FSCACHE_COOKIE_PENDING_TAG)) {
68409- fscache_stat(&fscache_n_store_radix_deletes);
68410+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
68411 xpage = radix_tree_delete(&cookie->stores, page->index);
68412 }
68413 spin_unlock(&cookie->stores_lock);
68414@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
68415
68416 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
68417
68418- fscache_stat(&fscache_n_attr_changed_calls);
68419+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
68420
68421 if (fscache_object_is_active(object)) {
68422 fscache_set_op_state(op, "CallFS");
68423@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68424
68425 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
68426
68427- fscache_stat(&fscache_n_attr_changed);
68428+ fscache_stat_unchecked(&fscache_n_attr_changed);
68429
68430 op = kzalloc(sizeof(*op), GFP_KERNEL);
68431 if (!op) {
68432- fscache_stat(&fscache_n_attr_changed_nomem);
68433+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
68434 _leave(" = -ENOMEM");
68435 return -ENOMEM;
68436 }
68437@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68438 if (fscache_submit_exclusive_op(object, op) < 0)
68439 goto nobufs;
68440 spin_unlock(&cookie->lock);
68441- fscache_stat(&fscache_n_attr_changed_ok);
68442+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
68443 fscache_put_operation(op);
68444 _leave(" = 0");
68445 return 0;
68446@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
68447 nobufs:
68448 spin_unlock(&cookie->lock);
68449 kfree(op);
68450- fscache_stat(&fscache_n_attr_changed_nobufs);
68451+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
68452 _leave(" = %d", -ENOBUFS);
68453 return -ENOBUFS;
68454 }
68455@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
68456 /* allocate a retrieval operation and attempt to submit it */
68457 op = kzalloc(sizeof(*op), GFP_NOIO);
68458 if (!op) {
68459- fscache_stat(&fscache_n_retrievals_nomem);
68460+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
68461 return NULL;
68462 }
68463
68464@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
68465 return 0;
68466 }
68467
68468- fscache_stat(&fscache_n_retrievals_wait);
68469+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
68470
68471 jif = jiffies;
68472 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
68473 fscache_wait_bit_interruptible,
68474 TASK_INTERRUPTIBLE) != 0) {
68475- fscache_stat(&fscache_n_retrievals_intr);
68476+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
68477 _leave(" = -ERESTARTSYS");
68478 return -ERESTARTSYS;
68479 }
68480@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
68481 */
68482 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
68483 struct fscache_retrieval *op,
68484- atomic_t *stat_op_waits,
68485- atomic_t *stat_object_dead)
68486+ atomic_unchecked_t *stat_op_waits,
68487+ atomic_unchecked_t *stat_object_dead)
68488 {
68489 int ret;
68490
68491@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
68492 goto check_if_dead;
68493
68494 _debug(">>> WT");
68495- fscache_stat(stat_op_waits);
68496+ fscache_stat_unchecked(stat_op_waits);
68497 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
68498 fscache_wait_bit_interruptible,
68499 TASK_INTERRUPTIBLE) < 0) {
68500@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
68501
68502 check_if_dead:
68503 if (unlikely(fscache_object_is_dead(object))) {
68504- fscache_stat(stat_object_dead);
68505+ fscache_stat_unchecked(stat_object_dead);
68506 return -ENOBUFS;
68507 }
68508 return 0;
68509@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
68510
68511 _enter("%p,%p,,,", cookie, page);
68512
68513- fscache_stat(&fscache_n_retrievals);
68514+ fscache_stat_unchecked(&fscache_n_retrievals);
68515
68516 if (hlist_empty(&cookie->backing_objects))
68517 goto nobufs;
68518@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
68519 goto nobufs_unlock;
68520 spin_unlock(&cookie->lock);
68521
68522- fscache_stat(&fscache_n_retrieval_ops);
68523+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
68524
68525 /* pin the netfs read context in case we need to do the actual netfs
68526 * read because we've encountered a cache read failure */
68527@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
68528
68529 error:
68530 if (ret == -ENOMEM)
68531- fscache_stat(&fscache_n_retrievals_nomem);
68532+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
68533 else if (ret == -ERESTARTSYS)
68534- fscache_stat(&fscache_n_retrievals_intr);
68535+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
68536 else if (ret == -ENODATA)
68537- fscache_stat(&fscache_n_retrievals_nodata);
68538+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
68539 else if (ret < 0)
68540- fscache_stat(&fscache_n_retrievals_nobufs);
68541+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
68542 else
68543- fscache_stat(&fscache_n_retrievals_ok);
68544+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
68545
68546 fscache_put_retrieval(op);
68547 _leave(" = %d", ret);
68548@@ -453,7 +453,7 @@ nobufs_unlock:
68549 spin_unlock(&cookie->lock);
68550 kfree(op);
68551 nobufs:
68552- fscache_stat(&fscache_n_retrievals_nobufs);
68553+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
68554 _leave(" = -ENOBUFS");
68555 return -ENOBUFS;
68556 }
68557@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
68558
68559 _enter("%p,,%d,,,", cookie, *nr_pages);
68560
68561- fscache_stat(&fscache_n_retrievals);
68562+ fscache_stat_unchecked(&fscache_n_retrievals);
68563
68564 if (hlist_empty(&cookie->backing_objects))
68565 goto nobufs;
68566@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
68567 goto nobufs_unlock;
68568 spin_unlock(&cookie->lock);
68569
68570- fscache_stat(&fscache_n_retrieval_ops);
68571+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
68572
68573 /* pin the netfs read context in case we need to do the actual netfs
68574 * read because we've encountered a cache read failure */
68575@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
68576
68577 error:
68578 if (ret == -ENOMEM)
68579- fscache_stat(&fscache_n_retrievals_nomem);
68580+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
68581 else if (ret == -ERESTARTSYS)
68582- fscache_stat(&fscache_n_retrievals_intr);
68583+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
68584 else if (ret == -ENODATA)
68585- fscache_stat(&fscache_n_retrievals_nodata);
68586+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
68587 else if (ret < 0)
68588- fscache_stat(&fscache_n_retrievals_nobufs);
68589+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
68590 else
68591- fscache_stat(&fscache_n_retrievals_ok);
68592+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
68593
68594 fscache_put_retrieval(op);
68595 _leave(" = %d", ret);
68596@@ -570,7 +570,7 @@ nobufs_unlock:
68597 spin_unlock(&cookie->lock);
68598 kfree(op);
68599 nobufs:
68600- fscache_stat(&fscache_n_retrievals_nobufs);
68601+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
68602 _leave(" = -ENOBUFS");
68603 return -ENOBUFS;
68604 }
68605@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
68606
68607 _enter("%p,%p,,,", cookie, page);
68608
68609- fscache_stat(&fscache_n_allocs);
68610+ fscache_stat_unchecked(&fscache_n_allocs);
68611
68612 if (hlist_empty(&cookie->backing_objects))
68613 goto nobufs;
68614@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
68615 goto nobufs_unlock;
68616 spin_unlock(&cookie->lock);
68617
68618- fscache_stat(&fscache_n_alloc_ops);
68619+ fscache_stat_unchecked(&fscache_n_alloc_ops);
68620
68621 ret = fscache_wait_for_retrieval_activation(
68622 object, op,
68623@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
68624
68625 error:
68626 if (ret == -ERESTARTSYS)
68627- fscache_stat(&fscache_n_allocs_intr);
68628+ fscache_stat_unchecked(&fscache_n_allocs_intr);
68629 else if (ret < 0)
68630- fscache_stat(&fscache_n_allocs_nobufs);
68631+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
68632 else
68633- fscache_stat(&fscache_n_allocs_ok);
68634+ fscache_stat_unchecked(&fscache_n_allocs_ok);
68635
68636 fscache_put_retrieval(op);
68637 _leave(" = %d", ret);
68638@@ -651,7 +651,7 @@ nobufs_unlock:
68639 spin_unlock(&cookie->lock);
68640 kfree(op);
68641 nobufs:
68642- fscache_stat(&fscache_n_allocs_nobufs);
68643+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
68644 _leave(" = -ENOBUFS");
68645 return -ENOBUFS;
68646 }
68647@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
68648
68649 spin_lock(&cookie->stores_lock);
68650
68651- fscache_stat(&fscache_n_store_calls);
68652+ fscache_stat_unchecked(&fscache_n_store_calls);
68653
68654 /* find a page to store */
68655 page = NULL;
68656@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
68657 page = results[0];
68658 _debug("gang %d [%lx]", n, page->index);
68659 if (page->index > op->store_limit) {
68660- fscache_stat(&fscache_n_store_pages_over_limit);
68661+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
68662 goto superseded;
68663 }
68664
68665@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
68666
68667 if (page) {
68668 fscache_set_op_state(&op->op, "Store");
68669- fscache_stat(&fscache_n_store_pages);
68670+ fscache_stat_unchecked(&fscache_n_store_pages);
68671 fscache_stat(&fscache_n_cop_write_page);
68672 ret = object->cache->ops->write_page(op, page);
68673 fscache_stat_d(&fscache_n_cop_write_page);
68674@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
68675 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
68676 ASSERT(PageFsCache(page));
68677
68678- fscache_stat(&fscache_n_stores);
68679+ fscache_stat_unchecked(&fscache_n_stores);
68680
68681 op = kzalloc(sizeof(*op), GFP_NOIO);
68682 if (!op)
68683@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
68684 spin_unlock(&cookie->stores_lock);
68685 spin_unlock(&object->lock);
68686
68687- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
68688+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
68689 op->store_limit = object->store_limit;
68690
68691 if (fscache_submit_op(object, &op->op) < 0)
68692@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
68693
68694 spin_unlock(&cookie->lock);
68695 radix_tree_preload_end();
68696- fscache_stat(&fscache_n_store_ops);
68697- fscache_stat(&fscache_n_stores_ok);
68698+ fscache_stat_unchecked(&fscache_n_store_ops);
68699+ fscache_stat_unchecked(&fscache_n_stores_ok);
68700
68701 /* the slow work queue now carries its own ref on the object */
68702 fscache_put_operation(&op->op);
68703@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
68704 return 0;
68705
68706 already_queued:
68707- fscache_stat(&fscache_n_stores_again);
68708+ fscache_stat_unchecked(&fscache_n_stores_again);
68709 already_pending:
68710 spin_unlock(&cookie->stores_lock);
68711 spin_unlock(&object->lock);
68712 spin_unlock(&cookie->lock);
68713 radix_tree_preload_end();
68714 kfree(op);
68715- fscache_stat(&fscache_n_stores_ok);
68716+ fscache_stat_unchecked(&fscache_n_stores_ok);
68717 _leave(" = 0");
68718 return 0;
68719
68720@@ -886,14 +886,14 @@ nobufs:
68721 spin_unlock(&cookie->lock);
68722 radix_tree_preload_end();
68723 kfree(op);
68724- fscache_stat(&fscache_n_stores_nobufs);
68725+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
68726 _leave(" = -ENOBUFS");
68727 return -ENOBUFS;
68728
68729 nomem_free:
68730 kfree(op);
68731 nomem:
68732- fscache_stat(&fscache_n_stores_oom);
68733+ fscache_stat_unchecked(&fscache_n_stores_oom);
68734 _leave(" = -ENOMEM");
68735 return -ENOMEM;
68736 }
68737@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
68738 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
68739 ASSERTCMP(page, !=, NULL);
68740
68741- fscache_stat(&fscache_n_uncaches);
68742+ fscache_stat_unchecked(&fscache_n_uncaches);
68743
68744 /* cache withdrawal may beat us to it */
68745 if (!PageFsCache(page))
68746@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
68747 unsigned long loop;
68748
68749 #ifdef CONFIG_FSCACHE_STATS
68750- atomic_add(pagevec->nr, &fscache_n_marks);
68751+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
68752 #endif
68753
68754 for (loop = 0; loop < pagevec->nr; loop++) {
68755diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
68756index 46435f3..8cddf18 100644
68757--- a/fs/fscache/stats.c
68758+++ b/fs/fscache/stats.c
68759@@ -18,95 +18,95 @@
68760 /*
68761 * operation counters
68762 */
68763-atomic_t fscache_n_op_pend;
68764-atomic_t fscache_n_op_run;
68765-atomic_t fscache_n_op_enqueue;
68766-atomic_t fscache_n_op_requeue;
68767-atomic_t fscache_n_op_deferred_release;
68768-atomic_t fscache_n_op_release;
68769-atomic_t fscache_n_op_gc;
68770-atomic_t fscache_n_op_cancelled;
68771-atomic_t fscache_n_op_rejected;
68772+atomic_unchecked_t fscache_n_op_pend;
68773+atomic_unchecked_t fscache_n_op_run;
68774+atomic_unchecked_t fscache_n_op_enqueue;
68775+atomic_unchecked_t fscache_n_op_requeue;
68776+atomic_unchecked_t fscache_n_op_deferred_release;
68777+atomic_unchecked_t fscache_n_op_release;
68778+atomic_unchecked_t fscache_n_op_gc;
68779+atomic_unchecked_t fscache_n_op_cancelled;
68780+atomic_unchecked_t fscache_n_op_rejected;
68781
68782-atomic_t fscache_n_attr_changed;
68783-atomic_t fscache_n_attr_changed_ok;
68784-atomic_t fscache_n_attr_changed_nobufs;
68785-atomic_t fscache_n_attr_changed_nomem;
68786-atomic_t fscache_n_attr_changed_calls;
68787+atomic_unchecked_t fscache_n_attr_changed;
68788+atomic_unchecked_t fscache_n_attr_changed_ok;
68789+atomic_unchecked_t fscache_n_attr_changed_nobufs;
68790+atomic_unchecked_t fscache_n_attr_changed_nomem;
68791+atomic_unchecked_t fscache_n_attr_changed_calls;
68792
68793-atomic_t fscache_n_allocs;
68794-atomic_t fscache_n_allocs_ok;
68795-atomic_t fscache_n_allocs_wait;
68796-atomic_t fscache_n_allocs_nobufs;
68797-atomic_t fscache_n_allocs_intr;
68798-atomic_t fscache_n_allocs_object_dead;
68799-atomic_t fscache_n_alloc_ops;
68800-atomic_t fscache_n_alloc_op_waits;
68801+atomic_unchecked_t fscache_n_allocs;
68802+atomic_unchecked_t fscache_n_allocs_ok;
68803+atomic_unchecked_t fscache_n_allocs_wait;
68804+atomic_unchecked_t fscache_n_allocs_nobufs;
68805+atomic_unchecked_t fscache_n_allocs_intr;
68806+atomic_unchecked_t fscache_n_allocs_object_dead;
68807+atomic_unchecked_t fscache_n_alloc_ops;
68808+atomic_unchecked_t fscache_n_alloc_op_waits;
68809
68810-atomic_t fscache_n_retrievals;
68811-atomic_t fscache_n_retrievals_ok;
68812-atomic_t fscache_n_retrievals_wait;
68813-atomic_t fscache_n_retrievals_nodata;
68814-atomic_t fscache_n_retrievals_nobufs;
68815-atomic_t fscache_n_retrievals_intr;
68816-atomic_t fscache_n_retrievals_nomem;
68817-atomic_t fscache_n_retrievals_object_dead;
68818-atomic_t fscache_n_retrieval_ops;
68819-atomic_t fscache_n_retrieval_op_waits;
68820+atomic_unchecked_t fscache_n_retrievals;
68821+atomic_unchecked_t fscache_n_retrievals_ok;
68822+atomic_unchecked_t fscache_n_retrievals_wait;
68823+atomic_unchecked_t fscache_n_retrievals_nodata;
68824+atomic_unchecked_t fscache_n_retrievals_nobufs;
68825+atomic_unchecked_t fscache_n_retrievals_intr;
68826+atomic_unchecked_t fscache_n_retrievals_nomem;
68827+atomic_unchecked_t fscache_n_retrievals_object_dead;
68828+atomic_unchecked_t fscache_n_retrieval_ops;
68829+atomic_unchecked_t fscache_n_retrieval_op_waits;
68830
68831-atomic_t fscache_n_stores;
68832-atomic_t fscache_n_stores_ok;
68833-atomic_t fscache_n_stores_again;
68834-atomic_t fscache_n_stores_nobufs;
68835-atomic_t fscache_n_stores_oom;
68836-atomic_t fscache_n_store_ops;
68837-atomic_t fscache_n_store_calls;
68838-atomic_t fscache_n_store_pages;
68839-atomic_t fscache_n_store_radix_deletes;
68840-atomic_t fscache_n_store_pages_over_limit;
68841+atomic_unchecked_t fscache_n_stores;
68842+atomic_unchecked_t fscache_n_stores_ok;
68843+atomic_unchecked_t fscache_n_stores_again;
68844+atomic_unchecked_t fscache_n_stores_nobufs;
68845+atomic_unchecked_t fscache_n_stores_oom;
68846+atomic_unchecked_t fscache_n_store_ops;
68847+atomic_unchecked_t fscache_n_store_calls;
68848+atomic_unchecked_t fscache_n_store_pages;
68849+atomic_unchecked_t fscache_n_store_radix_deletes;
68850+atomic_unchecked_t fscache_n_store_pages_over_limit;
68851
68852-atomic_t fscache_n_store_vmscan_not_storing;
68853-atomic_t fscache_n_store_vmscan_gone;
68854-atomic_t fscache_n_store_vmscan_busy;
68855-atomic_t fscache_n_store_vmscan_cancelled;
68856+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
68857+atomic_unchecked_t fscache_n_store_vmscan_gone;
68858+atomic_unchecked_t fscache_n_store_vmscan_busy;
68859+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
68860
68861-atomic_t fscache_n_marks;
68862-atomic_t fscache_n_uncaches;
68863+atomic_unchecked_t fscache_n_marks;
68864+atomic_unchecked_t fscache_n_uncaches;
68865
68866-atomic_t fscache_n_acquires;
68867-atomic_t fscache_n_acquires_null;
68868-atomic_t fscache_n_acquires_no_cache;
68869-atomic_t fscache_n_acquires_ok;
68870-atomic_t fscache_n_acquires_nobufs;
68871-atomic_t fscache_n_acquires_oom;
68872+atomic_unchecked_t fscache_n_acquires;
68873+atomic_unchecked_t fscache_n_acquires_null;
68874+atomic_unchecked_t fscache_n_acquires_no_cache;
68875+atomic_unchecked_t fscache_n_acquires_ok;
68876+atomic_unchecked_t fscache_n_acquires_nobufs;
68877+atomic_unchecked_t fscache_n_acquires_oom;
68878
68879-atomic_t fscache_n_updates;
68880-atomic_t fscache_n_updates_null;
68881-atomic_t fscache_n_updates_run;
68882+atomic_unchecked_t fscache_n_updates;
68883+atomic_unchecked_t fscache_n_updates_null;
68884+atomic_unchecked_t fscache_n_updates_run;
68885
68886-atomic_t fscache_n_relinquishes;
68887-atomic_t fscache_n_relinquishes_null;
68888-atomic_t fscache_n_relinquishes_waitcrt;
68889-atomic_t fscache_n_relinquishes_retire;
68890+atomic_unchecked_t fscache_n_relinquishes;
68891+atomic_unchecked_t fscache_n_relinquishes_null;
68892+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
68893+atomic_unchecked_t fscache_n_relinquishes_retire;
68894
68895-atomic_t fscache_n_cookie_index;
68896-atomic_t fscache_n_cookie_data;
68897-atomic_t fscache_n_cookie_special;
68898+atomic_unchecked_t fscache_n_cookie_index;
68899+atomic_unchecked_t fscache_n_cookie_data;
68900+atomic_unchecked_t fscache_n_cookie_special;
68901
68902-atomic_t fscache_n_object_alloc;
68903-atomic_t fscache_n_object_no_alloc;
68904-atomic_t fscache_n_object_lookups;
68905-atomic_t fscache_n_object_lookups_negative;
68906-atomic_t fscache_n_object_lookups_positive;
68907-atomic_t fscache_n_object_lookups_timed_out;
68908-atomic_t fscache_n_object_created;
68909-atomic_t fscache_n_object_avail;
68910-atomic_t fscache_n_object_dead;
68911+atomic_unchecked_t fscache_n_object_alloc;
68912+atomic_unchecked_t fscache_n_object_no_alloc;
68913+atomic_unchecked_t fscache_n_object_lookups;
68914+atomic_unchecked_t fscache_n_object_lookups_negative;
68915+atomic_unchecked_t fscache_n_object_lookups_positive;
68916+atomic_unchecked_t fscache_n_object_lookups_timed_out;
68917+atomic_unchecked_t fscache_n_object_created;
68918+atomic_unchecked_t fscache_n_object_avail;
68919+atomic_unchecked_t fscache_n_object_dead;
68920
68921-atomic_t fscache_n_checkaux_none;
68922-atomic_t fscache_n_checkaux_okay;
68923-atomic_t fscache_n_checkaux_update;
68924-atomic_t fscache_n_checkaux_obsolete;
68925+atomic_unchecked_t fscache_n_checkaux_none;
68926+atomic_unchecked_t fscache_n_checkaux_okay;
68927+atomic_unchecked_t fscache_n_checkaux_update;
68928+atomic_unchecked_t fscache_n_checkaux_obsolete;
68929
68930 atomic_t fscache_n_cop_alloc_object;
68931 atomic_t fscache_n_cop_lookup_object;
68932@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
68933 seq_puts(m, "FS-Cache statistics\n");
68934
68935 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
68936- atomic_read(&fscache_n_cookie_index),
68937- atomic_read(&fscache_n_cookie_data),
68938- atomic_read(&fscache_n_cookie_special));
68939+ atomic_read_unchecked(&fscache_n_cookie_index),
68940+ atomic_read_unchecked(&fscache_n_cookie_data),
68941+ atomic_read_unchecked(&fscache_n_cookie_special));
68942
68943 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
68944- atomic_read(&fscache_n_object_alloc),
68945- atomic_read(&fscache_n_object_no_alloc),
68946- atomic_read(&fscache_n_object_avail),
68947- atomic_read(&fscache_n_object_dead));
68948+ atomic_read_unchecked(&fscache_n_object_alloc),
68949+ atomic_read_unchecked(&fscache_n_object_no_alloc),
68950+ atomic_read_unchecked(&fscache_n_object_avail),
68951+ atomic_read_unchecked(&fscache_n_object_dead));
68952 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
68953- atomic_read(&fscache_n_checkaux_none),
68954- atomic_read(&fscache_n_checkaux_okay),
68955- atomic_read(&fscache_n_checkaux_update),
68956- atomic_read(&fscache_n_checkaux_obsolete));
68957+ atomic_read_unchecked(&fscache_n_checkaux_none),
68958+ atomic_read_unchecked(&fscache_n_checkaux_okay),
68959+ atomic_read_unchecked(&fscache_n_checkaux_update),
68960+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
68961
68962 seq_printf(m, "Pages : mrk=%u unc=%u\n",
68963- atomic_read(&fscache_n_marks),
68964- atomic_read(&fscache_n_uncaches));
68965+ atomic_read_unchecked(&fscache_n_marks),
68966+ atomic_read_unchecked(&fscache_n_uncaches));
68967
68968 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
68969 " oom=%u\n",
68970- atomic_read(&fscache_n_acquires),
68971- atomic_read(&fscache_n_acquires_null),
68972- atomic_read(&fscache_n_acquires_no_cache),
68973- atomic_read(&fscache_n_acquires_ok),
68974- atomic_read(&fscache_n_acquires_nobufs),
68975- atomic_read(&fscache_n_acquires_oom));
68976+ atomic_read_unchecked(&fscache_n_acquires),
68977+ atomic_read_unchecked(&fscache_n_acquires_null),
68978+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
68979+ atomic_read_unchecked(&fscache_n_acquires_ok),
68980+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
68981+ atomic_read_unchecked(&fscache_n_acquires_oom));
68982
68983 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
68984- atomic_read(&fscache_n_object_lookups),
68985- atomic_read(&fscache_n_object_lookups_negative),
68986- atomic_read(&fscache_n_object_lookups_positive),
68987- atomic_read(&fscache_n_object_lookups_timed_out),
68988- atomic_read(&fscache_n_object_created));
68989+ atomic_read_unchecked(&fscache_n_object_lookups),
68990+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
68991+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
68992+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
68993+ atomic_read_unchecked(&fscache_n_object_created));
68994
68995 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
68996- atomic_read(&fscache_n_updates),
68997- atomic_read(&fscache_n_updates_null),
68998- atomic_read(&fscache_n_updates_run));
68999+ atomic_read_unchecked(&fscache_n_updates),
69000+ atomic_read_unchecked(&fscache_n_updates_null),
69001+ atomic_read_unchecked(&fscache_n_updates_run));
69002
69003 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
69004- atomic_read(&fscache_n_relinquishes),
69005- atomic_read(&fscache_n_relinquishes_null),
69006- atomic_read(&fscache_n_relinquishes_waitcrt),
69007- atomic_read(&fscache_n_relinquishes_retire));
69008+ atomic_read_unchecked(&fscache_n_relinquishes),
69009+ atomic_read_unchecked(&fscache_n_relinquishes_null),
69010+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
69011+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
69012
69013 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
69014- atomic_read(&fscache_n_attr_changed),
69015- atomic_read(&fscache_n_attr_changed_ok),
69016- atomic_read(&fscache_n_attr_changed_nobufs),
69017- atomic_read(&fscache_n_attr_changed_nomem),
69018- atomic_read(&fscache_n_attr_changed_calls));
69019+ atomic_read_unchecked(&fscache_n_attr_changed),
69020+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
69021+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
69022+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
69023+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
69024
69025 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
69026- atomic_read(&fscache_n_allocs),
69027- atomic_read(&fscache_n_allocs_ok),
69028- atomic_read(&fscache_n_allocs_wait),
69029- atomic_read(&fscache_n_allocs_nobufs),
69030- atomic_read(&fscache_n_allocs_intr));
69031+ atomic_read_unchecked(&fscache_n_allocs),
69032+ atomic_read_unchecked(&fscache_n_allocs_ok),
69033+ atomic_read_unchecked(&fscache_n_allocs_wait),
69034+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
69035+ atomic_read_unchecked(&fscache_n_allocs_intr));
69036 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
69037- atomic_read(&fscache_n_alloc_ops),
69038- atomic_read(&fscache_n_alloc_op_waits),
69039- atomic_read(&fscache_n_allocs_object_dead));
69040+ atomic_read_unchecked(&fscache_n_alloc_ops),
69041+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
69042+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
69043
69044 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
69045 " int=%u oom=%u\n",
69046- atomic_read(&fscache_n_retrievals),
69047- atomic_read(&fscache_n_retrievals_ok),
69048- atomic_read(&fscache_n_retrievals_wait),
69049- atomic_read(&fscache_n_retrievals_nodata),
69050- atomic_read(&fscache_n_retrievals_nobufs),
69051- atomic_read(&fscache_n_retrievals_intr),
69052- atomic_read(&fscache_n_retrievals_nomem));
69053+ atomic_read_unchecked(&fscache_n_retrievals),
69054+ atomic_read_unchecked(&fscache_n_retrievals_ok),
69055+ atomic_read_unchecked(&fscache_n_retrievals_wait),
69056+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
69057+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
69058+ atomic_read_unchecked(&fscache_n_retrievals_intr),
69059+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
69060 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
69061- atomic_read(&fscache_n_retrieval_ops),
69062- atomic_read(&fscache_n_retrieval_op_waits),
69063- atomic_read(&fscache_n_retrievals_object_dead));
69064+ atomic_read_unchecked(&fscache_n_retrieval_ops),
69065+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
69066+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
69067
69068 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
69069- atomic_read(&fscache_n_stores),
69070- atomic_read(&fscache_n_stores_ok),
69071- atomic_read(&fscache_n_stores_again),
69072- atomic_read(&fscache_n_stores_nobufs),
69073- atomic_read(&fscache_n_stores_oom));
69074+ atomic_read_unchecked(&fscache_n_stores),
69075+ atomic_read_unchecked(&fscache_n_stores_ok),
69076+ atomic_read_unchecked(&fscache_n_stores_again),
69077+ atomic_read_unchecked(&fscache_n_stores_nobufs),
69078+ atomic_read_unchecked(&fscache_n_stores_oom));
69079 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
69080- atomic_read(&fscache_n_store_ops),
69081- atomic_read(&fscache_n_store_calls),
69082- atomic_read(&fscache_n_store_pages),
69083- atomic_read(&fscache_n_store_radix_deletes),
69084- atomic_read(&fscache_n_store_pages_over_limit));
69085+ atomic_read_unchecked(&fscache_n_store_ops),
69086+ atomic_read_unchecked(&fscache_n_store_calls),
69087+ atomic_read_unchecked(&fscache_n_store_pages),
69088+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
69089+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
69090
69091 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
69092- atomic_read(&fscache_n_store_vmscan_not_storing),
69093- atomic_read(&fscache_n_store_vmscan_gone),
69094- atomic_read(&fscache_n_store_vmscan_busy),
69095- atomic_read(&fscache_n_store_vmscan_cancelled));
69096+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
69097+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
69098+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
69099+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
69100
69101 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
69102- atomic_read(&fscache_n_op_pend),
69103- atomic_read(&fscache_n_op_run),
69104- atomic_read(&fscache_n_op_enqueue),
69105- atomic_read(&fscache_n_op_cancelled),
69106- atomic_read(&fscache_n_op_rejected));
69107+ atomic_read_unchecked(&fscache_n_op_pend),
69108+ atomic_read_unchecked(&fscache_n_op_run),
69109+ atomic_read_unchecked(&fscache_n_op_enqueue),
69110+ atomic_read_unchecked(&fscache_n_op_cancelled),
69111+ atomic_read_unchecked(&fscache_n_op_rejected));
69112 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
69113- atomic_read(&fscache_n_op_deferred_release),
69114- atomic_read(&fscache_n_op_release),
69115- atomic_read(&fscache_n_op_gc));
69116+ atomic_read_unchecked(&fscache_n_op_deferred_release),
69117+ atomic_read_unchecked(&fscache_n_op_release),
69118+ atomic_read_unchecked(&fscache_n_op_gc));
69119
69120 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
69121 atomic_read(&fscache_n_cop_alloc_object),
69122diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
69123index de792dc..448b532 100644
69124--- a/fs/fuse/cuse.c
69125+++ b/fs/fuse/cuse.c
69126@@ -576,10 +576,12 @@ static int __init cuse_init(void)
69127 INIT_LIST_HEAD(&cuse_conntbl[i]);
69128
69129 /* inherit and extend fuse_dev_operations */
69130- cuse_channel_fops = fuse_dev_operations;
69131- cuse_channel_fops.owner = THIS_MODULE;
69132- cuse_channel_fops.open = cuse_channel_open;
69133- cuse_channel_fops.release = cuse_channel_release;
69134+ pax_open_kernel();
69135+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
69136+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
69137+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
69138+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
69139+ pax_close_kernel();
69140
69141 cuse_class = class_create(THIS_MODULE, "cuse");
69142 if (IS_ERR(cuse_class))
69143diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
69144index 1facb39..7f48557 100644
69145--- a/fs/fuse/dev.c
69146+++ b/fs/fuse/dev.c
69147@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69148 {
69149 struct fuse_notify_inval_entry_out outarg;
69150 int err = -EINVAL;
69151- char buf[FUSE_NAME_MAX+1];
69152+ char *buf = NULL;
69153 struct qstr name;
69154
69155 if (size < sizeof(outarg))
69156@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69157 if (outarg.namelen > FUSE_NAME_MAX)
69158 goto err;
69159
69160+ err = -ENOMEM;
69161+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
69162+ if (!buf)
69163+ goto err;
69164+
69165 err = -EINVAL;
69166 if (size != sizeof(outarg) + outarg.namelen + 1)
69167 goto err;
69168@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
69169
69170 down_read(&fc->killsb);
69171 err = -ENOENT;
69172- if (!fc->sb)
69173- goto err_unlock;
69174-
69175- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69176-
69177-err_unlock:
69178+ if (fc->sb)
69179+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
69180 up_read(&fc->killsb);
69181+ kfree(buf);
69182 return err;
69183
69184 err:
69185 fuse_copy_finish(cs);
69186+ kfree(buf);
69187 return err;
69188 }
69189
69190diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
69191index 4787ae6..73efff7 100644
69192--- a/fs/fuse/dir.c
69193+++ b/fs/fuse/dir.c
69194@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
69195 return link;
69196 }
69197
69198-static void free_link(char *link)
69199+static void free_link(const char *link)
69200 {
69201 if (!IS_ERR(link))
69202 free_page((unsigned long) link);
69203diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
69204index 247436c..e650ccb 100644
69205--- a/fs/gfs2/ops_inode.c
69206+++ b/fs/gfs2/ops_inode.c
69207@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
69208 unsigned int x;
69209 int error;
69210
69211+ pax_track_stack();
69212+
69213 if (ndentry->d_inode) {
69214 nip = GFS2_I(ndentry->d_inode);
69215 if (ip == nip)
69216diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
69217index 4463297..4fed53b 100644
69218--- a/fs/gfs2/sys.c
69219+++ b/fs/gfs2/sys.c
69220@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
69221 return a->store ? a->store(sdp, buf, len) : len;
69222 }
69223
69224-static struct sysfs_ops gfs2_attr_ops = {
69225+static const struct sysfs_ops gfs2_attr_ops = {
69226 .show = gfs2_attr_show,
69227 .store = gfs2_attr_store,
69228 };
69229@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
69230 return 0;
69231 }
69232
69233-static struct kset_uevent_ops gfs2_uevent_ops = {
69234+static const struct kset_uevent_ops gfs2_uevent_ops = {
69235 .uevent = gfs2_uevent,
69236 };
69237
69238diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
69239index f6874ac..7cd98a8 100644
69240--- a/fs/hfsplus/catalog.c
69241+++ b/fs/hfsplus/catalog.c
69242@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
69243 int err;
69244 u16 type;
69245
69246+ pax_track_stack();
69247+
69248 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
69249 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
69250 if (err)
69251@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
69252 int entry_size;
69253 int err;
69254
69255+ pax_track_stack();
69256+
69257 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
69258 sb = dir->i_sb;
69259 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
69260@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
69261 int entry_size, type;
69262 int err = 0;
69263
69264+ pax_track_stack();
69265+
69266 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
69267 dst_dir->i_ino, dst_name->name);
69268 sb = src_dir->i_sb;
69269diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
69270index 5f40236..dac3421 100644
69271--- a/fs/hfsplus/dir.c
69272+++ b/fs/hfsplus/dir.c
69273@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
69274 struct hfsplus_readdir_data *rd;
69275 u16 type;
69276
69277+ pax_track_stack();
69278+
69279 if (filp->f_pos >= inode->i_size)
69280 return 0;
69281
69282diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
69283index 1bcf597..905a251 100644
69284--- a/fs/hfsplus/inode.c
69285+++ b/fs/hfsplus/inode.c
69286@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
69287 int res = 0;
69288 u16 type;
69289
69290+ pax_track_stack();
69291+
69292 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
69293
69294 HFSPLUS_I(inode).dev = 0;
69295@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
69296 struct hfs_find_data fd;
69297 hfsplus_cat_entry entry;
69298
69299+ pax_track_stack();
69300+
69301 if (HFSPLUS_IS_RSRC(inode))
69302 main_inode = HFSPLUS_I(inode).rsrc_inode;
69303
69304diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
69305index f457d2c..7ef4ad5 100644
69306--- a/fs/hfsplus/ioctl.c
69307+++ b/fs/hfsplus/ioctl.c
69308@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
69309 struct hfsplus_cat_file *file;
69310 int res;
69311
69312+ pax_track_stack();
69313+
69314 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69315 return -EOPNOTSUPP;
69316
69317@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
69318 struct hfsplus_cat_file *file;
69319 ssize_t res = 0;
69320
69321+ pax_track_stack();
69322+
69323 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
69324 return -EOPNOTSUPP;
69325
69326diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
69327index 43022f3..7298079 100644
69328--- a/fs/hfsplus/super.c
69329+++ b/fs/hfsplus/super.c
69330@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
69331 struct nls_table *nls = NULL;
69332 int err = -EINVAL;
69333
69334+ pax_track_stack();
69335+
69336 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
69337 if (!sbi)
69338 return -ENOMEM;
69339diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
69340index 87a1258..5694d91 100644
69341--- a/fs/hugetlbfs/inode.c
69342+++ b/fs/hugetlbfs/inode.c
69343@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
69344 .kill_sb = kill_litter_super,
69345 };
69346
69347-static struct vfsmount *hugetlbfs_vfsmount;
69348+struct vfsmount *hugetlbfs_vfsmount;
69349
69350 static int can_do_hugetlb_shm(void)
69351 {
69352diff --git a/fs/ioctl.c b/fs/ioctl.c
69353index 6c75110..19d2c3c 100644
69354--- a/fs/ioctl.c
69355+++ b/fs/ioctl.c
69356@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
69357 u64 phys, u64 len, u32 flags)
69358 {
69359 struct fiemap_extent extent;
69360- struct fiemap_extent *dest = fieinfo->fi_extents_start;
69361+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
69362
69363 /* only count the extents */
69364 if (fieinfo->fi_extents_max == 0) {
69365@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69366
69367 fieinfo.fi_flags = fiemap.fm_flags;
69368 fieinfo.fi_extents_max = fiemap.fm_extent_count;
69369- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
69370+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
69371
69372 if (fiemap.fm_extent_count != 0 &&
69373 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
69374@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
69375 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
69376 fiemap.fm_flags = fieinfo.fi_flags;
69377 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
69378- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
69379+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
69380 error = -EFAULT;
69381
69382 return error;
69383diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
69384index b0435dd..81ee0be 100644
69385--- a/fs/jbd/checkpoint.c
69386+++ b/fs/jbd/checkpoint.c
69387@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
69388 tid_t this_tid;
69389 int result;
69390
69391+ pax_track_stack();
69392+
69393 jbd_debug(1, "Start checkpoint\n");
69394
69395 /*
69396diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
69397index 546d153..736896c 100644
69398--- a/fs/jffs2/compr_rtime.c
69399+++ b/fs/jffs2/compr_rtime.c
69400@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
69401 int outpos = 0;
69402 int pos=0;
69403
69404+ pax_track_stack();
69405+
69406 memset(positions,0,sizeof(positions));
69407
69408 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
69409@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
69410 int outpos = 0;
69411 int pos=0;
69412
69413+ pax_track_stack();
69414+
69415 memset(positions,0,sizeof(positions));
69416
69417 while (outpos<destlen) {
69418diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
69419index 170d289..3254b98 100644
69420--- a/fs/jffs2/compr_rubin.c
69421+++ b/fs/jffs2/compr_rubin.c
69422@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
69423 int ret;
69424 uint32_t mysrclen, mydstlen;
69425
69426+ pax_track_stack();
69427+
69428 mysrclen = *sourcelen;
69429 mydstlen = *dstlen - 8;
69430
69431diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
69432index b47679b..00d65d3 100644
69433--- a/fs/jffs2/erase.c
69434+++ b/fs/jffs2/erase.c
69435@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
69436 struct jffs2_unknown_node marker = {
69437 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
69438 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
69439- .totlen = cpu_to_je32(c->cleanmarker_size)
69440+ .totlen = cpu_to_je32(c->cleanmarker_size),
69441+ .hdr_crc = cpu_to_je32(0)
69442 };
69443
69444 jffs2_prealloc_raw_node_refs(c, jeb, 1);
69445diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
69446index 5ef7bac..4fd1e3c 100644
69447--- a/fs/jffs2/wbuf.c
69448+++ b/fs/jffs2/wbuf.c
69449@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
69450 {
69451 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
69452 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
69453- .totlen = constant_cpu_to_je32(8)
69454+ .totlen = constant_cpu_to_je32(8),
69455+ .hdr_crc = constant_cpu_to_je32(0)
69456 };
69457
69458 /*
69459diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
69460index 082e844..52012a1 100644
69461--- a/fs/jffs2/xattr.c
69462+++ b/fs/jffs2/xattr.c
69463@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
69464
69465 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
69466
69467+ pax_track_stack();
69468+
69469 /* Phase.1 : Merge same xref */
69470 for (i=0; i < XREF_TMPHASH_SIZE; i++)
69471 xref_tmphash[i] = NULL;
69472diff --git a/fs/jfs/super.c b/fs/jfs/super.c
69473index 2234c73..f6e6e6b 100644
69474--- a/fs/jfs/super.c
69475+++ b/fs/jfs/super.c
69476@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
69477
69478 jfs_inode_cachep =
69479 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
69480- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
69481+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
69482 init_once);
69483 if (jfs_inode_cachep == NULL)
69484 return -ENOMEM;
69485diff --git a/fs/libfs.c b/fs/libfs.c
69486index ba36e93..3153fce 100644
69487--- a/fs/libfs.c
69488+++ b/fs/libfs.c
69489@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
69490
69491 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
69492 struct dentry *next;
69493+ char d_name[sizeof(next->d_iname)];
69494+ const unsigned char *name;
69495+
69496 next = list_entry(p, struct dentry, d_u.d_child);
69497 if (d_unhashed(next) || !next->d_inode)
69498 continue;
69499
69500 spin_unlock(&dcache_lock);
69501- if (filldir(dirent, next->d_name.name,
69502+ name = next->d_name.name;
69503+ if (name == next->d_iname) {
69504+ memcpy(d_name, name, next->d_name.len);
69505+ name = d_name;
69506+ }
69507+ if (filldir(dirent, name,
69508 next->d_name.len, filp->f_pos,
69509 next->d_inode->i_ino,
69510 dt_type(next->d_inode)) < 0)
69511diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
69512index c325a83..d15b07b 100644
69513--- a/fs/lockd/clntproc.c
69514+++ b/fs/lockd/clntproc.c
69515@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
69516 /*
69517 * Cookie counter for NLM requests
69518 */
69519-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
69520+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
69521
69522 void nlmclnt_next_cookie(struct nlm_cookie *c)
69523 {
69524- u32 cookie = atomic_inc_return(&nlm_cookie);
69525+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
69526
69527 memcpy(c->data, &cookie, 4);
69528 c->len=4;
69529@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
69530 struct nlm_rqst reqst, *req;
69531 int status;
69532
69533+ pax_track_stack();
69534+
69535 req = &reqst;
69536 memset(req, 0, sizeof(*req));
69537 locks_init_lock(&req->a_args.lock.fl);
69538diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
69539index 1a54ae1..6a16c27 100644
69540--- a/fs/lockd/svc.c
69541+++ b/fs/lockd/svc.c
69542@@ -43,7 +43,7 @@
69543
69544 static struct svc_program nlmsvc_program;
69545
69546-struct nlmsvc_binding * nlmsvc_ops;
69547+const struct nlmsvc_binding * nlmsvc_ops;
69548 EXPORT_SYMBOL_GPL(nlmsvc_ops);
69549
69550 static DEFINE_MUTEX(nlmsvc_mutex);
69551diff --git a/fs/locks.c b/fs/locks.c
69552index a8794f2..4041e55 100644
69553--- a/fs/locks.c
69554+++ b/fs/locks.c
69555@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
69556
69557 static struct kmem_cache *filelock_cache __read_mostly;
69558
69559+static void locks_init_lock_always(struct file_lock *fl)
69560+{
69561+ fl->fl_next = NULL;
69562+ fl->fl_fasync = NULL;
69563+ fl->fl_owner = NULL;
69564+ fl->fl_pid = 0;
69565+ fl->fl_nspid = NULL;
69566+ fl->fl_file = NULL;
69567+ fl->fl_flags = 0;
69568+ fl->fl_type = 0;
69569+ fl->fl_start = fl->fl_end = 0;
69570+}
69571+
69572 /* Allocate an empty lock structure. */
69573 static struct file_lock *locks_alloc_lock(void)
69574 {
69575- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
69576+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
69577+
69578+ if (fl)
69579+ locks_init_lock_always(fl);
69580+
69581+ return fl;
69582 }
69583
69584 void locks_release_private(struct file_lock *fl)
69585@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
69586 INIT_LIST_HEAD(&fl->fl_link);
69587 INIT_LIST_HEAD(&fl->fl_block);
69588 init_waitqueue_head(&fl->fl_wait);
69589- fl->fl_next = NULL;
69590- fl->fl_fasync = NULL;
69591- fl->fl_owner = NULL;
69592- fl->fl_pid = 0;
69593- fl->fl_nspid = NULL;
69594- fl->fl_file = NULL;
69595- fl->fl_flags = 0;
69596- fl->fl_type = 0;
69597- fl->fl_start = fl->fl_end = 0;
69598 fl->fl_ops = NULL;
69599 fl->fl_lmops = NULL;
69600+ locks_init_lock_always(fl);
69601 }
69602
69603 EXPORT_SYMBOL(locks_init_lock);
69604@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
69605 return;
69606
69607 if (filp->f_op && filp->f_op->flock) {
69608- struct file_lock fl = {
69609+ struct file_lock flock = {
69610 .fl_pid = current->tgid,
69611 .fl_file = filp,
69612 .fl_flags = FL_FLOCK,
69613 .fl_type = F_UNLCK,
69614 .fl_end = OFFSET_MAX,
69615 };
69616- filp->f_op->flock(filp, F_SETLKW, &fl);
69617- if (fl.fl_ops && fl.fl_ops->fl_release_private)
69618- fl.fl_ops->fl_release_private(&fl);
69619+ filp->f_op->flock(filp, F_SETLKW, &flock);
69620+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
69621+ flock.fl_ops->fl_release_private(&flock);
69622 }
69623
69624 lock_kernel();
69625diff --git a/fs/mbcache.c b/fs/mbcache.c
69626index ec88ff3..b843a82 100644
69627--- a/fs/mbcache.c
69628+++ b/fs/mbcache.c
69629@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
69630 if (!cache)
69631 goto fail;
69632 cache->c_name = name;
69633- cache->c_op.free = NULL;
69634+ *(void **)&cache->c_op.free = NULL;
69635 if (cache_op)
69636- cache->c_op.free = cache_op->free;
69637+ *(void **)&cache->c_op.free = cache_op->free;
69638 atomic_set(&cache->c_entry_count, 0);
69639 cache->c_bucket_bits = bucket_bits;
69640 #ifdef MB_CACHE_INDEXES_COUNT
69641diff --git a/fs/namei.c b/fs/namei.c
69642index b0afbd4..8d065a1 100644
69643--- a/fs/namei.c
69644+++ b/fs/namei.c
69645@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
69646 return ret;
69647
69648 /*
69649+ * Searching includes executable on directories, else just read.
69650+ */
69651+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
69652+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
69653+ if (capable(CAP_DAC_READ_SEARCH))
69654+ return 0;
69655+
69656+ /*
69657 * Read/write DACs are always overridable.
69658 * Executable DACs are overridable if at least one exec bit is set.
69659 */
69660@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
69661 if (capable(CAP_DAC_OVERRIDE))
69662 return 0;
69663
69664- /*
69665- * Searching includes executable on directories, else just read.
69666- */
69667- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
69668- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
69669- if (capable(CAP_DAC_READ_SEARCH))
69670- return 0;
69671-
69672 return -EACCES;
69673 }
69674
69675@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
69676 if (!ret)
69677 goto ok;
69678
69679- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
69680+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
69681+ capable(CAP_DAC_OVERRIDE))
69682 goto ok;
69683
69684 return ret;
69685@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
69686 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
69687 error = PTR_ERR(cookie);
69688 if (!IS_ERR(cookie)) {
69689- char *s = nd_get_link(nd);
69690+ const char *s = nd_get_link(nd);
69691 error = 0;
69692 if (s)
69693 error = __vfs_follow_link(nd, s);
69694@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
69695 err = security_inode_follow_link(path->dentry, nd);
69696 if (err)
69697 goto loop;
69698+
69699+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
69700+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
69701+ err = -EACCES;
69702+ goto loop;
69703+ }
69704+
69705 current->link_count++;
69706 current->total_link_count++;
69707 nd->depth++;
69708@@ -1016,11 +1024,19 @@ return_reval:
69709 break;
69710 }
69711 return_base:
69712+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
69713+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
69714+ path_put(&nd->path);
69715+ return -ENOENT;
69716+ }
69717 return 0;
69718 out_dput:
69719 path_put_conditional(&next, nd);
69720 break;
69721 }
69722+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
69723+ err = -ENOENT;
69724+
69725 path_put(&nd->path);
69726 return_err:
69727 return err;
69728@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
69729 int retval = path_init(dfd, name, flags, nd);
69730 if (!retval)
69731 retval = path_walk(name, nd);
69732- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
69733- nd->path.dentry->d_inode))
69734- audit_inode(name, nd->path.dentry);
69735+
69736+ if (likely(!retval)) {
69737+ if (nd->path.dentry && nd->path.dentry->d_inode) {
69738+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
69739+ retval = -ENOENT;
69740+ if (!audit_dummy_context())
69741+ audit_inode(name, nd->path.dentry);
69742+ }
69743+ }
69744 if (nd->root.mnt) {
69745 path_put(&nd->root);
69746 nd->root.mnt = NULL;
69747 }
69748+
69749 return retval;
69750 }
69751
69752@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
69753 if (error)
69754 goto err_out;
69755
69756+
69757+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
69758+ error = -EPERM;
69759+ goto err_out;
69760+ }
69761+ if (gr_handle_rawio(inode)) {
69762+ error = -EPERM;
69763+ goto err_out;
69764+ }
69765+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
69766+ error = -EACCES;
69767+ goto err_out;
69768+ }
69769+
69770 if (flag & O_TRUNC) {
69771 error = get_write_access(inode);
69772 if (error)
69773@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
69774 {
69775 int error;
69776 struct dentry *dir = nd->path.dentry;
69777+ int acc_mode = ACC_MODE(flag);
69778+
69779+ if (flag & O_TRUNC)
69780+ acc_mode |= MAY_WRITE;
69781+ if (flag & O_APPEND)
69782+ acc_mode |= MAY_APPEND;
69783+
69784+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
69785+ error = -EACCES;
69786+ goto out_unlock;
69787+ }
69788
69789 if (!IS_POSIXACL(dir->d_inode))
69790 mode &= ~current_umask();
69791@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
69792 if (error)
69793 goto out_unlock;
69794 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
69795+ if (!error)
69796+ gr_handle_create(path->dentry, nd->path.mnt);
69797 out_unlock:
69798 mutex_unlock(&dir->d_inode->i_mutex);
69799 dput(nd->path.dentry);
69800@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
69801 &nd, flag);
69802 if (error)
69803 return ERR_PTR(error);
69804+
69805+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
69806+ error = -EPERM;
69807+ goto exit;
69808+ }
69809+
69810+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
69811+ error = -EPERM;
69812+ goto exit;
69813+ }
69814+
69815+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
69816+ error = -EACCES;
69817+ goto exit;
69818+ }
69819+
69820 goto ok;
69821 }
69822
69823@@ -1795,6 +1861,19 @@ do_last:
69824 /*
69825 * It already exists.
69826 */
69827+
69828+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
69829+ error = -ENOENT;
69830+ goto exit_mutex_unlock;
69831+ }
69832+
69833+ /* only check if O_CREAT is specified, all other checks need
69834+ to go into may_open */
69835+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
69836+ error = -EACCES;
69837+ goto exit_mutex_unlock;
69838+ }
69839+
69840 mutex_unlock(&dir->d_inode->i_mutex);
69841 audit_inode(pathname, path.dentry);
69842
69843@@ -1887,6 +1966,13 @@ do_link:
69844 error = security_inode_follow_link(path.dentry, &nd);
69845 if (error)
69846 goto exit_dput;
69847+
69848+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
69849+ path.dentry, nd.path.mnt)) {
69850+ error = -EACCES;
69851+ goto exit_dput;
69852+ }
69853+
69854 error = __do_follow_link(&path, &nd);
69855 if (error) {
69856 /* Does someone understand code flow here? Or it is only
69857@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
69858 }
69859 return dentry;
69860 eexist:
69861+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
69862+ dput(dentry);
69863+ return ERR_PTR(-ENOENT);
69864+ }
69865 dput(dentry);
69866 dentry = ERR_PTR(-EEXIST);
69867 fail:
69868@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
69869 error = may_mknod(mode);
69870 if (error)
69871 goto out_dput;
69872+
69873+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
69874+ error = -EPERM;
69875+ goto out_dput;
69876+ }
69877+
69878+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
69879+ error = -EACCES;
69880+ goto out_dput;
69881+ }
69882+
69883 error = mnt_want_write(nd.path.mnt);
69884 if (error)
69885 goto out_dput;
69886@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
69887 }
69888 out_drop_write:
69889 mnt_drop_write(nd.path.mnt);
69890+
69891+ if (!error)
69892+ gr_handle_create(dentry, nd.path.mnt);
69893 out_dput:
69894 dput(dentry);
69895 out_unlock:
69896@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
69897 if (IS_ERR(dentry))
69898 goto out_unlock;
69899
69900+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
69901+ error = -EACCES;
69902+ goto out_dput;
69903+ }
69904+
69905 if (!IS_POSIXACL(nd.path.dentry->d_inode))
69906 mode &= ~current_umask();
69907 error = mnt_want_write(nd.path.mnt);
69908@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
69909 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
69910 out_drop_write:
69911 mnt_drop_write(nd.path.mnt);
69912+
69913+ if (!error)
69914+ gr_handle_create(dentry, nd.path.mnt);
69915+
69916 out_dput:
69917 dput(dentry);
69918 out_unlock:
69919@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
69920 char * name;
69921 struct dentry *dentry;
69922 struct nameidata nd;
69923+ ino_t saved_ino = 0;
69924+ dev_t saved_dev = 0;
69925
69926 error = user_path_parent(dfd, pathname, &nd, &name);
69927 if (error)
69928@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
69929 error = PTR_ERR(dentry);
69930 if (IS_ERR(dentry))
69931 goto exit2;
69932+
69933+ if (dentry->d_inode != NULL) {
69934+ saved_ino = dentry->d_inode->i_ino;
69935+ saved_dev = gr_get_dev_from_dentry(dentry);
69936+
69937+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
69938+ error = -EACCES;
69939+ goto exit3;
69940+ }
69941+ }
69942+
69943 error = mnt_want_write(nd.path.mnt);
69944 if (error)
69945 goto exit3;
69946@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
69947 if (error)
69948 goto exit4;
69949 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
69950+ if (!error && (saved_dev || saved_ino))
69951+ gr_handle_delete(saved_ino, saved_dev);
69952 exit4:
69953 mnt_drop_write(nd.path.mnt);
69954 exit3:
69955@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
69956 struct dentry *dentry;
69957 struct nameidata nd;
69958 struct inode *inode = NULL;
69959+ ino_t saved_ino = 0;
69960+ dev_t saved_dev = 0;
69961
69962 error = user_path_parent(dfd, pathname, &nd, &name);
69963 if (error)
69964@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
69965 if (nd.last.name[nd.last.len])
69966 goto slashes;
69967 inode = dentry->d_inode;
69968- if (inode)
69969+ if (inode) {
69970+ if (inode->i_nlink <= 1) {
69971+ saved_ino = inode->i_ino;
69972+ saved_dev = gr_get_dev_from_dentry(dentry);
69973+ }
69974+
69975 atomic_inc(&inode->i_count);
69976+
69977+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
69978+ error = -EACCES;
69979+ goto exit2;
69980+ }
69981+ }
69982 error = mnt_want_write(nd.path.mnt);
69983 if (error)
69984 goto exit2;
69985@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
69986 if (error)
69987 goto exit3;
69988 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
69989+ if (!error && (saved_ino || saved_dev))
69990+ gr_handle_delete(saved_ino, saved_dev);
69991 exit3:
69992 mnt_drop_write(nd.path.mnt);
69993 exit2:
69994@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
69995 if (IS_ERR(dentry))
69996 goto out_unlock;
69997
69998+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
69999+ error = -EACCES;
70000+ goto out_dput;
70001+ }
70002+
70003 error = mnt_want_write(nd.path.mnt);
70004 if (error)
70005 goto out_dput;
70006@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
70007 if (error)
70008 goto out_drop_write;
70009 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
70010+ if (!error)
70011+ gr_handle_create(dentry, nd.path.mnt);
70012 out_drop_write:
70013 mnt_drop_write(nd.path.mnt);
70014 out_dput:
70015@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70016 error = PTR_ERR(new_dentry);
70017 if (IS_ERR(new_dentry))
70018 goto out_unlock;
70019+
70020+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
70021+ old_path.dentry->d_inode,
70022+ old_path.dentry->d_inode->i_mode, to)) {
70023+ error = -EACCES;
70024+ goto out_dput;
70025+ }
70026+
70027+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
70028+ old_path.dentry, old_path.mnt, to)) {
70029+ error = -EACCES;
70030+ goto out_dput;
70031+ }
70032+
70033 error = mnt_want_write(nd.path.mnt);
70034 if (error)
70035 goto out_dput;
70036@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
70037 if (error)
70038 goto out_drop_write;
70039 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
70040+ if (!error)
70041+ gr_handle_create(new_dentry, nd.path.mnt);
70042 out_drop_write:
70043 mnt_drop_write(nd.path.mnt);
70044 out_dput:
70045@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70046 char *to;
70047 int error;
70048
70049+ pax_track_stack();
70050+
70051 error = user_path_parent(olddfd, oldname, &oldnd, &from);
70052 if (error)
70053 goto exit;
70054@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70055 if (new_dentry == trap)
70056 goto exit5;
70057
70058+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
70059+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
70060+ to);
70061+ if (error)
70062+ goto exit5;
70063+
70064 error = mnt_want_write(oldnd.path.mnt);
70065 if (error)
70066 goto exit5;
70067@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
70068 goto exit6;
70069 error = vfs_rename(old_dir->d_inode, old_dentry,
70070 new_dir->d_inode, new_dentry);
70071+ if (!error)
70072+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
70073+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
70074 exit6:
70075 mnt_drop_write(oldnd.path.mnt);
70076 exit5:
70077@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
70078
70079 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
70080 {
70081+ char tmpbuf[64];
70082+ const char *newlink;
70083 int len;
70084
70085 len = PTR_ERR(link);
70086@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
70087 len = strlen(link);
70088 if (len > (unsigned) buflen)
70089 len = buflen;
70090- if (copy_to_user(buffer, link, len))
70091+
70092+ if (len < sizeof(tmpbuf)) {
70093+ memcpy(tmpbuf, link, len);
70094+ newlink = tmpbuf;
70095+ } else
70096+ newlink = link;
70097+
70098+ if (copy_to_user(buffer, newlink, len))
70099 len = -EFAULT;
70100 out:
70101 return len;
70102diff --git a/fs/namespace.c b/fs/namespace.c
70103index 2beb0fb..11a95a5 100644
70104--- a/fs/namespace.c
70105+++ b/fs/namespace.c
70106@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70107 if (!(sb->s_flags & MS_RDONLY))
70108 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
70109 up_write(&sb->s_umount);
70110+
70111+ gr_log_remount(mnt->mnt_devname, retval);
70112+
70113 return retval;
70114 }
70115
70116@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
70117 security_sb_umount_busy(mnt);
70118 up_write(&namespace_sem);
70119 release_mounts(&umount_list);
70120+
70121+ gr_log_unmount(mnt->mnt_devname, retval);
70122+
70123 return retval;
70124 }
70125
70126@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70127 if (retval)
70128 goto dput_out;
70129
70130+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
70131+ retval = -EPERM;
70132+ goto dput_out;
70133+ }
70134+
70135+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
70136+ retval = -EPERM;
70137+ goto dput_out;
70138+ }
70139+
70140 if (flags & MS_REMOUNT)
70141 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
70142 data_page);
70143@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
70144 dev_name, data_page);
70145 dput_out:
70146 path_put(&path);
70147+
70148+ gr_log_mount(dev_name, dir_name, retval);
70149+
70150 return retval;
70151 }
70152
70153@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
70154 goto out1;
70155 }
70156
70157+ if (gr_handle_chroot_pivot()) {
70158+ error = -EPERM;
70159+ path_put(&old);
70160+ goto out1;
70161+ }
70162+
70163 read_lock(&current->fs->lock);
70164 root = current->fs->root;
70165 path_get(&current->fs->root);
70166diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
70167index b8b5b30..2bd9ccb 100644
70168--- a/fs/ncpfs/dir.c
70169+++ b/fs/ncpfs/dir.c
70170@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
70171 int res, val = 0, len;
70172 __u8 __name[NCP_MAXPATHLEN + 1];
70173
70174+ pax_track_stack();
70175+
70176 parent = dget_parent(dentry);
70177 dir = parent->d_inode;
70178
70179@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
70180 int error, res, len;
70181 __u8 __name[NCP_MAXPATHLEN + 1];
70182
70183+ pax_track_stack();
70184+
70185 lock_kernel();
70186 error = -EIO;
70187 if (!ncp_conn_valid(server))
70188@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
70189 int error, result, len;
70190 int opmode;
70191 __u8 __name[NCP_MAXPATHLEN + 1];
70192-
70193+
70194 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
70195 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
70196
70197+ pax_track_stack();
70198+
70199 error = -EIO;
70200 lock_kernel();
70201 if (!ncp_conn_valid(server))
70202@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70203 int error, len;
70204 __u8 __name[NCP_MAXPATHLEN + 1];
70205
70206+ pax_track_stack();
70207+
70208 DPRINTK("ncp_mkdir: making %s/%s\n",
70209 dentry->d_parent->d_name.name, dentry->d_name.name);
70210
70211@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
70212 if (!ncp_conn_valid(server))
70213 goto out;
70214
70215+ pax_track_stack();
70216+
70217 ncp_age_dentry(server, dentry);
70218 len = sizeof(__name);
70219 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
70220@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
70221 int old_len, new_len;
70222 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
70223
70224+ pax_track_stack();
70225+
70226 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
70227 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
70228 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
70229diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
70230index cf98da1..da890a9 100644
70231--- a/fs/ncpfs/inode.c
70232+++ b/fs/ncpfs/inode.c
70233@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
70234 #endif
70235 struct ncp_entry_info finfo;
70236
70237+ pax_track_stack();
70238+
70239 data.wdog_pid = NULL;
70240 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
70241 if (!server)
70242diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
70243index bfaef7b..e9d03ca 100644
70244--- a/fs/nfs/inode.c
70245+++ b/fs/nfs/inode.c
70246@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
70247 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
70248 nfsi->attrtimeo_timestamp = jiffies;
70249
70250- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
70251+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
70252 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
70253 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
70254 else
70255@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
70256 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
70257 }
70258
70259-static atomic_long_t nfs_attr_generation_counter;
70260+static atomic_long_unchecked_t nfs_attr_generation_counter;
70261
70262 static unsigned long nfs_read_attr_generation_counter(void)
70263 {
70264- return atomic_long_read(&nfs_attr_generation_counter);
70265+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
70266 }
70267
70268 unsigned long nfs_inc_attr_generation_counter(void)
70269 {
70270- return atomic_long_inc_return(&nfs_attr_generation_counter);
70271+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
70272 }
70273
70274 void nfs_fattr_init(struct nfs_fattr *fattr)
70275diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
70276index cc2f505..f6a236f 100644
70277--- a/fs/nfsd/lockd.c
70278+++ b/fs/nfsd/lockd.c
70279@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
70280 fput(filp);
70281 }
70282
70283-static struct nlmsvc_binding nfsd_nlm_ops = {
70284+static const struct nlmsvc_binding nfsd_nlm_ops = {
70285 .fopen = nlm_fopen, /* open file for locking */
70286 .fclose = nlm_fclose, /* close file */
70287 };
70288diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
70289index cfc3391..dcc083a 100644
70290--- a/fs/nfsd/nfs4state.c
70291+++ b/fs/nfsd/nfs4state.c
70292@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
70293 unsigned int cmd;
70294 int err;
70295
70296+ pax_track_stack();
70297+
70298 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
70299 (long long) lock->lk_offset,
70300 (long long) lock->lk_length);
70301diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
70302index 4a82a96..0d5fb49 100644
70303--- a/fs/nfsd/nfs4xdr.c
70304+++ b/fs/nfsd/nfs4xdr.c
70305@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
70306 struct nfsd4_compoundres *resp = rqstp->rq_resp;
70307 u32 minorversion = resp->cstate.minorversion;
70308
70309+ pax_track_stack();
70310+
70311 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
70312 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
70313 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
70314diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
70315index 2e09588..596421d 100644
70316--- a/fs/nfsd/vfs.c
70317+++ b/fs/nfsd/vfs.c
70318@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70319 } else {
70320 oldfs = get_fs();
70321 set_fs(KERNEL_DS);
70322- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
70323+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
70324 set_fs(oldfs);
70325 }
70326
70327@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
70328
70329 /* Write the data. */
70330 oldfs = get_fs(); set_fs(KERNEL_DS);
70331- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
70332+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
70333 set_fs(oldfs);
70334 if (host_err < 0)
70335 goto out_nfserr;
70336@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
70337 */
70338
70339 oldfs = get_fs(); set_fs(KERNEL_DS);
70340- host_err = inode->i_op->readlink(dentry, buf, *lenp);
70341+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
70342 set_fs(oldfs);
70343
70344 if (host_err < 0)
70345diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
70346index f6af760..d0adf34 100644
70347--- a/fs/nilfs2/ioctl.c
70348+++ b/fs/nilfs2/ioctl.c
70349@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70350 unsigned int cmd, void __user *argp)
70351 {
70352 struct nilfs_argv argv[5];
70353- const static size_t argsz[5] = {
70354+ static const size_t argsz[5] = {
70355 sizeof(struct nilfs_vdesc),
70356 sizeof(struct nilfs_period),
70357 sizeof(__u64),
70358@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
70359 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
70360 goto out_free;
70361
70362+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
70363+ goto out_free;
70364+
70365 len = argv[n].v_size * argv[n].v_nmembs;
70366 base = (void __user *)(unsigned long)argv[n].v_base;
70367 if (len == 0) {
70368diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
70369index 7e54e52..9337248 100644
70370--- a/fs/notify/dnotify/dnotify.c
70371+++ b/fs/notify/dnotify/dnotify.c
70372@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
70373 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
70374 }
70375
70376-static struct fsnotify_ops dnotify_fsnotify_ops = {
70377+static const struct fsnotify_ops dnotify_fsnotify_ops = {
70378 .handle_event = dnotify_handle_event,
70379 .should_send_event = dnotify_should_send_event,
70380 .free_group_priv = NULL,
70381diff --git a/fs/notify/notification.c b/fs/notify/notification.c
70382index b8bf53b..c518688 100644
70383--- a/fs/notify/notification.c
70384+++ b/fs/notify/notification.c
70385@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
70386 * get set to 0 so it will never get 'freed'
70387 */
70388 static struct fsnotify_event q_overflow_event;
70389-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70390+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70391
70392 /**
70393 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
70394@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
70395 */
70396 u32 fsnotify_get_cookie(void)
70397 {
70398- return atomic_inc_return(&fsnotify_sync_cookie);
70399+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
70400 }
70401 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
70402
70403diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
70404index 5a9e344..0f8cd28 100644
70405--- a/fs/ntfs/dir.c
70406+++ b/fs/ntfs/dir.c
70407@@ -1328,7 +1328,7 @@ find_next_index_buffer:
70408 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
70409 ~(s64)(ndir->itype.index.block_size - 1)));
70410 /* Bounds checks. */
70411- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
70412+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
70413 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
70414 "inode 0x%lx or driver bug.", vdir->i_ino);
70415 goto err_out;
70416diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
70417index 663c0e3..b6868e9 100644
70418--- a/fs/ntfs/file.c
70419+++ b/fs/ntfs/file.c
70420@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
70421 #endif /* NTFS_RW */
70422 };
70423
70424-const struct file_operations ntfs_empty_file_ops = {};
70425+const struct file_operations ntfs_empty_file_ops __read_only;
70426
70427-const struct inode_operations ntfs_empty_inode_ops = {};
70428+const struct inode_operations ntfs_empty_inode_ops __read_only;
70429diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
70430index 1cd2934..880b5d2 100644
70431--- a/fs/ocfs2/cluster/masklog.c
70432+++ b/fs/ocfs2/cluster/masklog.c
70433@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
70434 return mlog_mask_store(mlog_attr->mask, buf, count);
70435 }
70436
70437-static struct sysfs_ops mlog_attr_ops = {
70438+static const struct sysfs_ops mlog_attr_ops = {
70439 .show = mlog_show,
70440 .store = mlog_store,
70441 };
70442diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
70443index ac10f83..2cd2607 100644
70444--- a/fs/ocfs2/localalloc.c
70445+++ b/fs/ocfs2/localalloc.c
70446@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
70447 goto bail;
70448 }
70449
70450- atomic_inc(&osb->alloc_stats.moves);
70451+ atomic_inc_unchecked(&osb->alloc_stats.moves);
70452
70453 status = 0;
70454 bail:
70455diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
70456index f010b22..9f9ed34 100644
70457--- a/fs/ocfs2/namei.c
70458+++ b/fs/ocfs2/namei.c
70459@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
70460 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
70461 struct ocfs2_dir_lookup_result target_insert = { NULL, };
70462
70463+ pax_track_stack();
70464+
70465 /* At some point it might be nice to break this function up a
70466 * bit. */
70467
70468diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
70469index d963d86..914cfbd 100644
70470--- a/fs/ocfs2/ocfs2.h
70471+++ b/fs/ocfs2/ocfs2.h
70472@@ -217,11 +217,11 @@ enum ocfs2_vol_state
70473
70474 struct ocfs2_alloc_stats
70475 {
70476- atomic_t moves;
70477- atomic_t local_data;
70478- atomic_t bitmap_data;
70479- atomic_t bg_allocs;
70480- atomic_t bg_extends;
70481+ atomic_unchecked_t moves;
70482+ atomic_unchecked_t local_data;
70483+ atomic_unchecked_t bitmap_data;
70484+ atomic_unchecked_t bg_allocs;
70485+ atomic_unchecked_t bg_extends;
70486 };
70487
70488 enum ocfs2_local_alloc_state
70489diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
70490index 79b5dac..d322952 100644
70491--- a/fs/ocfs2/suballoc.c
70492+++ b/fs/ocfs2/suballoc.c
70493@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
70494 mlog_errno(status);
70495 goto bail;
70496 }
70497- atomic_inc(&osb->alloc_stats.bg_extends);
70498+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
70499
70500 /* You should never ask for this much metadata */
70501 BUG_ON(bits_wanted >
70502@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
70503 mlog_errno(status);
70504 goto bail;
70505 }
70506- atomic_inc(&osb->alloc_stats.bg_allocs);
70507+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
70508
70509 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
70510 ac->ac_bits_given += (*num_bits);
70511@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
70512 mlog_errno(status);
70513 goto bail;
70514 }
70515- atomic_inc(&osb->alloc_stats.bg_allocs);
70516+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
70517
70518 BUG_ON(num_bits != 1);
70519
70520@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
70521 cluster_start,
70522 num_clusters);
70523 if (!status)
70524- atomic_inc(&osb->alloc_stats.local_data);
70525+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
70526 } else {
70527 if (min_clusters > (osb->bitmap_cpg - 1)) {
70528 /* The only paths asking for contiguousness
70529@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
70530 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
70531 bg_blkno,
70532 bg_bit_off);
70533- atomic_inc(&osb->alloc_stats.bitmap_data);
70534+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
70535 }
70536 }
70537 if (status < 0) {
70538diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
70539index 9f55be4..a3f8048 100644
70540--- a/fs/ocfs2/super.c
70541+++ b/fs/ocfs2/super.c
70542@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
70543 "%10s => GlobalAllocs: %d LocalAllocs: %d "
70544 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
70545 "Stats",
70546- atomic_read(&osb->alloc_stats.bitmap_data),
70547- atomic_read(&osb->alloc_stats.local_data),
70548- atomic_read(&osb->alloc_stats.bg_allocs),
70549- atomic_read(&osb->alloc_stats.moves),
70550- atomic_read(&osb->alloc_stats.bg_extends));
70551+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
70552+ atomic_read_unchecked(&osb->alloc_stats.local_data),
70553+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
70554+ atomic_read_unchecked(&osb->alloc_stats.moves),
70555+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
70556
70557 out += snprintf(buf + out, len - out,
70558 "%10s => State: %u Descriptor: %llu Size: %u bits "
70559@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
70560 spin_lock_init(&osb->osb_xattr_lock);
70561 ocfs2_init_inode_steal_slot(osb);
70562
70563- atomic_set(&osb->alloc_stats.moves, 0);
70564- atomic_set(&osb->alloc_stats.local_data, 0);
70565- atomic_set(&osb->alloc_stats.bitmap_data, 0);
70566- atomic_set(&osb->alloc_stats.bg_allocs, 0);
70567- atomic_set(&osb->alloc_stats.bg_extends, 0);
70568+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
70569+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
70570+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
70571+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
70572+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
70573
70574 /* Copy the blockcheck stats from the superblock probe */
70575 osb->osb_ecc_stats = *stats;
70576diff --git a/fs/open.c b/fs/open.c
70577index 4f01e06..2a8057a 100644
70578--- a/fs/open.c
70579+++ b/fs/open.c
70580@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
70581 error = locks_verify_truncate(inode, NULL, length);
70582 if (!error)
70583 error = security_path_truncate(&path, length, 0);
70584+
70585+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
70586+ error = -EACCES;
70587+
70588 if (!error) {
70589 vfs_dq_init(inode);
70590 error = do_truncate(path.dentry, length, 0, NULL);
70591@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
70592 if (__mnt_is_readonly(path.mnt))
70593 res = -EROFS;
70594
70595+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
70596+ res = -EACCES;
70597+
70598 out_path_release:
70599 path_put(&path);
70600 out:
70601@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
70602 if (error)
70603 goto dput_and_out;
70604
70605+ gr_log_chdir(path.dentry, path.mnt);
70606+
70607 set_fs_pwd(current->fs, &path);
70608
70609 dput_and_out:
70610@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
70611 goto out_putf;
70612
70613 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
70614+
70615+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
70616+ error = -EPERM;
70617+
70618+ if (!error)
70619+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
70620+
70621 if (!error)
70622 set_fs_pwd(current->fs, &file->f_path);
70623 out_putf:
70624@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
70625 if (!capable(CAP_SYS_CHROOT))
70626 goto dput_and_out;
70627
70628+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
70629+ goto dput_and_out;
70630+
70631 set_fs_root(current->fs, &path);
70632+
70633+ gr_handle_chroot_chdir(&path);
70634+
70635 error = 0;
70636 dput_and_out:
70637 path_put(&path);
70638@@ -596,66 +618,57 @@ out:
70639 return error;
70640 }
70641
70642+static int chmod_common(struct path *path, umode_t mode)
70643+{
70644+ struct inode *inode = path->dentry->d_inode;
70645+ struct iattr newattrs;
70646+ int error;
70647+
70648+ error = mnt_want_write(path->mnt);
70649+ if (error)
70650+ return error;
70651+ mutex_lock(&inode->i_mutex);
70652+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
70653+ error = -EACCES;
70654+ goto out_unlock;
70655+ }
70656+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
70657+ error = -EPERM;
70658+ goto out_unlock;
70659+ }
70660+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
70661+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
70662+ error = notify_change(path->dentry, &newattrs);
70663+out_unlock:
70664+ mutex_unlock(&inode->i_mutex);
70665+ mnt_drop_write(path->mnt);
70666+ return error;
70667+}
70668+
70669 SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
70670 {
70671- struct inode * inode;
70672- struct dentry * dentry;
70673 struct file * file;
70674 int err = -EBADF;
70675- struct iattr newattrs;
70676
70677 file = fget(fd);
70678- if (!file)
70679- goto out;
70680-
70681- dentry = file->f_path.dentry;
70682- inode = dentry->d_inode;
70683-
70684- audit_inode(NULL, dentry);
70685-
70686- err = mnt_want_write_file(file);
70687- if (err)
70688- goto out_putf;
70689- mutex_lock(&inode->i_mutex);
70690- if (mode == (mode_t) -1)
70691- mode = inode->i_mode;
70692- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
70693- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
70694- err = notify_change(dentry, &newattrs);
70695- mutex_unlock(&inode->i_mutex);
70696- mnt_drop_write(file->f_path.mnt);
70697-out_putf:
70698- fput(file);
70699-out:
70700+ if (file) {
70701+ audit_inode(NULL, file->f_path.dentry);
70702+ err = chmod_common(&file->f_path, mode);
70703+ fput(file);
70704+ }
70705 return err;
70706 }
70707
70708 SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
70709 {
70710 struct path path;
70711- struct inode *inode;
70712 int error;
70713- struct iattr newattrs;
70714
70715 error = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path);
70716- if (error)
70717- goto out;
70718- inode = path.dentry->d_inode;
70719-
70720- error = mnt_want_write(path.mnt);
70721- if (error)
70722- goto dput_and_out;
70723- mutex_lock(&inode->i_mutex);
70724- if (mode == (mode_t) -1)
70725- mode = inode->i_mode;
70726- newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
70727- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
70728- error = notify_change(path.dentry, &newattrs);
70729- mutex_unlock(&inode->i_mutex);
70730- mnt_drop_write(path.mnt);
70731-dput_and_out:
70732- path_put(&path);
70733-out:
70734+ if (!error) {
70735+ error = chmod_common(&path, mode);
70736+ path_put(&path);
70737+ }
70738 return error;
70739 }
70740
70741@@ -664,12 +677,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
70742 return sys_fchmodat(AT_FDCWD, filename, mode);
70743 }
70744
70745-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
70746+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
70747 {
70748 struct inode *inode = dentry->d_inode;
70749 int error;
70750 struct iattr newattrs;
70751
70752+ if (!gr_acl_handle_chown(dentry, mnt))
70753+ return -EACCES;
70754+
70755 newattrs.ia_valid = ATTR_CTIME;
70756 if (user != (uid_t) -1) {
70757 newattrs.ia_valid |= ATTR_UID;
70758@@ -700,7 +716,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
70759 error = mnt_want_write(path.mnt);
70760 if (error)
70761 goto out_release;
70762- error = chown_common(path.dentry, user, group);
70763+ error = chown_common(path.dentry, user, group, path.mnt);
70764 mnt_drop_write(path.mnt);
70765 out_release:
70766 path_put(&path);
70767@@ -725,7 +741,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
70768 error = mnt_want_write(path.mnt);
70769 if (error)
70770 goto out_release;
70771- error = chown_common(path.dentry, user, group);
70772+ error = chown_common(path.dentry, user, group, path.mnt);
70773 mnt_drop_write(path.mnt);
70774 out_release:
70775 path_put(&path);
70776@@ -744,7 +760,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
70777 error = mnt_want_write(path.mnt);
70778 if (error)
70779 goto out_release;
70780- error = chown_common(path.dentry, user, group);
70781+ error = chown_common(path.dentry, user, group, path.mnt);
70782 mnt_drop_write(path.mnt);
70783 out_release:
70784 path_put(&path);
70785@@ -767,7 +783,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
70786 goto out_fput;
70787 dentry = file->f_path.dentry;
70788 audit_inode(NULL, dentry);
70789- error = chown_common(dentry, user, group);
70790+ error = chown_common(dentry, user, group, file->f_path.mnt);
70791 mnt_drop_write(file->f_path.mnt);
70792 out_fput:
70793 fput(file);
70794@@ -1036,7 +1052,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
70795 if (!IS_ERR(tmp)) {
70796 fd = get_unused_fd_flags(flags);
70797 if (fd >= 0) {
70798- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
70799+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
70800 if (IS_ERR(f)) {
70801 put_unused_fd(fd);
70802 fd = PTR_ERR(f);
70803diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
70804index 6ab70f4..f4103d1 100644
70805--- a/fs/partitions/efi.c
70806+++ b/fs/partitions/efi.c
70807@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
70808 if (!bdev || !gpt)
70809 return NULL;
70810
70811+ if (!le32_to_cpu(gpt->num_partition_entries))
70812+ return NULL;
70813+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
70814+ if (!pte)
70815+ return NULL;
70816+
70817 count = le32_to_cpu(gpt->num_partition_entries) *
70818 le32_to_cpu(gpt->sizeof_partition_entry);
70819- if (!count)
70820- return NULL;
70821- pte = kzalloc(count, GFP_KERNEL);
70822- if (!pte)
70823- return NULL;
70824-
70825 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
70826 (u8 *) pte,
70827 count) < count) {
70828diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
70829index dd6efdb..3babc6c 100644
70830--- a/fs/partitions/ldm.c
70831+++ b/fs/partitions/ldm.c
70832@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
70833 ldm_error ("A VBLK claims to have %d parts.", num);
70834 return false;
70835 }
70836+
70837 if (rec >= num) {
70838 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
70839 return false;
70840@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
70841 goto found;
70842 }
70843
70844- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
70845+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
70846 if (!f) {
70847 ldm_crit ("Out of memory.");
70848 return false;
70849diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
70850index 5765198..7f8e9e0 100644
70851--- a/fs/partitions/mac.c
70852+++ b/fs/partitions/mac.c
70853@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
70854 return 0; /* not a MacOS disk */
70855 }
70856 blocks_in_map = be32_to_cpu(part->map_count);
70857- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
70858- put_dev_sector(sect);
70859- return 0;
70860- }
70861 printk(" [mac]");
70862+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
70863+ put_dev_sector(sect);
70864+ return 0;
70865+ }
70866 for (slot = 1; slot <= blocks_in_map; ++slot) {
70867 int pos = slot * secsize;
70868 put_dev_sector(sect);
70869diff --git a/fs/pipe.c b/fs/pipe.c
70870index d0cc080..8a6f211 100644
70871--- a/fs/pipe.c
70872+++ b/fs/pipe.c
70873@@ -401,9 +401,9 @@ redo:
70874 }
70875 if (bufs) /* More to do? */
70876 continue;
70877- if (!pipe->writers)
70878+ if (!atomic_read(&pipe->writers))
70879 break;
70880- if (!pipe->waiting_writers) {
70881+ if (!atomic_read(&pipe->waiting_writers)) {
70882 /* syscall merging: Usually we must not sleep
70883 * if O_NONBLOCK is set, or if we got some data.
70884 * But if a writer sleeps in kernel space, then
70885@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
70886 mutex_lock(&inode->i_mutex);
70887 pipe = inode->i_pipe;
70888
70889- if (!pipe->readers) {
70890+ if (!atomic_read(&pipe->readers)) {
70891 send_sig(SIGPIPE, current, 0);
70892 ret = -EPIPE;
70893 goto out;
70894@@ -511,7 +511,7 @@ redo1:
70895 for (;;) {
70896 int bufs;
70897
70898- if (!pipe->readers) {
70899+ if (!atomic_read(&pipe->readers)) {
70900 send_sig(SIGPIPE, current, 0);
70901 if (!ret)
70902 ret = -EPIPE;
70903@@ -597,9 +597,9 @@ redo2:
70904 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
70905 do_wakeup = 0;
70906 }
70907- pipe->waiting_writers++;
70908+ atomic_inc(&pipe->waiting_writers);
70909 pipe_wait(pipe);
70910- pipe->waiting_writers--;
70911+ atomic_dec(&pipe->waiting_writers);
70912 }
70913 out:
70914 mutex_unlock(&inode->i_mutex);
70915@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
70916 mask = 0;
70917 if (filp->f_mode & FMODE_READ) {
70918 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
70919- if (!pipe->writers && filp->f_version != pipe->w_counter)
70920+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
70921 mask |= POLLHUP;
70922 }
70923
70924@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
70925 * Most Unices do not set POLLERR for FIFOs but on Linux they
70926 * behave exactly like pipes for poll().
70927 */
70928- if (!pipe->readers)
70929+ if (!atomic_read(&pipe->readers))
70930 mask |= POLLERR;
70931 }
70932
70933@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
70934
70935 mutex_lock(&inode->i_mutex);
70936 pipe = inode->i_pipe;
70937- pipe->readers -= decr;
70938- pipe->writers -= decw;
70939+ atomic_sub(decr, &pipe->readers);
70940+ atomic_sub(decw, &pipe->writers);
70941
70942- if (!pipe->readers && !pipe->writers) {
70943+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
70944 free_pipe_info(inode);
70945 } else {
70946 wake_up_interruptible_sync(&pipe->wait);
70947@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
70948
70949 if (inode->i_pipe) {
70950 ret = 0;
70951- inode->i_pipe->readers++;
70952+ atomic_inc(&inode->i_pipe->readers);
70953 }
70954
70955 mutex_unlock(&inode->i_mutex);
70956@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
70957
70958 if (inode->i_pipe) {
70959 ret = 0;
70960- inode->i_pipe->writers++;
70961+ atomic_inc(&inode->i_pipe->writers);
70962 }
70963
70964 mutex_unlock(&inode->i_mutex);
70965@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
70966 if (inode->i_pipe) {
70967 ret = 0;
70968 if (filp->f_mode & FMODE_READ)
70969- inode->i_pipe->readers++;
70970+ atomic_inc(&inode->i_pipe->readers);
70971 if (filp->f_mode & FMODE_WRITE)
70972- inode->i_pipe->writers++;
70973+ atomic_inc(&inode->i_pipe->writers);
70974 }
70975
70976 mutex_unlock(&inode->i_mutex);
70977@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
70978 inode->i_pipe = NULL;
70979 }
70980
70981-static struct vfsmount *pipe_mnt __read_mostly;
70982+struct vfsmount *pipe_mnt __read_mostly;
70983 static int pipefs_delete_dentry(struct dentry *dentry)
70984 {
70985 /*
70986@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
70987 goto fail_iput;
70988 inode->i_pipe = pipe;
70989
70990- pipe->readers = pipe->writers = 1;
70991+ atomic_set(&pipe->readers, 1);
70992+ atomic_set(&pipe->writers, 1);
70993 inode->i_fop = &rdwr_pipefifo_fops;
70994
70995 /*
70996diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
70997index 50f8f06..c5755df 100644
70998--- a/fs/proc/Kconfig
70999+++ b/fs/proc/Kconfig
71000@@ -30,12 +30,12 @@ config PROC_FS
71001
71002 config PROC_KCORE
71003 bool "/proc/kcore support" if !ARM
71004- depends on PROC_FS && MMU
71005+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
71006
71007 config PROC_VMCORE
71008 bool "/proc/vmcore support (EXPERIMENTAL)"
71009- depends on PROC_FS && CRASH_DUMP
71010- default y
71011+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
71012+ default n
71013 help
71014 Exports the dump image of crashed kernel in ELF format.
71015
71016@@ -59,8 +59,8 @@ config PROC_SYSCTL
71017 limited in memory.
71018
71019 config PROC_PAGE_MONITOR
71020- default y
71021- depends on PROC_FS && MMU
71022+ default n
71023+ depends on PROC_FS && MMU && !GRKERNSEC
71024 bool "Enable /proc page monitoring" if EMBEDDED
71025 help
71026 Various /proc files exist to monitor process memory utilization:
71027diff --git a/fs/proc/array.c b/fs/proc/array.c
71028index c5ef152..28c94f7 100644
71029--- a/fs/proc/array.c
71030+++ b/fs/proc/array.c
71031@@ -60,6 +60,7 @@
71032 #include <linux/tty.h>
71033 #include <linux/string.h>
71034 #include <linux/mman.h>
71035+#include <linux/grsecurity.h>
71036 #include <linux/proc_fs.h>
71037 #include <linux/ioport.h>
71038 #include <linux/uaccess.h>
71039@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
71040 p->nivcsw);
71041 }
71042
71043+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71044+static inline void task_pax(struct seq_file *m, struct task_struct *p)
71045+{
71046+ if (p->mm)
71047+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
71048+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
71049+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
71050+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
71051+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
71052+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
71053+ else
71054+ seq_printf(m, "PaX:\t-----\n");
71055+}
71056+#endif
71057+
71058 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71059 struct pid *pid, struct task_struct *task)
71060 {
71061@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71062 task_cap(m, task);
71063 cpuset_task_status_allowed(m, task);
71064 task_context_switch_counts(m, task);
71065+
71066+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71067+ task_pax(m, task);
71068+#endif
71069+
71070+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
71071+ task_grsec_rbac(m, task);
71072+#endif
71073+
71074 return 0;
71075 }
71076
71077+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71078+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71079+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
71080+ _mm->pax_flags & MF_PAX_SEGMEXEC))
71081+#endif
71082+
71083 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71084 struct pid *pid, struct task_struct *task, int whole)
71085 {
71086@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71087 cputime_t cutime, cstime, utime, stime;
71088 cputime_t cgtime, gtime;
71089 unsigned long rsslim = 0;
71090- char tcomm[sizeof(task->comm)];
71091+ char tcomm[sizeof(task->comm)] = { 0 };
71092 unsigned long flags;
71093
71094+ pax_track_stack();
71095+
71096+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71097+ if (current->exec_id != m->exec_id) {
71098+ gr_log_badprocpid("stat");
71099+ return 0;
71100+ }
71101+#endif
71102+
71103 state = *get_task_state(task);
71104 vsize = eip = esp = 0;
71105 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
71106@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71107 gtime = task_gtime(task);
71108 }
71109
71110+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71111+ if (PAX_RAND_FLAGS(mm)) {
71112+ eip = 0;
71113+ esp = 0;
71114+ wchan = 0;
71115+ }
71116+#endif
71117+#ifdef CONFIG_GRKERNSEC_HIDESYM
71118+ wchan = 0;
71119+ eip =0;
71120+ esp =0;
71121+#endif
71122+
71123 /* scale priority and nice values from timeslices to -20..20 */
71124 /* to make it look like a "normal" Unix priority/nice value */
71125 priority = task_prio(task);
71126@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
71127 vsize,
71128 mm ? get_mm_rss(mm) : 0,
71129 rsslim,
71130+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71131+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
71132+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
71133+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
71134+#else
71135 mm ? (permitted ? mm->start_code : 1) : 0,
71136 mm ? (permitted ? mm->end_code : 1) : 0,
71137 (permitted && mm) ? mm->start_stack : 0,
71138+#endif
71139 esp,
71140 eip,
71141 /* The signal information here is obsolete.
71142@@ -517,8 +576,16 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71143 struct pid *pid, struct task_struct *task)
71144 {
71145 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
71146- struct mm_struct *mm = get_task_mm(task);
71147+ struct mm_struct *mm;
71148
71149+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71150+ if (current->exec_id != m->exec_id) {
71151+ gr_log_badprocpid("statm");
71152+ return 0;
71153+ }
71154+#endif
71155+
71156+ mm = get_task_mm(task);
71157 if (mm) {
71158 size = task_statm(mm, &shared, &text, &data, &resident);
71159 mmput(mm);
71160@@ -528,3 +595,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71161
71162 return 0;
71163 }
71164+
71165+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
71166+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
71167+{
71168+ u32 curr_ip = 0;
71169+ unsigned long flags;
71170+
71171+ if (lock_task_sighand(task, &flags)) {
71172+ curr_ip = task->signal->curr_ip;
71173+ unlock_task_sighand(task, &flags);
71174+ }
71175+
71176+ return sprintf(buffer, "%pI4\n", &curr_ip);
71177+}
71178+#endif
71179diff --git a/fs/proc/base.c b/fs/proc/base.c
71180index 67f7dc0..a86ad9a 100644
71181--- a/fs/proc/base.c
71182+++ b/fs/proc/base.c
71183@@ -102,6 +102,22 @@ struct pid_entry {
71184 union proc_op op;
71185 };
71186
71187+struct getdents_callback {
71188+ struct linux_dirent __user * current_dir;
71189+ struct linux_dirent __user * previous;
71190+ struct file * file;
71191+ int count;
71192+ int error;
71193+};
71194+
71195+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
71196+ loff_t offset, u64 ino, unsigned int d_type)
71197+{
71198+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
71199+ buf->error = -EINVAL;
71200+ return 0;
71201+}
71202+
71203 #define NOD(NAME, MODE, IOP, FOP, OP) { \
71204 .name = (NAME), \
71205 .len = sizeof(NAME) - 1, \
71206@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
71207 if (task == current)
71208 return 0;
71209
71210+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
71211+ return -EPERM;
71212+
71213 /*
71214 * If current is actively ptrace'ing, and would also be
71215 * permitted to freshly attach with ptrace now, permit it.
71216@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
71217 if (!mm->arg_end)
71218 goto out_mm; /* Shh! No looking before we're done */
71219
71220+ if (gr_acl_handle_procpidmem(task))
71221+ goto out_mm;
71222+
71223 len = mm->arg_end - mm->arg_start;
71224
71225 if (len > PAGE_SIZE)
71226@@ -287,12 +309,28 @@ out:
71227 return res;
71228 }
71229
71230+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71231+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
71232+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
71233+ _mm->pax_flags & MF_PAX_SEGMEXEC))
71234+#endif
71235+
71236 static int proc_pid_auxv(struct task_struct *task, char *buffer)
71237 {
71238 int res = 0;
71239 struct mm_struct *mm = get_task_mm(task);
71240 if (mm) {
71241 unsigned int nwords = 0;
71242+
71243+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71244+ /* allow if we're currently ptracing this task */
71245+ if (PAX_RAND_FLAGS(mm) &&
71246+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
71247+ mmput(mm);
71248+ return 0;
71249+ }
71250+#endif
71251+
71252 do {
71253 nwords += 2;
71254 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
71255@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
71256 }
71257
71258
71259-#ifdef CONFIG_KALLSYMS
71260+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71261 /*
71262 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
71263 * Returns the resolved symbol. If that fails, simply return the address.
71264@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
71265 mutex_unlock(&task->cred_guard_mutex);
71266 }
71267
71268-#ifdef CONFIG_STACKTRACE
71269+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71270
71271 #define MAX_STACK_TRACE_DEPTH 64
71272
71273@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
71274 return count;
71275 }
71276
71277-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
71278+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
71279 static int proc_pid_syscall(struct task_struct *task, char *buffer)
71280 {
71281 long nr;
71282@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
71283 /************************************************************************/
71284
71285 /* permission checks */
71286-static int proc_fd_access_allowed(struct inode *inode)
71287+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
71288 {
71289 struct task_struct *task;
71290 int allowed = 0;
71291@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
71292 */
71293 task = get_proc_task(inode);
71294 if (task) {
71295- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71296+ if (log)
71297+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
71298+ else
71299+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
71300 put_task_struct(task);
71301 }
71302 return allowed;
71303@@ -806,9 +847,16 @@ static const struct file_operations proc_single_file_operations = {
71304 static int mem_open(struct inode* inode, struct file* file)
71305 {
71306 file->private_data = (void*)((long)current->self_exec_id);
71307+
71308+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71309+ file->f_version = current->exec_id;
71310+#endif
71311+
71312 return 0;
71313 }
71314
71315+static int task_dumpable(struct task_struct *task);
71316+
71317 static ssize_t mem_read(struct file * file, char __user * buf,
71318 size_t count, loff_t *ppos)
71319 {
71320@@ -818,6 +866,13 @@ static ssize_t mem_read(struct file * file, char __user * buf,
71321 int ret = -ESRCH;
71322 struct mm_struct *mm;
71323
71324+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71325+ if (file->f_version != current->exec_id) {
71326+ gr_log_badprocpid("mem");
71327+ return 0;
71328+ }
71329+#endif
71330+
71331 if (!task)
71332 goto out_no_task;
71333
71334@@ -963,6 +1018,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
71335 if (!task)
71336 goto out_no_task;
71337
71338+ if (gr_acl_handle_procpidmem(task))
71339+ goto out;
71340+
71341 if (!ptrace_may_access(task, PTRACE_MODE_READ))
71342 goto out;
71343
71344@@ -1377,7 +1435,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
71345 path_put(&nd->path);
71346
71347 /* Are we allowed to snoop on the tasks file descriptors? */
71348- if (!proc_fd_access_allowed(inode))
71349+ if (!proc_fd_access_allowed(inode,0))
71350 goto out;
71351
71352 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
71353@@ -1417,8 +1475,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
71354 struct path path;
71355
71356 /* Are we allowed to snoop on the tasks file descriptors? */
71357- if (!proc_fd_access_allowed(inode))
71358- goto out;
71359+ /* logging this is needed for learning on chromium to work properly,
71360+ but we don't want to flood the logs from 'ps' which does a readlink
71361+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
71362+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
71363+ */
71364+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
71365+ if (!proc_fd_access_allowed(inode,0))
71366+ goto out;
71367+ } else {
71368+ if (!proc_fd_access_allowed(inode,1))
71369+ goto out;
71370+ }
71371
71372 error = PROC_I(inode)->op.proc_get_link(inode, &path);
71373 if (error)
71374@@ -1483,7 +1551,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
71375 rcu_read_lock();
71376 cred = __task_cred(task);
71377 inode->i_uid = cred->euid;
71378+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71379+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
71380+#else
71381 inode->i_gid = cred->egid;
71382+#endif
71383 rcu_read_unlock();
71384 }
71385 security_task_to_inode(task, inode);
71386@@ -1501,6 +1573,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
71387 struct inode *inode = dentry->d_inode;
71388 struct task_struct *task;
71389 const struct cred *cred;
71390+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71391+ const struct cred *tmpcred = current_cred();
71392+#endif
71393
71394 generic_fillattr(inode, stat);
71395
71396@@ -1508,13 +1583,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
71397 stat->uid = 0;
71398 stat->gid = 0;
71399 task = pid_task(proc_pid(inode), PIDTYPE_PID);
71400+
71401+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
71402+ rcu_read_unlock();
71403+ return -ENOENT;
71404+ }
71405+
71406 if (task) {
71407+ cred = __task_cred(task);
71408+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71409+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
71410+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71411+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
71412+#endif
71413+ ) {
71414+#endif
71415 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
71416+#ifdef CONFIG_GRKERNSEC_PROC_USER
71417+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
71418+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71419+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
71420+#endif
71421 task_dumpable(task)) {
71422- cred = __task_cred(task);
71423 stat->uid = cred->euid;
71424+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71425+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
71426+#else
71427 stat->gid = cred->egid;
71428+#endif
71429 }
71430+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71431+ } else {
71432+ rcu_read_unlock();
71433+ return -ENOENT;
71434+ }
71435+#endif
71436 }
71437 rcu_read_unlock();
71438 return 0;
71439@@ -1545,11 +1648,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
71440
71441 if (task) {
71442 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
71443+#ifdef CONFIG_GRKERNSEC_PROC_USER
71444+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
71445+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71446+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
71447+#endif
71448 task_dumpable(task)) {
71449 rcu_read_lock();
71450 cred = __task_cred(task);
71451 inode->i_uid = cred->euid;
71452+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71453+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
71454+#else
71455 inode->i_gid = cred->egid;
71456+#endif
71457 rcu_read_unlock();
71458 } else {
71459 inode->i_uid = 0;
71460@@ -1670,7 +1782,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
71461 int fd = proc_fd(inode);
71462
71463 if (task) {
71464- files = get_files_struct(task);
71465+ if (!gr_acl_handle_procpidmem(task))
71466+ files = get_files_struct(task);
71467 put_task_struct(task);
71468 }
71469 if (files) {
71470@@ -1922,12 +2035,22 @@ static const struct file_operations proc_fd_operations = {
71471 static int proc_fd_permission(struct inode *inode, int mask)
71472 {
71473 int rv;
71474+ struct task_struct *task;
71475
71476 rv = generic_permission(inode, mask, NULL);
71477- if (rv == 0)
71478- return 0;
71479+
71480 if (task_pid(current) == proc_pid(inode))
71481 rv = 0;
71482+
71483+ task = get_proc_task(inode);
71484+ if (task == NULL)
71485+ return rv;
71486+
71487+ if (gr_acl_handle_procpidmem(task))
71488+ rv = -EACCES;
71489+
71490+ put_task_struct(task);
71491+
71492 return rv;
71493 }
71494
71495@@ -2036,6 +2159,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
71496 if (!task)
71497 goto out_no_task;
71498
71499+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
71500+ goto out;
71501+
71502 /*
71503 * Yes, it does not scale. And it should not. Don't add
71504 * new entries into /proc/<tgid>/ without very good reasons.
71505@@ -2080,6 +2206,9 @@ static int proc_pident_readdir(struct file *filp,
71506 if (!task)
71507 goto out_no_task;
71508
71509+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
71510+ goto out;
71511+
71512 ret = 0;
71513 i = filp->f_pos;
71514 switch (i) {
71515@@ -2347,7 +2476,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
71516 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
71517 void *cookie)
71518 {
71519- char *s = nd_get_link(nd);
71520+ const char *s = nd_get_link(nd);
71521 if (!IS_ERR(s))
71522 __putname(s);
71523 }
71524@@ -2553,7 +2682,7 @@ static const struct pid_entry tgid_base_stuff[] = {
71525 #ifdef CONFIG_SCHED_DEBUG
71526 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
71527 #endif
71528-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
71529+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
71530 INF("syscall", S_IRUGO, proc_pid_syscall),
71531 #endif
71532 INF("cmdline", S_IRUGO, proc_pid_cmdline),
71533@@ -2578,10 +2707,10 @@ static const struct pid_entry tgid_base_stuff[] = {
71534 #ifdef CONFIG_SECURITY
71535 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
71536 #endif
71537-#ifdef CONFIG_KALLSYMS
71538+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71539 INF("wchan", S_IRUGO, proc_pid_wchan),
71540 #endif
71541-#ifdef CONFIG_STACKTRACE
71542+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71543 ONE("stack", S_IRUGO, proc_pid_stack),
71544 #endif
71545 #ifdef CONFIG_SCHEDSTATS
71546@@ -2611,6 +2740,9 @@ static const struct pid_entry tgid_base_stuff[] = {
71547 #ifdef CONFIG_TASK_IO_ACCOUNTING
71548 INF("io", S_IRUSR, proc_tgid_io_accounting),
71549 #endif
71550+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
71551+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
71552+#endif
71553 };
71554
71555 static int proc_tgid_base_readdir(struct file * filp,
71556@@ -2735,7 +2867,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
71557 if (!inode)
71558 goto out;
71559
71560+#ifdef CONFIG_GRKERNSEC_PROC_USER
71561+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
71562+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71563+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
71564+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
71565+#else
71566 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
71567+#endif
71568 inode->i_op = &proc_tgid_base_inode_operations;
71569 inode->i_fop = &proc_tgid_base_operations;
71570 inode->i_flags|=S_IMMUTABLE;
71571@@ -2777,7 +2916,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
71572 if (!task)
71573 goto out;
71574
71575+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
71576+ goto out_put_task;
71577+
71578 result = proc_pid_instantiate(dir, dentry, task, NULL);
71579+out_put_task:
71580 put_task_struct(task);
71581 out:
71582 return result;
71583@@ -2842,6 +2985,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
71584 {
71585 unsigned int nr;
71586 struct task_struct *reaper;
71587+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71588+ const struct cred *tmpcred = current_cred();
71589+ const struct cred *itercred;
71590+#endif
71591+ filldir_t __filldir = filldir;
71592 struct tgid_iter iter;
71593 struct pid_namespace *ns;
71594
71595@@ -2865,8 +3013,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
71596 for (iter = next_tgid(ns, iter);
71597 iter.task;
71598 iter.tgid += 1, iter = next_tgid(ns, iter)) {
71599+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71600+ rcu_read_lock();
71601+ itercred = __task_cred(iter.task);
71602+#endif
71603+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
71604+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71605+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
71606+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71607+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
71608+#endif
71609+ )
71610+#endif
71611+ )
71612+ __filldir = &gr_fake_filldir;
71613+ else
71614+ __filldir = filldir;
71615+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71616+ rcu_read_unlock();
71617+#endif
71618 filp->f_pos = iter.tgid + TGID_OFFSET;
71619- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
71620+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
71621 put_task_struct(iter.task);
71622 goto out;
71623 }
71624@@ -2892,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
71625 #ifdef CONFIG_SCHED_DEBUG
71626 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
71627 #endif
71628-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
71629+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
71630 INF("syscall", S_IRUGO, proc_pid_syscall),
71631 #endif
71632 INF("cmdline", S_IRUGO, proc_pid_cmdline),
71633@@ -2916,10 +3083,10 @@ static const struct pid_entry tid_base_stuff[] = {
71634 #ifdef CONFIG_SECURITY
71635 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
71636 #endif
71637-#ifdef CONFIG_KALLSYMS
71638+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71639 INF("wchan", S_IRUGO, proc_pid_wchan),
71640 #endif
71641-#ifdef CONFIG_STACKTRACE
71642+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
71643 ONE("stack", S_IRUGO, proc_pid_stack),
71644 #endif
71645 #ifdef CONFIG_SCHEDSTATS
71646diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
71647index 82676e3..5f8518a 100644
71648--- a/fs/proc/cmdline.c
71649+++ b/fs/proc/cmdline.c
71650@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
71651
71652 static int __init proc_cmdline_init(void)
71653 {
71654+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71655+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
71656+#else
71657 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
71658+#endif
71659 return 0;
71660 }
71661 module_init(proc_cmdline_init);
71662diff --git a/fs/proc/devices.c b/fs/proc/devices.c
71663index 59ee7da..469b4b6 100644
71664--- a/fs/proc/devices.c
71665+++ b/fs/proc/devices.c
71666@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
71667
71668 static int __init proc_devices_init(void)
71669 {
71670+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71671+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
71672+#else
71673 proc_create("devices", 0, NULL, &proc_devinfo_operations);
71674+#endif
71675 return 0;
71676 }
71677 module_init(proc_devices_init);
71678diff --git a/fs/proc/inode.c b/fs/proc/inode.c
71679index d78ade3..81767f9 100644
71680--- a/fs/proc/inode.c
71681+++ b/fs/proc/inode.c
71682@@ -18,12 +18,19 @@
71683 #include <linux/module.h>
71684 #include <linux/smp_lock.h>
71685 #include <linux/sysctl.h>
71686+#include <linux/grsecurity.h>
71687
71688 #include <asm/system.h>
71689 #include <asm/uaccess.h>
71690
71691 #include "internal.h"
71692
71693+#ifdef CONFIG_PROC_SYSCTL
71694+extern const struct inode_operations proc_sys_inode_operations;
71695+extern const struct inode_operations proc_sys_dir_operations;
71696+#endif
71697+
71698+
71699 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
71700 {
71701 atomic_inc(&de->count);
71702@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
71703 de_put(de);
71704 if (PROC_I(inode)->sysctl)
71705 sysctl_head_put(PROC_I(inode)->sysctl);
71706+
71707+#ifdef CONFIG_PROC_SYSCTL
71708+ if (inode->i_op == &proc_sys_inode_operations ||
71709+ inode->i_op == &proc_sys_dir_operations)
71710+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
71711+#endif
71712+
71713 clear_inode(inode);
71714 }
71715
71716@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
71717 if (de->mode) {
71718 inode->i_mode = de->mode;
71719 inode->i_uid = de->uid;
71720+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71721+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
71722+#else
71723 inode->i_gid = de->gid;
71724+#endif
71725 }
71726 if (de->size)
71727 inode->i_size = de->size;
71728diff --git a/fs/proc/internal.h b/fs/proc/internal.h
71729index 753ca37..26bcf3b 100644
71730--- a/fs/proc/internal.h
71731+++ b/fs/proc/internal.h
71732@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
71733 struct pid *pid, struct task_struct *task);
71734 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
71735 struct pid *pid, struct task_struct *task);
71736+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
71737+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
71738+#endif
71739 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
71740
71741 extern const struct file_operations proc_maps_operations;
71742diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
71743index b442dac..aab29cb 100644
71744--- a/fs/proc/kcore.c
71745+++ b/fs/proc/kcore.c
71746@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
71747 off_t offset = 0;
71748 struct kcore_list *m;
71749
71750+ pax_track_stack();
71751+
71752 /* setup ELF header */
71753 elf = (struct elfhdr *) bufp;
71754 bufp += sizeof(struct elfhdr);
71755@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
71756 * the addresses in the elf_phdr on our list.
71757 */
71758 start = kc_offset_to_vaddr(*fpos - elf_buflen);
71759- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
71760+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
71761+ if (tsz > buflen)
71762 tsz = buflen;
71763-
71764+
71765 while (buflen) {
71766 struct kcore_list *m;
71767
71768@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
71769 kfree(elf_buf);
71770 } else {
71771 if (kern_addr_valid(start)) {
71772- unsigned long n;
71773+ char *elf_buf;
71774+ mm_segment_t oldfs;
71775
71776- n = copy_to_user(buffer, (char *)start, tsz);
71777- /*
71778- * We cannot distingush between fault on source
71779- * and fault on destination. When this happens
71780- * we clear too and hope it will trigger the
71781- * EFAULT again.
71782- */
71783- if (n) {
71784- if (clear_user(buffer + tsz - n,
71785- n))
71786+ elf_buf = kmalloc(tsz, GFP_KERNEL);
71787+ if (!elf_buf)
71788+ return -ENOMEM;
71789+ oldfs = get_fs();
71790+ set_fs(KERNEL_DS);
71791+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
71792+ set_fs(oldfs);
71793+ if (copy_to_user(buffer, elf_buf, tsz)) {
71794+ kfree(elf_buf);
71795 return -EFAULT;
71796+ }
71797 }
71798+ set_fs(oldfs);
71799+ kfree(elf_buf);
71800 } else {
71801 if (clear_user(buffer, tsz))
71802 return -EFAULT;
71803@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
71804
71805 static int open_kcore(struct inode *inode, struct file *filp)
71806 {
71807+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71808+ return -EPERM;
71809+#endif
71810 if (!capable(CAP_SYS_RAWIO))
71811 return -EPERM;
71812 if (kcore_need_update)
71813diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
71814index 7ca7834..cfe90a4 100644
71815--- a/fs/proc/kmsg.c
71816+++ b/fs/proc/kmsg.c
71817@@ -12,37 +12,37 @@
71818 #include <linux/poll.h>
71819 #include <linux/proc_fs.h>
71820 #include <linux/fs.h>
71821+#include <linux/syslog.h>
71822
71823 #include <asm/uaccess.h>
71824 #include <asm/io.h>
71825
71826 extern wait_queue_head_t log_wait;
71827
71828-extern int do_syslog(int type, char __user *bug, int count);
71829-
71830 static int kmsg_open(struct inode * inode, struct file * file)
71831 {
71832- return do_syslog(1,NULL,0);
71833+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
71834 }
71835
71836 static int kmsg_release(struct inode * inode, struct file * file)
71837 {
71838- (void) do_syslog(0,NULL,0);
71839+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
71840 return 0;
71841 }
71842
71843 static ssize_t kmsg_read(struct file *file, char __user *buf,
71844 size_t count, loff_t *ppos)
71845 {
71846- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
71847+ if ((file->f_flags & O_NONBLOCK) &&
71848+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
71849 return -EAGAIN;
71850- return do_syslog(2, buf, count);
71851+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
71852 }
71853
71854 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
71855 {
71856 poll_wait(file, &log_wait, wait);
71857- if (do_syslog(9, NULL, 0))
71858+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
71859 return POLLIN | POLLRDNORM;
71860 return 0;
71861 }
71862diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
71863index a65239c..ad1182a 100644
71864--- a/fs/proc/meminfo.c
71865+++ b/fs/proc/meminfo.c
71866@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
71867 unsigned long pages[NR_LRU_LISTS];
71868 int lru;
71869
71870+ pax_track_stack();
71871+
71872 /*
71873 * display in kilobytes.
71874 */
71875@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
71876 vmi.used >> 10,
71877 vmi.largest_chunk >> 10
71878 #ifdef CONFIG_MEMORY_FAILURE
71879- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
71880+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
71881 #endif
71882 );
71883
71884diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
71885index 9fe7d7e..cdb62c9 100644
71886--- a/fs/proc/nommu.c
71887+++ b/fs/proc/nommu.c
71888@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
71889 if (len < 1)
71890 len = 1;
71891 seq_printf(m, "%*c", len, ' ');
71892- seq_path(m, &file->f_path, "");
71893+ seq_path(m, &file->f_path, "\n\\");
71894 }
71895
71896 seq_putc(m, '\n');
71897diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
71898index 04d1270..25e1173 100644
71899--- a/fs/proc/proc_net.c
71900+++ b/fs/proc/proc_net.c
71901@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
71902 struct task_struct *task;
71903 struct nsproxy *ns;
71904 struct net *net = NULL;
71905+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71906+ const struct cred *cred = current_cred();
71907+#endif
71908+
71909+#ifdef CONFIG_GRKERNSEC_PROC_USER
71910+ if (cred->fsuid)
71911+ return net;
71912+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71913+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
71914+ return net;
71915+#endif
71916
71917 rcu_read_lock();
71918 task = pid_task(proc_pid(dir), PIDTYPE_PID);
71919diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
71920index f667e8a..55f4d96 100644
71921--- a/fs/proc/proc_sysctl.c
71922+++ b/fs/proc/proc_sysctl.c
71923@@ -7,11 +7,13 @@
71924 #include <linux/security.h>
71925 #include "internal.h"
71926
71927+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
71928+
71929 static const struct dentry_operations proc_sys_dentry_operations;
71930 static const struct file_operations proc_sys_file_operations;
71931-static const struct inode_operations proc_sys_inode_operations;
71932+const struct inode_operations proc_sys_inode_operations;
71933 static const struct file_operations proc_sys_dir_file_operations;
71934-static const struct inode_operations proc_sys_dir_operations;
71935+const struct inode_operations proc_sys_dir_operations;
71936
71937 static struct inode *proc_sys_make_inode(struct super_block *sb,
71938 struct ctl_table_header *head, struct ctl_table *table)
71939@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
71940 if (!p)
71941 goto out;
71942
71943+ if (gr_handle_sysctl(p, MAY_EXEC))
71944+ goto out;
71945+
71946 err = ERR_PTR(-ENOMEM);
71947 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
71948 if (h)
71949@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
71950
71951 err = NULL;
71952 dentry->d_op = &proc_sys_dentry_operations;
71953+
71954+ gr_handle_proc_create(dentry, inode);
71955+
71956 d_add(dentry, inode);
71957
71958 out:
71959@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
71960 return -ENOMEM;
71961 } else {
71962 child->d_op = &proc_sys_dentry_operations;
71963+
71964+ gr_handle_proc_create(child, inode);
71965+
71966 d_add(child, inode);
71967 }
71968 } else {
71969@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
71970 if (*pos < file->f_pos)
71971 continue;
71972
71973+ if (gr_handle_sysctl(table, 0))
71974+ continue;
71975+
71976 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
71977 if (res)
71978 return res;
71979@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
71980 if (IS_ERR(head))
71981 return PTR_ERR(head);
71982
71983+ if (table && gr_handle_sysctl(table, MAY_EXEC))
71984+ return -ENOENT;
71985+
71986 generic_fillattr(inode, stat);
71987 if (table)
71988 stat->mode = (stat->mode & S_IFMT) | table->mode;
71989@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
71990 };
71991
71992 static const struct file_operations proc_sys_dir_file_operations = {
71993+ .read = generic_read_dir,
71994 .readdir = proc_sys_readdir,
71995 .llseek = generic_file_llseek,
71996 };
71997
71998-static const struct inode_operations proc_sys_inode_operations = {
71999+const struct inode_operations proc_sys_inode_operations = {
72000 .permission = proc_sys_permission,
72001 .setattr = proc_sys_setattr,
72002 .getattr = proc_sys_getattr,
72003 };
72004
72005-static const struct inode_operations proc_sys_dir_operations = {
72006+const struct inode_operations proc_sys_dir_operations = {
72007 .lookup = proc_sys_lookup,
72008 .permission = proc_sys_permission,
72009 .setattr = proc_sys_setattr,
72010diff --git a/fs/proc/root.c b/fs/proc/root.c
72011index b080b79..d957e63 100644
72012--- a/fs/proc/root.c
72013+++ b/fs/proc/root.c
72014@@ -134,7 +134,15 @@ void __init proc_root_init(void)
72015 #ifdef CONFIG_PROC_DEVICETREE
72016 proc_device_tree_init();
72017 #endif
72018+#ifdef CONFIG_GRKERNSEC_PROC_ADD
72019+#ifdef CONFIG_GRKERNSEC_PROC_USER
72020+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
72021+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
72022+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
72023+#endif
72024+#else
72025 proc_mkdir("bus", NULL);
72026+#endif
72027 proc_sys_init();
72028 }
72029
72030diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
72031index 3b7b82a..4b420b0 100644
72032--- a/fs/proc/task_mmu.c
72033+++ b/fs/proc/task_mmu.c
72034@@ -8,6 +8,7 @@
72035 #include <linux/mempolicy.h>
72036 #include <linux/swap.h>
72037 #include <linux/swapops.h>
72038+#include <linux/grsecurity.h>
72039
72040 #include <asm/elf.h>
72041 #include <asm/uaccess.h>
72042@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72043 "VmStk:\t%8lu kB\n"
72044 "VmExe:\t%8lu kB\n"
72045 "VmLib:\t%8lu kB\n"
72046- "VmPTE:\t%8lu kB\n",
72047- hiwater_vm << (PAGE_SHIFT-10),
72048+ "VmPTE:\t%8lu kB\n"
72049+
72050+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72051+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
72052+#endif
72053+
72054+ ,hiwater_vm << (PAGE_SHIFT-10),
72055 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
72056 mm->locked_vm << (PAGE_SHIFT-10),
72057 hiwater_rss << (PAGE_SHIFT-10),
72058 total_rss << (PAGE_SHIFT-10),
72059 data << (PAGE_SHIFT-10),
72060 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
72061- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
72062+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
72063+
72064+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72065+ , mm->context.user_cs_base, mm->context.user_cs_limit
72066+#endif
72067+
72068+ );
72069 }
72070
72071 unsigned long task_vsize(struct mm_struct *mm)
72072@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
72073 struct proc_maps_private *priv = m->private;
72074 struct vm_area_struct *vma = v;
72075
72076- vma_stop(priv, vma);
72077+ if (!IS_ERR(vma))
72078+ vma_stop(priv, vma);
72079 if (priv->task)
72080 put_task_struct(priv->task);
72081 }
72082@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
72083 return ret;
72084 }
72085
72086+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72087+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
72088+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
72089+ _mm->pax_flags & MF_PAX_SEGMEXEC))
72090+#endif
72091+
72092 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72093 {
72094 struct mm_struct *mm = vma->vm_mm;
72095@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72096 int flags = vma->vm_flags;
72097 unsigned long ino = 0;
72098 unsigned long long pgoff = 0;
72099- unsigned long start;
72100 dev_t dev = 0;
72101 int len;
72102
72103@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72104 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
72105 }
72106
72107- /* We don't show the stack guard page in /proc/maps */
72108- start = vma->vm_start;
72109- if (vma->vm_flags & VM_GROWSDOWN)
72110- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
72111- start += PAGE_SIZE;
72112-
72113 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
72114- start,
72115+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72116+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
72117+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
72118+#else
72119+ vma->vm_start,
72120 vma->vm_end,
72121+#endif
72122 flags & VM_READ ? 'r' : '-',
72123 flags & VM_WRITE ? 'w' : '-',
72124 flags & VM_EXEC ? 'x' : '-',
72125 flags & VM_MAYSHARE ? 's' : 'p',
72126+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72127+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
72128+#else
72129 pgoff,
72130+#endif
72131 MAJOR(dev), MINOR(dev), ino, &len);
72132
72133 /*
72134@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72135 */
72136 if (file) {
72137 pad_len_spaces(m, len);
72138- seq_path(m, &file->f_path, "\n");
72139+ seq_path(m, &file->f_path, "\n\\");
72140 } else {
72141 const char *name = arch_vma_name(vma);
72142 if (!name) {
72143@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
72144 if (vma->vm_start <= mm->brk &&
72145 vma->vm_end >= mm->start_brk) {
72146 name = "[heap]";
72147- } else if (vma->vm_start <= mm->start_stack &&
72148- vma->vm_end >= mm->start_stack) {
72149+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
72150+ (vma->vm_start <= mm->start_stack &&
72151+ vma->vm_end >= mm->start_stack)) {
72152 name = "[stack]";
72153 }
72154 } else {
72155@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
72156 struct proc_maps_private *priv = m->private;
72157 struct task_struct *task = priv->task;
72158
72159+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72160+ if (current->exec_id != m->exec_id) {
72161+ gr_log_badprocpid("maps");
72162+ return 0;
72163+ }
72164+#endif
72165+
72166 show_map_vma(m, vma);
72167
72168 if (m->count < m->size) /* vma is copied successfully */
72169@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
72170 .private = &mss,
72171 };
72172
72173+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72174+ if (current->exec_id != m->exec_id) {
72175+ gr_log_badprocpid("smaps");
72176+ return 0;
72177+ }
72178+#endif
72179 memset(&mss, 0, sizeof mss);
72180- mss.vma = vma;
72181- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72182- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72183+
72184+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72185+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
72186+#endif
72187+ mss.vma = vma;
72188+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
72189+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
72190+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72191+ }
72192+#endif
72193
72194 show_map_vma(m, vma);
72195
72196@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
72197 "Swap: %8lu kB\n"
72198 "KernelPageSize: %8lu kB\n"
72199 "MMUPageSize: %8lu kB\n",
72200+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72201+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
72202+#else
72203 (vma->vm_end - vma->vm_start) >> 10,
72204+#endif
72205 mss.resident >> 10,
72206 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
72207 mss.shared_clean >> 10,
72208diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
72209index 8f5c05d..c99c76d 100644
72210--- a/fs/proc/task_nommu.c
72211+++ b/fs/proc/task_nommu.c
72212@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
72213 else
72214 bytes += kobjsize(mm);
72215
72216- if (current->fs && current->fs->users > 1)
72217+ if (current->fs && atomic_read(&current->fs->users) > 1)
72218 sbytes += kobjsize(current->fs);
72219 else
72220 bytes += kobjsize(current->fs);
72221@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
72222 if (len < 1)
72223 len = 1;
72224 seq_printf(m, "%*c", len, ' ');
72225- seq_path(m, &file->f_path, "");
72226+ seq_path(m, &file->f_path, "\n\\");
72227 }
72228
72229 seq_putc(m, '\n');
72230diff --git a/fs/readdir.c b/fs/readdir.c
72231index 7723401..30059a6 100644
72232--- a/fs/readdir.c
72233+++ b/fs/readdir.c
72234@@ -16,6 +16,7 @@
72235 #include <linux/security.h>
72236 #include <linux/syscalls.h>
72237 #include <linux/unistd.h>
72238+#include <linux/namei.h>
72239
72240 #include <asm/uaccess.h>
72241
72242@@ -67,6 +68,7 @@ struct old_linux_dirent {
72243
72244 struct readdir_callback {
72245 struct old_linux_dirent __user * dirent;
72246+ struct file * file;
72247 int result;
72248 };
72249
72250@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
72251 buf->result = -EOVERFLOW;
72252 return -EOVERFLOW;
72253 }
72254+
72255+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72256+ return 0;
72257+
72258 buf->result++;
72259 dirent = buf->dirent;
72260 if (!access_ok(VERIFY_WRITE, dirent,
72261@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
72262
72263 buf.result = 0;
72264 buf.dirent = dirent;
72265+ buf.file = file;
72266
72267 error = vfs_readdir(file, fillonedir, &buf);
72268 if (buf.result)
72269@@ -142,6 +149,7 @@ struct linux_dirent {
72270 struct getdents_callback {
72271 struct linux_dirent __user * current_dir;
72272 struct linux_dirent __user * previous;
72273+ struct file * file;
72274 int count;
72275 int error;
72276 };
72277@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
72278 buf->error = -EOVERFLOW;
72279 return -EOVERFLOW;
72280 }
72281+
72282+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72283+ return 0;
72284+
72285 dirent = buf->previous;
72286 if (dirent) {
72287 if (__put_user(offset, &dirent->d_off))
72288@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
72289 buf.previous = NULL;
72290 buf.count = count;
72291 buf.error = 0;
72292+ buf.file = file;
72293
72294 error = vfs_readdir(file, filldir, &buf);
72295 if (error >= 0)
72296@@ -228,6 +241,7 @@ out:
72297 struct getdents_callback64 {
72298 struct linux_dirent64 __user * current_dir;
72299 struct linux_dirent64 __user * previous;
72300+ struct file *file;
72301 int count;
72302 int error;
72303 };
72304@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
72305 buf->error = -EINVAL; /* only used if we fail.. */
72306 if (reclen > buf->count)
72307 return -EINVAL;
72308+
72309+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
72310+ return 0;
72311+
72312 dirent = buf->previous;
72313 if (dirent) {
72314 if (__put_user(offset, &dirent->d_off))
72315@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72316
72317 buf.current_dir = dirent;
72318 buf.previous = NULL;
72319+ buf.file = file;
72320 buf.count = count;
72321 buf.error = 0;
72322
72323@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
72324 error = buf.error;
72325 lastdirent = buf.previous;
72326 if (lastdirent) {
72327- typeof(lastdirent->d_off) d_off = file->f_pos;
72328+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
72329 if (__put_user(d_off, &lastdirent->d_off))
72330 error = -EFAULT;
72331 else
72332diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
72333index d42c30c..4fd8718 100644
72334--- a/fs/reiserfs/dir.c
72335+++ b/fs/reiserfs/dir.c
72336@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
72337 struct reiserfs_dir_entry de;
72338 int ret = 0;
72339
72340+ pax_track_stack();
72341+
72342 reiserfs_write_lock(inode->i_sb);
72343
72344 reiserfs_check_lock_depth(inode->i_sb, "readdir");
72345diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
72346index 128d3f7..8840d44 100644
72347--- a/fs/reiserfs/do_balan.c
72348+++ b/fs/reiserfs/do_balan.c
72349@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
72350 return;
72351 }
72352
72353- atomic_inc(&(fs_generation(tb->tb_sb)));
72354+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
72355 do_balance_starts(tb);
72356
72357 /* balance leaf returns 0 except if combining L R and S into
72358diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
72359index 72cb1cc..d0e3181 100644
72360--- a/fs/reiserfs/item_ops.c
72361+++ b/fs/reiserfs/item_ops.c
72362@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
72363 vi->vi_index, vi->vi_type, vi->vi_ih);
72364 }
72365
72366-static struct item_operations stat_data_ops = {
72367+static const struct item_operations stat_data_ops = {
72368 .bytes_number = sd_bytes_number,
72369 .decrement_key = sd_decrement_key,
72370 .is_left_mergeable = sd_is_left_mergeable,
72371@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
72372 vi->vi_index, vi->vi_type, vi->vi_ih);
72373 }
72374
72375-static struct item_operations direct_ops = {
72376+static const struct item_operations direct_ops = {
72377 .bytes_number = direct_bytes_number,
72378 .decrement_key = direct_decrement_key,
72379 .is_left_mergeable = direct_is_left_mergeable,
72380@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
72381 vi->vi_index, vi->vi_type, vi->vi_ih);
72382 }
72383
72384-static struct item_operations indirect_ops = {
72385+static const struct item_operations indirect_ops = {
72386 .bytes_number = indirect_bytes_number,
72387 .decrement_key = indirect_decrement_key,
72388 .is_left_mergeable = indirect_is_left_mergeable,
72389@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
72390 printk("\n");
72391 }
72392
72393-static struct item_operations direntry_ops = {
72394+static const struct item_operations direntry_ops = {
72395 .bytes_number = direntry_bytes_number,
72396 .decrement_key = direntry_decrement_key,
72397 .is_left_mergeable = direntry_is_left_mergeable,
72398@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
72399 "Invalid item type observed, run fsck ASAP");
72400 }
72401
72402-static struct item_operations errcatch_ops = {
72403+static const struct item_operations errcatch_ops = {
72404 errcatch_bytes_number,
72405 errcatch_decrement_key,
72406 errcatch_is_left_mergeable,
72407@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
72408 #error Item types must use disk-format assigned values.
72409 #endif
72410
72411-struct item_operations *item_ops[TYPE_ANY + 1] = {
72412+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
72413 &stat_data_ops,
72414 &indirect_ops,
72415 &direct_ops,
72416diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
72417index b5fe0aa..e0e25c4 100644
72418--- a/fs/reiserfs/journal.c
72419+++ b/fs/reiserfs/journal.c
72420@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
72421 struct buffer_head *bh;
72422 int i, j;
72423
72424+ pax_track_stack();
72425+
72426 bh = __getblk(dev, block, bufsize);
72427 if (buffer_uptodate(bh))
72428 return (bh);
72429diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
72430index 2715791..b8996db 100644
72431--- a/fs/reiserfs/namei.c
72432+++ b/fs/reiserfs/namei.c
72433@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
72434 unsigned long savelink = 1;
72435 struct timespec ctime;
72436
72437+ pax_track_stack();
72438+
72439 /* three balancings: (1) old name removal, (2) new name insertion
72440 and (3) maybe "save" link insertion
72441 stat data updates: (1) old directory,
72442diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
72443index 9229e55..3d2e3b7 100644
72444--- a/fs/reiserfs/procfs.c
72445+++ b/fs/reiserfs/procfs.c
72446@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
72447 "SMALL_TAILS " : "NO_TAILS ",
72448 replay_only(sb) ? "REPLAY_ONLY " : "",
72449 convert_reiserfs(sb) ? "CONV " : "",
72450- atomic_read(&r->s_generation_counter),
72451+ atomic_read_unchecked(&r->s_generation_counter),
72452 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
72453 SF(s_do_balance), SF(s_unneeded_left_neighbor),
72454 SF(s_good_search_by_key_reada), SF(s_bmaps),
72455@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
72456 struct journal_params *jp = &rs->s_v1.s_journal;
72457 char b[BDEVNAME_SIZE];
72458
72459+ pax_track_stack();
72460+
72461 seq_printf(m, /* on-disk fields */
72462 "jp_journal_1st_block: \t%i\n"
72463 "jp_journal_dev: \t%s[%x]\n"
72464diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
72465index d036ee5..4c7dca1 100644
72466--- a/fs/reiserfs/stree.c
72467+++ b/fs/reiserfs/stree.c
72468@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
72469 int iter = 0;
72470 #endif
72471
72472+ pax_track_stack();
72473+
72474 BUG_ON(!th->t_trans_id);
72475
72476 init_tb_struct(th, &s_del_balance, sb, path,
72477@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
72478 int retval;
72479 int quota_cut_bytes = 0;
72480
72481+ pax_track_stack();
72482+
72483 BUG_ON(!th->t_trans_id);
72484
72485 le_key2cpu_key(&cpu_key, key);
72486@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
72487 int quota_cut_bytes;
72488 loff_t tail_pos = 0;
72489
72490+ pax_track_stack();
72491+
72492 BUG_ON(!th->t_trans_id);
72493
72494 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
72495@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
72496 int retval;
72497 int fs_gen;
72498
72499+ pax_track_stack();
72500+
72501 BUG_ON(!th->t_trans_id);
72502
72503 fs_gen = get_generation(inode->i_sb);
72504@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
72505 int fs_gen = 0;
72506 int quota_bytes = 0;
72507
72508+ pax_track_stack();
72509+
72510 BUG_ON(!th->t_trans_id);
72511
72512 if (inode) { /* Do we count quotas for item? */
72513diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
72514index 7cb1285..c726cd0 100644
72515--- a/fs/reiserfs/super.c
72516+++ b/fs/reiserfs/super.c
72517@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
72518 {.option_name = NULL}
72519 };
72520
72521+ pax_track_stack();
72522+
72523 *blocks = 0;
72524 if (!options || !*options)
72525 /* use default configuration: create tails, journaling on, no
72526diff --git a/fs/select.c b/fs/select.c
72527index fd38ce2..f5381b8 100644
72528--- a/fs/select.c
72529+++ b/fs/select.c
72530@@ -20,6 +20,7 @@
72531 #include <linux/module.h>
72532 #include <linux/slab.h>
72533 #include <linux/poll.h>
72534+#include <linux/security.h>
72535 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
72536 #include <linux/file.h>
72537 #include <linux/fdtable.h>
72538@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
72539 int retval, i, timed_out = 0;
72540 unsigned long slack = 0;
72541
72542+ pax_track_stack();
72543+
72544 rcu_read_lock();
72545 retval = max_select_fd(n, fds);
72546 rcu_read_unlock();
72547@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
72548 /* Allocate small arguments on the stack to save memory and be faster */
72549 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
72550
72551+ pax_track_stack();
72552+
72553 ret = -EINVAL;
72554 if (n < 0)
72555 goto out_nofds;
72556@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
72557 struct poll_list *walk = head;
72558 unsigned long todo = nfds;
72559
72560+ pax_track_stack();
72561+
72562+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
72563 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
72564 return -EINVAL;
72565
72566diff --git a/fs/seq_file.c b/fs/seq_file.c
72567index eae7d9d..4ddabe2 100644
72568--- a/fs/seq_file.c
72569+++ b/fs/seq_file.c
72570@@ -9,6 +9,7 @@
72571 #include <linux/module.h>
72572 #include <linux/seq_file.h>
72573 #include <linux/slab.h>
72574+#include <linux/sched.h>
72575
72576 #include <asm/uaccess.h>
72577 #include <asm/page.h>
72578@@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
72579 memset(p, 0, sizeof(*p));
72580 mutex_init(&p->lock);
72581 p->op = op;
72582+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72583+ p->exec_id = current->exec_id;
72584+#endif
72585
72586 /*
72587 * Wrappers around seq_open(e.g. swaps_open) need to be
72588@@ -76,7 +80,8 @@ static int traverse(struct seq_file *m, loff_t offset)
72589 return 0;
72590 }
72591 if (!m->buf) {
72592- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
72593+ m->size = PAGE_SIZE;
72594+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
72595 if (!m->buf)
72596 return -ENOMEM;
72597 }
72598@@ -116,7 +121,8 @@ static int traverse(struct seq_file *m, loff_t offset)
72599 Eoverflow:
72600 m->op->stop(m, p);
72601 kfree(m->buf);
72602- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
72603+ m->size <<= 1;
72604+ m->buf = kmalloc(m->size, GFP_KERNEL);
72605 return !m->buf ? -ENOMEM : -EAGAIN;
72606 }
72607
72608@@ -169,7 +175,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
72609 m->version = file->f_version;
72610 /* grab buffer if we didn't have one */
72611 if (!m->buf) {
72612- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
72613+ m->size = PAGE_SIZE;
72614+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
72615 if (!m->buf)
72616 goto Enomem;
72617 }
72618@@ -210,7 +217,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
72619 goto Fill;
72620 m->op->stop(m, p);
72621 kfree(m->buf);
72622- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
72623+ m->size <<= 1;
72624+ m->buf = kmalloc(m->size, GFP_KERNEL);
72625 if (!m->buf)
72626 goto Enomem;
72627 m->count = 0;
72628@@ -551,7 +559,7 @@ static void single_stop(struct seq_file *p, void *v)
72629 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
72630 void *data)
72631 {
72632- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
72633+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
72634 int res = -ENOMEM;
72635
72636 if (op) {
72637diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
72638index 71c29b6..54694dd 100644
72639--- a/fs/smbfs/proc.c
72640+++ b/fs/smbfs/proc.c
72641@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
72642
72643 out:
72644 if (server->local_nls != NULL && server->remote_nls != NULL)
72645- server->ops->convert = convert_cp;
72646+ *(void **)&server->ops->convert = convert_cp;
72647 else
72648- server->ops->convert = convert_memcpy;
72649+ *(void **)&server->ops->convert = convert_memcpy;
72650
72651 smb_unlock_server(server);
72652 return n;
72653@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
72654
72655 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
72656 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
72657- server->ops->getattr = smb_proc_getattr_core;
72658+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
72659 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
72660- server->ops->getattr = smb_proc_getattr_ff;
72661+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
72662 }
72663
72664 /* Decode server capabilities */
72665@@ -3439,7 +3439,7 @@ out:
72666 static void
72667 install_ops(struct smb_ops *dst, struct smb_ops *src)
72668 {
72669- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
72670+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
72671 }
72672
72673 /* < LANMAN2 */
72674diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
72675index 00b2909..2ace383 100644
72676--- a/fs/smbfs/symlink.c
72677+++ b/fs/smbfs/symlink.c
72678@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
72679
72680 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
72681 {
72682- char *s = nd_get_link(nd);
72683+ const char *s = nd_get_link(nd);
72684 if (!IS_ERR(s))
72685 __putname(s);
72686 }
72687diff --git a/fs/splice.c b/fs/splice.c
72688index bb92b7c..5aa72b0 100644
72689--- a/fs/splice.c
72690+++ b/fs/splice.c
72691@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
72692 pipe_lock(pipe);
72693
72694 for (;;) {
72695- if (!pipe->readers) {
72696+ if (!atomic_read(&pipe->readers)) {
72697 send_sig(SIGPIPE, current, 0);
72698 if (!ret)
72699 ret = -EPIPE;
72700@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
72701 do_wakeup = 0;
72702 }
72703
72704- pipe->waiting_writers++;
72705+ atomic_inc(&pipe->waiting_writers);
72706 pipe_wait(pipe);
72707- pipe->waiting_writers--;
72708+ atomic_dec(&pipe->waiting_writers);
72709 }
72710
72711 pipe_unlock(pipe);
72712@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
72713 .spd_release = spd_release_page,
72714 };
72715
72716+ pax_track_stack();
72717+
72718 index = *ppos >> PAGE_CACHE_SHIFT;
72719 loff = *ppos & ~PAGE_CACHE_MASK;
72720 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
72721@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
72722 old_fs = get_fs();
72723 set_fs(get_ds());
72724 /* The cast to a user pointer is valid due to the set_fs() */
72725- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
72726+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
72727 set_fs(old_fs);
72728
72729 return res;
72730@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
72731 old_fs = get_fs();
72732 set_fs(get_ds());
72733 /* The cast to a user pointer is valid due to the set_fs() */
72734- res = vfs_write(file, (const char __user *)buf, count, &pos);
72735+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
72736 set_fs(old_fs);
72737
72738 return res;
72739@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
72740 .spd_release = spd_release_page,
72741 };
72742
72743+ pax_track_stack();
72744+
72745 index = *ppos >> PAGE_CACHE_SHIFT;
72746 offset = *ppos & ~PAGE_CACHE_MASK;
72747 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
72748@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
72749 goto err;
72750
72751 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
72752- vec[i].iov_base = (void __user *) page_address(page);
72753+ vec[i].iov_base = (__force void __user *) page_address(page);
72754 vec[i].iov_len = this_len;
72755 pages[i] = page;
72756 spd.nr_pages++;
72757@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
72758 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
72759 {
72760 while (!pipe->nrbufs) {
72761- if (!pipe->writers)
72762+ if (!atomic_read(&pipe->writers))
72763 return 0;
72764
72765- if (!pipe->waiting_writers && sd->num_spliced)
72766+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
72767 return 0;
72768
72769 if (sd->flags & SPLICE_F_NONBLOCK)
72770@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
72771 * out of the pipe right after the splice_to_pipe(). So set
72772 * PIPE_READERS appropriately.
72773 */
72774- pipe->readers = 1;
72775+ atomic_set(&pipe->readers, 1);
72776
72777 current->splice_pipe = pipe;
72778 }
72779@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
72780 .spd_release = spd_release_page,
72781 };
72782
72783+ pax_track_stack();
72784+
72785 pipe = pipe_info(file->f_path.dentry->d_inode);
72786 if (!pipe)
72787 return -EBADF;
72788@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
72789 ret = -ERESTARTSYS;
72790 break;
72791 }
72792- if (!pipe->writers)
72793+ if (!atomic_read(&pipe->writers))
72794 break;
72795- if (!pipe->waiting_writers) {
72796+ if (!atomic_read(&pipe->waiting_writers)) {
72797 if (flags & SPLICE_F_NONBLOCK) {
72798 ret = -EAGAIN;
72799 break;
72800@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
72801 pipe_lock(pipe);
72802
72803 while (pipe->nrbufs >= PIPE_BUFFERS) {
72804- if (!pipe->readers) {
72805+ if (!atomic_read(&pipe->readers)) {
72806 send_sig(SIGPIPE, current, 0);
72807 ret = -EPIPE;
72808 break;
72809@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
72810 ret = -ERESTARTSYS;
72811 break;
72812 }
72813- pipe->waiting_writers++;
72814+ atomic_inc(&pipe->waiting_writers);
72815 pipe_wait(pipe);
72816- pipe->waiting_writers--;
72817+ atomic_dec(&pipe->waiting_writers);
72818 }
72819
72820 pipe_unlock(pipe);
72821@@ -1786,14 +1792,14 @@ retry:
72822 pipe_double_lock(ipipe, opipe);
72823
72824 do {
72825- if (!opipe->readers) {
72826+ if (!atomic_read(&opipe->readers)) {
72827 send_sig(SIGPIPE, current, 0);
72828 if (!ret)
72829 ret = -EPIPE;
72830 break;
72831 }
72832
72833- if (!ipipe->nrbufs && !ipipe->writers)
72834+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
72835 break;
72836
72837 /*
72838@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
72839 pipe_double_lock(ipipe, opipe);
72840
72841 do {
72842- if (!opipe->readers) {
72843+ if (!atomic_read(&opipe->readers)) {
72844 send_sig(SIGPIPE, current, 0);
72845 if (!ret)
72846 ret = -EPIPE;
72847@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
72848 * return EAGAIN if we have the potential of some data in the
72849 * future, otherwise just return 0
72850 */
72851- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
72852+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
72853 ret = -EAGAIN;
72854
72855 pipe_unlock(ipipe);
72856diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
72857index e020183..18d64b4 100644
72858--- a/fs/sysfs/dir.c
72859+++ b/fs/sysfs/dir.c
72860@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
72861 struct sysfs_dirent *sd;
72862 int rc;
72863
72864+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
72865+ const char *parent_name = parent_sd->s_name;
72866+
72867+ mode = S_IFDIR | S_IRWXU;
72868+
72869+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
72870+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
72871+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
72872+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
72873+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
72874+#endif
72875+
72876 /* allocate */
72877 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
72878 if (!sd)
72879diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
72880index 7118a38..70af853 100644
72881--- a/fs/sysfs/file.c
72882+++ b/fs/sysfs/file.c
72883@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
72884
72885 struct sysfs_open_dirent {
72886 atomic_t refcnt;
72887- atomic_t event;
72888+ atomic_unchecked_t event;
72889 wait_queue_head_t poll;
72890 struct list_head buffers; /* goes through sysfs_buffer.list */
72891 };
72892@@ -53,7 +53,7 @@ struct sysfs_buffer {
72893 size_t count;
72894 loff_t pos;
72895 char * page;
72896- struct sysfs_ops * ops;
72897+ const struct sysfs_ops * ops;
72898 struct mutex mutex;
72899 int needs_read_fill;
72900 int event;
72901@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
72902 {
72903 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
72904 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
72905- struct sysfs_ops * ops = buffer->ops;
72906+ const struct sysfs_ops * ops = buffer->ops;
72907 int ret = 0;
72908 ssize_t count;
72909
72910@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
72911 if (!sysfs_get_active_two(attr_sd))
72912 return -ENODEV;
72913
72914- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
72915+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
72916 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
72917
72918 sysfs_put_active_two(attr_sd);
72919@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
72920 {
72921 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
72922 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
72923- struct sysfs_ops * ops = buffer->ops;
72924+ const struct sysfs_ops * ops = buffer->ops;
72925 int rc;
72926
72927 /* need attr_sd for attr and ops, its parent for kobj */
72928@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
72929 return -ENOMEM;
72930
72931 atomic_set(&new_od->refcnt, 0);
72932- atomic_set(&new_od->event, 1);
72933+ atomic_set_unchecked(&new_od->event, 1);
72934 init_waitqueue_head(&new_od->poll);
72935 INIT_LIST_HEAD(&new_od->buffers);
72936 goto retry;
72937@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
72938 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
72939 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
72940 struct sysfs_buffer *buffer;
72941- struct sysfs_ops *ops;
72942+ const struct sysfs_ops *ops;
72943 int error = -EACCES;
72944 char *p;
72945
72946@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
72947
72948 sysfs_put_active_two(attr_sd);
72949
72950- if (buffer->event != atomic_read(&od->event))
72951+ if (buffer->event != atomic_read_unchecked(&od->event))
72952 goto trigger;
72953
72954 return DEFAULT_POLLMASK;
72955@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
72956
72957 od = sd->s_attr.open;
72958 if (od) {
72959- atomic_inc(&od->event);
72960+ atomic_inc_unchecked(&od->event);
72961 wake_up_interruptible(&od->poll);
72962 }
72963
72964diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
72965index c5081ad..342ea86 100644
72966--- a/fs/sysfs/symlink.c
72967+++ b/fs/sysfs/symlink.c
72968@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
72969
72970 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
72971 {
72972- char *page = nd_get_link(nd);
72973+ const char *page = nd_get_link(nd);
72974 if (!IS_ERR(page))
72975 free_page((unsigned long)page);
72976 }
72977diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
72978index 1e06853..b06d325 100644
72979--- a/fs/udf/balloc.c
72980+++ b/fs/udf/balloc.c
72981@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
72982
72983 mutex_lock(&sbi->s_alloc_mutex);
72984 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
72985- if (bloc->logicalBlockNum < 0 ||
72986- (bloc->logicalBlockNum + count) >
72987- partmap->s_partition_len) {
72988+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
72989 udf_debug("%d < %d || %d + %d > %d\n",
72990 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
72991 count, partmap->s_partition_len);
72992@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
72993
72994 mutex_lock(&sbi->s_alloc_mutex);
72995 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
72996- if (bloc->logicalBlockNum < 0 ||
72997- (bloc->logicalBlockNum + count) >
72998- partmap->s_partition_len) {
72999+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
73000 udf_debug("%d < %d || %d + %d > %d\n",
73001 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
73002 partmap->s_partition_len);
73003diff --git a/fs/udf/inode.c b/fs/udf/inode.c
73004index 6d24c2c..fff470f 100644
73005--- a/fs/udf/inode.c
73006+++ b/fs/udf/inode.c
73007@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
73008 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
73009 int lastblock = 0;
73010
73011+ pax_track_stack();
73012+
73013 prev_epos.offset = udf_file_entry_alloc_offset(inode);
73014 prev_epos.block = iinfo->i_location;
73015 prev_epos.bh = NULL;
73016diff --git a/fs/udf/misc.c b/fs/udf/misc.c
73017index 9215700..bf1f68e 100644
73018--- a/fs/udf/misc.c
73019+++ b/fs/udf/misc.c
73020@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
73021
73022 u8 udf_tag_checksum(const struct tag *t)
73023 {
73024- u8 *data = (u8 *)t;
73025+ const u8 *data = (const u8 *)t;
73026 u8 checksum = 0;
73027 int i;
73028 for (i = 0; i < sizeof(struct tag); ++i)
73029diff --git a/fs/utimes.c b/fs/utimes.c
73030index e4c75db..b4df0e0 100644
73031--- a/fs/utimes.c
73032+++ b/fs/utimes.c
73033@@ -1,6 +1,7 @@
73034 #include <linux/compiler.h>
73035 #include <linux/file.h>
73036 #include <linux/fs.h>
73037+#include <linux/security.h>
73038 #include <linux/linkage.h>
73039 #include <linux/mount.h>
73040 #include <linux/namei.h>
73041@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
73042 goto mnt_drop_write_and_out;
73043 }
73044 }
73045+
73046+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
73047+ error = -EACCES;
73048+ goto mnt_drop_write_and_out;
73049+ }
73050+
73051 mutex_lock(&inode->i_mutex);
73052 error = notify_change(path->dentry, &newattrs);
73053 mutex_unlock(&inode->i_mutex);
73054diff --git a/fs/xattr.c b/fs/xattr.c
73055index 6d4f6d3..cda3958 100644
73056--- a/fs/xattr.c
73057+++ b/fs/xattr.c
73058@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
73059 * Extended attribute SET operations
73060 */
73061 static long
73062-setxattr(struct dentry *d, const char __user *name, const void __user *value,
73063+setxattr(struct path *path, const char __user *name, const void __user *value,
73064 size_t size, int flags)
73065 {
73066 int error;
73067@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
73068 return PTR_ERR(kvalue);
73069 }
73070
73071- error = vfs_setxattr(d, kname, kvalue, size, flags);
73072+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
73073+ error = -EACCES;
73074+ goto out;
73075+ }
73076+
73077+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
73078+out:
73079 kfree(kvalue);
73080 return error;
73081 }
73082@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
73083 return error;
73084 error = mnt_want_write(path.mnt);
73085 if (!error) {
73086- error = setxattr(path.dentry, name, value, size, flags);
73087+ error = setxattr(&path, name, value, size, flags);
73088 mnt_drop_write(path.mnt);
73089 }
73090 path_put(&path);
73091@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
73092 return error;
73093 error = mnt_want_write(path.mnt);
73094 if (!error) {
73095- error = setxattr(path.dentry, name, value, size, flags);
73096+ error = setxattr(&path, name, value, size, flags);
73097 mnt_drop_write(path.mnt);
73098 }
73099 path_put(&path);
73100@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
73101 const void __user *,value, size_t, size, int, flags)
73102 {
73103 struct file *f;
73104- struct dentry *dentry;
73105 int error = -EBADF;
73106
73107 f = fget(fd);
73108 if (!f)
73109 return error;
73110- dentry = f->f_path.dentry;
73111- audit_inode(NULL, dentry);
73112+ audit_inode(NULL, f->f_path.dentry);
73113 error = mnt_want_write_file(f);
73114 if (!error) {
73115- error = setxattr(dentry, name, value, size, flags);
73116+ error = setxattr(&f->f_path, name, value, size, flags);
73117 mnt_drop_write(f->f_path.mnt);
73118 }
73119 fput(f);
73120diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
73121index c6ad7c7..f2847a7 100644
73122--- a/fs/xattr_acl.c
73123+++ b/fs/xattr_acl.c
73124@@ -17,8 +17,8 @@
73125 struct posix_acl *
73126 posix_acl_from_xattr(const void *value, size_t size)
73127 {
73128- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
73129- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
73130+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
73131+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
73132 int count;
73133 struct posix_acl *acl;
73134 struct posix_acl_entry *acl_e;
73135diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
73136index 942362f..88f96f5 100644
73137--- a/fs/xfs/linux-2.6/xfs_ioctl.c
73138+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
73139@@ -134,7 +134,7 @@ xfs_find_handle(
73140 }
73141
73142 error = -EFAULT;
73143- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
73144+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
73145 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
73146 goto out_put;
73147
73148@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
73149 if (IS_ERR(dentry))
73150 return PTR_ERR(dentry);
73151
73152- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
73153+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
73154 if (!kbuf)
73155 goto out_dput;
73156
73157@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
73158 xfs_mount_t *mp,
73159 void __user *arg)
73160 {
73161- xfs_fsop_geom_t fsgeo;
73162+ xfs_fsop_geom_t fsgeo;
73163 int error;
73164
73165 error = xfs_fs_geometry(mp, &fsgeo, 3);
73166diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
73167index bad485a..479bd32 100644
73168--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
73169+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
73170@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
73171 xfs_fsop_geom_t fsgeo;
73172 int error;
73173
73174+ memset(&fsgeo, 0, sizeof(fsgeo));
73175 error = xfs_fs_geometry(mp, &fsgeo, 3);
73176 if (error)
73177 return -error;
73178diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
73179index 1f3b4b8..6102f6d 100644
73180--- a/fs/xfs/linux-2.6/xfs_iops.c
73181+++ b/fs/xfs/linux-2.6/xfs_iops.c
73182@@ -468,7 +468,7 @@ xfs_vn_put_link(
73183 struct nameidata *nd,
73184 void *p)
73185 {
73186- char *s = nd_get_link(nd);
73187+ const char *s = nd_get_link(nd);
73188
73189 if (!IS_ERR(s))
73190 kfree(s);
73191diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
73192index 8971fb0..5fc1eb2 100644
73193--- a/fs/xfs/xfs_bmap.c
73194+++ b/fs/xfs/xfs_bmap.c
73195@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
73196 int nmap,
73197 int ret_nmap);
73198 #else
73199-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
73200+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
73201 #endif /* DEBUG */
73202
73203 #if defined(XFS_RW_TRACE)
73204diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
73205index e89734e..5e84d8d 100644
73206--- a/fs/xfs/xfs_dir2_sf.c
73207+++ b/fs/xfs/xfs_dir2_sf.c
73208@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
73209 }
73210
73211 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
73212- if (filldir(dirent, sfep->name, sfep->namelen,
73213+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
73214+ char name[sfep->namelen];
73215+ memcpy(name, sfep->name, sfep->namelen);
73216+ if (filldir(dirent, name, sfep->namelen,
73217+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
73218+ *offset = off & 0x7fffffff;
73219+ return 0;
73220+ }
73221+ } else if (filldir(dirent, sfep->name, sfep->namelen,
73222 off & 0x7fffffff, ino, DT_UNKNOWN)) {
73223 *offset = off & 0x7fffffff;
73224 return 0;
73225diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
73226index 8f32f50..b6a41e8 100644
73227--- a/fs/xfs/xfs_vnodeops.c
73228+++ b/fs/xfs/xfs_vnodeops.c
73229@@ -564,13 +564,18 @@ xfs_readlink(
73230
73231 xfs_ilock(ip, XFS_ILOCK_SHARED);
73232
73233- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
73234- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
73235-
73236 pathlen = ip->i_d.di_size;
73237 if (!pathlen)
73238 goto out;
73239
73240+ if (pathlen > MAXPATHLEN) {
73241+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
73242+ __func__, (unsigned long long)ip->i_ino, pathlen);
73243+ ASSERT(0);
73244+ error = XFS_ERROR(EFSCORRUPTED);
73245+ goto out;
73246+ }
73247+
73248 if (ip->i_df.if_flags & XFS_IFINLINE) {
73249 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
73250 link[pathlen] = '\0';
73251diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
73252new file mode 100644
73253index 0000000..50819f8
73254--- /dev/null
73255+++ b/grsecurity/Kconfig
73256@@ -0,0 +1,1077 @@
73257+#
73258+# grecurity configuration
73259+#
73260+
73261+menu "Grsecurity"
73262+
73263+config GRKERNSEC
73264+ bool "Grsecurity"
73265+ select CRYPTO
73266+ select CRYPTO_SHA256
73267+ help
73268+ If you say Y here, you will be able to configure many features
73269+ that will enhance the security of your system. It is highly
73270+ recommended that you say Y here and read through the help
73271+ for each option so that you fully understand the features and
73272+ can evaluate their usefulness for your machine.
73273+
73274+choice
73275+ prompt "Security Level"
73276+ depends on GRKERNSEC
73277+ default GRKERNSEC_CUSTOM
73278+
73279+config GRKERNSEC_LOW
73280+ bool "Low"
73281+ select GRKERNSEC_LINK
73282+ select GRKERNSEC_FIFO
73283+ select GRKERNSEC_RANDNET
73284+ select GRKERNSEC_DMESG
73285+ select GRKERNSEC_CHROOT
73286+ select GRKERNSEC_CHROOT_CHDIR
73287+
73288+ help
73289+ If you choose this option, several of the grsecurity options will
73290+ be enabled that will give you greater protection against a number
73291+ of attacks, while assuring that none of your software will have any
73292+ conflicts with the additional security measures. If you run a lot
73293+ of unusual software, or you are having problems with the higher
73294+ security levels, you should say Y here. With this option, the
73295+ following features are enabled:
73296+
73297+ - Linking restrictions
73298+ - FIFO restrictions
73299+ - Restricted dmesg
73300+ - Enforced chdir("/") on chroot
73301+ - Runtime module disabling
73302+
73303+config GRKERNSEC_MEDIUM
73304+ bool "Medium"
73305+ select PAX
73306+ select PAX_EI_PAX
73307+ select PAX_PT_PAX_FLAGS
73308+ select PAX_HAVE_ACL_FLAGS
73309+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73310+ select GRKERNSEC_CHROOT
73311+ select GRKERNSEC_CHROOT_SYSCTL
73312+ select GRKERNSEC_LINK
73313+ select GRKERNSEC_FIFO
73314+ select GRKERNSEC_DMESG
73315+ select GRKERNSEC_RANDNET
73316+ select GRKERNSEC_FORKFAIL
73317+ select GRKERNSEC_TIME
73318+ select GRKERNSEC_SIGNAL
73319+ select GRKERNSEC_CHROOT
73320+ select GRKERNSEC_CHROOT_UNIX
73321+ select GRKERNSEC_CHROOT_MOUNT
73322+ select GRKERNSEC_CHROOT_PIVOT
73323+ select GRKERNSEC_CHROOT_DOUBLE
73324+ select GRKERNSEC_CHROOT_CHDIR
73325+ select GRKERNSEC_CHROOT_MKNOD
73326+ select GRKERNSEC_PROC
73327+ select GRKERNSEC_PROC_USERGROUP
73328+ select PAX_RANDUSTACK
73329+ select PAX_ASLR
73330+ select PAX_RANDMMAP
73331+ select PAX_REFCOUNT if (X86 || SPARC64)
73332+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73333+
73334+ help
73335+ If you say Y here, several features in addition to those included
73336+ in the low additional security level will be enabled. These
73337+ features provide even more security to your system, though in rare
73338+ cases they may be incompatible with very old or poorly written
73339+ software. If you enable this option, make sure that your auth
73340+ service (identd) is running as gid 1001. With this option,
73341+ the following features (in addition to those provided in the
73342+ low additional security level) will be enabled:
73343+
73344+ - Failed fork logging
73345+ - Time change logging
73346+ - Signal logging
73347+ - Deny mounts in chroot
73348+ - Deny double chrooting
73349+ - Deny sysctl writes in chroot
73350+ - Deny mknod in chroot
73351+ - Deny access to abstract AF_UNIX sockets out of chroot
73352+ - Deny pivot_root in chroot
73353+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
73354+ - /proc restrictions with special GID set to 10 (usually wheel)
73355+ - Address Space Layout Randomization (ASLR)
73356+ - Prevent exploitation of most refcount overflows
73357+ - Bounds checking of copying between the kernel and userland
73358+
73359+config GRKERNSEC_HIGH
73360+ bool "High"
73361+ select GRKERNSEC_LINK
73362+ select GRKERNSEC_FIFO
73363+ select GRKERNSEC_DMESG
73364+ select GRKERNSEC_FORKFAIL
73365+ select GRKERNSEC_TIME
73366+ select GRKERNSEC_SIGNAL
73367+ select GRKERNSEC_CHROOT
73368+ select GRKERNSEC_CHROOT_SHMAT
73369+ select GRKERNSEC_CHROOT_UNIX
73370+ select GRKERNSEC_CHROOT_MOUNT
73371+ select GRKERNSEC_CHROOT_FCHDIR
73372+ select GRKERNSEC_CHROOT_PIVOT
73373+ select GRKERNSEC_CHROOT_DOUBLE
73374+ select GRKERNSEC_CHROOT_CHDIR
73375+ select GRKERNSEC_CHROOT_MKNOD
73376+ select GRKERNSEC_CHROOT_CAPS
73377+ select GRKERNSEC_CHROOT_SYSCTL
73378+ select GRKERNSEC_CHROOT_FINDTASK
73379+ select GRKERNSEC_SYSFS_RESTRICT
73380+ select GRKERNSEC_PROC
73381+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
73382+ select GRKERNSEC_HIDESYM
73383+ select GRKERNSEC_BRUTE
73384+ select GRKERNSEC_PROC_USERGROUP
73385+ select GRKERNSEC_KMEM
73386+ select GRKERNSEC_RESLOG
73387+ select GRKERNSEC_RANDNET
73388+ select GRKERNSEC_PROC_ADD
73389+ select GRKERNSEC_CHROOT_CHMOD
73390+ select GRKERNSEC_CHROOT_NICE
73391+ select GRKERNSEC_SETXID
73392+ select GRKERNSEC_AUDIT_MOUNT
73393+ select GRKERNSEC_MODHARDEN if (MODULES)
73394+ select GRKERNSEC_HARDEN_PTRACE
73395+ select GRKERNSEC_PTRACE_READEXEC
73396+ select GRKERNSEC_VM86 if (X86_32)
73397+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
73398+ select PAX
73399+ select PAX_RANDUSTACK
73400+ select PAX_ASLR
73401+ select PAX_RANDMMAP
73402+ select PAX_NOEXEC
73403+ select PAX_MPROTECT
73404+ select PAX_EI_PAX
73405+ select PAX_PT_PAX_FLAGS
73406+ select PAX_HAVE_ACL_FLAGS
73407+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
73408+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
73409+ select PAX_RANDKSTACK if (X86_TSC && X86)
73410+ select PAX_SEGMEXEC if (X86_32)
73411+ select PAX_PAGEEXEC
73412+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
73413+ select PAX_EMUTRAMP if (PARISC)
73414+ select PAX_EMUSIGRT if (PARISC)
73415+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
73416+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
73417+ select PAX_REFCOUNT if (X86 || SPARC64)
73418+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
73419+ help
73420+ If you say Y here, many of the features of grsecurity will be
73421+ enabled, which will protect you against many kinds of attacks
73422+ against your system. The heightened security comes at a cost
73423+ of an increased chance of incompatibilities with rare software
73424+ on your machine. Since this security level enables PaX, you should
73425+ view <http://pax.grsecurity.net> and read about the PaX
73426+ project. While you are there, download chpax and run it on
73427+ binaries that cause problems with PaX. Also remember that
73428+ since the /proc restrictions are enabled, you must run your
73429+ identd as gid 1001. This security level enables the following
73430+ features in addition to those listed in the low and medium
73431+ security levels:
73432+
73433+ - Additional /proc restrictions
73434+ - Chmod restrictions in chroot
73435+ - No signals, ptrace, or viewing of processes outside of chroot
73436+ - Capability restrictions in chroot
73437+ - Deny fchdir out of chroot
73438+ - Priority restrictions in chroot
73439+ - Segmentation-based implementation of PaX
73440+ - Mprotect restrictions
73441+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
73442+ - Kernel stack randomization
73443+ - Mount/unmount/remount logging
73444+ - Kernel symbol hiding
73445+ - Hardening of module auto-loading
73446+ - Ptrace restrictions
73447+ - Restricted vm86 mode
73448+ - Restricted sysfs/debugfs
73449+ - Active kernel exploit response
73450+
73451+config GRKERNSEC_CUSTOM
73452+ bool "Custom"
73453+ help
73454+ If you say Y here, you will be able to configure every grsecurity
73455+ option, which allows you to enable many more features that aren't
73456+ covered in the basic security levels. These additional features
73457+ include TPE, socket restrictions, and the sysctl system for
73458+ grsecurity. It is advised that you read through the help for
73459+ each option to determine its usefulness in your situation.
73460+
73461+endchoice
73462+
73463+menu "Memory Protections"
73464+depends on GRKERNSEC
73465+
73466+config GRKERNSEC_KMEM
73467+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
73468+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
73469+ help
73470+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
73471+ be written to or read from to modify or leak the contents of the running
73472+ kernel. /dev/port will also not be allowed to be opened. If you have module
73473+ support disabled, enabling this will close up four ways that are
73474+ currently used to insert malicious code into the running kernel.
73475+ Even with all these features enabled, we still highly recommend that
73476+ you use the RBAC system, as it is still possible for an attacker to
73477+ modify the running kernel through privileged I/O granted by ioperm/iopl.
73478+ If you are not using XFree86, you may be able to stop this additional
73479+ case by enabling the 'Disable privileged I/O' option. Though nothing
73480+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
73481+ but only to video memory, which is the only writing we allow in this
73482+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
73483+ not be allowed to mprotect it with PROT_WRITE later.
73484+ It is highly recommended that you say Y here if you meet all the
73485+ conditions above.
73486+
73487+config GRKERNSEC_VM86
73488+ bool "Restrict VM86 mode"
73489+ depends on X86_32
73490+
73491+ help
73492+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
73493+ make use of a special execution mode on 32bit x86 processors called
73494+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
73495+ video cards and will still work with this option enabled. The purpose
73496+ of the option is to prevent exploitation of emulation errors in
73497+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
73498+ Nearly all users should be able to enable this option.
73499+
73500+config GRKERNSEC_IO
73501+ bool "Disable privileged I/O"
73502+ depends on X86
73503+ select RTC_CLASS
73504+ select RTC_INTF_DEV
73505+ select RTC_DRV_CMOS
73506+
73507+ help
73508+ If you say Y here, all ioperm and iopl calls will return an error.
73509+ Ioperm and iopl can be used to modify the running kernel.
73510+ Unfortunately, some programs need this access to operate properly,
73511+ the most notable of which are XFree86 and hwclock. hwclock can be
73512+ remedied by having RTC support in the kernel, so real-time
73513+ clock support is enabled if this option is enabled, to ensure
73514+ that hwclock operates correctly. XFree86 still will not
73515+ operate correctly with this option enabled, so DO NOT CHOOSE Y
73516+ IF YOU USE XFree86. If you use XFree86 and you still want to
73517+ protect your kernel against modification, use the RBAC system.
73518+
73519+config GRKERNSEC_PROC_MEMMAP
73520+ bool "Harden ASLR against information leaks and entropy reduction"
73521+ default y if (PAX_NOEXEC || PAX_ASLR)
73522+ depends on PAX_NOEXEC || PAX_ASLR
73523+ help
73524+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
73525+ give no information about the addresses of its mappings if
73526+ PaX features that rely on random addresses are enabled on the task.
73527+ In addition to sanitizing this information and disabling other
73528+ dangerous sources of information, this option causes reads of sensitive
73529+ /proc/<pid> entries where the file descriptor was opened in a different
73530+ task than the one performing the read. Such attempts are logged.
73531+ This option also limits argv/env strings for suid/sgid binaries
73532+ to 512KB to prevent a complete exhaustion of the stack entropy provided
73533+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
73534+ binaries to prevent alternative mmap layouts from being abused.
73535+
73536+ If you use PaX it is essential that you say Y here as it closes up
73537+ several holes that make full ASLR useless locally.
73538+
73539+config GRKERNSEC_BRUTE
73540+ bool "Deter exploit bruteforcing"
73541+ help
73542+ If you say Y here, attempts to bruteforce exploits against forking
73543+ daemons such as apache or sshd, as well as against suid/sgid binaries
73544+ will be deterred. When a child of a forking daemon is killed by PaX
73545+ or crashes due to an illegal instruction or other suspicious signal,
73546+ the parent process will be delayed 30 seconds upon every subsequent
73547+ fork until the administrator is able to assess the situation and
73548+ restart the daemon.
73549+ In the suid/sgid case, the attempt is logged, the user has all their
73550+ processes terminated, and they are prevented from executing any further
73551+ processes for 15 minutes.
73552+ It is recommended that you also enable signal logging in the auditing
73553+ section so that logs are generated when a process triggers a suspicious
73554+ signal.
73555+ If the sysctl option is enabled, a sysctl option with name
73556+ "deter_bruteforce" is created.
73557+
73558+config GRKERNSEC_MODHARDEN
73559+ bool "Harden module auto-loading"
73560+ depends on MODULES
73561+ help
73562+ If you say Y here, module auto-loading in response to use of some
73563+ feature implemented by an unloaded module will be restricted to
73564+ root users. Enabling this option helps defend against attacks
73565+ by unprivileged users who abuse the auto-loading behavior to
73566+ cause a vulnerable module to load that is then exploited.
73567+
73568+ If this option prevents a legitimate use of auto-loading for a
73569+ non-root user, the administrator can execute modprobe manually
73570+ with the exact name of the module mentioned in the alert log.
73571+ Alternatively, the administrator can add the module to the list
73572+ of modules loaded at boot by modifying init scripts.
73573+
73574+ Modification of init scripts will most likely be needed on
73575+ Ubuntu servers with encrypted home directory support enabled,
73576+ as the first non-root user logging in will cause the ecb(aes),
73577+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
73578+
73579+config GRKERNSEC_HIDESYM
73580+ bool "Hide kernel symbols"
73581+ help
73582+ If you say Y here, getting information on loaded modules, and
73583+ displaying all kernel symbols through a syscall will be restricted
73584+ to users with CAP_SYS_MODULE. For software compatibility reasons,
73585+ /proc/kallsyms will be restricted to the root user. The RBAC
73586+ system can hide that entry even from root.
73587+
73588+ This option also prevents leaking of kernel addresses through
73589+ several /proc entries.
73590+
73591+ Note that this option is only effective provided the following
73592+ conditions are met:
73593+ 1) The kernel using grsecurity is not precompiled by some distribution
73594+ 2) You have also enabled GRKERNSEC_DMESG
73595+ 3) You are using the RBAC system and hiding other files such as your
73596+ kernel image and System.map. Alternatively, enabling this option
73597+ causes the permissions on /boot, /lib/modules, and the kernel
73598+ source directory to change at compile time to prevent
73599+ reading by non-root users.
73600+ If the above conditions are met, this option will aid in providing a
73601+ useful protection against local kernel exploitation of overflows
73602+ and arbitrary read/write vulnerabilities.
73603+
73604+config GRKERNSEC_KERN_LOCKOUT
73605+ bool "Active kernel exploit response"
73606+ depends on X86 || ARM || PPC || SPARC
73607+ help
73608+ If you say Y here, when a PaX alert is triggered due to suspicious
73609+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
73610+ or an OOPs occurs due to bad memory accesses, instead of just
73611+ terminating the offending process (and potentially allowing
73612+ a subsequent exploit from the same user), we will take one of two
73613+ actions:
73614+ If the user was root, we will panic the system
73615+ If the user was non-root, we will log the attempt, terminate
73616+ all processes owned by the user, then prevent them from creating
73617+ any new processes until the system is restarted
73618+ This deters repeated kernel exploitation/bruteforcing attempts
73619+ and is useful for later forensics.
73620+
73621+endmenu
73622+menu "Role Based Access Control Options"
73623+depends on GRKERNSEC
73624+
73625+config GRKERNSEC_RBAC_DEBUG
73626+ bool
73627+
73628+config GRKERNSEC_NO_RBAC
73629+ bool "Disable RBAC system"
73630+ help
73631+ If you say Y here, the /dev/grsec device will be removed from the kernel,
73632+ preventing the RBAC system from being enabled. You should only say Y
73633+ here if you have no intention of using the RBAC system, so as to prevent
73634+ an attacker with root access from misusing the RBAC system to hide files
73635+ and processes when loadable module support and /dev/[k]mem have been
73636+ locked down.
73637+
73638+config GRKERNSEC_ACL_HIDEKERN
73639+ bool "Hide kernel processes"
73640+ help
73641+ If you say Y here, all kernel threads will be hidden to all
73642+ processes but those whose subject has the "view hidden processes"
73643+ flag.
73644+
73645+config GRKERNSEC_ACL_MAXTRIES
73646+ int "Maximum tries before password lockout"
73647+ default 3
73648+ help
73649+ This option enforces the maximum number of times a user can attempt
73650+ to authorize themselves with the grsecurity RBAC system before being
73651+ denied the ability to attempt authorization again for a specified time.
73652+ The lower the number, the harder it will be to brute-force a password.
73653+
73654+config GRKERNSEC_ACL_TIMEOUT
73655+ int "Time to wait after max password tries, in seconds"
73656+ default 30
73657+ help
73658+ This option specifies the time the user must wait after attempting to
73659+ authorize to the RBAC system with the maximum number of invalid
73660+ passwords. The higher the number, the harder it will be to brute-force
73661+ a password.
73662+
73663+endmenu
73664+menu "Filesystem Protections"
73665+depends on GRKERNSEC
73666+
73667+config GRKERNSEC_PROC
73668+ bool "Proc restrictions"
73669+ help
73670+ If you say Y here, the permissions of the /proc filesystem
73671+ will be altered to enhance system security and privacy. You MUST
73672+ choose either a user only restriction or a user and group restriction.
73673+ Depending upon the option you choose, you can either restrict users to
73674+ see only the processes they themselves run, or choose a group that can
73675+ view all processes and files normally restricted to root if you choose
73676+ the "restrict to user only" option. NOTE: If you're running identd or
73677+ ntpd as a non-root user, you will have to run it as the group you
73678+ specify here.
73679+
73680+config GRKERNSEC_PROC_USER
73681+ bool "Restrict /proc to user only"
73682+ depends on GRKERNSEC_PROC
73683+ help
73684+ If you say Y here, non-root users will only be able to view their own
73685+ processes, and restricts them from viewing network-related information,
73686+ and viewing kernel symbol and module information.
73687+
73688+config GRKERNSEC_PROC_USERGROUP
73689+ bool "Allow special group"
73690+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
73691+ help
73692+ If you say Y here, you will be able to select a group that will be
73693+ able to view all processes and network-related information. If you've
73694+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
73695+ remain hidden. This option is useful if you want to run identd as
73696+ a non-root user.
73697+
73698+config GRKERNSEC_PROC_GID
73699+ int "GID for special group"
73700+ depends on GRKERNSEC_PROC_USERGROUP
73701+ default 1001
73702+
73703+config GRKERNSEC_PROC_ADD
73704+ bool "Additional restrictions"
73705+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
73706+ help
73707+ If you say Y here, additional restrictions will be placed on
73708+ /proc that keep normal users from viewing device information and
73709+ slabinfo information that could be useful for exploits.
73710+
73711+config GRKERNSEC_LINK
73712+ bool "Linking restrictions"
73713+ help
73714+ If you say Y here, /tmp race exploits will be prevented, since users
73715+ will no longer be able to follow symlinks owned by other users in
73716+ world-writable +t directories (e.g. /tmp), unless the owner of the
73717+ symlink is the owner of the directory. users will also not be
73718+ able to hardlink to files they do not own. If the sysctl option is
73719+ enabled, a sysctl option with name "linking_restrictions" is created.
73720+
73721+config GRKERNSEC_FIFO
73722+ bool "FIFO restrictions"
73723+ help
73724+ If you say Y here, users will not be able to write to FIFOs they don't
73725+ own in world-writable +t directories (e.g. /tmp), unless the owner of
73726+ the FIFO is the same owner of the directory it's held in. If the sysctl
73727+ option is enabled, a sysctl option with name "fifo_restrictions" is
73728+ created.
73729+
73730+config GRKERNSEC_SYSFS_RESTRICT
73731+ bool "Sysfs/debugfs restriction"
73732+ depends on SYSFS
73733+ help
73734+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
73735+ any filesystem normally mounted under it (e.g. debugfs) will be
73736+ mostly accessible only by root. These filesystems generally provide access
73737+ to hardware and debug information that isn't appropriate for unprivileged
73738+ users of the system. Sysfs and debugfs have also become a large source
73739+ of new vulnerabilities, ranging from infoleaks to local compromise.
73740+ There has been very little oversight with an eye toward security involved
73741+ in adding new exporters of information to these filesystems, so their
73742+ use is discouraged.
73743+ For reasons of compatibility, a few directories have been whitelisted
73744+ for access by non-root users:
73745+ /sys/fs/selinux
73746+ /sys/fs/fuse
73747+ /sys/devices/system/cpu
73748+
73749+config GRKERNSEC_ROFS
73750+ bool "Runtime read-only mount protection"
73751+ help
73752+ If you say Y here, a sysctl option with name "romount_protect" will
73753+ be created. By setting this option to 1 at runtime, filesystems
73754+ will be protected in the following ways:
73755+ * No new writable mounts will be allowed
73756+ * Existing read-only mounts won't be able to be remounted read/write
73757+ * Write operations will be denied on all block devices
73758+ This option acts independently of grsec_lock: once it is set to 1,
73759+ it cannot be turned off. Therefore, please be mindful of the resulting
73760+ behavior if this option is enabled in an init script on a read-only
73761+ filesystem. This feature is mainly intended for secure embedded systems.
73762+
73763+config GRKERNSEC_CHROOT
73764+ bool "Chroot jail restrictions"
73765+ help
73766+ If you say Y here, you will be able to choose several options that will
73767+ make breaking out of a chrooted jail much more difficult. If you
73768+ encounter no software incompatibilities with the following options, it
73769+ is recommended that you enable each one.
73770+
73771+config GRKERNSEC_CHROOT_MOUNT
73772+ bool "Deny mounts"
73773+ depends on GRKERNSEC_CHROOT
73774+ help
73775+ If you say Y here, processes inside a chroot will not be able to
73776+ mount or remount filesystems. If the sysctl option is enabled, a
73777+ sysctl option with name "chroot_deny_mount" is created.
73778+
73779+config GRKERNSEC_CHROOT_DOUBLE
73780+ bool "Deny double-chroots"
73781+ depends on GRKERNSEC_CHROOT
73782+ help
73783+ If you say Y here, processes inside a chroot will not be able to chroot
73784+ again outside the chroot. This is a widely used method of breaking
73785+ out of a chroot jail and should not be allowed. If the sysctl
73786+ option is enabled, a sysctl option with name
73787+ "chroot_deny_chroot" is created.
73788+
73789+config GRKERNSEC_CHROOT_PIVOT
73790+ bool "Deny pivot_root in chroot"
73791+ depends on GRKERNSEC_CHROOT
73792+ help
73793+ If you say Y here, processes inside a chroot will not be able to use
73794+ a function called pivot_root() that was introduced in Linux 2.3.41. It
73795+ works similarly to chroot in that it changes the root filesystem. This
73796+ function could be misused in a chrooted process to attempt to break out
73797+ of the chroot, and therefore should not be allowed. If the sysctl
73798+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
73799+ created.
73800+
73801+config GRKERNSEC_CHROOT_CHDIR
73802+ bool "Enforce chdir(\"/\") on all chroots"
73803+ depends on GRKERNSEC_CHROOT
73804+ help
73805+ If you say Y here, the current working directory of all newly-chrooted
73806+ applications will be set to the root directory of the chroot.
73807+ The man page on chroot(2) states:
73808+ Note that this call does not change the current working
73809+ directory, so that `.' can be outside the tree rooted at
73810+ `/'. In particular, the super-user can escape from a
73811+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
73812+
73813+ It is recommended that you say Y here, since it's not known to break
73814+ any software. If the sysctl option is enabled, a sysctl option with
73815+ name "chroot_enforce_chdir" is created.
73816+
73817+config GRKERNSEC_CHROOT_CHMOD
73818+ bool "Deny (f)chmod +s"
73819+ depends on GRKERNSEC_CHROOT
73820+ help
73821+ If you say Y here, processes inside a chroot will not be able to chmod
73822+ or fchmod files to make them have suid or sgid bits. This protects
73823+ against another published method of breaking a chroot. If the sysctl
73824+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
73825+ created.
73826+
73827+config GRKERNSEC_CHROOT_FCHDIR
73828+ bool "Deny fchdir out of chroot"
73829+ depends on GRKERNSEC_CHROOT
73830+ help
73831+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
73832+ to a file descriptor of the chrooting process that points to a directory
73833+ outside the filesystem will be stopped. If the sysctl option
73834+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
73835+
73836+config GRKERNSEC_CHROOT_MKNOD
73837+ bool "Deny mknod"
73838+ depends on GRKERNSEC_CHROOT
73839+ help
73840+ If you say Y here, processes inside a chroot will not be allowed to
73841+ mknod. The problem with using mknod inside a chroot is that it
73842+ would allow an attacker to create a device entry that is the same
73843+ as one on the physical root of your system, which could be
73844+ anything from the console device to a device for your hard drive (which
73845+ they could then use to wipe the drive or steal data). It is recommended
73846+ that you say Y here, unless you run into software incompatibilities.
73847+ If the sysctl option is enabled, a sysctl option with name
73848+ "chroot_deny_mknod" is created.
73849+
73850+config GRKERNSEC_CHROOT_SHMAT
73851+ bool "Deny shmat() out of chroot"
73852+ depends on GRKERNSEC_CHROOT
73853+ help
73854+ If you say Y here, processes inside a chroot will not be able to attach
73855+ to shared memory segments that were created outside of the chroot jail.
73856+ It is recommended that you say Y here. If the sysctl option is enabled,
73857+ a sysctl option with name "chroot_deny_shmat" is created.
73858+
73859+config GRKERNSEC_CHROOT_UNIX
73860+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
73861+ depends on GRKERNSEC_CHROOT
73862+ help
73863+ If you say Y here, processes inside a chroot will not be able to
73864+ connect to abstract (meaning not belonging to a filesystem) Unix
73865+ domain sockets that were bound outside of a chroot. It is recommended
73866+ that you say Y here. If the sysctl option is enabled, a sysctl option
73867+ with name "chroot_deny_unix" is created.
73868+
73869+config GRKERNSEC_CHROOT_FINDTASK
73870+ bool "Protect outside processes"
73871+ depends on GRKERNSEC_CHROOT
73872+ help
73873+ If you say Y here, processes inside a chroot will not be able to
73874+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
73875+ getsid, or view any process outside of the chroot. If the sysctl
73876+ option is enabled, a sysctl option with name "chroot_findtask" is
73877+ created.
73878+
73879+config GRKERNSEC_CHROOT_NICE
73880+ bool "Restrict priority changes"
73881+ depends on GRKERNSEC_CHROOT
73882+ help
73883+ If you say Y here, processes inside a chroot will not be able to raise
73884+ the priority of processes in the chroot, or alter the priority of
73885+ processes outside the chroot. This provides more security than simply
73886+ removing CAP_SYS_NICE from the process' capability set. If the
73887+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
73888+ is created.
73889+
73890+config GRKERNSEC_CHROOT_SYSCTL
73891+ bool "Deny sysctl writes"
73892+ depends on GRKERNSEC_CHROOT
73893+ help
73894+ If you say Y here, an attacker in a chroot will not be able to
73895+ write to sysctl entries, either by sysctl(2) or through a /proc
73896+ interface. It is strongly recommended that you say Y here. If the
73897+ sysctl option is enabled, a sysctl option with name
73898+ "chroot_deny_sysctl" is created.
73899+
73900+config GRKERNSEC_CHROOT_CAPS
73901+ bool "Capability restrictions"
73902+ depends on GRKERNSEC_CHROOT
73903+ help
73904+ If you say Y here, the capabilities on all processes within a
73905+ chroot jail will be lowered to stop module insertion, raw i/o,
73906+ system and net admin tasks, rebooting the system, modifying immutable
73907+ files, modifying IPC owned by another, and changing the system time.
73908+ This is left an option because it can break some apps. Disable this
73909+ if your chrooted apps are having problems performing those kinds of
73910+ tasks. If the sysctl option is enabled, a sysctl option with
73911+ name "chroot_caps" is created.
73912+
73913+endmenu
73914+menu "Kernel Auditing"
73915+depends on GRKERNSEC
73916+
73917+config GRKERNSEC_AUDIT_GROUP
73918+ bool "Single group for auditing"
73919+ help
73920+ If you say Y here, the exec, chdir, and (un)mount logging features
73921+ will only operate on a group you specify. This option is recommended
73922+ if you only want to watch certain users instead of having a large
73923+ amount of logs from the entire system. If the sysctl option is enabled,
73924+ a sysctl option with name "audit_group" is created.
73925+
73926+config GRKERNSEC_AUDIT_GID
73927+ int "GID for auditing"
73928+ depends on GRKERNSEC_AUDIT_GROUP
73929+ default 1007
73930+
73931+config GRKERNSEC_EXECLOG
73932+ bool "Exec logging"
73933+ help
73934+ If you say Y here, all execve() calls will be logged (since the
73935+ other exec*() calls are frontends to execve(), all execution
73936+ will be logged). Useful for shell-servers that like to keep track
73937+ of their users. If the sysctl option is enabled, a sysctl option with
73938+ name "exec_logging" is created.
73939+ WARNING: This option when enabled will produce a LOT of logs, especially
73940+ on an active system.
73941+
73942+config GRKERNSEC_RESLOG
73943+ bool "Resource logging"
73944+ help
73945+ If you say Y here, all attempts to overstep resource limits will
73946+ be logged with the resource name, the requested size, and the current
73947+ limit. It is highly recommended that you say Y here. If the sysctl
73948+ option is enabled, a sysctl option with name "resource_logging" is
73949+ created. If the RBAC system is enabled, the sysctl value is ignored.
73950+
73951+config GRKERNSEC_CHROOT_EXECLOG
73952+ bool "Log execs within chroot"
73953+ help
73954+ If you say Y here, all executions inside a chroot jail will be logged
73955+ to syslog. This can cause a large amount of logs if certain
73956+ applications (eg. djb's daemontools) are installed on the system, and
73957+ is therefore left as an option. If the sysctl option is enabled, a
73958+ sysctl option with name "chroot_execlog" is created.
73959+
73960+config GRKERNSEC_AUDIT_PTRACE
73961+ bool "Ptrace logging"
73962+ help
73963+ If you say Y here, all attempts to attach to a process via ptrace
73964+ will be logged. If the sysctl option is enabled, a sysctl option
73965+ with name "audit_ptrace" is created.
73966+
73967+config GRKERNSEC_AUDIT_CHDIR
73968+ bool "Chdir logging"
73969+ help
73970+ If you say Y here, all chdir() calls will be logged. If the sysctl
73971+ option is enabled, a sysctl option with name "audit_chdir" is created.
73972+
73973+config GRKERNSEC_AUDIT_MOUNT
73974+ bool "(Un)Mount logging"
73975+ help
73976+ If you say Y here, all mounts and unmounts will be logged. If the
73977+ sysctl option is enabled, a sysctl option with name "audit_mount" is
73978+ created.
73979+
73980+config GRKERNSEC_SIGNAL
73981+ bool "Signal logging"
73982+ help
73983+ If you say Y here, certain important signals will be logged, such as
73984+ SIGSEGV, which will as a result inform you of when an error in a program
73985+ occurred, which in some cases could mean a possible exploit attempt.
73986+ If the sysctl option is enabled, a sysctl option with name
73987+ "signal_logging" is created.
73988+
73989+config GRKERNSEC_FORKFAIL
73990+ bool "Fork failure logging"
73991+ help
73992+ If you say Y here, all failed fork() attempts will be logged.
73993+ This could suggest a fork bomb, or someone attempting to overstep
73994+ their process limit. If the sysctl option is enabled, a sysctl option
73995+ with name "forkfail_logging" is created.
73996+
73997+config GRKERNSEC_TIME
73998+ bool "Time change logging"
73999+ help
74000+ If you say Y here, any changes of the system clock will be logged.
74001+ If the sysctl option is enabled, a sysctl option with name
74002+ "timechange_logging" is created.
74003+
74004+config GRKERNSEC_PROC_IPADDR
74005+ bool "/proc/<pid>/ipaddr support"
74006+ help
74007+ If you say Y here, a new entry will be added to each /proc/<pid>
74008+ directory that contains the IP address of the person using the task.
74009+ The IP is carried across local TCP and AF_UNIX stream sockets.
74010+ This information can be useful for IDS/IPSes to perform remote response
74011+ to a local attack. The entry is readable by only the owner of the
74012+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
74013+ the RBAC system), and thus does not create privacy concerns.
74014+
74015+config GRKERNSEC_RWXMAP_LOG
74016+ bool 'Denied RWX mmap/mprotect logging'
74017+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
74018+ help
74019+ If you say Y here, calls to mmap() and mprotect() with explicit
74020+ usage of PROT_WRITE and PROT_EXEC together will be logged when
74021+ denied by the PAX_MPROTECT feature. If the sysctl option is
74022+ enabled, a sysctl option with name "rwxmap_logging" is created.
74023+
74024+config GRKERNSEC_AUDIT_TEXTREL
74025+ bool 'ELF text relocations logging (READ HELP)'
74026+ depends on PAX_MPROTECT
74027+ help
74028+ If you say Y here, text relocations will be logged with the filename
74029+ of the offending library or binary. The purpose of the feature is
74030+ to help Linux distribution developers get rid of libraries and
74031+ binaries that need text relocations which hinder the future progress
74032+ of PaX. Only Linux distribution developers should say Y here, and
74033+ never on a production machine, as this option creates an information
74034+ leak that could aid an attacker in defeating the randomization of
74035+ a single memory region. If the sysctl option is enabled, a sysctl
74036+ option with name "audit_textrel" is created.
74037+
74038+endmenu
74039+
74040+menu "Executable Protections"
74041+depends on GRKERNSEC
74042+
74043+config GRKERNSEC_DMESG
74044+ bool "Dmesg(8) restriction"
74045+ help
74046+ If you say Y here, non-root users will not be able to use dmesg(8)
74047+ to view up to the last 4kb of messages in the kernel's log buffer.
74048+ The kernel's log buffer often contains kernel addresses and other
74049+ identifying information useful to an attacker in fingerprinting a
74050+ system for a targeted exploit.
74051+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
74052+ created.
74053+
74054+config GRKERNSEC_HARDEN_PTRACE
74055+ bool "Deter ptrace-based process snooping"
74056+ help
74057+ If you say Y here, TTY sniffers and other malicious monitoring
74058+ programs implemented through ptrace will be defeated. If you
74059+ have been using the RBAC system, this option has already been
74060+ enabled for several years for all users, with the ability to make
74061+ fine-grained exceptions.
74062+
74063+ This option only affects the ability of non-root users to ptrace
74064+ processes that are not a descendant of the ptracing process.
74065+ This means that strace ./binary and gdb ./binary will still work,
74066+ but attaching to arbitrary processes will not. If the sysctl
74067+ option is enabled, a sysctl option with name "harden_ptrace" is
74068+ created.
74069+
74070+config GRKERNSEC_PTRACE_READEXEC
74071+ bool "Require read access to ptrace sensitive binaries"
74072+ help
74073+ If you say Y here, unprivileged users will not be able to ptrace unreadable
74074+ binaries. This option is useful in environments that
74075+ remove the read bits (e.g. file mode 4711) from suid binaries to
74076+ prevent infoleaking of their contents. This option adds
74077+ consistency to the use of that file mode, as the binary could normally
74078+ be read out when run without privileges while ptracing.
74079+
74080+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
74081+ is created.
74082+
74083+config GRKERNSEC_SETXID
74084+ bool "Enforce consistent multithreaded privileges"
74085+ help
74086+ If you say Y here, a change from a root uid to a non-root uid
74087+ in a multithreaded application will cause the resulting uids,
74088+ gids, supplementary groups, and capabilities in that thread
74089+ to be propagated to the other threads of the process. In most
74090+ cases this is unnecessary, as glibc will emulate this behavior
74091+ on behalf of the application. Other libcs do not act in the
74092+ same way, allowing the other threads of the process to continue
74093+ running with root privileges. If the sysctl option is enabled,
74094+ a sysctl option with name "consistent_setxid" is created.
74095+
74096+config GRKERNSEC_TPE
74097+ bool "Trusted Path Execution (TPE)"
74098+ help
74099+ If you say Y here, you will be able to choose a gid to add to the
74100+ supplementary groups of users you want to mark as "untrusted."
74101+ These users will not be able to execute any files that are not in
74102+ root-owned directories writable only by root. If the sysctl option
74103+ is enabled, a sysctl option with name "tpe" is created.
74104+
74105+config GRKERNSEC_TPE_ALL
74106+ bool "Partially restrict all non-root users"
74107+ depends on GRKERNSEC_TPE
74108+ help
74109+ If you say Y here, all non-root users will be covered under
74110+ a weaker TPE restriction. This is separate from, and in addition to,
74111+ the main TPE options that you have selected elsewhere. Thus, if a
74112+ "trusted" GID is chosen, this restriction applies to even that GID.
74113+ Under this restriction, all non-root users will only be allowed to
74114+ execute files in directories they own that are not group or
74115+ world-writable, or in directories owned by root and writable only by
74116+ root. If the sysctl option is enabled, a sysctl option with name
74117+ "tpe_restrict_all" is created.
74118+
74119+config GRKERNSEC_TPE_INVERT
74120+ bool "Invert GID option"
74121+ depends on GRKERNSEC_TPE
74122+ help
74123+ If you say Y here, the group you specify in the TPE configuration will
74124+ decide what group TPE restrictions will be *disabled* for. This
74125+ option is useful if you want TPE restrictions to be applied to most
74126+ users on the system. If the sysctl option is enabled, a sysctl option
74127+ with name "tpe_invert" is created. Unlike other sysctl options, this
74128+ entry will default to on for backward-compatibility.
74129+
74130+config GRKERNSEC_TPE_GID
74131+ int "GID for untrusted users"
74132+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
74133+ default 1005
74134+ help
74135+ Setting this GID determines what group TPE restrictions will be
74136+ *enabled* for. If the sysctl option is enabled, a sysctl option
74137+ with name "tpe_gid" is created.
74138+
74139+config GRKERNSEC_TPE_GID
74140+ int "GID for trusted users"
74141+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
74142+ default 1005
74143+ help
74144+ Setting this GID determines what group TPE restrictions will be
74145+ *disabled* for. If the sysctl option is enabled, a sysctl option
74146+ with name "tpe_gid" is created.
74147+
74148+endmenu
74149+menu "Network Protections"
74150+depends on GRKERNSEC
74151+
74152+config GRKERNSEC_RANDNET
74153+ bool "Larger entropy pools"
74154+ help
74155+ If you say Y here, the entropy pools used for many features of Linux
74156+ and grsecurity will be doubled in size. Since several grsecurity
74157+ features use additional randomness, it is recommended that you say Y
74158+ here. Saying Y here has a similar effect as modifying
74159+ /proc/sys/kernel/random/poolsize.
74160+
74161+config GRKERNSEC_BLACKHOLE
74162+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
74163+ depends on NET
74164+ help
74165+ If you say Y here, neither TCP resets nor ICMP
74166+ destination-unreachable packets will be sent in response to packets
74167+ sent to ports for which no associated listening process exists.
74168+ This feature supports both IPV4 and IPV6 and exempts the
74169+ loopback interface from blackholing. Enabling this feature
74170+ makes a host more resilient to DoS attacks and reduces network
74171+ visibility against scanners.
74172+
74173+ The blackhole feature as-implemented is equivalent to the FreeBSD
74174+ blackhole feature, as it prevents RST responses to all packets, not
74175+ just SYNs. Under most application behavior this causes no
74176+ problems, but applications (like haproxy) may not close certain
74177+ connections in a way that cleanly terminates them on the remote
74178+ end, leaving the remote host in LAST_ACK state. Because of this
74179+ side-effect and to prevent intentional LAST_ACK DoSes, this
74180+ feature also adds automatic mitigation against such attacks.
74181+ The mitigation drastically reduces the amount of time a socket
74182+ can spend in LAST_ACK state. If you're using haproxy and not
74183+ all servers it connects to have this option enabled, consider
74184+ disabling this feature on the haproxy host.
74185+
74186+ If the sysctl option is enabled, two sysctl options with names
74187+ "ip_blackhole" and "lastack_retries" will be created.
74188+ While "ip_blackhole" takes the standard zero/non-zero on/off
74189+ toggle, "lastack_retries" uses the same kinds of values as
74190+ "tcp_retries1" and "tcp_retries2". The default value of 4
74191+ prevents a socket from lasting more than 45 seconds in LAST_ACK
74192+ state.
74193+
74194+config GRKERNSEC_SOCKET
74195+ bool "Socket restrictions"
74196+ depends on NET
74197+ help
74198+ If you say Y here, you will be able to choose from several options.
74199+ If you assign a GID on your system and add it to the supplementary
74200+ groups of users you want to restrict socket access to, this patch
74201+ will perform up to three things, based on the option(s) you choose.
74202+
74203+config GRKERNSEC_SOCKET_ALL
74204+ bool "Deny any sockets to group"
74205+ depends on GRKERNSEC_SOCKET
74206+ help
74207+ If you say Y here, you will be able to choose a GID whose users will
74208+ be unable to connect to other hosts from your machine or run server
74209+ applications from your machine. If the sysctl option is enabled, a
74210+ sysctl option with name "socket_all" is created.
74211+
74212+config GRKERNSEC_SOCKET_ALL_GID
74213+ int "GID to deny all sockets for"
74214+ depends on GRKERNSEC_SOCKET_ALL
74215+ default 1004
74216+ help
74217+ Here you can choose the GID to disable socket access for. Remember to
74218+ add the users you want socket access disabled for to the GID
74219+ specified here. If the sysctl option is enabled, a sysctl option
74220+ with name "socket_all_gid" is created.
74221+
74222+config GRKERNSEC_SOCKET_CLIENT
74223+ bool "Deny client sockets to group"
74224+ depends on GRKERNSEC_SOCKET
74225+ help
74226+ If you say Y here, you will be able to choose a GID whose users will
74227+ be unable to connect to other hosts from your machine, but will be
74228+ able to run servers. If this option is enabled, all users in the group
74229+ you specify will have to use passive mode when initiating ftp transfers
74230+ from the shell on your machine. If the sysctl option is enabled, a
74231+ sysctl option with name "socket_client" is created.
74232+
74233+config GRKERNSEC_SOCKET_CLIENT_GID
74234+ int "GID to deny client sockets for"
74235+ depends on GRKERNSEC_SOCKET_CLIENT
74236+ default 1003
74237+ help
74238+ Here you can choose the GID to disable client socket access for.
74239+ Remember to add the users you want client socket access disabled for to
74240+ the GID specified here. If the sysctl option is enabled, a sysctl
74241+ option with name "socket_client_gid" is created.
74242+
74243+config GRKERNSEC_SOCKET_SERVER
74244+ bool "Deny server sockets to group"
74245+ depends on GRKERNSEC_SOCKET
74246+ help
74247+ If you say Y here, you will be able to choose a GID whose users will
74248+ be unable to run server applications from your machine. If the sysctl
74249+ option is enabled, a sysctl option with name "socket_server" is created.
74250+
74251+config GRKERNSEC_SOCKET_SERVER_GID
74252+ int "GID to deny server sockets for"
74253+ depends on GRKERNSEC_SOCKET_SERVER
74254+ default 1002
74255+ help
74256+ Here you can choose the GID to disable server socket access for.
74257+ Remember to add the users you want server socket access disabled for to
74258+ the GID specified here. If the sysctl option is enabled, a sysctl
74259+ option with name "socket_server_gid" is created.
74260+
74261+endmenu
74262+menu "Sysctl support"
74263+depends on GRKERNSEC && SYSCTL
74264+
74265+config GRKERNSEC_SYSCTL
74266+ bool "Sysctl support"
74267+ help
74268+ If you say Y here, you will be able to change the options that
74269+ grsecurity runs with at bootup, without having to recompile your
74270+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
74271+ to enable (1) or disable (0) various features. All the sysctl entries
74272+ are mutable until the "grsec_lock" entry is set to a non-zero value.
74273+ All features enabled in the kernel configuration are disabled at boot
74274+ if you do not say Y to the "Turn on features by default" option.
74275+ All options should be set at startup, and the grsec_lock entry should
74276+ be set to a non-zero value after all the options are set.
74277+ *THIS IS EXTREMELY IMPORTANT*
74278+
74279+config GRKERNSEC_SYSCTL_DISTRO
74280+ bool "Extra sysctl support for distro makers (READ HELP)"
74281+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
74282+ help
74283+ If you say Y here, additional sysctl options will be created
74284+ for features that affect processes running as root. Therefore,
74285+ it is critical when using this option that the grsec_lock entry be
74286+ enabled after boot. Only distros with prebuilt kernel packages
74287+ with this option enabled that can ensure grsec_lock is enabled
74288+ after boot should use this option.
74289+ *Failure to set grsec_lock after boot makes all grsec features
74290+ this option covers useless*
74291+
74292+ Currently this option creates the following sysctl entries:
74293+ "Disable Privileged I/O": "disable_priv_io"
74294+
74295+config GRKERNSEC_SYSCTL_ON
74296+ bool "Turn on features by default"
74297+ depends on GRKERNSEC_SYSCTL
74298+ help
74299+ If you say Y here, instead of having all features enabled in the
74300+ kernel configuration disabled at boot time, the features will be
74301+ enabled at boot time. It is recommended you say Y here unless
74302+ there is some reason you would want all sysctl-tunable features to
74303+ be disabled by default. As mentioned elsewhere, it is important
74304+ to enable the grsec_lock entry once you have finished modifying
74305+ the sysctl entries.
74306+
74307+endmenu
74308+menu "Logging Options"
74309+depends on GRKERNSEC
74310+
74311+config GRKERNSEC_FLOODTIME
74312+ int "Seconds in between log messages (minimum)"
74313+ default 10
74314+ help
74315+ This option allows you to enforce the number of seconds between
74316+ grsecurity log messages. The default should be suitable for most
74317+ people, however, if you choose to change it, choose a value small enough
74318+ to allow informative logs to be produced, but large enough to
74319+ prevent flooding.
74320+
74321+config GRKERNSEC_FLOODBURST
74322+ int "Number of messages in a burst (maximum)"
74323+ default 6
74324+ help
74325+ This option allows you to choose the maximum number of messages allowed
74326+ within the flood time interval you chose in a separate option. The
74327+ default should be suitable for most people, however if you find that
74328+ many of your logs are being interpreted as flooding, you may want to
74329+ raise this value.
74330+
74331+endmenu
74332+
74333+endmenu
74334diff --git a/grsecurity/Makefile b/grsecurity/Makefile
74335new file mode 100644
74336index 0000000..1b9afa9
74337--- /dev/null
74338+++ b/grsecurity/Makefile
74339@@ -0,0 +1,38 @@
74340+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
74341+# during 2001-2009 it has been completely redesigned by Brad Spengler
74342+# into an RBAC system
74343+#
74344+# All code in this directory and various hooks inserted throughout the kernel
74345+# are copyright Brad Spengler - Open Source Security, Inc., and released
74346+# under the GPL v2 or higher
74347+
74348+KBUILD_CFLAGS += -Werror
74349+
74350+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
74351+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
74352+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
74353+
74354+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
74355+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
74356+ gracl_learn.o grsec_log.o
74357+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
74358+
74359+ifdef CONFIG_NET
74360+obj-y += grsec_sock.o
74361+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
74362+endif
74363+
74364+ifndef CONFIG_GRKERNSEC
74365+obj-y += grsec_disabled.o
74366+endif
74367+
74368+ifdef CONFIG_GRKERNSEC_HIDESYM
74369+extra-y := grsec_hidesym.o
74370+$(obj)/grsec_hidesym.o:
74371+ @-chmod -f 500 /boot
74372+ @-chmod -f 500 /lib/modules
74373+ @-chmod -f 500 /lib64/modules
74374+ @-chmod -f 500 /lib32/modules
74375+ @-chmod -f 700 .
74376+ @echo ' grsec: protected kernel image paths'
74377+endif
74378diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
74379new file mode 100644
74380index 0000000..dc4812b
74381--- /dev/null
74382+++ b/grsecurity/gracl.c
74383@@ -0,0 +1,4148 @@
74384+#include <linux/kernel.h>
74385+#include <linux/module.h>
74386+#include <linux/sched.h>
74387+#include <linux/mm.h>
74388+#include <linux/file.h>
74389+#include <linux/fs.h>
74390+#include <linux/namei.h>
74391+#include <linux/mount.h>
74392+#include <linux/tty.h>
74393+#include <linux/proc_fs.h>
74394+#include <linux/smp_lock.h>
74395+#include <linux/slab.h>
74396+#include <linux/vmalloc.h>
74397+#include <linux/types.h>
74398+#include <linux/sysctl.h>
74399+#include <linux/netdevice.h>
74400+#include <linux/ptrace.h>
74401+#include <linux/gracl.h>
74402+#include <linux/gralloc.h>
74403+#include <linux/security.h>
74404+#include <linux/grinternal.h>
74405+#include <linux/pid_namespace.h>
74406+#include <linux/fdtable.h>
74407+#include <linux/percpu.h>
74408+
74409+#include <asm/uaccess.h>
74410+#include <asm/errno.h>
74411+#include <asm/mman.h>
74412+
74413+static struct acl_role_db acl_role_set;
74414+static struct name_db name_set;
74415+static struct inodev_db inodev_set;
74416+
74417+/* for keeping track of userspace pointers used for subjects, so we
74418+ can share references in the kernel as well
74419+*/
74420+
74421+static struct dentry *real_root;
74422+static struct vfsmount *real_root_mnt;
74423+
74424+static struct acl_subj_map_db subj_map_set;
74425+
74426+static struct acl_role_label *default_role;
74427+
74428+static struct acl_role_label *role_list;
74429+
74430+static u16 acl_sp_role_value;
74431+
74432+extern char *gr_shared_page[4];
74433+static DEFINE_MUTEX(gr_dev_mutex);
74434+DEFINE_RWLOCK(gr_inode_lock);
74435+
74436+struct gr_arg *gr_usermode;
74437+
74438+static unsigned int gr_status __read_only = GR_STATUS_INIT;
74439+
74440+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
74441+extern void gr_clear_learn_entries(void);
74442+
74443+#ifdef CONFIG_GRKERNSEC_RESLOG
74444+extern void gr_log_resource(const struct task_struct *task,
74445+ const int res, const unsigned long wanted, const int gt);
74446+#endif
74447+
74448+unsigned char *gr_system_salt;
74449+unsigned char *gr_system_sum;
74450+
74451+static struct sprole_pw **acl_special_roles = NULL;
74452+static __u16 num_sprole_pws = 0;
74453+
74454+static struct acl_role_label *kernel_role = NULL;
74455+
74456+static unsigned int gr_auth_attempts = 0;
74457+static unsigned long gr_auth_expires = 0UL;
74458+
74459+#ifdef CONFIG_NET
74460+extern struct vfsmount *sock_mnt;
74461+#endif
74462+extern struct vfsmount *pipe_mnt;
74463+extern struct vfsmount *shm_mnt;
74464+#ifdef CONFIG_HUGETLBFS
74465+extern struct vfsmount *hugetlbfs_vfsmount;
74466+#endif
74467+
74468+static struct acl_object_label *fakefs_obj_rw;
74469+static struct acl_object_label *fakefs_obj_rwx;
74470+
74471+extern int gr_init_uidset(void);
74472+extern void gr_free_uidset(void);
74473+extern void gr_remove_uid(uid_t uid);
74474+extern int gr_find_uid(uid_t uid);
74475+
74476+__inline__ int
74477+gr_acl_is_enabled(void)
74478+{
74479+ return (gr_status & GR_READY);
74480+}
74481+
74482+#ifdef CONFIG_BTRFS_FS
74483+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
74484+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
74485+#endif
74486+
74487+static inline dev_t __get_dev(const struct dentry *dentry)
74488+{
74489+#ifdef CONFIG_BTRFS_FS
74490+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
74491+ return get_btrfs_dev_from_inode(dentry->d_inode);
74492+ else
74493+#endif
74494+ return dentry->d_inode->i_sb->s_dev;
74495+}
74496+
74497+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
74498+{
74499+ return __get_dev(dentry);
74500+}
74501+
74502+static char gr_task_roletype_to_char(struct task_struct *task)
74503+{
74504+ switch (task->role->roletype &
74505+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
74506+ GR_ROLE_SPECIAL)) {
74507+ case GR_ROLE_DEFAULT:
74508+ return 'D';
74509+ case GR_ROLE_USER:
74510+ return 'U';
74511+ case GR_ROLE_GROUP:
74512+ return 'G';
74513+ case GR_ROLE_SPECIAL:
74514+ return 'S';
74515+ }
74516+
74517+ return 'X';
74518+}
74519+
74520+char gr_roletype_to_char(void)
74521+{
74522+ return gr_task_roletype_to_char(current);
74523+}
74524+
74525+__inline__ int
74526+gr_acl_tpe_check(void)
74527+{
74528+ if (unlikely(!(gr_status & GR_READY)))
74529+ return 0;
74530+ if (current->role->roletype & GR_ROLE_TPE)
74531+ return 1;
74532+ else
74533+ return 0;
74534+}
74535+
74536+int
74537+gr_handle_rawio(const struct inode *inode)
74538+{
74539+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
74540+ if (inode && S_ISBLK(inode->i_mode) &&
74541+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
74542+ !capable(CAP_SYS_RAWIO))
74543+ return 1;
74544+#endif
74545+ return 0;
74546+}
74547+
74548+static int
74549+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
74550+{
74551+ if (likely(lena != lenb))
74552+ return 0;
74553+
74554+ return !memcmp(a, b, lena);
74555+}
74556+
74557+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
74558+{
74559+ *buflen -= namelen;
74560+ if (*buflen < 0)
74561+ return -ENAMETOOLONG;
74562+ *buffer -= namelen;
74563+ memcpy(*buffer, str, namelen);
74564+ return 0;
74565+}
74566+
74567+/* this must be called with vfsmount_lock and dcache_lock held */
74568+
74569+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
74570+ struct dentry *root, struct vfsmount *rootmnt,
74571+ char *buffer, int buflen)
74572+{
74573+ char * end = buffer+buflen;
74574+ char * retval;
74575+ int namelen;
74576+
74577+ *--end = '\0';
74578+ buflen--;
74579+
74580+ if (buflen < 1)
74581+ goto Elong;
74582+ /* Get '/' right */
74583+ retval = end-1;
74584+ *retval = '/';
74585+
74586+ for (;;) {
74587+ struct dentry * parent;
74588+
74589+ if (dentry == root && vfsmnt == rootmnt)
74590+ break;
74591+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
74592+ /* Global root? */
74593+ if (vfsmnt->mnt_parent == vfsmnt)
74594+ goto global_root;
74595+ dentry = vfsmnt->mnt_mountpoint;
74596+ vfsmnt = vfsmnt->mnt_parent;
74597+ continue;
74598+ }
74599+ parent = dentry->d_parent;
74600+ prefetch(parent);
74601+ namelen = dentry->d_name.len;
74602+ buflen -= namelen + 1;
74603+ if (buflen < 0)
74604+ goto Elong;
74605+ end -= namelen;
74606+ memcpy(end, dentry->d_name.name, namelen);
74607+ *--end = '/';
74608+ retval = end;
74609+ dentry = parent;
74610+ }
74611+
74612+out:
74613+ return retval;
74614+
74615+global_root:
74616+ namelen = dentry->d_name.len;
74617+ buflen -= namelen;
74618+ if (buflen < 0)
74619+ goto Elong;
74620+ retval -= namelen-1; /* hit the slash */
74621+ memcpy(retval, dentry->d_name.name, namelen);
74622+ goto out;
74623+Elong:
74624+ retval = ERR_PTR(-ENAMETOOLONG);
74625+ goto out;
74626+}
74627+
74628+static char *
74629+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
74630+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
74631+{
74632+ char *retval;
74633+
74634+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
74635+ if (unlikely(IS_ERR(retval)))
74636+ retval = strcpy(buf, "<path too long>");
74637+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
74638+ retval[1] = '\0';
74639+
74640+ return retval;
74641+}
74642+
74643+static char *
74644+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
74645+ char *buf, int buflen)
74646+{
74647+ char *res;
74648+
74649+ /* we can use real_root, real_root_mnt, because this is only called
74650+ by the RBAC system */
74651+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
74652+
74653+ return res;
74654+}
74655+
74656+static char *
74657+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
74658+ char *buf, int buflen)
74659+{
74660+ char *res;
74661+ struct dentry *root;
74662+ struct vfsmount *rootmnt;
74663+ struct task_struct *reaper = &init_task;
74664+
74665+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
74666+ read_lock(&reaper->fs->lock);
74667+ root = dget(reaper->fs->root.dentry);
74668+ rootmnt = mntget(reaper->fs->root.mnt);
74669+ read_unlock(&reaper->fs->lock);
74670+
74671+ spin_lock(&dcache_lock);
74672+ spin_lock(&vfsmount_lock);
74673+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
74674+ spin_unlock(&vfsmount_lock);
74675+ spin_unlock(&dcache_lock);
74676+
74677+ dput(root);
74678+ mntput(rootmnt);
74679+ return res;
74680+}
74681+
74682+static char *
74683+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
74684+{
74685+ char *ret;
74686+ spin_lock(&dcache_lock);
74687+ spin_lock(&vfsmount_lock);
74688+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
74689+ PAGE_SIZE);
74690+ spin_unlock(&vfsmount_lock);
74691+ spin_unlock(&dcache_lock);
74692+ return ret;
74693+}
74694+
74695+static char *
74696+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
74697+{
74698+ char *ret;
74699+ char *buf;
74700+ int buflen;
74701+
74702+ spin_lock(&dcache_lock);
74703+ spin_lock(&vfsmount_lock);
74704+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
74705+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
74706+ buflen = (int)(ret - buf);
74707+ if (buflen >= 5)
74708+ prepend(&ret, &buflen, "/proc", 5);
74709+ else
74710+ ret = strcpy(buf, "<path too long>");
74711+ spin_unlock(&vfsmount_lock);
74712+ spin_unlock(&dcache_lock);
74713+ return ret;
74714+}
74715+
74716+char *
74717+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
74718+{
74719+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
74720+ PAGE_SIZE);
74721+}
74722+
74723+char *
74724+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
74725+{
74726+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
74727+ PAGE_SIZE);
74728+}
74729+
74730+char *
74731+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
74732+{
74733+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
74734+ PAGE_SIZE);
74735+}
74736+
74737+char *
74738+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
74739+{
74740+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
74741+ PAGE_SIZE);
74742+}
74743+
74744+char *
74745+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
74746+{
74747+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
74748+ PAGE_SIZE);
74749+}
74750+
74751+__inline__ __u32
74752+to_gr_audit(const __u32 reqmode)
74753+{
74754+ /* masks off auditable permission flags, then shifts them to create
74755+ auditing flags, and adds the special case of append auditing if
74756+ we're requesting write */
74757+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
74758+}
74759+
74760+struct acl_subject_label *
74761+lookup_subject_map(const struct acl_subject_label *userp)
74762+{
74763+ unsigned int index = shash(userp, subj_map_set.s_size);
74764+ struct subject_map *match;
74765+
74766+ match = subj_map_set.s_hash[index];
74767+
74768+ while (match && match->user != userp)
74769+ match = match->next;
74770+
74771+ if (match != NULL)
74772+ return match->kernel;
74773+ else
74774+ return NULL;
74775+}
74776+
74777+static void
74778+insert_subj_map_entry(struct subject_map *subjmap)
74779+{
74780+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
74781+ struct subject_map **curr;
74782+
74783+ subjmap->prev = NULL;
74784+
74785+ curr = &subj_map_set.s_hash[index];
74786+ if (*curr != NULL)
74787+ (*curr)->prev = subjmap;
74788+
74789+ subjmap->next = *curr;
74790+ *curr = subjmap;
74791+
74792+ return;
74793+}
74794+
74795+static struct acl_role_label *
74796+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
74797+ const gid_t gid)
74798+{
74799+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
74800+ struct acl_role_label *match;
74801+ struct role_allowed_ip *ipp;
74802+ unsigned int x;
74803+ u32 curr_ip = task->signal->curr_ip;
74804+
74805+ task->signal->saved_ip = curr_ip;
74806+
74807+ match = acl_role_set.r_hash[index];
74808+
74809+ while (match) {
74810+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
74811+ for (x = 0; x < match->domain_child_num; x++) {
74812+ if (match->domain_children[x] == uid)
74813+ goto found;
74814+ }
74815+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
74816+ break;
74817+ match = match->next;
74818+ }
74819+found:
74820+ if (match == NULL) {
74821+ try_group:
74822+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
74823+ match = acl_role_set.r_hash[index];
74824+
74825+ while (match) {
74826+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
74827+ for (x = 0; x < match->domain_child_num; x++) {
74828+ if (match->domain_children[x] == gid)
74829+ goto found2;
74830+ }
74831+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
74832+ break;
74833+ match = match->next;
74834+ }
74835+found2:
74836+ if (match == NULL)
74837+ match = default_role;
74838+ if (match->allowed_ips == NULL)
74839+ return match;
74840+ else {
74841+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
74842+ if (likely
74843+ ((ntohl(curr_ip) & ipp->netmask) ==
74844+ (ntohl(ipp->addr) & ipp->netmask)))
74845+ return match;
74846+ }
74847+ match = default_role;
74848+ }
74849+ } else if (match->allowed_ips == NULL) {
74850+ return match;
74851+ } else {
74852+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
74853+ if (likely
74854+ ((ntohl(curr_ip) & ipp->netmask) ==
74855+ (ntohl(ipp->addr) & ipp->netmask)))
74856+ return match;
74857+ }
74858+ goto try_group;
74859+ }
74860+
74861+ return match;
74862+}
74863+
74864+struct acl_subject_label *
74865+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
74866+ const struct acl_role_label *role)
74867+{
74868+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
74869+ struct acl_subject_label *match;
74870+
74871+ match = role->subj_hash[index];
74872+
74873+ while (match && (match->inode != ino || match->device != dev ||
74874+ (match->mode & GR_DELETED))) {
74875+ match = match->next;
74876+ }
74877+
74878+ if (match && !(match->mode & GR_DELETED))
74879+ return match;
74880+ else
74881+ return NULL;
74882+}
74883+
74884+struct acl_subject_label *
74885+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
74886+ const struct acl_role_label *role)
74887+{
74888+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
74889+ struct acl_subject_label *match;
74890+
74891+ match = role->subj_hash[index];
74892+
74893+ while (match && (match->inode != ino || match->device != dev ||
74894+ !(match->mode & GR_DELETED))) {
74895+ match = match->next;
74896+ }
74897+
74898+ if (match && (match->mode & GR_DELETED))
74899+ return match;
74900+ else
74901+ return NULL;
74902+}
74903+
74904+static struct acl_object_label *
74905+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
74906+ const struct acl_subject_label *subj)
74907+{
74908+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
74909+ struct acl_object_label *match;
74910+
74911+ match = subj->obj_hash[index];
74912+
74913+ while (match && (match->inode != ino || match->device != dev ||
74914+ (match->mode & GR_DELETED))) {
74915+ match = match->next;
74916+ }
74917+
74918+ if (match && !(match->mode & GR_DELETED))
74919+ return match;
74920+ else
74921+ return NULL;
74922+}
74923+
74924+static struct acl_object_label *
74925+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
74926+ const struct acl_subject_label *subj)
74927+{
74928+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
74929+ struct acl_object_label *match;
74930+
74931+ match = subj->obj_hash[index];
74932+
74933+ while (match && (match->inode != ino || match->device != dev ||
74934+ !(match->mode & GR_DELETED))) {
74935+ match = match->next;
74936+ }
74937+
74938+ if (match && (match->mode & GR_DELETED))
74939+ return match;
74940+
74941+ match = subj->obj_hash[index];
74942+
74943+ while (match && (match->inode != ino || match->device != dev ||
74944+ (match->mode & GR_DELETED))) {
74945+ match = match->next;
74946+ }
74947+
74948+ if (match && !(match->mode & GR_DELETED))
74949+ return match;
74950+ else
74951+ return NULL;
74952+}
74953+
74954+static struct name_entry *
74955+lookup_name_entry(const char *name)
74956+{
74957+ unsigned int len = strlen(name);
74958+ unsigned int key = full_name_hash(name, len);
74959+ unsigned int index = key % name_set.n_size;
74960+ struct name_entry *match;
74961+
74962+ match = name_set.n_hash[index];
74963+
74964+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
74965+ match = match->next;
74966+
74967+ return match;
74968+}
74969+
74970+static struct name_entry *
74971+lookup_name_entry_create(const char *name)
74972+{
74973+ unsigned int len = strlen(name);
74974+ unsigned int key = full_name_hash(name, len);
74975+ unsigned int index = key % name_set.n_size;
74976+ struct name_entry *match;
74977+
74978+ match = name_set.n_hash[index];
74979+
74980+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
74981+ !match->deleted))
74982+ match = match->next;
74983+
74984+ if (match && match->deleted)
74985+ return match;
74986+
74987+ match = name_set.n_hash[index];
74988+
74989+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
74990+ match->deleted))
74991+ match = match->next;
74992+
74993+ if (match && !match->deleted)
74994+ return match;
74995+ else
74996+ return NULL;
74997+}
74998+
74999+static struct inodev_entry *
75000+lookup_inodev_entry(const ino_t ino, const dev_t dev)
75001+{
75002+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
75003+ struct inodev_entry *match;
75004+
75005+ match = inodev_set.i_hash[index];
75006+
75007+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
75008+ match = match->next;
75009+
75010+ return match;
75011+}
75012+
75013+static void
75014+insert_inodev_entry(struct inodev_entry *entry)
75015+{
75016+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
75017+ inodev_set.i_size);
75018+ struct inodev_entry **curr;
75019+
75020+ entry->prev = NULL;
75021+
75022+ curr = &inodev_set.i_hash[index];
75023+ if (*curr != NULL)
75024+ (*curr)->prev = entry;
75025+
75026+ entry->next = *curr;
75027+ *curr = entry;
75028+
75029+ return;
75030+}
75031+
75032+static void
75033+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
75034+{
75035+ unsigned int index =
75036+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
75037+ struct acl_role_label **curr;
75038+ struct acl_role_label *tmp;
75039+
75040+ curr = &acl_role_set.r_hash[index];
75041+
75042+ /* if role was already inserted due to domains and already has
75043+ a role in the same bucket as it attached, then we need to
75044+ combine these two buckets
75045+ */
75046+ if (role->next) {
75047+ tmp = role->next;
75048+ while (tmp->next)
75049+ tmp = tmp->next;
75050+ tmp->next = *curr;
75051+ } else
75052+ role->next = *curr;
75053+ *curr = role;
75054+
75055+ return;
75056+}
75057+
75058+static void
75059+insert_acl_role_label(struct acl_role_label *role)
75060+{
75061+ int i;
75062+
75063+ if (role_list == NULL) {
75064+ role_list = role;
75065+ role->prev = NULL;
75066+ } else {
75067+ role->prev = role_list;
75068+ role_list = role;
75069+ }
75070+
75071+ /* used for hash chains */
75072+ role->next = NULL;
75073+
75074+ if (role->roletype & GR_ROLE_DOMAIN) {
75075+ for (i = 0; i < role->domain_child_num; i++)
75076+ __insert_acl_role_label(role, role->domain_children[i]);
75077+ } else
75078+ __insert_acl_role_label(role, role->uidgid);
75079+}
75080+
75081+static int
75082+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
75083+{
75084+ struct name_entry **curr, *nentry;
75085+ struct inodev_entry *ientry;
75086+ unsigned int len = strlen(name);
75087+ unsigned int key = full_name_hash(name, len);
75088+ unsigned int index = key % name_set.n_size;
75089+
75090+ curr = &name_set.n_hash[index];
75091+
75092+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
75093+ curr = &((*curr)->next);
75094+
75095+ if (*curr != NULL)
75096+ return 1;
75097+
75098+ nentry = acl_alloc(sizeof (struct name_entry));
75099+ if (nentry == NULL)
75100+ return 0;
75101+ ientry = acl_alloc(sizeof (struct inodev_entry));
75102+ if (ientry == NULL)
75103+ return 0;
75104+ ientry->nentry = nentry;
75105+
75106+ nentry->key = key;
75107+ nentry->name = name;
75108+ nentry->inode = inode;
75109+ nentry->device = device;
75110+ nentry->len = len;
75111+ nentry->deleted = deleted;
75112+
75113+ nentry->prev = NULL;
75114+ curr = &name_set.n_hash[index];
75115+ if (*curr != NULL)
75116+ (*curr)->prev = nentry;
75117+ nentry->next = *curr;
75118+ *curr = nentry;
75119+
75120+ /* insert us into the table searchable by inode/dev */
75121+ insert_inodev_entry(ientry);
75122+
75123+ return 1;
75124+}
75125+
75126+static void
75127+insert_acl_obj_label(struct acl_object_label *obj,
75128+ struct acl_subject_label *subj)
75129+{
75130+ unsigned int index =
75131+ fhash(obj->inode, obj->device, subj->obj_hash_size);
75132+ struct acl_object_label **curr;
75133+
75134+
75135+ obj->prev = NULL;
75136+
75137+ curr = &subj->obj_hash[index];
75138+ if (*curr != NULL)
75139+ (*curr)->prev = obj;
75140+
75141+ obj->next = *curr;
75142+ *curr = obj;
75143+
75144+ return;
75145+}
75146+
75147+static void
75148+insert_acl_subj_label(struct acl_subject_label *obj,
75149+ struct acl_role_label *role)
75150+{
75151+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
75152+ struct acl_subject_label **curr;
75153+
75154+ obj->prev = NULL;
75155+
75156+ curr = &role->subj_hash[index];
75157+ if (*curr != NULL)
75158+ (*curr)->prev = obj;
75159+
75160+ obj->next = *curr;
75161+ *curr = obj;
75162+
75163+ return;
75164+}
75165+
75166+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
75167+
75168+static void *
75169+create_table(__u32 * len, int elementsize)
75170+{
75171+ unsigned int table_sizes[] = {
75172+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
75173+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
75174+ 4194301, 8388593, 16777213, 33554393, 67108859
75175+ };
75176+ void *newtable = NULL;
75177+ unsigned int pwr = 0;
75178+
75179+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
75180+ table_sizes[pwr] <= *len)
75181+ pwr++;
75182+
75183+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
75184+ return newtable;
75185+
75186+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
75187+ newtable =
75188+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
75189+ else
75190+ newtable = vmalloc(table_sizes[pwr] * elementsize);
75191+
75192+ *len = table_sizes[pwr];
75193+
75194+ return newtable;
75195+}
75196+
75197+static int
75198+init_variables(const struct gr_arg *arg)
75199+{
75200+ struct task_struct *reaper = &init_task;
75201+ unsigned int stacksize;
75202+
75203+ subj_map_set.s_size = arg->role_db.num_subjects;
75204+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
75205+ name_set.n_size = arg->role_db.num_objects;
75206+ inodev_set.i_size = arg->role_db.num_objects;
75207+
75208+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
75209+ !name_set.n_size || !inodev_set.i_size)
75210+ return 1;
75211+
75212+ if (!gr_init_uidset())
75213+ return 1;
75214+
75215+ /* set up the stack that holds allocation info */
75216+
75217+ stacksize = arg->role_db.num_pointers + 5;
75218+
75219+ if (!acl_alloc_stack_init(stacksize))
75220+ return 1;
75221+
75222+ /* grab reference for the real root dentry and vfsmount */
75223+ read_lock(&reaper->fs->lock);
75224+ real_root = dget(reaper->fs->root.dentry);
75225+ real_root_mnt = mntget(reaper->fs->root.mnt);
75226+ read_unlock(&reaper->fs->lock);
75227+
75228+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75229+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
75230+#endif
75231+
75232+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
75233+ if (fakefs_obj_rw == NULL)
75234+ return 1;
75235+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
75236+
75237+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
75238+ if (fakefs_obj_rwx == NULL)
75239+ return 1;
75240+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
75241+
75242+ subj_map_set.s_hash =
75243+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
75244+ acl_role_set.r_hash =
75245+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
75246+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
75247+ inodev_set.i_hash =
75248+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
75249+
75250+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
75251+ !name_set.n_hash || !inodev_set.i_hash)
75252+ return 1;
75253+
75254+ memset(subj_map_set.s_hash, 0,
75255+ sizeof(struct subject_map *) * subj_map_set.s_size);
75256+ memset(acl_role_set.r_hash, 0,
75257+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
75258+ memset(name_set.n_hash, 0,
75259+ sizeof (struct name_entry *) * name_set.n_size);
75260+ memset(inodev_set.i_hash, 0,
75261+ sizeof (struct inodev_entry *) * inodev_set.i_size);
75262+
75263+ return 0;
75264+}
75265+
75266+/* free information not needed after startup
75267+ currently contains user->kernel pointer mappings for subjects
75268+*/
75269+
75270+static void
75271+free_init_variables(void)
75272+{
75273+ __u32 i;
75274+
75275+ if (subj_map_set.s_hash) {
75276+ for (i = 0; i < subj_map_set.s_size; i++) {
75277+ if (subj_map_set.s_hash[i]) {
75278+ kfree(subj_map_set.s_hash[i]);
75279+ subj_map_set.s_hash[i] = NULL;
75280+ }
75281+ }
75282+
75283+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
75284+ PAGE_SIZE)
75285+ kfree(subj_map_set.s_hash);
75286+ else
75287+ vfree(subj_map_set.s_hash);
75288+ }
75289+
75290+ return;
75291+}
75292+
75293+static void
75294+free_variables(void)
75295+{
75296+ struct acl_subject_label *s;
75297+ struct acl_role_label *r;
75298+ struct task_struct *task, *task2;
75299+ unsigned int x;
75300+
75301+ gr_clear_learn_entries();
75302+
75303+ read_lock(&tasklist_lock);
75304+ do_each_thread(task2, task) {
75305+ task->acl_sp_role = 0;
75306+ task->acl_role_id = 0;
75307+ task->acl = NULL;
75308+ task->role = NULL;
75309+ } while_each_thread(task2, task);
75310+ read_unlock(&tasklist_lock);
75311+
75312+ /* release the reference to the real root dentry and vfsmount */
75313+ if (real_root)
75314+ dput(real_root);
75315+ real_root = NULL;
75316+ if (real_root_mnt)
75317+ mntput(real_root_mnt);
75318+ real_root_mnt = NULL;
75319+
75320+ /* free all object hash tables */
75321+
75322+ FOR_EACH_ROLE_START(r)
75323+ if (r->subj_hash == NULL)
75324+ goto next_role;
75325+ FOR_EACH_SUBJECT_START(r, s, x)
75326+ if (s->obj_hash == NULL)
75327+ break;
75328+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75329+ kfree(s->obj_hash);
75330+ else
75331+ vfree(s->obj_hash);
75332+ FOR_EACH_SUBJECT_END(s, x)
75333+ FOR_EACH_NESTED_SUBJECT_START(r, s)
75334+ if (s->obj_hash == NULL)
75335+ break;
75336+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
75337+ kfree(s->obj_hash);
75338+ else
75339+ vfree(s->obj_hash);
75340+ FOR_EACH_NESTED_SUBJECT_END(s)
75341+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
75342+ kfree(r->subj_hash);
75343+ else
75344+ vfree(r->subj_hash);
75345+ r->subj_hash = NULL;
75346+next_role:
75347+ FOR_EACH_ROLE_END(r)
75348+
75349+ acl_free_all();
75350+
75351+ if (acl_role_set.r_hash) {
75352+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
75353+ PAGE_SIZE)
75354+ kfree(acl_role_set.r_hash);
75355+ else
75356+ vfree(acl_role_set.r_hash);
75357+ }
75358+ if (name_set.n_hash) {
75359+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
75360+ PAGE_SIZE)
75361+ kfree(name_set.n_hash);
75362+ else
75363+ vfree(name_set.n_hash);
75364+ }
75365+
75366+ if (inodev_set.i_hash) {
75367+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
75368+ PAGE_SIZE)
75369+ kfree(inodev_set.i_hash);
75370+ else
75371+ vfree(inodev_set.i_hash);
75372+ }
75373+
75374+ gr_free_uidset();
75375+
75376+ memset(&name_set, 0, sizeof (struct name_db));
75377+ memset(&inodev_set, 0, sizeof (struct inodev_db));
75378+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
75379+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
75380+
75381+ default_role = NULL;
75382+ role_list = NULL;
75383+
75384+ return;
75385+}
75386+
75387+static __u32
75388+count_user_objs(struct acl_object_label *userp)
75389+{
75390+ struct acl_object_label o_tmp;
75391+ __u32 num = 0;
75392+
75393+ while (userp) {
75394+ if (copy_from_user(&o_tmp, userp,
75395+ sizeof (struct acl_object_label)))
75396+ break;
75397+
75398+ userp = o_tmp.prev;
75399+ num++;
75400+ }
75401+
75402+ return num;
75403+}
75404+
75405+static struct acl_subject_label *
75406+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
75407+
75408+static int
75409+copy_user_glob(struct acl_object_label *obj)
75410+{
75411+ struct acl_object_label *g_tmp, **guser;
75412+ unsigned int len;
75413+ char *tmp;
75414+
75415+ if (obj->globbed == NULL)
75416+ return 0;
75417+
75418+ guser = &obj->globbed;
75419+ while (*guser) {
75420+ g_tmp = (struct acl_object_label *)
75421+ acl_alloc(sizeof (struct acl_object_label));
75422+ if (g_tmp == NULL)
75423+ return -ENOMEM;
75424+
75425+ if (copy_from_user(g_tmp, *guser,
75426+ sizeof (struct acl_object_label)))
75427+ return -EFAULT;
75428+
75429+ len = strnlen_user(g_tmp->filename, PATH_MAX);
75430+
75431+ if (!len || len >= PATH_MAX)
75432+ return -EINVAL;
75433+
75434+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75435+ return -ENOMEM;
75436+
75437+ if (copy_from_user(tmp, g_tmp->filename, len))
75438+ return -EFAULT;
75439+ tmp[len-1] = '\0';
75440+ g_tmp->filename = tmp;
75441+
75442+ *guser = g_tmp;
75443+ guser = &(g_tmp->next);
75444+ }
75445+
75446+ return 0;
75447+}
75448+
75449+static int
75450+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
75451+ struct acl_role_label *role)
75452+{
75453+ struct acl_object_label *o_tmp;
75454+ unsigned int len;
75455+ int ret;
75456+ char *tmp;
75457+
75458+ while (userp) {
75459+ if ((o_tmp = (struct acl_object_label *)
75460+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
75461+ return -ENOMEM;
75462+
75463+ if (copy_from_user(o_tmp, userp,
75464+ sizeof (struct acl_object_label)))
75465+ return -EFAULT;
75466+
75467+ userp = o_tmp->prev;
75468+
75469+ len = strnlen_user(o_tmp->filename, PATH_MAX);
75470+
75471+ if (!len || len >= PATH_MAX)
75472+ return -EINVAL;
75473+
75474+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75475+ return -ENOMEM;
75476+
75477+ if (copy_from_user(tmp, o_tmp->filename, len))
75478+ return -EFAULT;
75479+ tmp[len-1] = '\0';
75480+ o_tmp->filename = tmp;
75481+
75482+ insert_acl_obj_label(o_tmp, subj);
75483+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
75484+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
75485+ return -ENOMEM;
75486+
75487+ ret = copy_user_glob(o_tmp);
75488+ if (ret)
75489+ return ret;
75490+
75491+ if (o_tmp->nested) {
75492+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
75493+ if (IS_ERR(o_tmp->nested))
75494+ return PTR_ERR(o_tmp->nested);
75495+
75496+ /* insert into nested subject list */
75497+ o_tmp->nested->next = role->hash->first;
75498+ role->hash->first = o_tmp->nested;
75499+ }
75500+ }
75501+
75502+ return 0;
75503+}
75504+
75505+static __u32
75506+count_user_subjs(struct acl_subject_label *userp)
75507+{
75508+ struct acl_subject_label s_tmp;
75509+ __u32 num = 0;
75510+
75511+ while (userp) {
75512+ if (copy_from_user(&s_tmp, userp,
75513+ sizeof (struct acl_subject_label)))
75514+ break;
75515+
75516+ userp = s_tmp.prev;
75517+ /* do not count nested subjects against this count, since
75518+ they are not included in the hash table, but are
75519+ attached to objects. We have already counted
75520+ the subjects in userspace for the allocation
75521+ stack
75522+ */
75523+ if (!(s_tmp.mode & GR_NESTED))
75524+ num++;
75525+ }
75526+
75527+ return num;
75528+}
75529+
75530+static int
75531+copy_user_allowedips(struct acl_role_label *rolep)
75532+{
75533+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
75534+
75535+ ruserip = rolep->allowed_ips;
75536+
75537+ while (ruserip) {
75538+ rlast = rtmp;
75539+
75540+ if ((rtmp = (struct role_allowed_ip *)
75541+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
75542+ return -ENOMEM;
75543+
75544+ if (copy_from_user(rtmp, ruserip,
75545+ sizeof (struct role_allowed_ip)))
75546+ return -EFAULT;
75547+
75548+ ruserip = rtmp->prev;
75549+
75550+ if (!rlast) {
75551+ rtmp->prev = NULL;
75552+ rolep->allowed_ips = rtmp;
75553+ } else {
75554+ rlast->next = rtmp;
75555+ rtmp->prev = rlast;
75556+ }
75557+
75558+ if (!ruserip)
75559+ rtmp->next = NULL;
75560+ }
75561+
75562+ return 0;
75563+}
75564+
75565+static int
75566+copy_user_transitions(struct acl_role_label *rolep)
75567+{
75568+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
75569+
75570+ unsigned int len;
75571+ char *tmp;
75572+
75573+ rusertp = rolep->transitions;
75574+
75575+ while (rusertp) {
75576+ rlast = rtmp;
75577+
75578+ if ((rtmp = (struct role_transition *)
75579+ acl_alloc(sizeof (struct role_transition))) == NULL)
75580+ return -ENOMEM;
75581+
75582+ if (copy_from_user(rtmp, rusertp,
75583+ sizeof (struct role_transition)))
75584+ return -EFAULT;
75585+
75586+ rusertp = rtmp->prev;
75587+
75588+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
75589+
75590+ if (!len || len >= GR_SPROLE_LEN)
75591+ return -EINVAL;
75592+
75593+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75594+ return -ENOMEM;
75595+
75596+ if (copy_from_user(tmp, rtmp->rolename, len))
75597+ return -EFAULT;
75598+ tmp[len-1] = '\0';
75599+ rtmp->rolename = tmp;
75600+
75601+ if (!rlast) {
75602+ rtmp->prev = NULL;
75603+ rolep->transitions = rtmp;
75604+ } else {
75605+ rlast->next = rtmp;
75606+ rtmp->prev = rlast;
75607+ }
75608+
75609+ if (!rusertp)
75610+ rtmp->next = NULL;
75611+ }
75612+
75613+ return 0;
75614+}
75615+
75616+static struct acl_subject_label *
75617+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
75618+{
75619+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
75620+ unsigned int len;
75621+ char *tmp;
75622+ __u32 num_objs;
75623+ struct acl_ip_label **i_tmp, *i_utmp2;
75624+ struct gr_hash_struct ghash;
75625+ struct subject_map *subjmap;
75626+ unsigned int i_num;
75627+ int err;
75628+
75629+ s_tmp = lookup_subject_map(userp);
75630+
75631+ /* we've already copied this subject into the kernel, just return
75632+ the reference to it, and don't copy it over again
75633+ */
75634+ if (s_tmp)
75635+ return(s_tmp);
75636+
75637+ if ((s_tmp = (struct acl_subject_label *)
75638+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
75639+ return ERR_PTR(-ENOMEM);
75640+
75641+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
75642+ if (subjmap == NULL)
75643+ return ERR_PTR(-ENOMEM);
75644+
75645+ subjmap->user = userp;
75646+ subjmap->kernel = s_tmp;
75647+ insert_subj_map_entry(subjmap);
75648+
75649+ if (copy_from_user(s_tmp, userp,
75650+ sizeof (struct acl_subject_label)))
75651+ return ERR_PTR(-EFAULT);
75652+
75653+ len = strnlen_user(s_tmp->filename, PATH_MAX);
75654+
75655+ if (!len || len >= PATH_MAX)
75656+ return ERR_PTR(-EINVAL);
75657+
75658+ if ((tmp = (char *) acl_alloc(len)) == NULL)
75659+ return ERR_PTR(-ENOMEM);
75660+
75661+ if (copy_from_user(tmp, s_tmp->filename, len))
75662+ return ERR_PTR(-EFAULT);
75663+ tmp[len-1] = '\0';
75664+ s_tmp->filename = tmp;
75665+
75666+ if (!strcmp(s_tmp->filename, "/"))
75667+ role->root_label = s_tmp;
75668+
75669+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
75670+ return ERR_PTR(-EFAULT);
75671+
75672+ /* copy user and group transition tables */
75673+
75674+ if (s_tmp->user_trans_num) {
75675+ uid_t *uidlist;
75676+
75677+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
75678+ if (uidlist == NULL)
75679+ return ERR_PTR(-ENOMEM);
75680+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
75681+ return ERR_PTR(-EFAULT);
75682+
75683+ s_tmp->user_transitions = uidlist;
75684+ }
75685+
75686+ if (s_tmp->group_trans_num) {
75687+ gid_t *gidlist;
75688+
75689+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
75690+ if (gidlist == NULL)
75691+ return ERR_PTR(-ENOMEM);
75692+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
75693+ return ERR_PTR(-EFAULT);
75694+
75695+ s_tmp->group_transitions = gidlist;
75696+ }
75697+
75698+ /* set up object hash table */
75699+ num_objs = count_user_objs(ghash.first);
75700+
75701+ s_tmp->obj_hash_size = num_objs;
75702+ s_tmp->obj_hash =
75703+ (struct acl_object_label **)
75704+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
75705+
75706+ if (!s_tmp->obj_hash)
75707+ return ERR_PTR(-ENOMEM);
75708+
75709+ memset(s_tmp->obj_hash, 0,
75710+ s_tmp->obj_hash_size *
75711+ sizeof (struct acl_object_label *));
75712+
75713+ /* add in objects */
75714+ err = copy_user_objs(ghash.first, s_tmp, role);
75715+
75716+ if (err)
75717+ return ERR_PTR(err);
75718+
75719+ /* set pointer for parent subject */
75720+ if (s_tmp->parent_subject) {
75721+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
75722+
75723+ if (IS_ERR(s_tmp2))
75724+ return s_tmp2;
75725+
75726+ s_tmp->parent_subject = s_tmp2;
75727+ }
75728+
75729+ /* add in ip acls */
75730+
75731+ if (!s_tmp->ip_num) {
75732+ s_tmp->ips = NULL;
75733+ goto insert;
75734+ }
75735+
75736+ i_tmp =
75737+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
75738+ sizeof (struct acl_ip_label *));
75739+
75740+ if (!i_tmp)
75741+ return ERR_PTR(-ENOMEM);
75742+
75743+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
75744+ *(i_tmp + i_num) =
75745+ (struct acl_ip_label *)
75746+ acl_alloc(sizeof (struct acl_ip_label));
75747+ if (!*(i_tmp + i_num))
75748+ return ERR_PTR(-ENOMEM);
75749+
75750+ if (copy_from_user
75751+ (&i_utmp2, s_tmp->ips + i_num,
75752+ sizeof (struct acl_ip_label *)))
75753+ return ERR_PTR(-EFAULT);
75754+
75755+ if (copy_from_user
75756+ (*(i_tmp + i_num), i_utmp2,
75757+ sizeof (struct acl_ip_label)))
75758+ return ERR_PTR(-EFAULT);
75759+
75760+ if ((*(i_tmp + i_num))->iface == NULL)
75761+ continue;
75762+
75763+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
75764+ if (!len || len >= IFNAMSIZ)
75765+ return ERR_PTR(-EINVAL);
75766+ tmp = acl_alloc(len);
75767+ if (tmp == NULL)
75768+ return ERR_PTR(-ENOMEM);
75769+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
75770+ return ERR_PTR(-EFAULT);
75771+ (*(i_tmp + i_num))->iface = tmp;
75772+ }
75773+
75774+ s_tmp->ips = i_tmp;
75775+
75776+insert:
75777+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
75778+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
75779+ return ERR_PTR(-ENOMEM);
75780+
75781+ return s_tmp;
75782+}
75783+
75784+static int
75785+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
75786+{
75787+ struct acl_subject_label s_pre;
75788+ struct acl_subject_label * ret;
75789+ int err;
75790+
75791+ while (userp) {
75792+ if (copy_from_user(&s_pre, userp,
75793+ sizeof (struct acl_subject_label)))
75794+ return -EFAULT;
75795+
75796+ /* do not add nested subjects here, add
75797+ while parsing objects
75798+ */
75799+
75800+ if (s_pre.mode & GR_NESTED) {
75801+ userp = s_pre.prev;
75802+ continue;
75803+ }
75804+
75805+ ret = do_copy_user_subj(userp, role);
75806+
75807+ err = PTR_ERR(ret);
75808+ if (IS_ERR(ret))
75809+ return err;
75810+
75811+ insert_acl_subj_label(ret, role);
75812+
75813+ userp = s_pre.prev;
75814+ }
75815+
75816+ return 0;
75817+}
75818+
75819+static int
75820+copy_user_acl(struct gr_arg *arg)
75821+{
75822+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
75823+ struct sprole_pw *sptmp;
75824+ struct gr_hash_struct *ghash;
75825+ uid_t *domainlist;
75826+ unsigned int r_num;
75827+ unsigned int len;
75828+ char *tmp;
75829+ int err = 0;
75830+ __u16 i;
75831+ __u32 num_subjs;
75832+
75833+ /* we need a default and kernel role */
75834+ if (arg->role_db.num_roles < 2)
75835+ return -EINVAL;
75836+
75837+ /* copy special role authentication info from userspace */
75838+
75839+ num_sprole_pws = arg->num_sprole_pws;
75840+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
75841+
75842+ if (!acl_special_roles) {
75843+ err = -ENOMEM;
75844+ goto cleanup;
75845+ }
75846+
75847+ for (i = 0; i < num_sprole_pws; i++) {
75848+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
75849+ if (!sptmp) {
75850+ err = -ENOMEM;
75851+ goto cleanup;
75852+ }
75853+ if (copy_from_user(sptmp, arg->sprole_pws + i,
75854+ sizeof (struct sprole_pw))) {
75855+ err = -EFAULT;
75856+ goto cleanup;
75857+ }
75858+
75859+ len =
75860+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
75861+
75862+ if (!len || len >= GR_SPROLE_LEN) {
75863+ err = -EINVAL;
75864+ goto cleanup;
75865+ }
75866+
75867+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
75868+ err = -ENOMEM;
75869+ goto cleanup;
75870+ }
75871+
75872+ if (copy_from_user(tmp, sptmp->rolename, len)) {
75873+ err = -EFAULT;
75874+ goto cleanup;
75875+ }
75876+ tmp[len-1] = '\0';
75877+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
75878+ printk(KERN_ALERT "Copying special role %s\n", tmp);
75879+#endif
75880+ sptmp->rolename = tmp;
75881+ acl_special_roles[i] = sptmp;
75882+ }
75883+
75884+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
75885+
75886+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
75887+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
75888+
75889+ if (!r_tmp) {
75890+ err = -ENOMEM;
75891+ goto cleanup;
75892+ }
75893+
75894+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
75895+ sizeof (struct acl_role_label *))) {
75896+ err = -EFAULT;
75897+ goto cleanup;
75898+ }
75899+
75900+ if (copy_from_user(r_tmp, r_utmp2,
75901+ sizeof (struct acl_role_label))) {
75902+ err = -EFAULT;
75903+ goto cleanup;
75904+ }
75905+
75906+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
75907+
75908+ if (!len || len >= PATH_MAX) {
75909+ err = -EINVAL;
75910+ goto cleanup;
75911+ }
75912+
75913+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
75914+ err = -ENOMEM;
75915+ goto cleanup;
75916+ }
75917+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
75918+ err = -EFAULT;
75919+ goto cleanup;
75920+ }
75921+ tmp[len-1] = '\0';
75922+ r_tmp->rolename = tmp;
75923+
75924+ if (!strcmp(r_tmp->rolename, "default")
75925+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
75926+ default_role = r_tmp;
75927+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
75928+ kernel_role = r_tmp;
75929+ }
75930+
75931+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
75932+ err = -ENOMEM;
75933+ goto cleanup;
75934+ }
75935+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
75936+ err = -EFAULT;
75937+ goto cleanup;
75938+ }
75939+
75940+ r_tmp->hash = ghash;
75941+
75942+ num_subjs = count_user_subjs(r_tmp->hash->first);
75943+
75944+ r_tmp->subj_hash_size = num_subjs;
75945+ r_tmp->subj_hash =
75946+ (struct acl_subject_label **)
75947+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
75948+
75949+ if (!r_tmp->subj_hash) {
75950+ err = -ENOMEM;
75951+ goto cleanup;
75952+ }
75953+
75954+ err = copy_user_allowedips(r_tmp);
75955+ if (err)
75956+ goto cleanup;
75957+
75958+ /* copy domain info */
75959+ if (r_tmp->domain_children != NULL) {
75960+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
75961+ if (domainlist == NULL) {
75962+ err = -ENOMEM;
75963+ goto cleanup;
75964+ }
75965+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
75966+ err = -EFAULT;
75967+ goto cleanup;
75968+ }
75969+ r_tmp->domain_children = domainlist;
75970+ }
75971+
75972+ err = copy_user_transitions(r_tmp);
75973+ if (err)
75974+ goto cleanup;
75975+
75976+ memset(r_tmp->subj_hash, 0,
75977+ r_tmp->subj_hash_size *
75978+ sizeof (struct acl_subject_label *));
75979+
75980+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
75981+
75982+ if (err)
75983+ goto cleanup;
75984+
75985+ /* set nested subject list to null */
75986+ r_tmp->hash->first = NULL;
75987+
75988+ insert_acl_role_label(r_tmp);
75989+ }
75990+
75991+ goto return_err;
75992+ cleanup:
75993+ free_variables();
75994+ return_err:
75995+ return err;
75996+
75997+}
75998+
75999+static int
76000+gracl_init(struct gr_arg *args)
76001+{
76002+ int error = 0;
76003+
76004+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
76005+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
76006+
76007+ if (init_variables(args)) {
76008+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
76009+ error = -ENOMEM;
76010+ free_variables();
76011+ goto out;
76012+ }
76013+
76014+ error = copy_user_acl(args);
76015+ free_init_variables();
76016+ if (error) {
76017+ free_variables();
76018+ goto out;
76019+ }
76020+
76021+ if ((error = gr_set_acls(0))) {
76022+ free_variables();
76023+ goto out;
76024+ }
76025+
76026+ pax_open_kernel();
76027+ gr_status |= GR_READY;
76028+ pax_close_kernel();
76029+
76030+ out:
76031+ return error;
76032+}
76033+
76034+/* derived from glibc fnmatch() 0: match, 1: no match*/
76035+
76036+static int
76037+glob_match(const char *p, const char *n)
76038+{
76039+ char c;
76040+
76041+ while ((c = *p++) != '\0') {
76042+ switch (c) {
76043+ case '?':
76044+ if (*n == '\0')
76045+ return 1;
76046+ else if (*n == '/')
76047+ return 1;
76048+ break;
76049+ case '\\':
76050+ if (*n != c)
76051+ return 1;
76052+ break;
76053+ case '*':
76054+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
76055+ if (*n == '/')
76056+ return 1;
76057+ else if (c == '?') {
76058+ if (*n == '\0')
76059+ return 1;
76060+ else
76061+ ++n;
76062+ }
76063+ }
76064+ if (c == '\0') {
76065+ return 0;
76066+ } else {
76067+ const char *endp;
76068+
76069+ if ((endp = strchr(n, '/')) == NULL)
76070+ endp = n + strlen(n);
76071+
76072+ if (c == '[') {
76073+ for (--p; n < endp; ++n)
76074+ if (!glob_match(p, n))
76075+ return 0;
76076+ } else if (c == '/') {
76077+ while (*n != '\0' && *n != '/')
76078+ ++n;
76079+ if (*n == '/' && !glob_match(p, n + 1))
76080+ return 0;
76081+ } else {
76082+ for (--p; n < endp; ++n)
76083+ if (*n == c && !glob_match(p, n))
76084+ return 0;
76085+ }
76086+
76087+ return 1;
76088+ }
76089+ case '[':
76090+ {
76091+ int not;
76092+ char cold;
76093+
76094+ if (*n == '\0' || *n == '/')
76095+ return 1;
76096+
76097+ not = (*p == '!' || *p == '^');
76098+ if (not)
76099+ ++p;
76100+
76101+ c = *p++;
76102+ for (;;) {
76103+ unsigned char fn = (unsigned char)*n;
76104+
76105+ if (c == '\0')
76106+ return 1;
76107+ else {
76108+ if (c == fn)
76109+ goto matched;
76110+ cold = c;
76111+ c = *p++;
76112+
76113+ if (c == '-' && *p != ']') {
76114+ unsigned char cend = *p++;
76115+
76116+ if (cend == '\0')
76117+ return 1;
76118+
76119+ if (cold <= fn && fn <= cend)
76120+ goto matched;
76121+
76122+ c = *p++;
76123+ }
76124+ }
76125+
76126+ if (c == ']')
76127+ break;
76128+ }
76129+ if (!not)
76130+ return 1;
76131+ break;
76132+ matched:
76133+ while (c != ']') {
76134+ if (c == '\0')
76135+ return 1;
76136+
76137+ c = *p++;
76138+ }
76139+ if (not)
76140+ return 1;
76141+ }
76142+ break;
76143+ default:
76144+ if (c != *n)
76145+ return 1;
76146+ }
76147+
76148+ ++n;
76149+ }
76150+
76151+ if (*n == '\0')
76152+ return 0;
76153+
76154+ if (*n == '/')
76155+ return 0;
76156+
76157+ return 1;
76158+}
76159+
76160+static struct acl_object_label *
76161+chk_glob_label(struct acl_object_label *globbed,
76162+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
76163+{
76164+ struct acl_object_label *tmp;
76165+
76166+ if (*path == NULL)
76167+ *path = gr_to_filename_nolock(dentry, mnt);
76168+
76169+ tmp = globbed;
76170+
76171+ while (tmp) {
76172+ if (!glob_match(tmp->filename, *path))
76173+ return tmp;
76174+ tmp = tmp->next;
76175+ }
76176+
76177+ return NULL;
76178+}
76179+
76180+static struct acl_object_label *
76181+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76182+ const ino_t curr_ino, const dev_t curr_dev,
76183+ const struct acl_subject_label *subj, char **path, const int checkglob)
76184+{
76185+ struct acl_subject_label *tmpsubj;
76186+ struct acl_object_label *retval;
76187+ struct acl_object_label *retval2;
76188+
76189+ tmpsubj = (struct acl_subject_label *) subj;
76190+ read_lock(&gr_inode_lock);
76191+ do {
76192+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
76193+ if (retval) {
76194+ if (checkglob && retval->globbed) {
76195+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
76196+ if (retval2)
76197+ retval = retval2;
76198+ }
76199+ break;
76200+ }
76201+ } while ((tmpsubj = tmpsubj->parent_subject));
76202+ read_unlock(&gr_inode_lock);
76203+
76204+ return retval;
76205+}
76206+
76207+static __inline__ struct acl_object_label *
76208+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
76209+ const struct dentry *curr_dentry,
76210+ const struct acl_subject_label *subj, char **path, const int checkglob)
76211+{
76212+ int newglob = checkglob;
76213+
76214+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
76215+ as we don't want a / * rule to match instead of the / object
76216+ don't do this for create lookups that call this function though, since they're looking up
76217+ on the parent and thus need globbing checks on all paths
76218+ */
76219+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
76220+ newglob = GR_NO_GLOB;
76221+
76222+ return __full_lookup(orig_dentry, orig_mnt,
76223+ curr_dentry->d_inode->i_ino,
76224+ __get_dev(curr_dentry), subj, path, newglob);
76225+}
76226+
76227+static struct acl_object_label *
76228+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76229+ const struct acl_subject_label *subj, char *path, const int checkglob)
76230+{
76231+ struct dentry *dentry = (struct dentry *) l_dentry;
76232+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76233+ struct acl_object_label *retval;
76234+
76235+ spin_lock(&dcache_lock);
76236+ spin_lock(&vfsmount_lock);
76237+
76238+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
76239+#ifdef CONFIG_NET
76240+ mnt == sock_mnt ||
76241+#endif
76242+#ifdef CONFIG_HUGETLBFS
76243+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
76244+#endif
76245+ /* ignore Eric Biederman */
76246+ IS_PRIVATE(l_dentry->d_inode))) {
76247+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
76248+ goto out;
76249+ }
76250+
76251+ for (;;) {
76252+ if (dentry == real_root && mnt == real_root_mnt)
76253+ break;
76254+
76255+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76256+ if (mnt->mnt_parent == mnt)
76257+ break;
76258+
76259+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76260+ if (retval != NULL)
76261+ goto out;
76262+
76263+ dentry = mnt->mnt_mountpoint;
76264+ mnt = mnt->mnt_parent;
76265+ continue;
76266+ }
76267+
76268+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76269+ if (retval != NULL)
76270+ goto out;
76271+
76272+ dentry = dentry->d_parent;
76273+ }
76274+
76275+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
76276+
76277+ if (retval == NULL)
76278+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
76279+out:
76280+ spin_unlock(&vfsmount_lock);
76281+ spin_unlock(&dcache_lock);
76282+
76283+ BUG_ON(retval == NULL);
76284+
76285+ return retval;
76286+}
76287+
76288+static __inline__ struct acl_object_label *
76289+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76290+ const struct acl_subject_label *subj)
76291+{
76292+ char *path = NULL;
76293+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
76294+}
76295+
76296+static __inline__ struct acl_object_label *
76297+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76298+ const struct acl_subject_label *subj)
76299+{
76300+ char *path = NULL;
76301+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
76302+}
76303+
76304+static __inline__ struct acl_object_label *
76305+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76306+ const struct acl_subject_label *subj, char *path)
76307+{
76308+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
76309+}
76310+
76311+static struct acl_subject_label *
76312+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
76313+ const struct acl_role_label *role)
76314+{
76315+ struct dentry *dentry = (struct dentry *) l_dentry;
76316+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
76317+ struct acl_subject_label *retval;
76318+
76319+ spin_lock(&dcache_lock);
76320+ spin_lock(&vfsmount_lock);
76321+
76322+ for (;;) {
76323+ if (dentry == real_root && mnt == real_root_mnt)
76324+ break;
76325+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
76326+ if (mnt->mnt_parent == mnt)
76327+ break;
76328+
76329+ read_lock(&gr_inode_lock);
76330+ retval =
76331+ lookup_acl_subj_label(dentry->d_inode->i_ino,
76332+ __get_dev(dentry), role);
76333+ read_unlock(&gr_inode_lock);
76334+ if (retval != NULL)
76335+ goto out;
76336+
76337+ dentry = mnt->mnt_mountpoint;
76338+ mnt = mnt->mnt_parent;
76339+ continue;
76340+ }
76341+
76342+ read_lock(&gr_inode_lock);
76343+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76344+ __get_dev(dentry), role);
76345+ read_unlock(&gr_inode_lock);
76346+ if (retval != NULL)
76347+ goto out;
76348+
76349+ dentry = dentry->d_parent;
76350+ }
76351+
76352+ read_lock(&gr_inode_lock);
76353+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
76354+ __get_dev(dentry), role);
76355+ read_unlock(&gr_inode_lock);
76356+
76357+ if (unlikely(retval == NULL)) {
76358+ read_lock(&gr_inode_lock);
76359+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
76360+ __get_dev(real_root), role);
76361+ read_unlock(&gr_inode_lock);
76362+ }
76363+out:
76364+ spin_unlock(&vfsmount_lock);
76365+ spin_unlock(&dcache_lock);
76366+
76367+ BUG_ON(retval == NULL);
76368+
76369+ return retval;
76370+}
76371+
76372+static void
76373+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
76374+{
76375+ struct task_struct *task = current;
76376+ const struct cred *cred = current_cred();
76377+
76378+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76379+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76380+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76381+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
76382+
76383+ return;
76384+}
76385+
76386+static void
76387+gr_log_learn_sysctl(const char *path, const __u32 mode)
76388+{
76389+ struct task_struct *task = current;
76390+ const struct cred *cred = current_cred();
76391+
76392+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
76393+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76394+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76395+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
76396+
76397+ return;
76398+}
76399+
76400+static void
76401+gr_log_learn_id_change(const char type, const unsigned int real,
76402+ const unsigned int effective, const unsigned int fs)
76403+{
76404+ struct task_struct *task = current;
76405+ const struct cred *cred = current_cred();
76406+
76407+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
76408+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
76409+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
76410+ type, real, effective, fs, &task->signal->saved_ip);
76411+
76412+ return;
76413+}
76414+
76415+__u32
76416+gr_search_file(const struct dentry * dentry, const __u32 mode,
76417+ const struct vfsmount * mnt)
76418+{
76419+ __u32 retval = mode;
76420+ struct acl_subject_label *curracl;
76421+ struct acl_object_label *currobj;
76422+
76423+ if (unlikely(!(gr_status & GR_READY)))
76424+ return (mode & ~GR_AUDITS);
76425+
76426+ curracl = current->acl;
76427+
76428+ currobj = chk_obj_label(dentry, mnt, curracl);
76429+ retval = currobj->mode & mode;
76430+
76431+ /* if we're opening a specified transfer file for writing
76432+ (e.g. /dev/initctl), then transfer our role to init
76433+ */
76434+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
76435+ current->role->roletype & GR_ROLE_PERSIST)) {
76436+ struct task_struct *task = init_pid_ns.child_reaper;
76437+
76438+ if (task->role != current->role) {
76439+ task->acl_sp_role = 0;
76440+ task->acl_role_id = current->acl_role_id;
76441+ task->role = current->role;
76442+ rcu_read_lock();
76443+ read_lock(&grsec_exec_file_lock);
76444+ gr_apply_subject_to_task(task);
76445+ read_unlock(&grsec_exec_file_lock);
76446+ rcu_read_unlock();
76447+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
76448+ }
76449+ }
76450+
76451+ if (unlikely
76452+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
76453+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
76454+ __u32 new_mode = mode;
76455+
76456+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
76457+
76458+ retval = new_mode;
76459+
76460+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
76461+ new_mode |= GR_INHERIT;
76462+
76463+ if (!(mode & GR_NOLEARN))
76464+ gr_log_learn(dentry, mnt, new_mode);
76465+ }
76466+
76467+ return retval;
76468+}
76469+
76470+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
76471+ const struct dentry *parent,
76472+ const struct vfsmount *mnt)
76473+{
76474+ struct name_entry *match;
76475+ struct acl_object_label *matchpo;
76476+ struct acl_subject_label *curracl;
76477+ char *path;
76478+
76479+ if (unlikely(!(gr_status & GR_READY)))
76480+ return NULL;
76481+
76482+ preempt_disable();
76483+ path = gr_to_filename_rbac(new_dentry, mnt);
76484+ match = lookup_name_entry_create(path);
76485+
76486+ curracl = current->acl;
76487+
76488+ if (match) {
76489+ read_lock(&gr_inode_lock);
76490+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
76491+ read_unlock(&gr_inode_lock);
76492+
76493+ if (matchpo) {
76494+ preempt_enable();
76495+ return matchpo;
76496+ }
76497+ }
76498+
76499+ // lookup parent
76500+
76501+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
76502+
76503+ preempt_enable();
76504+ return matchpo;
76505+}
76506+
76507+__u32
76508+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
76509+ const struct vfsmount * mnt, const __u32 mode)
76510+{
76511+ struct acl_object_label *matchpo;
76512+ __u32 retval;
76513+
76514+ if (unlikely(!(gr_status & GR_READY)))
76515+ return (mode & ~GR_AUDITS);
76516+
76517+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
76518+
76519+ retval = matchpo->mode & mode;
76520+
76521+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
76522+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
76523+ __u32 new_mode = mode;
76524+
76525+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
76526+
76527+ gr_log_learn(new_dentry, mnt, new_mode);
76528+ return new_mode;
76529+ }
76530+
76531+ return retval;
76532+}
76533+
76534+__u32
76535+gr_check_link(const struct dentry * new_dentry,
76536+ const struct dentry * parent_dentry,
76537+ const struct vfsmount * parent_mnt,
76538+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
76539+{
76540+ struct acl_object_label *obj;
76541+ __u32 oldmode, newmode;
76542+ __u32 needmode;
76543+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
76544+ GR_DELETE | GR_INHERIT;
76545+
76546+ if (unlikely(!(gr_status & GR_READY)))
76547+ return (GR_CREATE | GR_LINK);
76548+
76549+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
76550+ oldmode = obj->mode;
76551+
76552+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
76553+ newmode = obj->mode;
76554+
76555+ needmode = newmode & checkmodes;
76556+
76557+ // old name for hardlink must have at least the permissions of the new name
76558+ if ((oldmode & needmode) != needmode)
76559+ goto bad;
76560+
76561+ // if old name had restrictions/auditing, make sure the new name does as well
76562+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
76563+
76564+ // don't allow hardlinking of suid/sgid files without permission
76565+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
76566+ needmode |= GR_SETID;
76567+
76568+ if ((newmode & needmode) != needmode)
76569+ goto bad;
76570+
76571+ // enforce minimum permissions
76572+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
76573+ return newmode;
76574+bad:
76575+ needmode = oldmode;
76576+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
76577+ needmode |= GR_SETID;
76578+
76579+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
76580+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
76581+ return (GR_CREATE | GR_LINK);
76582+ } else if (newmode & GR_SUPPRESS)
76583+ return GR_SUPPRESS;
76584+ else
76585+ return 0;
76586+}
76587+
76588+int
76589+gr_check_hidden_task(const struct task_struct *task)
76590+{
76591+ if (unlikely(!(gr_status & GR_READY)))
76592+ return 0;
76593+
76594+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
76595+ return 1;
76596+
76597+ return 0;
76598+}
76599+
76600+int
76601+gr_check_protected_task(const struct task_struct *task)
76602+{
76603+ if (unlikely(!(gr_status & GR_READY) || !task))
76604+ return 0;
76605+
76606+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
76607+ task->acl != current->acl)
76608+ return 1;
76609+
76610+ return 0;
76611+}
76612+
76613+int
76614+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
76615+{
76616+ struct task_struct *p;
76617+ int ret = 0;
76618+
76619+ if (unlikely(!(gr_status & GR_READY) || !pid))
76620+ return ret;
76621+
76622+ read_lock(&tasklist_lock);
76623+ do_each_pid_task(pid, type, p) {
76624+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
76625+ p->acl != current->acl) {
76626+ ret = 1;
76627+ goto out;
76628+ }
76629+ } while_each_pid_task(pid, type, p);
76630+out:
76631+ read_unlock(&tasklist_lock);
76632+
76633+ return ret;
76634+}
76635+
76636+void
76637+gr_copy_label(struct task_struct *tsk)
76638+{
76639+ /* plain copying of fields is already done by dup_task_struct */
76640+ tsk->signal->used_accept = 0;
76641+ tsk->acl_sp_role = 0;
76642+ //tsk->acl_role_id = current->acl_role_id;
76643+ //tsk->acl = current->acl;
76644+ //tsk->role = current->role;
76645+ tsk->signal->curr_ip = current->signal->curr_ip;
76646+ tsk->signal->saved_ip = current->signal->saved_ip;
76647+ if (current->exec_file)
76648+ get_file(current->exec_file);
76649+ //tsk->exec_file = current->exec_file;
76650+ //tsk->is_writable = current->is_writable;
76651+ if (unlikely(current->signal->used_accept)) {
76652+ current->signal->curr_ip = 0;
76653+ current->signal->saved_ip = 0;
76654+ }
76655+
76656+ return;
76657+}
76658+
76659+static void
76660+gr_set_proc_res(struct task_struct *task)
76661+{
76662+ struct acl_subject_label *proc;
76663+ unsigned short i;
76664+
76665+ proc = task->acl;
76666+
76667+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
76668+ return;
76669+
76670+ for (i = 0; i < RLIM_NLIMITS; i++) {
76671+ if (!(proc->resmask & (1 << i)))
76672+ continue;
76673+
76674+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
76675+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
76676+ }
76677+
76678+ return;
76679+}
76680+
76681+extern int __gr_process_user_ban(struct user_struct *user);
76682+
76683+int
76684+gr_check_user_change(int real, int effective, int fs)
76685+{
76686+ unsigned int i;
76687+ __u16 num;
76688+ uid_t *uidlist;
76689+ int curuid;
76690+ int realok = 0;
76691+ int effectiveok = 0;
76692+ int fsok = 0;
76693+
76694+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
76695+ struct user_struct *user;
76696+
76697+ if (real == -1)
76698+ goto skipit;
76699+
76700+ user = find_user(real);
76701+ if (user == NULL)
76702+ goto skipit;
76703+
76704+ if (__gr_process_user_ban(user)) {
76705+ /* for find_user */
76706+ free_uid(user);
76707+ return 1;
76708+ }
76709+
76710+ /* for find_user */
76711+ free_uid(user);
76712+
76713+skipit:
76714+#endif
76715+
76716+ if (unlikely(!(gr_status & GR_READY)))
76717+ return 0;
76718+
76719+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
76720+ gr_log_learn_id_change('u', real, effective, fs);
76721+
76722+ num = current->acl->user_trans_num;
76723+ uidlist = current->acl->user_transitions;
76724+
76725+ if (uidlist == NULL)
76726+ return 0;
76727+
76728+ if (real == -1)
76729+ realok = 1;
76730+ if (effective == -1)
76731+ effectiveok = 1;
76732+ if (fs == -1)
76733+ fsok = 1;
76734+
76735+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
76736+ for (i = 0; i < num; i++) {
76737+ curuid = (int)uidlist[i];
76738+ if (real == curuid)
76739+ realok = 1;
76740+ if (effective == curuid)
76741+ effectiveok = 1;
76742+ if (fs == curuid)
76743+ fsok = 1;
76744+ }
76745+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
76746+ for (i = 0; i < num; i++) {
76747+ curuid = (int)uidlist[i];
76748+ if (real == curuid)
76749+ break;
76750+ if (effective == curuid)
76751+ break;
76752+ if (fs == curuid)
76753+ break;
76754+ }
76755+ /* not in deny list */
76756+ if (i == num) {
76757+ realok = 1;
76758+ effectiveok = 1;
76759+ fsok = 1;
76760+ }
76761+ }
76762+
76763+ if (realok && effectiveok && fsok)
76764+ return 0;
76765+ else {
76766+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
76767+ return 1;
76768+ }
76769+}
76770+
76771+int
76772+gr_check_group_change(int real, int effective, int fs)
76773+{
76774+ unsigned int i;
76775+ __u16 num;
76776+ gid_t *gidlist;
76777+ int curgid;
76778+ int realok = 0;
76779+ int effectiveok = 0;
76780+ int fsok = 0;
76781+
76782+ if (unlikely(!(gr_status & GR_READY)))
76783+ return 0;
76784+
76785+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
76786+ gr_log_learn_id_change('g', real, effective, fs);
76787+
76788+ num = current->acl->group_trans_num;
76789+ gidlist = current->acl->group_transitions;
76790+
76791+ if (gidlist == NULL)
76792+ return 0;
76793+
76794+ if (real == -1)
76795+ realok = 1;
76796+ if (effective == -1)
76797+ effectiveok = 1;
76798+ if (fs == -1)
76799+ fsok = 1;
76800+
76801+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
76802+ for (i = 0; i < num; i++) {
76803+ curgid = (int)gidlist[i];
76804+ if (real == curgid)
76805+ realok = 1;
76806+ if (effective == curgid)
76807+ effectiveok = 1;
76808+ if (fs == curgid)
76809+ fsok = 1;
76810+ }
76811+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
76812+ for (i = 0; i < num; i++) {
76813+ curgid = (int)gidlist[i];
76814+ if (real == curgid)
76815+ break;
76816+ if (effective == curgid)
76817+ break;
76818+ if (fs == curgid)
76819+ break;
76820+ }
76821+ /* not in deny list */
76822+ if (i == num) {
76823+ realok = 1;
76824+ effectiveok = 1;
76825+ fsok = 1;
76826+ }
76827+ }
76828+
76829+ if (realok && effectiveok && fsok)
76830+ return 0;
76831+ else {
76832+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
76833+ return 1;
76834+ }
76835+}
76836+
76837+extern int gr_acl_is_capable(const int cap);
76838+
76839+void
76840+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
76841+{
76842+ struct acl_role_label *role = task->role;
76843+ struct acl_subject_label *subj = NULL;
76844+ struct acl_object_label *obj;
76845+ struct file *filp;
76846+
76847+ if (unlikely(!(gr_status & GR_READY)))
76848+ return;
76849+
76850+ filp = task->exec_file;
76851+
76852+ /* kernel process, we'll give them the kernel role */
76853+ if (unlikely(!filp)) {
76854+ task->role = kernel_role;
76855+ task->acl = kernel_role->root_label;
76856+ return;
76857+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
76858+ role = lookup_acl_role_label(task, uid, gid);
76859+
76860+ /* don't change the role if we're not a privileged process */
76861+ if (role && task->role != role &&
76862+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
76863+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
76864+ return;
76865+
76866+ /* perform subject lookup in possibly new role
76867+ we can use this result below in the case where role == task->role
76868+ */
76869+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
76870+
76871+ /* if we changed uid/gid, but result in the same role
76872+ and are using inheritance, don't lose the inherited subject
76873+ if current subject is other than what normal lookup
76874+ would result in, we arrived via inheritance, don't
76875+ lose subject
76876+ */
76877+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
76878+ (subj == task->acl)))
76879+ task->acl = subj;
76880+
76881+ task->role = role;
76882+
76883+ task->is_writable = 0;
76884+
76885+ /* ignore additional mmap checks for processes that are writable
76886+ by the default ACL */
76887+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
76888+ if (unlikely(obj->mode & GR_WRITE))
76889+ task->is_writable = 1;
76890+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
76891+ if (unlikely(obj->mode & GR_WRITE))
76892+ task->is_writable = 1;
76893+
76894+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76895+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
76896+#endif
76897+
76898+ gr_set_proc_res(task);
76899+
76900+ return;
76901+}
76902+
76903+int
76904+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
76905+ const int unsafe_flags)
76906+{
76907+ struct task_struct *task = current;
76908+ struct acl_subject_label *newacl;
76909+ struct acl_object_label *obj;
76910+ __u32 retmode;
76911+
76912+ if (unlikely(!(gr_status & GR_READY)))
76913+ return 0;
76914+
76915+ newacl = chk_subj_label(dentry, mnt, task->role);
76916+
76917+ task_lock(task);
76918+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
76919+ !(task->role->roletype & GR_ROLE_GOD) &&
76920+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
76921+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
76922+ task_unlock(task);
76923+ if (unsafe_flags & LSM_UNSAFE_SHARE)
76924+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
76925+ else
76926+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
76927+ return -EACCES;
76928+ }
76929+ task_unlock(task);
76930+
76931+ obj = chk_obj_label(dentry, mnt, task->acl);
76932+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
76933+
76934+ if (!(task->acl->mode & GR_INHERITLEARN) &&
76935+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
76936+ if (obj->nested)
76937+ task->acl = obj->nested;
76938+ else
76939+ task->acl = newacl;
76940+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
76941+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
76942+
76943+ task->is_writable = 0;
76944+
76945+ /* ignore additional mmap checks for processes that are writable
76946+ by the default ACL */
76947+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
76948+ if (unlikely(obj->mode & GR_WRITE))
76949+ task->is_writable = 1;
76950+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
76951+ if (unlikely(obj->mode & GR_WRITE))
76952+ task->is_writable = 1;
76953+
76954+ gr_set_proc_res(task);
76955+
76956+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
76957+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
76958+#endif
76959+ return 0;
76960+}
76961+
76962+/* always called with valid inodev ptr */
76963+static void
76964+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
76965+{
76966+ struct acl_object_label *matchpo;
76967+ struct acl_subject_label *matchps;
76968+ struct acl_subject_label *subj;
76969+ struct acl_role_label *role;
76970+ unsigned int x;
76971+
76972+ FOR_EACH_ROLE_START(role)
76973+ FOR_EACH_SUBJECT_START(role, subj, x)
76974+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
76975+ matchpo->mode |= GR_DELETED;
76976+ FOR_EACH_SUBJECT_END(subj,x)
76977+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
76978+ if (subj->inode == ino && subj->device == dev)
76979+ subj->mode |= GR_DELETED;
76980+ FOR_EACH_NESTED_SUBJECT_END(subj)
76981+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
76982+ matchps->mode |= GR_DELETED;
76983+ FOR_EACH_ROLE_END(role)
76984+
76985+ inodev->nentry->deleted = 1;
76986+
76987+ return;
76988+}
76989+
76990+void
76991+gr_handle_delete(const ino_t ino, const dev_t dev)
76992+{
76993+ struct inodev_entry *inodev;
76994+
76995+ if (unlikely(!(gr_status & GR_READY)))
76996+ return;
76997+
76998+ write_lock(&gr_inode_lock);
76999+ inodev = lookup_inodev_entry(ino, dev);
77000+ if (inodev != NULL)
77001+ do_handle_delete(inodev, ino, dev);
77002+ write_unlock(&gr_inode_lock);
77003+
77004+ return;
77005+}
77006+
77007+static void
77008+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
77009+ const ino_t newinode, const dev_t newdevice,
77010+ struct acl_subject_label *subj)
77011+{
77012+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
77013+ struct acl_object_label *match;
77014+
77015+ match = subj->obj_hash[index];
77016+
77017+ while (match && (match->inode != oldinode ||
77018+ match->device != olddevice ||
77019+ !(match->mode & GR_DELETED)))
77020+ match = match->next;
77021+
77022+ if (match && (match->inode == oldinode)
77023+ && (match->device == olddevice)
77024+ && (match->mode & GR_DELETED)) {
77025+ if (match->prev == NULL) {
77026+ subj->obj_hash[index] = match->next;
77027+ if (match->next != NULL)
77028+ match->next->prev = NULL;
77029+ } else {
77030+ match->prev->next = match->next;
77031+ if (match->next != NULL)
77032+ match->next->prev = match->prev;
77033+ }
77034+ match->prev = NULL;
77035+ match->next = NULL;
77036+ match->inode = newinode;
77037+ match->device = newdevice;
77038+ match->mode &= ~GR_DELETED;
77039+
77040+ insert_acl_obj_label(match, subj);
77041+ }
77042+
77043+ return;
77044+}
77045+
77046+static void
77047+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
77048+ const ino_t newinode, const dev_t newdevice,
77049+ struct acl_role_label *role)
77050+{
77051+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
77052+ struct acl_subject_label *match;
77053+
77054+ match = role->subj_hash[index];
77055+
77056+ while (match && (match->inode != oldinode ||
77057+ match->device != olddevice ||
77058+ !(match->mode & GR_DELETED)))
77059+ match = match->next;
77060+
77061+ if (match && (match->inode == oldinode)
77062+ && (match->device == olddevice)
77063+ && (match->mode & GR_DELETED)) {
77064+ if (match->prev == NULL) {
77065+ role->subj_hash[index] = match->next;
77066+ if (match->next != NULL)
77067+ match->next->prev = NULL;
77068+ } else {
77069+ match->prev->next = match->next;
77070+ if (match->next != NULL)
77071+ match->next->prev = match->prev;
77072+ }
77073+ match->prev = NULL;
77074+ match->next = NULL;
77075+ match->inode = newinode;
77076+ match->device = newdevice;
77077+ match->mode &= ~GR_DELETED;
77078+
77079+ insert_acl_subj_label(match, role);
77080+ }
77081+
77082+ return;
77083+}
77084+
77085+static void
77086+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
77087+ const ino_t newinode, const dev_t newdevice)
77088+{
77089+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
77090+ struct inodev_entry *match;
77091+
77092+ match = inodev_set.i_hash[index];
77093+
77094+ while (match && (match->nentry->inode != oldinode ||
77095+ match->nentry->device != olddevice || !match->nentry->deleted))
77096+ match = match->next;
77097+
77098+ if (match && (match->nentry->inode == oldinode)
77099+ && (match->nentry->device == olddevice) &&
77100+ match->nentry->deleted) {
77101+ if (match->prev == NULL) {
77102+ inodev_set.i_hash[index] = match->next;
77103+ if (match->next != NULL)
77104+ match->next->prev = NULL;
77105+ } else {
77106+ match->prev->next = match->next;
77107+ if (match->next != NULL)
77108+ match->next->prev = match->prev;
77109+ }
77110+ match->prev = NULL;
77111+ match->next = NULL;
77112+ match->nentry->inode = newinode;
77113+ match->nentry->device = newdevice;
77114+ match->nentry->deleted = 0;
77115+
77116+ insert_inodev_entry(match);
77117+ }
77118+
77119+ return;
77120+}
77121+
77122+static void
77123+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
77124+{
77125+ struct acl_subject_label *subj;
77126+ struct acl_role_label *role;
77127+ unsigned int x;
77128+
77129+ FOR_EACH_ROLE_START(role)
77130+ update_acl_subj_label(matchn->inode, matchn->device,
77131+ inode, dev, role);
77132+
77133+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
77134+ if ((subj->inode == inode) && (subj->device == dev)) {
77135+ subj->inode = inode;
77136+ subj->device = dev;
77137+ }
77138+ FOR_EACH_NESTED_SUBJECT_END(subj)
77139+ FOR_EACH_SUBJECT_START(role, subj, x)
77140+ update_acl_obj_label(matchn->inode, matchn->device,
77141+ inode, dev, subj);
77142+ FOR_EACH_SUBJECT_END(subj,x)
77143+ FOR_EACH_ROLE_END(role)
77144+
77145+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
77146+
77147+ return;
77148+}
77149+
77150+static void
77151+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
77152+ const struct vfsmount *mnt)
77153+{
77154+ ino_t ino = dentry->d_inode->i_ino;
77155+ dev_t dev = __get_dev(dentry);
77156+
77157+ __do_handle_create(matchn, ino, dev);
77158+
77159+ return;
77160+}
77161+
77162+void
77163+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
77164+{
77165+ struct name_entry *matchn;
77166+
77167+ if (unlikely(!(gr_status & GR_READY)))
77168+ return;
77169+
77170+ preempt_disable();
77171+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
77172+
77173+ if (unlikely((unsigned long)matchn)) {
77174+ write_lock(&gr_inode_lock);
77175+ do_handle_create(matchn, dentry, mnt);
77176+ write_unlock(&gr_inode_lock);
77177+ }
77178+ preempt_enable();
77179+
77180+ return;
77181+}
77182+
77183+void
77184+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
77185+{
77186+ struct name_entry *matchn;
77187+
77188+ if (unlikely(!(gr_status & GR_READY)))
77189+ return;
77190+
77191+ preempt_disable();
77192+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
77193+
77194+ if (unlikely((unsigned long)matchn)) {
77195+ write_lock(&gr_inode_lock);
77196+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
77197+ write_unlock(&gr_inode_lock);
77198+ }
77199+ preempt_enable();
77200+
77201+ return;
77202+}
77203+
77204+void
77205+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
77206+ struct dentry *old_dentry,
77207+ struct dentry *new_dentry,
77208+ struct vfsmount *mnt, const __u8 replace)
77209+{
77210+ struct name_entry *matchn;
77211+ struct inodev_entry *inodev;
77212+ struct inode *inode = new_dentry->d_inode;
77213+ ino_t oldinode = old_dentry->d_inode->i_ino;
77214+ dev_t olddev = __get_dev(old_dentry);
77215+
77216+ /* vfs_rename swaps the name and parent link for old_dentry and
77217+ new_dentry
77218+ at this point, old_dentry has the new name, parent link, and inode
77219+ for the renamed file
77220+ if a file is being replaced by a rename, new_dentry has the inode
77221+ and name for the replaced file
77222+ */
77223+
77224+ if (unlikely(!(gr_status & GR_READY)))
77225+ return;
77226+
77227+ preempt_disable();
77228+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
77229+
77230+ /* we wouldn't have to check d_inode if it weren't for
77231+ NFS silly-renaming
77232+ */
77233+
77234+ write_lock(&gr_inode_lock);
77235+ if (unlikely(replace && inode)) {
77236+ ino_t newinode = inode->i_ino;
77237+ dev_t newdev = __get_dev(new_dentry);
77238+ inodev = lookup_inodev_entry(newinode, newdev);
77239+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
77240+ do_handle_delete(inodev, newinode, newdev);
77241+ }
77242+
77243+ inodev = lookup_inodev_entry(oldinode, olddev);
77244+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
77245+ do_handle_delete(inodev, oldinode, olddev);
77246+
77247+ if (unlikely((unsigned long)matchn))
77248+ do_handle_create(matchn, old_dentry, mnt);
77249+
77250+ write_unlock(&gr_inode_lock);
77251+ preempt_enable();
77252+
77253+ return;
77254+}
77255+
77256+static int
77257+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
77258+ unsigned char **sum)
77259+{
77260+ struct acl_role_label *r;
77261+ struct role_allowed_ip *ipp;
77262+ struct role_transition *trans;
77263+ unsigned int i;
77264+ int found = 0;
77265+ u32 curr_ip = current->signal->curr_ip;
77266+
77267+ current->signal->saved_ip = curr_ip;
77268+
77269+ /* check transition table */
77270+
77271+ for (trans = current->role->transitions; trans; trans = trans->next) {
77272+ if (!strcmp(rolename, trans->rolename)) {
77273+ found = 1;
77274+ break;
77275+ }
77276+ }
77277+
77278+ if (!found)
77279+ return 0;
77280+
77281+ /* handle special roles that do not require authentication
77282+ and check ip */
77283+
77284+ FOR_EACH_ROLE_START(r)
77285+ if (!strcmp(rolename, r->rolename) &&
77286+ (r->roletype & GR_ROLE_SPECIAL)) {
77287+ found = 0;
77288+ if (r->allowed_ips != NULL) {
77289+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
77290+ if ((ntohl(curr_ip) & ipp->netmask) ==
77291+ (ntohl(ipp->addr) & ipp->netmask))
77292+ found = 1;
77293+ }
77294+ } else
77295+ found = 2;
77296+ if (!found)
77297+ return 0;
77298+
77299+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
77300+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
77301+ *salt = NULL;
77302+ *sum = NULL;
77303+ return 1;
77304+ }
77305+ }
77306+ FOR_EACH_ROLE_END(r)
77307+
77308+ for (i = 0; i < num_sprole_pws; i++) {
77309+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
77310+ *salt = acl_special_roles[i]->salt;
77311+ *sum = acl_special_roles[i]->sum;
77312+ return 1;
77313+ }
77314+ }
77315+
77316+ return 0;
77317+}
77318+
77319+static void
77320+assign_special_role(char *rolename)
77321+{
77322+ struct acl_object_label *obj;
77323+ struct acl_role_label *r;
77324+ struct acl_role_label *assigned = NULL;
77325+ struct task_struct *tsk;
77326+ struct file *filp;
77327+
77328+ FOR_EACH_ROLE_START(r)
77329+ if (!strcmp(rolename, r->rolename) &&
77330+ (r->roletype & GR_ROLE_SPECIAL)) {
77331+ assigned = r;
77332+ break;
77333+ }
77334+ FOR_EACH_ROLE_END(r)
77335+
77336+ if (!assigned)
77337+ return;
77338+
77339+ read_lock(&tasklist_lock);
77340+ read_lock(&grsec_exec_file_lock);
77341+
77342+ tsk = current->real_parent;
77343+ if (tsk == NULL)
77344+ goto out_unlock;
77345+
77346+ filp = tsk->exec_file;
77347+ if (filp == NULL)
77348+ goto out_unlock;
77349+
77350+ tsk->is_writable = 0;
77351+
77352+ tsk->acl_sp_role = 1;
77353+ tsk->acl_role_id = ++acl_sp_role_value;
77354+ tsk->role = assigned;
77355+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
77356+
77357+ /* ignore additional mmap checks for processes that are writable
77358+ by the default ACL */
77359+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77360+ if (unlikely(obj->mode & GR_WRITE))
77361+ tsk->is_writable = 1;
77362+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
77363+ if (unlikely(obj->mode & GR_WRITE))
77364+ tsk->is_writable = 1;
77365+
77366+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77367+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
77368+#endif
77369+
77370+out_unlock:
77371+ read_unlock(&grsec_exec_file_lock);
77372+ read_unlock(&tasklist_lock);
77373+ return;
77374+}
77375+
77376+int gr_check_secure_terminal(struct task_struct *task)
77377+{
77378+ struct task_struct *p, *p2, *p3;
77379+ struct files_struct *files;
77380+ struct fdtable *fdt;
77381+ struct file *our_file = NULL, *file;
77382+ int i;
77383+
77384+ if (task->signal->tty == NULL)
77385+ return 1;
77386+
77387+ files = get_files_struct(task);
77388+ if (files != NULL) {
77389+ rcu_read_lock();
77390+ fdt = files_fdtable(files);
77391+ for (i=0; i < fdt->max_fds; i++) {
77392+ file = fcheck_files(files, i);
77393+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
77394+ get_file(file);
77395+ our_file = file;
77396+ }
77397+ }
77398+ rcu_read_unlock();
77399+ put_files_struct(files);
77400+ }
77401+
77402+ if (our_file == NULL)
77403+ return 1;
77404+
77405+ read_lock(&tasklist_lock);
77406+ do_each_thread(p2, p) {
77407+ files = get_files_struct(p);
77408+ if (files == NULL ||
77409+ (p->signal && p->signal->tty == task->signal->tty)) {
77410+ if (files != NULL)
77411+ put_files_struct(files);
77412+ continue;
77413+ }
77414+ rcu_read_lock();
77415+ fdt = files_fdtable(files);
77416+ for (i=0; i < fdt->max_fds; i++) {
77417+ file = fcheck_files(files, i);
77418+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
77419+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
77420+ p3 = task;
77421+ while (p3->pid > 0) {
77422+ if (p3 == p)
77423+ break;
77424+ p3 = p3->real_parent;
77425+ }
77426+ if (p3 == p)
77427+ break;
77428+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
77429+ gr_handle_alertkill(p);
77430+ rcu_read_unlock();
77431+ put_files_struct(files);
77432+ read_unlock(&tasklist_lock);
77433+ fput(our_file);
77434+ return 0;
77435+ }
77436+ }
77437+ rcu_read_unlock();
77438+ put_files_struct(files);
77439+ } while_each_thread(p2, p);
77440+ read_unlock(&tasklist_lock);
77441+
77442+ fput(our_file);
77443+ return 1;
77444+}
77445+
77446+ssize_t
77447+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
77448+{
77449+ struct gr_arg_wrapper uwrap;
77450+ unsigned char *sprole_salt = NULL;
77451+ unsigned char *sprole_sum = NULL;
77452+ int error = sizeof (struct gr_arg_wrapper);
77453+ int error2 = 0;
77454+
77455+ mutex_lock(&gr_dev_mutex);
77456+
77457+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
77458+ error = -EPERM;
77459+ goto out;
77460+ }
77461+
77462+ if (count != sizeof (struct gr_arg_wrapper)) {
77463+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
77464+ error = -EINVAL;
77465+ goto out;
77466+ }
77467+
77468+
77469+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
77470+ gr_auth_expires = 0;
77471+ gr_auth_attempts = 0;
77472+ }
77473+
77474+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
77475+ error = -EFAULT;
77476+ goto out;
77477+ }
77478+
77479+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
77480+ error = -EINVAL;
77481+ goto out;
77482+ }
77483+
77484+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
77485+ error = -EFAULT;
77486+ goto out;
77487+ }
77488+
77489+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
77490+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
77491+ time_after(gr_auth_expires, get_seconds())) {
77492+ error = -EBUSY;
77493+ goto out;
77494+ }
77495+
77496+ /* if non-root trying to do anything other than use a special role,
77497+ do not attempt authentication, do not count towards authentication
77498+ locking
77499+ */
77500+
77501+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
77502+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
77503+ current_uid()) {
77504+ error = -EPERM;
77505+ goto out;
77506+ }
77507+
77508+ /* ensure pw and special role name are null terminated */
77509+
77510+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
77511+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
77512+
77513+ /* Okay.
77514+ * We have our enough of the argument structure..(we have yet
77515+ * to copy_from_user the tables themselves) . Copy the tables
77516+ * only if we need them, i.e. for loading operations. */
77517+
77518+ switch (gr_usermode->mode) {
77519+ case GR_STATUS:
77520+ if (gr_status & GR_READY) {
77521+ error = 1;
77522+ if (!gr_check_secure_terminal(current))
77523+ error = 3;
77524+ } else
77525+ error = 2;
77526+ goto out;
77527+ case GR_SHUTDOWN:
77528+ if ((gr_status & GR_READY)
77529+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
77530+ pax_open_kernel();
77531+ gr_status &= ~GR_READY;
77532+ pax_close_kernel();
77533+
77534+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
77535+ free_variables();
77536+ memset(gr_usermode, 0, sizeof (struct gr_arg));
77537+ memset(gr_system_salt, 0, GR_SALT_LEN);
77538+ memset(gr_system_sum, 0, GR_SHA_LEN);
77539+ } else if (gr_status & GR_READY) {
77540+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
77541+ error = -EPERM;
77542+ } else {
77543+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
77544+ error = -EAGAIN;
77545+ }
77546+ break;
77547+ case GR_ENABLE:
77548+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
77549+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
77550+ else {
77551+ if (gr_status & GR_READY)
77552+ error = -EAGAIN;
77553+ else
77554+ error = error2;
77555+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
77556+ }
77557+ break;
77558+ case GR_RELOAD:
77559+ if (!(gr_status & GR_READY)) {
77560+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
77561+ error = -EAGAIN;
77562+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
77563+ lock_kernel();
77564+
77565+ pax_open_kernel();
77566+ gr_status &= ~GR_READY;
77567+ pax_close_kernel();
77568+
77569+ free_variables();
77570+ if (!(error2 = gracl_init(gr_usermode))) {
77571+ unlock_kernel();
77572+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
77573+ } else {
77574+ unlock_kernel();
77575+ error = error2;
77576+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
77577+ }
77578+ } else {
77579+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
77580+ error = -EPERM;
77581+ }
77582+ break;
77583+ case GR_SEGVMOD:
77584+ if (unlikely(!(gr_status & GR_READY))) {
77585+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
77586+ error = -EAGAIN;
77587+ break;
77588+ }
77589+
77590+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
77591+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
77592+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
77593+ struct acl_subject_label *segvacl;
77594+ segvacl =
77595+ lookup_acl_subj_label(gr_usermode->segv_inode,
77596+ gr_usermode->segv_device,
77597+ current->role);
77598+ if (segvacl) {
77599+ segvacl->crashes = 0;
77600+ segvacl->expires = 0;
77601+ }
77602+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
77603+ gr_remove_uid(gr_usermode->segv_uid);
77604+ }
77605+ } else {
77606+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
77607+ error = -EPERM;
77608+ }
77609+ break;
77610+ case GR_SPROLE:
77611+ case GR_SPROLEPAM:
77612+ if (unlikely(!(gr_status & GR_READY))) {
77613+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
77614+ error = -EAGAIN;
77615+ break;
77616+ }
77617+
77618+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
77619+ current->role->expires = 0;
77620+ current->role->auth_attempts = 0;
77621+ }
77622+
77623+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
77624+ time_after(current->role->expires, get_seconds())) {
77625+ error = -EBUSY;
77626+ goto out;
77627+ }
77628+
77629+ if (lookup_special_role_auth
77630+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
77631+ && ((!sprole_salt && !sprole_sum)
77632+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
77633+ char *p = "";
77634+ assign_special_role(gr_usermode->sp_role);
77635+ read_lock(&tasklist_lock);
77636+ if (current->real_parent)
77637+ p = current->real_parent->role->rolename;
77638+ read_unlock(&tasklist_lock);
77639+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
77640+ p, acl_sp_role_value);
77641+ } else {
77642+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
77643+ error = -EPERM;
77644+ if(!(current->role->auth_attempts++))
77645+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
77646+
77647+ goto out;
77648+ }
77649+ break;
77650+ case GR_UNSPROLE:
77651+ if (unlikely(!(gr_status & GR_READY))) {
77652+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
77653+ error = -EAGAIN;
77654+ break;
77655+ }
77656+
77657+ if (current->role->roletype & GR_ROLE_SPECIAL) {
77658+ char *p = "";
77659+ int i = 0;
77660+
77661+ read_lock(&tasklist_lock);
77662+ if (current->real_parent) {
77663+ p = current->real_parent->role->rolename;
77664+ i = current->real_parent->acl_role_id;
77665+ }
77666+ read_unlock(&tasklist_lock);
77667+
77668+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
77669+ gr_set_acls(1);
77670+ } else {
77671+ error = -EPERM;
77672+ goto out;
77673+ }
77674+ break;
77675+ default:
77676+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
77677+ error = -EINVAL;
77678+ break;
77679+ }
77680+
77681+ if (error != -EPERM)
77682+ goto out;
77683+
77684+ if(!(gr_auth_attempts++))
77685+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
77686+
77687+ out:
77688+ mutex_unlock(&gr_dev_mutex);
77689+ return error;
77690+}
77691+
77692+/* must be called with
77693+ rcu_read_lock();
77694+ read_lock(&tasklist_lock);
77695+ read_lock(&grsec_exec_file_lock);
77696+*/
77697+int gr_apply_subject_to_task(struct task_struct *task)
77698+{
77699+ struct acl_object_label *obj;
77700+ char *tmpname;
77701+ struct acl_subject_label *tmpsubj;
77702+ struct file *filp;
77703+ struct name_entry *nmatch;
77704+
77705+ filp = task->exec_file;
77706+ if (filp == NULL)
77707+ return 0;
77708+
77709+ /* the following is to apply the correct subject
77710+ on binaries running when the RBAC system
77711+ is enabled, when the binaries have been
77712+ replaced or deleted since their execution
77713+ -----
77714+ when the RBAC system starts, the inode/dev
77715+ from exec_file will be one the RBAC system
77716+ is unaware of. It only knows the inode/dev
77717+ of the present file on disk, or the absence
77718+ of it.
77719+ */
77720+ preempt_disable();
77721+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
77722+
77723+ nmatch = lookup_name_entry(tmpname);
77724+ preempt_enable();
77725+ tmpsubj = NULL;
77726+ if (nmatch) {
77727+ if (nmatch->deleted)
77728+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
77729+ else
77730+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
77731+ if (tmpsubj != NULL)
77732+ task->acl = tmpsubj;
77733+ }
77734+ if (tmpsubj == NULL)
77735+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
77736+ task->role);
77737+ if (task->acl) {
77738+ task->is_writable = 0;
77739+ /* ignore additional mmap checks for processes that are writable
77740+ by the default ACL */
77741+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
77742+ if (unlikely(obj->mode & GR_WRITE))
77743+ task->is_writable = 1;
77744+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
77745+ if (unlikely(obj->mode & GR_WRITE))
77746+ task->is_writable = 1;
77747+
77748+ gr_set_proc_res(task);
77749+
77750+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
77751+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
77752+#endif
77753+ } else {
77754+ return 1;
77755+ }
77756+
77757+ return 0;
77758+}
77759+
77760+int
77761+gr_set_acls(const int type)
77762+{
77763+ struct task_struct *task, *task2;
77764+ struct acl_role_label *role = current->role;
77765+ __u16 acl_role_id = current->acl_role_id;
77766+ const struct cred *cred;
77767+ int ret;
77768+
77769+ rcu_read_lock();
77770+ read_lock(&tasklist_lock);
77771+ read_lock(&grsec_exec_file_lock);
77772+ do_each_thread(task2, task) {
77773+ /* check to see if we're called from the exit handler,
77774+ if so, only replace ACLs that have inherited the admin
77775+ ACL */
77776+
77777+ if (type && (task->role != role ||
77778+ task->acl_role_id != acl_role_id))
77779+ continue;
77780+
77781+ task->acl_role_id = 0;
77782+ task->acl_sp_role = 0;
77783+
77784+ if (task->exec_file) {
77785+ cred = __task_cred(task);
77786+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
77787+
77788+ ret = gr_apply_subject_to_task(task);
77789+ if (ret) {
77790+ read_unlock(&grsec_exec_file_lock);
77791+ read_unlock(&tasklist_lock);
77792+ rcu_read_unlock();
77793+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
77794+ return ret;
77795+ }
77796+ } else {
77797+ // it's a kernel process
77798+ task->role = kernel_role;
77799+ task->acl = kernel_role->root_label;
77800+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
77801+ task->acl->mode &= ~GR_PROCFIND;
77802+#endif
77803+ }
77804+ } while_each_thread(task2, task);
77805+ read_unlock(&grsec_exec_file_lock);
77806+ read_unlock(&tasklist_lock);
77807+ rcu_read_unlock();
77808+
77809+ return 0;
77810+}
77811+
77812+void
77813+gr_learn_resource(const struct task_struct *task,
77814+ const int res, const unsigned long wanted, const int gt)
77815+{
77816+ struct acl_subject_label *acl;
77817+ const struct cred *cred;
77818+
77819+ if (unlikely((gr_status & GR_READY) &&
77820+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
77821+ goto skip_reslog;
77822+
77823+#ifdef CONFIG_GRKERNSEC_RESLOG
77824+ gr_log_resource(task, res, wanted, gt);
77825+#endif
77826+ skip_reslog:
77827+
77828+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
77829+ return;
77830+
77831+ acl = task->acl;
77832+
77833+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
77834+ !(acl->resmask & (1 << (unsigned short) res))))
77835+ return;
77836+
77837+ if (wanted >= acl->res[res].rlim_cur) {
77838+ unsigned long res_add;
77839+
77840+ res_add = wanted;
77841+ switch (res) {
77842+ case RLIMIT_CPU:
77843+ res_add += GR_RLIM_CPU_BUMP;
77844+ break;
77845+ case RLIMIT_FSIZE:
77846+ res_add += GR_RLIM_FSIZE_BUMP;
77847+ break;
77848+ case RLIMIT_DATA:
77849+ res_add += GR_RLIM_DATA_BUMP;
77850+ break;
77851+ case RLIMIT_STACK:
77852+ res_add += GR_RLIM_STACK_BUMP;
77853+ break;
77854+ case RLIMIT_CORE:
77855+ res_add += GR_RLIM_CORE_BUMP;
77856+ break;
77857+ case RLIMIT_RSS:
77858+ res_add += GR_RLIM_RSS_BUMP;
77859+ break;
77860+ case RLIMIT_NPROC:
77861+ res_add += GR_RLIM_NPROC_BUMP;
77862+ break;
77863+ case RLIMIT_NOFILE:
77864+ res_add += GR_RLIM_NOFILE_BUMP;
77865+ break;
77866+ case RLIMIT_MEMLOCK:
77867+ res_add += GR_RLIM_MEMLOCK_BUMP;
77868+ break;
77869+ case RLIMIT_AS:
77870+ res_add += GR_RLIM_AS_BUMP;
77871+ break;
77872+ case RLIMIT_LOCKS:
77873+ res_add += GR_RLIM_LOCKS_BUMP;
77874+ break;
77875+ case RLIMIT_SIGPENDING:
77876+ res_add += GR_RLIM_SIGPENDING_BUMP;
77877+ break;
77878+ case RLIMIT_MSGQUEUE:
77879+ res_add += GR_RLIM_MSGQUEUE_BUMP;
77880+ break;
77881+ case RLIMIT_NICE:
77882+ res_add += GR_RLIM_NICE_BUMP;
77883+ break;
77884+ case RLIMIT_RTPRIO:
77885+ res_add += GR_RLIM_RTPRIO_BUMP;
77886+ break;
77887+ case RLIMIT_RTTIME:
77888+ res_add += GR_RLIM_RTTIME_BUMP;
77889+ break;
77890+ }
77891+
77892+ acl->res[res].rlim_cur = res_add;
77893+
77894+ if (wanted > acl->res[res].rlim_max)
77895+ acl->res[res].rlim_max = res_add;
77896+
77897+ /* only log the subject filename, since resource logging is supported for
77898+ single-subject learning only */
77899+ rcu_read_lock();
77900+ cred = __task_cred(task);
77901+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
77902+ task->role->roletype, cred->uid, cred->gid, acl->filename,
77903+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
77904+ "", (unsigned long) res, &task->signal->saved_ip);
77905+ rcu_read_unlock();
77906+ }
77907+
77908+ return;
77909+}
77910+
77911+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
77912+void
77913+pax_set_initial_flags(struct linux_binprm *bprm)
77914+{
77915+ struct task_struct *task = current;
77916+ struct acl_subject_label *proc;
77917+ unsigned long flags;
77918+
77919+ if (unlikely(!(gr_status & GR_READY)))
77920+ return;
77921+
77922+ flags = pax_get_flags(task);
77923+
77924+ proc = task->acl;
77925+
77926+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
77927+ flags &= ~MF_PAX_PAGEEXEC;
77928+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
77929+ flags &= ~MF_PAX_SEGMEXEC;
77930+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
77931+ flags &= ~MF_PAX_RANDMMAP;
77932+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
77933+ flags &= ~MF_PAX_EMUTRAMP;
77934+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
77935+ flags &= ~MF_PAX_MPROTECT;
77936+
77937+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
77938+ flags |= MF_PAX_PAGEEXEC;
77939+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
77940+ flags |= MF_PAX_SEGMEXEC;
77941+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
77942+ flags |= MF_PAX_RANDMMAP;
77943+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
77944+ flags |= MF_PAX_EMUTRAMP;
77945+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
77946+ flags |= MF_PAX_MPROTECT;
77947+
77948+ pax_set_flags(task, flags);
77949+
77950+ return;
77951+}
77952+#endif
77953+
77954+#ifdef CONFIG_SYSCTL
77955+/* Eric Biederman likes breaking userland ABI and every inode-based security
77956+ system to save 35kb of memory */
77957+
77958+/* we modify the passed in filename, but adjust it back before returning */
77959+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
77960+{
77961+ struct name_entry *nmatch;
77962+ char *p, *lastp = NULL;
77963+ struct acl_object_label *obj = NULL, *tmp;
77964+ struct acl_subject_label *tmpsubj;
77965+ char c = '\0';
77966+
77967+ read_lock(&gr_inode_lock);
77968+
77969+ p = name + len - 1;
77970+ do {
77971+ nmatch = lookup_name_entry(name);
77972+ if (lastp != NULL)
77973+ *lastp = c;
77974+
77975+ if (nmatch == NULL)
77976+ goto next_component;
77977+ tmpsubj = current->acl;
77978+ do {
77979+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
77980+ if (obj != NULL) {
77981+ tmp = obj->globbed;
77982+ while (tmp) {
77983+ if (!glob_match(tmp->filename, name)) {
77984+ obj = tmp;
77985+ goto found_obj;
77986+ }
77987+ tmp = tmp->next;
77988+ }
77989+ goto found_obj;
77990+ }
77991+ } while ((tmpsubj = tmpsubj->parent_subject));
77992+next_component:
77993+ /* end case */
77994+ if (p == name)
77995+ break;
77996+
77997+ while (*p != '/')
77998+ p--;
77999+ if (p == name)
78000+ lastp = p + 1;
78001+ else {
78002+ lastp = p;
78003+ p--;
78004+ }
78005+ c = *lastp;
78006+ *lastp = '\0';
78007+ } while (1);
78008+found_obj:
78009+ read_unlock(&gr_inode_lock);
78010+ /* obj returned will always be non-null */
78011+ return obj;
78012+}
78013+
78014+/* returns 0 when allowing, non-zero on error
78015+ op of 0 is used for readdir, so we don't log the names of hidden files
78016+*/
78017+__u32
78018+gr_handle_sysctl(const struct ctl_table *table, const int op)
78019+{
78020+ ctl_table *tmp;
78021+ const char *proc_sys = "/proc/sys";
78022+ char *path;
78023+ struct acl_object_label *obj;
78024+ unsigned short len = 0, pos = 0, depth = 0, i;
78025+ __u32 err = 0;
78026+ __u32 mode = 0;
78027+
78028+ if (unlikely(!(gr_status & GR_READY)))
78029+ return 0;
78030+
78031+ /* for now, ignore operations on non-sysctl entries if it's not a
78032+ readdir*/
78033+ if (table->child != NULL && op != 0)
78034+ return 0;
78035+
78036+ mode |= GR_FIND;
78037+ /* it's only a read if it's an entry, read on dirs is for readdir */
78038+ if (op & MAY_READ)
78039+ mode |= GR_READ;
78040+ if (op & MAY_WRITE)
78041+ mode |= GR_WRITE;
78042+
78043+ preempt_disable();
78044+
78045+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
78046+
78047+ /* it's only a read/write if it's an actual entry, not a dir
78048+ (which are opened for readdir)
78049+ */
78050+
78051+ /* convert the requested sysctl entry into a pathname */
78052+
78053+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78054+ len += strlen(tmp->procname);
78055+ len++;
78056+ depth++;
78057+ }
78058+
78059+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
78060+ /* deny */
78061+ goto out;
78062+ }
78063+
78064+ memset(path, 0, PAGE_SIZE);
78065+
78066+ memcpy(path, proc_sys, strlen(proc_sys));
78067+
78068+ pos += strlen(proc_sys);
78069+
78070+ for (; depth > 0; depth--) {
78071+ path[pos] = '/';
78072+ pos++;
78073+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
78074+ if (depth == i) {
78075+ memcpy(path + pos, tmp->procname,
78076+ strlen(tmp->procname));
78077+ pos += strlen(tmp->procname);
78078+ }
78079+ i++;
78080+ }
78081+ }
78082+
78083+ obj = gr_lookup_by_name(path, pos);
78084+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
78085+
78086+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
78087+ ((err & mode) != mode))) {
78088+ __u32 new_mode = mode;
78089+
78090+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
78091+
78092+ err = 0;
78093+ gr_log_learn_sysctl(path, new_mode);
78094+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
78095+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
78096+ err = -ENOENT;
78097+ } else if (!(err & GR_FIND)) {
78098+ err = -ENOENT;
78099+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
78100+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
78101+ path, (mode & GR_READ) ? " reading" : "",
78102+ (mode & GR_WRITE) ? " writing" : "");
78103+ err = -EACCES;
78104+ } else if ((err & mode) != mode) {
78105+ err = -EACCES;
78106+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
78107+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
78108+ path, (mode & GR_READ) ? " reading" : "",
78109+ (mode & GR_WRITE) ? " writing" : "");
78110+ err = 0;
78111+ } else
78112+ err = 0;
78113+
78114+ out:
78115+ preempt_enable();
78116+
78117+ return err;
78118+}
78119+#endif
78120+
78121+int
78122+gr_handle_proc_ptrace(struct task_struct *task)
78123+{
78124+ struct file *filp;
78125+ struct task_struct *tmp = task;
78126+ struct task_struct *curtemp = current;
78127+ __u32 retmode;
78128+
78129+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78130+ if (unlikely(!(gr_status & GR_READY)))
78131+ return 0;
78132+#endif
78133+
78134+ read_lock(&tasklist_lock);
78135+ read_lock(&grsec_exec_file_lock);
78136+ filp = task->exec_file;
78137+
78138+ while (tmp->pid > 0) {
78139+ if (tmp == curtemp)
78140+ break;
78141+ tmp = tmp->real_parent;
78142+ }
78143+
78144+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78145+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
78146+ read_unlock(&grsec_exec_file_lock);
78147+ read_unlock(&tasklist_lock);
78148+ return 1;
78149+ }
78150+
78151+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78152+ if (!(gr_status & GR_READY)) {
78153+ read_unlock(&grsec_exec_file_lock);
78154+ read_unlock(&tasklist_lock);
78155+ return 0;
78156+ }
78157+#endif
78158+
78159+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
78160+ read_unlock(&grsec_exec_file_lock);
78161+ read_unlock(&tasklist_lock);
78162+
78163+ if (retmode & GR_NOPTRACE)
78164+ return 1;
78165+
78166+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
78167+ && (current->acl != task->acl || (current->acl != current->role->root_label
78168+ && current->pid != task->pid)))
78169+ return 1;
78170+
78171+ return 0;
78172+}
78173+
78174+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
78175+{
78176+ if (unlikely(!(gr_status & GR_READY)))
78177+ return;
78178+
78179+ if (!(current->role->roletype & GR_ROLE_GOD))
78180+ return;
78181+
78182+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
78183+ p->role->rolename, gr_task_roletype_to_char(p),
78184+ p->acl->filename);
78185+}
78186+
78187+int
78188+gr_handle_ptrace(struct task_struct *task, const long request)
78189+{
78190+ struct task_struct *tmp = task;
78191+ struct task_struct *curtemp = current;
78192+ __u32 retmode;
78193+
78194+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
78195+ if (unlikely(!(gr_status & GR_READY)))
78196+ return 0;
78197+#endif
78198+
78199+ read_lock(&tasklist_lock);
78200+ while (tmp->pid > 0) {
78201+ if (tmp == curtemp)
78202+ break;
78203+ tmp = tmp->real_parent;
78204+ }
78205+
78206+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
78207+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
78208+ read_unlock(&tasklist_lock);
78209+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78210+ return 1;
78211+ }
78212+ read_unlock(&tasklist_lock);
78213+
78214+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
78215+ if (!(gr_status & GR_READY))
78216+ return 0;
78217+#endif
78218+
78219+ read_lock(&grsec_exec_file_lock);
78220+ if (unlikely(!task->exec_file)) {
78221+ read_unlock(&grsec_exec_file_lock);
78222+ return 0;
78223+ }
78224+
78225+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
78226+ read_unlock(&grsec_exec_file_lock);
78227+
78228+ if (retmode & GR_NOPTRACE) {
78229+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78230+ return 1;
78231+ }
78232+
78233+ if (retmode & GR_PTRACERD) {
78234+ switch (request) {
78235+ case PTRACE_POKETEXT:
78236+ case PTRACE_POKEDATA:
78237+ case PTRACE_POKEUSR:
78238+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
78239+ case PTRACE_SETREGS:
78240+ case PTRACE_SETFPREGS:
78241+#endif
78242+#ifdef CONFIG_X86
78243+ case PTRACE_SETFPXREGS:
78244+#endif
78245+#ifdef CONFIG_ALTIVEC
78246+ case PTRACE_SETVRREGS:
78247+#endif
78248+ return 1;
78249+ default:
78250+ return 0;
78251+ }
78252+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
78253+ !(current->role->roletype & GR_ROLE_GOD) &&
78254+ (current->acl != task->acl)) {
78255+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
78256+ return 1;
78257+ }
78258+
78259+ return 0;
78260+}
78261+
78262+static int is_writable_mmap(const struct file *filp)
78263+{
78264+ struct task_struct *task = current;
78265+ struct acl_object_label *obj, *obj2;
78266+
78267+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
78268+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
78269+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
78270+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
78271+ task->role->root_label);
78272+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
78273+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
78274+ return 1;
78275+ }
78276+ }
78277+ return 0;
78278+}
78279+
78280+int
78281+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
78282+{
78283+ __u32 mode;
78284+
78285+ if (unlikely(!file || !(prot & PROT_EXEC)))
78286+ return 1;
78287+
78288+ if (is_writable_mmap(file))
78289+ return 0;
78290+
78291+ mode =
78292+ gr_search_file(file->f_path.dentry,
78293+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78294+ file->f_path.mnt);
78295+
78296+ if (!gr_tpe_allow(file))
78297+ return 0;
78298+
78299+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78300+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78301+ return 0;
78302+ } else if (unlikely(!(mode & GR_EXEC))) {
78303+ return 0;
78304+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78305+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78306+ return 1;
78307+ }
78308+
78309+ return 1;
78310+}
78311+
78312+int
78313+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
78314+{
78315+ __u32 mode;
78316+
78317+ if (unlikely(!file || !(prot & PROT_EXEC)))
78318+ return 1;
78319+
78320+ if (is_writable_mmap(file))
78321+ return 0;
78322+
78323+ mode =
78324+ gr_search_file(file->f_path.dentry,
78325+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
78326+ file->f_path.mnt);
78327+
78328+ if (!gr_tpe_allow(file))
78329+ return 0;
78330+
78331+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
78332+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78333+ return 0;
78334+ } else if (unlikely(!(mode & GR_EXEC))) {
78335+ return 0;
78336+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
78337+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
78338+ return 1;
78339+ }
78340+
78341+ return 1;
78342+}
78343+
78344+void
78345+gr_acl_handle_psacct(struct task_struct *task, const long code)
78346+{
78347+ unsigned long runtime;
78348+ unsigned long cputime;
78349+ unsigned int wday, cday;
78350+ __u8 whr, chr;
78351+ __u8 wmin, cmin;
78352+ __u8 wsec, csec;
78353+ struct timespec timeval;
78354+
78355+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
78356+ !(task->acl->mode & GR_PROCACCT)))
78357+ return;
78358+
78359+ do_posix_clock_monotonic_gettime(&timeval);
78360+ runtime = timeval.tv_sec - task->start_time.tv_sec;
78361+ wday = runtime / (3600 * 24);
78362+ runtime -= wday * (3600 * 24);
78363+ whr = runtime / 3600;
78364+ runtime -= whr * 3600;
78365+ wmin = runtime / 60;
78366+ runtime -= wmin * 60;
78367+ wsec = runtime;
78368+
78369+ cputime = (task->utime + task->stime) / HZ;
78370+ cday = cputime / (3600 * 24);
78371+ cputime -= cday * (3600 * 24);
78372+ chr = cputime / 3600;
78373+ cputime -= chr * 3600;
78374+ cmin = cputime / 60;
78375+ cputime -= cmin * 60;
78376+ csec = cputime;
78377+
78378+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
78379+
78380+ return;
78381+}
78382+
78383+void gr_set_kernel_label(struct task_struct *task)
78384+{
78385+ if (gr_status & GR_READY) {
78386+ task->role = kernel_role;
78387+ task->acl = kernel_role->root_label;
78388+ }
78389+ return;
78390+}
78391+
78392+#ifdef CONFIG_TASKSTATS
78393+int gr_is_taskstats_denied(int pid)
78394+{
78395+ struct task_struct *task;
78396+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78397+ const struct cred *cred;
78398+#endif
78399+ int ret = 0;
78400+
78401+ /* restrict taskstats viewing to un-chrooted root users
78402+ who have the 'view' subject flag if the RBAC system is enabled
78403+ */
78404+
78405+ rcu_read_lock();
78406+ read_lock(&tasklist_lock);
78407+ task = find_task_by_vpid(pid);
78408+ if (task) {
78409+#ifdef CONFIG_GRKERNSEC_CHROOT
78410+ if (proc_is_chrooted(task))
78411+ ret = -EACCES;
78412+#endif
78413+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78414+ cred = __task_cred(task);
78415+#ifdef CONFIG_GRKERNSEC_PROC_USER
78416+ if (cred->uid != 0)
78417+ ret = -EACCES;
78418+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78419+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
78420+ ret = -EACCES;
78421+#endif
78422+#endif
78423+ if (gr_status & GR_READY) {
78424+ if (!(task->acl->mode & GR_VIEW))
78425+ ret = -EACCES;
78426+ }
78427+ } else
78428+ ret = -ENOENT;
78429+
78430+ read_unlock(&tasklist_lock);
78431+ rcu_read_unlock();
78432+
78433+ return ret;
78434+}
78435+#endif
78436+
78437+/* AUXV entries are filled via a descendant of search_binary_handler
78438+ after we've already applied the subject for the target
78439+*/
78440+int gr_acl_enable_at_secure(void)
78441+{
78442+ if (unlikely(!(gr_status & GR_READY)))
78443+ return 0;
78444+
78445+ if (current->acl->mode & GR_ATSECURE)
78446+ return 1;
78447+
78448+ return 0;
78449+}
78450+
78451+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
78452+{
78453+ struct task_struct *task = current;
78454+ struct dentry *dentry = file->f_path.dentry;
78455+ struct vfsmount *mnt = file->f_path.mnt;
78456+ struct acl_object_label *obj, *tmp;
78457+ struct acl_subject_label *subj;
78458+ unsigned int bufsize;
78459+ int is_not_root;
78460+ char *path;
78461+ dev_t dev = __get_dev(dentry);
78462+
78463+ if (unlikely(!(gr_status & GR_READY)))
78464+ return 1;
78465+
78466+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
78467+ return 1;
78468+
78469+ /* ignore Eric Biederman */
78470+ if (IS_PRIVATE(dentry->d_inode))
78471+ return 1;
78472+
78473+ subj = task->acl;
78474+ do {
78475+ obj = lookup_acl_obj_label(ino, dev, subj);
78476+ if (obj != NULL)
78477+ return (obj->mode & GR_FIND) ? 1 : 0;
78478+ } while ((subj = subj->parent_subject));
78479+
78480+ /* this is purely an optimization since we're looking for an object
78481+ for the directory we're doing a readdir on
78482+ if it's possible for any globbed object to match the entry we're
78483+ filling into the directory, then the object we find here will be
78484+ an anchor point with attached globbed objects
78485+ */
78486+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
78487+ if (obj->globbed == NULL)
78488+ return (obj->mode & GR_FIND) ? 1 : 0;
78489+
78490+ is_not_root = ((obj->filename[0] == '/') &&
78491+ (obj->filename[1] == '\0')) ? 0 : 1;
78492+ bufsize = PAGE_SIZE - namelen - is_not_root;
78493+
78494+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
78495+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
78496+ return 1;
78497+
78498+ preempt_disable();
78499+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
78500+ bufsize);
78501+
78502+ bufsize = strlen(path);
78503+
78504+ /* if base is "/", don't append an additional slash */
78505+ if (is_not_root)
78506+ *(path + bufsize) = '/';
78507+ memcpy(path + bufsize + is_not_root, name, namelen);
78508+ *(path + bufsize + namelen + is_not_root) = '\0';
78509+
78510+ tmp = obj->globbed;
78511+ while (tmp) {
78512+ if (!glob_match(tmp->filename, path)) {
78513+ preempt_enable();
78514+ return (tmp->mode & GR_FIND) ? 1 : 0;
78515+ }
78516+ tmp = tmp->next;
78517+ }
78518+ preempt_enable();
78519+ return (obj->mode & GR_FIND) ? 1 : 0;
78520+}
78521+
78522+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
78523+EXPORT_SYMBOL(gr_acl_is_enabled);
78524+#endif
78525+EXPORT_SYMBOL(gr_learn_resource);
78526+EXPORT_SYMBOL(gr_set_kernel_label);
78527+#ifdef CONFIG_SECURITY
78528+EXPORT_SYMBOL(gr_check_user_change);
78529+EXPORT_SYMBOL(gr_check_group_change);
78530+#endif
78531+
78532diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
78533new file mode 100644
78534index 0000000..34fefda
78535--- /dev/null
78536+++ b/grsecurity/gracl_alloc.c
78537@@ -0,0 +1,105 @@
78538+#include <linux/kernel.h>
78539+#include <linux/mm.h>
78540+#include <linux/slab.h>
78541+#include <linux/vmalloc.h>
78542+#include <linux/gracl.h>
78543+#include <linux/grsecurity.h>
78544+
78545+static unsigned long alloc_stack_next = 1;
78546+static unsigned long alloc_stack_size = 1;
78547+static void **alloc_stack;
78548+
78549+static __inline__ int
78550+alloc_pop(void)
78551+{
78552+ if (alloc_stack_next == 1)
78553+ return 0;
78554+
78555+ kfree(alloc_stack[alloc_stack_next - 2]);
78556+
78557+ alloc_stack_next--;
78558+
78559+ return 1;
78560+}
78561+
78562+static __inline__ int
78563+alloc_push(void *buf)
78564+{
78565+ if (alloc_stack_next >= alloc_stack_size)
78566+ return 1;
78567+
78568+ alloc_stack[alloc_stack_next - 1] = buf;
78569+
78570+ alloc_stack_next++;
78571+
78572+ return 0;
78573+}
78574+
78575+void *
78576+acl_alloc(unsigned long len)
78577+{
78578+ void *ret = NULL;
78579+
78580+ if (!len || len > PAGE_SIZE)
78581+ goto out;
78582+
78583+ ret = kmalloc(len, GFP_KERNEL);
78584+
78585+ if (ret) {
78586+ if (alloc_push(ret)) {
78587+ kfree(ret);
78588+ ret = NULL;
78589+ }
78590+ }
78591+
78592+out:
78593+ return ret;
78594+}
78595+
78596+void *
78597+acl_alloc_num(unsigned long num, unsigned long len)
78598+{
78599+ if (!len || (num > (PAGE_SIZE / len)))
78600+ return NULL;
78601+
78602+ return acl_alloc(num * len);
78603+}
78604+
78605+void
78606+acl_free_all(void)
78607+{
78608+ if (gr_acl_is_enabled() || !alloc_stack)
78609+ return;
78610+
78611+ while (alloc_pop()) ;
78612+
78613+ if (alloc_stack) {
78614+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
78615+ kfree(alloc_stack);
78616+ else
78617+ vfree(alloc_stack);
78618+ }
78619+
78620+ alloc_stack = NULL;
78621+ alloc_stack_size = 1;
78622+ alloc_stack_next = 1;
78623+
78624+ return;
78625+}
78626+
78627+int
78628+acl_alloc_stack_init(unsigned long size)
78629+{
78630+ if ((size * sizeof (void *)) <= PAGE_SIZE)
78631+ alloc_stack =
78632+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
78633+ else
78634+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
78635+
78636+ alloc_stack_size = size;
78637+
78638+ if (!alloc_stack)
78639+ return 0;
78640+ else
78641+ return 1;
78642+}
78643diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
78644new file mode 100644
78645index 0000000..955ddfb
78646--- /dev/null
78647+++ b/grsecurity/gracl_cap.c
78648@@ -0,0 +1,101 @@
78649+#include <linux/kernel.h>
78650+#include <linux/module.h>
78651+#include <linux/sched.h>
78652+#include <linux/gracl.h>
78653+#include <linux/grsecurity.h>
78654+#include <linux/grinternal.h>
78655+
78656+extern const char *captab_log[];
78657+extern int captab_log_entries;
78658+
78659+int
78660+gr_acl_is_capable(const int cap)
78661+{
78662+ struct task_struct *task = current;
78663+ const struct cred *cred = current_cred();
78664+ struct acl_subject_label *curracl;
78665+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
78666+ kernel_cap_t cap_audit = __cap_empty_set;
78667+
78668+ if (!gr_acl_is_enabled())
78669+ return 1;
78670+
78671+ curracl = task->acl;
78672+
78673+ cap_drop = curracl->cap_lower;
78674+ cap_mask = curracl->cap_mask;
78675+ cap_audit = curracl->cap_invert_audit;
78676+
78677+ while ((curracl = curracl->parent_subject)) {
78678+ /* if the cap isn't specified in the current computed mask but is specified in the
78679+ current level subject, and is lowered in the current level subject, then add
78680+ it to the set of dropped capabilities
78681+ otherwise, add the current level subject's mask to the current computed mask
78682+ */
78683+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
78684+ cap_raise(cap_mask, cap);
78685+ if (cap_raised(curracl->cap_lower, cap))
78686+ cap_raise(cap_drop, cap);
78687+ if (cap_raised(curracl->cap_invert_audit, cap))
78688+ cap_raise(cap_audit, cap);
78689+ }
78690+ }
78691+
78692+ if (!cap_raised(cap_drop, cap)) {
78693+ if (cap_raised(cap_audit, cap))
78694+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
78695+ return 1;
78696+ }
78697+
78698+ curracl = task->acl;
78699+
78700+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
78701+ && cap_raised(cred->cap_effective, cap)) {
78702+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
78703+ task->role->roletype, cred->uid,
78704+ cred->gid, task->exec_file ?
78705+ gr_to_filename(task->exec_file->f_path.dentry,
78706+ task->exec_file->f_path.mnt) : curracl->filename,
78707+ curracl->filename, 0UL,
78708+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
78709+ return 1;
78710+ }
78711+
78712+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
78713+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
78714+ return 0;
78715+}
78716+
78717+int
78718+gr_acl_is_capable_nolog(const int cap)
78719+{
78720+ struct acl_subject_label *curracl;
78721+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
78722+
78723+ if (!gr_acl_is_enabled())
78724+ return 1;
78725+
78726+ curracl = current->acl;
78727+
78728+ cap_drop = curracl->cap_lower;
78729+ cap_mask = curracl->cap_mask;
78730+
78731+ while ((curracl = curracl->parent_subject)) {
78732+ /* if the cap isn't specified in the current computed mask but is specified in the
78733+ current level subject, and is lowered in the current level subject, then add
78734+ it to the set of dropped capabilities
78735+ otherwise, add the current level subject's mask to the current computed mask
78736+ */
78737+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
78738+ cap_raise(cap_mask, cap);
78739+ if (cap_raised(curracl->cap_lower, cap))
78740+ cap_raise(cap_drop, cap);
78741+ }
78742+ }
78743+
78744+ if (!cap_raised(cap_drop, cap))
78745+ return 1;
78746+
78747+ return 0;
78748+}
78749+
78750diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
78751new file mode 100644
78752index 0000000..523e7e8
78753--- /dev/null
78754+++ b/grsecurity/gracl_fs.c
78755@@ -0,0 +1,435 @@
78756+#include <linux/kernel.h>
78757+#include <linux/sched.h>
78758+#include <linux/types.h>
78759+#include <linux/fs.h>
78760+#include <linux/file.h>
78761+#include <linux/stat.h>
78762+#include <linux/grsecurity.h>
78763+#include <linux/grinternal.h>
78764+#include <linux/gracl.h>
78765+
78766+umode_t
78767+gr_acl_umask(void)
78768+{
78769+ if (unlikely(!gr_acl_is_enabled()))
78770+ return 0;
78771+
78772+ return current->role->umask;
78773+}
78774+
78775+__u32
78776+gr_acl_handle_hidden_file(const struct dentry * dentry,
78777+ const struct vfsmount * mnt)
78778+{
78779+ __u32 mode;
78780+
78781+ if (unlikely(!dentry->d_inode))
78782+ return GR_FIND;
78783+
78784+ mode =
78785+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
78786+
78787+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
78788+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
78789+ return mode;
78790+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
78791+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
78792+ return 0;
78793+ } else if (unlikely(!(mode & GR_FIND)))
78794+ return 0;
78795+
78796+ return GR_FIND;
78797+}
78798+
78799+__u32
78800+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
78801+ int acc_mode)
78802+{
78803+ __u32 reqmode = GR_FIND;
78804+ __u32 mode;
78805+
78806+ if (unlikely(!dentry->d_inode))
78807+ return reqmode;
78808+
78809+ if (acc_mode & MAY_APPEND)
78810+ reqmode |= GR_APPEND;
78811+ else if (acc_mode & MAY_WRITE)
78812+ reqmode |= GR_WRITE;
78813+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
78814+ reqmode |= GR_READ;
78815+
78816+ mode =
78817+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
78818+ mnt);
78819+
78820+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
78821+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
78822+ reqmode & GR_READ ? " reading" : "",
78823+ reqmode & GR_WRITE ? " writing" : reqmode &
78824+ GR_APPEND ? " appending" : "");
78825+ return reqmode;
78826+ } else
78827+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
78828+ {
78829+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
78830+ reqmode & GR_READ ? " reading" : "",
78831+ reqmode & GR_WRITE ? " writing" : reqmode &
78832+ GR_APPEND ? " appending" : "");
78833+ return 0;
78834+ } else if (unlikely((mode & reqmode) != reqmode))
78835+ return 0;
78836+
78837+ return reqmode;
78838+}
78839+
78840+__u32
78841+gr_acl_handle_creat(const struct dentry * dentry,
78842+ const struct dentry * p_dentry,
78843+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
78844+ const int imode)
78845+{
78846+ __u32 reqmode = GR_WRITE | GR_CREATE;
78847+ __u32 mode;
78848+
78849+ if (acc_mode & MAY_APPEND)
78850+ reqmode |= GR_APPEND;
78851+ // if a directory was required or the directory already exists, then
78852+ // don't count this open as a read
78853+ if ((acc_mode & MAY_READ) &&
78854+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
78855+ reqmode |= GR_READ;
78856+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
78857+ reqmode |= GR_SETID;
78858+
78859+ mode =
78860+ gr_check_create(dentry, p_dentry, p_mnt,
78861+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
78862+
78863+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
78864+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
78865+ reqmode & GR_READ ? " reading" : "",
78866+ reqmode & GR_WRITE ? " writing" : reqmode &
78867+ GR_APPEND ? " appending" : "");
78868+ return reqmode;
78869+ } else
78870+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
78871+ {
78872+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
78873+ reqmode & GR_READ ? " reading" : "",
78874+ reqmode & GR_WRITE ? " writing" : reqmode &
78875+ GR_APPEND ? " appending" : "");
78876+ return 0;
78877+ } else if (unlikely((mode & reqmode) != reqmode))
78878+ return 0;
78879+
78880+ return reqmode;
78881+}
78882+
78883+__u32
78884+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
78885+ const int fmode)
78886+{
78887+ __u32 mode, reqmode = GR_FIND;
78888+
78889+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
78890+ reqmode |= GR_EXEC;
78891+ if (fmode & S_IWOTH)
78892+ reqmode |= GR_WRITE;
78893+ if (fmode & S_IROTH)
78894+ reqmode |= GR_READ;
78895+
78896+ mode =
78897+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
78898+ mnt);
78899+
78900+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
78901+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
78902+ reqmode & GR_READ ? " reading" : "",
78903+ reqmode & GR_WRITE ? " writing" : "",
78904+ reqmode & GR_EXEC ? " executing" : "");
78905+ return reqmode;
78906+ } else
78907+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
78908+ {
78909+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
78910+ reqmode & GR_READ ? " reading" : "",
78911+ reqmode & GR_WRITE ? " writing" : "",
78912+ reqmode & GR_EXEC ? " executing" : "");
78913+ return 0;
78914+ } else if (unlikely((mode & reqmode) != reqmode))
78915+ return 0;
78916+
78917+ return reqmode;
78918+}
78919+
78920+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
78921+{
78922+ __u32 mode;
78923+
78924+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
78925+
78926+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
78927+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
78928+ return mode;
78929+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
78930+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
78931+ return 0;
78932+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
78933+ return 0;
78934+
78935+ return (reqmode);
78936+}
78937+
78938+__u32
78939+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
78940+{
78941+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
78942+}
78943+
78944+__u32
78945+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
78946+{
78947+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
78948+}
78949+
78950+__u32
78951+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
78952+{
78953+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
78954+}
78955+
78956+__u32
78957+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
78958+{
78959+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
78960+}
78961+
78962+__u32
78963+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
78964+ umode_t *modeptr)
78965+{
78966+ mode_t mode;
78967+
78968+ *modeptr &= ~(mode_t)gr_acl_umask();
78969+ mode = *modeptr;
78970+
78971+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
78972+ return 1;
78973+
78974+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
78975+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
78976+ GR_CHMOD_ACL_MSG);
78977+ } else {
78978+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
78979+ }
78980+}
78981+
78982+__u32
78983+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
78984+{
78985+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
78986+}
78987+
78988+__u32
78989+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
78990+{
78991+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
78992+}
78993+
78994+__u32
78995+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
78996+{
78997+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
78998+}
78999+
79000+__u32
79001+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
79002+{
79003+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
79004+ GR_UNIXCONNECT_ACL_MSG);
79005+}
79006+
79007+/* hardlinks require at minimum create and link permission,
79008+ any additional privilege required is based on the
79009+ privilege of the file being linked to
79010+*/
79011+__u32
79012+gr_acl_handle_link(const struct dentry * new_dentry,
79013+ const struct dentry * parent_dentry,
79014+ const struct vfsmount * parent_mnt,
79015+ const struct dentry * old_dentry,
79016+ const struct vfsmount * old_mnt, const char *to)
79017+{
79018+ __u32 mode;
79019+ __u32 needmode = GR_CREATE | GR_LINK;
79020+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
79021+
79022+ mode =
79023+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
79024+ old_mnt);
79025+
79026+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
79027+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79028+ return mode;
79029+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79030+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
79031+ return 0;
79032+ } else if (unlikely((mode & needmode) != needmode))
79033+ return 0;
79034+
79035+ return 1;
79036+}
79037+
79038+__u32
79039+gr_acl_handle_symlink(const struct dentry * new_dentry,
79040+ const struct dentry * parent_dentry,
79041+ const struct vfsmount * parent_mnt, const char *from)
79042+{
79043+ __u32 needmode = GR_WRITE | GR_CREATE;
79044+ __u32 mode;
79045+
79046+ mode =
79047+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
79048+ GR_CREATE | GR_AUDIT_CREATE |
79049+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
79050+
79051+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
79052+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79053+ return mode;
79054+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
79055+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
79056+ return 0;
79057+ } else if (unlikely((mode & needmode) != needmode))
79058+ return 0;
79059+
79060+ return (GR_WRITE | GR_CREATE);
79061+}
79062+
79063+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
79064+{
79065+ __u32 mode;
79066+
79067+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
79068+
79069+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
79070+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
79071+ return mode;
79072+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
79073+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
79074+ return 0;
79075+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
79076+ return 0;
79077+
79078+ return (reqmode);
79079+}
79080+
79081+__u32
79082+gr_acl_handle_mknod(const struct dentry * new_dentry,
79083+ const struct dentry * parent_dentry,
79084+ const struct vfsmount * parent_mnt,
79085+ const int mode)
79086+{
79087+ __u32 reqmode = GR_WRITE | GR_CREATE;
79088+ if (unlikely(mode & (S_ISUID | S_ISGID)))
79089+ reqmode |= GR_SETID;
79090+
79091+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79092+ reqmode, GR_MKNOD_ACL_MSG);
79093+}
79094+
79095+__u32
79096+gr_acl_handle_mkdir(const struct dentry *new_dentry,
79097+ const struct dentry *parent_dentry,
79098+ const struct vfsmount *parent_mnt)
79099+{
79100+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
79101+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
79102+}
79103+
79104+#define RENAME_CHECK_SUCCESS(old, new) \
79105+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
79106+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
79107+
79108+int
79109+gr_acl_handle_rename(struct dentry *new_dentry,
79110+ struct dentry *parent_dentry,
79111+ const struct vfsmount *parent_mnt,
79112+ struct dentry *old_dentry,
79113+ struct inode *old_parent_inode,
79114+ struct vfsmount *old_mnt, const char *newname)
79115+{
79116+ __u32 comp1, comp2;
79117+ int error = 0;
79118+
79119+ if (unlikely(!gr_acl_is_enabled()))
79120+ return 0;
79121+
79122+ if (!new_dentry->d_inode) {
79123+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
79124+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
79125+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
79126+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
79127+ GR_DELETE | GR_AUDIT_DELETE |
79128+ GR_AUDIT_READ | GR_AUDIT_WRITE |
79129+ GR_SUPPRESS, old_mnt);
79130+ } else {
79131+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
79132+ GR_CREATE | GR_DELETE |
79133+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
79134+ GR_AUDIT_READ | GR_AUDIT_WRITE |
79135+ GR_SUPPRESS, parent_mnt);
79136+ comp2 =
79137+ gr_search_file(old_dentry,
79138+ GR_READ | GR_WRITE | GR_AUDIT_READ |
79139+ GR_DELETE | GR_AUDIT_DELETE |
79140+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
79141+ }
79142+
79143+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
79144+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
79145+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79146+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
79147+ && !(comp2 & GR_SUPPRESS)) {
79148+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
79149+ error = -EACCES;
79150+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
79151+ error = -EACCES;
79152+
79153+ return error;
79154+}
79155+
79156+void
79157+gr_acl_handle_exit(void)
79158+{
79159+ u16 id;
79160+ char *rolename;
79161+ struct file *exec_file;
79162+
79163+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
79164+ !(current->role->roletype & GR_ROLE_PERSIST))) {
79165+ id = current->acl_role_id;
79166+ rolename = current->role->rolename;
79167+ gr_set_acls(1);
79168+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
79169+ }
79170+
79171+ write_lock(&grsec_exec_file_lock);
79172+ exec_file = current->exec_file;
79173+ current->exec_file = NULL;
79174+ write_unlock(&grsec_exec_file_lock);
79175+
79176+ if (exec_file)
79177+ fput(exec_file);
79178+}
79179+
79180+int
79181+gr_acl_handle_procpidmem(const struct task_struct *task)
79182+{
79183+ if (unlikely(!gr_acl_is_enabled()))
79184+ return 0;
79185+
79186+ if (task != current && task->acl->mode & GR_PROTPROCFD)
79187+ return -EACCES;
79188+
79189+ return 0;
79190+}
79191diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
79192new file mode 100644
79193index 0000000..cd07b96
79194--- /dev/null
79195+++ b/grsecurity/gracl_ip.c
79196@@ -0,0 +1,382 @@
79197+#include <linux/kernel.h>
79198+#include <asm/uaccess.h>
79199+#include <asm/errno.h>
79200+#include <net/sock.h>
79201+#include <linux/file.h>
79202+#include <linux/fs.h>
79203+#include <linux/net.h>
79204+#include <linux/in.h>
79205+#include <linux/skbuff.h>
79206+#include <linux/ip.h>
79207+#include <linux/udp.h>
79208+#include <linux/smp_lock.h>
79209+#include <linux/types.h>
79210+#include <linux/sched.h>
79211+#include <linux/netdevice.h>
79212+#include <linux/inetdevice.h>
79213+#include <linux/gracl.h>
79214+#include <linux/grsecurity.h>
79215+#include <linux/grinternal.h>
79216+
79217+#define GR_BIND 0x01
79218+#define GR_CONNECT 0x02
79219+#define GR_INVERT 0x04
79220+#define GR_BINDOVERRIDE 0x08
79221+#define GR_CONNECTOVERRIDE 0x10
79222+#define GR_SOCK_FAMILY 0x20
79223+
79224+static const char * gr_protocols[IPPROTO_MAX] = {
79225+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
79226+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
79227+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
79228+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
79229+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
79230+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
79231+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
79232+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
79233+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
79234+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
79235+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
79236+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
79237+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
79238+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
79239+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
79240+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
79241+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
79242+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
79243+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
79244+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
79245+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
79246+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
79247+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
79248+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
79249+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
79250+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
79251+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
79252+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
79253+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
79254+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
79255+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
79256+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
79257+ };
79258+
79259+static const char * gr_socktypes[SOCK_MAX] = {
79260+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
79261+ "unknown:7", "unknown:8", "unknown:9", "packet"
79262+ };
79263+
79264+static const char * gr_sockfamilies[AF_MAX+1] = {
79265+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
79266+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
79267+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
79268+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
79269+ };
79270+
79271+const char *
79272+gr_proto_to_name(unsigned char proto)
79273+{
79274+ return gr_protocols[proto];
79275+}
79276+
79277+const char *
79278+gr_socktype_to_name(unsigned char type)
79279+{
79280+ return gr_socktypes[type];
79281+}
79282+
79283+const char *
79284+gr_sockfamily_to_name(unsigned char family)
79285+{
79286+ return gr_sockfamilies[family];
79287+}
79288+
79289+int
79290+gr_search_socket(const int domain, const int type, const int protocol)
79291+{
79292+ struct acl_subject_label *curr;
79293+ const struct cred *cred = current_cred();
79294+
79295+ if (unlikely(!gr_acl_is_enabled()))
79296+ goto exit;
79297+
79298+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
79299+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
79300+ goto exit; // let the kernel handle it
79301+
79302+ curr = current->acl;
79303+
79304+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
79305+ /* the family is allowed, if this is PF_INET allow it only if
79306+ the extra sock type/protocol checks pass */
79307+ if (domain == PF_INET)
79308+ goto inet_check;
79309+ goto exit;
79310+ } else {
79311+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79312+ __u32 fakeip = 0;
79313+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79314+ current->role->roletype, cred->uid,
79315+ cred->gid, current->exec_file ?
79316+ gr_to_filename(current->exec_file->f_path.dentry,
79317+ current->exec_file->f_path.mnt) :
79318+ curr->filename, curr->filename,
79319+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
79320+ &current->signal->saved_ip);
79321+ goto exit;
79322+ }
79323+ goto exit_fail;
79324+ }
79325+
79326+inet_check:
79327+ /* the rest of this checking is for IPv4 only */
79328+ if (!curr->ips)
79329+ goto exit;
79330+
79331+ if ((curr->ip_type & (1 << type)) &&
79332+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
79333+ goto exit;
79334+
79335+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79336+ /* we don't place acls on raw sockets , and sometimes
79337+ dgram/ip sockets are opened for ioctl and not
79338+ bind/connect, so we'll fake a bind learn log */
79339+ if (type == SOCK_RAW || type == SOCK_PACKET) {
79340+ __u32 fakeip = 0;
79341+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79342+ current->role->roletype, cred->uid,
79343+ cred->gid, current->exec_file ?
79344+ gr_to_filename(current->exec_file->f_path.dentry,
79345+ current->exec_file->f_path.mnt) :
79346+ curr->filename, curr->filename,
79347+ &fakeip, 0, type,
79348+ protocol, GR_CONNECT, &current->signal->saved_ip);
79349+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
79350+ __u32 fakeip = 0;
79351+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79352+ current->role->roletype, cred->uid,
79353+ cred->gid, current->exec_file ?
79354+ gr_to_filename(current->exec_file->f_path.dentry,
79355+ current->exec_file->f_path.mnt) :
79356+ curr->filename, curr->filename,
79357+ &fakeip, 0, type,
79358+ protocol, GR_BIND, &current->signal->saved_ip);
79359+ }
79360+ /* we'll log when they use connect or bind */
79361+ goto exit;
79362+ }
79363+
79364+exit_fail:
79365+ if (domain == PF_INET)
79366+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
79367+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
79368+ else
79369+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
79370+ gr_socktype_to_name(type), protocol);
79371+
79372+ return 0;
79373+exit:
79374+ return 1;
79375+}
79376+
79377+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
79378+{
79379+ if ((ip->mode & mode) &&
79380+ (ip_port >= ip->low) &&
79381+ (ip_port <= ip->high) &&
79382+ ((ntohl(ip_addr) & our_netmask) ==
79383+ (ntohl(our_addr) & our_netmask))
79384+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
79385+ && (ip->type & (1 << type))) {
79386+ if (ip->mode & GR_INVERT)
79387+ return 2; // specifically denied
79388+ else
79389+ return 1; // allowed
79390+ }
79391+
79392+ return 0; // not specifically allowed, may continue parsing
79393+}
79394+
79395+static int
79396+gr_search_connectbind(const int full_mode, struct sock *sk,
79397+ struct sockaddr_in *addr, const int type)
79398+{
79399+ char iface[IFNAMSIZ] = {0};
79400+ struct acl_subject_label *curr;
79401+ struct acl_ip_label *ip;
79402+ struct inet_sock *isk;
79403+ struct net_device *dev;
79404+ struct in_device *idev;
79405+ unsigned long i;
79406+ int ret;
79407+ int mode = full_mode & (GR_BIND | GR_CONNECT);
79408+ __u32 ip_addr = 0;
79409+ __u32 our_addr;
79410+ __u32 our_netmask;
79411+ char *p;
79412+ __u16 ip_port = 0;
79413+ const struct cred *cred = current_cred();
79414+
79415+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
79416+ return 0;
79417+
79418+ curr = current->acl;
79419+ isk = inet_sk(sk);
79420+
79421+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
79422+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
79423+ addr->sin_addr.s_addr = curr->inaddr_any_override;
79424+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
79425+ struct sockaddr_in saddr;
79426+ int err;
79427+
79428+ saddr.sin_family = AF_INET;
79429+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
79430+ saddr.sin_port = isk->sport;
79431+
79432+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
79433+ if (err)
79434+ return err;
79435+
79436+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
79437+ if (err)
79438+ return err;
79439+ }
79440+
79441+ if (!curr->ips)
79442+ return 0;
79443+
79444+ ip_addr = addr->sin_addr.s_addr;
79445+ ip_port = ntohs(addr->sin_port);
79446+
79447+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
79448+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
79449+ current->role->roletype, cred->uid,
79450+ cred->gid, current->exec_file ?
79451+ gr_to_filename(current->exec_file->f_path.dentry,
79452+ current->exec_file->f_path.mnt) :
79453+ curr->filename, curr->filename,
79454+ &ip_addr, ip_port, type,
79455+ sk->sk_protocol, mode, &current->signal->saved_ip);
79456+ return 0;
79457+ }
79458+
79459+ for (i = 0; i < curr->ip_num; i++) {
79460+ ip = *(curr->ips + i);
79461+ if (ip->iface != NULL) {
79462+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
79463+ p = strchr(iface, ':');
79464+ if (p != NULL)
79465+ *p = '\0';
79466+ dev = dev_get_by_name(sock_net(sk), iface);
79467+ if (dev == NULL)
79468+ continue;
79469+ idev = in_dev_get(dev);
79470+ if (idev == NULL) {
79471+ dev_put(dev);
79472+ continue;
79473+ }
79474+ rcu_read_lock();
79475+ for_ifa(idev) {
79476+ if (!strcmp(ip->iface, ifa->ifa_label)) {
79477+ our_addr = ifa->ifa_address;
79478+ our_netmask = 0xffffffff;
79479+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
79480+ if (ret == 1) {
79481+ rcu_read_unlock();
79482+ in_dev_put(idev);
79483+ dev_put(dev);
79484+ return 0;
79485+ } else if (ret == 2) {
79486+ rcu_read_unlock();
79487+ in_dev_put(idev);
79488+ dev_put(dev);
79489+ goto denied;
79490+ }
79491+ }
79492+ } endfor_ifa(idev);
79493+ rcu_read_unlock();
79494+ in_dev_put(idev);
79495+ dev_put(dev);
79496+ } else {
79497+ our_addr = ip->addr;
79498+ our_netmask = ip->netmask;
79499+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
79500+ if (ret == 1)
79501+ return 0;
79502+ else if (ret == 2)
79503+ goto denied;
79504+ }
79505+ }
79506+
79507+denied:
79508+ if (mode == GR_BIND)
79509+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
79510+ else if (mode == GR_CONNECT)
79511+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
79512+
79513+ return -EACCES;
79514+}
79515+
79516+int
79517+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
79518+{
79519+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
79520+}
79521+
79522+int
79523+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
79524+{
79525+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
79526+}
79527+
79528+int gr_search_listen(struct socket *sock)
79529+{
79530+ struct sock *sk = sock->sk;
79531+ struct sockaddr_in addr;
79532+
79533+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
79534+ addr.sin_port = inet_sk(sk)->sport;
79535+
79536+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
79537+}
79538+
79539+int gr_search_accept(struct socket *sock)
79540+{
79541+ struct sock *sk = sock->sk;
79542+ struct sockaddr_in addr;
79543+
79544+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
79545+ addr.sin_port = inet_sk(sk)->sport;
79546+
79547+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
79548+}
79549+
79550+int
79551+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
79552+{
79553+ if (addr)
79554+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
79555+ else {
79556+ struct sockaddr_in sin;
79557+ const struct inet_sock *inet = inet_sk(sk);
79558+
79559+ sin.sin_addr.s_addr = inet->daddr;
79560+ sin.sin_port = inet->dport;
79561+
79562+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
79563+ }
79564+}
79565+
79566+int
79567+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
79568+{
79569+ struct sockaddr_in sin;
79570+
79571+ if (unlikely(skb->len < sizeof (struct udphdr)))
79572+ return 0; // skip this packet
79573+
79574+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
79575+ sin.sin_port = udp_hdr(skb)->source;
79576+
79577+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
79578+}
79579diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
79580new file mode 100644
79581index 0000000..34bdd46
79582--- /dev/null
79583+++ b/grsecurity/gracl_learn.c
79584@@ -0,0 +1,208 @@
79585+#include <linux/kernel.h>
79586+#include <linux/mm.h>
79587+#include <linux/sched.h>
79588+#include <linux/poll.h>
79589+#include <linux/smp_lock.h>
79590+#include <linux/string.h>
79591+#include <linux/file.h>
79592+#include <linux/types.h>
79593+#include <linux/vmalloc.h>
79594+#include <linux/grinternal.h>
79595+
79596+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
79597+ size_t count, loff_t *ppos);
79598+extern int gr_acl_is_enabled(void);
79599+
79600+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
79601+static int gr_learn_attached;
79602+
79603+/* use a 512k buffer */
79604+#define LEARN_BUFFER_SIZE (512 * 1024)
79605+
79606+static DEFINE_SPINLOCK(gr_learn_lock);
79607+static DEFINE_MUTEX(gr_learn_user_mutex);
79608+
79609+/* we need to maintain two buffers, so that the kernel context of grlearn
79610+ uses a semaphore around the userspace copying, and the other kernel contexts
79611+ use a spinlock when copying into the buffer, since they cannot sleep
79612+*/
79613+static char *learn_buffer;
79614+static char *learn_buffer_user;
79615+static int learn_buffer_len;
79616+static int learn_buffer_user_len;
79617+
79618+static ssize_t
79619+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
79620+{
79621+ DECLARE_WAITQUEUE(wait, current);
79622+ ssize_t retval = 0;
79623+
79624+ add_wait_queue(&learn_wait, &wait);
79625+ set_current_state(TASK_INTERRUPTIBLE);
79626+ do {
79627+ mutex_lock(&gr_learn_user_mutex);
79628+ spin_lock(&gr_learn_lock);
79629+ if (learn_buffer_len)
79630+ break;
79631+ spin_unlock(&gr_learn_lock);
79632+ mutex_unlock(&gr_learn_user_mutex);
79633+ if (file->f_flags & O_NONBLOCK) {
79634+ retval = -EAGAIN;
79635+ goto out;
79636+ }
79637+ if (signal_pending(current)) {
79638+ retval = -ERESTARTSYS;
79639+ goto out;
79640+ }
79641+
79642+ schedule();
79643+ } while (1);
79644+
79645+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
79646+ learn_buffer_user_len = learn_buffer_len;
79647+ retval = learn_buffer_len;
79648+ learn_buffer_len = 0;
79649+
79650+ spin_unlock(&gr_learn_lock);
79651+
79652+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
79653+ retval = -EFAULT;
79654+
79655+ mutex_unlock(&gr_learn_user_mutex);
79656+out:
79657+ set_current_state(TASK_RUNNING);
79658+ remove_wait_queue(&learn_wait, &wait);
79659+ return retval;
79660+}
79661+
79662+static unsigned int
79663+poll_learn(struct file * file, poll_table * wait)
79664+{
79665+ poll_wait(file, &learn_wait, wait);
79666+
79667+ if (learn_buffer_len)
79668+ return (POLLIN | POLLRDNORM);
79669+
79670+ return 0;
79671+}
79672+
79673+void
79674+gr_clear_learn_entries(void)
79675+{
79676+ char *tmp;
79677+
79678+ mutex_lock(&gr_learn_user_mutex);
79679+ spin_lock(&gr_learn_lock);
79680+ tmp = learn_buffer;
79681+ learn_buffer = NULL;
79682+ spin_unlock(&gr_learn_lock);
79683+ if (tmp)
79684+ vfree(tmp);
79685+ if (learn_buffer_user != NULL) {
79686+ vfree(learn_buffer_user);
79687+ learn_buffer_user = NULL;
79688+ }
79689+ learn_buffer_len = 0;
79690+ mutex_unlock(&gr_learn_user_mutex);
79691+
79692+ return;
79693+}
79694+
79695+void
79696+gr_add_learn_entry(const char *fmt, ...)
79697+{
79698+ va_list args;
79699+ unsigned int len;
79700+
79701+ if (!gr_learn_attached)
79702+ return;
79703+
79704+ spin_lock(&gr_learn_lock);
79705+
79706+ /* leave a gap at the end so we know when it's "full" but don't have to
79707+ compute the exact length of the string we're trying to append
79708+ */
79709+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
79710+ spin_unlock(&gr_learn_lock);
79711+ wake_up_interruptible(&learn_wait);
79712+ return;
79713+ }
79714+ if (learn_buffer == NULL) {
79715+ spin_unlock(&gr_learn_lock);
79716+ return;
79717+ }
79718+
79719+ va_start(args, fmt);
79720+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
79721+ va_end(args);
79722+
79723+ learn_buffer_len += len + 1;
79724+
79725+ spin_unlock(&gr_learn_lock);
79726+ wake_up_interruptible(&learn_wait);
79727+
79728+ return;
79729+}
79730+
79731+static int
79732+open_learn(struct inode *inode, struct file *file)
79733+{
79734+ if (file->f_mode & FMODE_READ && gr_learn_attached)
79735+ return -EBUSY;
79736+ if (file->f_mode & FMODE_READ) {
79737+ int retval = 0;
79738+ mutex_lock(&gr_learn_user_mutex);
79739+ if (learn_buffer == NULL)
79740+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
79741+ if (learn_buffer_user == NULL)
79742+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
79743+ if (learn_buffer == NULL) {
79744+ retval = -ENOMEM;
79745+ goto out_error;
79746+ }
79747+ if (learn_buffer_user == NULL) {
79748+ retval = -ENOMEM;
79749+ goto out_error;
79750+ }
79751+ learn_buffer_len = 0;
79752+ learn_buffer_user_len = 0;
79753+ gr_learn_attached = 1;
79754+out_error:
79755+ mutex_unlock(&gr_learn_user_mutex);
79756+ return retval;
79757+ }
79758+ return 0;
79759+}
79760+
79761+static int
79762+close_learn(struct inode *inode, struct file *file)
79763+{
79764+ if (file->f_mode & FMODE_READ) {
79765+ char *tmp = NULL;
79766+ mutex_lock(&gr_learn_user_mutex);
79767+ spin_lock(&gr_learn_lock);
79768+ tmp = learn_buffer;
79769+ learn_buffer = NULL;
79770+ spin_unlock(&gr_learn_lock);
79771+ if (tmp)
79772+ vfree(tmp);
79773+ if (learn_buffer_user != NULL) {
79774+ vfree(learn_buffer_user);
79775+ learn_buffer_user = NULL;
79776+ }
79777+ learn_buffer_len = 0;
79778+ learn_buffer_user_len = 0;
79779+ gr_learn_attached = 0;
79780+ mutex_unlock(&gr_learn_user_mutex);
79781+ }
79782+
79783+ return 0;
79784+}
79785+
79786+const struct file_operations grsec_fops = {
79787+ .read = read_learn,
79788+ .write = write_grsec_handler,
79789+ .open = open_learn,
79790+ .release = close_learn,
79791+ .poll = poll_learn,
79792+};
79793diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
79794new file mode 100644
79795index 0000000..70b2179
79796--- /dev/null
79797+++ b/grsecurity/gracl_res.c
79798@@ -0,0 +1,67 @@
79799+#include <linux/kernel.h>
79800+#include <linux/sched.h>
79801+#include <linux/gracl.h>
79802+#include <linux/grinternal.h>
79803+
79804+static const char *restab_log[] = {
79805+ [RLIMIT_CPU] = "RLIMIT_CPU",
79806+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
79807+ [RLIMIT_DATA] = "RLIMIT_DATA",
79808+ [RLIMIT_STACK] = "RLIMIT_STACK",
79809+ [RLIMIT_CORE] = "RLIMIT_CORE",
79810+ [RLIMIT_RSS] = "RLIMIT_RSS",
79811+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
79812+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
79813+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
79814+ [RLIMIT_AS] = "RLIMIT_AS",
79815+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
79816+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
79817+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
79818+ [RLIMIT_NICE] = "RLIMIT_NICE",
79819+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
79820+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
79821+ [GR_CRASH_RES] = "RLIMIT_CRASH"
79822+};
79823+
79824+void
79825+gr_log_resource(const struct task_struct *task,
79826+ const int res, const unsigned long wanted, const int gt)
79827+{
79828+ const struct cred *cred;
79829+ unsigned long rlim;
79830+
79831+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
79832+ return;
79833+
79834+ // not yet supported resource
79835+ if (unlikely(!restab_log[res]))
79836+ return;
79837+
79838+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
79839+ rlim = task->signal->rlim[res].rlim_max;
79840+ else
79841+ rlim = task->signal->rlim[res].rlim_cur;
79842+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
79843+ return;
79844+
79845+ rcu_read_lock();
79846+ cred = __task_cred(task);
79847+
79848+ if (res == RLIMIT_NPROC &&
79849+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
79850+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
79851+ goto out_rcu_unlock;
79852+ else if (res == RLIMIT_MEMLOCK &&
79853+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
79854+ goto out_rcu_unlock;
79855+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
79856+ goto out_rcu_unlock;
79857+ rcu_read_unlock();
79858+
79859+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
79860+
79861+ return;
79862+out_rcu_unlock:
79863+ rcu_read_unlock();
79864+ return;
79865+}
79866diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
79867new file mode 100644
79868index 0000000..1d1b734
79869--- /dev/null
79870+++ b/grsecurity/gracl_segv.c
79871@@ -0,0 +1,284 @@
79872+#include <linux/kernel.h>
79873+#include <linux/mm.h>
79874+#include <asm/uaccess.h>
79875+#include <asm/errno.h>
79876+#include <asm/mman.h>
79877+#include <net/sock.h>
79878+#include <linux/file.h>
79879+#include <linux/fs.h>
79880+#include <linux/net.h>
79881+#include <linux/in.h>
79882+#include <linux/smp_lock.h>
79883+#include <linux/slab.h>
79884+#include <linux/types.h>
79885+#include <linux/sched.h>
79886+#include <linux/timer.h>
79887+#include <linux/gracl.h>
79888+#include <linux/grsecurity.h>
79889+#include <linux/grinternal.h>
79890+
79891+static struct crash_uid *uid_set;
79892+static unsigned short uid_used;
79893+static DEFINE_SPINLOCK(gr_uid_lock);
79894+extern rwlock_t gr_inode_lock;
79895+extern struct acl_subject_label *
79896+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
79897+ struct acl_role_label *role);
79898+extern int gr_fake_force_sig(int sig, struct task_struct *t);
79899+
79900+int
79901+gr_init_uidset(void)
79902+{
79903+ uid_set =
79904+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
79905+ uid_used = 0;
79906+
79907+ return uid_set ? 1 : 0;
79908+}
79909+
79910+void
79911+gr_free_uidset(void)
79912+{
79913+ if (uid_set)
79914+ kfree(uid_set);
79915+
79916+ return;
79917+}
79918+
79919+int
79920+gr_find_uid(const uid_t uid)
79921+{
79922+ struct crash_uid *tmp = uid_set;
79923+ uid_t buid;
79924+ int low = 0, high = uid_used - 1, mid;
79925+
79926+ while (high >= low) {
79927+ mid = (low + high) >> 1;
79928+ buid = tmp[mid].uid;
79929+ if (buid == uid)
79930+ return mid;
79931+ if (buid > uid)
79932+ high = mid - 1;
79933+ if (buid < uid)
79934+ low = mid + 1;
79935+ }
79936+
79937+ return -1;
79938+}
79939+
79940+static __inline__ void
79941+gr_insertsort(void)
79942+{
79943+ unsigned short i, j;
79944+ struct crash_uid index;
79945+
79946+ for (i = 1; i < uid_used; i++) {
79947+ index = uid_set[i];
79948+ j = i;
79949+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
79950+ uid_set[j] = uid_set[j - 1];
79951+ j--;
79952+ }
79953+ uid_set[j] = index;
79954+ }
79955+
79956+ return;
79957+}
79958+
79959+static __inline__ void
79960+gr_insert_uid(const uid_t uid, const unsigned long expires)
79961+{
79962+ int loc;
79963+
79964+ if (uid_used == GR_UIDTABLE_MAX)
79965+ return;
79966+
79967+ loc = gr_find_uid(uid);
79968+
79969+ if (loc >= 0) {
79970+ uid_set[loc].expires = expires;
79971+ return;
79972+ }
79973+
79974+ uid_set[uid_used].uid = uid;
79975+ uid_set[uid_used].expires = expires;
79976+ uid_used++;
79977+
79978+ gr_insertsort();
79979+
79980+ return;
79981+}
79982+
79983+void
79984+gr_remove_uid(const unsigned short loc)
79985+{
79986+ unsigned short i;
79987+
79988+ for (i = loc + 1; i < uid_used; i++)
79989+ uid_set[i - 1] = uid_set[i];
79990+
79991+ uid_used--;
79992+
79993+ return;
79994+}
79995+
79996+int
79997+gr_check_crash_uid(const uid_t uid)
79998+{
79999+ int loc;
80000+ int ret = 0;
80001+
80002+ if (unlikely(!gr_acl_is_enabled()))
80003+ return 0;
80004+
80005+ spin_lock(&gr_uid_lock);
80006+ loc = gr_find_uid(uid);
80007+
80008+ if (loc < 0)
80009+ goto out_unlock;
80010+
80011+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
80012+ gr_remove_uid(loc);
80013+ else
80014+ ret = 1;
80015+
80016+out_unlock:
80017+ spin_unlock(&gr_uid_lock);
80018+ return ret;
80019+}
80020+
80021+static __inline__ int
80022+proc_is_setxid(const struct cred *cred)
80023+{
80024+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
80025+ cred->uid != cred->fsuid)
80026+ return 1;
80027+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
80028+ cred->gid != cred->fsgid)
80029+ return 1;
80030+
80031+ return 0;
80032+}
80033+
80034+void
80035+gr_handle_crash(struct task_struct *task, const int sig)
80036+{
80037+ struct acl_subject_label *curr;
80038+ struct task_struct *tsk, *tsk2;
80039+ const struct cred *cred;
80040+ const struct cred *cred2;
80041+
80042+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
80043+ return;
80044+
80045+ if (unlikely(!gr_acl_is_enabled()))
80046+ return;
80047+
80048+ curr = task->acl;
80049+
80050+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
80051+ return;
80052+
80053+ if (time_before_eq(curr->expires, get_seconds())) {
80054+ curr->expires = 0;
80055+ curr->crashes = 0;
80056+ }
80057+
80058+ curr->crashes++;
80059+
80060+ if (!curr->expires)
80061+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
80062+
80063+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80064+ time_after(curr->expires, get_seconds())) {
80065+ rcu_read_lock();
80066+ cred = __task_cred(task);
80067+ if (cred->uid && proc_is_setxid(cred)) {
80068+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80069+ spin_lock(&gr_uid_lock);
80070+ gr_insert_uid(cred->uid, curr->expires);
80071+ spin_unlock(&gr_uid_lock);
80072+ curr->expires = 0;
80073+ curr->crashes = 0;
80074+ read_lock(&tasklist_lock);
80075+ do_each_thread(tsk2, tsk) {
80076+ cred2 = __task_cred(tsk);
80077+ if (tsk != task && cred2->uid == cred->uid)
80078+ gr_fake_force_sig(SIGKILL, tsk);
80079+ } while_each_thread(tsk2, tsk);
80080+ read_unlock(&tasklist_lock);
80081+ } else {
80082+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
80083+ read_lock(&tasklist_lock);
80084+ read_lock(&grsec_exec_file_lock);
80085+ do_each_thread(tsk2, tsk) {
80086+ if (likely(tsk != task)) {
80087+ // if this thread has the same subject as the one that triggered
80088+ // RES_CRASH and it's the same binary, kill it
80089+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
80090+ gr_fake_force_sig(SIGKILL, tsk);
80091+ }
80092+ } while_each_thread(tsk2, tsk);
80093+ read_unlock(&grsec_exec_file_lock);
80094+ read_unlock(&tasklist_lock);
80095+ }
80096+ rcu_read_unlock();
80097+ }
80098+
80099+ return;
80100+}
80101+
80102+int
80103+gr_check_crash_exec(const struct file *filp)
80104+{
80105+ struct acl_subject_label *curr;
80106+
80107+ if (unlikely(!gr_acl_is_enabled()))
80108+ return 0;
80109+
80110+ read_lock(&gr_inode_lock);
80111+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
80112+ filp->f_path.dentry->d_inode->i_sb->s_dev,
80113+ current->role);
80114+ read_unlock(&gr_inode_lock);
80115+
80116+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
80117+ (!curr->crashes && !curr->expires))
80118+ return 0;
80119+
80120+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
80121+ time_after(curr->expires, get_seconds()))
80122+ return 1;
80123+ else if (time_before_eq(curr->expires, get_seconds())) {
80124+ curr->crashes = 0;
80125+ curr->expires = 0;
80126+ }
80127+
80128+ return 0;
80129+}
80130+
80131+void
80132+gr_handle_alertkill(struct task_struct *task)
80133+{
80134+ struct acl_subject_label *curracl;
80135+ __u32 curr_ip;
80136+ struct task_struct *p, *p2;
80137+
80138+ if (unlikely(!gr_acl_is_enabled()))
80139+ return;
80140+
80141+ curracl = task->acl;
80142+ curr_ip = task->signal->curr_ip;
80143+
80144+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
80145+ read_lock(&tasklist_lock);
80146+ do_each_thread(p2, p) {
80147+ if (p->signal->curr_ip == curr_ip)
80148+ gr_fake_force_sig(SIGKILL, p);
80149+ } while_each_thread(p2, p);
80150+ read_unlock(&tasklist_lock);
80151+ } else if (curracl->mode & GR_KILLPROC)
80152+ gr_fake_force_sig(SIGKILL, task);
80153+
80154+ return;
80155+}
80156diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
80157new file mode 100644
80158index 0000000..9d83a69
80159--- /dev/null
80160+++ b/grsecurity/gracl_shm.c
80161@@ -0,0 +1,40 @@
80162+#include <linux/kernel.h>
80163+#include <linux/mm.h>
80164+#include <linux/sched.h>
80165+#include <linux/file.h>
80166+#include <linux/ipc.h>
80167+#include <linux/gracl.h>
80168+#include <linux/grsecurity.h>
80169+#include <linux/grinternal.h>
80170+
80171+int
80172+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80173+ const time_t shm_createtime, const uid_t cuid, const int shmid)
80174+{
80175+ struct task_struct *task;
80176+
80177+ if (!gr_acl_is_enabled())
80178+ return 1;
80179+
80180+ rcu_read_lock();
80181+ read_lock(&tasklist_lock);
80182+
80183+ task = find_task_by_vpid(shm_cprid);
80184+
80185+ if (unlikely(!task))
80186+ task = find_task_by_vpid(shm_lapid);
80187+
80188+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
80189+ (task->pid == shm_lapid)) &&
80190+ (task->acl->mode & GR_PROTSHM) &&
80191+ (task->acl != current->acl))) {
80192+ read_unlock(&tasklist_lock);
80193+ rcu_read_unlock();
80194+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
80195+ return 0;
80196+ }
80197+ read_unlock(&tasklist_lock);
80198+ rcu_read_unlock();
80199+
80200+ return 1;
80201+}
80202diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
80203new file mode 100644
80204index 0000000..bc0be01
80205--- /dev/null
80206+++ b/grsecurity/grsec_chdir.c
80207@@ -0,0 +1,19 @@
80208+#include <linux/kernel.h>
80209+#include <linux/sched.h>
80210+#include <linux/fs.h>
80211+#include <linux/file.h>
80212+#include <linux/grsecurity.h>
80213+#include <linux/grinternal.h>
80214+
80215+void
80216+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
80217+{
80218+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
80219+ if ((grsec_enable_chdir && grsec_enable_group &&
80220+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
80221+ !grsec_enable_group)) {
80222+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
80223+ }
80224+#endif
80225+ return;
80226+}
80227diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
80228new file mode 100644
80229index 0000000..197bdd5
80230--- /dev/null
80231+++ b/grsecurity/grsec_chroot.c
80232@@ -0,0 +1,386 @@
80233+#include <linux/kernel.h>
80234+#include <linux/module.h>
80235+#include <linux/sched.h>
80236+#include <linux/file.h>
80237+#include <linux/fs.h>
80238+#include <linux/mount.h>
80239+#include <linux/types.h>
80240+#include <linux/pid_namespace.h>
80241+#include <linux/grsecurity.h>
80242+#include <linux/grinternal.h>
80243+
80244+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
80245+{
80246+#ifdef CONFIG_GRKERNSEC
80247+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
80248+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
80249+ task->gr_is_chrooted = 1;
80250+ else
80251+ task->gr_is_chrooted = 0;
80252+
80253+ task->gr_chroot_dentry = path->dentry;
80254+#endif
80255+ return;
80256+}
80257+
80258+void gr_clear_chroot_entries(struct task_struct *task)
80259+{
80260+#ifdef CONFIG_GRKERNSEC
80261+ task->gr_is_chrooted = 0;
80262+ task->gr_chroot_dentry = NULL;
80263+#endif
80264+ return;
80265+}
80266+
80267+int
80268+gr_handle_chroot_unix(const pid_t pid)
80269+{
80270+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
80271+ struct task_struct *p;
80272+
80273+ if (unlikely(!grsec_enable_chroot_unix))
80274+ return 1;
80275+
80276+ if (likely(!proc_is_chrooted(current)))
80277+ return 1;
80278+
80279+ rcu_read_lock();
80280+ read_lock(&tasklist_lock);
80281+
80282+ p = find_task_by_vpid_unrestricted(pid);
80283+ if (unlikely(p && !have_same_root(current, p))) {
80284+ read_unlock(&tasklist_lock);
80285+ rcu_read_unlock();
80286+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
80287+ return 0;
80288+ }
80289+ read_unlock(&tasklist_lock);
80290+ rcu_read_unlock();
80291+#endif
80292+ return 1;
80293+}
80294+
80295+int
80296+gr_handle_chroot_nice(void)
80297+{
80298+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80299+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
80300+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
80301+ return -EPERM;
80302+ }
80303+#endif
80304+ return 0;
80305+}
80306+
80307+int
80308+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
80309+{
80310+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
80311+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
80312+ && proc_is_chrooted(current)) {
80313+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
80314+ return -EACCES;
80315+ }
80316+#endif
80317+ return 0;
80318+}
80319+
80320+int
80321+gr_handle_chroot_rawio(const struct inode *inode)
80322+{
80323+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80324+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
80325+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
80326+ return 1;
80327+#endif
80328+ return 0;
80329+}
80330+
80331+int
80332+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
80333+{
80334+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80335+ struct task_struct *p;
80336+ int ret = 0;
80337+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
80338+ return ret;
80339+
80340+ read_lock(&tasklist_lock);
80341+ do_each_pid_task(pid, type, p) {
80342+ if (!have_same_root(current, p)) {
80343+ ret = 1;
80344+ goto out;
80345+ }
80346+ } while_each_pid_task(pid, type, p);
80347+out:
80348+ read_unlock(&tasklist_lock);
80349+ return ret;
80350+#endif
80351+ return 0;
80352+}
80353+
80354+int
80355+gr_pid_is_chrooted(struct task_struct *p)
80356+{
80357+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
80358+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
80359+ return 0;
80360+
80361+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
80362+ !have_same_root(current, p)) {
80363+ return 1;
80364+ }
80365+#endif
80366+ return 0;
80367+}
80368+
80369+EXPORT_SYMBOL(gr_pid_is_chrooted);
80370+
80371+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
80372+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
80373+{
80374+ struct dentry *dentry = (struct dentry *)u_dentry;
80375+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
80376+ struct dentry *realroot;
80377+ struct vfsmount *realrootmnt;
80378+ struct dentry *currentroot;
80379+ struct vfsmount *currentmnt;
80380+ struct task_struct *reaper = &init_task;
80381+ int ret = 1;
80382+
80383+ read_lock(&reaper->fs->lock);
80384+ realrootmnt = mntget(reaper->fs->root.mnt);
80385+ realroot = dget(reaper->fs->root.dentry);
80386+ read_unlock(&reaper->fs->lock);
80387+
80388+ read_lock(&current->fs->lock);
80389+ currentmnt = mntget(current->fs->root.mnt);
80390+ currentroot = dget(current->fs->root.dentry);
80391+ read_unlock(&current->fs->lock);
80392+
80393+ spin_lock(&dcache_lock);
80394+ for (;;) {
80395+ if (unlikely((dentry == realroot && mnt == realrootmnt)
80396+ || (dentry == currentroot && mnt == currentmnt)))
80397+ break;
80398+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
80399+ if (mnt->mnt_parent == mnt)
80400+ break;
80401+ dentry = mnt->mnt_mountpoint;
80402+ mnt = mnt->mnt_parent;
80403+ continue;
80404+ }
80405+ dentry = dentry->d_parent;
80406+ }
80407+ spin_unlock(&dcache_lock);
80408+
80409+ dput(currentroot);
80410+ mntput(currentmnt);
80411+
80412+ /* access is outside of chroot */
80413+ if (dentry == realroot && mnt == realrootmnt)
80414+ ret = 0;
80415+
80416+ dput(realroot);
80417+ mntput(realrootmnt);
80418+ return ret;
80419+}
80420+#endif
80421+
80422+int
80423+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
80424+{
80425+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
80426+ if (!grsec_enable_chroot_fchdir)
80427+ return 1;
80428+
80429+ if (!proc_is_chrooted(current))
80430+ return 1;
80431+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
80432+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
80433+ return 0;
80434+ }
80435+#endif
80436+ return 1;
80437+}
80438+
80439+int
80440+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80441+ const time_t shm_createtime)
80442+{
80443+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
80444+ struct task_struct *p;
80445+ time_t starttime;
80446+
80447+ if (unlikely(!grsec_enable_chroot_shmat))
80448+ return 1;
80449+
80450+ if (likely(!proc_is_chrooted(current)))
80451+ return 1;
80452+
80453+ rcu_read_lock();
80454+ read_lock(&tasklist_lock);
80455+
80456+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
80457+ starttime = p->start_time.tv_sec;
80458+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
80459+ if (have_same_root(current, p)) {
80460+ goto allow;
80461+ } else {
80462+ read_unlock(&tasklist_lock);
80463+ rcu_read_unlock();
80464+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
80465+ return 0;
80466+ }
80467+ }
80468+ /* creator exited, pid reuse, fall through to next check */
80469+ }
80470+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
80471+ if (unlikely(!have_same_root(current, p))) {
80472+ read_unlock(&tasklist_lock);
80473+ rcu_read_unlock();
80474+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
80475+ return 0;
80476+ }
80477+ }
80478+
80479+allow:
80480+ read_unlock(&tasklist_lock);
80481+ rcu_read_unlock();
80482+#endif
80483+ return 1;
80484+}
80485+
80486+void
80487+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
80488+{
80489+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
80490+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
80491+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
80492+#endif
80493+ return;
80494+}
80495+
80496+int
80497+gr_handle_chroot_mknod(const struct dentry *dentry,
80498+ const struct vfsmount *mnt, const int mode)
80499+{
80500+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
80501+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
80502+ proc_is_chrooted(current)) {
80503+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
80504+ return -EPERM;
80505+ }
80506+#endif
80507+ return 0;
80508+}
80509+
80510+int
80511+gr_handle_chroot_mount(const struct dentry *dentry,
80512+ const struct vfsmount *mnt, const char *dev_name)
80513+{
80514+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
80515+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
80516+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
80517+ return -EPERM;
80518+ }
80519+#endif
80520+ return 0;
80521+}
80522+
80523+int
80524+gr_handle_chroot_pivot(void)
80525+{
80526+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
80527+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
80528+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
80529+ return -EPERM;
80530+ }
80531+#endif
80532+ return 0;
80533+}
80534+
80535+int
80536+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
80537+{
80538+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
80539+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
80540+ !gr_is_outside_chroot(dentry, mnt)) {
80541+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
80542+ return -EPERM;
80543+ }
80544+#endif
80545+ return 0;
80546+}
80547+
80548+extern const char *captab_log[];
80549+extern int captab_log_entries;
80550+
80551+int
80552+gr_chroot_is_capable(const int cap)
80553+{
80554+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80555+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
80556+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
80557+ if (cap_raised(chroot_caps, cap)) {
80558+ const struct cred *creds = current_cred();
80559+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
80560+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
80561+ }
80562+ return 0;
80563+ }
80564+ }
80565+#endif
80566+ return 1;
80567+}
80568+
80569+int
80570+gr_chroot_is_capable_nolog(const int cap)
80571+{
80572+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
80573+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
80574+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
80575+ if (cap_raised(chroot_caps, cap)) {
80576+ return 0;
80577+ }
80578+ }
80579+#endif
80580+ return 1;
80581+}
80582+
80583+int
80584+gr_handle_chroot_sysctl(const int op)
80585+{
80586+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
80587+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
80588+ && (op & MAY_WRITE))
80589+ return -EACCES;
80590+#endif
80591+ return 0;
80592+}
80593+
80594+void
80595+gr_handle_chroot_chdir(struct path *path)
80596+{
80597+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
80598+ if (grsec_enable_chroot_chdir)
80599+ set_fs_pwd(current->fs, path);
80600+#endif
80601+ return;
80602+}
80603+
80604+int
80605+gr_handle_chroot_chmod(const struct dentry *dentry,
80606+ const struct vfsmount *mnt, const int mode)
80607+{
80608+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
80609+ /* allow chmod +s on directories, but not on files */
80610+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
80611+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
80612+ proc_is_chrooted(current)) {
80613+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
80614+ return -EPERM;
80615+ }
80616+#endif
80617+ return 0;
80618+}
80619diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
80620new file mode 100644
80621index 0000000..40545bf
80622--- /dev/null
80623+++ b/grsecurity/grsec_disabled.c
80624@@ -0,0 +1,437 @@
80625+#include <linux/kernel.h>
80626+#include <linux/module.h>
80627+#include <linux/sched.h>
80628+#include <linux/file.h>
80629+#include <linux/fs.h>
80630+#include <linux/kdev_t.h>
80631+#include <linux/net.h>
80632+#include <linux/in.h>
80633+#include <linux/ip.h>
80634+#include <linux/skbuff.h>
80635+#include <linux/sysctl.h>
80636+
80637+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
80638+void
80639+pax_set_initial_flags(struct linux_binprm *bprm)
80640+{
80641+ return;
80642+}
80643+#endif
80644+
80645+#ifdef CONFIG_SYSCTL
80646+__u32
80647+gr_handle_sysctl(const struct ctl_table * table, const int op)
80648+{
80649+ return 0;
80650+}
80651+#endif
80652+
80653+#ifdef CONFIG_TASKSTATS
80654+int gr_is_taskstats_denied(int pid)
80655+{
80656+ return 0;
80657+}
80658+#endif
80659+
80660+int
80661+gr_acl_is_enabled(void)
80662+{
80663+ return 0;
80664+}
80665+
80666+void
80667+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
80668+{
80669+ return;
80670+}
80671+
80672+int
80673+gr_handle_rawio(const struct inode *inode)
80674+{
80675+ return 0;
80676+}
80677+
80678+void
80679+gr_acl_handle_psacct(struct task_struct *task, const long code)
80680+{
80681+ return;
80682+}
80683+
80684+int
80685+gr_handle_ptrace(struct task_struct *task, const long request)
80686+{
80687+ return 0;
80688+}
80689+
80690+int
80691+gr_handle_proc_ptrace(struct task_struct *task)
80692+{
80693+ return 0;
80694+}
80695+
80696+void
80697+gr_learn_resource(const struct task_struct *task,
80698+ const int res, const unsigned long wanted, const int gt)
80699+{
80700+ return;
80701+}
80702+
80703+int
80704+gr_set_acls(const int type)
80705+{
80706+ return 0;
80707+}
80708+
80709+int
80710+gr_check_hidden_task(const struct task_struct *tsk)
80711+{
80712+ return 0;
80713+}
80714+
80715+int
80716+gr_check_protected_task(const struct task_struct *task)
80717+{
80718+ return 0;
80719+}
80720+
80721+int
80722+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
80723+{
80724+ return 0;
80725+}
80726+
80727+void
80728+gr_copy_label(struct task_struct *tsk)
80729+{
80730+ return;
80731+}
80732+
80733+void
80734+gr_set_pax_flags(struct task_struct *task)
80735+{
80736+ return;
80737+}
80738+
80739+int
80740+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
80741+ const int unsafe_share)
80742+{
80743+ return 0;
80744+}
80745+
80746+void
80747+gr_handle_delete(const ino_t ino, const dev_t dev)
80748+{
80749+ return;
80750+}
80751+
80752+void
80753+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
80754+{
80755+ return;
80756+}
80757+
80758+void
80759+gr_handle_crash(struct task_struct *task, const int sig)
80760+{
80761+ return;
80762+}
80763+
80764+int
80765+gr_check_crash_exec(const struct file *filp)
80766+{
80767+ return 0;
80768+}
80769+
80770+int
80771+gr_check_crash_uid(const uid_t uid)
80772+{
80773+ return 0;
80774+}
80775+
80776+void
80777+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
80778+ struct dentry *old_dentry,
80779+ struct dentry *new_dentry,
80780+ struct vfsmount *mnt, const __u8 replace)
80781+{
80782+ return;
80783+}
80784+
80785+int
80786+gr_search_socket(const int family, const int type, const int protocol)
80787+{
80788+ return 1;
80789+}
80790+
80791+int
80792+gr_search_connectbind(const int mode, const struct socket *sock,
80793+ const struct sockaddr_in *addr)
80794+{
80795+ return 0;
80796+}
80797+
80798+void
80799+gr_handle_alertkill(struct task_struct *task)
80800+{
80801+ return;
80802+}
80803+
80804+__u32
80805+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
80806+{
80807+ return 1;
80808+}
80809+
80810+__u32
80811+gr_acl_handle_hidden_file(const struct dentry * dentry,
80812+ const struct vfsmount * mnt)
80813+{
80814+ return 1;
80815+}
80816+
80817+__u32
80818+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
80819+ int acc_mode)
80820+{
80821+ return 1;
80822+}
80823+
80824+__u32
80825+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
80826+{
80827+ return 1;
80828+}
80829+
80830+__u32
80831+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
80832+{
80833+ return 1;
80834+}
80835+
80836+int
80837+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
80838+ unsigned int *vm_flags)
80839+{
80840+ return 1;
80841+}
80842+
80843+__u32
80844+gr_acl_handle_truncate(const struct dentry * dentry,
80845+ const struct vfsmount * mnt)
80846+{
80847+ return 1;
80848+}
80849+
80850+__u32
80851+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
80852+{
80853+ return 1;
80854+}
80855+
80856+__u32
80857+gr_acl_handle_access(const struct dentry * dentry,
80858+ const struct vfsmount * mnt, const int fmode)
80859+{
80860+ return 1;
80861+}
80862+
80863+__u32
80864+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
80865+ umode_t *mode)
80866+{
80867+ return 1;
80868+}
80869+
80870+__u32
80871+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
80872+{
80873+ return 1;
80874+}
80875+
80876+__u32
80877+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
80878+{
80879+ return 1;
80880+}
80881+
80882+void
80883+grsecurity_init(void)
80884+{
80885+ return;
80886+}
80887+
80888+umode_t gr_acl_umask(void)
80889+{
80890+ return 0;
80891+}
80892+
80893+__u32
80894+gr_acl_handle_mknod(const struct dentry * new_dentry,
80895+ const struct dentry * parent_dentry,
80896+ const struct vfsmount * parent_mnt,
80897+ const int mode)
80898+{
80899+ return 1;
80900+}
80901+
80902+__u32
80903+gr_acl_handle_mkdir(const struct dentry * new_dentry,
80904+ const struct dentry * parent_dentry,
80905+ const struct vfsmount * parent_mnt)
80906+{
80907+ return 1;
80908+}
80909+
80910+__u32
80911+gr_acl_handle_symlink(const struct dentry * new_dentry,
80912+ const struct dentry * parent_dentry,
80913+ const struct vfsmount * parent_mnt, const char *from)
80914+{
80915+ return 1;
80916+}
80917+
80918+__u32
80919+gr_acl_handle_link(const struct dentry * new_dentry,
80920+ const struct dentry * parent_dentry,
80921+ const struct vfsmount * parent_mnt,
80922+ const struct dentry * old_dentry,
80923+ const struct vfsmount * old_mnt, const char *to)
80924+{
80925+ return 1;
80926+}
80927+
80928+int
80929+gr_acl_handle_rename(const struct dentry *new_dentry,
80930+ const struct dentry *parent_dentry,
80931+ const struct vfsmount *parent_mnt,
80932+ const struct dentry *old_dentry,
80933+ const struct inode *old_parent_inode,
80934+ const struct vfsmount *old_mnt, const char *newname)
80935+{
80936+ return 0;
80937+}
80938+
80939+int
80940+gr_acl_handle_filldir(const struct file *file, const char *name,
80941+ const int namelen, const ino_t ino)
80942+{
80943+ return 1;
80944+}
80945+
80946+int
80947+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
80948+ const time_t shm_createtime, const uid_t cuid, const int shmid)
80949+{
80950+ return 1;
80951+}
80952+
80953+int
80954+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
80955+{
80956+ return 0;
80957+}
80958+
80959+int
80960+gr_search_accept(const struct socket *sock)
80961+{
80962+ return 0;
80963+}
80964+
80965+int
80966+gr_search_listen(const struct socket *sock)
80967+{
80968+ return 0;
80969+}
80970+
80971+int
80972+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
80973+{
80974+ return 0;
80975+}
80976+
80977+__u32
80978+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
80979+{
80980+ return 1;
80981+}
80982+
80983+__u32
80984+gr_acl_handle_creat(const struct dentry * dentry,
80985+ const struct dentry * p_dentry,
80986+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
80987+ const int imode)
80988+{
80989+ return 1;
80990+}
80991+
80992+void
80993+gr_acl_handle_exit(void)
80994+{
80995+ return;
80996+}
80997+
80998+int
80999+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
81000+{
81001+ return 1;
81002+}
81003+
81004+void
81005+gr_set_role_label(const uid_t uid, const gid_t gid)
81006+{
81007+ return;
81008+}
81009+
81010+int
81011+gr_acl_handle_procpidmem(const struct task_struct *task)
81012+{
81013+ return 0;
81014+}
81015+
81016+int
81017+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
81018+{
81019+ return 0;
81020+}
81021+
81022+int
81023+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
81024+{
81025+ return 0;
81026+}
81027+
81028+void
81029+gr_set_kernel_label(struct task_struct *task)
81030+{
81031+ return;
81032+}
81033+
81034+int
81035+gr_check_user_change(int real, int effective, int fs)
81036+{
81037+ return 0;
81038+}
81039+
81040+int
81041+gr_check_group_change(int real, int effective, int fs)
81042+{
81043+ return 0;
81044+}
81045+
81046+int gr_acl_enable_at_secure(void)
81047+{
81048+ return 0;
81049+}
81050+
81051+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
81052+{
81053+ return dentry->d_inode->i_sb->s_dev;
81054+}
81055+
81056+EXPORT_SYMBOL(gr_learn_resource);
81057+EXPORT_SYMBOL(gr_set_kernel_label);
81058+#ifdef CONFIG_SECURITY
81059+EXPORT_SYMBOL(gr_check_user_change);
81060+EXPORT_SYMBOL(gr_check_group_change);
81061+#endif
81062diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
81063new file mode 100644
81064index 0000000..a96e155
81065--- /dev/null
81066+++ b/grsecurity/grsec_exec.c
81067@@ -0,0 +1,204 @@
81068+#include <linux/kernel.h>
81069+#include <linux/sched.h>
81070+#include <linux/file.h>
81071+#include <linux/binfmts.h>
81072+#include <linux/smp_lock.h>
81073+#include <linux/fs.h>
81074+#include <linux/types.h>
81075+#include <linux/grdefs.h>
81076+#include <linux/grinternal.h>
81077+#include <linux/capability.h>
81078+#include <linux/compat.h>
81079+#include <linux/module.h>
81080+
81081+#include <asm/uaccess.h>
81082+
81083+#ifdef CONFIG_GRKERNSEC_EXECLOG
81084+static char gr_exec_arg_buf[132];
81085+static DEFINE_MUTEX(gr_exec_arg_mutex);
81086+#endif
81087+
81088+void
81089+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
81090+{
81091+#ifdef CONFIG_GRKERNSEC_EXECLOG
81092+ char *grarg = gr_exec_arg_buf;
81093+ unsigned int i, x, execlen = 0;
81094+ char c;
81095+
81096+ if (!((grsec_enable_execlog && grsec_enable_group &&
81097+ in_group_p(grsec_audit_gid))
81098+ || (grsec_enable_execlog && !grsec_enable_group)))
81099+ return;
81100+
81101+ mutex_lock(&gr_exec_arg_mutex);
81102+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
81103+
81104+ if (unlikely(argv == NULL))
81105+ goto log;
81106+
81107+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
81108+ const char __user *p;
81109+ unsigned int len;
81110+
81111+ if (copy_from_user(&p, argv + i, sizeof(p)))
81112+ goto log;
81113+ if (!p)
81114+ goto log;
81115+ len = strnlen_user(p, 128 - execlen);
81116+ if (len > 128 - execlen)
81117+ len = 128 - execlen;
81118+ else if (len > 0)
81119+ len--;
81120+ if (copy_from_user(grarg + execlen, p, len))
81121+ goto log;
81122+
81123+ /* rewrite unprintable characters */
81124+ for (x = 0; x < len; x++) {
81125+ c = *(grarg + execlen + x);
81126+ if (c < 32 || c > 126)
81127+ *(grarg + execlen + x) = ' ';
81128+ }
81129+
81130+ execlen += len;
81131+ *(grarg + execlen) = ' ';
81132+ *(grarg + execlen + 1) = '\0';
81133+ execlen++;
81134+ }
81135+
81136+ log:
81137+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81138+ bprm->file->f_path.mnt, grarg);
81139+ mutex_unlock(&gr_exec_arg_mutex);
81140+#endif
81141+ return;
81142+}
81143+
81144+#ifdef CONFIG_COMPAT
81145+void
81146+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
81147+{
81148+#ifdef CONFIG_GRKERNSEC_EXECLOG
81149+ char *grarg = gr_exec_arg_buf;
81150+ unsigned int i, x, execlen = 0;
81151+ char c;
81152+
81153+ if (!((grsec_enable_execlog && grsec_enable_group &&
81154+ in_group_p(grsec_audit_gid))
81155+ || (grsec_enable_execlog && !grsec_enable_group)))
81156+ return;
81157+
81158+ mutex_lock(&gr_exec_arg_mutex);
81159+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
81160+
81161+ if (unlikely(argv == NULL))
81162+ goto log;
81163+
81164+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
81165+ compat_uptr_t p;
81166+ unsigned int len;
81167+
81168+ if (get_user(p, argv + i))
81169+ goto log;
81170+ len = strnlen_user(compat_ptr(p), 128 - execlen);
81171+ if (len > 128 - execlen)
81172+ len = 128 - execlen;
81173+ else if (len > 0)
81174+ len--;
81175+ else
81176+ goto log;
81177+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
81178+ goto log;
81179+
81180+ /* rewrite unprintable characters */
81181+ for (x = 0; x < len; x++) {
81182+ c = *(grarg + execlen + x);
81183+ if (c < 32 || c > 126)
81184+ *(grarg + execlen + x) = ' ';
81185+ }
81186+
81187+ execlen += len;
81188+ *(grarg + execlen) = ' ';
81189+ *(grarg + execlen + 1) = '\0';
81190+ execlen++;
81191+ }
81192+
81193+ log:
81194+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
81195+ bprm->file->f_path.mnt, grarg);
81196+ mutex_unlock(&gr_exec_arg_mutex);
81197+#endif
81198+ return;
81199+}
81200+#endif
81201+
81202+#ifdef CONFIG_GRKERNSEC
81203+extern int gr_acl_is_capable(const int cap);
81204+extern int gr_acl_is_capable_nolog(const int cap);
81205+extern int gr_chroot_is_capable(const int cap);
81206+extern int gr_chroot_is_capable_nolog(const int cap);
81207+#endif
81208+
81209+const char *captab_log[] = {
81210+ "CAP_CHOWN",
81211+ "CAP_DAC_OVERRIDE",
81212+ "CAP_DAC_READ_SEARCH",
81213+ "CAP_FOWNER",
81214+ "CAP_FSETID",
81215+ "CAP_KILL",
81216+ "CAP_SETGID",
81217+ "CAP_SETUID",
81218+ "CAP_SETPCAP",
81219+ "CAP_LINUX_IMMUTABLE",
81220+ "CAP_NET_BIND_SERVICE",
81221+ "CAP_NET_BROADCAST",
81222+ "CAP_NET_ADMIN",
81223+ "CAP_NET_RAW",
81224+ "CAP_IPC_LOCK",
81225+ "CAP_IPC_OWNER",
81226+ "CAP_SYS_MODULE",
81227+ "CAP_SYS_RAWIO",
81228+ "CAP_SYS_CHROOT",
81229+ "CAP_SYS_PTRACE",
81230+ "CAP_SYS_PACCT",
81231+ "CAP_SYS_ADMIN",
81232+ "CAP_SYS_BOOT",
81233+ "CAP_SYS_NICE",
81234+ "CAP_SYS_RESOURCE",
81235+ "CAP_SYS_TIME",
81236+ "CAP_SYS_TTY_CONFIG",
81237+ "CAP_MKNOD",
81238+ "CAP_LEASE",
81239+ "CAP_AUDIT_WRITE",
81240+ "CAP_AUDIT_CONTROL",
81241+ "CAP_SETFCAP",
81242+ "CAP_MAC_OVERRIDE",
81243+ "CAP_MAC_ADMIN"
81244+};
81245+
81246+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
81247+
81248+int gr_is_capable(const int cap)
81249+{
81250+#ifdef CONFIG_GRKERNSEC
81251+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
81252+ return 1;
81253+ return 0;
81254+#else
81255+ return 1;
81256+#endif
81257+}
81258+
81259+int gr_is_capable_nolog(const int cap)
81260+{
81261+#ifdef CONFIG_GRKERNSEC
81262+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
81263+ return 1;
81264+ return 0;
81265+#else
81266+ return 1;
81267+#endif
81268+}
81269+
81270+EXPORT_SYMBOL(gr_is_capable);
81271+EXPORT_SYMBOL(gr_is_capable_nolog);
81272diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
81273new file mode 100644
81274index 0000000..d3ee748
81275--- /dev/null
81276+++ b/grsecurity/grsec_fifo.c
81277@@ -0,0 +1,24 @@
81278+#include <linux/kernel.h>
81279+#include <linux/sched.h>
81280+#include <linux/fs.h>
81281+#include <linux/file.h>
81282+#include <linux/grinternal.h>
81283+
81284+int
81285+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
81286+ const struct dentry *dir, const int flag, const int acc_mode)
81287+{
81288+#ifdef CONFIG_GRKERNSEC_FIFO
81289+ const struct cred *cred = current_cred();
81290+
81291+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
81292+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
81293+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
81294+ (cred->fsuid != dentry->d_inode->i_uid)) {
81295+ if (!inode_permission(dentry->d_inode, acc_mode))
81296+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
81297+ return -EACCES;
81298+ }
81299+#endif
81300+ return 0;
81301+}
81302diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
81303new file mode 100644
81304index 0000000..8ca18bf
81305--- /dev/null
81306+++ b/grsecurity/grsec_fork.c
81307@@ -0,0 +1,23 @@
81308+#include <linux/kernel.h>
81309+#include <linux/sched.h>
81310+#include <linux/grsecurity.h>
81311+#include <linux/grinternal.h>
81312+#include <linux/errno.h>
81313+
81314+void
81315+gr_log_forkfail(const int retval)
81316+{
81317+#ifdef CONFIG_GRKERNSEC_FORKFAIL
81318+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
81319+ switch (retval) {
81320+ case -EAGAIN:
81321+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
81322+ break;
81323+ case -ENOMEM:
81324+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
81325+ break;
81326+ }
81327+ }
81328+#endif
81329+ return;
81330+}
81331diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
81332new file mode 100644
81333index 0000000..1e995d3
81334--- /dev/null
81335+++ b/grsecurity/grsec_init.c
81336@@ -0,0 +1,278 @@
81337+#include <linux/kernel.h>
81338+#include <linux/sched.h>
81339+#include <linux/mm.h>
81340+#include <linux/smp_lock.h>
81341+#include <linux/gracl.h>
81342+#include <linux/slab.h>
81343+#include <linux/vmalloc.h>
81344+#include <linux/percpu.h>
81345+#include <linux/module.h>
81346+
81347+int grsec_enable_ptrace_readexec;
81348+int grsec_enable_setxid;
81349+int grsec_enable_brute;
81350+int grsec_enable_link;
81351+int grsec_enable_dmesg;
81352+int grsec_enable_harden_ptrace;
81353+int grsec_enable_fifo;
81354+int grsec_enable_execlog;
81355+int grsec_enable_signal;
81356+int grsec_enable_forkfail;
81357+int grsec_enable_audit_ptrace;
81358+int grsec_enable_time;
81359+int grsec_enable_audit_textrel;
81360+int grsec_enable_group;
81361+int grsec_audit_gid;
81362+int grsec_enable_chdir;
81363+int grsec_enable_mount;
81364+int grsec_enable_rofs;
81365+int grsec_enable_chroot_findtask;
81366+int grsec_enable_chroot_mount;
81367+int grsec_enable_chroot_shmat;
81368+int grsec_enable_chroot_fchdir;
81369+int grsec_enable_chroot_double;
81370+int grsec_enable_chroot_pivot;
81371+int grsec_enable_chroot_chdir;
81372+int grsec_enable_chroot_chmod;
81373+int grsec_enable_chroot_mknod;
81374+int grsec_enable_chroot_nice;
81375+int grsec_enable_chroot_execlog;
81376+int grsec_enable_chroot_caps;
81377+int grsec_enable_chroot_sysctl;
81378+int grsec_enable_chroot_unix;
81379+int grsec_enable_tpe;
81380+int grsec_tpe_gid;
81381+int grsec_enable_blackhole;
81382+#ifdef CONFIG_IPV6_MODULE
81383+EXPORT_SYMBOL(grsec_enable_blackhole);
81384+#endif
81385+int grsec_lastack_retries;
81386+int grsec_enable_tpe_all;
81387+int grsec_enable_tpe_invert;
81388+int grsec_enable_socket_all;
81389+int grsec_socket_all_gid;
81390+int grsec_enable_socket_client;
81391+int grsec_socket_client_gid;
81392+int grsec_enable_socket_server;
81393+int grsec_socket_server_gid;
81394+int grsec_resource_logging;
81395+int grsec_disable_privio;
81396+int grsec_enable_log_rwxmaps;
81397+int grsec_lock;
81398+
81399+DEFINE_SPINLOCK(grsec_alert_lock);
81400+unsigned long grsec_alert_wtime = 0;
81401+unsigned long grsec_alert_fyet = 0;
81402+
81403+DEFINE_SPINLOCK(grsec_audit_lock);
81404+
81405+DEFINE_RWLOCK(grsec_exec_file_lock);
81406+
81407+char *gr_shared_page[4];
81408+
81409+char *gr_alert_log_fmt;
81410+char *gr_audit_log_fmt;
81411+char *gr_alert_log_buf;
81412+char *gr_audit_log_buf;
81413+
81414+extern struct gr_arg *gr_usermode;
81415+extern unsigned char *gr_system_salt;
81416+extern unsigned char *gr_system_sum;
81417+
81418+void __init
81419+grsecurity_init(void)
81420+{
81421+ int j;
81422+ /* create the per-cpu shared pages */
81423+
81424+#ifdef CONFIG_X86
81425+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
81426+#endif
81427+
81428+ for (j = 0; j < 4; j++) {
81429+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
81430+ if (gr_shared_page[j] == NULL) {
81431+ panic("Unable to allocate grsecurity shared page");
81432+ return;
81433+ }
81434+ }
81435+
81436+ /* allocate log buffers */
81437+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
81438+ if (!gr_alert_log_fmt) {
81439+ panic("Unable to allocate grsecurity alert log format buffer");
81440+ return;
81441+ }
81442+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
81443+ if (!gr_audit_log_fmt) {
81444+ panic("Unable to allocate grsecurity audit log format buffer");
81445+ return;
81446+ }
81447+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
81448+ if (!gr_alert_log_buf) {
81449+ panic("Unable to allocate grsecurity alert log buffer");
81450+ return;
81451+ }
81452+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
81453+ if (!gr_audit_log_buf) {
81454+ panic("Unable to allocate grsecurity audit log buffer");
81455+ return;
81456+ }
81457+
81458+ /* allocate memory for authentication structure */
81459+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
81460+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
81461+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
81462+
81463+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
81464+ panic("Unable to allocate grsecurity authentication structure");
81465+ return;
81466+ }
81467+
81468+
81469+#ifdef CONFIG_GRKERNSEC_IO
81470+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
81471+ grsec_disable_privio = 1;
81472+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
81473+ grsec_disable_privio = 1;
81474+#else
81475+ grsec_disable_privio = 0;
81476+#endif
81477+#endif
81478+
81479+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
81480+ /* for backward compatibility, tpe_invert always defaults to on if
81481+ enabled in the kernel
81482+ */
81483+ grsec_enable_tpe_invert = 1;
81484+#endif
81485+
81486+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
81487+#ifndef CONFIG_GRKERNSEC_SYSCTL
81488+ grsec_lock = 1;
81489+#endif
81490+
81491+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
81492+ grsec_enable_audit_textrel = 1;
81493+#endif
81494+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
81495+ grsec_enable_log_rwxmaps = 1;
81496+#endif
81497+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
81498+ grsec_enable_group = 1;
81499+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
81500+#endif
81501+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
81502+ grsec_enable_chdir = 1;
81503+#endif
81504+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
81505+ grsec_enable_harden_ptrace = 1;
81506+#endif
81507+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
81508+ grsec_enable_mount = 1;
81509+#endif
81510+#ifdef CONFIG_GRKERNSEC_LINK
81511+ grsec_enable_link = 1;
81512+#endif
81513+#ifdef CONFIG_GRKERNSEC_BRUTE
81514+ grsec_enable_brute = 1;
81515+#endif
81516+#ifdef CONFIG_GRKERNSEC_DMESG
81517+ grsec_enable_dmesg = 1;
81518+#endif
81519+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81520+ grsec_enable_blackhole = 1;
81521+ grsec_lastack_retries = 4;
81522+#endif
81523+#ifdef CONFIG_GRKERNSEC_FIFO
81524+ grsec_enable_fifo = 1;
81525+#endif
81526+#ifdef CONFIG_GRKERNSEC_EXECLOG
81527+ grsec_enable_execlog = 1;
81528+#endif
81529+#ifdef CONFIG_GRKERNSEC_SETXID
81530+ grsec_enable_setxid = 1;
81531+#endif
81532+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
81533+ grsec_enable_ptrace_readexec = 1;
81534+#endif
81535+#ifdef CONFIG_GRKERNSEC_SIGNAL
81536+ grsec_enable_signal = 1;
81537+#endif
81538+#ifdef CONFIG_GRKERNSEC_FORKFAIL
81539+ grsec_enable_forkfail = 1;
81540+#endif
81541+#ifdef CONFIG_GRKERNSEC_TIME
81542+ grsec_enable_time = 1;
81543+#endif
81544+#ifdef CONFIG_GRKERNSEC_RESLOG
81545+ grsec_resource_logging = 1;
81546+#endif
81547+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
81548+ grsec_enable_chroot_findtask = 1;
81549+#endif
81550+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
81551+ grsec_enable_chroot_unix = 1;
81552+#endif
81553+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
81554+ grsec_enable_chroot_mount = 1;
81555+#endif
81556+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
81557+ grsec_enable_chroot_fchdir = 1;
81558+#endif
81559+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
81560+ grsec_enable_chroot_shmat = 1;
81561+#endif
81562+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
81563+ grsec_enable_audit_ptrace = 1;
81564+#endif
81565+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
81566+ grsec_enable_chroot_double = 1;
81567+#endif
81568+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
81569+ grsec_enable_chroot_pivot = 1;
81570+#endif
81571+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
81572+ grsec_enable_chroot_chdir = 1;
81573+#endif
81574+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
81575+ grsec_enable_chroot_chmod = 1;
81576+#endif
81577+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
81578+ grsec_enable_chroot_mknod = 1;
81579+#endif
81580+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
81581+ grsec_enable_chroot_nice = 1;
81582+#endif
81583+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
81584+ grsec_enable_chroot_execlog = 1;
81585+#endif
81586+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
81587+ grsec_enable_chroot_caps = 1;
81588+#endif
81589+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
81590+ grsec_enable_chroot_sysctl = 1;
81591+#endif
81592+#ifdef CONFIG_GRKERNSEC_TPE
81593+ grsec_enable_tpe = 1;
81594+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
81595+#ifdef CONFIG_GRKERNSEC_TPE_ALL
81596+ grsec_enable_tpe_all = 1;
81597+#endif
81598+#endif
81599+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
81600+ grsec_enable_socket_all = 1;
81601+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
81602+#endif
81603+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
81604+ grsec_enable_socket_client = 1;
81605+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
81606+#endif
81607+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
81608+ grsec_enable_socket_server = 1;
81609+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
81610+#endif
81611+#endif
81612+
81613+ return;
81614+}
81615diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
81616new file mode 100644
81617index 0000000..3efe141
81618--- /dev/null
81619+++ b/grsecurity/grsec_link.c
81620@@ -0,0 +1,43 @@
81621+#include <linux/kernel.h>
81622+#include <linux/sched.h>
81623+#include <linux/fs.h>
81624+#include <linux/file.h>
81625+#include <linux/grinternal.h>
81626+
81627+int
81628+gr_handle_follow_link(const struct inode *parent,
81629+ const struct inode *inode,
81630+ const struct dentry *dentry, const struct vfsmount *mnt)
81631+{
81632+#ifdef CONFIG_GRKERNSEC_LINK
81633+ const struct cred *cred = current_cred();
81634+
81635+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
81636+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
81637+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
81638+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
81639+ return -EACCES;
81640+ }
81641+#endif
81642+ return 0;
81643+}
81644+
81645+int
81646+gr_handle_hardlink(const struct dentry *dentry,
81647+ const struct vfsmount *mnt,
81648+ struct inode *inode, const int mode, const char *to)
81649+{
81650+#ifdef CONFIG_GRKERNSEC_LINK
81651+ const struct cred *cred = current_cred();
81652+
81653+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
81654+ (!S_ISREG(mode) || (mode & S_ISUID) ||
81655+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
81656+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
81657+ !capable(CAP_FOWNER) && cred->uid) {
81658+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
81659+ return -EPERM;
81660+ }
81661+#endif
81662+ return 0;
81663+}
81664diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
81665new file mode 100644
81666index 0000000..a45d2e9
81667--- /dev/null
81668+++ b/grsecurity/grsec_log.c
81669@@ -0,0 +1,322 @@
81670+#include <linux/kernel.h>
81671+#include <linux/sched.h>
81672+#include <linux/file.h>
81673+#include <linux/tty.h>
81674+#include <linux/fs.h>
81675+#include <linux/grinternal.h>
81676+
81677+#ifdef CONFIG_TREE_PREEMPT_RCU
81678+#define DISABLE_PREEMPT() preempt_disable()
81679+#define ENABLE_PREEMPT() preempt_enable()
81680+#else
81681+#define DISABLE_PREEMPT()
81682+#define ENABLE_PREEMPT()
81683+#endif
81684+
81685+#define BEGIN_LOCKS(x) \
81686+ DISABLE_PREEMPT(); \
81687+ rcu_read_lock(); \
81688+ read_lock(&tasklist_lock); \
81689+ read_lock(&grsec_exec_file_lock); \
81690+ if (x != GR_DO_AUDIT) \
81691+ spin_lock(&grsec_alert_lock); \
81692+ else \
81693+ spin_lock(&grsec_audit_lock)
81694+
81695+#define END_LOCKS(x) \
81696+ if (x != GR_DO_AUDIT) \
81697+ spin_unlock(&grsec_alert_lock); \
81698+ else \
81699+ spin_unlock(&grsec_audit_lock); \
81700+ read_unlock(&grsec_exec_file_lock); \
81701+ read_unlock(&tasklist_lock); \
81702+ rcu_read_unlock(); \
81703+ ENABLE_PREEMPT(); \
81704+ if (x == GR_DONT_AUDIT) \
81705+ gr_handle_alertkill(current)
81706+
81707+enum {
81708+ FLOODING,
81709+ NO_FLOODING
81710+};
81711+
81712+extern char *gr_alert_log_fmt;
81713+extern char *gr_audit_log_fmt;
81714+extern char *gr_alert_log_buf;
81715+extern char *gr_audit_log_buf;
81716+
81717+static int gr_log_start(int audit)
81718+{
81719+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
81720+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
81721+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
81722+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
81723+ unsigned long curr_secs = get_seconds();
81724+
81725+ if (audit == GR_DO_AUDIT)
81726+ goto set_fmt;
81727+
81728+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
81729+ grsec_alert_wtime = curr_secs;
81730+ grsec_alert_fyet = 0;
81731+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
81732+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
81733+ grsec_alert_fyet++;
81734+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
81735+ grsec_alert_wtime = curr_secs;
81736+ grsec_alert_fyet++;
81737+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
81738+ return FLOODING;
81739+ }
81740+ else return FLOODING;
81741+
81742+set_fmt:
81743+#endif
81744+ memset(buf, 0, PAGE_SIZE);
81745+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
81746+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
81747+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
81748+ } else if (current->signal->curr_ip) {
81749+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
81750+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
81751+ } else if (gr_acl_is_enabled()) {
81752+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
81753+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
81754+ } else {
81755+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
81756+ strcpy(buf, fmt);
81757+ }
81758+
81759+ return NO_FLOODING;
81760+}
81761+
81762+static void gr_log_middle(int audit, const char *msg, va_list ap)
81763+ __attribute__ ((format (printf, 2, 0)));
81764+
81765+static void gr_log_middle(int audit, const char *msg, va_list ap)
81766+{
81767+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
81768+ unsigned int len = strlen(buf);
81769+
81770+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
81771+
81772+ return;
81773+}
81774+
81775+static void gr_log_middle_varargs(int audit, const char *msg, ...)
81776+ __attribute__ ((format (printf, 2, 3)));
81777+
81778+static void gr_log_middle_varargs(int audit, const char *msg, ...)
81779+{
81780+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
81781+ unsigned int len = strlen(buf);
81782+ va_list ap;
81783+
81784+ va_start(ap, msg);
81785+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
81786+ va_end(ap);
81787+
81788+ return;
81789+}
81790+
81791+static void gr_log_end(int audit, int append_default)
81792+{
81793+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
81794+
81795+ if (append_default) {
81796+ unsigned int len = strlen(buf);
81797+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
81798+ }
81799+
81800+ printk("%s\n", buf);
81801+
81802+ return;
81803+}
81804+
81805+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
81806+{
81807+ int logtype;
81808+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
81809+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
81810+ void *voidptr = NULL;
81811+ int num1 = 0, num2 = 0;
81812+ unsigned long ulong1 = 0, ulong2 = 0;
81813+ struct dentry *dentry = NULL;
81814+ struct vfsmount *mnt = NULL;
81815+ struct file *file = NULL;
81816+ struct task_struct *task = NULL;
81817+ const struct cred *cred, *pcred;
81818+ va_list ap;
81819+
81820+ BEGIN_LOCKS(audit);
81821+ logtype = gr_log_start(audit);
81822+ if (logtype == FLOODING) {
81823+ END_LOCKS(audit);
81824+ return;
81825+ }
81826+ va_start(ap, argtypes);
81827+ switch (argtypes) {
81828+ case GR_TTYSNIFF:
81829+ task = va_arg(ap, struct task_struct *);
81830+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
81831+ break;
81832+ case GR_SYSCTL_HIDDEN:
81833+ str1 = va_arg(ap, char *);
81834+ gr_log_middle_varargs(audit, msg, result, str1);
81835+ break;
81836+ case GR_RBAC:
81837+ dentry = va_arg(ap, struct dentry *);
81838+ mnt = va_arg(ap, struct vfsmount *);
81839+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
81840+ break;
81841+ case GR_RBAC_STR:
81842+ dentry = va_arg(ap, struct dentry *);
81843+ mnt = va_arg(ap, struct vfsmount *);
81844+ str1 = va_arg(ap, char *);
81845+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
81846+ break;
81847+ case GR_STR_RBAC:
81848+ str1 = va_arg(ap, char *);
81849+ dentry = va_arg(ap, struct dentry *);
81850+ mnt = va_arg(ap, struct vfsmount *);
81851+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
81852+ break;
81853+ case GR_RBAC_MODE2:
81854+ dentry = va_arg(ap, struct dentry *);
81855+ mnt = va_arg(ap, struct vfsmount *);
81856+ str1 = va_arg(ap, char *);
81857+ str2 = va_arg(ap, char *);
81858+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
81859+ break;
81860+ case GR_RBAC_MODE3:
81861+ dentry = va_arg(ap, struct dentry *);
81862+ mnt = va_arg(ap, struct vfsmount *);
81863+ str1 = va_arg(ap, char *);
81864+ str2 = va_arg(ap, char *);
81865+ str3 = va_arg(ap, char *);
81866+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
81867+ break;
81868+ case GR_FILENAME:
81869+ dentry = va_arg(ap, struct dentry *);
81870+ mnt = va_arg(ap, struct vfsmount *);
81871+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
81872+ break;
81873+ case GR_STR_FILENAME:
81874+ str1 = va_arg(ap, char *);
81875+ dentry = va_arg(ap, struct dentry *);
81876+ mnt = va_arg(ap, struct vfsmount *);
81877+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
81878+ break;
81879+ case GR_FILENAME_STR:
81880+ dentry = va_arg(ap, struct dentry *);
81881+ mnt = va_arg(ap, struct vfsmount *);
81882+ str1 = va_arg(ap, char *);
81883+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
81884+ break;
81885+ case GR_FILENAME_TWO_INT:
81886+ dentry = va_arg(ap, struct dentry *);
81887+ mnt = va_arg(ap, struct vfsmount *);
81888+ num1 = va_arg(ap, int);
81889+ num2 = va_arg(ap, int);
81890+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
81891+ break;
81892+ case GR_FILENAME_TWO_INT_STR:
81893+ dentry = va_arg(ap, struct dentry *);
81894+ mnt = va_arg(ap, struct vfsmount *);
81895+ num1 = va_arg(ap, int);
81896+ num2 = va_arg(ap, int);
81897+ str1 = va_arg(ap, char *);
81898+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
81899+ break;
81900+ case GR_TEXTREL:
81901+ file = va_arg(ap, struct file *);
81902+ ulong1 = va_arg(ap, unsigned long);
81903+ ulong2 = va_arg(ap, unsigned long);
81904+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
81905+ break;
81906+ case GR_PTRACE:
81907+ task = va_arg(ap, struct task_struct *);
81908+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
81909+ break;
81910+ case GR_RESOURCE:
81911+ task = va_arg(ap, struct task_struct *);
81912+ cred = __task_cred(task);
81913+ pcred = __task_cred(task->real_parent);
81914+ ulong1 = va_arg(ap, unsigned long);
81915+ str1 = va_arg(ap, char *);
81916+ ulong2 = va_arg(ap, unsigned long);
81917+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
81918+ break;
81919+ case GR_CAP:
81920+ task = va_arg(ap, struct task_struct *);
81921+ cred = __task_cred(task);
81922+ pcred = __task_cred(task->real_parent);
81923+ str1 = va_arg(ap, char *);
81924+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
81925+ break;
81926+ case GR_SIG:
81927+ str1 = va_arg(ap, char *);
81928+ voidptr = va_arg(ap, void *);
81929+ gr_log_middle_varargs(audit, msg, str1, voidptr);
81930+ break;
81931+ case GR_SIG2:
81932+ task = va_arg(ap, struct task_struct *);
81933+ cred = __task_cred(task);
81934+ pcred = __task_cred(task->real_parent);
81935+ num1 = va_arg(ap, int);
81936+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
81937+ break;
81938+ case GR_CRASH1:
81939+ task = va_arg(ap, struct task_struct *);
81940+ cred = __task_cred(task);
81941+ pcred = __task_cred(task->real_parent);
81942+ ulong1 = va_arg(ap, unsigned long);
81943+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
81944+ break;
81945+ case GR_CRASH2:
81946+ task = va_arg(ap, struct task_struct *);
81947+ cred = __task_cred(task);
81948+ pcred = __task_cred(task->real_parent);
81949+ ulong1 = va_arg(ap, unsigned long);
81950+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
81951+ break;
81952+ case GR_RWXMAP:
81953+ file = va_arg(ap, struct file *);
81954+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
81955+ break;
81956+ case GR_PSACCT:
81957+ {
81958+ unsigned int wday, cday;
81959+ __u8 whr, chr;
81960+ __u8 wmin, cmin;
81961+ __u8 wsec, csec;
81962+ char cur_tty[64] = { 0 };
81963+ char parent_tty[64] = { 0 };
81964+
81965+ task = va_arg(ap, struct task_struct *);
81966+ wday = va_arg(ap, unsigned int);
81967+ cday = va_arg(ap, unsigned int);
81968+ whr = va_arg(ap, int);
81969+ chr = va_arg(ap, int);
81970+ wmin = va_arg(ap, int);
81971+ cmin = va_arg(ap, int);
81972+ wsec = va_arg(ap, int);
81973+ csec = va_arg(ap, int);
81974+ ulong1 = va_arg(ap, unsigned long);
81975+ cred = __task_cred(task);
81976+ pcred = __task_cred(task->real_parent);
81977+
81978+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
81979+ }
81980+ break;
81981+ default:
81982+ gr_log_middle(audit, msg, ap);
81983+ }
81984+ va_end(ap);
81985+ // these don't need DEFAULTSECARGS printed on the end
81986+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
81987+ gr_log_end(audit, 0);
81988+ else
81989+ gr_log_end(audit, 1);
81990+ END_LOCKS(audit);
81991+}
81992diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
81993new file mode 100644
81994index 0000000..f536303
81995--- /dev/null
81996+++ b/grsecurity/grsec_mem.c
81997@@ -0,0 +1,40 @@
81998+#include <linux/kernel.h>
81999+#include <linux/sched.h>
82000+#include <linux/mm.h>
82001+#include <linux/mman.h>
82002+#include <linux/grinternal.h>
82003+
82004+void
82005+gr_handle_ioperm(void)
82006+{
82007+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
82008+ return;
82009+}
82010+
82011+void
82012+gr_handle_iopl(void)
82013+{
82014+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
82015+ return;
82016+}
82017+
82018+void
82019+gr_handle_mem_readwrite(u64 from, u64 to)
82020+{
82021+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
82022+ return;
82023+}
82024+
82025+void
82026+gr_handle_vm86(void)
82027+{
82028+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
82029+ return;
82030+}
82031+
82032+void
82033+gr_log_badprocpid(const char *entry)
82034+{
82035+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
82036+ return;
82037+}
82038diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
82039new file mode 100644
82040index 0000000..2131422
82041--- /dev/null
82042+++ b/grsecurity/grsec_mount.c
82043@@ -0,0 +1,62 @@
82044+#include <linux/kernel.h>
82045+#include <linux/sched.h>
82046+#include <linux/mount.h>
82047+#include <linux/grsecurity.h>
82048+#include <linux/grinternal.h>
82049+
82050+void
82051+gr_log_remount(const char *devname, const int retval)
82052+{
82053+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82054+ if (grsec_enable_mount && (retval >= 0))
82055+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
82056+#endif
82057+ return;
82058+}
82059+
82060+void
82061+gr_log_unmount(const char *devname, const int retval)
82062+{
82063+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82064+ if (grsec_enable_mount && (retval >= 0))
82065+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
82066+#endif
82067+ return;
82068+}
82069+
82070+void
82071+gr_log_mount(const char *from, const char *to, const int retval)
82072+{
82073+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
82074+ if (grsec_enable_mount && (retval >= 0))
82075+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
82076+#endif
82077+ return;
82078+}
82079+
82080+int
82081+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
82082+{
82083+#ifdef CONFIG_GRKERNSEC_ROFS
82084+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
82085+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
82086+ return -EPERM;
82087+ } else
82088+ return 0;
82089+#endif
82090+ return 0;
82091+}
82092+
82093+int
82094+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
82095+{
82096+#ifdef CONFIG_GRKERNSEC_ROFS
82097+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
82098+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
82099+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
82100+ return -EPERM;
82101+ } else
82102+ return 0;
82103+#endif
82104+ return 0;
82105+}
82106diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
82107new file mode 100644
82108index 0000000..a3b12a0
82109--- /dev/null
82110+++ b/grsecurity/grsec_pax.c
82111@@ -0,0 +1,36 @@
82112+#include <linux/kernel.h>
82113+#include <linux/sched.h>
82114+#include <linux/mm.h>
82115+#include <linux/file.h>
82116+#include <linux/grinternal.h>
82117+#include <linux/grsecurity.h>
82118+
82119+void
82120+gr_log_textrel(struct vm_area_struct * vma)
82121+{
82122+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
82123+ if (grsec_enable_audit_textrel)
82124+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
82125+#endif
82126+ return;
82127+}
82128+
82129+void
82130+gr_log_rwxmmap(struct file *file)
82131+{
82132+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82133+ if (grsec_enable_log_rwxmaps)
82134+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
82135+#endif
82136+ return;
82137+}
82138+
82139+void
82140+gr_log_rwxmprotect(struct file *file)
82141+{
82142+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82143+ if (grsec_enable_log_rwxmaps)
82144+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
82145+#endif
82146+ return;
82147+}
82148diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
82149new file mode 100644
82150index 0000000..78f8733
82151--- /dev/null
82152+++ b/grsecurity/grsec_ptrace.c
82153@@ -0,0 +1,30 @@
82154+#include <linux/kernel.h>
82155+#include <linux/sched.h>
82156+#include <linux/grinternal.h>
82157+#include <linux/security.h>
82158+
82159+void
82160+gr_audit_ptrace(struct task_struct *task)
82161+{
82162+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
82163+ if (grsec_enable_audit_ptrace)
82164+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
82165+#endif
82166+ return;
82167+}
82168+
82169+int
82170+gr_ptrace_readexec(struct file *file, int unsafe_flags)
82171+{
82172+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82173+ const struct dentry *dentry = file->f_path.dentry;
82174+ const struct vfsmount *mnt = file->f_path.mnt;
82175+
82176+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
82177+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
82178+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
82179+ return -EACCES;
82180+ }
82181+#endif
82182+ return 0;
82183+}
82184diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
82185new file mode 100644
82186index 0000000..c648492
82187--- /dev/null
82188+++ b/grsecurity/grsec_sig.c
82189@@ -0,0 +1,206 @@
82190+#include <linux/kernel.h>
82191+#include <linux/sched.h>
82192+#include <linux/delay.h>
82193+#include <linux/grsecurity.h>
82194+#include <linux/grinternal.h>
82195+#include <linux/hardirq.h>
82196+
82197+char *signames[] = {
82198+ [SIGSEGV] = "Segmentation fault",
82199+ [SIGILL] = "Illegal instruction",
82200+ [SIGABRT] = "Abort",
82201+ [SIGBUS] = "Invalid alignment/Bus error"
82202+};
82203+
82204+void
82205+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
82206+{
82207+#ifdef CONFIG_GRKERNSEC_SIGNAL
82208+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
82209+ (sig == SIGABRT) || (sig == SIGBUS))) {
82210+ if (t->pid == current->pid) {
82211+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
82212+ } else {
82213+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
82214+ }
82215+ }
82216+#endif
82217+ return;
82218+}
82219+
82220+int
82221+gr_handle_signal(const struct task_struct *p, const int sig)
82222+{
82223+#ifdef CONFIG_GRKERNSEC
82224+ /* ignore the 0 signal for protected task checks */
82225+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
82226+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
82227+ return -EPERM;
82228+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
82229+ return -EPERM;
82230+ }
82231+#endif
82232+ return 0;
82233+}
82234+
82235+#ifdef CONFIG_GRKERNSEC
82236+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
82237+
82238+int gr_fake_force_sig(int sig, struct task_struct *t)
82239+{
82240+ unsigned long int flags;
82241+ int ret, blocked, ignored;
82242+ struct k_sigaction *action;
82243+
82244+ spin_lock_irqsave(&t->sighand->siglock, flags);
82245+ action = &t->sighand->action[sig-1];
82246+ ignored = action->sa.sa_handler == SIG_IGN;
82247+ blocked = sigismember(&t->blocked, sig);
82248+ if (blocked || ignored) {
82249+ action->sa.sa_handler = SIG_DFL;
82250+ if (blocked) {
82251+ sigdelset(&t->blocked, sig);
82252+ recalc_sigpending_and_wake(t);
82253+ }
82254+ }
82255+ if (action->sa.sa_handler == SIG_DFL)
82256+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
82257+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
82258+
82259+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
82260+
82261+ return ret;
82262+}
82263+#endif
82264+
82265+#ifdef CONFIG_GRKERNSEC_BRUTE
82266+#define GR_USER_BAN_TIME (15 * 60)
82267+
82268+static int __get_dumpable(unsigned long mm_flags)
82269+{
82270+ int ret;
82271+
82272+ ret = mm_flags & MMF_DUMPABLE_MASK;
82273+ return (ret >= 2) ? 2 : ret;
82274+}
82275+#endif
82276+
82277+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
82278+{
82279+#ifdef CONFIG_GRKERNSEC_BRUTE
82280+ uid_t uid = 0;
82281+
82282+ if (!grsec_enable_brute)
82283+ return;
82284+
82285+ rcu_read_lock();
82286+ read_lock(&tasklist_lock);
82287+ read_lock(&grsec_exec_file_lock);
82288+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
82289+ p->real_parent->brute = 1;
82290+ else {
82291+ const struct cred *cred = __task_cred(p), *cred2;
82292+ struct task_struct *tsk, *tsk2;
82293+
82294+ if (!__get_dumpable(mm_flags) && cred->uid) {
82295+ struct user_struct *user;
82296+
82297+ uid = cred->uid;
82298+
82299+ /* this is put upon execution past expiration */
82300+ user = find_user(uid);
82301+ if (user == NULL)
82302+ goto unlock;
82303+ user->banned = 1;
82304+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
82305+ if (user->ban_expires == ~0UL)
82306+ user->ban_expires--;
82307+
82308+ do_each_thread(tsk2, tsk) {
82309+ cred2 = __task_cred(tsk);
82310+ if (tsk != p && cred2->uid == uid)
82311+ gr_fake_force_sig(SIGKILL, tsk);
82312+ } while_each_thread(tsk2, tsk);
82313+ }
82314+ }
82315+unlock:
82316+ read_unlock(&grsec_exec_file_lock);
82317+ read_unlock(&tasklist_lock);
82318+ rcu_read_unlock();
82319+
82320+ if (uid)
82321+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
82322+#endif
82323+ return;
82324+}
82325+
82326+void gr_handle_brute_check(void)
82327+{
82328+#ifdef CONFIG_GRKERNSEC_BRUTE
82329+ if (current->brute)
82330+ msleep(30 * 1000);
82331+#endif
82332+ return;
82333+}
82334+
82335+void gr_handle_kernel_exploit(void)
82336+{
82337+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
82338+ const struct cred *cred;
82339+ struct task_struct *tsk, *tsk2;
82340+ struct user_struct *user;
82341+ uid_t uid;
82342+
82343+ if (in_irq() || in_serving_softirq() || in_nmi())
82344+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
82345+
82346+ uid = current_uid();
82347+
82348+ if (uid == 0)
82349+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
82350+ else {
82351+ /* kill all the processes of this user, hold a reference
82352+ to their creds struct, and prevent them from creating
82353+ another process until system reset
82354+ */
82355+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
82356+ /* we intentionally leak this ref */
82357+ user = get_uid(current->cred->user);
82358+ if (user) {
82359+ user->banned = 1;
82360+ user->ban_expires = ~0UL;
82361+ }
82362+
82363+ read_lock(&tasklist_lock);
82364+ do_each_thread(tsk2, tsk) {
82365+ cred = __task_cred(tsk);
82366+ if (cred->uid == uid)
82367+ gr_fake_force_sig(SIGKILL, tsk);
82368+ } while_each_thread(tsk2, tsk);
82369+ read_unlock(&tasklist_lock);
82370+ }
82371+#endif
82372+}
82373+
82374+int __gr_process_user_ban(struct user_struct *user)
82375+{
82376+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82377+ if (unlikely(user->banned)) {
82378+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
82379+ user->banned = 0;
82380+ user->ban_expires = 0;
82381+ free_uid(user);
82382+ } else
82383+ return -EPERM;
82384+ }
82385+#endif
82386+ return 0;
82387+}
82388+
82389+int gr_process_user_ban(void)
82390+{
82391+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
82392+ return __gr_process_user_ban(current->cred->user);
82393+#endif
82394+ return 0;
82395+}
82396diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
82397new file mode 100644
82398index 0000000..7512ea9
82399--- /dev/null
82400+++ b/grsecurity/grsec_sock.c
82401@@ -0,0 +1,275 @@
82402+#include <linux/kernel.h>
82403+#include <linux/module.h>
82404+#include <linux/sched.h>
82405+#include <linux/file.h>
82406+#include <linux/net.h>
82407+#include <linux/in.h>
82408+#include <linux/ip.h>
82409+#include <net/sock.h>
82410+#include <net/inet_sock.h>
82411+#include <linux/grsecurity.h>
82412+#include <linux/grinternal.h>
82413+#include <linux/gracl.h>
82414+
82415+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
82416+EXPORT_SYMBOL(gr_cap_rtnetlink);
82417+
82418+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
82419+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
82420+
82421+EXPORT_SYMBOL(gr_search_udp_recvmsg);
82422+EXPORT_SYMBOL(gr_search_udp_sendmsg);
82423+
82424+#ifdef CONFIG_UNIX_MODULE
82425+EXPORT_SYMBOL(gr_acl_handle_unix);
82426+EXPORT_SYMBOL(gr_acl_handle_mknod);
82427+EXPORT_SYMBOL(gr_handle_chroot_unix);
82428+EXPORT_SYMBOL(gr_handle_create);
82429+#endif
82430+
82431+#ifdef CONFIG_GRKERNSEC
82432+#define gr_conn_table_size 32749
82433+struct conn_table_entry {
82434+ struct conn_table_entry *next;
82435+ struct signal_struct *sig;
82436+};
82437+
82438+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
82439+DEFINE_SPINLOCK(gr_conn_table_lock);
82440+
82441+extern const char * gr_socktype_to_name(unsigned char type);
82442+extern const char * gr_proto_to_name(unsigned char proto);
82443+extern const char * gr_sockfamily_to_name(unsigned char family);
82444+
82445+static __inline__ int
82446+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
82447+{
82448+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
82449+}
82450+
82451+static __inline__ int
82452+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
82453+ __u16 sport, __u16 dport)
82454+{
82455+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
82456+ sig->gr_sport == sport && sig->gr_dport == dport))
82457+ return 1;
82458+ else
82459+ return 0;
82460+}
82461+
82462+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
82463+{
82464+ struct conn_table_entry **match;
82465+ unsigned int index;
82466+
82467+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
82468+ sig->gr_sport, sig->gr_dport,
82469+ gr_conn_table_size);
82470+
82471+ newent->sig = sig;
82472+
82473+ match = &gr_conn_table[index];
82474+ newent->next = *match;
82475+ *match = newent;
82476+
82477+ return;
82478+}
82479+
82480+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
82481+{
82482+ struct conn_table_entry *match, *last = NULL;
82483+ unsigned int index;
82484+
82485+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
82486+ sig->gr_sport, sig->gr_dport,
82487+ gr_conn_table_size);
82488+
82489+ match = gr_conn_table[index];
82490+ while (match && !conn_match(match->sig,
82491+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
82492+ sig->gr_dport)) {
82493+ last = match;
82494+ match = match->next;
82495+ }
82496+
82497+ if (match) {
82498+ if (last)
82499+ last->next = match->next;
82500+ else
82501+ gr_conn_table[index] = NULL;
82502+ kfree(match);
82503+ }
82504+
82505+ return;
82506+}
82507+
82508+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
82509+ __u16 sport, __u16 dport)
82510+{
82511+ struct conn_table_entry *match;
82512+ unsigned int index;
82513+
82514+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
82515+
82516+ match = gr_conn_table[index];
82517+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
82518+ match = match->next;
82519+
82520+ if (match)
82521+ return match->sig;
82522+ else
82523+ return NULL;
82524+}
82525+
82526+#endif
82527+
82528+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
82529+{
82530+#ifdef CONFIG_GRKERNSEC
82531+ struct signal_struct *sig = task->signal;
82532+ struct conn_table_entry *newent;
82533+
82534+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
82535+ if (newent == NULL)
82536+ return;
82537+ /* no bh lock needed since we are called with bh disabled */
82538+ spin_lock(&gr_conn_table_lock);
82539+ gr_del_task_from_ip_table_nolock(sig);
82540+ sig->gr_saddr = inet->rcv_saddr;
82541+ sig->gr_daddr = inet->daddr;
82542+ sig->gr_sport = inet->sport;
82543+ sig->gr_dport = inet->dport;
82544+ gr_add_to_task_ip_table_nolock(sig, newent);
82545+ spin_unlock(&gr_conn_table_lock);
82546+#endif
82547+ return;
82548+}
82549+
82550+void gr_del_task_from_ip_table(struct task_struct *task)
82551+{
82552+#ifdef CONFIG_GRKERNSEC
82553+ spin_lock_bh(&gr_conn_table_lock);
82554+ gr_del_task_from_ip_table_nolock(task->signal);
82555+ spin_unlock_bh(&gr_conn_table_lock);
82556+#endif
82557+ return;
82558+}
82559+
82560+void
82561+gr_attach_curr_ip(const struct sock *sk)
82562+{
82563+#ifdef CONFIG_GRKERNSEC
82564+ struct signal_struct *p, *set;
82565+ const struct inet_sock *inet = inet_sk(sk);
82566+
82567+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
82568+ return;
82569+
82570+ set = current->signal;
82571+
82572+ spin_lock_bh(&gr_conn_table_lock);
82573+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
82574+ inet->dport, inet->sport);
82575+ if (unlikely(p != NULL)) {
82576+ set->curr_ip = p->curr_ip;
82577+ set->used_accept = 1;
82578+ gr_del_task_from_ip_table_nolock(p);
82579+ spin_unlock_bh(&gr_conn_table_lock);
82580+ return;
82581+ }
82582+ spin_unlock_bh(&gr_conn_table_lock);
82583+
82584+ set->curr_ip = inet->daddr;
82585+ set->used_accept = 1;
82586+#endif
82587+ return;
82588+}
82589+
82590+int
82591+gr_handle_sock_all(const int family, const int type, const int protocol)
82592+{
82593+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
82594+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
82595+ (family != AF_UNIX)) {
82596+ if (family == AF_INET)
82597+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
82598+ else
82599+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
82600+ return -EACCES;
82601+ }
82602+#endif
82603+ return 0;
82604+}
82605+
82606+int
82607+gr_handle_sock_server(const struct sockaddr *sck)
82608+{
82609+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
82610+ if (grsec_enable_socket_server &&
82611+ in_group_p(grsec_socket_server_gid) &&
82612+ sck && (sck->sa_family != AF_UNIX) &&
82613+ (sck->sa_family != AF_LOCAL)) {
82614+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
82615+ return -EACCES;
82616+ }
82617+#endif
82618+ return 0;
82619+}
82620+
82621+int
82622+gr_handle_sock_server_other(const struct sock *sck)
82623+{
82624+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
82625+ if (grsec_enable_socket_server &&
82626+ in_group_p(grsec_socket_server_gid) &&
82627+ sck && (sck->sk_family != AF_UNIX) &&
82628+ (sck->sk_family != AF_LOCAL)) {
82629+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
82630+ return -EACCES;
82631+ }
82632+#endif
82633+ return 0;
82634+}
82635+
82636+int
82637+gr_handle_sock_client(const struct sockaddr *sck)
82638+{
82639+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
82640+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
82641+ sck && (sck->sa_family != AF_UNIX) &&
82642+ (sck->sa_family != AF_LOCAL)) {
82643+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
82644+ return -EACCES;
82645+ }
82646+#endif
82647+ return 0;
82648+}
82649+
82650+kernel_cap_t
82651+gr_cap_rtnetlink(struct sock *sock)
82652+{
82653+#ifdef CONFIG_GRKERNSEC
82654+ if (!gr_acl_is_enabled())
82655+ return current_cap();
82656+ else if (sock->sk_protocol == NETLINK_ISCSI &&
82657+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
82658+ gr_is_capable(CAP_SYS_ADMIN))
82659+ return current_cap();
82660+ else if (sock->sk_protocol == NETLINK_AUDIT &&
82661+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
82662+ gr_is_capable(CAP_AUDIT_WRITE) &&
82663+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
82664+ gr_is_capable(CAP_AUDIT_CONTROL))
82665+ return current_cap();
82666+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
82667+ ((sock->sk_protocol == NETLINK_ROUTE) ?
82668+ gr_is_capable_nolog(CAP_NET_ADMIN) :
82669+ gr_is_capable(CAP_NET_ADMIN)))
82670+ return current_cap();
82671+ else
82672+ return __cap_empty_set;
82673+#else
82674+ return current_cap();
82675+#endif
82676+}
82677diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
82678new file mode 100644
82679index 0000000..31f3258
82680--- /dev/null
82681+++ b/grsecurity/grsec_sysctl.c
82682@@ -0,0 +1,499 @@
82683+#include <linux/kernel.h>
82684+#include <linux/sched.h>
82685+#include <linux/sysctl.h>
82686+#include <linux/grsecurity.h>
82687+#include <linux/grinternal.h>
82688+
82689+int
82690+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
82691+{
82692+#ifdef CONFIG_GRKERNSEC_SYSCTL
82693+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
82694+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
82695+ return -EACCES;
82696+ }
82697+#endif
82698+ return 0;
82699+}
82700+
82701+#ifdef CONFIG_GRKERNSEC_ROFS
82702+static int __maybe_unused one = 1;
82703+#endif
82704+
82705+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
82706+ctl_table grsecurity_table[] = {
82707+#ifdef CONFIG_GRKERNSEC_SYSCTL
82708+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
82709+#ifdef CONFIG_GRKERNSEC_IO
82710+ {
82711+ .ctl_name = CTL_UNNUMBERED,
82712+ .procname = "disable_priv_io",
82713+ .data = &grsec_disable_privio,
82714+ .maxlen = sizeof(int),
82715+ .mode = 0600,
82716+ .proc_handler = &proc_dointvec,
82717+ },
82718+#endif
82719+#endif
82720+#ifdef CONFIG_GRKERNSEC_LINK
82721+ {
82722+ .ctl_name = CTL_UNNUMBERED,
82723+ .procname = "linking_restrictions",
82724+ .data = &grsec_enable_link,
82725+ .maxlen = sizeof(int),
82726+ .mode = 0600,
82727+ .proc_handler = &proc_dointvec,
82728+ },
82729+#endif
82730+#ifdef CONFIG_GRKERNSEC_BRUTE
82731+ {
82732+ .ctl_name = CTL_UNNUMBERED,
82733+ .procname = "deter_bruteforce",
82734+ .data = &grsec_enable_brute,
82735+ .maxlen = sizeof(int),
82736+ .mode = 0600,
82737+ .proc_handler = &proc_dointvec,
82738+ },
82739+#endif
82740+#ifdef CONFIG_GRKERNSEC_FIFO
82741+ {
82742+ .ctl_name = CTL_UNNUMBERED,
82743+ .procname = "fifo_restrictions",
82744+ .data = &grsec_enable_fifo,
82745+ .maxlen = sizeof(int),
82746+ .mode = 0600,
82747+ .proc_handler = &proc_dointvec,
82748+ },
82749+#endif
82750+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
82751+ {
82752+ .ctl_name = CTL_UNNUMBERED,
82753+ .procname = "ptrace_readexec",
82754+ .data = &grsec_enable_ptrace_readexec,
82755+ .maxlen = sizeof(int),
82756+ .mode = 0600,
82757+ .proc_handler = &proc_dointvec,
82758+ },
82759+#endif
82760+#ifdef CONFIG_GRKERNSEC_SETXID
82761+ {
82762+ .ctl_name = CTL_UNNUMBERED,
82763+ .procname = "consistent_setxid",
82764+ .data = &grsec_enable_setxid,
82765+ .maxlen = sizeof(int),
82766+ .mode = 0600,
82767+ .proc_handler = &proc_dointvec,
82768+ },
82769+#endif
82770+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82771+ {
82772+ .ctl_name = CTL_UNNUMBERED,
82773+ .procname = "ip_blackhole",
82774+ .data = &grsec_enable_blackhole,
82775+ .maxlen = sizeof(int),
82776+ .mode = 0600,
82777+ .proc_handler = &proc_dointvec,
82778+ },
82779+ {
82780+ .ctl_name = CTL_UNNUMBERED,
82781+ .procname = "lastack_retries",
82782+ .data = &grsec_lastack_retries,
82783+ .maxlen = sizeof(int),
82784+ .mode = 0600,
82785+ .proc_handler = &proc_dointvec,
82786+ },
82787+#endif
82788+#ifdef CONFIG_GRKERNSEC_EXECLOG
82789+ {
82790+ .ctl_name = CTL_UNNUMBERED,
82791+ .procname = "exec_logging",
82792+ .data = &grsec_enable_execlog,
82793+ .maxlen = sizeof(int),
82794+ .mode = 0600,
82795+ .proc_handler = &proc_dointvec,
82796+ },
82797+#endif
82798+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
82799+ {
82800+ .ctl_name = CTL_UNNUMBERED,
82801+ .procname = "rwxmap_logging",
82802+ .data = &grsec_enable_log_rwxmaps,
82803+ .maxlen = sizeof(int),
82804+ .mode = 0600,
82805+ .proc_handler = &proc_dointvec,
82806+ },
82807+#endif
82808+#ifdef CONFIG_GRKERNSEC_SIGNAL
82809+ {
82810+ .ctl_name = CTL_UNNUMBERED,
82811+ .procname = "signal_logging",
82812+ .data = &grsec_enable_signal,
82813+ .maxlen = sizeof(int),
82814+ .mode = 0600,
82815+ .proc_handler = &proc_dointvec,
82816+ },
82817+#endif
82818+#ifdef CONFIG_GRKERNSEC_FORKFAIL
82819+ {
82820+ .ctl_name = CTL_UNNUMBERED,
82821+ .procname = "forkfail_logging",
82822+ .data = &grsec_enable_forkfail,
82823+ .maxlen = sizeof(int),
82824+ .mode = 0600,
82825+ .proc_handler = &proc_dointvec,
82826+ },
82827+#endif
82828+#ifdef CONFIG_GRKERNSEC_TIME
82829+ {
82830+ .ctl_name = CTL_UNNUMBERED,
82831+ .procname = "timechange_logging",
82832+ .data = &grsec_enable_time,
82833+ .maxlen = sizeof(int),
82834+ .mode = 0600,
82835+ .proc_handler = &proc_dointvec,
82836+ },
82837+#endif
82838+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
82839+ {
82840+ .ctl_name = CTL_UNNUMBERED,
82841+ .procname = "chroot_deny_shmat",
82842+ .data = &grsec_enable_chroot_shmat,
82843+ .maxlen = sizeof(int),
82844+ .mode = 0600,
82845+ .proc_handler = &proc_dointvec,
82846+ },
82847+#endif
82848+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
82849+ {
82850+ .ctl_name = CTL_UNNUMBERED,
82851+ .procname = "chroot_deny_unix",
82852+ .data = &grsec_enable_chroot_unix,
82853+ .maxlen = sizeof(int),
82854+ .mode = 0600,
82855+ .proc_handler = &proc_dointvec,
82856+ },
82857+#endif
82858+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
82859+ {
82860+ .ctl_name = CTL_UNNUMBERED,
82861+ .procname = "chroot_deny_mount",
82862+ .data = &grsec_enable_chroot_mount,
82863+ .maxlen = sizeof(int),
82864+ .mode = 0600,
82865+ .proc_handler = &proc_dointvec,
82866+ },
82867+#endif
82868+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
82869+ {
82870+ .ctl_name = CTL_UNNUMBERED,
82871+ .procname = "chroot_deny_fchdir",
82872+ .data = &grsec_enable_chroot_fchdir,
82873+ .maxlen = sizeof(int),
82874+ .mode = 0600,
82875+ .proc_handler = &proc_dointvec,
82876+ },
82877+#endif
82878+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
82879+ {
82880+ .ctl_name = CTL_UNNUMBERED,
82881+ .procname = "chroot_deny_chroot",
82882+ .data = &grsec_enable_chroot_double,
82883+ .maxlen = sizeof(int),
82884+ .mode = 0600,
82885+ .proc_handler = &proc_dointvec,
82886+ },
82887+#endif
82888+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
82889+ {
82890+ .ctl_name = CTL_UNNUMBERED,
82891+ .procname = "chroot_deny_pivot",
82892+ .data = &grsec_enable_chroot_pivot,
82893+ .maxlen = sizeof(int),
82894+ .mode = 0600,
82895+ .proc_handler = &proc_dointvec,
82896+ },
82897+#endif
82898+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
82899+ {
82900+ .ctl_name = CTL_UNNUMBERED,
82901+ .procname = "chroot_enforce_chdir",
82902+ .data = &grsec_enable_chroot_chdir,
82903+ .maxlen = sizeof(int),
82904+ .mode = 0600,
82905+ .proc_handler = &proc_dointvec,
82906+ },
82907+#endif
82908+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
82909+ {
82910+ .ctl_name = CTL_UNNUMBERED,
82911+ .procname = "chroot_deny_chmod",
82912+ .data = &grsec_enable_chroot_chmod,
82913+ .maxlen = sizeof(int),
82914+ .mode = 0600,
82915+ .proc_handler = &proc_dointvec,
82916+ },
82917+#endif
82918+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
82919+ {
82920+ .ctl_name = CTL_UNNUMBERED,
82921+ .procname = "chroot_deny_mknod",
82922+ .data = &grsec_enable_chroot_mknod,
82923+ .maxlen = sizeof(int),
82924+ .mode = 0600,
82925+ .proc_handler = &proc_dointvec,
82926+ },
82927+#endif
82928+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
82929+ {
82930+ .ctl_name = CTL_UNNUMBERED,
82931+ .procname = "chroot_restrict_nice",
82932+ .data = &grsec_enable_chroot_nice,
82933+ .maxlen = sizeof(int),
82934+ .mode = 0600,
82935+ .proc_handler = &proc_dointvec,
82936+ },
82937+#endif
82938+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
82939+ {
82940+ .ctl_name = CTL_UNNUMBERED,
82941+ .procname = "chroot_execlog",
82942+ .data = &grsec_enable_chroot_execlog,
82943+ .maxlen = sizeof(int),
82944+ .mode = 0600,
82945+ .proc_handler = &proc_dointvec,
82946+ },
82947+#endif
82948+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
82949+ {
82950+ .ctl_name = CTL_UNNUMBERED,
82951+ .procname = "chroot_caps",
82952+ .data = &grsec_enable_chroot_caps,
82953+ .maxlen = sizeof(int),
82954+ .mode = 0600,
82955+ .proc_handler = &proc_dointvec,
82956+ },
82957+#endif
82958+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
82959+ {
82960+ .ctl_name = CTL_UNNUMBERED,
82961+ .procname = "chroot_deny_sysctl",
82962+ .data = &grsec_enable_chroot_sysctl,
82963+ .maxlen = sizeof(int),
82964+ .mode = 0600,
82965+ .proc_handler = &proc_dointvec,
82966+ },
82967+#endif
82968+#ifdef CONFIG_GRKERNSEC_TPE
82969+ {
82970+ .ctl_name = CTL_UNNUMBERED,
82971+ .procname = "tpe",
82972+ .data = &grsec_enable_tpe,
82973+ .maxlen = sizeof(int),
82974+ .mode = 0600,
82975+ .proc_handler = &proc_dointvec,
82976+ },
82977+ {
82978+ .ctl_name = CTL_UNNUMBERED,
82979+ .procname = "tpe_gid",
82980+ .data = &grsec_tpe_gid,
82981+ .maxlen = sizeof(int),
82982+ .mode = 0600,
82983+ .proc_handler = &proc_dointvec,
82984+ },
82985+#endif
82986+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
82987+ {
82988+ .ctl_name = CTL_UNNUMBERED,
82989+ .procname = "tpe_invert",
82990+ .data = &grsec_enable_tpe_invert,
82991+ .maxlen = sizeof(int),
82992+ .mode = 0600,
82993+ .proc_handler = &proc_dointvec,
82994+ },
82995+#endif
82996+#ifdef CONFIG_GRKERNSEC_TPE_ALL
82997+ {
82998+ .ctl_name = CTL_UNNUMBERED,
82999+ .procname = "tpe_restrict_all",
83000+ .data = &grsec_enable_tpe_all,
83001+ .maxlen = sizeof(int),
83002+ .mode = 0600,
83003+ .proc_handler = &proc_dointvec,
83004+ },
83005+#endif
83006+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
83007+ {
83008+ .ctl_name = CTL_UNNUMBERED,
83009+ .procname = "socket_all",
83010+ .data = &grsec_enable_socket_all,
83011+ .maxlen = sizeof(int),
83012+ .mode = 0600,
83013+ .proc_handler = &proc_dointvec,
83014+ },
83015+ {
83016+ .ctl_name = CTL_UNNUMBERED,
83017+ .procname = "socket_all_gid",
83018+ .data = &grsec_socket_all_gid,
83019+ .maxlen = sizeof(int),
83020+ .mode = 0600,
83021+ .proc_handler = &proc_dointvec,
83022+ },
83023+#endif
83024+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
83025+ {
83026+ .ctl_name = CTL_UNNUMBERED,
83027+ .procname = "socket_client",
83028+ .data = &grsec_enable_socket_client,
83029+ .maxlen = sizeof(int),
83030+ .mode = 0600,
83031+ .proc_handler = &proc_dointvec,
83032+ },
83033+ {
83034+ .ctl_name = CTL_UNNUMBERED,
83035+ .procname = "socket_client_gid",
83036+ .data = &grsec_socket_client_gid,
83037+ .maxlen = sizeof(int),
83038+ .mode = 0600,
83039+ .proc_handler = &proc_dointvec,
83040+ },
83041+#endif
83042+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
83043+ {
83044+ .ctl_name = CTL_UNNUMBERED,
83045+ .procname = "socket_server",
83046+ .data = &grsec_enable_socket_server,
83047+ .maxlen = sizeof(int),
83048+ .mode = 0600,
83049+ .proc_handler = &proc_dointvec,
83050+ },
83051+ {
83052+ .ctl_name = CTL_UNNUMBERED,
83053+ .procname = "socket_server_gid",
83054+ .data = &grsec_socket_server_gid,
83055+ .maxlen = sizeof(int),
83056+ .mode = 0600,
83057+ .proc_handler = &proc_dointvec,
83058+ },
83059+#endif
83060+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
83061+ {
83062+ .ctl_name = CTL_UNNUMBERED,
83063+ .procname = "audit_group",
83064+ .data = &grsec_enable_group,
83065+ .maxlen = sizeof(int),
83066+ .mode = 0600,
83067+ .proc_handler = &proc_dointvec,
83068+ },
83069+ {
83070+ .ctl_name = CTL_UNNUMBERED,
83071+ .procname = "audit_gid",
83072+ .data = &grsec_audit_gid,
83073+ .maxlen = sizeof(int),
83074+ .mode = 0600,
83075+ .proc_handler = &proc_dointvec,
83076+ },
83077+#endif
83078+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
83079+ {
83080+ .ctl_name = CTL_UNNUMBERED,
83081+ .procname = "audit_chdir",
83082+ .data = &grsec_enable_chdir,
83083+ .maxlen = sizeof(int),
83084+ .mode = 0600,
83085+ .proc_handler = &proc_dointvec,
83086+ },
83087+#endif
83088+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
83089+ {
83090+ .ctl_name = CTL_UNNUMBERED,
83091+ .procname = "audit_mount",
83092+ .data = &grsec_enable_mount,
83093+ .maxlen = sizeof(int),
83094+ .mode = 0600,
83095+ .proc_handler = &proc_dointvec,
83096+ },
83097+#endif
83098+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
83099+ {
83100+ .ctl_name = CTL_UNNUMBERED,
83101+ .procname = "audit_textrel",
83102+ .data = &grsec_enable_audit_textrel,
83103+ .maxlen = sizeof(int),
83104+ .mode = 0600,
83105+ .proc_handler = &proc_dointvec,
83106+ },
83107+#endif
83108+#ifdef CONFIG_GRKERNSEC_DMESG
83109+ {
83110+ .ctl_name = CTL_UNNUMBERED,
83111+ .procname = "dmesg",
83112+ .data = &grsec_enable_dmesg,
83113+ .maxlen = sizeof(int),
83114+ .mode = 0600,
83115+ .proc_handler = &proc_dointvec,
83116+ },
83117+#endif
83118+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
83119+ {
83120+ .ctl_name = CTL_UNNUMBERED,
83121+ .procname = "chroot_findtask",
83122+ .data = &grsec_enable_chroot_findtask,
83123+ .maxlen = sizeof(int),
83124+ .mode = 0600,
83125+ .proc_handler = &proc_dointvec,
83126+ },
83127+#endif
83128+#ifdef CONFIG_GRKERNSEC_RESLOG
83129+ {
83130+ .ctl_name = CTL_UNNUMBERED,
83131+ .procname = "resource_logging",
83132+ .data = &grsec_resource_logging,
83133+ .maxlen = sizeof(int),
83134+ .mode = 0600,
83135+ .proc_handler = &proc_dointvec,
83136+ },
83137+#endif
83138+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
83139+ {
83140+ .ctl_name = CTL_UNNUMBERED,
83141+ .procname = "audit_ptrace",
83142+ .data = &grsec_enable_audit_ptrace,
83143+ .maxlen = sizeof(int),
83144+ .mode = 0600,
83145+ .proc_handler = &proc_dointvec,
83146+ },
83147+#endif
83148+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
83149+ {
83150+ .ctl_name = CTL_UNNUMBERED,
83151+ .procname = "harden_ptrace",
83152+ .data = &grsec_enable_harden_ptrace,
83153+ .maxlen = sizeof(int),
83154+ .mode = 0600,
83155+ .proc_handler = &proc_dointvec,
83156+ },
83157+#endif
83158+ {
83159+ .ctl_name = CTL_UNNUMBERED,
83160+ .procname = "grsec_lock",
83161+ .data = &grsec_lock,
83162+ .maxlen = sizeof(int),
83163+ .mode = 0600,
83164+ .proc_handler = &proc_dointvec,
83165+ },
83166+#endif
83167+#ifdef CONFIG_GRKERNSEC_ROFS
83168+ {
83169+ .ctl_name = CTL_UNNUMBERED,
83170+ .procname = "romount_protect",
83171+ .data = &grsec_enable_rofs,
83172+ .maxlen = sizeof(int),
83173+ .mode = 0600,
83174+ .proc_handler = &proc_dointvec_minmax,
83175+ .extra1 = &one,
83176+ .extra2 = &one,
83177+ },
83178+#endif
83179+ { .ctl_name = 0 }
83180+};
83181+#endif
83182diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
83183new file mode 100644
83184index 0000000..0dc13c3
83185--- /dev/null
83186+++ b/grsecurity/grsec_time.c
83187@@ -0,0 +1,16 @@
83188+#include <linux/kernel.h>
83189+#include <linux/sched.h>
83190+#include <linux/grinternal.h>
83191+#include <linux/module.h>
83192+
83193+void
83194+gr_log_timechange(void)
83195+{
83196+#ifdef CONFIG_GRKERNSEC_TIME
83197+ if (grsec_enable_time)
83198+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
83199+#endif
83200+ return;
83201+}
83202+
83203+EXPORT_SYMBOL(gr_log_timechange);
83204diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
83205new file mode 100644
83206index 0000000..07e0dc0
83207--- /dev/null
83208+++ b/grsecurity/grsec_tpe.c
83209@@ -0,0 +1,73 @@
83210+#include <linux/kernel.h>
83211+#include <linux/sched.h>
83212+#include <linux/file.h>
83213+#include <linux/fs.h>
83214+#include <linux/grinternal.h>
83215+
83216+extern int gr_acl_tpe_check(void);
83217+
83218+int
83219+gr_tpe_allow(const struct file *file)
83220+{
83221+#ifdef CONFIG_GRKERNSEC
83222+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
83223+ const struct cred *cred = current_cred();
83224+ char *msg = NULL;
83225+ char *msg2 = NULL;
83226+
83227+ // never restrict root
83228+ if (!cred->uid)
83229+ return 1;
83230+
83231+ if (grsec_enable_tpe) {
83232+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
83233+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
83234+ msg = "not being in trusted group";
83235+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
83236+ msg = "being in untrusted group";
83237+#else
83238+ if (in_group_p(grsec_tpe_gid))
83239+ msg = "being in untrusted group";
83240+#endif
83241+ }
83242+ if (!msg && gr_acl_tpe_check())
83243+ msg = "being in untrusted role";
83244+
83245+ // not in any affected group/role
83246+ if (!msg)
83247+ goto next_check;
83248+
83249+ if (inode->i_uid)
83250+ msg2 = "file in non-root-owned directory";
83251+ else if (inode->i_mode & S_IWOTH)
83252+ msg2 = "file in world-writable directory";
83253+ else if (inode->i_mode & S_IWGRP)
83254+ msg2 = "file in group-writable directory";
83255+
83256+ if (msg && msg2) {
83257+ char fullmsg[70] = {0};
83258+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
83259+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
83260+ return 0;
83261+ }
83262+ msg = NULL;
83263+next_check:
83264+#ifdef CONFIG_GRKERNSEC_TPE_ALL
83265+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
83266+ return 1;
83267+
83268+ if (inode->i_uid && (inode->i_uid != cred->uid))
83269+ msg = "directory not owned by user";
83270+ else if (inode->i_mode & S_IWOTH)
83271+ msg = "file in world-writable directory";
83272+ else if (inode->i_mode & S_IWGRP)
83273+ msg = "file in group-writable directory";
83274+
83275+ if (msg) {
83276+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
83277+ return 0;
83278+ }
83279+#endif
83280+#endif
83281+ return 1;
83282+}
83283diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
83284new file mode 100644
83285index 0000000..9f7b1ac
83286--- /dev/null
83287+++ b/grsecurity/grsum.c
83288@@ -0,0 +1,61 @@
83289+#include <linux/err.h>
83290+#include <linux/kernel.h>
83291+#include <linux/sched.h>
83292+#include <linux/mm.h>
83293+#include <linux/scatterlist.h>
83294+#include <linux/crypto.h>
83295+#include <linux/gracl.h>
83296+
83297+
83298+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
83299+#error "crypto and sha256 must be built into the kernel"
83300+#endif
83301+
83302+int
83303+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
83304+{
83305+ char *p;
83306+ struct crypto_hash *tfm;
83307+ struct hash_desc desc;
83308+ struct scatterlist sg;
83309+ unsigned char temp_sum[GR_SHA_LEN];
83310+ volatile int retval = 0;
83311+ volatile int dummy = 0;
83312+ unsigned int i;
83313+
83314+ sg_init_table(&sg, 1);
83315+
83316+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
83317+ if (IS_ERR(tfm)) {
83318+ /* should never happen, since sha256 should be built in */
83319+ return 1;
83320+ }
83321+
83322+ desc.tfm = tfm;
83323+ desc.flags = 0;
83324+
83325+ crypto_hash_init(&desc);
83326+
83327+ p = salt;
83328+ sg_set_buf(&sg, p, GR_SALT_LEN);
83329+ crypto_hash_update(&desc, &sg, sg.length);
83330+
83331+ p = entry->pw;
83332+ sg_set_buf(&sg, p, strlen(p));
83333+
83334+ crypto_hash_update(&desc, &sg, sg.length);
83335+
83336+ crypto_hash_final(&desc, temp_sum);
83337+
83338+ memset(entry->pw, 0, GR_PW_LEN);
83339+
83340+ for (i = 0; i < GR_SHA_LEN; i++)
83341+ if (sum[i] != temp_sum[i])
83342+ retval = 1;
83343+ else
83344+ dummy = 1; // waste a cycle
83345+
83346+ crypto_free_hash(tfm);
83347+
83348+ return retval;
83349+}
83350diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
83351index 3cd9ccd..fe16d47 100644
83352--- a/include/acpi/acpi_bus.h
83353+++ b/include/acpi/acpi_bus.h
83354@@ -107,7 +107,7 @@ struct acpi_device_ops {
83355 acpi_op_bind bind;
83356 acpi_op_unbind unbind;
83357 acpi_op_notify notify;
83358-};
83359+} __no_const;
83360
83361 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
83362
83363diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
83364index f4906f6..71feb73 100644
83365--- a/include/acpi/acpi_drivers.h
83366+++ b/include/acpi/acpi_drivers.h
83367@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
83368 Dock Station
83369 -------------------------------------------------------------------------- */
83370 struct acpi_dock_ops {
83371- acpi_notify_handler handler;
83372- acpi_notify_handler uevent;
83373+ const acpi_notify_handler handler;
83374+ const acpi_notify_handler uevent;
83375 };
83376
83377 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
83378@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
83379 extern int register_dock_notifier(struct notifier_block *nb);
83380 extern void unregister_dock_notifier(struct notifier_block *nb);
83381 extern int register_hotplug_dock_device(acpi_handle handle,
83382- struct acpi_dock_ops *ops,
83383+ const struct acpi_dock_ops *ops,
83384 void *context);
83385 extern void unregister_hotplug_dock_device(acpi_handle handle);
83386 #else
83387@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
83388 {
83389 }
83390 static inline int register_hotplug_dock_device(acpi_handle handle,
83391- struct acpi_dock_ops *ops,
83392+ const struct acpi_dock_ops *ops,
83393 void *context)
83394 {
83395 return -ENODEV;
83396diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
83397index b7babf0..a9ac9fc 100644
83398--- a/include/asm-generic/atomic-long.h
83399+++ b/include/asm-generic/atomic-long.h
83400@@ -22,6 +22,12 @@
83401
83402 typedef atomic64_t atomic_long_t;
83403
83404+#ifdef CONFIG_PAX_REFCOUNT
83405+typedef atomic64_unchecked_t atomic_long_unchecked_t;
83406+#else
83407+typedef atomic64_t atomic_long_unchecked_t;
83408+#endif
83409+
83410 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
83411
83412 static inline long atomic_long_read(atomic_long_t *l)
83413@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
83414 return (long)atomic64_read(v);
83415 }
83416
83417+#ifdef CONFIG_PAX_REFCOUNT
83418+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
83419+{
83420+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83421+
83422+ return (long)atomic64_read_unchecked(v);
83423+}
83424+#endif
83425+
83426 static inline void atomic_long_set(atomic_long_t *l, long i)
83427 {
83428 atomic64_t *v = (atomic64_t *)l;
83429@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
83430 atomic64_set(v, i);
83431 }
83432
83433+#ifdef CONFIG_PAX_REFCOUNT
83434+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
83435+{
83436+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83437+
83438+ atomic64_set_unchecked(v, i);
83439+}
83440+#endif
83441+
83442 static inline void atomic_long_inc(atomic_long_t *l)
83443 {
83444 atomic64_t *v = (atomic64_t *)l;
83445@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
83446 atomic64_inc(v);
83447 }
83448
83449+#ifdef CONFIG_PAX_REFCOUNT
83450+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
83451+{
83452+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83453+
83454+ atomic64_inc_unchecked(v);
83455+}
83456+#endif
83457+
83458 static inline void atomic_long_dec(atomic_long_t *l)
83459 {
83460 atomic64_t *v = (atomic64_t *)l;
83461@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
83462 atomic64_dec(v);
83463 }
83464
83465+#ifdef CONFIG_PAX_REFCOUNT
83466+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
83467+{
83468+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83469+
83470+ atomic64_dec_unchecked(v);
83471+}
83472+#endif
83473+
83474 static inline void atomic_long_add(long i, atomic_long_t *l)
83475 {
83476 atomic64_t *v = (atomic64_t *)l;
83477@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
83478 atomic64_add(i, v);
83479 }
83480
83481+#ifdef CONFIG_PAX_REFCOUNT
83482+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
83483+{
83484+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83485+
83486+ atomic64_add_unchecked(i, v);
83487+}
83488+#endif
83489+
83490 static inline void atomic_long_sub(long i, atomic_long_t *l)
83491 {
83492 atomic64_t *v = (atomic64_t *)l;
83493@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
83494 return (long)atomic64_inc_return(v);
83495 }
83496
83497+#ifdef CONFIG_PAX_REFCOUNT
83498+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
83499+{
83500+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
83501+
83502+ return (long)atomic64_inc_return_unchecked(v);
83503+}
83504+#endif
83505+
83506 static inline long atomic_long_dec_return(atomic_long_t *l)
83507 {
83508 atomic64_t *v = (atomic64_t *)l;
83509@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
83510
83511 typedef atomic_t atomic_long_t;
83512
83513+#ifdef CONFIG_PAX_REFCOUNT
83514+typedef atomic_unchecked_t atomic_long_unchecked_t;
83515+#else
83516+typedef atomic_t atomic_long_unchecked_t;
83517+#endif
83518+
83519 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
83520 static inline long atomic_long_read(atomic_long_t *l)
83521 {
83522@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
83523 return (long)atomic_read(v);
83524 }
83525
83526+#ifdef CONFIG_PAX_REFCOUNT
83527+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
83528+{
83529+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
83530+
83531+ return (long)atomic_read_unchecked(v);
83532+}
83533+#endif
83534+
83535 static inline void atomic_long_set(atomic_long_t *l, long i)
83536 {
83537 atomic_t *v = (atomic_t *)l;
83538@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
83539 atomic_set(v, i);
83540 }
83541
83542+#ifdef CONFIG_PAX_REFCOUNT
83543+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
83544+{
83545+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
83546+
83547+ atomic_set_unchecked(v, i);
83548+}
83549+#endif
83550+
83551 static inline void atomic_long_inc(atomic_long_t *l)
83552 {
83553 atomic_t *v = (atomic_t *)l;
83554@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
83555 atomic_inc(v);
83556 }
83557
83558+#ifdef CONFIG_PAX_REFCOUNT
83559+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
83560+{
83561+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
83562+
83563+ atomic_inc_unchecked(v);
83564+}
83565+#endif
83566+
83567 static inline void atomic_long_dec(atomic_long_t *l)
83568 {
83569 atomic_t *v = (atomic_t *)l;
83570@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
83571 atomic_dec(v);
83572 }
83573
83574+#ifdef CONFIG_PAX_REFCOUNT
83575+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
83576+{
83577+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
83578+
83579+ atomic_dec_unchecked(v);
83580+}
83581+#endif
83582+
83583 static inline void atomic_long_add(long i, atomic_long_t *l)
83584 {
83585 atomic_t *v = (atomic_t *)l;
83586@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
83587 atomic_add(i, v);
83588 }
83589
83590+#ifdef CONFIG_PAX_REFCOUNT
83591+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
83592+{
83593+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
83594+
83595+ atomic_add_unchecked(i, v);
83596+}
83597+#endif
83598+
83599 static inline void atomic_long_sub(long i, atomic_long_t *l)
83600 {
83601 atomic_t *v = (atomic_t *)l;
83602@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
83603 return (long)atomic_inc_return(v);
83604 }
83605
83606+#ifdef CONFIG_PAX_REFCOUNT
83607+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
83608+{
83609+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
83610+
83611+ return (long)atomic_inc_return_unchecked(v);
83612+}
83613+#endif
83614+
83615 static inline long atomic_long_dec_return(atomic_long_t *l)
83616 {
83617 atomic_t *v = (atomic_t *)l;
83618@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
83619
83620 #endif /* BITS_PER_LONG == 64 */
83621
83622+#ifdef CONFIG_PAX_REFCOUNT
83623+static inline void pax_refcount_needs_these_functions(void)
83624+{
83625+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
83626+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
83627+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
83628+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
83629+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
83630+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
83631+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
83632+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
83633+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
83634+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
83635+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
83636+
83637+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
83638+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
83639+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
83640+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
83641+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
83642+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
83643+}
83644+#else
83645+#define atomic_read_unchecked(v) atomic_read(v)
83646+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
83647+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
83648+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
83649+#define atomic_inc_unchecked(v) atomic_inc(v)
83650+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
83651+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
83652+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
83653+#define atomic_dec_unchecked(v) atomic_dec(v)
83654+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
83655+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
83656+
83657+#define atomic_long_read_unchecked(v) atomic_long_read(v)
83658+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
83659+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
83660+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
83661+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
83662+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
83663+#endif
83664+
83665 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
83666diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
83667index b18ce4f..2ee2843 100644
83668--- a/include/asm-generic/atomic64.h
83669+++ b/include/asm-generic/atomic64.h
83670@@ -16,6 +16,8 @@ typedef struct {
83671 long long counter;
83672 } atomic64_t;
83673
83674+typedef atomic64_t atomic64_unchecked_t;
83675+
83676 #define ATOMIC64_INIT(i) { (i) }
83677
83678 extern long long atomic64_read(const atomic64_t *v);
83679@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
83680 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
83681 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
83682
83683+#define atomic64_read_unchecked(v) atomic64_read(v)
83684+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
83685+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
83686+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
83687+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
83688+#define atomic64_inc_unchecked(v) atomic64_inc(v)
83689+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
83690+#define atomic64_dec_unchecked(v) atomic64_dec(v)
83691+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
83692+
83693 #endif /* _ASM_GENERIC_ATOMIC64_H */
83694diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
83695index d48ddf0..656a0ac 100644
83696--- a/include/asm-generic/bug.h
83697+++ b/include/asm-generic/bug.h
83698@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
83699
83700 #else /* !CONFIG_BUG */
83701 #ifndef HAVE_ARCH_BUG
83702-#define BUG() do {} while(0)
83703+#define BUG() do { for (;;) ; } while(0)
83704 #endif
83705
83706 #ifndef HAVE_ARCH_BUG_ON
83707-#define BUG_ON(condition) do { if (condition) ; } while(0)
83708+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
83709 #endif
83710
83711 #ifndef HAVE_ARCH_WARN_ON
83712diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
83713index 1bfcfe5..e04c5c9 100644
83714--- a/include/asm-generic/cache.h
83715+++ b/include/asm-generic/cache.h
83716@@ -6,7 +6,7 @@
83717 * cache lines need to provide their own cache.h.
83718 */
83719
83720-#define L1_CACHE_SHIFT 5
83721-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
83722+#define L1_CACHE_SHIFT 5UL
83723+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
83724
83725 #endif /* __ASM_GENERIC_CACHE_H */
83726diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
83727index 6920695..41038bc 100644
83728--- a/include/asm-generic/dma-mapping-common.h
83729+++ b/include/asm-generic/dma-mapping-common.h
83730@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
83731 enum dma_data_direction dir,
83732 struct dma_attrs *attrs)
83733 {
83734- struct dma_map_ops *ops = get_dma_ops(dev);
83735+ const struct dma_map_ops *ops = get_dma_ops(dev);
83736 dma_addr_t addr;
83737
83738 kmemcheck_mark_initialized(ptr, size);
83739@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
83740 enum dma_data_direction dir,
83741 struct dma_attrs *attrs)
83742 {
83743- struct dma_map_ops *ops = get_dma_ops(dev);
83744+ const struct dma_map_ops *ops = get_dma_ops(dev);
83745
83746 BUG_ON(!valid_dma_direction(dir));
83747 if (ops->unmap_page)
83748@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
83749 int nents, enum dma_data_direction dir,
83750 struct dma_attrs *attrs)
83751 {
83752- struct dma_map_ops *ops = get_dma_ops(dev);
83753+ const struct dma_map_ops *ops = get_dma_ops(dev);
83754 int i, ents;
83755 struct scatterlist *s;
83756
83757@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
83758 int nents, enum dma_data_direction dir,
83759 struct dma_attrs *attrs)
83760 {
83761- struct dma_map_ops *ops = get_dma_ops(dev);
83762+ const struct dma_map_ops *ops = get_dma_ops(dev);
83763
83764 BUG_ON(!valid_dma_direction(dir));
83765 debug_dma_unmap_sg(dev, sg, nents, dir);
83766@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
83767 size_t offset, size_t size,
83768 enum dma_data_direction dir)
83769 {
83770- struct dma_map_ops *ops = get_dma_ops(dev);
83771+ const struct dma_map_ops *ops = get_dma_ops(dev);
83772 dma_addr_t addr;
83773
83774 kmemcheck_mark_initialized(page_address(page) + offset, size);
83775@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
83776 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
83777 size_t size, enum dma_data_direction dir)
83778 {
83779- struct dma_map_ops *ops = get_dma_ops(dev);
83780+ const struct dma_map_ops *ops = get_dma_ops(dev);
83781
83782 BUG_ON(!valid_dma_direction(dir));
83783 if (ops->unmap_page)
83784@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
83785 size_t size,
83786 enum dma_data_direction dir)
83787 {
83788- struct dma_map_ops *ops = get_dma_ops(dev);
83789+ const struct dma_map_ops *ops = get_dma_ops(dev);
83790
83791 BUG_ON(!valid_dma_direction(dir));
83792 if (ops->sync_single_for_cpu)
83793@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
83794 dma_addr_t addr, size_t size,
83795 enum dma_data_direction dir)
83796 {
83797- struct dma_map_ops *ops = get_dma_ops(dev);
83798+ const struct dma_map_ops *ops = get_dma_ops(dev);
83799
83800 BUG_ON(!valid_dma_direction(dir));
83801 if (ops->sync_single_for_device)
83802@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
83803 size_t size,
83804 enum dma_data_direction dir)
83805 {
83806- struct dma_map_ops *ops = get_dma_ops(dev);
83807+ const struct dma_map_ops *ops = get_dma_ops(dev);
83808
83809 BUG_ON(!valid_dma_direction(dir));
83810 if (ops->sync_single_range_for_cpu) {
83811@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
83812 size_t size,
83813 enum dma_data_direction dir)
83814 {
83815- struct dma_map_ops *ops = get_dma_ops(dev);
83816+ const struct dma_map_ops *ops = get_dma_ops(dev);
83817
83818 BUG_ON(!valid_dma_direction(dir));
83819 if (ops->sync_single_range_for_device) {
83820@@ -155,7 +155,7 @@ static inline void
83821 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
83822 int nelems, enum dma_data_direction dir)
83823 {
83824- struct dma_map_ops *ops = get_dma_ops(dev);
83825+ const struct dma_map_ops *ops = get_dma_ops(dev);
83826
83827 BUG_ON(!valid_dma_direction(dir));
83828 if (ops->sync_sg_for_cpu)
83829@@ -167,7 +167,7 @@ static inline void
83830 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
83831 int nelems, enum dma_data_direction dir)
83832 {
83833- struct dma_map_ops *ops = get_dma_ops(dev);
83834+ const struct dma_map_ops *ops = get_dma_ops(dev);
83835
83836 BUG_ON(!valid_dma_direction(dir));
83837 if (ops->sync_sg_for_device)
83838diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
83839index 0d68a1e..b74a761 100644
83840--- a/include/asm-generic/emergency-restart.h
83841+++ b/include/asm-generic/emergency-restart.h
83842@@ -1,7 +1,7 @@
83843 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
83844 #define _ASM_GENERIC_EMERGENCY_RESTART_H
83845
83846-static inline void machine_emergency_restart(void)
83847+static inline __noreturn void machine_emergency_restart(void)
83848 {
83849 machine_restart(NULL);
83850 }
83851diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
83852index 3c2344f..4590a7d 100644
83853--- a/include/asm-generic/futex.h
83854+++ b/include/asm-generic/futex.h
83855@@ -6,7 +6,7 @@
83856 #include <asm/errno.h>
83857
83858 static inline int
83859-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
83860+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
83861 {
83862 int op = (encoded_op >> 28) & 7;
83863 int cmp = (encoded_op >> 24) & 15;
83864@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
83865 }
83866
83867 static inline int
83868-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
83869+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
83870 {
83871 return -ENOSYS;
83872 }
83873diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
83874index 1ca3efc..e3dc852 100644
83875--- a/include/asm-generic/int-l64.h
83876+++ b/include/asm-generic/int-l64.h
83877@@ -46,6 +46,8 @@ typedef unsigned int u32;
83878 typedef signed long s64;
83879 typedef unsigned long u64;
83880
83881+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
83882+
83883 #define S8_C(x) x
83884 #define U8_C(x) x ## U
83885 #define S16_C(x) x
83886diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
83887index f394147..b6152b9 100644
83888--- a/include/asm-generic/int-ll64.h
83889+++ b/include/asm-generic/int-ll64.h
83890@@ -51,6 +51,8 @@ typedef unsigned int u32;
83891 typedef signed long long s64;
83892 typedef unsigned long long u64;
83893
83894+typedef unsigned long long intoverflow_t;
83895+
83896 #define S8_C(x) x
83897 #define U8_C(x) x ## U
83898 #define S16_C(x) x
83899diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
83900index e5f234a..cdb16b3 100644
83901--- a/include/asm-generic/kmap_types.h
83902+++ b/include/asm-generic/kmap_types.h
83903@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
83904 KMAP_D(16) KM_IRQ_PTE,
83905 KMAP_D(17) KM_NMI,
83906 KMAP_D(18) KM_NMI_PTE,
83907-KMAP_D(19) KM_TYPE_NR
83908+KMAP_D(19) KM_CLEARPAGE,
83909+KMAP_D(20) KM_TYPE_NR
83910 };
83911
83912 #undef KMAP_D
83913diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
83914index 725612b..9cc513a 100644
83915--- a/include/asm-generic/pgtable-nopmd.h
83916+++ b/include/asm-generic/pgtable-nopmd.h
83917@@ -1,14 +1,19 @@
83918 #ifndef _PGTABLE_NOPMD_H
83919 #define _PGTABLE_NOPMD_H
83920
83921-#ifndef __ASSEMBLY__
83922-
83923 #include <asm-generic/pgtable-nopud.h>
83924
83925-struct mm_struct;
83926-
83927 #define __PAGETABLE_PMD_FOLDED
83928
83929+#define PMD_SHIFT PUD_SHIFT
83930+#define PTRS_PER_PMD 1
83931+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
83932+#define PMD_MASK (~(PMD_SIZE-1))
83933+
83934+#ifndef __ASSEMBLY__
83935+
83936+struct mm_struct;
83937+
83938 /*
83939 * Having the pmd type consist of a pud gets the size right, and allows
83940 * us to conceptually access the pud entry that this pmd is folded into
83941@@ -16,11 +21,6 @@ struct mm_struct;
83942 */
83943 typedef struct { pud_t pud; } pmd_t;
83944
83945-#define PMD_SHIFT PUD_SHIFT
83946-#define PTRS_PER_PMD 1
83947-#define PMD_SIZE (1UL << PMD_SHIFT)
83948-#define PMD_MASK (~(PMD_SIZE-1))
83949-
83950 /*
83951 * The "pud_xxx()" functions here are trivial for a folded two-level
83952 * setup: the pmd is never bad, and a pmd always exists (as it's folded
83953diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
83954index 810431d..ccc3638 100644
83955--- a/include/asm-generic/pgtable-nopud.h
83956+++ b/include/asm-generic/pgtable-nopud.h
83957@@ -1,10 +1,15 @@
83958 #ifndef _PGTABLE_NOPUD_H
83959 #define _PGTABLE_NOPUD_H
83960
83961-#ifndef __ASSEMBLY__
83962-
83963 #define __PAGETABLE_PUD_FOLDED
83964
83965+#define PUD_SHIFT PGDIR_SHIFT
83966+#define PTRS_PER_PUD 1
83967+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
83968+#define PUD_MASK (~(PUD_SIZE-1))
83969+
83970+#ifndef __ASSEMBLY__
83971+
83972 /*
83973 * Having the pud type consist of a pgd gets the size right, and allows
83974 * us to conceptually access the pgd entry that this pud is folded into
83975@@ -12,11 +17,6 @@
83976 */
83977 typedef struct { pgd_t pgd; } pud_t;
83978
83979-#define PUD_SHIFT PGDIR_SHIFT
83980-#define PTRS_PER_PUD 1
83981-#define PUD_SIZE (1UL << PUD_SHIFT)
83982-#define PUD_MASK (~(PUD_SIZE-1))
83983-
83984 /*
83985 * The "pgd_xxx()" functions here are trivial for a folded two-level
83986 * setup: the pud is never bad, and a pud always exists (as it's folded
83987diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
83988index e2bd73e..fea8ed3 100644
83989--- a/include/asm-generic/pgtable.h
83990+++ b/include/asm-generic/pgtable.h
83991@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
83992 unsigned long size);
83993 #endif
83994
83995+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
83996+static inline unsigned long pax_open_kernel(void) { return 0; }
83997+#endif
83998+
83999+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
84000+static inline unsigned long pax_close_kernel(void) { return 0; }
84001+#endif
84002+
84003 #endif /* !__ASSEMBLY__ */
84004
84005 #endif /* _ASM_GENERIC_PGTABLE_H */
84006diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
84007index b6e818f..21aa58a 100644
84008--- a/include/asm-generic/vmlinux.lds.h
84009+++ b/include/asm-generic/vmlinux.lds.h
84010@@ -199,6 +199,7 @@
84011 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
84012 VMLINUX_SYMBOL(__start_rodata) = .; \
84013 *(.rodata) *(.rodata.*) \
84014+ *(.data.read_only) \
84015 *(__vermagic) /* Kernel version magic */ \
84016 *(__markers_strings) /* Markers: strings */ \
84017 *(__tracepoints_strings)/* Tracepoints: strings */ \
84018@@ -656,22 +657,24 @@
84019 * section in the linker script will go there too. @phdr should have
84020 * a leading colon.
84021 *
84022- * Note that this macros defines __per_cpu_load as an absolute symbol.
84023+ * Note that this macros defines per_cpu_load as an absolute symbol.
84024 * If there is no need to put the percpu section at a predetermined
84025 * address, use PERCPU().
84026 */
84027 #define PERCPU_VADDR(vaddr, phdr) \
84028- VMLINUX_SYMBOL(__per_cpu_load) = .; \
84029- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
84030+ per_cpu_load = .; \
84031+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
84032 - LOAD_OFFSET) { \
84033+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
84034 VMLINUX_SYMBOL(__per_cpu_start) = .; \
84035 *(.data.percpu.first) \
84036- *(.data.percpu.page_aligned) \
84037 *(.data.percpu) \
84038+ . = ALIGN(PAGE_SIZE); \
84039+ *(.data.percpu.page_aligned) \
84040 *(.data.percpu.shared_aligned) \
84041 VMLINUX_SYMBOL(__per_cpu_end) = .; \
84042 } phdr \
84043- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
84044+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
84045
84046 /**
84047 * PERCPU - define output section for percpu area, simple version
84048diff --git a/include/drm/drmP.h b/include/drm/drmP.h
84049index ebab6a6..351dba1 100644
84050--- a/include/drm/drmP.h
84051+++ b/include/drm/drmP.h
84052@@ -71,6 +71,7 @@
84053 #include <linux/workqueue.h>
84054 #include <linux/poll.h>
84055 #include <asm/pgalloc.h>
84056+#include <asm/local.h>
84057 #include "drm.h"
84058
84059 #include <linux/idr.h>
84060@@ -814,7 +815,7 @@ struct drm_driver {
84061 void (*vgaarb_irq)(struct drm_device *dev, bool state);
84062
84063 /* Driver private ops for this object */
84064- struct vm_operations_struct *gem_vm_ops;
84065+ const struct vm_operations_struct *gem_vm_ops;
84066
84067 int major;
84068 int minor;
84069@@ -917,7 +918,7 @@ struct drm_device {
84070
84071 /** \name Usage Counters */
84072 /*@{ */
84073- int open_count; /**< Outstanding files open */
84074+ local_t open_count; /**< Outstanding files open */
84075 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
84076 atomic_t vma_count; /**< Outstanding vma areas open */
84077 int buf_use; /**< Buffers in use -- cannot alloc */
84078@@ -928,7 +929,7 @@ struct drm_device {
84079 /*@{ */
84080 unsigned long counters;
84081 enum drm_stat_type types[15];
84082- atomic_t counts[15];
84083+ atomic_unchecked_t counts[15];
84084 /*@} */
84085
84086 struct list_head filelist;
84087@@ -1016,7 +1017,7 @@ struct drm_device {
84088 struct pci_controller *hose;
84089 #endif
84090 struct drm_sg_mem *sg; /**< Scatter gather memory */
84091- unsigned int num_crtcs; /**< Number of CRTCs on this device */
84092+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
84093 void *dev_private; /**< device private data */
84094 void *mm_private;
84095 struct address_space *dev_mapping;
84096@@ -1042,11 +1043,11 @@ struct drm_device {
84097 spinlock_t object_name_lock;
84098 struct idr object_name_idr;
84099 atomic_t object_count;
84100- atomic_t object_memory;
84101+ atomic_unchecked_t object_memory;
84102 atomic_t pin_count;
84103- atomic_t pin_memory;
84104+ atomic_unchecked_t pin_memory;
84105 atomic_t gtt_count;
84106- atomic_t gtt_memory;
84107+ atomic_unchecked_t gtt_memory;
84108 uint32_t gtt_total;
84109 uint32_t invalidate_domains; /* domains pending invalidation */
84110 uint32_t flush_domains; /* domains pending flush */
84111diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
84112index b29e201..3413cc9 100644
84113--- a/include/drm/drm_crtc_helper.h
84114+++ b/include/drm/drm_crtc_helper.h
84115@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
84116
84117 /* reload the current crtc LUT */
84118 void (*load_lut)(struct drm_crtc *crtc);
84119-};
84120+} __no_const;
84121
84122 struct drm_encoder_helper_funcs {
84123 void (*dpms)(struct drm_encoder *encoder, int mode);
84124@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
84125 struct drm_connector *connector);
84126 /* disable encoder when not in use - more explicit than dpms off */
84127 void (*disable)(struct drm_encoder *encoder);
84128-};
84129+} __no_const;
84130
84131 struct drm_connector_helper_funcs {
84132 int (*get_modes)(struct drm_connector *connector);
84133diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
84134index b199170..6f9e64c 100644
84135--- a/include/drm/ttm/ttm_memory.h
84136+++ b/include/drm/ttm/ttm_memory.h
84137@@ -47,7 +47,7 @@
84138
84139 struct ttm_mem_shrink {
84140 int (*do_shrink) (struct ttm_mem_shrink *);
84141-};
84142+} __no_const;
84143
84144 /**
84145 * struct ttm_mem_global - Global memory accounting structure.
84146diff --git a/include/linux/a.out.h b/include/linux/a.out.h
84147index e86dfca..40cc55f 100644
84148--- a/include/linux/a.out.h
84149+++ b/include/linux/a.out.h
84150@@ -39,6 +39,14 @@ enum machine_type {
84151 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
84152 };
84153
84154+/* Constants for the N_FLAGS field */
84155+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
84156+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
84157+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
84158+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
84159+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
84160+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
84161+
84162 #if !defined (N_MAGIC)
84163 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
84164 #endif
84165diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
84166index 817b237..62c10bc 100644
84167--- a/include/linux/atmdev.h
84168+++ b/include/linux/atmdev.h
84169@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
84170 #endif
84171
84172 struct k_atm_aal_stats {
84173-#define __HANDLE_ITEM(i) atomic_t i
84174+#define __HANDLE_ITEM(i) atomic_unchecked_t i
84175 __AAL_STAT_ITEMS
84176 #undef __HANDLE_ITEM
84177 };
84178diff --git a/include/linux/backlight.h b/include/linux/backlight.h
84179index 0f5f578..8c4f884 100644
84180--- a/include/linux/backlight.h
84181+++ b/include/linux/backlight.h
84182@@ -36,18 +36,18 @@ struct backlight_device;
84183 struct fb_info;
84184
84185 struct backlight_ops {
84186- unsigned int options;
84187+ const unsigned int options;
84188
84189 #define BL_CORE_SUSPENDRESUME (1 << 0)
84190
84191 /* Notify the backlight driver some property has changed */
84192- int (*update_status)(struct backlight_device *);
84193+ int (* const update_status)(struct backlight_device *);
84194 /* Return the current backlight brightness (accounting for power,
84195 fb_blank etc.) */
84196- int (*get_brightness)(struct backlight_device *);
84197+ int (* const get_brightness)(struct backlight_device *);
84198 /* Check if given framebuffer device is the one bound to this backlight;
84199 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
84200- int (*check_fb)(struct fb_info *);
84201+ int (* const check_fb)(struct fb_info *);
84202 };
84203
84204 /* This structure defines all the properties of a backlight */
84205@@ -86,7 +86,7 @@ struct backlight_device {
84206 registered this device has been unloaded, and if class_get_devdata()
84207 points to something in the body of that driver, it is also invalid. */
84208 struct mutex ops_lock;
84209- struct backlight_ops *ops;
84210+ const struct backlight_ops *ops;
84211
84212 /* The framebuffer notifier block */
84213 struct notifier_block fb_notif;
84214@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
84215 }
84216
84217 extern struct backlight_device *backlight_device_register(const char *name,
84218- struct device *dev, void *devdata, struct backlight_ops *ops);
84219+ struct device *dev, void *devdata, const struct backlight_ops *ops);
84220 extern void backlight_device_unregister(struct backlight_device *bd);
84221 extern void backlight_force_update(struct backlight_device *bd,
84222 enum backlight_update_reason reason);
84223diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
84224index a3d802e..93a2ef4 100644
84225--- a/include/linux/binfmts.h
84226+++ b/include/linux/binfmts.h
84227@@ -18,7 +18,7 @@ struct pt_regs;
84228 #define BINPRM_BUF_SIZE 128
84229
84230 #ifdef __KERNEL__
84231-#include <linux/list.h>
84232+#include <linux/sched.h>
84233
84234 #define CORENAME_MAX_SIZE 128
84235
84236@@ -58,6 +58,7 @@ struct linux_binprm{
84237 unsigned interp_flags;
84238 unsigned interp_data;
84239 unsigned long loader, exec;
84240+ char tcomm[TASK_COMM_LEN];
84241 };
84242
84243 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
84244@@ -83,6 +84,7 @@ struct linux_binfmt {
84245 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
84246 int (*load_shlib)(struct file *);
84247 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
84248+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
84249 unsigned long min_coredump; /* minimal dump size */
84250 int hasvdso;
84251 };
84252diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
84253index 5eb6cb0..a2906d2 100644
84254--- a/include/linux/blkdev.h
84255+++ b/include/linux/blkdev.h
84256@@ -1281,7 +1281,7 @@ struct block_device_operations {
84257 int (*revalidate_disk) (struct gendisk *);
84258 int (*getgeo)(struct block_device *, struct hd_geometry *);
84259 struct module *owner;
84260-};
84261+} __do_const;
84262
84263 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
84264 unsigned long);
84265diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
84266index 3b73b99..629d21b 100644
84267--- a/include/linux/blktrace_api.h
84268+++ b/include/linux/blktrace_api.h
84269@@ -160,7 +160,7 @@ struct blk_trace {
84270 struct dentry *dir;
84271 struct dentry *dropped_file;
84272 struct dentry *msg_file;
84273- atomic_t dropped;
84274+ atomic_unchecked_t dropped;
84275 };
84276
84277 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
84278diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
84279index 83195fb..0b0f77d 100644
84280--- a/include/linux/byteorder/little_endian.h
84281+++ b/include/linux/byteorder/little_endian.h
84282@@ -42,51 +42,51 @@
84283
84284 static inline __le64 __cpu_to_le64p(const __u64 *p)
84285 {
84286- return (__force __le64)*p;
84287+ return (__force const __le64)*p;
84288 }
84289 static inline __u64 __le64_to_cpup(const __le64 *p)
84290 {
84291- return (__force __u64)*p;
84292+ return (__force const __u64)*p;
84293 }
84294 static inline __le32 __cpu_to_le32p(const __u32 *p)
84295 {
84296- return (__force __le32)*p;
84297+ return (__force const __le32)*p;
84298 }
84299 static inline __u32 __le32_to_cpup(const __le32 *p)
84300 {
84301- return (__force __u32)*p;
84302+ return (__force const __u32)*p;
84303 }
84304 static inline __le16 __cpu_to_le16p(const __u16 *p)
84305 {
84306- return (__force __le16)*p;
84307+ return (__force const __le16)*p;
84308 }
84309 static inline __u16 __le16_to_cpup(const __le16 *p)
84310 {
84311- return (__force __u16)*p;
84312+ return (__force const __u16)*p;
84313 }
84314 static inline __be64 __cpu_to_be64p(const __u64 *p)
84315 {
84316- return (__force __be64)__swab64p(p);
84317+ return (__force const __be64)__swab64p(p);
84318 }
84319 static inline __u64 __be64_to_cpup(const __be64 *p)
84320 {
84321- return __swab64p((__u64 *)p);
84322+ return __swab64p((const __u64 *)p);
84323 }
84324 static inline __be32 __cpu_to_be32p(const __u32 *p)
84325 {
84326- return (__force __be32)__swab32p(p);
84327+ return (__force const __be32)__swab32p(p);
84328 }
84329 static inline __u32 __be32_to_cpup(const __be32 *p)
84330 {
84331- return __swab32p((__u32 *)p);
84332+ return __swab32p((const __u32 *)p);
84333 }
84334 static inline __be16 __cpu_to_be16p(const __u16 *p)
84335 {
84336- return (__force __be16)__swab16p(p);
84337+ return (__force const __be16)__swab16p(p);
84338 }
84339 static inline __u16 __be16_to_cpup(const __be16 *p)
84340 {
84341- return __swab16p((__u16 *)p);
84342+ return __swab16p((const __u16 *)p);
84343 }
84344 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
84345 #define __le64_to_cpus(x) do { (void)(x); } while (0)
84346diff --git a/include/linux/cache.h b/include/linux/cache.h
84347index 97e2488..e7576b9 100644
84348--- a/include/linux/cache.h
84349+++ b/include/linux/cache.h
84350@@ -16,6 +16,10 @@
84351 #define __read_mostly
84352 #endif
84353
84354+#ifndef __read_only
84355+#define __read_only __read_mostly
84356+#endif
84357+
84358 #ifndef ____cacheline_aligned
84359 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
84360 #endif
84361diff --git a/include/linux/capability.h b/include/linux/capability.h
84362index c8f2a5f7..1618a5c 100644
84363--- a/include/linux/capability.h
84364+++ b/include/linux/capability.h
84365@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
84366 (security_real_capable_noaudit((t), (cap)) == 0)
84367
84368 extern int capable(int cap);
84369+int capable_nolog(int cap);
84370
84371 /* audit system wants to get cap info from files as well */
84372 struct dentry;
84373diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
84374index 450fa59..86019fb 100644
84375--- a/include/linux/compiler-gcc4.h
84376+++ b/include/linux/compiler-gcc4.h
84377@@ -36,4 +36,16 @@
84378 the kernel context */
84379 #define __cold __attribute__((__cold__))
84380
84381+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
84382+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
84383+#define __bos0(ptr) __bos((ptr), 0)
84384+#define __bos1(ptr) __bos((ptr), 1)
84385+
84386+#if __GNUC_MINOR__ >= 5
84387+#ifdef CONSTIFY_PLUGIN
84388+#define __no_const __attribute__((no_const))
84389+#define __do_const __attribute__((do_const))
84390+#endif
84391+#endif
84392+
84393 #endif
84394diff --git a/include/linux/compiler.h b/include/linux/compiler.h
84395index 04fb513..fd6477b 100644
84396--- a/include/linux/compiler.h
84397+++ b/include/linux/compiler.h
84398@@ -5,11 +5,14 @@
84399
84400 #ifdef __CHECKER__
84401 # define __user __attribute__((noderef, address_space(1)))
84402+# define __force_user __force __user
84403 # define __kernel /* default address space */
84404+# define __force_kernel __force __kernel
84405 # define __safe __attribute__((safe))
84406 # define __force __attribute__((force))
84407 # define __nocast __attribute__((nocast))
84408 # define __iomem __attribute__((noderef, address_space(2)))
84409+# define __force_iomem __force __iomem
84410 # define __acquires(x) __attribute__((context(x,0,1)))
84411 # define __releases(x) __attribute__((context(x,1,0)))
84412 # define __acquire(x) __context__(x,1)
84413@@ -17,13 +20,34 @@
84414 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
84415 extern void __chk_user_ptr(const volatile void __user *);
84416 extern void __chk_io_ptr(const volatile void __iomem *);
84417+#elif defined(CHECKER_PLUGIN)
84418+//# define __user
84419+//# define __force_user
84420+//# define __kernel
84421+//# define __force_kernel
84422+# define __safe
84423+# define __force
84424+# define __nocast
84425+# define __iomem
84426+# define __force_iomem
84427+# define __chk_user_ptr(x) (void)0
84428+# define __chk_io_ptr(x) (void)0
84429+# define __builtin_warning(x, y...) (1)
84430+# define __acquires(x)
84431+# define __releases(x)
84432+# define __acquire(x) (void)0
84433+# define __release(x) (void)0
84434+# define __cond_lock(x,c) (c)
84435 #else
84436 # define __user
84437+# define __force_user
84438 # define __kernel
84439+# define __force_kernel
84440 # define __safe
84441 # define __force
84442 # define __nocast
84443 # define __iomem
84444+# define __force_iomem
84445 # define __chk_user_ptr(x) (void)0
84446 # define __chk_io_ptr(x) (void)0
84447 # define __builtin_warning(x, y...) (1)
84448@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
84449 # define __attribute_const__ /* unimplemented */
84450 #endif
84451
84452+#ifndef __no_const
84453+# define __no_const
84454+#endif
84455+
84456+#ifndef __do_const
84457+# define __do_const
84458+#endif
84459+
84460 /*
84461 * Tell gcc if a function is cold. The compiler will assume any path
84462 * directly leading to the call is unlikely.
84463@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
84464 #define __cold
84465 #endif
84466
84467+#ifndef __alloc_size
84468+#define __alloc_size(...)
84469+#endif
84470+
84471+#ifndef __bos
84472+#define __bos(ptr, arg)
84473+#endif
84474+
84475+#ifndef __bos0
84476+#define __bos0(ptr)
84477+#endif
84478+
84479+#ifndef __bos1
84480+#define __bos1(ptr)
84481+#endif
84482+
84483 /* Simple shorthand for a section definition */
84484 #ifndef __section
84485 # define __section(S) __attribute__ ((__section__(#S)))
84486@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
84487 * use is to mediate communication between process-level code and irq/NMI
84488 * handlers, all running on the same CPU.
84489 */
84490-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
84491+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
84492+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
84493
84494 #endif /* __LINUX_COMPILER_H */
84495diff --git a/include/linux/crypto.h b/include/linux/crypto.h
84496index fd92988..a3164bd 100644
84497--- a/include/linux/crypto.h
84498+++ b/include/linux/crypto.h
84499@@ -394,7 +394,7 @@ struct cipher_tfm {
84500 const u8 *key, unsigned int keylen);
84501 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
84502 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
84503-};
84504+} __no_const;
84505
84506 struct hash_tfm {
84507 int (*init)(struct hash_desc *desc);
84508@@ -415,13 +415,13 @@ struct compress_tfm {
84509 int (*cot_decompress)(struct crypto_tfm *tfm,
84510 const u8 *src, unsigned int slen,
84511 u8 *dst, unsigned int *dlen);
84512-};
84513+} __no_const;
84514
84515 struct rng_tfm {
84516 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
84517 unsigned int dlen);
84518 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
84519-};
84520+} __no_const;
84521
84522 #define crt_ablkcipher crt_u.ablkcipher
84523 #define crt_aead crt_u.aead
84524diff --git a/include/linux/dcache.h b/include/linux/dcache.h
84525index 30b93b2..cd7a8db 100644
84526--- a/include/linux/dcache.h
84527+++ b/include/linux/dcache.h
84528@@ -119,6 +119,8 @@ struct dentry {
84529 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
84530 };
84531
84532+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
84533+
84534 /*
84535 * dentry->d_lock spinlock nesting subclasses:
84536 *
84537diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
84538index 3e9bd6a..f4e1aa0 100644
84539--- a/include/linux/decompress/mm.h
84540+++ b/include/linux/decompress/mm.h
84541@@ -78,7 +78,7 @@ static void free(void *where)
84542 * warnings when not needed (indeed large_malloc / large_free are not
84543 * needed by inflate */
84544
84545-#define malloc(a) kmalloc(a, GFP_KERNEL)
84546+#define malloc(a) kmalloc((a), GFP_KERNEL)
84547 #define free(a) kfree(a)
84548
84549 #define large_malloc(a) vmalloc(a)
84550diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
84551index 91b7618..92a93d32 100644
84552--- a/include/linux/dma-mapping.h
84553+++ b/include/linux/dma-mapping.h
84554@@ -16,51 +16,51 @@ enum dma_data_direction {
84555 };
84556
84557 struct dma_map_ops {
84558- void* (*alloc_coherent)(struct device *dev, size_t size,
84559+ void* (* const alloc_coherent)(struct device *dev, size_t size,
84560 dma_addr_t *dma_handle, gfp_t gfp);
84561- void (*free_coherent)(struct device *dev, size_t size,
84562+ void (* const free_coherent)(struct device *dev, size_t size,
84563 void *vaddr, dma_addr_t dma_handle);
84564- dma_addr_t (*map_page)(struct device *dev, struct page *page,
84565+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
84566 unsigned long offset, size_t size,
84567 enum dma_data_direction dir,
84568 struct dma_attrs *attrs);
84569- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
84570+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
84571 size_t size, enum dma_data_direction dir,
84572 struct dma_attrs *attrs);
84573- int (*map_sg)(struct device *dev, struct scatterlist *sg,
84574+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
84575 int nents, enum dma_data_direction dir,
84576 struct dma_attrs *attrs);
84577- void (*unmap_sg)(struct device *dev,
84578+ void (* const unmap_sg)(struct device *dev,
84579 struct scatterlist *sg, int nents,
84580 enum dma_data_direction dir,
84581 struct dma_attrs *attrs);
84582- void (*sync_single_for_cpu)(struct device *dev,
84583+ void (* const sync_single_for_cpu)(struct device *dev,
84584 dma_addr_t dma_handle, size_t size,
84585 enum dma_data_direction dir);
84586- void (*sync_single_for_device)(struct device *dev,
84587+ void (* const sync_single_for_device)(struct device *dev,
84588 dma_addr_t dma_handle, size_t size,
84589 enum dma_data_direction dir);
84590- void (*sync_single_range_for_cpu)(struct device *dev,
84591+ void (* const sync_single_range_for_cpu)(struct device *dev,
84592 dma_addr_t dma_handle,
84593 unsigned long offset,
84594 size_t size,
84595 enum dma_data_direction dir);
84596- void (*sync_single_range_for_device)(struct device *dev,
84597+ void (* const sync_single_range_for_device)(struct device *dev,
84598 dma_addr_t dma_handle,
84599 unsigned long offset,
84600 size_t size,
84601 enum dma_data_direction dir);
84602- void (*sync_sg_for_cpu)(struct device *dev,
84603+ void (* const sync_sg_for_cpu)(struct device *dev,
84604 struct scatterlist *sg, int nents,
84605 enum dma_data_direction dir);
84606- void (*sync_sg_for_device)(struct device *dev,
84607+ void (* const sync_sg_for_device)(struct device *dev,
84608 struct scatterlist *sg, int nents,
84609 enum dma_data_direction dir);
84610- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
84611- int (*dma_supported)(struct device *dev, u64 mask);
84612+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
84613+ int (* const dma_supported)(struct device *dev, u64 mask);
84614 int (*set_dma_mask)(struct device *dev, u64 mask);
84615 int is_phys;
84616-};
84617+} __do_const;
84618
84619 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
84620
84621diff --git a/include/linux/dst.h b/include/linux/dst.h
84622index e26fed8..b976d9f 100644
84623--- a/include/linux/dst.h
84624+++ b/include/linux/dst.h
84625@@ -380,7 +380,7 @@ struct dst_node
84626 struct thread_pool *pool;
84627
84628 /* Transaction IDs live here */
84629- atomic_long_t gen;
84630+ atomic_long_unchecked_t gen;
84631
84632 /*
84633 * How frequently and how many times transaction
84634diff --git a/include/linux/elf.h b/include/linux/elf.h
84635index 90a4ed0..d652617 100644
84636--- a/include/linux/elf.h
84637+++ b/include/linux/elf.h
84638@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
84639 #define PT_GNU_EH_FRAME 0x6474e550
84640
84641 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
84642+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
84643+
84644+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
84645+
84646+/* Constants for the e_flags field */
84647+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
84648+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
84649+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
84650+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
84651+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
84652+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
84653
84654 /* These constants define the different elf file types */
84655 #define ET_NONE 0
84656@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
84657 #define DT_DEBUG 21
84658 #define DT_TEXTREL 22
84659 #define DT_JMPREL 23
84660+#define DT_FLAGS 30
84661+ #define DF_TEXTREL 0x00000004
84662 #define DT_ENCODING 32
84663 #define OLD_DT_LOOS 0x60000000
84664 #define DT_LOOS 0x6000000d
84665@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
84666 #define PF_W 0x2
84667 #define PF_X 0x1
84668
84669+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
84670+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
84671+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
84672+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
84673+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
84674+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
84675+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
84676+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
84677+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
84678+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
84679+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
84680+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
84681+
84682 typedef struct elf32_phdr{
84683 Elf32_Word p_type;
84684 Elf32_Off p_offset;
84685@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
84686 #define EI_OSABI 7
84687 #define EI_PAD 8
84688
84689+#define EI_PAX 14
84690+
84691 #define ELFMAG0 0x7f /* EI_MAG */
84692 #define ELFMAG1 'E'
84693 #define ELFMAG2 'L'
84694@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
84695 #define elf_phdr elf32_phdr
84696 #define elf_note elf32_note
84697 #define elf_addr_t Elf32_Off
84698+#define elf_dyn Elf32_Dyn
84699
84700 #else
84701
84702@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
84703 #define elf_phdr elf64_phdr
84704 #define elf_note elf64_note
84705 #define elf_addr_t Elf64_Off
84706+#define elf_dyn Elf64_Dyn
84707
84708 #endif
84709
84710diff --git a/include/linux/fs.h b/include/linux/fs.h
84711index 1b9a47a..6fe2934 100644
84712--- a/include/linux/fs.h
84713+++ b/include/linux/fs.h
84714@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
84715 unsigned long, unsigned long);
84716
84717 struct address_space_operations {
84718- int (*writepage)(struct page *page, struct writeback_control *wbc);
84719- int (*readpage)(struct file *, struct page *);
84720- void (*sync_page)(struct page *);
84721+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
84722+ int (* const readpage)(struct file *, struct page *);
84723+ void (* const sync_page)(struct page *);
84724
84725 /* Write back some dirty pages from this mapping. */
84726- int (*writepages)(struct address_space *, struct writeback_control *);
84727+ int (* const writepages)(struct address_space *, struct writeback_control *);
84728
84729 /* Set a page dirty. Return true if this dirtied it */
84730- int (*set_page_dirty)(struct page *page);
84731+ int (* const set_page_dirty)(struct page *page);
84732
84733- int (*readpages)(struct file *filp, struct address_space *mapping,
84734+ int (* const readpages)(struct file *filp, struct address_space *mapping,
84735 struct list_head *pages, unsigned nr_pages);
84736
84737- int (*write_begin)(struct file *, struct address_space *mapping,
84738+ int (* const write_begin)(struct file *, struct address_space *mapping,
84739 loff_t pos, unsigned len, unsigned flags,
84740 struct page **pagep, void **fsdata);
84741- int (*write_end)(struct file *, struct address_space *mapping,
84742+ int (* const write_end)(struct file *, struct address_space *mapping,
84743 loff_t pos, unsigned len, unsigned copied,
84744 struct page *page, void *fsdata);
84745
84746 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
84747- sector_t (*bmap)(struct address_space *, sector_t);
84748- void (*invalidatepage) (struct page *, unsigned long);
84749- int (*releasepage) (struct page *, gfp_t);
84750- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
84751+ sector_t (* const bmap)(struct address_space *, sector_t);
84752+ void (* const invalidatepage) (struct page *, unsigned long);
84753+ int (* const releasepage) (struct page *, gfp_t);
84754+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
84755 loff_t offset, unsigned long nr_segs);
84756- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
84757+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
84758 void **, unsigned long *);
84759 /* migrate the contents of a page to the specified target */
84760- int (*migratepage) (struct address_space *,
84761+ int (* const migratepage) (struct address_space *,
84762 struct page *, struct page *);
84763- int (*launder_page) (struct page *);
84764- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
84765+ int (* const launder_page) (struct page *);
84766+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
84767 unsigned long);
84768- int (*error_remove_page)(struct address_space *, struct page *);
84769+ int (* const error_remove_page)(struct address_space *, struct page *);
84770 };
84771
84772 /*
84773@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
84774 typedef struct files_struct *fl_owner_t;
84775
84776 struct file_lock_operations {
84777- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
84778- void (*fl_release_private)(struct file_lock *);
84779+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
84780+ void (* const fl_release_private)(struct file_lock *);
84781 };
84782
84783 struct lock_manager_operations {
84784- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
84785- void (*fl_notify)(struct file_lock *); /* unblock callback */
84786- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
84787- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
84788- void (*fl_release_private)(struct file_lock *);
84789- void (*fl_break)(struct file_lock *);
84790- int (*fl_mylease)(struct file_lock *, struct file_lock *);
84791- int (*fl_change)(struct file_lock **, int);
84792+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
84793+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
84794+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
84795+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
84796+ void (* const fl_release_private)(struct file_lock *);
84797+ void (* const fl_break)(struct file_lock *);
84798+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
84799+ int (* const fl_change)(struct file_lock **, int);
84800 };
84801
84802 struct lock_manager {
84803@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
84804 unsigned int fi_flags; /* Flags as passed from user */
84805 unsigned int fi_extents_mapped; /* Number of mapped extents */
84806 unsigned int fi_extents_max; /* Size of fiemap_extent array */
84807- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
84808+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
84809 * array */
84810 };
84811 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
84812@@ -1512,7 +1512,8 @@ struct file_operations {
84813 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
84814 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
84815 int (*setlease)(struct file *, long, struct file_lock **);
84816-};
84817+} __do_const;
84818+typedef struct file_operations __no_const file_operations_no_const;
84819
84820 struct inode_operations {
84821 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
84822@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
84823 unsigned long, loff_t *);
84824
84825 struct super_operations {
84826- struct inode *(*alloc_inode)(struct super_block *sb);
84827- void (*destroy_inode)(struct inode *);
84828+ struct inode *(* const alloc_inode)(struct super_block *sb);
84829+ void (* const destroy_inode)(struct inode *);
84830
84831- void (*dirty_inode) (struct inode *);
84832- int (*write_inode) (struct inode *, int);
84833- void (*drop_inode) (struct inode *);
84834- void (*delete_inode) (struct inode *);
84835- void (*put_super) (struct super_block *);
84836- void (*write_super) (struct super_block *);
84837- int (*sync_fs)(struct super_block *sb, int wait);
84838- int (*freeze_fs) (struct super_block *);
84839- int (*unfreeze_fs) (struct super_block *);
84840- int (*statfs) (struct dentry *, struct kstatfs *);
84841- int (*remount_fs) (struct super_block *, int *, char *);
84842- void (*clear_inode) (struct inode *);
84843- void (*umount_begin) (struct super_block *);
84844+ void (* const dirty_inode) (struct inode *);
84845+ int (* const write_inode) (struct inode *, int);
84846+ void (* const drop_inode) (struct inode *);
84847+ void (* const delete_inode) (struct inode *);
84848+ void (* const put_super) (struct super_block *);
84849+ void (* const write_super) (struct super_block *);
84850+ int (* const sync_fs)(struct super_block *sb, int wait);
84851+ int (* const freeze_fs) (struct super_block *);
84852+ int (* const unfreeze_fs) (struct super_block *);
84853+ int (* const statfs) (struct dentry *, struct kstatfs *);
84854+ int (* const remount_fs) (struct super_block *, int *, char *);
84855+ void (* const clear_inode) (struct inode *);
84856+ void (* const umount_begin) (struct super_block *);
84857
84858- int (*show_options)(struct seq_file *, struct vfsmount *);
84859- int (*show_stats)(struct seq_file *, struct vfsmount *);
84860+ int (* const show_options)(struct seq_file *, struct vfsmount *);
84861+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
84862 #ifdef CONFIG_QUOTA
84863- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
84864- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
84865+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
84866+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
84867 #endif
84868- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
84869+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
84870 };
84871
84872 /*
84873diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
84874index 78a05bf..2a7d3e1 100644
84875--- a/include/linux/fs_struct.h
84876+++ b/include/linux/fs_struct.h
84877@@ -4,7 +4,7 @@
84878 #include <linux/path.h>
84879
84880 struct fs_struct {
84881- int users;
84882+ atomic_t users;
84883 rwlock_t lock;
84884 int umask;
84885 int in_exec;
84886diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
84887index 7be0c6f..2f63a2b 100644
84888--- a/include/linux/fscache-cache.h
84889+++ b/include/linux/fscache-cache.h
84890@@ -116,7 +116,7 @@ struct fscache_operation {
84891 #endif
84892 };
84893
84894-extern atomic_t fscache_op_debug_id;
84895+extern atomic_unchecked_t fscache_op_debug_id;
84896 extern const struct slow_work_ops fscache_op_slow_work_ops;
84897
84898 extern void fscache_enqueue_operation(struct fscache_operation *);
84899@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
84900 fscache_operation_release_t release)
84901 {
84902 atomic_set(&op->usage, 1);
84903- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
84904+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
84905 op->release = release;
84906 INIT_LIST_HEAD(&op->pend_link);
84907 fscache_set_op_state(op, "Init");
84908diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
84909index 4d6f47b..00bcedb 100644
84910--- a/include/linux/fsnotify_backend.h
84911+++ b/include/linux/fsnotify_backend.h
84912@@ -86,6 +86,7 @@ struct fsnotify_ops {
84913 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
84914 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
84915 };
84916+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
84917
84918 /*
84919 * A group is a "thing" that wants to receive notification about filesystem
84920diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
84921index 4ec5e67..42f1eb9 100644
84922--- a/include/linux/ftrace_event.h
84923+++ b/include/linux/ftrace_event.h
84924@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
84925 int filter_type);
84926 extern int trace_define_common_fields(struct ftrace_event_call *call);
84927
84928-#define is_signed_type(type) (((type)(-1)) < 0)
84929+#define is_signed_type(type) (((type)(-1)) < (type)1)
84930
84931 int trace_set_clr_event(const char *system, const char *event, int set);
84932
84933diff --git a/include/linux/genhd.h b/include/linux/genhd.h
84934index 297df45..b6a74ff 100644
84935--- a/include/linux/genhd.h
84936+++ b/include/linux/genhd.h
84937@@ -161,7 +161,7 @@ struct gendisk {
84938
84939 struct timer_rand_state *random;
84940
84941- atomic_t sync_io; /* RAID */
84942+ atomic_unchecked_t sync_io; /* RAID */
84943 struct work_struct async_notify;
84944 #ifdef CONFIG_BLK_DEV_INTEGRITY
84945 struct blk_integrity *integrity;
84946diff --git a/include/linux/gracl.h b/include/linux/gracl.h
84947new file mode 100644
84948index 0000000..af663cf
84949--- /dev/null
84950+++ b/include/linux/gracl.h
84951@@ -0,0 +1,319 @@
84952+#ifndef GR_ACL_H
84953+#define GR_ACL_H
84954+
84955+#include <linux/grdefs.h>
84956+#include <linux/resource.h>
84957+#include <linux/capability.h>
84958+#include <linux/dcache.h>
84959+#include <asm/resource.h>
84960+
84961+/* Major status information */
84962+
84963+#define GR_VERSION "grsecurity 2.9"
84964+#define GRSECURITY_VERSION 0x2900
84965+
84966+enum {
84967+ GR_SHUTDOWN = 0,
84968+ GR_ENABLE = 1,
84969+ GR_SPROLE = 2,
84970+ GR_RELOAD = 3,
84971+ GR_SEGVMOD = 4,
84972+ GR_STATUS = 5,
84973+ GR_UNSPROLE = 6,
84974+ GR_PASSSET = 7,
84975+ GR_SPROLEPAM = 8,
84976+};
84977+
84978+/* Password setup definitions
84979+ * kernel/grhash.c */
84980+enum {
84981+ GR_PW_LEN = 128,
84982+ GR_SALT_LEN = 16,
84983+ GR_SHA_LEN = 32,
84984+};
84985+
84986+enum {
84987+ GR_SPROLE_LEN = 64,
84988+};
84989+
84990+enum {
84991+ GR_NO_GLOB = 0,
84992+ GR_REG_GLOB,
84993+ GR_CREATE_GLOB
84994+};
84995+
84996+#define GR_NLIMITS 32
84997+
84998+/* Begin Data Structures */
84999+
85000+struct sprole_pw {
85001+ unsigned char *rolename;
85002+ unsigned char salt[GR_SALT_LEN];
85003+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
85004+};
85005+
85006+struct name_entry {
85007+ __u32 key;
85008+ ino_t inode;
85009+ dev_t device;
85010+ char *name;
85011+ __u16 len;
85012+ __u8 deleted;
85013+ struct name_entry *prev;
85014+ struct name_entry *next;
85015+};
85016+
85017+struct inodev_entry {
85018+ struct name_entry *nentry;
85019+ struct inodev_entry *prev;
85020+ struct inodev_entry *next;
85021+};
85022+
85023+struct acl_role_db {
85024+ struct acl_role_label **r_hash;
85025+ __u32 r_size;
85026+};
85027+
85028+struct inodev_db {
85029+ struct inodev_entry **i_hash;
85030+ __u32 i_size;
85031+};
85032+
85033+struct name_db {
85034+ struct name_entry **n_hash;
85035+ __u32 n_size;
85036+};
85037+
85038+struct crash_uid {
85039+ uid_t uid;
85040+ unsigned long expires;
85041+};
85042+
85043+struct gr_hash_struct {
85044+ void **table;
85045+ void **nametable;
85046+ void *first;
85047+ __u32 table_size;
85048+ __u32 used_size;
85049+ int type;
85050+};
85051+
85052+/* Userspace Grsecurity ACL data structures */
85053+
85054+struct acl_subject_label {
85055+ char *filename;
85056+ ino_t inode;
85057+ dev_t device;
85058+ __u32 mode;
85059+ kernel_cap_t cap_mask;
85060+ kernel_cap_t cap_lower;
85061+ kernel_cap_t cap_invert_audit;
85062+
85063+ struct rlimit res[GR_NLIMITS];
85064+ __u32 resmask;
85065+
85066+ __u8 user_trans_type;
85067+ __u8 group_trans_type;
85068+ uid_t *user_transitions;
85069+ gid_t *group_transitions;
85070+ __u16 user_trans_num;
85071+ __u16 group_trans_num;
85072+
85073+ __u32 sock_families[2];
85074+ __u32 ip_proto[8];
85075+ __u32 ip_type;
85076+ struct acl_ip_label **ips;
85077+ __u32 ip_num;
85078+ __u32 inaddr_any_override;
85079+
85080+ __u32 crashes;
85081+ unsigned long expires;
85082+
85083+ struct acl_subject_label *parent_subject;
85084+ struct gr_hash_struct *hash;
85085+ struct acl_subject_label *prev;
85086+ struct acl_subject_label *next;
85087+
85088+ struct acl_object_label **obj_hash;
85089+ __u32 obj_hash_size;
85090+ __u16 pax_flags;
85091+};
85092+
85093+struct role_allowed_ip {
85094+ __u32 addr;
85095+ __u32 netmask;
85096+
85097+ struct role_allowed_ip *prev;
85098+ struct role_allowed_ip *next;
85099+};
85100+
85101+struct role_transition {
85102+ char *rolename;
85103+
85104+ struct role_transition *prev;
85105+ struct role_transition *next;
85106+};
85107+
85108+struct acl_role_label {
85109+ char *rolename;
85110+ uid_t uidgid;
85111+ __u16 roletype;
85112+
85113+ __u16 auth_attempts;
85114+ unsigned long expires;
85115+
85116+ struct acl_subject_label *root_label;
85117+ struct gr_hash_struct *hash;
85118+
85119+ struct acl_role_label *prev;
85120+ struct acl_role_label *next;
85121+
85122+ struct role_transition *transitions;
85123+ struct role_allowed_ip *allowed_ips;
85124+ uid_t *domain_children;
85125+ __u16 domain_child_num;
85126+
85127+ mode_t umask;
85128+
85129+ struct acl_subject_label **subj_hash;
85130+ __u32 subj_hash_size;
85131+};
85132+
85133+struct user_acl_role_db {
85134+ struct acl_role_label **r_table;
85135+ __u32 num_pointers; /* Number of allocations to track */
85136+ __u32 num_roles; /* Number of roles */
85137+ __u32 num_domain_children; /* Number of domain children */
85138+ __u32 num_subjects; /* Number of subjects */
85139+ __u32 num_objects; /* Number of objects */
85140+};
85141+
85142+struct acl_object_label {
85143+ char *filename;
85144+ ino_t inode;
85145+ dev_t device;
85146+ __u32 mode;
85147+
85148+ struct acl_subject_label *nested;
85149+ struct acl_object_label *globbed;
85150+
85151+ /* next two structures not used */
85152+
85153+ struct acl_object_label *prev;
85154+ struct acl_object_label *next;
85155+};
85156+
85157+struct acl_ip_label {
85158+ char *iface;
85159+ __u32 addr;
85160+ __u32 netmask;
85161+ __u16 low, high;
85162+ __u8 mode;
85163+ __u32 type;
85164+ __u32 proto[8];
85165+
85166+ /* next two structures not used */
85167+
85168+ struct acl_ip_label *prev;
85169+ struct acl_ip_label *next;
85170+};
85171+
85172+struct gr_arg {
85173+ struct user_acl_role_db role_db;
85174+ unsigned char pw[GR_PW_LEN];
85175+ unsigned char salt[GR_SALT_LEN];
85176+ unsigned char sum[GR_SHA_LEN];
85177+ unsigned char sp_role[GR_SPROLE_LEN];
85178+ struct sprole_pw *sprole_pws;
85179+ dev_t segv_device;
85180+ ino_t segv_inode;
85181+ uid_t segv_uid;
85182+ __u16 num_sprole_pws;
85183+ __u16 mode;
85184+};
85185+
85186+struct gr_arg_wrapper {
85187+ struct gr_arg *arg;
85188+ __u32 version;
85189+ __u32 size;
85190+};
85191+
85192+struct subject_map {
85193+ struct acl_subject_label *user;
85194+ struct acl_subject_label *kernel;
85195+ struct subject_map *prev;
85196+ struct subject_map *next;
85197+};
85198+
85199+struct acl_subj_map_db {
85200+ struct subject_map **s_hash;
85201+ __u32 s_size;
85202+};
85203+
85204+/* End Data Structures Section */
85205+
85206+/* Hash functions generated by empirical testing by Brad Spengler
85207+ Makes good use of the low bits of the inode. Generally 0-1 times
85208+ in loop for successful match. 0-3 for unsuccessful match.
85209+ Shift/add algorithm with modulus of table size and an XOR*/
85210+
85211+static __inline__ unsigned int
85212+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
85213+{
85214+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
85215+}
85216+
85217+ static __inline__ unsigned int
85218+shash(const struct acl_subject_label *userp, const unsigned int sz)
85219+{
85220+ return ((const unsigned long)userp % sz);
85221+}
85222+
85223+static __inline__ unsigned int
85224+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
85225+{
85226+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
85227+}
85228+
85229+static __inline__ unsigned int
85230+nhash(const char *name, const __u16 len, const unsigned int sz)
85231+{
85232+ return full_name_hash((const unsigned char *)name, len) % sz;
85233+}
85234+
85235+#define FOR_EACH_ROLE_START(role) \
85236+ role = role_list; \
85237+ while (role) {
85238+
85239+#define FOR_EACH_ROLE_END(role) \
85240+ role = role->prev; \
85241+ }
85242+
85243+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
85244+ subj = NULL; \
85245+ iter = 0; \
85246+ while (iter < role->subj_hash_size) { \
85247+ if (subj == NULL) \
85248+ subj = role->subj_hash[iter]; \
85249+ if (subj == NULL) { \
85250+ iter++; \
85251+ continue; \
85252+ }
85253+
85254+#define FOR_EACH_SUBJECT_END(subj,iter) \
85255+ subj = subj->next; \
85256+ if (subj == NULL) \
85257+ iter++; \
85258+ }
85259+
85260+
85261+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
85262+ subj = role->hash->first; \
85263+ while (subj != NULL) {
85264+
85265+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
85266+ subj = subj->next; \
85267+ }
85268+
85269+#endif
85270+
85271diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
85272new file mode 100644
85273index 0000000..323ecf2
85274--- /dev/null
85275+++ b/include/linux/gralloc.h
85276@@ -0,0 +1,9 @@
85277+#ifndef __GRALLOC_H
85278+#define __GRALLOC_H
85279+
85280+void acl_free_all(void);
85281+int acl_alloc_stack_init(unsigned long size);
85282+void *acl_alloc(unsigned long len);
85283+void *acl_alloc_num(unsigned long num, unsigned long len);
85284+
85285+#endif
85286diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
85287new file mode 100644
85288index 0000000..70d6cd5
85289--- /dev/null
85290+++ b/include/linux/grdefs.h
85291@@ -0,0 +1,140 @@
85292+#ifndef GRDEFS_H
85293+#define GRDEFS_H
85294+
85295+/* Begin grsecurity status declarations */
85296+
85297+enum {
85298+ GR_READY = 0x01,
85299+ GR_STATUS_INIT = 0x00 // disabled state
85300+};
85301+
85302+/* Begin ACL declarations */
85303+
85304+/* Role flags */
85305+
85306+enum {
85307+ GR_ROLE_USER = 0x0001,
85308+ GR_ROLE_GROUP = 0x0002,
85309+ GR_ROLE_DEFAULT = 0x0004,
85310+ GR_ROLE_SPECIAL = 0x0008,
85311+ GR_ROLE_AUTH = 0x0010,
85312+ GR_ROLE_NOPW = 0x0020,
85313+ GR_ROLE_GOD = 0x0040,
85314+ GR_ROLE_LEARN = 0x0080,
85315+ GR_ROLE_TPE = 0x0100,
85316+ GR_ROLE_DOMAIN = 0x0200,
85317+ GR_ROLE_PAM = 0x0400,
85318+ GR_ROLE_PERSIST = 0x800
85319+};
85320+
85321+/* ACL Subject and Object mode flags */
85322+enum {
85323+ GR_DELETED = 0x80000000
85324+};
85325+
85326+/* ACL Object-only mode flags */
85327+enum {
85328+ GR_READ = 0x00000001,
85329+ GR_APPEND = 0x00000002,
85330+ GR_WRITE = 0x00000004,
85331+ GR_EXEC = 0x00000008,
85332+ GR_FIND = 0x00000010,
85333+ GR_INHERIT = 0x00000020,
85334+ GR_SETID = 0x00000040,
85335+ GR_CREATE = 0x00000080,
85336+ GR_DELETE = 0x00000100,
85337+ GR_LINK = 0x00000200,
85338+ GR_AUDIT_READ = 0x00000400,
85339+ GR_AUDIT_APPEND = 0x00000800,
85340+ GR_AUDIT_WRITE = 0x00001000,
85341+ GR_AUDIT_EXEC = 0x00002000,
85342+ GR_AUDIT_FIND = 0x00004000,
85343+ GR_AUDIT_INHERIT= 0x00008000,
85344+ GR_AUDIT_SETID = 0x00010000,
85345+ GR_AUDIT_CREATE = 0x00020000,
85346+ GR_AUDIT_DELETE = 0x00040000,
85347+ GR_AUDIT_LINK = 0x00080000,
85348+ GR_PTRACERD = 0x00100000,
85349+ GR_NOPTRACE = 0x00200000,
85350+ GR_SUPPRESS = 0x00400000,
85351+ GR_NOLEARN = 0x00800000,
85352+ GR_INIT_TRANSFER= 0x01000000
85353+};
85354+
85355+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
85356+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
85357+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
85358+
85359+/* ACL subject-only mode flags */
85360+enum {
85361+ GR_KILL = 0x00000001,
85362+ GR_VIEW = 0x00000002,
85363+ GR_PROTECTED = 0x00000004,
85364+ GR_LEARN = 0x00000008,
85365+ GR_OVERRIDE = 0x00000010,
85366+ /* just a placeholder, this mode is only used in userspace */
85367+ GR_DUMMY = 0x00000020,
85368+ GR_PROTSHM = 0x00000040,
85369+ GR_KILLPROC = 0x00000080,
85370+ GR_KILLIPPROC = 0x00000100,
85371+ /* just a placeholder, this mode is only used in userspace */
85372+ GR_NOTROJAN = 0x00000200,
85373+ GR_PROTPROCFD = 0x00000400,
85374+ GR_PROCACCT = 0x00000800,
85375+ GR_RELAXPTRACE = 0x00001000,
85376+ GR_NESTED = 0x00002000,
85377+ GR_INHERITLEARN = 0x00004000,
85378+ GR_PROCFIND = 0x00008000,
85379+ GR_POVERRIDE = 0x00010000,
85380+ GR_KERNELAUTH = 0x00020000,
85381+ GR_ATSECURE = 0x00040000,
85382+ GR_SHMEXEC = 0x00080000
85383+};
85384+
85385+enum {
85386+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
85387+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
85388+ GR_PAX_ENABLE_MPROTECT = 0x0004,
85389+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
85390+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
85391+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
85392+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
85393+ GR_PAX_DISABLE_MPROTECT = 0x0400,
85394+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
85395+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
85396+};
85397+
85398+enum {
85399+ GR_ID_USER = 0x01,
85400+ GR_ID_GROUP = 0x02,
85401+};
85402+
85403+enum {
85404+ GR_ID_ALLOW = 0x01,
85405+ GR_ID_DENY = 0x02,
85406+};
85407+
85408+#define GR_CRASH_RES 31
85409+#define GR_UIDTABLE_MAX 500
85410+
85411+/* begin resource learning section */
85412+enum {
85413+ GR_RLIM_CPU_BUMP = 60,
85414+ GR_RLIM_FSIZE_BUMP = 50000,
85415+ GR_RLIM_DATA_BUMP = 10000,
85416+ GR_RLIM_STACK_BUMP = 1000,
85417+ GR_RLIM_CORE_BUMP = 10000,
85418+ GR_RLIM_RSS_BUMP = 500000,
85419+ GR_RLIM_NPROC_BUMP = 1,
85420+ GR_RLIM_NOFILE_BUMP = 5,
85421+ GR_RLIM_MEMLOCK_BUMP = 50000,
85422+ GR_RLIM_AS_BUMP = 500000,
85423+ GR_RLIM_LOCKS_BUMP = 2,
85424+ GR_RLIM_SIGPENDING_BUMP = 5,
85425+ GR_RLIM_MSGQUEUE_BUMP = 10000,
85426+ GR_RLIM_NICE_BUMP = 1,
85427+ GR_RLIM_RTPRIO_BUMP = 1,
85428+ GR_RLIM_RTTIME_BUMP = 1000000
85429+};
85430+
85431+#endif
85432diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
85433new file mode 100644
85434index 0000000..3826b91
85435--- /dev/null
85436+++ b/include/linux/grinternal.h
85437@@ -0,0 +1,219 @@
85438+#ifndef __GRINTERNAL_H
85439+#define __GRINTERNAL_H
85440+
85441+#ifdef CONFIG_GRKERNSEC
85442+
85443+#include <linux/fs.h>
85444+#include <linux/mnt_namespace.h>
85445+#include <linux/nsproxy.h>
85446+#include <linux/gracl.h>
85447+#include <linux/grdefs.h>
85448+#include <linux/grmsg.h>
85449+
85450+void gr_add_learn_entry(const char *fmt, ...)
85451+ __attribute__ ((format (printf, 1, 2)));
85452+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
85453+ const struct vfsmount *mnt);
85454+__u32 gr_check_create(const struct dentry *new_dentry,
85455+ const struct dentry *parent,
85456+ const struct vfsmount *mnt, const __u32 mode);
85457+int gr_check_protected_task(const struct task_struct *task);
85458+__u32 to_gr_audit(const __u32 reqmode);
85459+int gr_set_acls(const int type);
85460+int gr_apply_subject_to_task(struct task_struct *task);
85461+int gr_acl_is_enabled(void);
85462+char gr_roletype_to_char(void);
85463+
85464+void gr_handle_alertkill(struct task_struct *task);
85465+char *gr_to_filename(const struct dentry *dentry,
85466+ const struct vfsmount *mnt);
85467+char *gr_to_filename1(const struct dentry *dentry,
85468+ const struct vfsmount *mnt);
85469+char *gr_to_filename2(const struct dentry *dentry,
85470+ const struct vfsmount *mnt);
85471+char *gr_to_filename3(const struct dentry *dentry,
85472+ const struct vfsmount *mnt);
85473+
85474+extern int grsec_enable_ptrace_readexec;
85475+extern int grsec_enable_harden_ptrace;
85476+extern int grsec_enable_link;
85477+extern int grsec_enable_fifo;
85478+extern int grsec_enable_shm;
85479+extern int grsec_enable_execlog;
85480+extern int grsec_enable_signal;
85481+extern int grsec_enable_audit_ptrace;
85482+extern int grsec_enable_forkfail;
85483+extern int grsec_enable_time;
85484+extern int grsec_enable_rofs;
85485+extern int grsec_enable_chroot_shmat;
85486+extern int grsec_enable_chroot_mount;
85487+extern int grsec_enable_chroot_double;
85488+extern int grsec_enable_chroot_pivot;
85489+extern int grsec_enable_chroot_chdir;
85490+extern int grsec_enable_chroot_chmod;
85491+extern int grsec_enable_chroot_mknod;
85492+extern int grsec_enable_chroot_fchdir;
85493+extern int grsec_enable_chroot_nice;
85494+extern int grsec_enable_chroot_execlog;
85495+extern int grsec_enable_chroot_caps;
85496+extern int grsec_enable_chroot_sysctl;
85497+extern int grsec_enable_chroot_unix;
85498+extern int grsec_enable_tpe;
85499+extern int grsec_tpe_gid;
85500+extern int grsec_enable_tpe_all;
85501+extern int grsec_enable_tpe_invert;
85502+extern int grsec_enable_socket_all;
85503+extern int grsec_socket_all_gid;
85504+extern int grsec_enable_socket_client;
85505+extern int grsec_socket_client_gid;
85506+extern int grsec_enable_socket_server;
85507+extern int grsec_socket_server_gid;
85508+extern int grsec_audit_gid;
85509+extern int grsec_enable_group;
85510+extern int grsec_enable_audit_textrel;
85511+extern int grsec_enable_log_rwxmaps;
85512+extern int grsec_enable_mount;
85513+extern int grsec_enable_chdir;
85514+extern int grsec_resource_logging;
85515+extern int grsec_enable_blackhole;
85516+extern int grsec_lastack_retries;
85517+extern int grsec_enable_brute;
85518+extern int grsec_lock;
85519+
85520+extern spinlock_t grsec_alert_lock;
85521+extern unsigned long grsec_alert_wtime;
85522+extern unsigned long grsec_alert_fyet;
85523+
85524+extern spinlock_t grsec_audit_lock;
85525+
85526+extern rwlock_t grsec_exec_file_lock;
85527+
85528+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
85529+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
85530+ (tsk)->exec_file->f_vfsmnt) : "/")
85531+
85532+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
85533+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
85534+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
85535+
85536+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
85537+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
85538+ (tsk)->exec_file->f_vfsmnt) : "/")
85539+
85540+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
85541+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
85542+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
85543+
85544+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
85545+
85546+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
85547+
85548+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
85549+ (task)->pid, (cred)->uid, \
85550+ (cred)->euid, (cred)->gid, (cred)->egid, \
85551+ gr_parent_task_fullpath(task), \
85552+ (task)->real_parent->comm, (task)->real_parent->pid, \
85553+ (pcred)->uid, (pcred)->euid, \
85554+ (pcred)->gid, (pcred)->egid
85555+
85556+#define GR_CHROOT_CAPS {{ \
85557+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
85558+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
85559+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
85560+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
85561+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
85562+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
85563+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
85564+
85565+#define security_learn(normal_msg,args...) \
85566+({ \
85567+ read_lock(&grsec_exec_file_lock); \
85568+ gr_add_learn_entry(normal_msg "\n", ## args); \
85569+ read_unlock(&grsec_exec_file_lock); \
85570+})
85571+
85572+enum {
85573+ GR_DO_AUDIT,
85574+ GR_DONT_AUDIT,
85575+ GR_DONT_AUDIT_GOOD
85576+};
85577+
85578+enum {
85579+ GR_TTYSNIFF,
85580+ GR_RBAC,
85581+ GR_RBAC_STR,
85582+ GR_STR_RBAC,
85583+ GR_RBAC_MODE2,
85584+ GR_RBAC_MODE3,
85585+ GR_FILENAME,
85586+ GR_SYSCTL_HIDDEN,
85587+ GR_NOARGS,
85588+ GR_ONE_INT,
85589+ GR_ONE_INT_TWO_STR,
85590+ GR_ONE_STR,
85591+ GR_STR_INT,
85592+ GR_TWO_STR_INT,
85593+ GR_TWO_INT,
85594+ GR_TWO_U64,
85595+ GR_THREE_INT,
85596+ GR_FIVE_INT_TWO_STR,
85597+ GR_TWO_STR,
85598+ GR_THREE_STR,
85599+ GR_FOUR_STR,
85600+ GR_STR_FILENAME,
85601+ GR_FILENAME_STR,
85602+ GR_FILENAME_TWO_INT,
85603+ GR_FILENAME_TWO_INT_STR,
85604+ GR_TEXTREL,
85605+ GR_PTRACE,
85606+ GR_RESOURCE,
85607+ GR_CAP,
85608+ GR_SIG,
85609+ GR_SIG2,
85610+ GR_CRASH1,
85611+ GR_CRASH2,
85612+ GR_PSACCT,
85613+ GR_RWXMAP
85614+};
85615+
85616+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
85617+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
85618+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
85619+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
85620+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
85621+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
85622+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
85623+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
85624+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
85625+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
85626+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
85627+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
85628+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
85629+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
85630+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
85631+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
85632+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
85633+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
85634+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
85635+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
85636+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
85637+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
85638+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
85639+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
85640+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
85641+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
85642+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
85643+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
85644+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
85645+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
85646+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
85647+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
85648+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
85649+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
85650+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
85651+
85652+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
85653+
85654+#endif
85655+
85656+#endif
85657diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
85658new file mode 100644
85659index 0000000..f885406
85660--- /dev/null
85661+++ b/include/linux/grmsg.h
85662@@ -0,0 +1,109 @@
85663+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
85664+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
85665+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
85666+#define GR_STOPMOD_MSG "denied modification of module state by "
85667+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
85668+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
85669+#define GR_IOPERM_MSG "denied use of ioperm() by "
85670+#define GR_IOPL_MSG "denied use of iopl() by "
85671+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
85672+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
85673+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
85674+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
85675+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
85676+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
85677+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
85678+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
85679+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
85680+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
85681+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
85682+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
85683+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
85684+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
85685+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
85686+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
85687+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
85688+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
85689+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
85690+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
85691+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
85692+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
85693+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
85694+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
85695+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
85696+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
85697+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
85698+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
85699+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
85700+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
85701+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
85702+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
85703+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
85704+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
85705+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
85706+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
85707+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
85708+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
85709+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
85710+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
85711+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
85712+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
85713+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
85714+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
85715+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
85716+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
85717+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
85718+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
85719+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
85720+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
85721+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
85722+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
85723+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
85724+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
85725+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
85726+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
85727+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
85728+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
85729+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
85730+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
85731+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
85732+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
85733+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
85734+#define GR_FAILFORK_MSG "failed fork with errno %s by "
85735+#define GR_NICE_CHROOT_MSG "denied priority change by "
85736+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
85737+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
85738+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
85739+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
85740+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
85741+#define GR_TIME_MSG "time set by "
85742+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
85743+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
85744+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
85745+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
85746+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
85747+#define GR_BIND_MSG "denied bind() by "
85748+#define GR_CONNECT_MSG "denied connect() by "
85749+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
85750+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
85751+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
85752+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
85753+#define GR_CAP_ACL_MSG "use of %s denied for "
85754+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
85755+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
85756+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
85757+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
85758+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
85759+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
85760+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
85761+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
85762+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
85763+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
85764+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
85765+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
85766+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
85767+#define GR_VM86_MSG "denied use of vm86 by "
85768+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
85769+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
85770+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
85771+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
85772diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
85773new file mode 100644
85774index 0000000..c1793ae
85775--- /dev/null
85776+++ b/include/linux/grsecurity.h
85777@@ -0,0 +1,219 @@
85778+#ifndef GR_SECURITY_H
85779+#define GR_SECURITY_H
85780+#include <linux/fs.h>
85781+#include <linux/fs_struct.h>
85782+#include <linux/binfmts.h>
85783+#include <linux/gracl.h>
85784+#include <linux/compat.h>
85785+
85786+/* notify of brain-dead configs */
85787+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85788+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
85789+#endif
85790+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
85791+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
85792+#endif
85793+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
85794+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
85795+#endif
85796+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
85797+#error "CONFIG_PAX enabled, but no PaX options are enabled."
85798+#endif
85799+
85800+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
85801+void gr_handle_brute_check(void);
85802+void gr_handle_kernel_exploit(void);
85803+int gr_process_user_ban(void);
85804+
85805+char gr_roletype_to_char(void);
85806+
85807+int gr_acl_enable_at_secure(void);
85808+
85809+int gr_check_user_change(int real, int effective, int fs);
85810+int gr_check_group_change(int real, int effective, int fs);
85811+
85812+void gr_del_task_from_ip_table(struct task_struct *p);
85813+
85814+int gr_pid_is_chrooted(struct task_struct *p);
85815+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
85816+int gr_handle_chroot_nice(void);
85817+int gr_handle_chroot_sysctl(const int op);
85818+int gr_handle_chroot_setpriority(struct task_struct *p,
85819+ const int niceval);
85820+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
85821+int gr_handle_chroot_chroot(const struct dentry *dentry,
85822+ const struct vfsmount *mnt);
85823+void gr_handle_chroot_chdir(struct path *path);
85824+int gr_handle_chroot_chmod(const struct dentry *dentry,
85825+ const struct vfsmount *mnt, const int mode);
85826+int gr_handle_chroot_mknod(const struct dentry *dentry,
85827+ const struct vfsmount *mnt, const int mode);
85828+int gr_handle_chroot_mount(const struct dentry *dentry,
85829+ const struct vfsmount *mnt,
85830+ const char *dev_name);
85831+int gr_handle_chroot_pivot(void);
85832+int gr_handle_chroot_unix(const pid_t pid);
85833+
85834+int gr_handle_rawio(const struct inode *inode);
85835+
85836+void gr_handle_ioperm(void);
85837+void gr_handle_iopl(void);
85838+
85839+umode_t gr_acl_umask(void);
85840+
85841+int gr_tpe_allow(const struct file *file);
85842+
85843+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
85844+void gr_clear_chroot_entries(struct task_struct *task);
85845+
85846+void gr_log_forkfail(const int retval);
85847+void gr_log_timechange(void);
85848+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
85849+void gr_log_chdir(const struct dentry *dentry,
85850+ const struct vfsmount *mnt);
85851+void gr_log_chroot_exec(const struct dentry *dentry,
85852+ const struct vfsmount *mnt);
85853+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
85854+#ifdef CONFIG_COMPAT
85855+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
85856+#endif
85857+void gr_log_remount(const char *devname, const int retval);
85858+void gr_log_unmount(const char *devname, const int retval);
85859+void gr_log_mount(const char *from, const char *to, const int retval);
85860+void gr_log_textrel(struct vm_area_struct *vma);
85861+void gr_log_rwxmmap(struct file *file);
85862+void gr_log_rwxmprotect(struct file *file);
85863+
85864+int gr_handle_follow_link(const struct inode *parent,
85865+ const struct inode *inode,
85866+ const struct dentry *dentry,
85867+ const struct vfsmount *mnt);
85868+int gr_handle_fifo(const struct dentry *dentry,
85869+ const struct vfsmount *mnt,
85870+ const struct dentry *dir, const int flag,
85871+ const int acc_mode);
85872+int gr_handle_hardlink(const struct dentry *dentry,
85873+ const struct vfsmount *mnt,
85874+ struct inode *inode,
85875+ const int mode, const char *to);
85876+
85877+int gr_is_capable(const int cap);
85878+int gr_is_capable_nolog(const int cap);
85879+void gr_learn_resource(const struct task_struct *task, const int limit,
85880+ const unsigned long wanted, const int gt);
85881+void gr_copy_label(struct task_struct *tsk);
85882+void gr_handle_crash(struct task_struct *task, const int sig);
85883+int gr_handle_signal(const struct task_struct *p, const int sig);
85884+int gr_check_crash_uid(const uid_t uid);
85885+int gr_check_protected_task(const struct task_struct *task);
85886+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
85887+int gr_acl_handle_mmap(const struct file *file,
85888+ const unsigned long prot);
85889+int gr_acl_handle_mprotect(const struct file *file,
85890+ const unsigned long prot);
85891+int gr_check_hidden_task(const struct task_struct *tsk);
85892+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
85893+ const struct vfsmount *mnt);
85894+__u32 gr_acl_handle_utime(const struct dentry *dentry,
85895+ const struct vfsmount *mnt);
85896+__u32 gr_acl_handle_access(const struct dentry *dentry,
85897+ const struct vfsmount *mnt, const int fmode);
85898+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
85899+ const struct vfsmount *mnt, umode_t *mode);
85900+__u32 gr_acl_handle_chown(const struct dentry *dentry,
85901+ const struct vfsmount *mnt);
85902+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
85903+ const struct vfsmount *mnt);
85904+int gr_handle_ptrace(struct task_struct *task, const long request);
85905+int gr_handle_proc_ptrace(struct task_struct *task);
85906+__u32 gr_acl_handle_execve(const struct dentry *dentry,
85907+ const struct vfsmount *mnt);
85908+int gr_check_crash_exec(const struct file *filp);
85909+int gr_acl_is_enabled(void);
85910+void gr_set_kernel_label(struct task_struct *task);
85911+void gr_set_role_label(struct task_struct *task, const uid_t uid,
85912+ const gid_t gid);
85913+int gr_set_proc_label(const struct dentry *dentry,
85914+ const struct vfsmount *mnt,
85915+ const int unsafe_flags);
85916+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
85917+ const struct vfsmount *mnt);
85918+__u32 gr_acl_handle_open(const struct dentry *dentry,
85919+ const struct vfsmount *mnt, int acc_mode);
85920+__u32 gr_acl_handle_creat(const struct dentry *dentry,
85921+ const struct dentry *p_dentry,
85922+ const struct vfsmount *p_mnt,
85923+ int open_flags, int acc_mode, const int imode);
85924+void gr_handle_create(const struct dentry *dentry,
85925+ const struct vfsmount *mnt);
85926+void gr_handle_proc_create(const struct dentry *dentry,
85927+ const struct inode *inode);
85928+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
85929+ const struct dentry *parent_dentry,
85930+ const struct vfsmount *parent_mnt,
85931+ const int mode);
85932+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
85933+ const struct dentry *parent_dentry,
85934+ const struct vfsmount *parent_mnt);
85935+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
85936+ const struct vfsmount *mnt);
85937+void gr_handle_delete(const ino_t ino, const dev_t dev);
85938+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
85939+ const struct vfsmount *mnt);
85940+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
85941+ const struct dentry *parent_dentry,
85942+ const struct vfsmount *parent_mnt,
85943+ const char *from);
85944+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
85945+ const struct dentry *parent_dentry,
85946+ const struct vfsmount *parent_mnt,
85947+ const struct dentry *old_dentry,
85948+ const struct vfsmount *old_mnt, const char *to);
85949+int gr_acl_handle_rename(struct dentry *new_dentry,
85950+ struct dentry *parent_dentry,
85951+ const struct vfsmount *parent_mnt,
85952+ struct dentry *old_dentry,
85953+ struct inode *old_parent_inode,
85954+ struct vfsmount *old_mnt, const char *newname);
85955+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
85956+ struct dentry *old_dentry,
85957+ struct dentry *new_dentry,
85958+ struct vfsmount *mnt, const __u8 replace);
85959+__u32 gr_check_link(const struct dentry *new_dentry,
85960+ const struct dentry *parent_dentry,
85961+ const struct vfsmount *parent_mnt,
85962+ const struct dentry *old_dentry,
85963+ const struct vfsmount *old_mnt);
85964+int gr_acl_handle_filldir(const struct file *file, const char *name,
85965+ const unsigned int namelen, const ino_t ino);
85966+
85967+__u32 gr_acl_handle_unix(const struct dentry *dentry,
85968+ const struct vfsmount *mnt);
85969+void gr_acl_handle_exit(void);
85970+void gr_acl_handle_psacct(struct task_struct *task, const long code);
85971+int gr_acl_handle_procpidmem(const struct task_struct *task);
85972+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
85973+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
85974+void gr_audit_ptrace(struct task_struct *task);
85975+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
85976+
85977+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
85978+
85979+#ifdef CONFIG_GRKERNSEC
85980+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
85981+void gr_handle_vm86(void);
85982+void gr_handle_mem_readwrite(u64 from, u64 to);
85983+
85984+void gr_log_badprocpid(const char *entry);
85985+
85986+extern int grsec_enable_dmesg;
85987+extern int grsec_disable_privio;
85988+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
85989+extern int grsec_enable_chroot_findtask;
85990+#endif
85991+#ifdef CONFIG_GRKERNSEC_SETXID
85992+extern int grsec_enable_setxid;
85993+#endif
85994+#endif
85995+
85996+#endif
85997diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
85998index 6a87154..a3ce57b 100644
85999--- a/include/linux/hdpu_features.h
86000+++ b/include/linux/hdpu_features.h
86001@@ -3,7 +3,7 @@
86002 struct cpustate_t {
86003 spinlock_t lock;
86004 int excl;
86005- int open_count;
86006+ atomic_t open_count;
86007 unsigned char cached_val;
86008 int inited;
86009 unsigned long *set_addr;
86010diff --git a/include/linux/highmem.h b/include/linux/highmem.h
86011index 211ff44..00ab6d7 100644
86012--- a/include/linux/highmem.h
86013+++ b/include/linux/highmem.h
86014@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
86015 kunmap_atomic(kaddr, KM_USER0);
86016 }
86017
86018+static inline void sanitize_highpage(struct page *page)
86019+{
86020+ void *kaddr;
86021+ unsigned long flags;
86022+
86023+ local_irq_save(flags);
86024+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
86025+ clear_page(kaddr);
86026+ kunmap_atomic(kaddr, KM_CLEARPAGE);
86027+ local_irq_restore(flags);
86028+}
86029+
86030 static inline void zero_user_segments(struct page *page,
86031 unsigned start1, unsigned end1,
86032 unsigned start2, unsigned end2)
86033diff --git a/include/linux/i2c.h b/include/linux/i2c.h
86034index 7b40cda..24eb44e 100644
86035--- a/include/linux/i2c.h
86036+++ b/include/linux/i2c.h
86037@@ -325,6 +325,7 @@ struct i2c_algorithm {
86038 /* To determine what the adapter supports */
86039 u32 (*functionality) (struct i2c_adapter *);
86040 };
86041+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
86042
86043 /*
86044 * i2c_adapter is the structure used to identify a physical i2c bus along
86045diff --git a/include/linux/i2o.h b/include/linux/i2o.h
86046index 4c4e57d..f3c5303 100644
86047--- a/include/linux/i2o.h
86048+++ b/include/linux/i2o.h
86049@@ -564,7 +564,7 @@ struct i2o_controller {
86050 struct i2o_device *exec; /* Executive */
86051 #if BITS_PER_LONG == 64
86052 spinlock_t context_list_lock; /* lock for context_list */
86053- atomic_t context_list_counter; /* needed for unique contexts */
86054+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
86055 struct list_head context_list; /* list of context id's
86056 and pointers */
86057 #endif
86058diff --git a/include/linux/init_task.h b/include/linux/init_task.h
86059index 21a6f5d..dc42eab 100644
86060--- a/include/linux/init_task.h
86061+++ b/include/linux/init_task.h
86062@@ -83,6 +83,12 @@ extern struct group_info init_groups;
86063 #define INIT_IDS
86064 #endif
86065
86066+#ifdef CONFIG_X86
86067+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
86068+#else
86069+#define INIT_TASK_THREAD_INFO
86070+#endif
86071+
86072 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
86073 /*
86074 * Because of the reduced scope of CAP_SETPCAP when filesystem
86075@@ -156,6 +162,7 @@ extern struct cred init_cred;
86076 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
86077 .comm = "swapper", \
86078 .thread = INIT_THREAD, \
86079+ INIT_TASK_THREAD_INFO \
86080 .fs = &init_fs, \
86081 .files = &init_files, \
86082 .signal = &init_signals, \
86083diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
86084index 4f0a72a..a849599 100644
86085--- a/include/linux/intel-iommu.h
86086+++ b/include/linux/intel-iommu.h
86087@@ -296,7 +296,7 @@ struct iommu_flush {
86088 u8 fm, u64 type);
86089 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
86090 unsigned int size_order, u64 type);
86091-};
86092+} __no_const;
86093
86094 enum {
86095 SR_DMAR_FECTL_REG,
86096diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
86097index c739150..be577b5 100644
86098--- a/include/linux/interrupt.h
86099+++ b/include/linux/interrupt.h
86100@@ -369,7 +369,7 @@ enum
86101 /* map softirq index to softirq name. update 'softirq_to_name' in
86102 * kernel/softirq.c when adding a new softirq.
86103 */
86104-extern char *softirq_to_name[NR_SOFTIRQS];
86105+extern const char * const softirq_to_name[NR_SOFTIRQS];
86106
86107 /* softirq mask and active fields moved to irq_cpustat_t in
86108 * asm/hardirq.h to get better cache usage. KAO
86109@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
86110
86111 struct softirq_action
86112 {
86113- void (*action)(struct softirq_action *);
86114+ void (*action)(void);
86115 };
86116
86117 asmlinkage void do_softirq(void);
86118 asmlinkage void __do_softirq(void);
86119-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
86120+extern void open_softirq(int nr, void (*action)(void));
86121 extern void softirq_init(void);
86122 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
86123 extern void raise_softirq_irqoff(unsigned int nr);
86124diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
86125index eb73632..19abfc1 100644
86126--- a/include/linux/iocontext.h
86127+++ b/include/linux/iocontext.h
86128@@ -94,14 +94,15 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
86129 return NULL;
86130 }
86131
86132+struct task_struct;
86133 #ifdef CONFIG_BLOCK
86134 int put_io_context(struct io_context *ioc);
86135-void exit_io_context(void);
86136+void exit_io_context(struct task_struct *task);
86137 struct io_context *get_io_context(gfp_t gfp_flags, int node);
86138 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
86139 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
86140 #else
86141-static inline void exit_io_context(void)
86142+static inline void exit_io_context(struct task_struct *task)
86143 {
86144 }
86145
86146diff --git a/include/linux/irq.h b/include/linux/irq.h
86147index 9e5f45a..025865b 100644
86148--- a/include/linux/irq.h
86149+++ b/include/linux/irq.h
86150@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
86151 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
86152 bool boot)
86153 {
86154+#ifdef CONFIG_CPUMASK_OFFSTACK
86155 gfp_t gfp = GFP_ATOMIC;
86156
86157 if (boot)
86158 gfp = GFP_NOWAIT;
86159
86160-#ifdef CONFIG_CPUMASK_OFFSTACK
86161 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
86162 return false;
86163
86164diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
86165index 7922742..27306a2 100644
86166--- a/include/linux/kallsyms.h
86167+++ b/include/linux/kallsyms.h
86168@@ -15,7 +15,8 @@
86169
86170 struct module;
86171
86172-#ifdef CONFIG_KALLSYMS
86173+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
86174+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
86175 /* Lookup the address for a symbol. Returns 0 if not found. */
86176 unsigned long kallsyms_lookup_name(const char *name);
86177
86178@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
86179 /* Stupid that this does nothing, but I didn't create this mess. */
86180 #define __print_symbol(fmt, addr)
86181 #endif /*CONFIG_KALLSYMS*/
86182+#else /* when included by kallsyms.c, vsnprintf.c, or
86183+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
86184+extern void __print_symbol(const char *fmt, unsigned long address);
86185+extern int sprint_symbol(char *buffer, unsigned long address);
86186+const char *kallsyms_lookup(unsigned long addr,
86187+ unsigned long *symbolsize,
86188+ unsigned long *offset,
86189+ char **modname, char *namebuf);
86190+#endif
86191
86192 /* This macro allows us to keep printk typechecking */
86193 static void __check_printsym_format(const char *fmt, ...)
86194diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
86195index 6adcc29..13369e8 100644
86196--- a/include/linux/kgdb.h
86197+++ b/include/linux/kgdb.h
86198@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
86199
86200 extern int kgdb_connected;
86201
86202-extern atomic_t kgdb_setting_breakpoint;
86203-extern atomic_t kgdb_cpu_doing_single_step;
86204+extern atomic_unchecked_t kgdb_setting_breakpoint;
86205+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
86206
86207 extern struct task_struct *kgdb_usethread;
86208 extern struct task_struct *kgdb_contthread;
86209@@ -235,7 +235,7 @@ struct kgdb_arch {
86210 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
86211 void (*remove_all_hw_break)(void);
86212 void (*correct_hw_break)(void);
86213-};
86214+} __do_const;
86215
86216 /**
86217 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
86218@@ -257,14 +257,14 @@ struct kgdb_io {
86219 int (*init) (void);
86220 void (*pre_exception) (void);
86221 void (*post_exception) (void);
86222-};
86223+} __do_const;
86224
86225-extern struct kgdb_arch arch_kgdb_ops;
86226+extern const struct kgdb_arch arch_kgdb_ops;
86227
86228 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
86229
86230-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
86231-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
86232+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
86233+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
86234
86235 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
86236 extern int kgdb_mem2hex(char *mem, char *buf, int count);
86237diff --git a/include/linux/kmod.h b/include/linux/kmod.h
86238index 0546fe7..2a22bc1 100644
86239--- a/include/linux/kmod.h
86240+++ b/include/linux/kmod.h
86241@@ -31,6 +31,8 @@
86242 * usually useless though. */
86243 extern int __request_module(bool wait, const char *name, ...) \
86244 __attribute__((format(printf, 2, 3)));
86245+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
86246+ __attribute__((format(printf, 3, 4)));
86247 #define request_module(mod...) __request_module(true, mod)
86248 #define request_module_nowait(mod...) __request_module(false, mod)
86249 #define try_then_request_module(x, mod...) \
86250diff --git a/include/linux/kobject.h b/include/linux/kobject.h
86251index 58ae8e0..3950d3c 100644
86252--- a/include/linux/kobject.h
86253+++ b/include/linux/kobject.h
86254@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
86255
86256 struct kobj_type {
86257 void (*release)(struct kobject *kobj);
86258- struct sysfs_ops *sysfs_ops;
86259+ const struct sysfs_ops *sysfs_ops;
86260 struct attribute **default_attrs;
86261 };
86262
86263@@ -118,9 +118,9 @@ struct kobj_uevent_env {
86264 };
86265
86266 struct kset_uevent_ops {
86267- int (*filter)(struct kset *kset, struct kobject *kobj);
86268- const char *(*name)(struct kset *kset, struct kobject *kobj);
86269- int (*uevent)(struct kset *kset, struct kobject *kobj,
86270+ int (* const filter)(struct kset *kset, struct kobject *kobj);
86271+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
86272+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
86273 struct kobj_uevent_env *env);
86274 };
86275
86276@@ -132,7 +132,7 @@ struct kobj_attribute {
86277 const char *buf, size_t count);
86278 };
86279
86280-extern struct sysfs_ops kobj_sysfs_ops;
86281+extern const struct sysfs_ops kobj_sysfs_ops;
86282
86283 /**
86284 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
86285@@ -155,14 +155,14 @@ struct kset {
86286 struct list_head list;
86287 spinlock_t list_lock;
86288 struct kobject kobj;
86289- struct kset_uevent_ops *uevent_ops;
86290+ const struct kset_uevent_ops *uevent_ops;
86291 };
86292
86293 extern void kset_init(struct kset *kset);
86294 extern int __must_check kset_register(struct kset *kset);
86295 extern void kset_unregister(struct kset *kset);
86296 extern struct kset * __must_check kset_create_and_add(const char *name,
86297- struct kset_uevent_ops *u,
86298+ const struct kset_uevent_ops *u,
86299 struct kobject *parent_kobj);
86300
86301 static inline struct kset *to_kset(struct kobject *kobj)
86302diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
86303index c728a50..752d821 100644
86304--- a/include/linux/kvm_host.h
86305+++ b/include/linux/kvm_host.h
86306@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
86307 void vcpu_load(struct kvm_vcpu *vcpu);
86308 void vcpu_put(struct kvm_vcpu *vcpu);
86309
86310-int kvm_init(void *opaque, unsigned int vcpu_size,
86311+int kvm_init(const void *opaque, unsigned int vcpu_size,
86312 struct module *module);
86313 void kvm_exit(void);
86314
86315@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
86316 struct kvm_guest_debug *dbg);
86317 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
86318
86319-int kvm_arch_init(void *opaque);
86320+int kvm_arch_init(const void *opaque);
86321 void kvm_arch_exit(void);
86322
86323 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
86324diff --git a/include/linux/libata.h b/include/linux/libata.h
86325index a069916..223edde 100644
86326--- a/include/linux/libata.h
86327+++ b/include/linux/libata.h
86328@@ -525,11 +525,11 @@ struct ata_ioports {
86329
86330 struct ata_host {
86331 spinlock_t lock;
86332- struct device *dev;
86333+ struct device *dev;
86334 void __iomem * const *iomap;
86335 unsigned int n_ports;
86336 void *private_data;
86337- struct ata_port_operations *ops;
86338+ const struct ata_port_operations *ops;
86339 unsigned long flags;
86340 #ifdef CONFIG_ATA_ACPI
86341 acpi_handle acpi_handle;
86342@@ -710,7 +710,7 @@ struct ata_link {
86343
86344 struct ata_port {
86345 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
86346- struct ata_port_operations *ops;
86347+ const struct ata_port_operations *ops;
86348 spinlock_t *lock;
86349 /* Flags owned by the EH context. Only EH should touch these once the
86350 port is active */
86351@@ -884,7 +884,7 @@ struct ata_port_operations {
86352 * fields must be pointers.
86353 */
86354 const struct ata_port_operations *inherits;
86355-};
86356+} __do_const;
86357
86358 struct ata_port_info {
86359 unsigned long flags;
86360@@ -892,7 +892,7 @@ struct ata_port_info {
86361 unsigned long pio_mask;
86362 unsigned long mwdma_mask;
86363 unsigned long udma_mask;
86364- struct ata_port_operations *port_ops;
86365+ const struct ata_port_operations *port_ops;
86366 void *private_data;
86367 };
86368
86369@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
86370 extern const unsigned long sata_deb_timing_hotplug[];
86371 extern const unsigned long sata_deb_timing_long[];
86372
86373-extern struct ata_port_operations ata_dummy_port_ops;
86374+extern const struct ata_port_operations ata_dummy_port_ops;
86375 extern const struct ata_port_info ata_dummy_port_info;
86376
86377 static inline const unsigned long *
86378@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
86379 struct scsi_host_template *sht);
86380 extern void ata_host_detach(struct ata_host *host);
86381 extern void ata_host_init(struct ata_host *, struct device *,
86382- unsigned long, struct ata_port_operations *);
86383+ unsigned long, const struct ata_port_operations *);
86384 extern int ata_scsi_detect(struct scsi_host_template *sht);
86385 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
86386 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
86387diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
86388index fbc48f8..0886e57 100644
86389--- a/include/linux/lockd/bind.h
86390+++ b/include/linux/lockd/bind.h
86391@@ -23,13 +23,13 @@ struct svc_rqst;
86392 * This is the set of functions for lockd->nfsd communication
86393 */
86394 struct nlmsvc_binding {
86395- __be32 (*fopen)(struct svc_rqst *,
86396+ __be32 (* const fopen)(struct svc_rqst *,
86397 struct nfs_fh *,
86398 struct file **);
86399- void (*fclose)(struct file *);
86400+ void (* const fclose)(struct file *);
86401 };
86402
86403-extern struct nlmsvc_binding * nlmsvc_ops;
86404+extern const struct nlmsvc_binding * nlmsvc_ops;
86405
86406 /*
86407 * Similar to nfs_client_initdata, but without the NFS-specific
86408diff --git a/include/linux/mca.h b/include/linux/mca.h
86409index 3797270..7765ede 100644
86410--- a/include/linux/mca.h
86411+++ b/include/linux/mca.h
86412@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
86413 int region);
86414 void * (*mca_transform_memory)(struct mca_device *,
86415 void *memory);
86416-};
86417+} __no_const;
86418
86419 struct mca_bus {
86420 u64 default_dma_mask;
86421diff --git a/include/linux/memory.h b/include/linux/memory.h
86422index 37fa19b..b597c85 100644
86423--- a/include/linux/memory.h
86424+++ b/include/linux/memory.h
86425@@ -108,7 +108,7 @@ struct memory_accessor {
86426 size_t count);
86427 ssize_t (*write)(struct memory_accessor *, const char *buf,
86428 off_t offset, size_t count);
86429-};
86430+} __no_const;
86431
86432 /*
86433 * Kernel text modification mutex, used for code patching. Users of this lock
86434diff --git a/include/linux/mm.h b/include/linux/mm.h
86435index 11e5be6..1ff2423 100644
86436--- a/include/linux/mm.h
86437+++ b/include/linux/mm.h
86438@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
86439
86440 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
86441 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
86442+
86443+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
86444+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
86445+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
86446+#else
86447 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
86448+#endif
86449+
86450 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
86451 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
86452
86453@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
86454 int set_page_dirty_lock(struct page *page);
86455 int clear_page_dirty_for_io(struct page *page);
86456
86457-/* Is the vma a continuation of the stack vma above it? */
86458-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
86459-{
86460- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
86461-}
86462-
86463 extern unsigned long move_page_tables(struct vm_area_struct *vma,
86464 unsigned long old_addr, struct vm_area_struct *new_vma,
86465 unsigned long new_addr, unsigned long len);
86466@@ -890,6 +891,8 @@ struct shrinker {
86467 extern void register_shrinker(struct shrinker *);
86468 extern void unregister_shrinker(struct shrinker *);
86469
86470+pgprot_t vm_get_page_prot(unsigned long vm_flags);
86471+
86472 int vma_wants_writenotify(struct vm_area_struct *vma);
86473
86474 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
86475@@ -1162,6 +1165,7 @@ out:
86476 }
86477
86478 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
86479+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
86480
86481 extern unsigned long do_brk(unsigned long, unsigned long);
86482
86483@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
86484 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
86485 struct vm_area_struct **pprev);
86486
86487+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
86488+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
86489+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
86490+
86491 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
86492 NULL if none. Assume start_addr < end_addr. */
86493 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
86494@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
86495 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
86496 }
86497
86498-pgprot_t vm_get_page_prot(unsigned long vm_flags);
86499 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
86500 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
86501 unsigned long pfn, unsigned long size, pgprot_t);
86502@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
86503 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
86504 extern int sysctl_memory_failure_early_kill;
86505 extern int sysctl_memory_failure_recovery;
86506-extern atomic_long_t mce_bad_pages;
86507+extern atomic_long_unchecked_t mce_bad_pages;
86508+
86509+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
86510+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
86511+#else
86512+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
86513+#endif
86514
86515 #endif /* __KERNEL__ */
86516 #endif /* _LINUX_MM_H */
86517diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
86518index 9d12ed5..6d9707a 100644
86519--- a/include/linux/mm_types.h
86520+++ b/include/linux/mm_types.h
86521@@ -186,6 +186,8 @@ struct vm_area_struct {
86522 #ifdef CONFIG_NUMA
86523 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
86524 #endif
86525+
86526+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
86527 };
86528
86529 struct core_thread {
86530@@ -287,6 +289,24 @@ struct mm_struct {
86531 #ifdef CONFIG_MMU_NOTIFIER
86532 struct mmu_notifier_mm *mmu_notifier_mm;
86533 #endif
86534+
86535+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
86536+ unsigned long pax_flags;
86537+#endif
86538+
86539+#ifdef CONFIG_PAX_DLRESOLVE
86540+ unsigned long call_dl_resolve;
86541+#endif
86542+
86543+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
86544+ unsigned long call_syscall;
86545+#endif
86546+
86547+#ifdef CONFIG_PAX_ASLR
86548+ unsigned long delta_mmap; /* randomized offset */
86549+ unsigned long delta_stack; /* randomized offset */
86550+#endif
86551+
86552 };
86553
86554 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
86555diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
86556index 4e02ee2..afb159e 100644
86557--- a/include/linux/mmu_notifier.h
86558+++ b/include/linux/mmu_notifier.h
86559@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
86560 */
86561 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
86562 ({ \
86563- pte_t __pte; \
86564+ pte_t ___pte; \
86565 struct vm_area_struct *___vma = __vma; \
86566 unsigned long ___address = __address; \
86567- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
86568+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
86569 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
86570- __pte; \
86571+ ___pte; \
86572 })
86573
86574 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
86575diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
86576index 6c31a2a..4b0e930 100644
86577--- a/include/linux/mmzone.h
86578+++ b/include/linux/mmzone.h
86579@@ -350,7 +350,7 @@ struct zone {
86580 unsigned long flags; /* zone flags, see below */
86581
86582 /* Zone statistics */
86583- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86584+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
86585
86586 /*
86587 * prev_priority holds the scanning priority for this zone. It is
86588diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
86589index f58e9d8..3503935 100644
86590--- a/include/linux/mod_devicetable.h
86591+++ b/include/linux/mod_devicetable.h
86592@@ -12,7 +12,7 @@
86593 typedef unsigned long kernel_ulong_t;
86594 #endif
86595
86596-#define PCI_ANY_ID (~0)
86597+#define PCI_ANY_ID ((__u16)~0)
86598
86599 struct pci_device_id {
86600 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
86601@@ -131,7 +131,7 @@ struct usb_device_id {
86602 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
86603 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
86604
86605-#define HID_ANY_ID (~0)
86606+#define HID_ANY_ID (~0U)
86607
86608 struct hid_device_id {
86609 __u16 bus;
86610diff --git a/include/linux/module.h b/include/linux/module.h
86611index 482efc8..642032b 100644
86612--- a/include/linux/module.h
86613+++ b/include/linux/module.h
86614@@ -16,6 +16,7 @@
86615 #include <linux/kobject.h>
86616 #include <linux/moduleparam.h>
86617 #include <linux/tracepoint.h>
86618+#include <linux/fs.h>
86619
86620 #include <asm/local.h>
86621 #include <asm/module.h>
86622@@ -287,16 +288,16 @@ struct module
86623 int (*init)(void);
86624
86625 /* If this is non-NULL, vfree after init() returns */
86626- void *module_init;
86627+ void *module_init_rx, *module_init_rw;
86628
86629 /* Here is the actual code + data, vfree'd on unload. */
86630- void *module_core;
86631+ void *module_core_rx, *module_core_rw;
86632
86633 /* Here are the sizes of the init and core sections */
86634- unsigned int init_size, core_size;
86635+ unsigned int init_size_rw, core_size_rw;
86636
86637 /* The size of the executable code in each section. */
86638- unsigned int init_text_size, core_text_size;
86639+ unsigned int init_size_rx, core_size_rx;
86640
86641 /* Arch-specific module values */
86642 struct mod_arch_specific arch;
86643@@ -345,6 +346,10 @@ struct module
86644 #ifdef CONFIG_EVENT_TRACING
86645 struct ftrace_event_call *trace_events;
86646 unsigned int num_trace_events;
86647+ struct file_operations trace_id;
86648+ struct file_operations trace_enable;
86649+ struct file_operations trace_format;
86650+ struct file_operations trace_filter;
86651 #endif
86652 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
86653 unsigned long *ftrace_callsites;
86654@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
86655 bool is_module_address(unsigned long addr);
86656 bool is_module_text_address(unsigned long addr);
86657
86658+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
86659+{
86660+
86661+#ifdef CONFIG_PAX_KERNEXEC
86662+ if (ktla_ktva(addr) >= (unsigned long)start &&
86663+ ktla_ktva(addr) < (unsigned long)start + size)
86664+ return 1;
86665+#endif
86666+
86667+ return ((void *)addr >= start && (void *)addr < start + size);
86668+}
86669+
86670+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
86671+{
86672+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
86673+}
86674+
86675+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
86676+{
86677+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
86678+}
86679+
86680+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
86681+{
86682+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
86683+}
86684+
86685+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
86686+{
86687+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
86688+}
86689+
86690 static inline int within_module_core(unsigned long addr, struct module *mod)
86691 {
86692- return (unsigned long)mod->module_core <= addr &&
86693- addr < (unsigned long)mod->module_core + mod->core_size;
86694+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
86695 }
86696
86697 static inline int within_module_init(unsigned long addr, struct module *mod)
86698 {
86699- return (unsigned long)mod->module_init <= addr &&
86700- addr < (unsigned long)mod->module_init + mod->init_size;
86701+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
86702 }
86703
86704 /* Search for module by name: must hold module_mutex. */
86705diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
86706index c1f40c2..682ca53 100644
86707--- a/include/linux/moduleloader.h
86708+++ b/include/linux/moduleloader.h
86709@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
86710 sections. Returns NULL on failure. */
86711 void *module_alloc(unsigned long size);
86712
86713+#ifdef CONFIG_PAX_KERNEXEC
86714+void *module_alloc_exec(unsigned long size);
86715+#else
86716+#define module_alloc_exec(x) module_alloc(x)
86717+#endif
86718+
86719 /* Free memory returned from module_alloc. */
86720 void module_free(struct module *mod, void *module_region);
86721
86722+#ifdef CONFIG_PAX_KERNEXEC
86723+void module_free_exec(struct module *mod, void *module_region);
86724+#else
86725+#define module_free_exec(x, y) module_free((x), (y))
86726+#endif
86727+
86728 /* Apply the given relocation to the (simplified) ELF. Return -error
86729 or 0. */
86730 int apply_relocate(Elf_Shdr *sechdrs,
86731diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
86732index 82a9124..8a5f622 100644
86733--- a/include/linux/moduleparam.h
86734+++ b/include/linux/moduleparam.h
86735@@ -132,7 +132,7 @@ struct kparam_array
86736
86737 /* Actually copy string: maxlen param is usually sizeof(string). */
86738 #define module_param_string(name, string, len, perm) \
86739- static const struct kparam_string __param_string_##name \
86740+ static const struct kparam_string __param_string_##name __used \
86741 = { len, string }; \
86742 __module_param_call(MODULE_PARAM_PREFIX, name, \
86743 param_set_copystring, param_get_string, \
86744@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
86745
86746 /* Comma-separated array: *nump is set to number they actually specified. */
86747 #define module_param_array_named(name, array, type, nump, perm) \
86748- static const struct kparam_array __param_arr_##name \
86749+ static const struct kparam_array __param_arr_##name __used \
86750 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
86751 sizeof(array[0]), array }; \
86752 __module_param_call(MODULE_PARAM_PREFIX, name, \
86753diff --git a/include/linux/mutex.h b/include/linux/mutex.h
86754index 878cab4..c92cb3e 100644
86755--- a/include/linux/mutex.h
86756+++ b/include/linux/mutex.h
86757@@ -51,7 +51,7 @@ struct mutex {
86758 spinlock_t wait_lock;
86759 struct list_head wait_list;
86760 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
86761- struct thread_info *owner;
86762+ struct task_struct *owner;
86763 #endif
86764 #ifdef CONFIG_DEBUG_MUTEXES
86765 const char *name;
86766diff --git a/include/linux/namei.h b/include/linux/namei.h
86767index ec0f607..d19e675 100644
86768--- a/include/linux/namei.h
86769+++ b/include/linux/namei.h
86770@@ -22,7 +22,7 @@ struct nameidata {
86771 unsigned int flags;
86772 int last_type;
86773 unsigned depth;
86774- char *saved_names[MAX_NESTED_LINKS + 1];
86775+ const char *saved_names[MAX_NESTED_LINKS + 1];
86776
86777 /* Intent data */
86778 union {
86779@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
86780 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
86781 extern void unlock_rename(struct dentry *, struct dentry *);
86782
86783-static inline void nd_set_link(struct nameidata *nd, char *path)
86784+static inline void nd_set_link(struct nameidata *nd, const char *path)
86785 {
86786 nd->saved_names[nd->depth] = path;
86787 }
86788
86789-static inline char *nd_get_link(struct nameidata *nd)
86790+static inline const char *nd_get_link(const struct nameidata *nd)
86791 {
86792 return nd->saved_names[nd->depth];
86793 }
86794diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
86795index 9d7e8f7..04428c5 100644
86796--- a/include/linux/netdevice.h
86797+++ b/include/linux/netdevice.h
86798@@ -637,6 +637,7 @@ struct net_device_ops {
86799 u16 xid);
86800 #endif
86801 };
86802+typedef struct net_device_ops __no_const net_device_ops_no_const;
86803
86804 /*
86805 * The DEVICE structure.
86806diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
86807new file mode 100644
86808index 0000000..33f4af8
86809--- /dev/null
86810+++ b/include/linux/netfilter/xt_gradm.h
86811@@ -0,0 +1,9 @@
86812+#ifndef _LINUX_NETFILTER_XT_GRADM_H
86813+#define _LINUX_NETFILTER_XT_GRADM_H 1
86814+
86815+struct xt_gradm_mtinfo {
86816+ __u16 flags;
86817+ __u16 invflags;
86818+};
86819+
86820+#endif
86821diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
86822index b359c4a..c08b334 100644
86823--- a/include/linux/nodemask.h
86824+++ b/include/linux/nodemask.h
86825@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
86826
86827 #define any_online_node(mask) \
86828 ({ \
86829- int node; \
86830- for_each_node_mask(node, (mask)) \
86831- if (node_online(node)) \
86832+ int __node; \
86833+ for_each_node_mask(__node, (mask)) \
86834+ if (node_online(__node)) \
86835 break; \
86836- node; \
86837+ __node; \
86838 })
86839
86840 #define num_online_nodes() num_node_state(N_ONLINE)
86841diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
86842index 5171639..7cf4235 100644
86843--- a/include/linux/oprofile.h
86844+++ b/include/linux/oprofile.h
86845@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
86846 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
86847 char const * name, ulong * val);
86848
86849-/** Create a file for read-only access to an atomic_t. */
86850+/** Create a file for read-only access to an atomic_unchecked_t. */
86851 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
86852- char const * name, atomic_t * val);
86853+ char const * name, atomic_unchecked_t * val);
86854
86855 /** create a directory */
86856 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
86857diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
86858index 3c62ed4..8924c7c 100644
86859--- a/include/linux/pagemap.h
86860+++ b/include/linux/pagemap.h
86861@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
86862 if (((unsigned long)uaddr & PAGE_MASK) !=
86863 ((unsigned long)end & PAGE_MASK))
86864 ret = __get_user(c, end);
86865+ (void)c;
86866 }
86867+ (void)c;
86868 return ret;
86869 }
86870
86871diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
86872index 81c9689..a567a55 100644
86873--- a/include/linux/perf_event.h
86874+++ b/include/linux/perf_event.h
86875@@ -476,7 +476,7 @@ struct hw_perf_event {
86876 struct hrtimer hrtimer;
86877 };
86878 };
86879- atomic64_t prev_count;
86880+ atomic64_unchecked_t prev_count;
86881 u64 sample_period;
86882 u64 last_period;
86883 atomic64_t period_left;
86884@@ -557,7 +557,7 @@ struct perf_event {
86885 const struct pmu *pmu;
86886
86887 enum perf_event_active_state state;
86888- atomic64_t count;
86889+ atomic64_unchecked_t count;
86890
86891 /*
86892 * These are the total time in nanoseconds that the event
86893@@ -595,8 +595,8 @@ struct perf_event {
86894 * These accumulate total time (in nanoseconds) that children
86895 * events have been enabled and running, respectively.
86896 */
86897- atomic64_t child_total_time_enabled;
86898- atomic64_t child_total_time_running;
86899+ atomic64_unchecked_t child_total_time_enabled;
86900+ atomic64_unchecked_t child_total_time_running;
86901
86902 /*
86903 * Protect attach/detach and child_list:
86904diff --git a/include/linux/personality.h b/include/linux/personality.h
86905index 1261208..ddef96f 100644
86906--- a/include/linux/personality.h
86907+++ b/include/linux/personality.h
86908@@ -43,6 +43,7 @@ enum {
86909 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
86910 ADDR_NO_RANDOMIZE | \
86911 ADDR_COMPAT_LAYOUT | \
86912+ ADDR_LIMIT_3GB | \
86913 MMAP_PAGE_ZERO)
86914
86915 /*
86916diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
86917index b43a9e0..b77d869 100644
86918--- a/include/linux/pipe_fs_i.h
86919+++ b/include/linux/pipe_fs_i.h
86920@@ -46,9 +46,9 @@ struct pipe_inode_info {
86921 wait_queue_head_t wait;
86922 unsigned int nrbufs, curbuf;
86923 struct page *tmp_page;
86924- unsigned int readers;
86925- unsigned int writers;
86926- unsigned int waiting_writers;
86927+ atomic_t readers;
86928+ atomic_t writers;
86929+ atomic_t waiting_writers;
86930 unsigned int r_counter;
86931 unsigned int w_counter;
86932 struct fasync_struct *fasync_readers;
86933diff --git a/include/linux/poison.h b/include/linux/poison.h
86934index 34066ff..e95d744 100644
86935--- a/include/linux/poison.h
86936+++ b/include/linux/poison.h
86937@@ -19,8 +19,8 @@
86938 * under normal circumstances, used to verify that nobody uses
86939 * non-initialized list entries.
86940 */
86941-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
86942-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
86943+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
86944+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
86945
86946 /********** include/linux/timer.h **********/
86947 /*
86948diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
86949index 4f71bf4..cd2f68e 100644
86950--- a/include/linux/posix-timers.h
86951+++ b/include/linux/posix-timers.h
86952@@ -82,7 +82,8 @@ struct k_clock {
86953 #define TIMER_RETRY 1
86954 void (*timer_get) (struct k_itimer * timr,
86955 struct itimerspec * cur_setting);
86956-};
86957+} __do_const;
86958+typedef struct k_clock __no_const k_clock_no_const;
86959
86960 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock);
86961
86962diff --git a/include/linux/preempt.h b/include/linux/preempt.h
86963index 72b1a10..13303a9 100644
86964--- a/include/linux/preempt.h
86965+++ b/include/linux/preempt.h
86966@@ -110,7 +110,7 @@ struct preempt_ops {
86967 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
86968 void (*sched_out)(struct preempt_notifier *notifier,
86969 struct task_struct *next);
86970-};
86971+} __no_const;
86972
86973 /**
86974 * preempt_notifier - key for installing preemption notifiers
86975diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
86976index af7c36a..a93005c 100644
86977--- a/include/linux/prefetch.h
86978+++ b/include/linux/prefetch.h
86979@@ -11,6 +11,7 @@
86980 #define _LINUX_PREFETCH_H
86981
86982 #include <linux/types.h>
86983+#include <linux/const.h>
86984 #include <asm/processor.h>
86985 #include <asm/cache.h>
86986
86987diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
86988index 379eaed..1bf73e3 100644
86989--- a/include/linux/proc_fs.h
86990+++ b/include/linux/proc_fs.h
86991@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
86992 return proc_create_data(name, mode, parent, proc_fops, NULL);
86993 }
86994
86995+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
86996+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
86997+{
86998+#ifdef CONFIG_GRKERNSEC_PROC_USER
86999+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
87000+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
87001+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
87002+#else
87003+ return proc_create_data(name, mode, parent, proc_fops, NULL);
87004+#endif
87005+}
87006+
87007+
87008 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
87009 mode_t mode, struct proc_dir_entry *base,
87010 read_proc_t *read_proc, void * data)
87011@@ -256,7 +269,7 @@ union proc_op {
87012 int (*proc_show)(struct seq_file *m,
87013 struct pid_namespace *ns, struct pid *pid,
87014 struct task_struct *task);
87015-};
87016+} __no_const;
87017
87018 struct ctl_table_header;
87019 struct ctl_table;
87020diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
87021index 7456d7d..6c1cfc9 100644
87022--- a/include/linux/ptrace.h
87023+++ b/include/linux/ptrace.h
87024@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
87025 extern void exit_ptrace(struct task_struct *tracer);
87026 #define PTRACE_MODE_READ 1
87027 #define PTRACE_MODE_ATTACH 2
87028-/* Returns 0 on success, -errno on denial. */
87029-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
87030 /* Returns true on success, false on denial. */
87031 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
87032+/* Returns true on success, false on denial. */
87033+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
87034
87035 static inline int ptrace_reparented(struct task_struct *child)
87036 {
87037diff --git a/include/linux/random.h b/include/linux/random.h
87038index 2948046..3262567 100644
87039--- a/include/linux/random.h
87040+++ b/include/linux/random.h
87041@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
87042 u32 random32(void);
87043 void srandom32(u32 seed);
87044
87045+static inline unsigned long pax_get_random_long(void)
87046+{
87047+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
87048+}
87049+
87050 #endif /* __KERNEL___ */
87051
87052 #endif /* _LINUX_RANDOM_H */
87053diff --git a/include/linux/reboot.h b/include/linux/reboot.h
87054index 988e55f..17cb4ef 100644
87055--- a/include/linux/reboot.h
87056+++ b/include/linux/reboot.h
87057@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
87058 * Architecture-specific implementations of sys_reboot commands.
87059 */
87060
87061-extern void machine_restart(char *cmd);
87062-extern void machine_halt(void);
87063-extern void machine_power_off(void);
87064+extern void machine_restart(char *cmd) __noreturn;
87065+extern void machine_halt(void) __noreturn;
87066+extern void machine_power_off(void) __noreturn;
87067
87068 extern void machine_shutdown(void);
87069 struct pt_regs;
87070@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
87071 */
87072
87073 extern void kernel_restart_prepare(char *cmd);
87074-extern void kernel_restart(char *cmd);
87075-extern void kernel_halt(void);
87076-extern void kernel_power_off(void);
87077+extern void kernel_restart(char *cmd) __noreturn;
87078+extern void kernel_halt(void) __noreturn;
87079+extern void kernel_power_off(void) __noreturn;
87080
87081 void ctrl_alt_del(void);
87082
87083@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
87084 * Emergency restart, callable from an interrupt handler.
87085 */
87086
87087-extern void emergency_restart(void);
87088+extern void emergency_restart(void) __noreturn;
87089 #include <asm/emergency-restart.h>
87090
87091 #endif
87092diff --git a/include/linux/regset.h b/include/linux/regset.h
87093index 8abee65..5150fd1 100644
87094--- a/include/linux/regset.h
87095+++ b/include/linux/regset.h
87096@@ -335,6 +335,9 @@ static inline int copy_regset_to_user(struct task_struct *target,
87097 {
87098 const struct user_regset *regset = &view->regsets[setno];
87099
87100+ if (!regset->get)
87101+ return -EOPNOTSUPP;
87102+
87103 if (!access_ok(VERIFY_WRITE, data, size))
87104 return -EIO;
87105
87106@@ -358,6 +361,9 @@ static inline int copy_regset_from_user(struct task_struct *target,
87107 {
87108 const struct user_regset *regset = &view->regsets[setno];
87109
87110+ if (!regset->set)
87111+ return -EOPNOTSUPP;
87112+
87113 if (!access_ok(VERIFY_READ, data, size))
87114 return -EIO;
87115
87116diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
87117index dd31e7b..5b03c5c 100644
87118--- a/include/linux/reiserfs_fs.h
87119+++ b/include/linux/reiserfs_fs.h
87120@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
87121 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
87122
87123 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
87124-#define get_generation(s) atomic_read (&fs_generation(s))
87125+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
87126 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
87127 #define __fs_changed(gen,s) (gen != get_generation (s))
87128 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
87129@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
87130 */
87131
87132 struct item_operations {
87133- int (*bytes_number) (struct item_head * ih, int block_size);
87134- void (*decrement_key) (struct cpu_key *);
87135- int (*is_left_mergeable) (struct reiserfs_key * ih,
87136+ int (* const bytes_number) (struct item_head * ih, int block_size);
87137+ void (* const decrement_key) (struct cpu_key *);
87138+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
87139 unsigned long bsize);
87140- void (*print_item) (struct item_head *, char *item);
87141- void (*check_item) (struct item_head *, char *item);
87142+ void (* const print_item) (struct item_head *, char *item);
87143+ void (* const check_item) (struct item_head *, char *item);
87144
87145- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87146+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
87147 int is_affected, int insert_size);
87148- int (*check_left) (struct virtual_item * vi, int free,
87149+ int (* const check_left) (struct virtual_item * vi, int free,
87150 int start_skip, int end_skip);
87151- int (*check_right) (struct virtual_item * vi, int free);
87152- int (*part_size) (struct virtual_item * vi, int from, int to);
87153- int (*unit_num) (struct virtual_item * vi);
87154- void (*print_vi) (struct virtual_item * vi);
87155+ int (* const check_right) (struct virtual_item * vi, int free);
87156+ int (* const part_size) (struct virtual_item * vi, int from, int to);
87157+ int (* const unit_num) (struct virtual_item * vi);
87158+ void (* const print_vi) (struct virtual_item * vi);
87159 };
87160
87161-extern struct item_operations *item_ops[TYPE_ANY + 1];
87162+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
87163
87164 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
87165 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
87166diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
87167index dab68bb..0688727 100644
87168--- a/include/linux/reiserfs_fs_sb.h
87169+++ b/include/linux/reiserfs_fs_sb.h
87170@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
87171 /* Comment? -Hans */
87172 wait_queue_head_t s_wait;
87173 /* To be obsoleted soon by per buffer seals.. -Hans */
87174- atomic_t s_generation_counter; // increased by one every time the
87175+ atomic_unchecked_t s_generation_counter; // increased by one every time the
87176 // tree gets re-balanced
87177 unsigned long s_properties; /* File system properties. Currently holds
87178 on-disk FS format */
87179diff --git a/include/linux/relay.h b/include/linux/relay.h
87180index 14a86bc..17d0700 100644
87181--- a/include/linux/relay.h
87182+++ b/include/linux/relay.h
87183@@ -159,7 +159,7 @@ struct rchan_callbacks
87184 * The callback should return 0 if successful, negative if not.
87185 */
87186 int (*remove_buf_file)(struct dentry *dentry);
87187-};
87188+} __no_const;
87189
87190 /*
87191 * CONFIG_RELAY kernel API, kernel/relay.c
87192diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
87193index 3392c59..a746428 100644
87194--- a/include/linux/rfkill.h
87195+++ b/include/linux/rfkill.h
87196@@ -144,6 +144,7 @@ struct rfkill_ops {
87197 void (*query)(struct rfkill *rfkill, void *data);
87198 int (*set_block)(void *data, bool blocked);
87199 };
87200+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
87201
87202 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
87203 /**
87204diff --git a/include/linux/sched.h b/include/linux/sched.h
87205index 71849bf..8cf9dd2 100644
87206--- a/include/linux/sched.h
87207+++ b/include/linux/sched.h
87208@@ -101,6 +101,7 @@ struct bio;
87209 struct fs_struct;
87210 struct bts_context;
87211 struct perf_event_context;
87212+struct linux_binprm;
87213
87214 /*
87215 * List of flags we want to share for kernel threads,
87216@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
87217 extern signed long schedule_timeout_uninterruptible(signed long timeout);
87218 asmlinkage void __schedule(void);
87219 asmlinkage void schedule(void);
87220-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
87221+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
87222
87223 struct nsproxy;
87224 struct user_namespace;
87225@@ -371,9 +372,12 @@ struct user_namespace;
87226 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
87227
87228 extern int sysctl_max_map_count;
87229+extern unsigned long sysctl_heap_stack_gap;
87230
87231 #include <linux/aio.h>
87232
87233+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
87234+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
87235 extern unsigned long
87236 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
87237 unsigned long, unsigned long);
87238@@ -666,6 +670,16 @@ struct signal_struct {
87239 struct tty_audit_buf *tty_audit_buf;
87240 #endif
87241
87242+#ifdef CONFIG_GRKERNSEC
87243+ u32 curr_ip;
87244+ u32 saved_ip;
87245+ u32 gr_saddr;
87246+ u32 gr_daddr;
87247+ u16 gr_sport;
87248+ u16 gr_dport;
87249+ u8 used_accept:1;
87250+#endif
87251+
87252 int oom_adj; /* OOM kill score adjustment (bit shift) */
87253 };
87254
87255@@ -723,6 +737,11 @@ struct user_struct {
87256 struct key *session_keyring; /* UID's default session keyring */
87257 #endif
87258
87259+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
87260+ unsigned int banned;
87261+ unsigned long ban_expires;
87262+#endif
87263+
87264 /* Hash table maintenance information */
87265 struct hlist_node uidhash_node;
87266 uid_t uid;
87267@@ -1328,8 +1347,8 @@ struct task_struct {
87268 struct list_head thread_group;
87269
87270 struct completion *vfork_done; /* for vfork() */
87271- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
87272- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87273+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
87274+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
87275
87276 cputime_t utime, stime, utimescaled, stimescaled;
87277 cputime_t gtime;
87278@@ -1343,16 +1362,6 @@ struct task_struct {
87279 struct task_cputime cputime_expires;
87280 struct list_head cpu_timers[3];
87281
87282-/* process credentials */
87283- const struct cred *real_cred; /* objective and real subjective task
87284- * credentials (COW) */
87285- const struct cred *cred; /* effective (overridable) subjective task
87286- * credentials (COW) */
87287- struct mutex cred_guard_mutex; /* guard against foreign influences on
87288- * credential calculations
87289- * (notably. ptrace) */
87290- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
87291-
87292 char comm[TASK_COMM_LEN]; /* executable name excluding path
87293 - access with [gs]et_task_comm (which lock
87294 it with task_lock())
87295@@ -1369,6 +1378,10 @@ struct task_struct {
87296 #endif
87297 /* CPU-specific state of this task */
87298 struct thread_struct thread;
87299+/* thread_info moved to task_struct */
87300+#ifdef CONFIG_X86
87301+ struct thread_info tinfo;
87302+#endif
87303 /* filesystem information */
87304 struct fs_struct *fs;
87305 /* open file information */
87306@@ -1436,6 +1449,15 @@ struct task_struct {
87307 int hardirq_context;
87308 int softirq_context;
87309 #endif
87310+
87311+/* process credentials */
87312+ const struct cred *real_cred; /* objective and real subjective task
87313+ * credentials (COW) */
87314+ struct mutex cred_guard_mutex; /* guard against foreign influences on
87315+ * credential calculations
87316+ * (notably. ptrace) */
87317+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
87318+
87319 #ifdef CONFIG_LOCKDEP
87320 # define MAX_LOCK_DEPTH 48UL
87321 u64 curr_chain_key;
87322@@ -1456,6 +1478,9 @@ struct task_struct {
87323
87324 struct backing_dev_info *backing_dev_info;
87325
87326+ const struct cred *cred; /* effective (overridable) subjective task
87327+ * credentials (COW) */
87328+
87329 struct io_context *io_context;
87330
87331 unsigned long ptrace_message;
87332@@ -1519,6 +1544,27 @@ struct task_struct {
87333 unsigned long default_timer_slack_ns;
87334
87335 struct list_head *scm_work_list;
87336+
87337+#ifdef CONFIG_GRKERNSEC
87338+ /* grsecurity */
87339+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87340+ u64 exec_id;
87341+#endif
87342+#ifdef CONFIG_GRKERNSEC_SETXID
87343+ const struct cred *delayed_cred;
87344+#endif
87345+ struct dentry *gr_chroot_dentry;
87346+ struct acl_subject_label *acl;
87347+ struct acl_role_label *role;
87348+ struct file *exec_file;
87349+ u16 acl_role_id;
87350+ /* is this the task that authenticated to the special role */
87351+ u8 acl_sp_role;
87352+ u8 is_writable;
87353+ u8 brute;
87354+ u8 gr_is_chrooted;
87355+#endif
87356+
87357 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
87358 /* Index of current stored adress in ret_stack */
87359 int curr_ret_stack;
87360@@ -1542,6 +1588,57 @@ struct task_struct {
87361 #endif /* CONFIG_TRACING */
87362 };
87363
87364+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
87365+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
87366+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
87367+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
87368+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
87369+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
87370+
87371+#ifdef CONFIG_PAX_SOFTMODE
87372+extern int pax_softmode;
87373+#endif
87374+
87375+extern int pax_check_flags(unsigned long *);
87376+
87377+/* if tsk != current then task_lock must be held on it */
87378+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
87379+static inline unsigned long pax_get_flags(struct task_struct *tsk)
87380+{
87381+ if (likely(tsk->mm))
87382+ return tsk->mm->pax_flags;
87383+ else
87384+ return 0UL;
87385+}
87386+
87387+/* if tsk != current then task_lock must be held on it */
87388+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
87389+{
87390+ if (likely(tsk->mm)) {
87391+ tsk->mm->pax_flags = flags;
87392+ return 0;
87393+ }
87394+ return -EINVAL;
87395+}
87396+#endif
87397+
87398+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
87399+extern void pax_set_initial_flags(struct linux_binprm *bprm);
87400+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
87401+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
87402+#endif
87403+
87404+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
87405+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
87406+extern void pax_report_refcount_overflow(struct pt_regs *regs);
87407+extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
87408+
87409+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
87410+extern void pax_track_stack(void);
87411+#else
87412+static inline void pax_track_stack(void) {}
87413+#endif
87414+
87415 /* Future-safe accessor for struct task_struct's cpus_allowed. */
87416 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
87417
87418@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
87419 #define PF_DUMPCORE 0x00000200 /* dumped core */
87420 #define PF_SIGNALED 0x00000400 /* killed by a signal */
87421 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
87422-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
87423+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
87424 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
87425 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
87426 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
87427@@ -1978,7 +2075,9 @@ void yield(void);
87428 extern struct exec_domain default_exec_domain;
87429
87430 union thread_union {
87431+#ifndef CONFIG_X86
87432 struct thread_info thread_info;
87433+#endif
87434 unsigned long stack[THREAD_SIZE/sizeof(long)];
87435 };
87436
87437@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
87438 */
87439
87440 extern struct task_struct *find_task_by_vpid(pid_t nr);
87441+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
87442 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
87443 struct pid_namespace *ns);
87444
87445@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
87446 extern void exit_itimers(struct signal_struct *);
87447 extern void flush_itimer_signals(void);
87448
87449-extern NORET_TYPE void do_group_exit(int);
87450+extern __noreturn void do_group_exit(int);
87451
87452 extern void daemonize(const char *, ...);
87453 extern int allow_signal(int);
87454@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
87455
87456 #endif
87457
87458-static inline int object_is_on_stack(void *obj)
87459+static inline int object_starts_on_stack(void *obj)
87460 {
87461- void *stack = task_stack_page(current);
87462+ const void *stack = task_stack_page(current);
87463
87464 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
87465 }
87466
87467+#ifdef CONFIG_PAX_USERCOPY
87468+extern int object_is_on_stack(const void *obj, unsigned long len);
87469+#endif
87470+
87471 extern void thread_info_cache_init(void);
87472
87473 #ifdef CONFIG_DEBUG_STACK_USAGE
87474@@ -2616,6 +2720,23 @@ static inline unsigned long rlimit_max(unsigned int limit)
87475 return task_rlimit_max(current, limit);
87476 }
87477
87478+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87479+DECLARE_PER_CPU(u64, exec_counter);
87480+static inline void increment_exec_counter(void)
87481+{
87482+ unsigned int cpu;
87483+ u64 *exec_id_ptr;
87484+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
87485+ cpu = get_cpu();
87486+ exec_id_ptr = &per_cpu(exec_counter, cpu);
87487+ *exec_id_ptr += 1ULL << 16;
87488+ current->exec_id = *exec_id_ptr;
87489+ put_cpu();
87490+}
87491+#else
87492+static inline void increment_exec_counter(void) {}
87493+#endif
87494+
87495 #endif /* __KERNEL__ */
87496
87497 #endif
87498diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
87499index 1ee2c05..81b7ec4 100644
87500--- a/include/linux/screen_info.h
87501+++ b/include/linux/screen_info.h
87502@@ -42,7 +42,8 @@ struct screen_info {
87503 __u16 pages; /* 0x32 */
87504 __u16 vesa_attributes; /* 0x34 */
87505 __u32 capabilities; /* 0x36 */
87506- __u8 _reserved[6]; /* 0x3a */
87507+ __u16 vesapm_size; /* 0x3a */
87508+ __u8 _reserved[4]; /* 0x3c */
87509 } __attribute__((packed));
87510
87511 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
87512diff --git a/include/linux/security.h b/include/linux/security.h
87513index d40d23f..d739b08 100644
87514--- a/include/linux/security.h
87515+++ b/include/linux/security.h
87516@@ -34,6 +34,7 @@
87517 #include <linux/key.h>
87518 #include <linux/xfrm.h>
87519 #include <linux/gfp.h>
87520+#include <linux/grsecurity.h>
87521 #include <net/flow.h>
87522
87523 /* Maximum number of letters for an LSM name string */
87524@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
87525 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
87526 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
87527 extern int cap_task_setnice(struct task_struct *p, int nice);
87528-extern int cap_syslog(int type);
87529+extern int cap_syslog(int type, bool from_file);
87530 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
87531
87532 struct msghdr;
87533@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
87534 * logging to the console.
87535 * See the syslog(2) manual page for an explanation of the @type values.
87536 * @type contains the type of action.
87537+ * @from_file indicates the context of action (if it came from /proc).
87538 * Return 0 if permission is granted.
87539 * @settime:
87540 * Check permission to change the system time.
87541@@ -1445,7 +1447,7 @@ struct security_operations {
87542 int (*sysctl) (struct ctl_table *table, int op);
87543 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
87544 int (*quota_on) (struct dentry *dentry);
87545- int (*syslog) (int type);
87546+ int (*syslog) (int type, bool from_file);
87547 int (*settime) (struct timespec *ts, struct timezone *tz);
87548 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
87549
87550@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
87551 int security_sysctl(struct ctl_table *table, int op);
87552 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
87553 int security_quota_on(struct dentry *dentry);
87554-int security_syslog(int type);
87555+int security_syslog(int type, bool from_file);
87556 int security_settime(struct timespec *ts, struct timezone *tz);
87557 int security_vm_enough_memory(long pages);
87558 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
87559@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
87560 return 0;
87561 }
87562
87563-static inline int security_syslog(int type)
87564+static inline int security_syslog(int type, bool from_file)
87565 {
87566- return cap_syslog(type);
87567+ return cap_syslog(type, from_file);
87568 }
87569
87570 static inline int security_settime(struct timespec *ts, struct timezone *tz)
87571diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
87572index 8366d8f..cc5f9d6 100644
87573--- a/include/linux/seq_file.h
87574+++ b/include/linux/seq_file.h
87575@@ -23,6 +23,9 @@ struct seq_file {
87576 u64 version;
87577 struct mutex lock;
87578 const struct seq_operations *op;
87579+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
87580+ u64 exec_id;
87581+#endif
87582 void *private;
87583 };
87584
87585@@ -32,6 +35,7 @@ struct seq_operations {
87586 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
87587 int (*show) (struct seq_file *m, void *v);
87588 };
87589+typedef struct seq_operations __no_const seq_operations_no_const;
87590
87591 #define SEQ_SKIP 1
87592
87593diff --git a/include/linux/shm.h b/include/linux/shm.h
87594index eca6235..c7417ed 100644
87595--- a/include/linux/shm.h
87596+++ b/include/linux/shm.h
87597@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
87598 pid_t shm_cprid;
87599 pid_t shm_lprid;
87600 struct user_struct *mlock_user;
87601+#ifdef CONFIG_GRKERNSEC
87602+ time_t shm_createtime;
87603+ pid_t shm_lapid;
87604+#endif
87605 };
87606
87607 /* shm_mode upper byte flags */
87608diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
87609index bcdd660..fd2e332 100644
87610--- a/include/linux/skbuff.h
87611+++ b/include/linux/skbuff.h
87612@@ -14,6 +14,7 @@
87613 #ifndef _LINUX_SKBUFF_H
87614 #define _LINUX_SKBUFF_H
87615
87616+#include <linux/const.h>
87617 #include <linux/kernel.h>
87618 #include <linux/kmemcheck.h>
87619 #include <linux/compiler.h>
87620@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
87621 */
87622 static inline int skb_queue_empty(const struct sk_buff_head *list)
87623 {
87624- return list->next == (struct sk_buff *)list;
87625+ return list->next == (const struct sk_buff *)list;
87626 }
87627
87628 /**
87629@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
87630 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
87631 const struct sk_buff *skb)
87632 {
87633- return (skb->next == (struct sk_buff *) list);
87634+ return (skb->next == (const struct sk_buff *) list);
87635 }
87636
87637 /**
87638@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
87639 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
87640 const struct sk_buff *skb)
87641 {
87642- return (skb->prev == (struct sk_buff *) list);
87643+ return (skb->prev == (const struct sk_buff *) list);
87644 }
87645
87646 /**
87647@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
87648 * headroom, you should not reduce this.
87649 */
87650 #ifndef NET_SKB_PAD
87651-#define NET_SKB_PAD 32
87652+#define NET_SKB_PAD (_AC(32,UL))
87653 #endif
87654
87655 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
87656@@ -1489,6 +1490,22 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
87657 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
87658 }
87659
87660+static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
87661+ unsigned int length, gfp_t gfp)
87662+{
87663+ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
87664+
87665+ if (NET_IP_ALIGN && skb)
87666+ skb_reserve(skb, NET_IP_ALIGN);
87667+ return skb;
87668+}
87669+
87670+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
87671+ unsigned int length)
87672+{
87673+ return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
87674+}
87675+
87676 extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
87677
87678 /**
87679diff --git a/include/linux/slab.h b/include/linux/slab.h
87680index 2da8372..a3be824 100644
87681--- a/include/linux/slab.h
87682+++ b/include/linux/slab.h
87683@@ -11,12 +11,20 @@
87684
87685 #include <linux/gfp.h>
87686 #include <linux/types.h>
87687+#include <linux/err.h>
87688
87689 /*
87690 * Flags to pass to kmem_cache_create().
87691 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
87692 */
87693 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
87694+
87695+#ifdef CONFIG_PAX_USERCOPY
87696+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
87697+#else
87698+#define SLAB_USERCOPY 0x00000000UL
87699+#endif
87700+
87701 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
87702 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
87703 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
87704@@ -82,10 +90,13 @@
87705 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
87706 * Both make kfree a no-op.
87707 */
87708-#define ZERO_SIZE_PTR ((void *)16)
87709+#define ZERO_SIZE_PTR \
87710+({ \
87711+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
87712+ (void *)(-MAX_ERRNO-1L); \
87713+})
87714
87715-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
87716- (unsigned long)ZERO_SIZE_PTR)
87717+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
87718
87719 /*
87720 * struct kmem_cache related prototypes
87721@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
87722 void kfree(const void *);
87723 void kzfree(const void *);
87724 size_t ksize(const void *);
87725+void check_object_size(const void *ptr, unsigned long n, bool to);
87726
87727 /*
87728 * Allocator specific definitions. These are mainly used to establish optimized
87729@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
87730
87731 void __init kmem_cache_init_late(void);
87732
87733+#define kmalloc(x, y) \
87734+({ \
87735+ void *___retval; \
87736+ intoverflow_t ___x = (intoverflow_t)x; \
87737+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
87738+ ___retval = NULL; \
87739+ else \
87740+ ___retval = kmalloc((size_t)___x, (y)); \
87741+ ___retval; \
87742+})
87743+
87744+#define kmalloc_node(x, y, z) \
87745+({ \
87746+ void *___retval; \
87747+ intoverflow_t ___x = (intoverflow_t)x; \
87748+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
87749+ ___retval = NULL; \
87750+ else \
87751+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
87752+ ___retval; \
87753+})
87754+
87755+#define kzalloc(x, y) \
87756+({ \
87757+ void *___retval; \
87758+ intoverflow_t ___x = (intoverflow_t)x; \
87759+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
87760+ ___retval = NULL; \
87761+ else \
87762+ ___retval = kzalloc((size_t)___x, (y)); \
87763+ ___retval; \
87764+})
87765+
87766 #endif /* _LINUX_SLAB_H */
87767diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
87768index 850d057..d9dfe3c 100644
87769--- a/include/linux/slab_def.h
87770+++ b/include/linux/slab_def.h
87771@@ -69,10 +69,10 @@ struct kmem_cache {
87772 unsigned long node_allocs;
87773 unsigned long node_frees;
87774 unsigned long node_overflow;
87775- atomic_t allochit;
87776- atomic_t allocmiss;
87777- atomic_t freehit;
87778- atomic_t freemiss;
87779+ atomic_unchecked_t allochit;
87780+ atomic_unchecked_t allocmiss;
87781+ atomic_unchecked_t freehit;
87782+ atomic_unchecked_t freemiss;
87783
87784 /*
87785 * If debugging is enabled, then the allocator can add additional
87786diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
87787index 5ad70a6..57f9f65 100644
87788--- a/include/linux/slub_def.h
87789+++ b/include/linux/slub_def.h
87790@@ -86,7 +86,7 @@ struct kmem_cache {
87791 struct kmem_cache_order_objects max;
87792 struct kmem_cache_order_objects min;
87793 gfp_t allocflags; /* gfp flags to use on each alloc */
87794- int refcount; /* Refcount for slab cache destroy */
87795+ atomic_t refcount; /* Refcount for slab cache destroy */
87796 void (*ctor)(void *);
87797 int inuse; /* Offset to metadata */
87798 int align; /* Alignment */
87799@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
87800 #endif
87801
87802 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
87803-void *__kmalloc(size_t size, gfp_t flags);
87804+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
87805
87806 #ifdef CONFIG_KMEMTRACE
87807 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
87808diff --git a/include/linux/sonet.h b/include/linux/sonet.h
87809index 67ad11f..0bbd8af 100644
87810--- a/include/linux/sonet.h
87811+++ b/include/linux/sonet.h
87812@@ -61,7 +61,7 @@ struct sonet_stats {
87813 #include <asm/atomic.h>
87814
87815 struct k_sonet_stats {
87816-#define __HANDLE_ITEM(i) atomic_t i
87817+#define __HANDLE_ITEM(i) atomic_unchecked_t i
87818 __SONET_ITEMS
87819 #undef __HANDLE_ITEM
87820 };
87821diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
87822index 6f52b4d..5500323 100644
87823--- a/include/linux/sunrpc/cache.h
87824+++ b/include/linux/sunrpc/cache.h
87825@@ -125,7 +125,7 @@ struct cache_detail {
87826 */
87827 struct cache_req {
87828 struct cache_deferred_req *(*defer)(struct cache_req *req);
87829-};
87830+} __no_const;
87831 /* this must be embedded in a deferred_request that is being
87832 * delayed awaiting cache-fill
87833 */
87834diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
87835index 8ed9642..101ceab 100644
87836--- a/include/linux/sunrpc/clnt.h
87837+++ b/include/linux/sunrpc/clnt.h
87838@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
87839 {
87840 switch (sap->sa_family) {
87841 case AF_INET:
87842- return ntohs(((struct sockaddr_in *)sap)->sin_port);
87843+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
87844 case AF_INET6:
87845- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
87846+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
87847 }
87848 return 0;
87849 }
87850@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
87851 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
87852 const struct sockaddr *src)
87853 {
87854- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
87855+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
87856 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
87857
87858 dsin->sin_family = ssin->sin_family;
87859@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
87860 if (sa->sa_family != AF_INET6)
87861 return 0;
87862
87863- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
87864+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
87865 }
87866
87867 #endif /* __KERNEL__ */
87868diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
87869index c14fe86..393245e 100644
87870--- a/include/linux/sunrpc/svc_rdma.h
87871+++ b/include/linux/sunrpc/svc_rdma.h
87872@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
87873 extern unsigned int svcrdma_max_requests;
87874 extern unsigned int svcrdma_max_req_size;
87875
87876-extern atomic_t rdma_stat_recv;
87877-extern atomic_t rdma_stat_read;
87878-extern atomic_t rdma_stat_write;
87879-extern atomic_t rdma_stat_sq_starve;
87880-extern atomic_t rdma_stat_rq_starve;
87881-extern atomic_t rdma_stat_rq_poll;
87882-extern atomic_t rdma_stat_rq_prod;
87883-extern atomic_t rdma_stat_sq_poll;
87884-extern atomic_t rdma_stat_sq_prod;
87885+extern atomic_unchecked_t rdma_stat_recv;
87886+extern atomic_unchecked_t rdma_stat_read;
87887+extern atomic_unchecked_t rdma_stat_write;
87888+extern atomic_unchecked_t rdma_stat_sq_starve;
87889+extern atomic_unchecked_t rdma_stat_rq_starve;
87890+extern atomic_unchecked_t rdma_stat_rq_poll;
87891+extern atomic_unchecked_t rdma_stat_rq_prod;
87892+extern atomic_unchecked_t rdma_stat_sq_poll;
87893+extern atomic_unchecked_t rdma_stat_sq_prod;
87894
87895 #define RPCRDMA_VERSION 1
87896
87897diff --git a/include/linux/suspend.h b/include/linux/suspend.h
87898index 5e781d8..1e62818 100644
87899--- a/include/linux/suspend.h
87900+++ b/include/linux/suspend.h
87901@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
87902 * which require special recovery actions in that situation.
87903 */
87904 struct platform_suspend_ops {
87905- int (*valid)(suspend_state_t state);
87906- int (*begin)(suspend_state_t state);
87907- int (*prepare)(void);
87908- int (*prepare_late)(void);
87909- int (*enter)(suspend_state_t state);
87910- void (*wake)(void);
87911- void (*finish)(void);
87912- void (*end)(void);
87913- void (*recover)(void);
87914+ int (* const valid)(suspend_state_t state);
87915+ int (* const begin)(suspend_state_t state);
87916+ int (* const prepare)(void);
87917+ int (* const prepare_late)(void);
87918+ int (* const enter)(suspend_state_t state);
87919+ void (* const wake)(void);
87920+ void (* const finish)(void);
87921+ void (* const end)(void);
87922+ void (* const recover)(void);
87923 };
87924
87925 #ifdef CONFIG_SUSPEND
87926@@ -120,7 +120,7 @@ struct platform_suspend_ops {
87927 * suspend_set_ops - set platform dependent suspend operations
87928 * @ops: The new suspend operations to set.
87929 */
87930-extern void suspend_set_ops(struct platform_suspend_ops *ops);
87931+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
87932 extern int suspend_valid_only_mem(suspend_state_t state);
87933
87934 /**
87935@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
87936 #else /* !CONFIG_SUSPEND */
87937 #define suspend_valid_only_mem NULL
87938
87939-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
87940+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
87941 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
87942 #endif /* !CONFIG_SUSPEND */
87943
87944@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
87945 * platforms which require special recovery actions in that situation.
87946 */
87947 struct platform_hibernation_ops {
87948- int (*begin)(void);
87949- void (*end)(void);
87950- int (*pre_snapshot)(void);
87951- void (*finish)(void);
87952- int (*prepare)(void);
87953- int (*enter)(void);
87954- void (*leave)(void);
87955- int (*pre_restore)(void);
87956- void (*restore_cleanup)(void);
87957- void (*recover)(void);
87958+ int (* const begin)(void);
87959+ void (* const end)(void);
87960+ int (* const pre_snapshot)(void);
87961+ void (* const finish)(void);
87962+ int (* const prepare)(void);
87963+ int (* const enter)(void);
87964+ void (* const leave)(void);
87965+ int (* const pre_restore)(void);
87966+ void (* const restore_cleanup)(void);
87967+ void (* const recover)(void);
87968 };
87969
87970 #ifdef CONFIG_HIBERNATION
87971@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
87972 extern void swsusp_unset_page_free(struct page *);
87973 extern unsigned long get_safe_page(gfp_t gfp_mask);
87974
87975-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
87976+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
87977 extern int hibernate(void);
87978 extern bool system_entering_hibernation(void);
87979 #else /* CONFIG_HIBERNATION */
87980@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
87981 static inline void swsusp_set_page_free(struct page *p) {}
87982 static inline void swsusp_unset_page_free(struct page *p) {}
87983
87984-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
87985+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
87986 static inline int hibernate(void) { return -ENOSYS; }
87987 static inline bool system_entering_hibernation(void) { return false; }
87988 #endif /* CONFIG_HIBERNATION */
87989diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
87990index 0eb6942..a805cb6 100644
87991--- a/include/linux/sysctl.h
87992+++ b/include/linux/sysctl.h
87993@@ -164,7 +164,11 @@ enum
87994 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
87995 };
87996
87997-
87998+#ifdef CONFIG_PAX_SOFTMODE
87999+enum {
88000+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
88001+};
88002+#endif
88003
88004 /* CTL_VM names: */
88005 enum
88006@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
88007
88008 extern int proc_dostring(struct ctl_table *, int,
88009 void __user *, size_t *, loff_t *);
88010+extern int proc_dostring_modpriv(struct ctl_table *, int,
88011+ void __user *, size_t *, loff_t *);
88012 extern int proc_dointvec(struct ctl_table *, int,
88013 void __user *, size_t *, loff_t *);
88014 extern int proc_dointvec_minmax(struct ctl_table *, int,
88015@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
88016
88017 extern ctl_handler sysctl_data;
88018 extern ctl_handler sysctl_string;
88019+extern ctl_handler sysctl_string_modpriv;
88020 extern ctl_handler sysctl_intvec;
88021 extern ctl_handler sysctl_jiffies;
88022 extern ctl_handler sysctl_ms_jiffies;
88023diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
88024index 9d68fed..71f02cc 100644
88025--- a/include/linux/sysfs.h
88026+++ b/include/linux/sysfs.h
88027@@ -75,8 +75,8 @@ struct bin_attribute {
88028 };
88029
88030 struct sysfs_ops {
88031- ssize_t (*show)(struct kobject *, struct attribute *,char *);
88032- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
88033+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
88034+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
88035 };
88036
88037 struct sysfs_dirent;
88038diff --git a/include/linux/syslog.h b/include/linux/syslog.h
88039new file mode 100644
88040index 0000000..3891139
88041--- /dev/null
88042+++ b/include/linux/syslog.h
88043@@ -0,0 +1,52 @@
88044+/* Syslog internals
88045+ *
88046+ * Copyright 2010 Canonical, Ltd.
88047+ * Author: Kees Cook <kees.cook@canonical.com>
88048+ *
88049+ * This program is free software; you can redistribute it and/or modify
88050+ * it under the terms of the GNU General Public License as published by
88051+ * the Free Software Foundation; either version 2, or (at your option)
88052+ * any later version.
88053+ *
88054+ * This program is distributed in the hope that it will be useful,
88055+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
88056+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
88057+ * GNU General Public License for more details.
88058+ *
88059+ * You should have received a copy of the GNU General Public License
88060+ * along with this program; see the file COPYING. If not, write to
88061+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
88062+ */
88063+
88064+#ifndef _LINUX_SYSLOG_H
88065+#define _LINUX_SYSLOG_H
88066+
88067+/* Close the log. Currently a NOP. */
88068+#define SYSLOG_ACTION_CLOSE 0
88069+/* Open the log. Currently a NOP. */
88070+#define SYSLOG_ACTION_OPEN 1
88071+/* Read from the log. */
88072+#define SYSLOG_ACTION_READ 2
88073+/* Read all messages remaining in the ring buffer. */
88074+#define SYSLOG_ACTION_READ_ALL 3
88075+/* Read and clear all messages remaining in the ring buffer */
88076+#define SYSLOG_ACTION_READ_CLEAR 4
88077+/* Clear ring buffer. */
88078+#define SYSLOG_ACTION_CLEAR 5
88079+/* Disable printk's to console */
88080+#define SYSLOG_ACTION_CONSOLE_OFF 6
88081+/* Enable printk's to console */
88082+#define SYSLOG_ACTION_CONSOLE_ON 7
88083+/* Set level of messages printed to console */
88084+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
88085+/* Return number of unread characters in the log buffer */
88086+#define SYSLOG_ACTION_SIZE_UNREAD 9
88087+/* Return size of the log buffer */
88088+#define SYSLOG_ACTION_SIZE_BUFFER 10
88089+
88090+#define SYSLOG_FROM_CALL 0
88091+#define SYSLOG_FROM_FILE 1
88092+
88093+int do_syslog(int type, char __user *buf, int count, bool from_file);
88094+
88095+#endif /* _LINUX_SYSLOG_H */
88096diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
88097index a8cc4e1..98d3b85 100644
88098--- a/include/linux/thread_info.h
88099+++ b/include/linux/thread_info.h
88100@@ -23,7 +23,7 @@ struct restart_block {
88101 };
88102 /* For futex_wait and futex_wait_requeue_pi */
88103 struct {
88104- u32 *uaddr;
88105+ u32 __user *uaddr;
88106 u32 val;
88107 u32 flags;
88108 u32 bitset;
88109diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
88110index 1eb44a9..f582df3 100644
88111--- a/include/linux/tracehook.h
88112+++ b/include/linux/tracehook.h
88113@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
88114 /*
88115 * ptrace report for syscall entry and exit looks identical.
88116 */
88117-static inline void ptrace_report_syscall(struct pt_regs *regs)
88118+static inline int ptrace_report_syscall(struct pt_regs *regs)
88119 {
88120 int ptrace = task_ptrace(current);
88121
88122 if (!(ptrace & PT_PTRACED))
88123- return;
88124+ return 0;
88125
88126 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
88127
88128@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88129 send_sig(current->exit_code, current, 1);
88130 current->exit_code = 0;
88131 }
88132+
88133+ return fatal_signal_pending(current);
88134 }
88135
88136 /**
88137@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
88138 static inline __must_check int tracehook_report_syscall_entry(
88139 struct pt_regs *regs)
88140 {
88141- ptrace_report_syscall(regs);
88142- return 0;
88143+ return ptrace_report_syscall(regs);
88144 }
88145
88146 /**
88147diff --git a/include/linux/tty.h b/include/linux/tty.h
88148index e9c57e9..ee6d489 100644
88149--- a/include/linux/tty.h
88150+++ b/include/linux/tty.h
88151@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
88152 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
88153 extern void tty_ldisc_enable(struct tty_struct *tty);
88154
88155-
88156 /* n_tty.c */
88157 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
88158
88159diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
88160index 0c4ee9b..9f7c426 100644
88161--- a/include/linux/tty_ldisc.h
88162+++ b/include/linux/tty_ldisc.h
88163@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
88164
88165 struct module *owner;
88166
88167- int refcount;
88168+ atomic_t refcount;
88169 };
88170
88171 struct tty_ldisc {
88172diff --git a/include/linux/types.h b/include/linux/types.h
88173index c42724f..d190eee 100644
88174--- a/include/linux/types.h
88175+++ b/include/linux/types.h
88176@@ -191,10 +191,26 @@ typedef struct {
88177 volatile int counter;
88178 } atomic_t;
88179
88180+#ifdef CONFIG_PAX_REFCOUNT
88181+typedef struct {
88182+ volatile int counter;
88183+} atomic_unchecked_t;
88184+#else
88185+typedef atomic_t atomic_unchecked_t;
88186+#endif
88187+
88188 #ifdef CONFIG_64BIT
88189 typedef struct {
88190 volatile long counter;
88191 } atomic64_t;
88192+
88193+#ifdef CONFIG_PAX_REFCOUNT
88194+typedef struct {
88195+ volatile long counter;
88196+} atomic64_unchecked_t;
88197+#else
88198+typedef atomic64_t atomic64_unchecked_t;
88199+#endif
88200 #endif
88201
88202 struct ustat {
88203diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
88204index 6b58367..53a3e8e 100644
88205--- a/include/linux/uaccess.h
88206+++ b/include/linux/uaccess.h
88207@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
88208 long ret; \
88209 mm_segment_t old_fs = get_fs(); \
88210 \
88211- set_fs(KERNEL_DS); \
88212 pagefault_disable(); \
88213- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
88214- pagefault_enable(); \
88215+ set_fs(KERNEL_DS); \
88216+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
88217 set_fs(old_fs); \
88218+ pagefault_enable(); \
88219 ret; \
88220 })
88221
88222@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
88223 * Safely read from address @src to the buffer at @dst. If a kernel fault
88224 * happens, handle that and return -EFAULT.
88225 */
88226-extern long probe_kernel_read(void *dst, void *src, size_t size);
88227+extern long probe_kernel_read(void *dst, const void *src, size_t size);
88228
88229 /*
88230 * probe_kernel_write(): safely attempt to write to a location
88231@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
88232 * Safely write to address @dst from the buffer at @src. If a kernel fault
88233 * happens, handle that and return -EFAULT.
88234 */
88235-extern long probe_kernel_write(void *dst, void *src, size_t size);
88236+extern long probe_kernel_write(void *dst, const void *src, size_t size);
88237
88238 #endif /* __LINUX_UACCESS_H__ */
88239diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
88240index 99c1b4d..bb94261 100644
88241--- a/include/linux/unaligned/access_ok.h
88242+++ b/include/linux/unaligned/access_ok.h
88243@@ -6,32 +6,32 @@
88244
88245 static inline u16 get_unaligned_le16(const void *p)
88246 {
88247- return le16_to_cpup((__le16 *)p);
88248+ return le16_to_cpup((const __le16 *)p);
88249 }
88250
88251 static inline u32 get_unaligned_le32(const void *p)
88252 {
88253- return le32_to_cpup((__le32 *)p);
88254+ return le32_to_cpup((const __le32 *)p);
88255 }
88256
88257 static inline u64 get_unaligned_le64(const void *p)
88258 {
88259- return le64_to_cpup((__le64 *)p);
88260+ return le64_to_cpup((const __le64 *)p);
88261 }
88262
88263 static inline u16 get_unaligned_be16(const void *p)
88264 {
88265- return be16_to_cpup((__be16 *)p);
88266+ return be16_to_cpup((const __be16 *)p);
88267 }
88268
88269 static inline u32 get_unaligned_be32(const void *p)
88270 {
88271- return be32_to_cpup((__be32 *)p);
88272+ return be32_to_cpup((const __be32 *)p);
88273 }
88274
88275 static inline u64 get_unaligned_be64(const void *p)
88276 {
88277- return be64_to_cpup((__be64 *)p);
88278+ return be64_to_cpup((const __be64 *)p);
88279 }
88280
88281 static inline void put_unaligned_le16(u16 val, void *p)
88282diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
88283index 79b9837..b5a56f9 100644
88284--- a/include/linux/vermagic.h
88285+++ b/include/linux/vermagic.h
88286@@ -26,9 +26,35 @@
88287 #define MODULE_ARCH_VERMAGIC ""
88288 #endif
88289
88290+#ifdef CONFIG_PAX_REFCOUNT
88291+#define MODULE_PAX_REFCOUNT "REFCOUNT "
88292+#else
88293+#define MODULE_PAX_REFCOUNT ""
88294+#endif
88295+
88296+#ifdef CONSTIFY_PLUGIN
88297+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
88298+#else
88299+#define MODULE_CONSTIFY_PLUGIN ""
88300+#endif
88301+
88302+#ifdef STACKLEAK_PLUGIN
88303+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
88304+#else
88305+#define MODULE_STACKLEAK_PLUGIN ""
88306+#endif
88307+
88308+#ifdef CONFIG_GRKERNSEC
88309+#define MODULE_GRSEC "GRSEC "
88310+#else
88311+#define MODULE_GRSEC ""
88312+#endif
88313+
88314 #define VERMAGIC_STRING \
88315 UTS_RELEASE " " \
88316 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
88317 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
88318- MODULE_ARCH_VERMAGIC
88319+ MODULE_ARCH_VERMAGIC \
88320+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
88321+ MODULE_GRSEC
88322
88323diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
88324index 819a634..462ac12 100644
88325--- a/include/linux/vmalloc.h
88326+++ b/include/linux/vmalloc.h
88327@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
88328 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
88329 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
88330 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
88331+
88332+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
88333+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
88334+#endif
88335+
88336 /* bits [20..32] reserved for arch specific ioremap internals */
88337
88338 /*
88339@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
88340
88341 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
88342
88343+#define vmalloc(x) \
88344+({ \
88345+ void *___retval; \
88346+ intoverflow_t ___x = (intoverflow_t)x; \
88347+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
88348+ ___retval = NULL; \
88349+ else \
88350+ ___retval = vmalloc((unsigned long)___x); \
88351+ ___retval; \
88352+})
88353+
88354+#define __vmalloc(x, y, z) \
88355+({ \
88356+ void *___retval; \
88357+ intoverflow_t ___x = (intoverflow_t)x; \
88358+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
88359+ ___retval = NULL; \
88360+ else \
88361+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
88362+ ___retval; \
88363+})
88364+
88365+#define vmalloc_user(x) \
88366+({ \
88367+ void *___retval; \
88368+ intoverflow_t ___x = (intoverflow_t)x; \
88369+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
88370+ ___retval = NULL; \
88371+ else \
88372+ ___retval = vmalloc_user((unsigned long)___x); \
88373+ ___retval; \
88374+})
88375+
88376+#define vmalloc_exec(x) \
88377+({ \
88378+ void *___retval; \
88379+ intoverflow_t ___x = (intoverflow_t)x; \
88380+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
88381+ ___retval = NULL; \
88382+ else \
88383+ ___retval = vmalloc_exec((unsigned long)___x); \
88384+ ___retval; \
88385+})
88386+
88387+#define vmalloc_node(x, y) \
88388+({ \
88389+ void *___retval; \
88390+ intoverflow_t ___x = (intoverflow_t)x; \
88391+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
88392+ ___retval = NULL; \
88393+ else \
88394+ ___retval = vmalloc_node((unsigned long)___x, (y));\
88395+ ___retval; \
88396+})
88397+
88398+#define vmalloc_32(x) \
88399+({ \
88400+ void *___retval; \
88401+ intoverflow_t ___x = (intoverflow_t)x; \
88402+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
88403+ ___retval = NULL; \
88404+ else \
88405+ ___retval = vmalloc_32((unsigned long)___x); \
88406+ ___retval; \
88407+})
88408+
88409+#define vmalloc_32_user(x) \
88410+({ \
88411+ void *___retval; \
88412+ intoverflow_t ___x = (intoverflow_t)x; \
88413+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
88414+ ___retval = NULL; \
88415+ else \
88416+ ___retval = vmalloc_32_user((unsigned long)___x);\
88417+ ___retval; \
88418+})
88419+
88420 #endif /* _LINUX_VMALLOC_H */
88421diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
88422index 13070d6..aa4159a 100644
88423--- a/include/linux/vmstat.h
88424+++ b/include/linux/vmstat.h
88425@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
88426 /*
88427 * Zone based page accounting with per cpu differentials.
88428 */
88429-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
88430+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
88431
88432 static inline void zone_page_state_add(long x, struct zone *zone,
88433 enum zone_stat_item item)
88434 {
88435- atomic_long_add(x, &zone->vm_stat[item]);
88436- atomic_long_add(x, &vm_stat[item]);
88437+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
88438+ atomic_long_add_unchecked(x, &vm_stat[item]);
88439 }
88440
88441 static inline unsigned long global_page_state(enum zone_stat_item item)
88442 {
88443- long x = atomic_long_read(&vm_stat[item]);
88444+ long x = atomic_long_read_unchecked(&vm_stat[item]);
88445 #ifdef CONFIG_SMP
88446 if (x < 0)
88447 x = 0;
88448@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
88449 static inline unsigned long zone_page_state(struct zone *zone,
88450 enum zone_stat_item item)
88451 {
88452- long x = atomic_long_read(&zone->vm_stat[item]);
88453+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
88454 #ifdef CONFIG_SMP
88455 if (x < 0)
88456 x = 0;
88457@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
88458 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
88459 enum zone_stat_item item)
88460 {
88461- long x = atomic_long_read(&zone->vm_stat[item]);
88462+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
88463
88464 #ifdef CONFIG_SMP
88465 int cpu;
88466@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
88467
88468 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
88469 {
88470- atomic_long_inc(&zone->vm_stat[item]);
88471- atomic_long_inc(&vm_stat[item]);
88472+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
88473+ atomic_long_inc_unchecked(&vm_stat[item]);
88474 }
88475
88476 static inline void __inc_zone_page_state(struct page *page,
88477@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
88478
88479 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
88480 {
88481- atomic_long_dec(&zone->vm_stat[item]);
88482- atomic_long_dec(&vm_stat[item]);
88483+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
88484+ atomic_long_dec_unchecked(&vm_stat[item]);
88485 }
88486
88487 static inline void __dec_zone_page_state(struct page *page,
88488diff --git a/include/linux/xattr.h b/include/linux/xattr.h
88489index 5c84af8..1a3b6e2 100644
88490--- a/include/linux/xattr.h
88491+++ b/include/linux/xattr.h
88492@@ -33,6 +33,11 @@
88493 #define XATTR_USER_PREFIX "user."
88494 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
88495
88496+/* User namespace */
88497+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
88498+#define XATTR_PAX_FLAGS_SUFFIX "flags"
88499+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
88500+
88501 struct inode;
88502 struct dentry;
88503
88504diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
88505index eed5fcc..5080d24 100644
88506--- a/include/media/saa7146_vv.h
88507+++ b/include/media/saa7146_vv.h
88508@@ -167,7 +167,7 @@ struct saa7146_ext_vv
88509 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
88510
88511 /* the extension can override this */
88512- struct v4l2_ioctl_ops ops;
88513+ v4l2_ioctl_ops_no_const ops;
88514 /* pointer to the saa7146 core ops */
88515 const struct v4l2_ioctl_ops *core_ops;
88516
88517diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
88518index 73c9867..2da8837 100644
88519--- a/include/media/v4l2-dev.h
88520+++ b/include/media/v4l2-dev.h
88521@@ -34,7 +34,7 @@ struct v4l2_device;
88522 #define V4L2_FL_UNREGISTERED (0)
88523
88524 struct v4l2_file_operations {
88525- struct module *owner;
88526+ struct module * const owner;
88527 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
88528 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
88529 unsigned int (*poll) (struct file *, struct poll_table_struct *);
88530@@ -46,6 +46,7 @@ struct v4l2_file_operations {
88531 int (*open) (struct file *);
88532 int (*release) (struct file *);
88533 };
88534+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
88535
88536 /*
88537 * Newer version of video_device, handled by videodev2.c
88538diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
88539index 5d5d550..f559ef1 100644
88540--- a/include/media/v4l2-device.h
88541+++ b/include/media/v4l2-device.h
88542@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
88543 this function returns 0. If the name ends with a digit (e.g. cx18),
88544 then the name will be set to cx18-0 since cx180 looks really odd. */
88545 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
88546- atomic_t *instance);
88547+ atomic_unchecked_t *instance);
88548
88549 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
88550 Since the parent disappears this ensures that v4l2_dev doesn't have an
88551diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
88552index 7a4529d..7244290 100644
88553--- a/include/media/v4l2-ioctl.h
88554+++ b/include/media/v4l2-ioctl.h
88555@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
88556 long (*vidioc_default) (struct file *file, void *fh,
88557 int cmd, void *arg);
88558 };
88559+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
88560
88561
88562 /* v4l debugging and diagnostics */
88563diff --git a/include/net/flow.h b/include/net/flow.h
88564index 809970b..c3df4f3 100644
88565--- a/include/net/flow.h
88566+++ b/include/net/flow.h
88567@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
88568 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
88569 u8 dir, flow_resolve_t resolver);
88570 extern void flow_cache_flush(void);
88571-extern atomic_t flow_cache_genid;
88572+extern atomic_unchecked_t flow_cache_genid;
88573
88574 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
88575 {
88576diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
88577index 15e1f8fe..668837c 100644
88578--- a/include/net/inetpeer.h
88579+++ b/include/net/inetpeer.h
88580@@ -24,7 +24,7 @@ struct inet_peer
88581 __u32 dtime; /* the time of last use of not
88582 * referenced entries */
88583 atomic_t refcnt;
88584- atomic_t rid; /* Frag reception counter */
88585+ atomic_unchecked_t rid; /* Frag reception counter */
88586 __u32 tcp_ts;
88587 unsigned long tcp_ts_stamp;
88588 };
88589diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
88590index 98978e7..2243a3d 100644
88591--- a/include/net/ip_vs.h
88592+++ b/include/net/ip_vs.h
88593@@ -365,7 +365,7 @@ struct ip_vs_conn {
88594 struct ip_vs_conn *control; /* Master control connection */
88595 atomic_t n_control; /* Number of controlled ones */
88596 struct ip_vs_dest *dest; /* real server */
88597- atomic_t in_pkts; /* incoming packet counter */
88598+ atomic_unchecked_t in_pkts; /* incoming packet counter */
88599
88600 /* packet transmitter for different forwarding methods. If it
88601 mangles the packet, it must return NF_DROP or better NF_STOLEN,
88602@@ -466,7 +466,7 @@ struct ip_vs_dest {
88603 union nf_inet_addr addr; /* IP address of the server */
88604 __be16 port; /* port number of the server */
88605 volatile unsigned flags; /* dest status flags */
88606- atomic_t conn_flags; /* flags to copy to conn */
88607+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
88608 atomic_t weight; /* server weight */
88609
88610 atomic_t refcnt; /* reference counter */
88611diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
88612index 69b610a..fe3962c 100644
88613--- a/include/net/irda/ircomm_core.h
88614+++ b/include/net/irda/ircomm_core.h
88615@@ -51,7 +51,7 @@ typedef struct {
88616 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
88617 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
88618 struct ircomm_info *);
88619-} call_t;
88620+} __no_const call_t;
88621
88622 struct ircomm_cb {
88623 irda_queue_t queue;
88624diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
88625index eea2e61..08c692d 100644
88626--- a/include/net/irda/ircomm_tty.h
88627+++ b/include/net/irda/ircomm_tty.h
88628@@ -35,6 +35,7 @@
88629 #include <linux/termios.h>
88630 #include <linux/timer.h>
88631 #include <linux/tty.h> /* struct tty_struct */
88632+#include <asm/local.h>
88633
88634 #include <net/irda/irias_object.h>
88635 #include <net/irda/ircomm_core.h>
88636@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
88637 unsigned short close_delay;
88638 unsigned short closing_wait; /* time to wait before closing */
88639
88640- int open_count;
88641- int blocked_open; /* # of blocked opens */
88642+ local_t open_count;
88643+ local_t blocked_open; /* # of blocked opens */
88644
88645 /* Protect concurent access to :
88646 * o self->open_count
88647diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
88648index f82a1e8..82d81e8 100644
88649--- a/include/net/iucv/af_iucv.h
88650+++ b/include/net/iucv/af_iucv.h
88651@@ -87,7 +87,7 @@ struct iucv_sock {
88652 struct iucv_sock_list {
88653 struct hlist_head head;
88654 rwlock_t lock;
88655- atomic_t autobind_name;
88656+ atomic_unchecked_t autobind_name;
88657 };
88658
88659 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
88660diff --git a/include/net/lapb.h b/include/net/lapb.h
88661index 96cb5dd..25e8d4f 100644
88662--- a/include/net/lapb.h
88663+++ b/include/net/lapb.h
88664@@ -95,7 +95,7 @@ struct lapb_cb {
88665 struct sk_buff_head write_queue;
88666 struct sk_buff_head ack_queue;
88667 unsigned char window;
88668- struct lapb_register_struct callbacks;
88669+ struct lapb_register_struct *callbacks;
88670
88671 /* FRMR control information */
88672 struct lapb_frame frmr_data;
88673diff --git a/include/net/neighbour.h b/include/net/neighbour.h
88674index 3817fda..cdb2343 100644
88675--- a/include/net/neighbour.h
88676+++ b/include/net/neighbour.h
88677@@ -131,7 +131,7 @@ struct neigh_ops
88678 int (*connected_output)(struct sk_buff*);
88679 int (*hh_output)(struct sk_buff*);
88680 int (*queue_xmit)(struct sk_buff*);
88681-};
88682+} __do_const;
88683
88684 struct pneigh_entry
88685 {
88686diff --git a/include/net/netlink.h b/include/net/netlink.h
88687index c344646..4778c71 100644
88688--- a/include/net/netlink.h
88689+++ b/include/net/netlink.h
88690@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
88691 {
88692 return (remaining >= (int) sizeof(struct nlmsghdr) &&
88693 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
88694- nlh->nlmsg_len <= remaining);
88695+ nlh->nlmsg_len <= (unsigned int)remaining);
88696 }
88697
88698 /**
88699@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
88700 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
88701 {
88702 if (mark)
88703- skb_trim(skb, (unsigned char *) mark - skb->data);
88704+ skb_trim(skb, (const unsigned char *) mark - skb->data);
88705 }
88706
88707 /**
88708diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
88709index 9a4b8b7..e49e077 100644
88710--- a/include/net/netns/ipv4.h
88711+++ b/include/net/netns/ipv4.h
88712@@ -54,7 +54,7 @@ struct netns_ipv4 {
88713 int current_rt_cache_rebuild_count;
88714
88715 struct timer_list rt_secret_timer;
88716- atomic_t rt_genid;
88717+ atomic_unchecked_t rt_genid;
88718
88719 #ifdef CONFIG_IP_MROUTE
88720 struct sock *mroute_sk;
88721diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
88722index 8a6d529..171f401 100644
88723--- a/include/net/sctp/sctp.h
88724+++ b/include/net/sctp/sctp.h
88725@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
88726
88727 #else /* SCTP_DEBUG */
88728
88729-#define SCTP_DEBUG_PRINTK(whatever...)
88730-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
88731+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
88732+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
88733 #define SCTP_ENABLE_DEBUG
88734 #define SCTP_DISABLE_DEBUG
88735 #define SCTP_ASSERT(expr, str, func)
88736diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
88737index d97f689..f3b90ab 100644
88738--- a/include/net/secure_seq.h
88739+++ b/include/net/secure_seq.h
88740@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
88741 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
88742 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
88743 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
88744- __be16 dport);
88745+ __be16 dport);
88746 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
88747 __be16 sport, __be16 dport);
88748 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
88749- __be16 sport, __be16 dport);
88750+ __be16 sport, __be16 dport);
88751 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
88752- __be16 sport, __be16 dport);
88753+ __be16 sport, __be16 dport);
88754 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
88755- __be16 sport, __be16 dport);
88756+ __be16 sport, __be16 dport);
88757
88758 #endif /* _NET_SECURE_SEQ */
88759diff --git a/include/net/sock.h b/include/net/sock.h
88760index 78adf52..99afd29 100644
88761--- a/include/net/sock.h
88762+++ b/include/net/sock.h
88763@@ -272,7 +272,7 @@ struct sock {
88764 rwlock_t sk_callback_lock;
88765 int sk_err,
88766 sk_err_soft;
88767- atomic_t sk_drops;
88768+ atomic_unchecked_t sk_drops;
88769 unsigned short sk_ack_backlog;
88770 unsigned short sk_max_ack_backlog;
88771 __u32 sk_priority;
88772@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
88773 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
88774 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
88775 #else
88776-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
88777+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
88778 int inc)
88779 {
88780 }
88781diff --git a/include/net/tcp.h b/include/net/tcp.h
88782index 6cfe18b..dd21acb 100644
88783--- a/include/net/tcp.h
88784+++ b/include/net/tcp.h
88785@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
88786 struct tcp_seq_afinfo {
88787 char *name;
88788 sa_family_t family;
88789- struct file_operations seq_fops;
88790- struct seq_operations seq_ops;
88791+ file_operations_no_const seq_fops;
88792+ seq_operations_no_const seq_ops;
88793 };
88794
88795 struct tcp_iter_state {
88796diff --git a/include/net/udp.h b/include/net/udp.h
88797index f98abd2..b4b042f 100644
88798--- a/include/net/udp.h
88799+++ b/include/net/udp.h
88800@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
88801 char *name;
88802 sa_family_t family;
88803 struct udp_table *udp_table;
88804- struct file_operations seq_fops;
88805- struct seq_operations seq_ops;
88806+ file_operations_no_const seq_fops;
88807+ seq_operations_no_const seq_ops;
88808 };
88809
88810 struct udp_iter_state {
88811diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
88812index cbb822e..e9c1cbe 100644
88813--- a/include/rdma/iw_cm.h
88814+++ b/include/rdma/iw_cm.h
88815@@ -129,7 +129,7 @@ struct iw_cm_verbs {
88816 int backlog);
88817
88818 int (*destroy_listen)(struct iw_cm_id *cm_id);
88819-};
88820+} __no_const;
88821
88822 /**
88823 * iw_create_cm_id - Create an IW CM identifier.
88824diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
88825index 09a124b..caa8ca8 100644
88826--- a/include/scsi/libfc.h
88827+++ b/include/scsi/libfc.h
88828@@ -675,6 +675,7 @@ struct libfc_function_template {
88829 */
88830 void (*disc_stop_final) (struct fc_lport *);
88831 };
88832+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
88833
88834 /* information used by the discovery layer */
88835 struct fc_disc {
88836@@ -707,7 +708,7 @@ struct fc_lport {
88837 struct fc_disc disc;
88838
88839 /* Operational Information */
88840- struct libfc_function_template tt;
88841+ libfc_function_template_no_const tt;
88842 u8 link_up;
88843 u8 qfull;
88844 enum fc_lport_state state;
88845diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
88846index de8e180..f15e0d7 100644
88847--- a/include/scsi/scsi_device.h
88848+++ b/include/scsi/scsi_device.h
88849@@ -156,9 +156,9 @@ struct scsi_device {
88850 unsigned int max_device_blocked; /* what device_blocked counts down from */
88851 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
88852
88853- atomic_t iorequest_cnt;
88854- atomic_t iodone_cnt;
88855- atomic_t ioerr_cnt;
88856+ atomic_unchecked_t iorequest_cnt;
88857+ atomic_unchecked_t iodone_cnt;
88858+ atomic_unchecked_t ioerr_cnt;
88859
88860 struct device sdev_gendev,
88861 sdev_dev;
88862diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
88863index 0b4baba..0106e9e 100644
88864--- a/include/scsi/scsi_host.h
88865+++ b/include/scsi/scsi_host.h
88866@@ -43,6 +43,12 @@ struct blk_queue_tags;
88867 #define DISABLE_CLUSTERING 0
88868 #define ENABLE_CLUSTERING 1
88869
88870+enum {
88871+ SCSI_QDEPTH_DEFAULT, /* default requested change, e.g. from sysfs */
88872+ SCSI_QDEPTH_QFULL, /* scsi-ml requested due to queue full */
88873+ SCSI_QDEPTH_RAMP_UP, /* scsi-ml requested due to threshhold event */
88874+};
88875+
88876 struct scsi_host_template {
88877 struct module *module;
88878 const char *name;
88879diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
88880index fc50bd6..81ba9cb 100644
88881--- a/include/scsi/scsi_transport_fc.h
88882+++ b/include/scsi/scsi_transport_fc.h
88883@@ -708,7 +708,7 @@ struct fc_function_template {
88884 unsigned long show_host_system_hostname:1;
88885
88886 unsigned long disable_target_scan:1;
88887-};
88888+} __do_const;
88889
88890
88891 /**
88892diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
88893index 3dae3f7..8440d6f 100644
88894--- a/include/sound/ac97_codec.h
88895+++ b/include/sound/ac97_codec.h
88896@@ -419,15 +419,15 @@
88897 struct snd_ac97;
88898
88899 struct snd_ac97_build_ops {
88900- int (*build_3d) (struct snd_ac97 *ac97);
88901- int (*build_specific) (struct snd_ac97 *ac97);
88902- int (*build_spdif) (struct snd_ac97 *ac97);
88903- int (*build_post_spdif) (struct snd_ac97 *ac97);
88904+ int (* const build_3d) (struct snd_ac97 *ac97);
88905+ int (* const build_specific) (struct snd_ac97 *ac97);
88906+ int (* const build_spdif) (struct snd_ac97 *ac97);
88907+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
88908 #ifdef CONFIG_PM
88909- void (*suspend) (struct snd_ac97 *ac97);
88910- void (*resume) (struct snd_ac97 *ac97);
88911+ void (* const suspend) (struct snd_ac97 *ac97);
88912+ void (* const resume) (struct snd_ac97 *ac97);
88913 #endif
88914- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
88915+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
88916 };
88917
88918 struct snd_ac97_bus_ops {
88919@@ -477,7 +477,7 @@ struct snd_ac97_template {
88920
88921 struct snd_ac97 {
88922 /* -- lowlevel (hardware) driver specific -- */
88923- struct snd_ac97_build_ops * build_ops;
88924+ const struct snd_ac97_build_ops * build_ops;
88925 void *private_data;
88926 void (*private_free) (struct snd_ac97 *ac97);
88927 /* --- */
88928diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
88929index 891cf1a..a94ba2b 100644
88930--- a/include/sound/ak4xxx-adda.h
88931+++ b/include/sound/ak4xxx-adda.h
88932@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
88933 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
88934 unsigned char val);
88935 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
88936-};
88937+} __no_const;
88938
88939 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
88940
88941diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
88942index 8c05e47..2b5df97 100644
88943--- a/include/sound/hwdep.h
88944+++ b/include/sound/hwdep.h
88945@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
88946 struct snd_hwdep_dsp_status *status);
88947 int (*dsp_load)(struct snd_hwdep *hw,
88948 struct snd_hwdep_dsp_image *image);
88949-};
88950+} __no_const;
88951
88952 struct snd_hwdep {
88953 struct snd_card *card;
88954diff --git a/include/sound/info.h b/include/sound/info.h
88955index 112e894..6fda5b5 100644
88956--- a/include/sound/info.h
88957+++ b/include/sound/info.h
88958@@ -44,7 +44,7 @@ struct snd_info_entry_text {
88959 struct snd_info_buffer *buffer);
88960 void (*write)(struct snd_info_entry *entry,
88961 struct snd_info_buffer *buffer);
88962-};
88963+} __no_const;
88964
88965 struct snd_info_entry_ops {
88966 int (*open)(struct snd_info_entry *entry,
88967diff --git a/include/sound/pcm.h b/include/sound/pcm.h
88968index de6d981..590a550 100644
88969--- a/include/sound/pcm.h
88970+++ b/include/sound/pcm.h
88971@@ -80,6 +80,7 @@ struct snd_pcm_ops {
88972 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
88973 int (*ack)(struct snd_pcm_substream *substream);
88974 };
88975+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
88976
88977 /*
88978 *
88979diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
88980index 736eac7..fe8a80f 100644
88981--- a/include/sound/sb16_csp.h
88982+++ b/include/sound/sb16_csp.h
88983@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
88984 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
88985 int (*csp_stop) (struct snd_sb_csp * p);
88986 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
88987-};
88988+} __no_const;
88989
88990 /*
88991 * CSP private data
88992diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
88993index 444cd6b..3327cc5 100644
88994--- a/include/sound/ymfpci.h
88995+++ b/include/sound/ymfpci.h
88996@@ -358,7 +358,7 @@ struct snd_ymfpci {
88997 spinlock_t reg_lock;
88998 spinlock_t voice_lock;
88999 wait_queue_head_t interrupt_sleep;
89000- atomic_t interrupt_sleep_count;
89001+ atomic_unchecked_t interrupt_sleep_count;
89002 struct snd_info_entry *proc_entry;
89003 const struct firmware *dsp_microcode;
89004 const struct firmware *controller_microcode;
89005diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
89006index b89f9db..f097b38 100644
89007--- a/include/trace/events/irq.h
89008+++ b/include/trace/events/irq.h
89009@@ -34,7 +34,7 @@
89010 */
89011 TRACE_EVENT(irq_handler_entry,
89012
89013- TP_PROTO(int irq, struct irqaction *action),
89014+ TP_PROTO(int irq, const struct irqaction *action),
89015
89016 TP_ARGS(irq, action),
89017
89018@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
89019 */
89020 TRACE_EVENT(irq_handler_exit,
89021
89022- TP_PROTO(int irq, struct irqaction *action, int ret),
89023+ TP_PROTO(int irq, const struct irqaction *action, int ret),
89024
89025 TP_ARGS(irq, action, ret),
89026
89027@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
89028 */
89029 TRACE_EVENT(softirq_entry,
89030
89031- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89032+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89033
89034 TP_ARGS(h, vec),
89035
89036@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
89037 */
89038 TRACE_EVENT(softirq_exit,
89039
89040- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
89041+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
89042
89043 TP_ARGS(h, vec),
89044
89045diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
89046index 0993a22..32ba2fe 100644
89047--- a/include/video/uvesafb.h
89048+++ b/include/video/uvesafb.h
89049@@ -177,6 +177,7 @@ struct uvesafb_par {
89050 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
89051 u8 pmi_setpal; /* PMI for palette changes */
89052 u16 *pmi_base; /* protected mode interface location */
89053+ u8 *pmi_code; /* protected mode code location */
89054 void *pmi_start;
89055 void *pmi_pal;
89056 u8 *vbe_state_orig; /*
89057diff --git a/init/Kconfig b/init/Kconfig
89058index d72691b..3996e54 100644
89059--- a/init/Kconfig
89060+++ b/init/Kconfig
89061@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
89062
89063 config COMPAT_BRK
89064 bool "Disable heap randomization"
89065- default y
89066+ default n
89067 help
89068 Randomizing heap placement makes heap exploits harder, but it
89069 also breaks ancient binaries (including anything libc5 based).
89070diff --git a/init/do_mounts.c b/init/do_mounts.c
89071index bb008d0..4fa3933 100644
89072--- a/init/do_mounts.c
89073+++ b/init/do_mounts.c
89074@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
89075
89076 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
89077 {
89078- int err = sys_mount(name, "/root", fs, flags, data);
89079+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
89080 if (err)
89081 return err;
89082
89083- sys_chdir("/root");
89084+ sys_chdir((__force const char __user *)"/root");
89085 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
89086 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
89087 current->fs->pwd.mnt->mnt_sb->s_type->name,
89088@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
89089 va_start(args, fmt);
89090 vsprintf(buf, fmt, args);
89091 va_end(args);
89092- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
89093+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
89094 if (fd >= 0) {
89095 sys_ioctl(fd, FDEJECT, 0);
89096 sys_close(fd);
89097 }
89098 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
89099- fd = sys_open("/dev/console", O_RDWR, 0);
89100+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
89101 if (fd >= 0) {
89102 sys_ioctl(fd, TCGETS, (long)&termios);
89103 termios.c_lflag &= ~ICANON;
89104 sys_ioctl(fd, TCSETSF, (long)&termios);
89105- sys_read(fd, &c, 1);
89106+ sys_read(fd, (char __user *)&c, 1);
89107 termios.c_lflag |= ICANON;
89108 sys_ioctl(fd, TCSETSF, (long)&termios);
89109 sys_close(fd);
89110@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
89111 mount_root();
89112 out:
89113 devtmpfs_mount("dev");
89114- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89115- sys_chroot(".");
89116+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
89117+ sys_chroot((__force char __user *)".");
89118 }
89119diff --git a/init/do_mounts.h b/init/do_mounts.h
89120index f5b978a..69dbfe8 100644
89121--- a/init/do_mounts.h
89122+++ b/init/do_mounts.h
89123@@ -15,15 +15,15 @@ extern int root_mountflags;
89124
89125 static inline int create_dev(char *name, dev_t dev)
89126 {
89127- sys_unlink(name);
89128- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
89129+ sys_unlink((char __force_user *)name);
89130+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
89131 }
89132
89133 #if BITS_PER_LONG == 32
89134 static inline u32 bstat(char *name)
89135 {
89136 struct stat64 stat;
89137- if (sys_stat64(name, &stat) != 0)
89138+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
89139 return 0;
89140 if (!S_ISBLK(stat.st_mode))
89141 return 0;
89142@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
89143 static inline u32 bstat(char *name)
89144 {
89145 struct stat stat;
89146- if (sys_newstat(name, &stat) != 0)
89147+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
89148 return 0;
89149 if (!S_ISBLK(stat.st_mode))
89150 return 0;
89151diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
89152index 614241b..4da046b 100644
89153--- a/init/do_mounts_initrd.c
89154+++ b/init/do_mounts_initrd.c
89155@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
89156 sys_close(old_fd);sys_close(root_fd);
89157 sys_close(0);sys_close(1);sys_close(2);
89158 sys_setsid();
89159- (void) sys_open("/dev/console",O_RDWR,0);
89160+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
89161 (void) sys_dup(0);
89162 (void) sys_dup(0);
89163 return kernel_execve(shell, argv, envp_init);
89164@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
89165 create_dev("/dev/root.old", Root_RAM0);
89166 /* mount initrd on rootfs' /root */
89167 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
89168- sys_mkdir("/old", 0700);
89169- root_fd = sys_open("/", 0, 0);
89170- old_fd = sys_open("/old", 0, 0);
89171+ sys_mkdir((const char __force_user *)"/old", 0700);
89172+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
89173+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
89174 /* move initrd over / and chdir/chroot in initrd root */
89175- sys_chdir("/root");
89176- sys_mount(".", "/", NULL, MS_MOVE, NULL);
89177- sys_chroot(".");
89178+ sys_chdir((const char __force_user *)"/root");
89179+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
89180+ sys_chroot((const char __force_user *)".");
89181
89182 /*
89183 * In case that a resume from disk is carried out by linuxrc or one of
89184@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
89185
89186 /* move initrd to rootfs' /old */
89187 sys_fchdir(old_fd);
89188- sys_mount("/", ".", NULL, MS_MOVE, NULL);
89189+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
89190 /* switch root and cwd back to / of rootfs */
89191 sys_fchdir(root_fd);
89192- sys_chroot(".");
89193+ sys_chroot((const char __force_user *)".");
89194 sys_close(old_fd);
89195 sys_close(root_fd);
89196
89197 if (new_decode_dev(real_root_dev) == Root_RAM0) {
89198- sys_chdir("/old");
89199+ sys_chdir((const char __force_user *)"/old");
89200 return;
89201 }
89202
89203@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
89204 mount_root();
89205
89206 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
89207- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
89208+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
89209 if (!error)
89210 printk("okay\n");
89211 else {
89212- int fd = sys_open("/dev/root.old", O_RDWR, 0);
89213+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
89214 if (error == -ENOENT)
89215 printk("/initrd does not exist. Ignored.\n");
89216 else
89217 printk("failed\n");
89218 printk(KERN_NOTICE "Unmounting old root\n");
89219- sys_umount("/old", MNT_DETACH);
89220+ sys_umount((char __force_user *)"/old", MNT_DETACH);
89221 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
89222 if (fd < 0) {
89223 error = fd;
89224@@ -119,11 +119,11 @@ int __init initrd_load(void)
89225 * mounted in the normal path.
89226 */
89227 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
89228- sys_unlink("/initrd.image");
89229+ sys_unlink((const char __force_user *)"/initrd.image");
89230 handle_initrd();
89231 return 1;
89232 }
89233 }
89234- sys_unlink("/initrd.image");
89235+ sys_unlink((const char __force_user *)"/initrd.image");
89236 return 0;
89237 }
89238diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
89239index 69aebbf..c0bf6a7 100644
89240--- a/init/do_mounts_md.c
89241+++ b/init/do_mounts_md.c
89242@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
89243 partitioned ? "_d" : "", minor,
89244 md_setup_args[ent].device_names);
89245
89246- fd = sys_open(name, 0, 0);
89247+ fd = sys_open((char __force_user *)name, 0, 0);
89248 if (fd < 0) {
89249 printk(KERN_ERR "md: open failed - cannot start "
89250 "array %s\n", name);
89251@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
89252 * array without it
89253 */
89254 sys_close(fd);
89255- fd = sys_open(name, 0, 0);
89256+ fd = sys_open((char __force_user *)name, 0, 0);
89257 sys_ioctl(fd, BLKRRPART, 0);
89258 }
89259 sys_close(fd);
89260@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
89261
89262 wait_for_device_probe();
89263
89264- fd = sys_open("/dev/md0", 0, 0);
89265+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
89266 if (fd >= 0) {
89267 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
89268 sys_close(fd);
89269diff --git a/init/initramfs.c b/init/initramfs.c
89270index 1fd59b8..a01b079 100644
89271--- a/init/initramfs.c
89272+++ b/init/initramfs.c
89273@@ -74,7 +74,7 @@ static void __init free_hash(void)
89274 }
89275 }
89276
89277-static long __init do_utime(char __user *filename, time_t mtime)
89278+static long __init do_utime(__force char __user *filename, time_t mtime)
89279 {
89280 struct timespec t[2];
89281
89282@@ -109,7 +109,7 @@ static void __init dir_utime(void)
89283 struct dir_entry *de, *tmp;
89284 list_for_each_entry_safe(de, tmp, &dir_list, list) {
89285 list_del(&de->list);
89286- do_utime(de->name, de->mtime);
89287+ do_utime((char __force_user *)de->name, de->mtime);
89288 kfree(de->name);
89289 kfree(de);
89290 }
89291@@ -271,7 +271,7 @@ static int __init maybe_link(void)
89292 if (nlink >= 2) {
89293 char *old = find_link(major, minor, ino, mode, collected);
89294 if (old)
89295- return (sys_link(old, collected) < 0) ? -1 : 1;
89296+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
89297 }
89298 return 0;
89299 }
89300@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
89301 {
89302 struct stat st;
89303
89304- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
89305+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
89306 if (S_ISDIR(st.st_mode))
89307- sys_rmdir(path);
89308+ sys_rmdir((char __force_user *)path);
89309 else
89310- sys_unlink(path);
89311+ sys_unlink((char __force_user *)path);
89312 }
89313 }
89314
89315@@ -305,7 +305,7 @@ static int __init do_name(void)
89316 int openflags = O_WRONLY|O_CREAT;
89317 if (ml != 1)
89318 openflags |= O_TRUNC;
89319- wfd = sys_open(collected, openflags, mode);
89320+ wfd = sys_open((char __force_user *)collected, openflags, mode);
89321
89322 if (wfd >= 0) {
89323 sys_fchown(wfd, uid, gid);
89324@@ -317,17 +317,17 @@ static int __init do_name(void)
89325 }
89326 }
89327 } else if (S_ISDIR(mode)) {
89328- sys_mkdir(collected, mode);
89329- sys_chown(collected, uid, gid);
89330- sys_chmod(collected, mode);
89331+ sys_mkdir((char __force_user *)collected, mode);
89332+ sys_chown((char __force_user *)collected, uid, gid);
89333+ sys_chmod((char __force_user *)collected, mode);
89334 dir_add(collected, mtime);
89335 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
89336 S_ISFIFO(mode) || S_ISSOCK(mode)) {
89337 if (maybe_link() == 0) {
89338- sys_mknod(collected, mode, rdev);
89339- sys_chown(collected, uid, gid);
89340- sys_chmod(collected, mode);
89341- do_utime(collected, mtime);
89342+ sys_mknod((char __force_user *)collected, mode, rdev);
89343+ sys_chown((char __force_user *)collected, uid, gid);
89344+ sys_chmod((char __force_user *)collected, mode);
89345+ do_utime((char __force_user *)collected, mtime);
89346 }
89347 }
89348 return 0;
89349@@ -336,15 +336,15 @@ static int __init do_name(void)
89350 static int __init do_copy(void)
89351 {
89352 if (count >= body_len) {
89353- sys_write(wfd, victim, body_len);
89354+ sys_write(wfd, (char __force_user *)victim, body_len);
89355 sys_close(wfd);
89356- do_utime(vcollected, mtime);
89357+ do_utime((char __force_user *)vcollected, mtime);
89358 kfree(vcollected);
89359 eat(body_len);
89360 state = SkipIt;
89361 return 0;
89362 } else {
89363- sys_write(wfd, victim, count);
89364+ sys_write(wfd, (char __force_user *)victim, count);
89365 body_len -= count;
89366 eat(count);
89367 return 1;
89368@@ -355,9 +355,9 @@ static int __init do_symlink(void)
89369 {
89370 collected[N_ALIGN(name_len) + body_len] = '\0';
89371 clean_path(collected, 0);
89372- sys_symlink(collected + N_ALIGN(name_len), collected);
89373- sys_lchown(collected, uid, gid);
89374- do_utime(collected, mtime);
89375+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
89376+ sys_lchown((char __force_user *)collected, uid, gid);
89377+ do_utime((char __force_user *)collected, mtime);
89378 state = SkipIt;
89379 next_state = Reset;
89380 return 0;
89381diff --git a/init/main.c b/init/main.c
89382index 1eb4bd5..fea5bbe 100644
89383--- a/init/main.c
89384+++ b/init/main.c
89385@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
89386 #ifdef CONFIG_TC
89387 extern void tc_init(void);
89388 #endif
89389+extern void grsecurity_init(void);
89390
89391 enum system_states system_state __read_mostly;
89392 EXPORT_SYMBOL(system_state);
89393@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
89394
89395 __setup("reset_devices", set_reset_devices);
89396
89397+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
89398+extern char pax_enter_kernel_user[];
89399+extern char pax_exit_kernel_user[];
89400+extern pgdval_t clone_pgd_mask;
89401+#endif
89402+
89403+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
89404+static int __init setup_pax_nouderef(char *str)
89405+{
89406+#ifdef CONFIG_X86_32
89407+ unsigned int cpu;
89408+ struct desc_struct *gdt;
89409+
89410+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
89411+ gdt = get_cpu_gdt_table(cpu);
89412+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
89413+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
89414+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
89415+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
89416+ }
89417+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
89418+#else
89419+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
89420+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
89421+ clone_pgd_mask = ~(pgdval_t)0UL;
89422+#endif
89423+
89424+ return 0;
89425+}
89426+early_param("pax_nouderef", setup_pax_nouderef);
89427+#endif
89428+
89429+#ifdef CONFIG_PAX_SOFTMODE
89430+int pax_softmode;
89431+
89432+static int __init setup_pax_softmode(char *str)
89433+{
89434+ get_option(&str, &pax_softmode);
89435+ return 1;
89436+}
89437+__setup("pax_softmode=", setup_pax_softmode);
89438+#endif
89439+
89440 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
89441 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
89442 static const char *panic_later, *panic_param;
89443@@ -705,52 +749,53 @@ int initcall_debug;
89444 core_param(initcall_debug, initcall_debug, bool, 0644);
89445
89446 static char msgbuf[64];
89447-static struct boot_trace_call call;
89448-static struct boot_trace_ret ret;
89449+static struct boot_trace_call trace_call;
89450+static struct boot_trace_ret trace_ret;
89451
89452 int do_one_initcall(initcall_t fn)
89453 {
89454 int count = preempt_count();
89455 ktime_t calltime, delta, rettime;
89456+ const char *msg1 = "", *msg2 = "";
89457
89458 if (initcall_debug) {
89459- call.caller = task_pid_nr(current);
89460- printk("calling %pF @ %i\n", fn, call.caller);
89461+ trace_call.caller = task_pid_nr(current);
89462+ printk("calling %pF @ %i\n", fn, trace_call.caller);
89463 calltime = ktime_get();
89464- trace_boot_call(&call, fn);
89465+ trace_boot_call(&trace_call, fn);
89466 enable_boot_trace();
89467 }
89468
89469- ret.result = fn();
89470+ trace_ret.result = fn();
89471
89472 if (initcall_debug) {
89473 disable_boot_trace();
89474 rettime = ktime_get();
89475 delta = ktime_sub(rettime, calltime);
89476- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
89477- trace_boot_ret(&ret, fn);
89478+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
89479+ trace_boot_ret(&trace_ret, fn);
89480 printk("initcall %pF returned %d after %Ld usecs\n", fn,
89481- ret.result, ret.duration);
89482+ trace_ret.result, trace_ret.duration);
89483 }
89484
89485 msgbuf[0] = 0;
89486
89487- if (ret.result && ret.result != -ENODEV && initcall_debug)
89488- sprintf(msgbuf, "error code %d ", ret.result);
89489+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
89490+ sprintf(msgbuf, "error code %d ", trace_ret.result);
89491
89492 if (preempt_count() != count) {
89493- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
89494+ msg1 = " preemption imbalance";
89495 preempt_count() = count;
89496 }
89497 if (irqs_disabled()) {
89498- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
89499+ msg2 = " disabled interrupts";
89500 local_irq_enable();
89501 }
89502- if (msgbuf[0]) {
89503- printk("initcall %pF returned with %s\n", fn, msgbuf);
89504+ if (msgbuf[0] || *msg1 || *msg2) {
89505+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
89506 }
89507
89508- return ret.result;
89509+ return trace_ret.result;
89510 }
89511
89512
89513@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
89514 if (!ramdisk_execute_command)
89515 ramdisk_execute_command = "/init";
89516
89517- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
89518+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
89519 ramdisk_execute_command = NULL;
89520 prepare_namespace();
89521 }
89522
89523+ grsecurity_init();
89524+
89525 /*
89526 * Ok, we have completed the initial bootup, and
89527 * we're essentially up and running. Get rid of the
89528diff --git a/init/noinitramfs.c b/init/noinitramfs.c
89529index f4c1a3a..96c19bd 100644
89530--- a/init/noinitramfs.c
89531+++ b/init/noinitramfs.c
89532@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
89533 {
89534 int err;
89535
89536- err = sys_mkdir("/dev", 0755);
89537+ err = sys_mkdir((const char __user *)"/dev", 0755);
89538 if (err < 0)
89539 goto out;
89540
89541@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
89542 if (err < 0)
89543 goto out;
89544
89545- err = sys_mkdir("/root", 0700);
89546+ err = sys_mkdir((const char __user *)"/root", 0700);
89547 if (err < 0)
89548 goto out;
89549
89550diff --git a/ipc/mqueue.c b/ipc/mqueue.c
89551index d01bc14..8df81db 100644
89552--- a/ipc/mqueue.c
89553+++ b/ipc/mqueue.c
89554@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
89555 mq_bytes = (mq_msg_tblsz +
89556 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
89557
89558+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
89559 spin_lock(&mq_lock);
89560 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
89561 u->mq_bytes + mq_bytes >
89562diff --git a/ipc/msg.c b/ipc/msg.c
89563index 779f762..4af9e36 100644
89564--- a/ipc/msg.c
89565+++ b/ipc/msg.c
89566@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
89567 return security_msg_queue_associate(msq, msgflg);
89568 }
89569
89570+static struct ipc_ops msg_ops = {
89571+ .getnew = newque,
89572+ .associate = msg_security,
89573+ .more_checks = NULL
89574+};
89575+
89576 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
89577 {
89578 struct ipc_namespace *ns;
89579- struct ipc_ops msg_ops;
89580 struct ipc_params msg_params;
89581
89582 ns = current->nsproxy->ipc_ns;
89583
89584- msg_ops.getnew = newque;
89585- msg_ops.associate = msg_security;
89586- msg_ops.more_checks = NULL;
89587-
89588 msg_params.key = key;
89589 msg_params.flg = msgflg;
89590
89591diff --git a/ipc/sem.c b/ipc/sem.c
89592index b781007..f738b04 100644
89593--- a/ipc/sem.c
89594+++ b/ipc/sem.c
89595@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
89596 return 0;
89597 }
89598
89599+static struct ipc_ops sem_ops = {
89600+ .getnew = newary,
89601+ .associate = sem_security,
89602+ .more_checks = sem_more_checks
89603+};
89604+
89605 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
89606 {
89607 struct ipc_namespace *ns;
89608- struct ipc_ops sem_ops;
89609 struct ipc_params sem_params;
89610
89611 ns = current->nsproxy->ipc_ns;
89612@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
89613 if (nsems < 0 || nsems > ns->sc_semmsl)
89614 return -EINVAL;
89615
89616- sem_ops.getnew = newary;
89617- sem_ops.associate = sem_security;
89618- sem_ops.more_checks = sem_more_checks;
89619-
89620 sem_params.key = key;
89621 sem_params.flg = semflg;
89622 sem_params.u.nsems = nsems;
89623@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
89624 ushort* sem_io = fast_sem_io;
89625 int nsems;
89626
89627+ pax_track_stack();
89628+
89629 sma = sem_lock_check(ns, semid);
89630 if (IS_ERR(sma))
89631 return PTR_ERR(sma);
89632@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
89633 unsigned long jiffies_left = 0;
89634 struct ipc_namespace *ns;
89635
89636+ pax_track_stack();
89637+
89638 ns = current->nsproxy->ipc_ns;
89639
89640 if (nsops < 1 || semid < 0)
89641diff --git a/ipc/shm.c b/ipc/shm.c
89642index d30732c..e4992cd 100644
89643--- a/ipc/shm.c
89644+++ b/ipc/shm.c
89645@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
89646 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
89647 #endif
89648
89649+#ifdef CONFIG_GRKERNSEC
89650+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89651+ const time_t shm_createtime, const uid_t cuid,
89652+ const int shmid);
89653+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
89654+ const time_t shm_createtime);
89655+#endif
89656+
89657 void shm_init_ns(struct ipc_namespace *ns)
89658 {
89659 ns->shm_ctlmax = SHMMAX;
89660@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
89661 shp->shm_lprid = 0;
89662 shp->shm_atim = shp->shm_dtim = 0;
89663 shp->shm_ctim = get_seconds();
89664+#ifdef CONFIG_GRKERNSEC
89665+ {
89666+ struct timespec timeval;
89667+ do_posix_clock_monotonic_gettime(&timeval);
89668+
89669+ shp->shm_createtime = timeval.tv_sec;
89670+ }
89671+#endif
89672 shp->shm_segsz = size;
89673 shp->shm_nattch = 0;
89674 shp->shm_file = file;
89675@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
89676 return 0;
89677 }
89678
89679+static struct ipc_ops shm_ops = {
89680+ .getnew = newseg,
89681+ .associate = shm_security,
89682+ .more_checks = shm_more_checks
89683+};
89684+
89685 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
89686 {
89687 struct ipc_namespace *ns;
89688- struct ipc_ops shm_ops;
89689 struct ipc_params shm_params;
89690
89691 ns = current->nsproxy->ipc_ns;
89692
89693- shm_ops.getnew = newseg;
89694- shm_ops.associate = shm_security;
89695- shm_ops.more_checks = shm_more_checks;
89696-
89697 shm_params.key = key;
89698 shm_params.flg = shmflg;
89699 shm_params.u.size = size;
89700@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
89701 f_mode = FMODE_READ | FMODE_WRITE;
89702 }
89703 if (shmflg & SHM_EXEC) {
89704+
89705+#ifdef CONFIG_PAX_MPROTECT
89706+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
89707+ goto out;
89708+#endif
89709+
89710 prot |= PROT_EXEC;
89711 acc_mode |= S_IXUGO;
89712 }
89713@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
89714 if (err)
89715 goto out_unlock;
89716
89717+#ifdef CONFIG_GRKERNSEC
89718+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
89719+ shp->shm_perm.cuid, shmid) ||
89720+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
89721+ err = -EACCES;
89722+ goto out_unlock;
89723+ }
89724+#endif
89725+
89726 path.dentry = dget(shp->shm_file->f_path.dentry);
89727 path.mnt = shp->shm_file->f_path.mnt;
89728 shp->shm_nattch++;
89729+#ifdef CONFIG_GRKERNSEC
89730+ shp->shm_lapid = current->pid;
89731+#endif
89732 size = i_size_read(path.dentry->d_inode);
89733 shm_unlock(shp);
89734
89735diff --git a/kernel/acct.c b/kernel/acct.c
89736index a6605ca..ca91111 100644
89737--- a/kernel/acct.c
89738+++ b/kernel/acct.c
89739@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
89740 */
89741 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
89742 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
89743- file->f_op->write(file, (char *)&ac,
89744+ file->f_op->write(file, (char __force_user *)&ac,
89745 sizeof(acct_t), &file->f_pos);
89746 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
89747 set_fs(fs);
89748diff --git a/kernel/audit.c b/kernel/audit.c
89749index 5feed23..48415fd 100644
89750--- a/kernel/audit.c
89751+++ b/kernel/audit.c
89752@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
89753 3) suppressed due to audit_rate_limit
89754 4) suppressed due to audit_backlog_limit
89755 */
89756-static atomic_t audit_lost = ATOMIC_INIT(0);
89757+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
89758
89759 /* The netlink socket. */
89760 static struct sock *audit_sock;
89761@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
89762 unsigned long now;
89763 int print;
89764
89765- atomic_inc(&audit_lost);
89766+ atomic_inc_unchecked(&audit_lost);
89767
89768 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
89769
89770@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
89771 printk(KERN_WARNING
89772 "audit: audit_lost=%d audit_rate_limit=%d "
89773 "audit_backlog_limit=%d\n",
89774- atomic_read(&audit_lost),
89775+ atomic_read_unchecked(&audit_lost),
89776 audit_rate_limit,
89777 audit_backlog_limit);
89778 audit_panic(message);
89779@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
89780 status_set.pid = audit_pid;
89781 status_set.rate_limit = audit_rate_limit;
89782 status_set.backlog_limit = audit_backlog_limit;
89783- status_set.lost = atomic_read(&audit_lost);
89784+ status_set.lost = atomic_read_unchecked(&audit_lost);
89785 status_set.backlog = skb_queue_len(&audit_skb_queue);
89786 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
89787 &status_set, sizeof(status_set));
89788@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
89789 spin_unlock_irq(&tsk->sighand->siglock);
89790 }
89791 read_unlock(&tasklist_lock);
89792- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
89793- &s, sizeof(s));
89794+
89795+ if (!err)
89796+ audit_send_reply(NETLINK_CB(skb).pid, seq,
89797+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
89798 break;
89799 }
89800 case AUDIT_TTY_SET: {
89801@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
89802 avail = audit_expand(ab,
89803 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
89804 if (!avail)
89805- goto out;
89806+ goto out_va_end;
89807 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
89808 }
89809- va_end(args2);
89810 if (len > 0)
89811 skb_put(skb, len);
89812+out_va_end:
89813+ va_end(args2);
89814 out:
89815 return;
89816 }
89817diff --git a/kernel/auditsc.c b/kernel/auditsc.c
89818index 267e484..ac41bc3 100644
89819--- a/kernel/auditsc.c
89820+++ b/kernel/auditsc.c
89821@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
89822 struct audit_buffer **ab,
89823 struct audit_aux_data_execve *axi)
89824 {
89825- int i;
89826- size_t len, len_sent = 0;
89827+ int i, len;
89828+ size_t len_sent = 0;
89829 const char __user *p;
89830 char *buf;
89831
89832@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
89833 }
89834
89835 /* global counter which is incremented every time something logs in */
89836-static atomic_t session_id = ATOMIC_INIT(0);
89837+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
89838
89839 /**
89840 * audit_set_loginuid - set a task's audit_context loginuid
89841@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
89842 */
89843 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
89844 {
89845- unsigned int sessionid = atomic_inc_return(&session_id);
89846+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
89847 struct audit_context *context = task->audit_context;
89848
89849 if (context && context->in_syscall) {
89850diff --git a/kernel/capability.c b/kernel/capability.c
89851index 8a944f5..db5001e 100644
89852--- a/kernel/capability.c
89853+++ b/kernel/capability.c
89854@@ -305,10 +305,26 @@ int capable(int cap)
89855 BUG();
89856 }
89857
89858- if (security_capable(cap) == 0) {
89859+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
89860 current->flags |= PF_SUPERPRIV;
89861 return 1;
89862 }
89863 return 0;
89864 }
89865+
89866+int capable_nolog(int cap)
89867+{
89868+ if (unlikely(!cap_valid(cap))) {
89869+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
89870+ BUG();
89871+ }
89872+
89873+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
89874+ current->flags |= PF_SUPERPRIV;
89875+ return 1;
89876+ }
89877+ return 0;
89878+}
89879+
89880 EXPORT_SYMBOL(capable);
89881+EXPORT_SYMBOL(capable_nolog);
89882diff --git a/kernel/cgroup.c b/kernel/cgroup.c
89883index 1fbcc74..7000012 100644
89884--- a/kernel/cgroup.c
89885+++ b/kernel/cgroup.c
89886@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
89887 struct hlist_head *hhead;
89888 struct cg_cgroup_link *link;
89889
89890+ pax_track_stack();
89891+
89892 /* First see if we already have a cgroup group that matches
89893 * the desired set */
89894 read_lock(&css_set_lock);
89895diff --git a/kernel/compat.c b/kernel/compat.c
89896index 8bc5578..186e44a 100644
89897--- a/kernel/compat.c
89898+++ b/kernel/compat.c
89899@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
89900 mm_segment_t oldfs;
89901 long ret;
89902
89903- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
89904+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
89905 oldfs = get_fs();
89906 set_fs(KERNEL_DS);
89907 ret = hrtimer_nanosleep_restart(restart);
89908@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
89909 oldfs = get_fs();
89910 set_fs(KERNEL_DS);
89911 ret = hrtimer_nanosleep(&tu,
89912- rmtp ? (struct timespec __user *)&rmt : NULL,
89913+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
89914 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
89915 set_fs(oldfs);
89916
89917@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
89918 mm_segment_t old_fs = get_fs();
89919
89920 set_fs(KERNEL_DS);
89921- ret = sys_sigpending((old_sigset_t __user *) &s);
89922+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
89923 set_fs(old_fs);
89924 if (ret == 0)
89925 ret = put_user(s, set);
89926@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
89927 old_fs = get_fs();
89928 set_fs(KERNEL_DS);
89929 ret = sys_sigprocmask(how,
89930- set ? (old_sigset_t __user *) &s : NULL,
89931- oset ? (old_sigset_t __user *) &s : NULL);
89932+ set ? (old_sigset_t __force_user *) &s : NULL,
89933+ oset ? (old_sigset_t __force_user *) &s : NULL);
89934 set_fs(old_fs);
89935 if (ret == 0)
89936 if (oset)
89937@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
89938 mm_segment_t old_fs = get_fs();
89939
89940 set_fs(KERNEL_DS);
89941- ret = sys_old_getrlimit(resource, &r);
89942+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
89943 set_fs(old_fs);
89944
89945 if (!ret) {
89946@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
89947 mm_segment_t old_fs = get_fs();
89948
89949 set_fs(KERNEL_DS);
89950- ret = sys_getrusage(who, (struct rusage __user *) &r);
89951+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
89952 set_fs(old_fs);
89953
89954 if (ret)
89955@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
89956 set_fs (KERNEL_DS);
89957 ret = sys_wait4(pid,
89958 (stat_addr ?
89959- (unsigned int __user *) &status : NULL),
89960- options, (struct rusage __user *) &r);
89961+ (unsigned int __force_user *) &status : NULL),
89962+ options, (struct rusage __force_user *) &r);
89963 set_fs (old_fs);
89964
89965 if (ret > 0) {
89966@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
89967 memset(&info, 0, sizeof(info));
89968
89969 set_fs(KERNEL_DS);
89970- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
89971- uru ? (struct rusage __user *)&ru : NULL);
89972+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
89973+ uru ? (struct rusage __force_user *)&ru : NULL);
89974 set_fs(old_fs);
89975
89976 if ((ret < 0) || (info.si_signo == 0))
89977@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
89978 oldfs = get_fs();
89979 set_fs(KERNEL_DS);
89980 err = sys_timer_settime(timer_id, flags,
89981- (struct itimerspec __user *) &newts,
89982- (struct itimerspec __user *) &oldts);
89983+ (struct itimerspec __force_user *) &newts,
89984+ (struct itimerspec __force_user *) &oldts);
89985 set_fs(oldfs);
89986 if (!err && old && put_compat_itimerspec(old, &oldts))
89987 return -EFAULT;
89988@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
89989 oldfs = get_fs();
89990 set_fs(KERNEL_DS);
89991 err = sys_timer_gettime(timer_id,
89992- (struct itimerspec __user *) &ts);
89993+ (struct itimerspec __force_user *) &ts);
89994 set_fs(oldfs);
89995 if (!err && put_compat_itimerspec(setting, &ts))
89996 return -EFAULT;
89997@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
89998 oldfs = get_fs();
89999 set_fs(KERNEL_DS);
90000 err = sys_clock_settime(which_clock,
90001- (struct timespec __user *) &ts);
90002+ (struct timespec __force_user *) &ts);
90003 set_fs(oldfs);
90004 return err;
90005 }
90006@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
90007 oldfs = get_fs();
90008 set_fs(KERNEL_DS);
90009 err = sys_clock_gettime(which_clock,
90010- (struct timespec __user *) &ts);
90011+ (struct timespec __force_user *) &ts);
90012 set_fs(oldfs);
90013 if (!err && put_compat_timespec(&ts, tp))
90014 return -EFAULT;
90015@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
90016 oldfs = get_fs();
90017 set_fs(KERNEL_DS);
90018 err = sys_clock_getres(which_clock,
90019- (struct timespec __user *) &ts);
90020+ (struct timespec __force_user *) &ts);
90021 set_fs(oldfs);
90022 if (!err && tp && put_compat_timespec(&ts, tp))
90023 return -EFAULT;
90024@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
90025 long err;
90026 mm_segment_t oldfs;
90027 struct timespec tu;
90028- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
90029+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
90030
90031- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
90032+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
90033 oldfs = get_fs();
90034 set_fs(KERNEL_DS);
90035 err = clock_nanosleep_restart(restart);
90036@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
90037 oldfs = get_fs();
90038 set_fs(KERNEL_DS);
90039 err = sys_clock_nanosleep(which_clock, flags,
90040- (struct timespec __user *) &in,
90041- (struct timespec __user *) &out);
90042+ (struct timespec __force_user *) &in,
90043+ (struct timespec __force_user *) &out);
90044 set_fs(oldfs);
90045
90046 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
90047diff --git a/kernel/configs.c b/kernel/configs.c
90048index abaee68..047facd 100644
90049--- a/kernel/configs.c
90050+++ b/kernel/configs.c
90051@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
90052 struct proc_dir_entry *entry;
90053
90054 /* create the current config file */
90055+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
90056+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
90057+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
90058+ &ikconfig_file_ops);
90059+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
90060+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
90061+ &ikconfig_file_ops);
90062+#endif
90063+#else
90064 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
90065 &ikconfig_file_ops);
90066+#endif
90067+
90068 if (!entry)
90069 return -ENOMEM;
90070
90071diff --git a/kernel/cpu.c b/kernel/cpu.c
90072index 3f2f04f..4e53ded 100644
90073--- a/kernel/cpu.c
90074+++ b/kernel/cpu.c
90075@@ -20,7 +20,7 @@
90076 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
90077 static DEFINE_MUTEX(cpu_add_remove_lock);
90078
90079-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
90080+static RAW_NOTIFIER_HEAD(cpu_chain);
90081
90082 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
90083 * Should always be manipulated under cpu_add_remove_lock
90084diff --git a/kernel/cred.c b/kernel/cred.c
90085index 0b5b5fc..f7fe51a 100644
90086--- a/kernel/cred.c
90087+++ b/kernel/cred.c
90088@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
90089 */
90090 void __put_cred(struct cred *cred)
90091 {
90092+ pax_track_stack();
90093+
90094 kdebug("__put_cred(%p{%d,%d})", cred,
90095 atomic_read(&cred->usage),
90096 read_cred_subscribers(cred));
90097@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
90098 {
90099 struct cred *cred;
90100
90101+ pax_track_stack();
90102+
90103 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
90104 atomic_read(&tsk->cred->usage),
90105 read_cred_subscribers(tsk->cred));
90106@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
90107 validate_creds(cred);
90108 put_cred(cred);
90109 }
90110+
90111+#ifdef CONFIG_GRKERNSEC_SETXID
90112+ cred = (struct cred *) tsk->delayed_cred;
90113+ if (cred) {
90114+ tsk->delayed_cred = NULL;
90115+ validate_creds(cred);
90116+ put_cred(cred);
90117+ }
90118+#endif
90119 }
90120
90121 /**
90122@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
90123 {
90124 const struct cred *cred;
90125
90126+ pax_track_stack();
90127+
90128 rcu_read_lock();
90129
90130 do {
90131@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
90132 {
90133 struct cred *new;
90134
90135+ pax_track_stack();
90136+
90137 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
90138 if (!new)
90139 return NULL;
90140@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
90141 const struct cred *old;
90142 struct cred *new;
90143
90144+ pax_track_stack();
90145+
90146 validate_process_creds();
90147
90148 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
90149@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
90150 struct thread_group_cred *tgcred = NULL;
90151 struct cred *new;
90152
90153+ pax_track_stack();
90154+
90155 #ifdef CONFIG_KEYS
90156 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
90157 if (!tgcred)
90158@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
90159 struct cred *new;
90160 int ret;
90161
90162+ pax_track_stack();
90163+
90164 mutex_init(&p->cred_guard_mutex);
90165
90166 if (
90167@@ -523,11 +546,13 @@ error_put:
90168 * Always returns 0 thus allowing this function to be tail-called at the end
90169 * of, say, sys_setgid().
90170 */
90171-int commit_creds(struct cred *new)
90172+static int __commit_creds(struct cred *new)
90173 {
90174 struct task_struct *task = current;
90175 const struct cred *old = task->real_cred;
90176
90177+ pax_track_stack();
90178+
90179 kdebug("commit_creds(%p{%d,%d})", new,
90180 atomic_read(&new->usage),
90181 read_cred_subscribers(new));
90182@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
90183
90184 get_cred(new); /* we will require a ref for the subj creds too */
90185
90186+ gr_set_role_label(task, new->uid, new->gid);
90187+
90188 /* dumpability changes */
90189 if (old->euid != new->euid ||
90190 old->egid != new->egid ||
90191@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
90192 key_fsgid_changed(task);
90193
90194 /* do it
90195- * - What if a process setreuid()'s and this brings the
90196- * new uid over his NPROC rlimit? We can check this now
90197- * cheaply with the new uid cache, so if it matters
90198- * we should be checking for it. -DaveM
90199+ * RLIMIT_NPROC limits on user->processes have already been checked
90200+ * in set_user().
90201 */
90202 alter_cred_subscribers(new, 2);
90203 if (new->user != old->user)
90204@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
90205 put_cred(old);
90206 return 0;
90207 }
90208+
90209+#ifdef CONFIG_GRKERNSEC_SETXID
90210+extern int set_user(struct cred *new);
90211+
90212+void gr_delayed_cred_worker(void)
90213+{
90214+ const struct cred *new = current->delayed_cred;
90215+ struct cred *ncred;
90216+
90217+ current->delayed_cred = NULL;
90218+
90219+ if (current_uid() && new != NULL) {
90220+ // from doing get_cred on it when queueing this
90221+ put_cred(new);
90222+ return;
90223+ } else if (new == NULL)
90224+ return;
90225+
90226+ ncred = prepare_creds();
90227+ if (!ncred)
90228+ goto die;
90229+ // uids
90230+ ncred->uid = new->uid;
90231+ ncred->euid = new->euid;
90232+ ncred->suid = new->suid;
90233+ ncred->fsuid = new->fsuid;
90234+ // gids
90235+ ncred->gid = new->gid;
90236+ ncred->egid = new->egid;
90237+ ncred->sgid = new->sgid;
90238+ ncred->fsgid = new->fsgid;
90239+ // groups
90240+ if (set_groups(ncred, new->group_info) < 0) {
90241+ abort_creds(ncred);
90242+ goto die;
90243+ }
90244+ // caps
90245+ ncred->securebits = new->securebits;
90246+ ncred->cap_inheritable = new->cap_inheritable;
90247+ ncred->cap_permitted = new->cap_permitted;
90248+ ncred->cap_effective = new->cap_effective;
90249+ ncred->cap_bset = new->cap_bset;
90250+
90251+ if (set_user(ncred)) {
90252+ abort_creds(ncred);
90253+ goto die;
90254+ }
90255+
90256+ // from doing get_cred on it when queueing this
90257+ put_cred(new);
90258+
90259+ __commit_creds(ncred);
90260+ return;
90261+die:
90262+ // from doing get_cred on it when queueing this
90263+ put_cred(new);
90264+ do_group_exit(SIGKILL);
90265+}
90266+#endif
90267+
90268+int commit_creds(struct cred *new)
90269+{
90270+#ifdef CONFIG_GRKERNSEC_SETXID
90271+ struct task_struct *t;
90272+
90273+ /* we won't get called with tasklist_lock held for writing
90274+ and interrupts disabled as the cred struct in that case is
90275+ init_cred
90276+ */
90277+ if (grsec_enable_setxid && !current_is_single_threaded() &&
90278+ !current_uid() && new->uid) {
90279+ rcu_read_lock();
90280+ read_lock(&tasklist_lock);
90281+ for (t = next_thread(current); t != current;
90282+ t = next_thread(t)) {
90283+ if (t->delayed_cred == NULL) {
90284+ t->delayed_cred = get_cred(new);
90285+ set_tsk_need_resched(t);
90286+ }
90287+ }
90288+ read_unlock(&tasklist_lock);
90289+ rcu_read_unlock();
90290+ }
90291+#endif
90292+ return __commit_creds(new);
90293+}
90294+
90295 EXPORT_SYMBOL(commit_creds);
90296
90297+
90298 /**
90299 * abort_creds - Discard a set of credentials and unlock the current task
90300 * @new: The credentials that were going to be applied
90301@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
90302 */
90303 void abort_creds(struct cred *new)
90304 {
90305+ pax_track_stack();
90306+
90307 kdebug("abort_creds(%p{%d,%d})", new,
90308 atomic_read(&new->usage),
90309 read_cred_subscribers(new));
90310@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
90311 {
90312 const struct cred *old = current->cred;
90313
90314+ pax_track_stack();
90315+
90316 kdebug("override_creds(%p{%d,%d})", new,
90317 atomic_read(&new->usage),
90318 read_cred_subscribers(new));
90319@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
90320 {
90321 const struct cred *override = current->cred;
90322
90323+ pax_track_stack();
90324+
90325 kdebug("revert_creds(%p{%d,%d})", old,
90326 atomic_read(&old->usage),
90327 read_cred_subscribers(old));
90328@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
90329 const struct cred *old;
90330 struct cred *new;
90331
90332+ pax_track_stack();
90333+
90334 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
90335 if (!new)
90336 return NULL;
90337@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
90338 */
90339 int set_security_override(struct cred *new, u32 secid)
90340 {
90341+ pax_track_stack();
90342+
90343 return security_kernel_act_as(new, secid);
90344 }
90345 EXPORT_SYMBOL(set_security_override);
90346@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
90347 u32 secid;
90348 int ret;
90349
90350+ pax_track_stack();
90351+
90352 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
90353 if (ret < 0)
90354 return ret;
90355diff --git a/kernel/exit.c b/kernel/exit.c
90356index 0f8fae3..66af9b1 100644
90357--- a/kernel/exit.c
90358+++ b/kernel/exit.c
90359@@ -55,6 +55,10 @@
90360 #include <asm/pgtable.h>
90361 #include <asm/mmu_context.h>
90362
90363+#ifdef CONFIG_GRKERNSEC
90364+extern rwlock_t grsec_exec_file_lock;
90365+#endif
90366+
90367 static void exit_mm(struct task_struct * tsk);
90368
90369 static void __unhash_process(struct task_struct *p)
90370@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
90371 struct task_struct *leader;
90372 int zap_leader;
90373 repeat:
90374+#ifdef CONFIG_NET
90375+ gr_del_task_from_ip_table(p);
90376+#endif
90377+
90378 tracehook_prepare_release_task(p);
90379 /* don't need to get the RCU readlock here - the process is dead and
90380 * can't be modifying its own credentials */
90381@@ -397,7 +405,7 @@ int allow_signal(int sig)
90382 * know it'll be handled, so that they don't get converted to
90383 * SIGKILL or just silently dropped.
90384 */
90385- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
90386+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
90387 recalc_sigpending();
90388 spin_unlock_irq(&current->sighand->siglock);
90389 return 0;
90390@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
90391 vsnprintf(current->comm, sizeof(current->comm), name, args);
90392 va_end(args);
90393
90394+#ifdef CONFIG_GRKERNSEC
90395+ write_lock(&grsec_exec_file_lock);
90396+ if (current->exec_file) {
90397+ fput(current->exec_file);
90398+ current->exec_file = NULL;
90399+ }
90400+ write_unlock(&grsec_exec_file_lock);
90401+#endif
90402+
90403+ gr_set_kernel_label(current);
90404+
90405 /*
90406 * If we were started as result of loading a module, close all of the
90407 * user space pages. We don't need them, and if we didn't close them
90408@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
90409 struct task_struct *tsk = current;
90410 int group_dead;
90411
90412- profile_task_exit(tsk);
90413-
90414- WARN_ON(atomic_read(&tsk->fs_excl));
90415-
90416+ /*
90417+ * Check this first since set_fs() below depends on
90418+ * current_thread_info(), which we better not access when we're in
90419+ * interrupt context. Other than that, we want to do the set_fs()
90420+ * as early as possible.
90421+ */
90422 if (unlikely(in_interrupt()))
90423 panic("Aiee, killing interrupt handler!");
90424- if (unlikely(!tsk->pid))
90425- panic("Attempted to kill the idle task!");
90426
90427 /*
90428- * If do_exit is called because this processes oopsed, it's possible
90429+ * If do_exit is called because this processes Oops'ed, it's possible
90430 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
90431 * continuing. Amongst other possible reasons, this is to prevent
90432 * mm_release()->clear_child_tid() from writing to a user-controlled
90433@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
90434 */
90435 set_fs(USER_DS);
90436
90437+ profile_task_exit(tsk);
90438+
90439+ WARN_ON(atomic_read(&tsk->fs_excl));
90440+
90441+ if (unlikely(!tsk->pid))
90442+ panic("Attempted to kill the idle task!");
90443+
90444 tracehook_report_exit(&code);
90445
90446 validate_creds_for_do_exit(tsk);
90447@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
90448 tsk->exit_code = code;
90449 taskstats_exit(tsk, group_dead);
90450
90451+ gr_acl_handle_psacct(tsk, code);
90452+ gr_acl_handle_exit();
90453+
90454 exit_mm(tsk);
90455
90456 if (group_dead)
90457@@ -1020,7 +1049,7 @@ NORET_TYPE void do_exit(long code)
90458 tsk->flags |= PF_EXITPIDONE;
90459
90460 if (tsk->io_context)
90461- exit_io_context();
90462+ exit_io_context(tsk);
90463
90464 if (tsk->splice_pipe)
90465 __free_pipe_info(tsk->splice_pipe);
90466@@ -1059,7 +1088,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
90467 * Take down every thread in the group. This is called by fatal signals
90468 * as well as by sys_exit_group (below).
90469 */
90470-NORET_TYPE void
90471+__noreturn void
90472 do_group_exit(int exit_code)
90473 {
90474 struct signal_struct *sig = current->signal;
90475@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
90476
90477 if (unlikely(wo->wo_flags & WNOWAIT)) {
90478 int exit_code = p->exit_code;
90479- int why, status;
90480+ int why;
90481
90482 get_task_struct(p);
90483 read_unlock(&tasklist_lock);
90484diff --git a/kernel/fork.c b/kernel/fork.c
90485index 4bde56f..8976a8f 100644
90486--- a/kernel/fork.c
90487+++ b/kernel/fork.c
90488@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
90489 *stackend = STACK_END_MAGIC; /* for overflow detection */
90490
90491 #ifdef CONFIG_CC_STACKPROTECTOR
90492- tsk->stack_canary = get_random_int();
90493+ tsk->stack_canary = pax_get_random_long();
90494 #endif
90495
90496 /* One for us, one for whoever does the "release_task()" (usually parent) */
90497@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90498 mm->locked_vm = 0;
90499 mm->mmap = NULL;
90500 mm->mmap_cache = NULL;
90501- mm->free_area_cache = oldmm->mmap_base;
90502- mm->cached_hole_size = ~0UL;
90503+ mm->free_area_cache = oldmm->free_area_cache;
90504+ mm->cached_hole_size = oldmm->cached_hole_size;
90505 mm->map_count = 0;
90506 cpumask_clear(mm_cpumask(mm));
90507 mm->mm_rb = RB_ROOT;
90508@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90509 tmp->vm_flags &= ~VM_LOCKED;
90510 tmp->vm_mm = mm;
90511 tmp->vm_next = tmp->vm_prev = NULL;
90512+ tmp->vm_mirror = NULL;
90513 anon_vma_link(tmp);
90514 file = tmp->vm_file;
90515 if (file) {
90516@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
90517 if (retval)
90518 goto out;
90519 }
90520+
90521+#ifdef CONFIG_PAX_SEGMEXEC
90522+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
90523+ struct vm_area_struct *mpnt_m;
90524+
90525+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
90526+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
90527+
90528+ if (!mpnt->vm_mirror)
90529+ continue;
90530+
90531+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
90532+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
90533+ mpnt->vm_mirror = mpnt_m;
90534+ } else {
90535+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
90536+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
90537+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
90538+ mpnt->vm_mirror->vm_mirror = mpnt;
90539+ }
90540+ }
90541+ BUG_ON(mpnt_m);
90542+ }
90543+#endif
90544+
90545 /* a new mm has just been created */
90546 arch_dup_mmap(oldmm, mm);
90547 retval = 0;
90548@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
90549 write_unlock(&fs->lock);
90550 return -EAGAIN;
90551 }
90552- fs->users++;
90553+ atomic_inc(&fs->users);
90554 write_unlock(&fs->lock);
90555 return 0;
90556 }
90557 tsk->fs = copy_fs_struct(fs);
90558 if (!tsk->fs)
90559 return -ENOMEM;
90560+ gr_set_chroot_entries(tsk, &tsk->fs->root);
90561 return 0;
90562 }
90563
90564@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
90565 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
90566 #endif
90567 retval = -EAGAIN;
90568+
90569+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
90570+
90571 if (atomic_read(&p->real_cred->user->processes) >=
90572 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
90573- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
90574- p->real_cred->user != INIT_USER)
90575+ if (p->real_cred->user != INIT_USER &&
90576+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
90577 goto bad_fork_free;
90578 }
90579+ current->flags &= ~PF_NPROC_EXCEEDED;
90580
90581 retval = copy_creds(p, clone_flags);
90582 if (retval < 0)
90583@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
90584 goto bad_fork_free_pid;
90585 }
90586
90587+ gr_copy_label(p);
90588+
90589 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
90590 /*
90591 * Clear TID on mm_release()?
90592@@ -1299,7 +1332,8 @@ bad_fork_free_pid:
90593 if (pid != &init_struct_pid)
90594 free_pid(pid);
90595 bad_fork_cleanup_io:
90596- put_io_context(p->io_context);
90597+ if (p->io_context)
90598+ exit_io_context(p);
90599 bad_fork_cleanup_namespaces:
90600 exit_task_namespaces(p);
90601 bad_fork_cleanup_mm:
90602@@ -1333,6 +1367,8 @@ bad_fork_cleanup_count:
90603 bad_fork_free:
90604 free_task(p);
90605 fork_out:
90606+ gr_log_forkfail(retval);
90607+
90608 return ERR_PTR(retval);
90609 }
90610
90611@@ -1426,6 +1462,8 @@ long do_fork(unsigned long clone_flags,
90612 if (clone_flags & CLONE_PARENT_SETTID)
90613 put_user(nr, parent_tidptr);
90614
90615+ gr_handle_brute_check();
90616+
90617 if (clone_flags & CLONE_VFORK) {
90618 p->vfork_done = &vfork;
90619 init_completion(&vfork);
90620@@ -1558,7 +1596,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
90621 return 0;
90622
90623 /* don't need lock here; in the worst case we'll do useless copy */
90624- if (fs->users == 1)
90625+ if (atomic_read(&fs->users) == 1)
90626 return 0;
90627
90628 *new_fsp = copy_fs_struct(fs);
90629@@ -1681,7 +1719,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
90630 fs = current->fs;
90631 write_lock(&fs->lock);
90632 current->fs = new_fs;
90633- if (--fs->users)
90634+ gr_set_chroot_entries(current, &current->fs->root);
90635+ if (atomic_dec_return(&fs->users))
90636 new_fs = NULL;
90637 else
90638 new_fs = fs;
90639diff --git a/kernel/futex.c b/kernel/futex.c
90640index fb98c9f..333faec 100644
90641--- a/kernel/futex.c
90642+++ b/kernel/futex.c
90643@@ -54,6 +54,7 @@
90644 #include <linux/mount.h>
90645 #include <linux/pagemap.h>
90646 #include <linux/syscalls.h>
90647+#include <linux/ptrace.h>
90648 #include <linux/signal.h>
90649 #include <linux/module.h>
90650 #include <linux/magic.h>
90651@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
90652 struct page *page;
90653 int err, ro = 0;
90654
90655+#ifdef CONFIG_PAX_SEGMEXEC
90656+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
90657+ return -EFAULT;
90658+#endif
90659+
90660 /*
90661 * The futex address must be "naturally" aligned.
90662 */
90663@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
90664 struct futex_q q;
90665 int ret;
90666
90667+ pax_track_stack();
90668+
90669 if (!bitset)
90670 return -EINVAL;
90671
90672@@ -1871,7 +1879,7 @@ retry:
90673
90674 restart = &current_thread_info()->restart_block;
90675 restart->fn = futex_wait_restart;
90676- restart->futex.uaddr = (u32 *)uaddr;
90677+ restart->futex.uaddr = uaddr;
90678 restart->futex.val = val;
90679 restart->futex.time = abs_time->tv64;
90680 restart->futex.bitset = bitset;
90681@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
90682 struct futex_q q;
90683 int res, ret;
90684
90685+ pax_track_stack();
90686+
90687 if (!bitset)
90688 return -EINVAL;
90689
90690@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
90691 if (!p)
90692 goto err_unlock;
90693 ret = -EPERM;
90694+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
90695+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
90696+ goto err_unlock;
90697+#endif
90698 pcred = __task_cred(p);
90699 if (cred->euid != pcred->euid &&
90700 cred->euid != pcred->uid &&
90701@@ -2489,7 +2503,7 @@ retry:
90702 */
90703 static inline int fetch_robust_entry(struct robust_list __user **entry,
90704 struct robust_list __user * __user *head,
90705- int *pi)
90706+ unsigned int *pi)
90707 {
90708 unsigned long uentry;
90709
90710@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
90711 {
90712 u32 curval;
90713 int i;
90714+ mm_segment_t oldfs;
90715
90716 /*
90717 * This will fail and we want it. Some arch implementations do
90718@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
90719 * implementation, the non functional ones will return
90720 * -ENOSYS.
90721 */
90722+ oldfs = get_fs();
90723+ set_fs(USER_DS);
90724 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
90725+ set_fs(oldfs);
90726 if (curval == -EFAULT)
90727 futex_cmpxchg_enabled = 1;
90728
90729diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
90730index 2357165..eb25501 100644
90731--- a/kernel/futex_compat.c
90732+++ b/kernel/futex_compat.c
90733@@ -10,6 +10,7 @@
90734 #include <linux/compat.h>
90735 #include <linux/nsproxy.h>
90736 #include <linux/futex.h>
90737+#include <linux/ptrace.h>
90738
90739 #include <asm/uaccess.h>
90740
90741@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
90742 {
90743 struct compat_robust_list_head __user *head;
90744 unsigned long ret;
90745- const struct cred *cred = current_cred(), *pcred;
90746+ const struct cred *cred = current_cred();
90747+ const struct cred *pcred;
90748
90749 if (!futex_cmpxchg_enabled)
90750 return -ENOSYS;
90751@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
90752 if (!p)
90753 goto err_unlock;
90754 ret = -EPERM;
90755+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
90756+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
90757+ goto err_unlock;
90758+#endif
90759 pcred = __task_cred(p);
90760 if (cred->euid != pcred->euid &&
90761 cred->euid != pcred->uid &&
90762diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
90763index 9b22d03..6295b62 100644
90764--- a/kernel/gcov/base.c
90765+++ b/kernel/gcov/base.c
90766@@ -102,11 +102,6 @@ void gcov_enable_events(void)
90767 }
90768
90769 #ifdef CONFIG_MODULES
90770-static inline int within(void *addr, void *start, unsigned long size)
90771-{
90772- return ((addr >= start) && (addr < start + size));
90773-}
90774-
90775 /* Update list and generate events when modules are unloaded. */
90776 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90777 void *data)
90778@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
90779 prev = NULL;
90780 /* Remove entries located in module from linked list. */
90781 for (info = gcov_info_head; info; info = info->next) {
90782- if (within(info, mod->module_core, mod->core_size)) {
90783+ if (within_module_core_rw((unsigned long)info, mod)) {
90784 if (prev)
90785 prev->next = info->next;
90786 else
90787diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
90788index a6e9d00..a0da4f9 100644
90789--- a/kernel/hrtimer.c
90790+++ b/kernel/hrtimer.c
90791@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
90792 local_irq_restore(flags);
90793 }
90794
90795-static void run_hrtimer_softirq(struct softirq_action *h)
90796+static void run_hrtimer_softirq(void)
90797 {
90798 hrtimer_peek_ahead_timers();
90799 }
90800diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
90801index 8b6b8b6..6bc87df 100644
90802--- a/kernel/kallsyms.c
90803+++ b/kernel/kallsyms.c
90804@@ -11,6 +11,9 @@
90805 * Changed the compression method from stem compression to "table lookup"
90806 * compression (see scripts/kallsyms.c for a more complete description)
90807 */
90808+#ifdef CONFIG_GRKERNSEC_HIDESYM
90809+#define __INCLUDED_BY_HIDESYM 1
90810+#endif
90811 #include <linux/kallsyms.h>
90812 #include <linux/module.h>
90813 #include <linux/init.h>
90814@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
90815
90816 static inline int is_kernel_inittext(unsigned long addr)
90817 {
90818+ if (system_state != SYSTEM_BOOTING)
90819+ return 0;
90820+
90821 if (addr >= (unsigned long)_sinittext
90822 && addr <= (unsigned long)_einittext)
90823 return 1;
90824 return 0;
90825 }
90826
90827+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90828+#ifdef CONFIG_MODULES
90829+static inline int is_module_text(unsigned long addr)
90830+{
90831+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
90832+ return 1;
90833+
90834+ addr = ktla_ktva(addr);
90835+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
90836+}
90837+#else
90838+static inline int is_module_text(unsigned long addr)
90839+{
90840+ return 0;
90841+}
90842+#endif
90843+#endif
90844+
90845 static inline int is_kernel_text(unsigned long addr)
90846 {
90847 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
90848@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
90849
90850 static inline int is_kernel(unsigned long addr)
90851 {
90852+
90853+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90854+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
90855+ return 1;
90856+
90857+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
90858+#else
90859 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
90860+#endif
90861+
90862 return 1;
90863 return in_gate_area_no_task(addr);
90864 }
90865
90866 static int is_ksym_addr(unsigned long addr)
90867 {
90868+
90869+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
90870+ if (is_module_text(addr))
90871+ return 0;
90872+#endif
90873+
90874 if (all_var)
90875 return is_kernel(addr);
90876
90877@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
90878
90879 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
90880 {
90881- iter->name[0] = '\0';
90882 iter->nameoff = get_symbol_offset(new_pos);
90883 iter->pos = new_pos;
90884 }
90885@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
90886 {
90887 struct kallsym_iter *iter = m->private;
90888
90889+#ifdef CONFIG_GRKERNSEC_HIDESYM
90890+ if (current_uid())
90891+ return 0;
90892+#endif
90893+
90894 /* Some debugging symbols have no name. Ignore them. */
90895 if (!iter->name[0])
90896 return 0;
90897@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
90898 struct kallsym_iter *iter;
90899 int ret;
90900
90901- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
90902+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
90903 if (!iter)
90904 return -ENOMEM;
90905 reset_iter(iter, 0);
90906diff --git a/kernel/kexec.c b/kernel/kexec.c
90907index f336e21..9c1c20b 100644
90908--- a/kernel/kexec.c
90909+++ b/kernel/kexec.c
90910@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
90911 unsigned long flags)
90912 {
90913 struct compat_kexec_segment in;
90914- struct kexec_segment out, __user *ksegments;
90915+ struct kexec_segment out;
90916+ struct kexec_segment __user *ksegments;
90917 unsigned long i, result;
90918
90919 /* Don't allow clients that don't understand the native
90920diff --git a/kernel/kgdb.c b/kernel/kgdb.c
90921index 53dae4b..9ba3743 100644
90922--- a/kernel/kgdb.c
90923+++ b/kernel/kgdb.c
90924@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
90925 /* Guard for recursive entry */
90926 static int exception_level;
90927
90928-static struct kgdb_io *kgdb_io_ops;
90929+static const struct kgdb_io *kgdb_io_ops;
90930 static DEFINE_SPINLOCK(kgdb_registration_lock);
90931
90932 /* kgdb console driver is loaded */
90933@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
90934 */
90935 static atomic_t passive_cpu_wait[NR_CPUS];
90936 static atomic_t cpu_in_kgdb[NR_CPUS];
90937-atomic_t kgdb_setting_breakpoint;
90938+atomic_unchecked_t kgdb_setting_breakpoint;
90939
90940 struct task_struct *kgdb_usethread;
90941 struct task_struct *kgdb_contthread;
90942@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
90943 sizeof(unsigned long)];
90944
90945 /* to keep track of the CPU which is doing the single stepping*/
90946-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90947+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
90948
90949 /*
90950 * If you are debugging a problem where roundup (the collection of
90951@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
90952 return 0;
90953 if (kgdb_connected)
90954 return 1;
90955- if (atomic_read(&kgdb_setting_breakpoint))
90956+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
90957 return 1;
90958 if (print_wait)
90959 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
90960@@ -1426,8 +1426,8 @@ acquirelock:
90961 * instance of the exception handler wanted to come into the
90962 * debugger on a different CPU via a single step
90963 */
90964- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
90965- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
90966+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
90967+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
90968
90969 atomic_set(&kgdb_active, -1);
90970 touch_softlockup_watchdog();
90971@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
90972 *
90973 * Register it with the KGDB core.
90974 */
90975-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
90976+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
90977 {
90978 int err;
90979
90980@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
90981 *
90982 * Unregister it with the KGDB core.
90983 */
90984-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
90985+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
90986 {
90987 BUG_ON(kgdb_connected);
90988
90989@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
90990 */
90991 void kgdb_breakpoint(void)
90992 {
90993- atomic_set(&kgdb_setting_breakpoint, 1);
90994+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
90995 wmb(); /* Sync point before breakpoint */
90996 arch_kgdb_breakpoint();
90997 wmb(); /* Sync point after breakpoint */
90998- atomic_set(&kgdb_setting_breakpoint, 0);
90999+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
91000 }
91001 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
91002
91003diff --git a/kernel/kmod.c b/kernel/kmod.c
91004index a061472..40884b6 100644
91005--- a/kernel/kmod.c
91006+++ b/kernel/kmod.c
91007@@ -68,13 +68,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
91008 * If module auto-loading support is disabled then this function
91009 * becomes a no-operation.
91010 */
91011-int __request_module(bool wait, const char *fmt, ...)
91012+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
91013 {
91014- va_list args;
91015 char module_name[MODULE_NAME_LEN];
91016 unsigned int max_modprobes;
91017 int ret;
91018- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
91019+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
91020 static char *envp[] = { "HOME=/",
91021 "TERM=linux",
91022 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
91023@@ -87,12 +86,24 @@ int __request_module(bool wait, const char *fmt, ...)
91024 if (ret)
91025 return ret;
91026
91027- va_start(args, fmt);
91028- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
91029- va_end(args);
91030+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
91031 if (ret >= MODULE_NAME_LEN)
91032 return -ENAMETOOLONG;
91033
91034+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91035+ if (!current_uid()) {
91036+ /* hack to workaround consolekit/udisks stupidity */
91037+ read_lock(&tasklist_lock);
91038+ if (!strcmp(current->comm, "mount") &&
91039+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
91040+ read_unlock(&tasklist_lock);
91041+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
91042+ return -EPERM;
91043+ }
91044+ read_unlock(&tasklist_lock);
91045+ }
91046+#endif
91047+
91048 /* If modprobe needs a service that is in a module, we get a recursive
91049 * loop. Limit the number of running kmod threads to max_threads/2 or
91050 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
91051@@ -126,6 +137,48 @@ int __request_module(bool wait, const char *fmt, ...)
91052 atomic_dec(&kmod_concurrent);
91053 return ret;
91054 }
91055+
91056+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
91057+{
91058+ va_list args;
91059+ int ret;
91060+
91061+ va_start(args, fmt);
91062+ ret = ____request_module(wait, module_param, fmt, args);
91063+ va_end(args);
91064+
91065+ return ret;
91066+}
91067+
91068+int __request_module(bool wait, const char *fmt, ...)
91069+{
91070+ va_list args;
91071+ int ret;
91072+
91073+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91074+ if (current_uid()) {
91075+ char module_param[MODULE_NAME_LEN];
91076+
91077+ memset(module_param, 0, sizeof(module_param));
91078+
91079+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
91080+
91081+ va_start(args, fmt);
91082+ ret = ____request_module(wait, module_param, fmt, args);
91083+ va_end(args);
91084+
91085+ return ret;
91086+ }
91087+#endif
91088+
91089+ va_start(args, fmt);
91090+ ret = ____request_module(wait, NULL, fmt, args);
91091+ va_end(args);
91092+
91093+ return ret;
91094+}
91095+
91096+
91097 EXPORT_SYMBOL(__request_module);
91098 #endif /* CONFIG_MODULES */
91099
91100@@ -231,7 +284,7 @@ static int wait_for_helper(void *data)
91101 *
91102 * Thus the __user pointer cast is valid here.
91103 */
91104- sys_wait4(pid, (int __user *)&ret, 0, NULL);
91105+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
91106
91107 /*
91108 * If ret is 0, either ____call_usermodehelper failed and the
91109diff --git a/kernel/kprobes.c b/kernel/kprobes.c
91110index 176d825..77fa8ea 100644
91111--- a/kernel/kprobes.c
91112+++ b/kernel/kprobes.c
91113@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
91114 * kernel image and loaded module images reside. This is required
91115 * so x86_64 can correctly handle the %rip-relative fixups.
91116 */
91117- kip->insns = module_alloc(PAGE_SIZE);
91118+ kip->insns = module_alloc_exec(PAGE_SIZE);
91119 if (!kip->insns) {
91120 kfree(kip);
91121 return NULL;
91122@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
91123 */
91124 if (!list_is_singular(&kprobe_insn_pages)) {
91125 list_del(&kip->list);
91126- module_free(NULL, kip->insns);
91127+ module_free_exec(NULL, kip->insns);
91128 kfree(kip);
91129 }
91130 return 1;
91131@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
91132 {
91133 int i, err = 0;
91134 unsigned long offset = 0, size = 0;
91135- char *modname, namebuf[128];
91136+ char *modname, namebuf[KSYM_NAME_LEN];
91137 const char *symbol_name;
91138 void *addr;
91139 struct kprobe_blackpoint *kb;
91140@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
91141 const char *sym = NULL;
91142 unsigned int i = *(loff_t *) v;
91143 unsigned long offset = 0;
91144- char *modname, namebuf[128];
91145+ char *modname, namebuf[KSYM_NAME_LEN];
91146
91147 head = &kprobe_table[i];
91148 preempt_disable();
91149diff --git a/kernel/lockdep.c b/kernel/lockdep.c
91150index d86fe89..d12fc66 100644
91151--- a/kernel/lockdep.c
91152+++ b/kernel/lockdep.c
91153@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
91154 /*
91155 * Various lockdep statistics:
91156 */
91157-atomic_t chain_lookup_hits;
91158-atomic_t chain_lookup_misses;
91159-atomic_t hardirqs_on_events;
91160-atomic_t hardirqs_off_events;
91161-atomic_t redundant_hardirqs_on;
91162-atomic_t redundant_hardirqs_off;
91163-atomic_t softirqs_on_events;
91164-atomic_t softirqs_off_events;
91165-atomic_t redundant_softirqs_on;
91166-atomic_t redundant_softirqs_off;
91167-atomic_t nr_unused_locks;
91168-atomic_t nr_cyclic_checks;
91169-atomic_t nr_find_usage_forwards_checks;
91170-atomic_t nr_find_usage_backwards_checks;
91171+atomic_unchecked_t chain_lookup_hits;
91172+atomic_unchecked_t chain_lookup_misses;
91173+atomic_unchecked_t hardirqs_on_events;
91174+atomic_unchecked_t hardirqs_off_events;
91175+atomic_unchecked_t redundant_hardirqs_on;
91176+atomic_unchecked_t redundant_hardirqs_off;
91177+atomic_unchecked_t softirqs_on_events;
91178+atomic_unchecked_t softirqs_off_events;
91179+atomic_unchecked_t redundant_softirqs_on;
91180+atomic_unchecked_t redundant_softirqs_off;
91181+atomic_unchecked_t nr_unused_locks;
91182+atomic_unchecked_t nr_cyclic_checks;
91183+atomic_unchecked_t nr_find_usage_forwards_checks;
91184+atomic_unchecked_t nr_find_usage_backwards_checks;
91185 #endif
91186
91187 /*
91188@@ -577,6 +577,10 @@ static int static_obj(void *obj)
91189 int i;
91190 #endif
91191
91192+#ifdef CONFIG_PAX_KERNEXEC
91193+ start = ktla_ktva(start);
91194+#endif
91195+
91196 /*
91197 * static variable?
91198 */
91199@@ -592,8 +596,7 @@ static int static_obj(void *obj)
91200 */
91201 for_each_possible_cpu(i) {
91202 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
91203- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
91204- + per_cpu_offset(i);
91205+ end = start + PERCPU_ENOUGH_ROOM;
91206
91207 if ((addr >= start) && (addr < end))
91208 return 1;
91209@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
91210 if (!static_obj(lock->key)) {
91211 debug_locks_off();
91212 printk("INFO: trying to register non-static key.\n");
91213+ printk("lock:%pS key:%pS.\n", lock, lock->key);
91214 printk("the code is fine but needs lockdep annotation.\n");
91215 printk("turning off the locking correctness validator.\n");
91216 dump_stack();
91217@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
91218 if (!class)
91219 return 0;
91220 }
91221- debug_atomic_inc((atomic_t *)&class->ops);
91222+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
91223 if (very_verbose(class)) {
91224 printk("\nacquire class [%p] %s", class->key, class->name);
91225 if (class->name_version > 1)
91226diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
91227index a2ee95a..092f0f2 100644
91228--- a/kernel/lockdep_internals.h
91229+++ b/kernel/lockdep_internals.h
91230@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
91231 /*
91232 * Various lockdep statistics:
91233 */
91234-extern atomic_t chain_lookup_hits;
91235-extern atomic_t chain_lookup_misses;
91236-extern atomic_t hardirqs_on_events;
91237-extern atomic_t hardirqs_off_events;
91238-extern atomic_t redundant_hardirqs_on;
91239-extern atomic_t redundant_hardirqs_off;
91240-extern atomic_t softirqs_on_events;
91241-extern atomic_t softirqs_off_events;
91242-extern atomic_t redundant_softirqs_on;
91243-extern atomic_t redundant_softirqs_off;
91244-extern atomic_t nr_unused_locks;
91245-extern atomic_t nr_cyclic_checks;
91246-extern atomic_t nr_cyclic_check_recursions;
91247-extern atomic_t nr_find_usage_forwards_checks;
91248-extern atomic_t nr_find_usage_forwards_recursions;
91249-extern atomic_t nr_find_usage_backwards_checks;
91250-extern atomic_t nr_find_usage_backwards_recursions;
91251-# define debug_atomic_inc(ptr) atomic_inc(ptr)
91252-# define debug_atomic_dec(ptr) atomic_dec(ptr)
91253-# define debug_atomic_read(ptr) atomic_read(ptr)
91254+extern atomic_unchecked_t chain_lookup_hits;
91255+extern atomic_unchecked_t chain_lookup_misses;
91256+extern atomic_unchecked_t hardirqs_on_events;
91257+extern atomic_unchecked_t hardirqs_off_events;
91258+extern atomic_unchecked_t redundant_hardirqs_on;
91259+extern atomic_unchecked_t redundant_hardirqs_off;
91260+extern atomic_unchecked_t softirqs_on_events;
91261+extern atomic_unchecked_t softirqs_off_events;
91262+extern atomic_unchecked_t redundant_softirqs_on;
91263+extern atomic_unchecked_t redundant_softirqs_off;
91264+extern atomic_unchecked_t nr_unused_locks;
91265+extern atomic_unchecked_t nr_cyclic_checks;
91266+extern atomic_unchecked_t nr_cyclic_check_recursions;
91267+extern atomic_unchecked_t nr_find_usage_forwards_checks;
91268+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
91269+extern atomic_unchecked_t nr_find_usage_backwards_checks;
91270+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
91271+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
91272+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
91273+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
91274 #else
91275 # define debug_atomic_inc(ptr) do { } while (0)
91276 # define debug_atomic_dec(ptr) do { } while (0)
91277diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
91278index d4aba4f..02a353f 100644
91279--- a/kernel/lockdep_proc.c
91280+++ b/kernel/lockdep_proc.c
91281@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
91282
91283 static void print_name(struct seq_file *m, struct lock_class *class)
91284 {
91285- char str[128];
91286+ char str[KSYM_NAME_LEN];
91287 const char *name = class->name;
91288
91289 if (!name) {
91290diff --git a/kernel/module.c b/kernel/module.c
91291index 4b270e6..2efdb65 100644
91292--- a/kernel/module.c
91293+++ b/kernel/module.c
91294@@ -55,6 +55,7 @@
91295 #include <linux/async.h>
91296 #include <linux/percpu.h>
91297 #include <linux/kmemleak.h>
91298+#include <linux/grsecurity.h>
91299
91300 #define CREATE_TRACE_POINTS
91301 #include <trace/events/module.h>
91302@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
91303 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
91304
91305 /* Bounds of module allocation, for speeding __module_address */
91306-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
91307+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
91308+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
91309
91310 int register_module_notifier(struct notifier_block * nb)
91311 {
91312@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
91313 return true;
91314
91315 list_for_each_entry_rcu(mod, &modules, list) {
91316- struct symsearch arr[] = {
91317+ struct symsearch modarr[] = {
91318 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
91319 NOT_GPL_ONLY, false },
91320 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
91321@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
91322 #endif
91323 };
91324
91325- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
91326+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
91327 return true;
91328 }
91329 return false;
91330@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
91331 void *ptr;
91332 int cpu;
91333
91334- if (align > PAGE_SIZE) {
91335+ if (align-1 >= PAGE_SIZE) {
91336 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
91337 name, align, PAGE_SIZE);
91338 align = PAGE_SIZE;
91339@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
91340 * /sys/module/foo/sections stuff
91341 * J. Corbet <corbet@lwn.net>
91342 */
91343-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
91344+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
91345
91346 static inline bool sect_empty(const Elf_Shdr *sect)
91347 {
91348@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
91349 destroy_params(mod->kp, mod->num_kp);
91350
91351 /* This may be NULL, but that's OK */
91352- module_free(mod, mod->module_init);
91353+ module_free(mod, mod->module_init_rw);
91354+ module_free_exec(mod, mod->module_init_rx);
91355 kfree(mod->args);
91356 if (mod->percpu)
91357 percpu_modfree(mod->percpu);
91358@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
91359 percpu_modfree(mod->refptr);
91360 #endif
91361 /* Free lock-classes: */
91362- lockdep_free_key_range(mod->module_core, mod->core_size);
91363+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
91364+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
91365
91366 /* Finally, free the core (containing the module structure) */
91367- module_free(mod, mod->module_core);
91368+ module_free_exec(mod, mod->module_core_rx);
91369+ module_free(mod, mod->module_core_rw);
91370
91371 #ifdef CONFIG_MPU
91372 update_protections(current->mm);
91373@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
91374 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
91375 int ret = 0;
91376 const struct kernel_symbol *ksym;
91377+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91378+ int is_fs_load = 0;
91379+ int register_filesystem_found = 0;
91380+ char *p;
91381+
91382+ p = strstr(mod->args, "grsec_modharden_fs");
91383+
91384+ if (p) {
91385+ char *endptr = p + strlen("grsec_modharden_fs");
91386+ /* copy \0 as well */
91387+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
91388+ is_fs_load = 1;
91389+ }
91390+#endif
91391+
91392
91393 for (i = 1; i < n; i++) {
91394+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91395+ const char *name = strtab + sym[i].st_name;
91396+
91397+ /* it's a real shame this will never get ripped and copied
91398+ upstream! ;(
91399+ */
91400+ if (is_fs_load && !strcmp(name, "register_filesystem"))
91401+ register_filesystem_found = 1;
91402+#endif
91403 switch (sym[i].st_shndx) {
91404 case SHN_COMMON:
91405 /* We compiled with -fno-common. These are not
91406@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
91407 strtab + sym[i].st_name, mod);
91408 /* Ok if resolved. */
91409 if (ksym) {
91410+ pax_open_kernel();
91411 sym[i].st_value = ksym->value;
91412+ pax_close_kernel();
91413 break;
91414 }
91415
91416@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
91417 secbase = (unsigned long)mod->percpu;
91418 else
91419 secbase = sechdrs[sym[i].st_shndx].sh_addr;
91420+ pax_open_kernel();
91421 sym[i].st_value += secbase;
91422+ pax_close_kernel();
91423 break;
91424 }
91425 }
91426
91427+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91428+ if (is_fs_load && !register_filesystem_found) {
91429+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
91430+ ret = -EPERM;
91431+ }
91432+#endif
91433+
91434 return ret;
91435 }
91436
91437@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
91438 || s->sh_entsize != ~0UL
91439 || strstarts(secstrings + s->sh_name, ".init"))
91440 continue;
91441- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
91442+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91443+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
91444+ else
91445+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
91446 DEBUGP("\t%s\n", secstrings + s->sh_name);
91447 }
91448- if (m == 0)
91449- mod->core_text_size = mod->core_size;
91450 }
91451
91452 DEBUGP("Init section allocation order:\n");
91453@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
91454 || s->sh_entsize != ~0UL
91455 || !strstarts(secstrings + s->sh_name, ".init"))
91456 continue;
91457- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
91458- | INIT_OFFSET_MASK);
91459+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
91460+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
91461+ else
91462+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
91463+ s->sh_entsize |= INIT_OFFSET_MASK;
91464 DEBUGP("\t%s\n", secstrings + s->sh_name);
91465 }
91466- if (m == 0)
91467- mod->init_text_size = mod->init_size;
91468 }
91469 }
91470
91471@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
91472
91473 /* As per nm */
91474 static char elf_type(const Elf_Sym *sym,
91475- Elf_Shdr *sechdrs,
91476- const char *secstrings,
91477- struct module *mod)
91478+ const Elf_Shdr *sechdrs,
91479+ const char *secstrings)
91480 {
91481 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
91482 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
91483@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
91484
91485 /* Put symbol section at end of init part of module. */
91486 symsect->sh_flags |= SHF_ALLOC;
91487- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
91488+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
91489 symindex) | INIT_OFFSET_MASK;
91490 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
91491
91492@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
91493 }
91494
91495 /* Append room for core symbols at end of core part. */
91496- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
91497- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
91498+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
91499+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
91500
91501 /* Put string table section at end of init part of module. */
91502 strsect->sh_flags |= SHF_ALLOC;
91503- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
91504+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
91505 strindex) | INIT_OFFSET_MASK;
91506 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
91507
91508 /* Append room for core symbols' strings at end of core part. */
91509- *pstroffs = mod->core_size;
91510+ *pstroffs = mod->core_size_rx;
91511 __set_bit(0, strmap);
91512- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
91513+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
91514
91515 return symoffs;
91516 }
91517@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
91518 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
91519 mod->strtab = (void *)sechdrs[strindex].sh_addr;
91520
91521+ pax_open_kernel();
91522+
91523 /* Set types up while we still have access to sections. */
91524 for (i = 0; i < mod->num_symtab; i++)
91525 mod->symtab[i].st_info
91526- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
91527+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
91528
91529- mod->core_symtab = dst = mod->module_core + symoffs;
91530+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
91531 src = mod->symtab;
91532 *dst = *src;
91533 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
91534@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
91535 }
91536 mod->core_num_syms = ndst;
91537
91538- mod->core_strtab = s = mod->module_core + stroffs;
91539+ mod->core_strtab = s = mod->module_core_rx + stroffs;
91540 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
91541 if (test_bit(i, strmap))
91542 *++s = mod->strtab[i];
91543+
91544+ pax_close_kernel();
91545 }
91546 #else
91547 static inline unsigned long layout_symtab(struct module *mod,
91548@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
91549 #endif
91550 }
91551
91552-static void *module_alloc_update_bounds(unsigned long size)
91553+static void *module_alloc_update_bounds_rw(unsigned long size)
91554 {
91555 void *ret = module_alloc(size);
91556
91557 if (ret) {
91558 /* Update module bounds. */
91559- if ((unsigned long)ret < module_addr_min)
91560- module_addr_min = (unsigned long)ret;
91561- if ((unsigned long)ret + size > module_addr_max)
91562- module_addr_max = (unsigned long)ret + size;
91563+ if ((unsigned long)ret < module_addr_min_rw)
91564+ module_addr_min_rw = (unsigned long)ret;
91565+ if ((unsigned long)ret + size > module_addr_max_rw)
91566+ module_addr_max_rw = (unsigned long)ret + size;
91567+ }
91568+ return ret;
91569+}
91570+
91571+static void *module_alloc_update_bounds_rx(unsigned long size)
91572+{
91573+ void *ret = module_alloc_exec(size);
91574+
91575+ if (ret) {
91576+ /* Update module bounds. */
91577+ if ((unsigned long)ret < module_addr_min_rx)
91578+ module_addr_min_rx = (unsigned long)ret;
91579+ if ((unsigned long)ret + size > module_addr_max_rx)
91580+ module_addr_max_rx = (unsigned long)ret + size;
91581 }
91582 return ret;
91583 }
91584@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
91585 unsigned int i;
91586
91587 /* only scan the sections containing data */
91588- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
91589- (unsigned long)mod->module_core,
91590+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
91591+ (unsigned long)mod->module_core_rw,
91592 sizeof(struct module), GFP_KERNEL);
91593
91594 for (i = 1; i < hdr->e_shnum; i++) {
91595@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
91596 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
91597 continue;
91598
91599- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
91600- (unsigned long)mod->module_core,
91601+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
91602+ (unsigned long)mod->module_core_rw,
91603 sechdrs[i].sh_size, GFP_KERNEL);
91604 }
91605 }
91606@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
91607 Elf_Ehdr *hdr;
91608 Elf_Shdr *sechdrs;
91609 char *secstrings, *args, *modmagic, *strtab = NULL;
91610- char *staging;
91611+ char *staging, *license;
91612 unsigned int i;
91613 unsigned int symindex = 0;
91614 unsigned int strindex = 0;
91615@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
91616 goto free_hdr;
91617 }
91618
91619+ license = get_modinfo(sechdrs, infoindex, "license");
91620+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
91621+ if (!license || !license_is_gpl_compatible(license)) {
91622+ err = -ENOEXEC;
91623+ goto free_hdr;
91624+ }
91625+#endif
91626+
91627 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
91628 /* This is allowed: modprobe --force will invalidate it. */
91629 if (!modmagic) {
91630@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
91631 secstrings, &stroffs, strmap);
91632
91633 /* Do the allocs. */
91634- ptr = module_alloc_update_bounds(mod->core_size);
91635+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
91636 /*
91637 * The pointer to this block is stored in the module structure
91638 * which is inside the block. Just mark it as not being a
91639@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
91640 err = -ENOMEM;
91641 goto free_percpu;
91642 }
91643- memset(ptr, 0, mod->core_size);
91644- mod->module_core = ptr;
91645+ memset(ptr, 0, mod->core_size_rw);
91646+ mod->module_core_rw = ptr;
91647
91648- ptr = module_alloc_update_bounds(mod->init_size);
91649+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
91650 /*
91651 * The pointer to this block is stored in the module structure
91652 * which is inside the block. This block doesn't need to be
91653 * scanned as it contains data and code that will be freed
91654 * after the module is initialized.
91655 */
91656- kmemleak_ignore(ptr);
91657- if (!ptr && mod->init_size) {
91658+ kmemleak_not_leak(ptr);
91659+ if (!ptr && mod->init_size_rw) {
91660 err = -ENOMEM;
91661- goto free_core;
91662+ goto free_core_rw;
91663 }
91664- memset(ptr, 0, mod->init_size);
91665- mod->module_init = ptr;
91666+ memset(ptr, 0, mod->init_size_rw);
91667+ mod->module_init_rw = ptr;
91668+
91669+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
91670+ kmemleak_not_leak(ptr);
91671+ if (!ptr) {
91672+ err = -ENOMEM;
91673+ goto free_init_rw;
91674+ }
91675+
91676+ pax_open_kernel();
91677+ memset(ptr, 0, mod->core_size_rx);
91678+ pax_close_kernel();
91679+ mod->module_core_rx = ptr;
91680+
91681+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
91682+ kmemleak_not_leak(ptr);
91683+ if (!ptr && mod->init_size_rx) {
91684+ err = -ENOMEM;
91685+ goto free_core_rx;
91686+ }
91687+
91688+ pax_open_kernel();
91689+ memset(ptr, 0, mod->init_size_rx);
91690+ pax_close_kernel();
91691+ mod->module_init_rx = ptr;
91692
91693 /* Transfer each section which specifies SHF_ALLOC */
91694 DEBUGP("final section addresses:\n");
91695@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
91696 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
91697 continue;
91698
91699- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
91700- dest = mod->module_init
91701- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
91702- else
91703- dest = mod->module_core + sechdrs[i].sh_entsize;
91704+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
91705+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
91706+ dest = mod->module_init_rw
91707+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
91708+ else
91709+ dest = mod->module_init_rx
91710+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
91711+ } else {
91712+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
91713+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
91714+ else
91715+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
91716+ }
91717
91718- if (sechdrs[i].sh_type != SHT_NOBITS)
91719- memcpy(dest, (void *)sechdrs[i].sh_addr,
91720- sechdrs[i].sh_size);
91721+ if (sechdrs[i].sh_type != SHT_NOBITS) {
91722+
91723+#ifdef CONFIG_PAX_KERNEXEC
91724+#ifdef CONFIG_X86_64
91725+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
91726+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
91727+#endif
91728+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
91729+ pax_open_kernel();
91730+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
91731+ pax_close_kernel();
91732+ } else
91733+#endif
91734+
91735+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
91736+ }
91737 /* Update sh_addr to point to copy in image. */
91738- sechdrs[i].sh_addr = (unsigned long)dest;
91739+
91740+#ifdef CONFIG_PAX_KERNEXEC
91741+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
91742+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
91743+ else
91744+#endif
91745+
91746+ sechdrs[i].sh_addr = (unsigned long)dest;
91747 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
91748 }
91749 /* Module has been moved. */
91750@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
91751 mod->name);
91752 if (!mod->refptr) {
91753 err = -ENOMEM;
91754- goto free_init;
91755+ goto free_init_rx;
91756 }
91757 #endif
91758 /* Now we've moved module, initialize linked lists, etc. */
91759@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
91760 goto free_unload;
91761
91762 /* Set up license info based on the info section */
91763- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
91764+ set_license(mod, license);
91765
91766 /*
91767 * ndiswrapper is under GPL by itself, but loads proprietary modules.
91768@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
91769 /* Set up MODINFO_ATTR fields */
91770 setup_modinfo(mod, sechdrs, infoindex);
91771
91772+ mod->args = args;
91773+
91774+#ifdef CONFIG_GRKERNSEC_MODHARDEN
91775+ {
91776+ char *p, *p2;
91777+
91778+ if (strstr(mod->args, "grsec_modharden_netdev")) {
91779+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
91780+ err = -EPERM;
91781+ goto cleanup;
91782+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
91783+ p += strlen("grsec_modharden_normal");
91784+ p2 = strstr(p, "_");
91785+ if (p2) {
91786+ *p2 = '\0';
91787+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
91788+ *p2 = '_';
91789+ }
91790+ err = -EPERM;
91791+ goto cleanup;
91792+ }
91793+ }
91794+#endif
91795+
91796+
91797 /* Fix up syms, so that st_value is a pointer to location. */
91798 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
91799 mod);
91800@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
91801
91802 /* Now do relocations. */
91803 for (i = 1; i < hdr->e_shnum; i++) {
91804- const char *strtab = (char *)sechdrs[strindex].sh_addr;
91805 unsigned int info = sechdrs[i].sh_info;
91806+ strtab = (char *)sechdrs[strindex].sh_addr;
91807
91808 /* Not a valid relocation section? */
91809 if (info >= hdr->e_shnum)
91810@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
91811 * Do it before processing of module parameters, so the module
91812 * can provide parameter accessor functions of its own.
91813 */
91814- if (mod->module_init)
91815- flush_icache_range((unsigned long)mod->module_init,
91816- (unsigned long)mod->module_init
91817- + mod->init_size);
91818- flush_icache_range((unsigned long)mod->module_core,
91819- (unsigned long)mod->module_core + mod->core_size);
91820+ if (mod->module_init_rx)
91821+ flush_icache_range((unsigned long)mod->module_init_rx,
91822+ (unsigned long)mod->module_init_rx
91823+ + mod->init_size_rx);
91824+ flush_icache_range((unsigned long)mod->module_core_rx,
91825+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
91826
91827 set_fs(old_fs);
91828
91829- mod->args = args;
91830 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
91831 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
91832 mod->name);
91833@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
91834 free_unload:
91835 module_unload_free(mod);
91836 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
91837+ free_init_rx:
91838 percpu_modfree(mod->refptr);
91839- free_init:
91840 #endif
91841- module_free(mod, mod->module_init);
91842- free_core:
91843- module_free(mod, mod->module_core);
91844+ module_free_exec(mod, mod->module_init_rx);
91845+ free_core_rx:
91846+ module_free_exec(mod, mod->module_core_rx);
91847+ free_init_rw:
91848+ module_free(mod, mod->module_init_rw);
91849+ free_core_rw:
91850+ module_free(mod, mod->module_core_rw);
91851 /* mod will be freed with core. Don't access it beyond this line! */
91852 free_percpu:
91853 if (percpu)
91854@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
91855 mod->symtab = mod->core_symtab;
91856 mod->strtab = mod->core_strtab;
91857 #endif
91858- module_free(mod, mod->module_init);
91859- mod->module_init = NULL;
91860- mod->init_size = 0;
91861- mod->init_text_size = 0;
91862+ module_free(mod, mod->module_init_rw);
91863+ module_free_exec(mod, mod->module_init_rx);
91864+ mod->module_init_rw = NULL;
91865+ mod->module_init_rx = NULL;
91866+ mod->init_size_rw = 0;
91867+ mod->init_size_rx = 0;
91868 mutex_unlock(&module_mutex);
91869
91870 return 0;
91871@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
91872 unsigned long nextval;
91873
91874 /* At worse, next value is at end of module */
91875- if (within_module_init(addr, mod))
91876- nextval = (unsigned long)mod->module_init+mod->init_text_size;
91877+ if (within_module_init_rx(addr, mod))
91878+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
91879+ else if (within_module_init_rw(addr, mod))
91880+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
91881+ else if (within_module_core_rx(addr, mod))
91882+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
91883+ else if (within_module_core_rw(addr, mod))
91884+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
91885 else
91886- nextval = (unsigned long)mod->module_core+mod->core_text_size;
91887+ return NULL;
91888
91889 /* Scan for closest preceeding symbol, and next symbol. (ELF
91890 starts real symbols at 1). */
91891@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
91892 char buf[8];
91893
91894 seq_printf(m, "%s %u",
91895- mod->name, mod->init_size + mod->core_size);
91896+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
91897 print_unload_info(m, mod);
91898
91899 /* Informative for users. */
91900@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
91901 mod->state == MODULE_STATE_COMING ? "Loading":
91902 "Live");
91903 /* Used by oprofile and other similar tools. */
91904- seq_printf(m, " 0x%p", mod->module_core);
91905+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
91906
91907 /* Taints info */
91908 if (mod->taints)
91909@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
91910
91911 static int __init proc_modules_init(void)
91912 {
91913+#ifndef CONFIG_GRKERNSEC_HIDESYM
91914+#ifdef CONFIG_GRKERNSEC_PROC_USER
91915+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91916+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
91917+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
91918+#else
91919 proc_create("modules", 0, NULL, &proc_modules_operations);
91920+#endif
91921+#else
91922+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
91923+#endif
91924 return 0;
91925 }
91926 module_init(proc_modules_init);
91927@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
91928 {
91929 struct module *mod;
91930
91931- if (addr < module_addr_min || addr > module_addr_max)
91932+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
91933+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
91934 return NULL;
91935
91936 list_for_each_entry_rcu(mod, &modules, list)
91937- if (within_module_core(addr, mod)
91938- || within_module_init(addr, mod))
91939+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
91940 return mod;
91941 return NULL;
91942 }
91943@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
91944 */
91945 struct module *__module_text_address(unsigned long addr)
91946 {
91947- struct module *mod = __module_address(addr);
91948+ struct module *mod;
91949+
91950+#ifdef CONFIG_X86_32
91951+ addr = ktla_ktva(addr);
91952+#endif
91953+
91954+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
91955+ return NULL;
91956+
91957+ mod = __module_address(addr);
91958+
91959 if (mod) {
91960 /* Make sure it's within the text section. */
91961- if (!within(addr, mod->module_init, mod->init_text_size)
91962- && !within(addr, mod->module_core, mod->core_text_size))
91963+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
91964 mod = NULL;
91965 }
91966 return mod;
91967diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
91968index ec815a9..fe46e99 100644
91969--- a/kernel/mutex-debug.c
91970+++ b/kernel/mutex-debug.c
91971@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
91972 }
91973
91974 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91975- struct thread_info *ti)
91976+ struct task_struct *task)
91977 {
91978 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
91979
91980 /* Mark the current thread as blocked on the lock: */
91981- ti->task->blocked_on = waiter;
91982+ task->blocked_on = waiter;
91983 }
91984
91985 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
91986- struct thread_info *ti)
91987+ struct task_struct *task)
91988 {
91989 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
91990- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
91991- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
91992- ti->task->blocked_on = NULL;
91993+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
91994+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
91995+ task->blocked_on = NULL;
91996
91997 list_del_init(&waiter->list);
91998 waiter->task = NULL;
91999@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
92000 return;
92001
92002 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
92003- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
92004+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
92005 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
92006 mutex_clear_owner(lock);
92007 }
92008diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
92009index 6b2d735..372d3c4 100644
92010--- a/kernel/mutex-debug.h
92011+++ b/kernel/mutex-debug.h
92012@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
92013 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
92014 extern void debug_mutex_add_waiter(struct mutex *lock,
92015 struct mutex_waiter *waiter,
92016- struct thread_info *ti);
92017+ struct task_struct *task);
92018 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
92019- struct thread_info *ti);
92020+ struct task_struct *task);
92021 extern void debug_mutex_unlock(struct mutex *lock);
92022 extern void debug_mutex_init(struct mutex *lock, const char *name,
92023 struct lock_class_key *key);
92024
92025 static inline void mutex_set_owner(struct mutex *lock)
92026 {
92027- lock->owner = current_thread_info();
92028+ lock->owner = current;
92029 }
92030
92031 static inline void mutex_clear_owner(struct mutex *lock)
92032diff --git a/kernel/mutex.c b/kernel/mutex.c
92033index f85644c..5ee9f77 100644
92034--- a/kernel/mutex.c
92035+++ b/kernel/mutex.c
92036@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92037 */
92038
92039 for (;;) {
92040- struct thread_info *owner;
92041+ struct task_struct *owner;
92042
92043 /*
92044 * If we own the BKL, then don't spin. The owner of
92045@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92046 spin_lock_mutex(&lock->wait_lock, flags);
92047
92048 debug_mutex_lock_common(lock, &waiter);
92049- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
92050+ debug_mutex_add_waiter(lock, &waiter, task);
92051
92052 /* add waiting tasks to the end of the waitqueue (FIFO): */
92053 list_add_tail(&waiter.list, &lock->wait_list);
92054@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92055 * TASK_UNINTERRUPTIBLE case.)
92056 */
92057 if (unlikely(signal_pending_state(state, task))) {
92058- mutex_remove_waiter(lock, &waiter,
92059- task_thread_info(task));
92060+ mutex_remove_waiter(lock, &waiter, task);
92061 mutex_release(&lock->dep_map, 1, ip);
92062 spin_unlock_mutex(&lock->wait_lock, flags);
92063
92064@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
92065 done:
92066 lock_acquired(&lock->dep_map, ip);
92067 /* got the lock - rejoice! */
92068- mutex_remove_waiter(lock, &waiter, current_thread_info());
92069+ mutex_remove_waiter(lock, &waiter, task);
92070 mutex_set_owner(lock);
92071
92072 /* set it to 0 if there are no waiters left: */
92073diff --git a/kernel/mutex.h b/kernel/mutex.h
92074index 67578ca..4115fbf 100644
92075--- a/kernel/mutex.h
92076+++ b/kernel/mutex.h
92077@@ -19,7 +19,7 @@
92078 #ifdef CONFIG_SMP
92079 static inline void mutex_set_owner(struct mutex *lock)
92080 {
92081- lock->owner = current_thread_info();
92082+ lock->owner = current;
92083 }
92084
92085 static inline void mutex_clear_owner(struct mutex *lock)
92086diff --git a/kernel/panic.c b/kernel/panic.c
92087index 96b45d0..ff70a46 100644
92088--- a/kernel/panic.c
92089+++ b/kernel/panic.c
92090@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
92091 va_end(args);
92092 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
92093 #ifdef CONFIG_DEBUG_BUGVERBOSE
92094- dump_stack();
92095+ /*
92096+ * Avoid nested stack-dumping if a panic occurs during oops processing
92097+ */
92098+ if (!oops_in_progress)
92099+ dump_stack();
92100 #endif
92101
92102 /*
92103@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
92104 const char *board;
92105
92106 printk(KERN_WARNING "------------[ cut here ]------------\n");
92107- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
92108+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
92109 board = dmi_get_system_info(DMI_PRODUCT_NAME);
92110 if (board)
92111 printk(KERN_WARNING "Hardware name: %s\n", board);
92112@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
92113 */
92114 void __stack_chk_fail(void)
92115 {
92116- panic("stack-protector: Kernel stack is corrupted in: %p\n",
92117+ dump_stack();
92118+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
92119 __builtin_return_address(0));
92120 }
92121 EXPORT_SYMBOL(__stack_chk_fail);
92122diff --git a/kernel/params.c b/kernel/params.c
92123index d656c27..21e452c 100644
92124--- a/kernel/params.c
92125+++ b/kernel/params.c
92126@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
92127 return ret;
92128 }
92129
92130-static struct sysfs_ops module_sysfs_ops = {
92131+static const struct sysfs_ops module_sysfs_ops = {
92132 .show = module_attr_show,
92133 .store = module_attr_store,
92134 };
92135@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
92136 return 0;
92137 }
92138
92139-static struct kset_uevent_ops module_uevent_ops = {
92140+static const struct kset_uevent_ops module_uevent_ops = {
92141 .filter = uevent_filter,
92142 };
92143
92144diff --git a/kernel/perf_event.c b/kernel/perf_event.c
92145index 37ebc14..9c121d9 100644
92146--- a/kernel/perf_event.c
92147+++ b/kernel/perf_event.c
92148@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
92149 */
92150 int sysctl_perf_event_sample_rate __read_mostly = 100000;
92151
92152-static atomic64_t perf_event_id;
92153+static atomic64_unchecked_t perf_event_id;
92154
92155 /*
92156 * Lock for (sysadmin-configurable) event reservations:
92157@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
92158 * In order to keep per-task stats reliable we need to flip the event
92159 * values when we flip the contexts.
92160 */
92161- value = atomic64_read(&next_event->count);
92162- value = atomic64_xchg(&event->count, value);
92163- atomic64_set(&next_event->count, value);
92164+ value = atomic64_read_unchecked(&next_event->count);
92165+ value = atomic64_xchg_unchecked(&event->count, value);
92166+ atomic64_set_unchecked(&next_event->count, value);
92167
92168 swap(event->total_time_enabled, next_event->total_time_enabled);
92169 swap(event->total_time_running, next_event->total_time_running);
92170@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
92171 update_event_times(event);
92172 }
92173
92174- return atomic64_read(&event->count);
92175+ return atomic64_read_unchecked(&event->count);
92176 }
92177
92178 /*
92179@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
92180 values[n++] = 1 + leader->nr_siblings;
92181 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92182 values[n++] = leader->total_time_enabled +
92183- atomic64_read(&leader->child_total_time_enabled);
92184+ atomic64_read_unchecked(&leader->child_total_time_enabled);
92185 }
92186 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92187 values[n++] = leader->total_time_running +
92188- atomic64_read(&leader->child_total_time_running);
92189+ atomic64_read_unchecked(&leader->child_total_time_running);
92190 }
92191
92192 size = n * sizeof(u64);
92193@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
92194 values[n++] = perf_event_read_value(event);
92195 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92196 values[n++] = event->total_time_enabled +
92197- atomic64_read(&event->child_total_time_enabled);
92198+ atomic64_read_unchecked(&event->child_total_time_enabled);
92199 }
92200 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92201 values[n++] = event->total_time_running +
92202- atomic64_read(&event->child_total_time_running);
92203+ atomic64_read_unchecked(&event->child_total_time_running);
92204 }
92205 if (read_format & PERF_FORMAT_ID)
92206 values[n++] = primary_event_id(event);
92207@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
92208 static void perf_event_reset(struct perf_event *event)
92209 {
92210 (void)perf_event_read(event);
92211- atomic64_set(&event->count, 0);
92212+ atomic64_set_unchecked(&event->count, 0);
92213 perf_event_update_userpage(event);
92214 }
92215
92216@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
92217 ++userpg->lock;
92218 barrier();
92219 userpg->index = perf_event_index(event);
92220- userpg->offset = atomic64_read(&event->count);
92221+ userpg->offset = atomic64_read_unchecked(&event->count);
92222 if (event->state == PERF_EVENT_STATE_ACTIVE)
92223- userpg->offset -= atomic64_read(&event->hw.prev_count);
92224+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
92225
92226 userpg->time_enabled = event->total_time_enabled +
92227- atomic64_read(&event->child_total_time_enabled);
92228+ atomic64_read_unchecked(&event->child_total_time_enabled);
92229
92230 userpg->time_running = event->total_time_running +
92231- atomic64_read(&event->child_total_time_running);
92232+ atomic64_read_unchecked(&event->child_total_time_running);
92233
92234 barrier();
92235 ++userpg->lock;
92236@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
92237 u64 values[4];
92238 int n = 0;
92239
92240- values[n++] = atomic64_read(&event->count);
92241+ values[n++] = atomic64_read_unchecked(&event->count);
92242 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
92243 values[n++] = event->total_time_enabled +
92244- atomic64_read(&event->child_total_time_enabled);
92245+ atomic64_read_unchecked(&event->child_total_time_enabled);
92246 }
92247 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
92248 values[n++] = event->total_time_running +
92249- atomic64_read(&event->child_total_time_running);
92250+ atomic64_read_unchecked(&event->child_total_time_running);
92251 }
92252 if (read_format & PERF_FORMAT_ID)
92253 values[n++] = primary_event_id(event);
92254@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
92255 if (leader != event)
92256 leader->pmu->read(leader);
92257
92258- values[n++] = atomic64_read(&leader->count);
92259+ values[n++] = atomic64_read_unchecked(&leader->count);
92260 if (read_format & PERF_FORMAT_ID)
92261 values[n++] = primary_event_id(leader);
92262
92263@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
92264 if (sub != event)
92265 sub->pmu->read(sub);
92266
92267- values[n++] = atomic64_read(&sub->count);
92268+ values[n++] = atomic64_read_unchecked(&sub->count);
92269 if (read_format & PERF_FORMAT_ID)
92270 values[n++] = primary_event_id(sub);
92271
92272@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
92273 * need to add enough zero bytes after the string to handle
92274 * the 64bit alignment we do later.
92275 */
92276- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
92277+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
92278 if (!buf) {
92279 name = strncpy(tmp, "//enomem", sizeof(tmp));
92280 goto got_name;
92281 }
92282- name = d_path(&file->f_path, buf, PATH_MAX);
92283+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
92284 if (IS_ERR(name)) {
92285 name = strncpy(tmp, "//toolong", sizeof(tmp));
92286 goto got_name;
92287@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
92288 {
92289 struct hw_perf_event *hwc = &event->hw;
92290
92291- atomic64_add(nr, &event->count);
92292+ atomic64_add_unchecked(nr, &event->count);
92293
92294 if (!hwc->sample_period)
92295 return;
92296@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
92297 u64 now;
92298
92299 now = cpu_clock(cpu);
92300- prev = atomic64_read(&event->hw.prev_count);
92301- atomic64_set(&event->hw.prev_count, now);
92302- atomic64_add(now - prev, &event->count);
92303+ prev = atomic64_read_unchecked(&event->hw.prev_count);
92304+ atomic64_set_unchecked(&event->hw.prev_count, now);
92305+ atomic64_add_unchecked(now - prev, &event->count);
92306 }
92307
92308 static int cpu_clock_perf_event_enable(struct perf_event *event)
92309@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
92310 struct hw_perf_event *hwc = &event->hw;
92311 int cpu = raw_smp_processor_id();
92312
92313- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
92314+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
92315 perf_swevent_start_hrtimer(event);
92316
92317 return 0;
92318@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
92319 u64 prev;
92320 s64 delta;
92321
92322- prev = atomic64_xchg(&event->hw.prev_count, now);
92323+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
92324 delta = now - prev;
92325- atomic64_add(delta, &event->count);
92326+ atomic64_add_unchecked(delta, &event->count);
92327 }
92328
92329 static int task_clock_perf_event_enable(struct perf_event *event)
92330@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
92331
92332 now = event->ctx->time;
92333
92334- atomic64_set(&hwc->prev_count, now);
92335+ atomic64_set_unchecked(&hwc->prev_count, now);
92336
92337 perf_swevent_start_hrtimer(event);
92338
92339@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
92340 event->parent = parent_event;
92341
92342 event->ns = get_pid_ns(current->nsproxy->pid_ns);
92343- event->id = atomic64_inc_return(&perf_event_id);
92344+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
92345
92346 event->state = PERF_EVENT_STATE_INACTIVE;
92347
92348@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
92349 if (child_event->attr.inherit_stat)
92350 perf_event_read_event(child_event, child);
92351
92352- child_val = atomic64_read(&child_event->count);
92353+ child_val = atomic64_read_unchecked(&child_event->count);
92354
92355 /*
92356 * Add back the child's count to the parent's count:
92357 */
92358- atomic64_add(child_val, &parent_event->count);
92359- atomic64_add(child_event->total_time_enabled,
92360+ atomic64_add_unchecked(child_val, &parent_event->count);
92361+ atomic64_add_unchecked(child_event->total_time_enabled,
92362 &parent_event->child_total_time_enabled);
92363- atomic64_add(child_event->total_time_running,
92364+ atomic64_add_unchecked(child_event->total_time_running,
92365 &parent_event->child_total_time_running);
92366
92367 /*
92368diff --git a/kernel/pid.c b/kernel/pid.c
92369index fce7198..4f23a7e 100644
92370--- a/kernel/pid.c
92371+++ b/kernel/pid.c
92372@@ -33,6 +33,7 @@
92373 #include <linux/rculist.h>
92374 #include <linux/bootmem.h>
92375 #include <linux/hash.h>
92376+#include <linux/security.h>
92377 #include <linux/pid_namespace.h>
92378 #include <linux/init_task.h>
92379 #include <linux/syscalls.h>
92380@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
92381
92382 int pid_max = PID_MAX_DEFAULT;
92383
92384-#define RESERVED_PIDS 300
92385+#define RESERVED_PIDS 500
92386
92387 int pid_max_min = RESERVED_PIDS + 1;
92388 int pid_max_max = PID_MAX_LIMIT;
92389@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
92390 */
92391 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
92392 {
92393- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92394+ struct task_struct *task;
92395+
92396+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
92397+
92398+ if (gr_pid_is_chrooted(task))
92399+ return NULL;
92400+
92401+ return task;
92402 }
92403
92404 struct task_struct *find_task_by_vpid(pid_t vnr)
92405@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
92406 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
92407 }
92408
92409+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
92410+{
92411+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
92412+}
92413+
92414 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
92415 {
92416 struct pid *pid;
92417diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
92418index 5c9dc22..d271117 100644
92419--- a/kernel/posix-cpu-timers.c
92420+++ b/kernel/posix-cpu-timers.c
92421@@ -6,6 +6,7 @@
92422 #include <linux/posix-timers.h>
92423 #include <linux/errno.h>
92424 #include <linux/math64.h>
92425+#include <linux/security.h>
92426 #include <asm/uaccess.h>
92427 #include <linux/kernel_stat.h>
92428 #include <trace/events/timer.h>
92429@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
92430
92431 static __init int init_posix_cpu_timers(void)
92432 {
92433- struct k_clock process = {
92434+ static struct k_clock process = {
92435 .clock_getres = process_cpu_clock_getres,
92436 .clock_get = process_cpu_clock_get,
92437 .clock_set = do_posix_clock_nosettime,
92438@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
92439 .nsleep = process_cpu_nsleep,
92440 .nsleep_restart = process_cpu_nsleep_restart,
92441 };
92442- struct k_clock thread = {
92443+ static struct k_clock thread = {
92444 .clock_getres = thread_cpu_clock_getres,
92445 .clock_get = thread_cpu_clock_get,
92446 .clock_set = do_posix_clock_nosettime,
92447diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
92448index 5e76d22..cf1baeb 100644
92449--- a/kernel/posix-timers.c
92450+++ b/kernel/posix-timers.c
92451@@ -42,6 +42,7 @@
92452 #include <linux/compiler.h>
92453 #include <linux/idr.h>
92454 #include <linux/posix-timers.h>
92455+#include <linux/grsecurity.h>
92456 #include <linux/syscalls.h>
92457 #include <linux/wait.h>
92458 #include <linux/workqueue.h>
92459@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
92460 * which we beg off on and pass to do_sys_settimeofday().
92461 */
92462
92463-static struct k_clock posix_clocks[MAX_CLOCKS];
92464+static struct k_clock *posix_clocks[MAX_CLOCKS];
92465
92466 /*
92467 * These ones are defined below.
92468@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
92469 */
92470 #define CLOCK_DISPATCH(clock, call, arglist) \
92471 ((clock) < 0 ? posix_cpu_##call arglist : \
92472- (posix_clocks[clock].call != NULL \
92473- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
92474+ (posix_clocks[clock]->call != NULL \
92475+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
92476
92477 /*
92478 * Default clock hook functions when the struct k_clock passed
92479@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
92480 struct timespec *tp)
92481 {
92482 tp->tv_sec = 0;
92483- tp->tv_nsec = posix_clocks[which_clock].res;
92484+ tp->tv_nsec = posix_clocks[which_clock]->res;
92485 return 0;
92486 }
92487
92488@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
92489 return 0;
92490 if ((unsigned) which_clock >= MAX_CLOCKS)
92491 return 1;
92492- if (posix_clocks[which_clock].clock_getres != NULL)
92493+ if (posix_clocks[which_clock] == NULL)
92494 return 0;
92495- if (posix_clocks[which_clock].res != 0)
92496+ if (posix_clocks[which_clock]->clock_getres != NULL)
92497+ return 0;
92498+ if (posix_clocks[which_clock]->res != 0)
92499 return 0;
92500 return 1;
92501 }
92502@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
92503 */
92504 static __init int init_posix_timers(void)
92505 {
92506- struct k_clock clock_realtime = {
92507+ static struct k_clock clock_realtime = {
92508 .clock_getres = hrtimer_get_res,
92509 };
92510- struct k_clock clock_monotonic = {
92511+ static struct k_clock clock_monotonic = {
92512 .clock_getres = hrtimer_get_res,
92513 .clock_get = posix_ktime_get_ts,
92514 .clock_set = do_posix_clock_nosettime,
92515 };
92516- struct k_clock clock_monotonic_raw = {
92517+ static struct k_clock clock_monotonic_raw = {
92518 .clock_getres = hrtimer_get_res,
92519 .clock_get = posix_get_monotonic_raw,
92520 .clock_set = do_posix_clock_nosettime,
92521 .timer_create = no_timer_create,
92522 .nsleep = no_nsleep,
92523 };
92524- struct k_clock clock_realtime_coarse = {
92525+ static struct k_clock clock_realtime_coarse = {
92526 .clock_getres = posix_get_coarse_res,
92527 .clock_get = posix_get_realtime_coarse,
92528 .clock_set = do_posix_clock_nosettime,
92529 .timer_create = no_timer_create,
92530 .nsleep = no_nsleep,
92531 };
92532- struct k_clock clock_monotonic_coarse = {
92533+ static struct k_clock clock_monotonic_coarse = {
92534 .clock_getres = posix_get_coarse_res,
92535 .clock_get = posix_get_monotonic_coarse,
92536 .clock_set = do_posix_clock_nosettime,
92537@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
92538 .nsleep = no_nsleep,
92539 };
92540
92541+ pax_track_stack();
92542+
92543 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
92544 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
92545 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
92546@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
92547 return;
92548 }
92549
92550- posix_clocks[clock_id] = *new_clock;
92551+ posix_clocks[clock_id] = new_clock;
92552 }
92553 EXPORT_SYMBOL_GPL(register_posix_clock);
92554
92555@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
92556 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
92557 return -EFAULT;
92558
92559+ /* only the CLOCK_REALTIME clock can be set, all other clocks
92560+ have their clock_set fptr set to a nosettime dummy function
92561+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
92562+ call common_clock_set, which calls do_sys_settimeofday, which
92563+ we hook
92564+ */
92565+
92566 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
92567 }
92568
92569diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
92570index 04a9e90..bc355aa 100644
92571--- a/kernel/power/hibernate.c
92572+++ b/kernel/power/hibernate.c
92573@@ -48,14 +48,14 @@ enum {
92574
92575 static int hibernation_mode = HIBERNATION_SHUTDOWN;
92576
92577-static struct platform_hibernation_ops *hibernation_ops;
92578+static const struct platform_hibernation_ops *hibernation_ops;
92579
92580 /**
92581 * hibernation_set_ops - set the global hibernate operations
92582 * @ops: the hibernation operations to use in subsequent hibernation transitions
92583 */
92584
92585-void hibernation_set_ops(struct platform_hibernation_ops *ops)
92586+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
92587 {
92588 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
92589 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
92590diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
92591index e8b3370..484c2e4 100644
92592--- a/kernel/power/poweroff.c
92593+++ b/kernel/power/poweroff.c
92594@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
92595 .enable_mask = SYSRQ_ENABLE_BOOT,
92596 };
92597
92598-static int pm_sysrq_init(void)
92599+static int __init pm_sysrq_init(void)
92600 {
92601 register_sysrq_key('o', &sysrq_poweroff_op);
92602 return 0;
92603diff --git a/kernel/power/process.c b/kernel/power/process.c
92604index e7cd671..56d5f459 100644
92605--- a/kernel/power/process.c
92606+++ b/kernel/power/process.c
92607@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
92608 struct timeval start, end;
92609 u64 elapsed_csecs64;
92610 unsigned int elapsed_csecs;
92611+ bool timedout = false;
92612
92613 do_gettimeofday(&start);
92614
92615 end_time = jiffies + TIMEOUT;
92616 do {
92617 todo = 0;
92618+ if (time_after(jiffies, end_time))
92619+ timedout = true;
92620 read_lock(&tasklist_lock);
92621 do_each_thread(g, p) {
92622 if (frozen(p) || !freezeable(p))
92623@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
92624 * It is "frozen enough". If the task does wake
92625 * up, it will immediately call try_to_freeze.
92626 */
92627- if (!task_is_stopped_or_traced(p) &&
92628- !freezer_should_skip(p))
92629+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
92630 todo++;
92631+ if (timedout) {
92632+ printk(KERN_ERR "Task refusing to freeze:\n");
92633+ sched_show_task(p);
92634+ }
92635+ }
92636 } while_each_thread(g, p);
92637 read_unlock(&tasklist_lock);
92638 yield(); /* Yield is okay here */
92639- if (time_after(jiffies, end_time))
92640- break;
92641- } while (todo);
92642+ } while (todo && !timedout);
92643
92644 do_gettimeofday(&end);
92645 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
92646diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
92647index 40dd021..fb30ceb 100644
92648--- a/kernel/power/suspend.c
92649+++ b/kernel/power/suspend.c
92650@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
92651 [PM_SUSPEND_MEM] = "mem",
92652 };
92653
92654-static struct platform_suspend_ops *suspend_ops;
92655+static const struct platform_suspend_ops *suspend_ops;
92656
92657 /**
92658 * suspend_set_ops - Set the global suspend method table.
92659 * @ops: Pointer to ops structure.
92660 */
92661-void suspend_set_ops(struct platform_suspend_ops *ops)
92662+void suspend_set_ops(const struct platform_suspend_ops *ops)
92663 {
92664 mutex_lock(&pm_mutex);
92665 suspend_ops = ops;
92666diff --git a/kernel/printk.c b/kernel/printk.c
92667index 4cade47..4d17900 100644
92668--- a/kernel/printk.c
92669+++ b/kernel/printk.c
92670@@ -33,6 +33,7 @@
92671 #include <linux/bootmem.h>
92672 #include <linux/syscalls.h>
92673 #include <linux/kexec.h>
92674+#include <linux/syslog.h>
92675
92676 #include <asm/uaccess.h>
92677
92678@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
92679 }
92680 #endif
92681
92682-/*
92683- * Commands to do_syslog:
92684- *
92685- * 0 -- Close the log. Currently a NOP.
92686- * 1 -- Open the log. Currently a NOP.
92687- * 2 -- Read from the log.
92688- * 3 -- Read all messages remaining in the ring buffer.
92689- * 4 -- Read and clear all messages remaining in the ring buffer
92690- * 5 -- Clear ring buffer.
92691- * 6 -- Disable printk's to console
92692- * 7 -- Enable printk's to console
92693- * 8 -- Set level of messages printed to console
92694- * 9 -- Return number of unread characters in the log buffer
92695- * 10 -- Return size of the log buffer
92696- */
92697-int do_syslog(int type, char __user *buf, int len)
92698+int do_syslog(int type, char __user *buf, int len, bool from_file)
92699 {
92700 unsigned i, j, limit, count;
92701 int do_clear = 0;
92702 char c;
92703 int error = 0;
92704
92705- error = security_syslog(type);
92706+#ifdef CONFIG_GRKERNSEC_DMESG
92707+ if (grsec_enable_dmesg &&
92708+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
92709+ !capable(CAP_SYS_ADMIN))
92710+ return -EPERM;
92711+#endif
92712+
92713+ error = security_syslog(type, from_file);
92714 if (error)
92715 return error;
92716
92717 switch (type) {
92718- case 0: /* Close log */
92719+ case SYSLOG_ACTION_CLOSE: /* Close log */
92720 break;
92721- case 1: /* Open log */
92722+ case SYSLOG_ACTION_OPEN: /* Open log */
92723 break;
92724- case 2: /* Read from log */
92725+ case SYSLOG_ACTION_READ: /* Read from log */
92726 error = -EINVAL;
92727 if (!buf || len < 0)
92728 goto out;
92729@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
92730 if (!error)
92731 error = i;
92732 break;
92733- case 4: /* Read/clear last kernel messages */
92734+ /* Read/clear last kernel messages */
92735+ case SYSLOG_ACTION_READ_CLEAR:
92736 do_clear = 1;
92737 /* FALL THRU */
92738- case 3: /* Read last kernel messages */
92739+ /* Read last kernel messages */
92740+ case SYSLOG_ACTION_READ_ALL:
92741 error = -EINVAL;
92742 if (!buf || len < 0)
92743 goto out;
92744@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
92745 }
92746 }
92747 break;
92748- case 5: /* Clear ring buffer */
92749+ /* Clear ring buffer */
92750+ case SYSLOG_ACTION_CLEAR:
92751 logged_chars = 0;
92752 break;
92753- case 6: /* Disable logging to console */
92754+ /* Disable logging to console */
92755+ case SYSLOG_ACTION_CONSOLE_OFF:
92756 if (saved_console_loglevel == -1)
92757 saved_console_loglevel = console_loglevel;
92758 console_loglevel = minimum_console_loglevel;
92759 break;
92760- case 7: /* Enable logging to console */
92761+ /* Enable logging to console */
92762+ case SYSLOG_ACTION_CONSOLE_ON:
92763 if (saved_console_loglevel != -1) {
92764 console_loglevel = saved_console_loglevel;
92765 saved_console_loglevel = -1;
92766 }
92767 break;
92768- case 8: /* Set level of messages printed to console */
92769+ /* Set level of messages printed to console */
92770+ case SYSLOG_ACTION_CONSOLE_LEVEL:
92771 error = -EINVAL;
92772 if (len < 1 || len > 8)
92773 goto out;
92774@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
92775 saved_console_loglevel = -1;
92776 error = 0;
92777 break;
92778- case 9: /* Number of chars in the log buffer */
92779+ /* Number of chars in the log buffer */
92780+ case SYSLOG_ACTION_SIZE_UNREAD:
92781 error = log_end - log_start;
92782 break;
92783- case 10: /* Size of the log buffer */
92784+ /* Size of the log buffer */
92785+ case SYSLOG_ACTION_SIZE_BUFFER:
92786 error = log_buf_len;
92787 break;
92788 default:
92789@@ -415,7 +416,7 @@ out:
92790
92791 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
92792 {
92793- return do_syslog(type, buf, len);
92794+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
92795 }
92796
92797 /*
92798diff --git a/kernel/profile.c b/kernel/profile.c
92799index dfadc5b..7f59404 100644
92800--- a/kernel/profile.c
92801+++ b/kernel/profile.c
92802@@ -39,7 +39,7 @@ struct profile_hit {
92803 /* Oprofile timer tick hook */
92804 static int (*timer_hook)(struct pt_regs *) __read_mostly;
92805
92806-static atomic_t *prof_buffer;
92807+static atomic_unchecked_t *prof_buffer;
92808 static unsigned long prof_len, prof_shift;
92809
92810 int prof_on __read_mostly;
92811@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
92812 hits[i].pc = 0;
92813 continue;
92814 }
92815- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92816+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92817 hits[i].hits = hits[i].pc = 0;
92818 }
92819 }
92820@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
92821 * Add the current hit(s) and flush the write-queue out
92822 * to the global buffer:
92823 */
92824- atomic_add(nr_hits, &prof_buffer[pc]);
92825+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
92826 for (i = 0; i < NR_PROFILE_HIT; ++i) {
92827- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
92828+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
92829 hits[i].pc = hits[i].hits = 0;
92830 }
92831 out:
92832@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
92833 if (prof_on != type || !prof_buffer)
92834 return;
92835 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
92836- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92837+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
92838 }
92839 #endif /* !CONFIG_SMP */
92840 EXPORT_SYMBOL_GPL(profile_hits);
92841@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
92842 return -EFAULT;
92843 buf++; p++; count--; read++;
92844 }
92845- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
92846+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
92847 if (copy_to_user(buf, (void *)pnt, count))
92848 return -EFAULT;
92849 read += count;
92850@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
92851 }
92852 #endif
92853 profile_discard_flip_buffers();
92854- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
92855+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
92856 return count;
92857 }
92858
92859diff --git a/kernel/ptrace.c b/kernel/ptrace.c
92860index 05625f6..733bf70 100644
92861--- a/kernel/ptrace.c
92862+++ b/kernel/ptrace.c
92863@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
92864 return ret;
92865 }
92866
92867-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
92868+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
92869+ unsigned int log)
92870 {
92871 const struct cred *cred = current_cred(), *tcred;
92872
92873@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
92874 cred->gid != tcred->egid ||
92875 cred->gid != tcred->sgid ||
92876 cred->gid != tcred->gid) &&
92877- !capable(CAP_SYS_PTRACE)) {
92878+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
92879+ (log && !capable(CAP_SYS_PTRACE)))
92880+ ) {
92881 rcu_read_unlock();
92882 return -EPERM;
92883 }
92884@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
92885 smp_rmb();
92886 if (task->mm)
92887 dumpable = get_dumpable(task->mm);
92888- if (!dumpable && !capable(CAP_SYS_PTRACE))
92889+ if (!dumpable &&
92890+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
92891+ (log && !capable(CAP_SYS_PTRACE))))
92892 return -EPERM;
92893
92894 return security_ptrace_access_check(task, mode);
92895@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
92896 {
92897 int err;
92898 task_lock(task);
92899- err = __ptrace_may_access(task, mode);
92900+ err = __ptrace_may_access(task, mode, 0);
92901+ task_unlock(task);
92902+ return !err;
92903+}
92904+
92905+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
92906+{
92907+ int err;
92908+ task_lock(task);
92909+ err = __ptrace_may_access(task, mode, 1);
92910 task_unlock(task);
92911 return !err;
92912 }
92913@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
92914 goto out;
92915
92916 task_lock(task);
92917- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
92918+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
92919 task_unlock(task);
92920 if (retval)
92921 goto unlock_creds;
92922@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
92923 goto unlock_tasklist;
92924
92925 task->ptrace = PT_PTRACED;
92926- if (capable(CAP_SYS_PTRACE))
92927+ if (capable_nolog(CAP_SYS_PTRACE))
92928 task->ptrace |= PT_PTRACE_CAP;
92929
92930 __ptrace_link(task, current);
92931@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
92932 {
92933 int copied = 0;
92934
92935+ pax_track_stack();
92936+
92937 while (len > 0) {
92938 char buf[128];
92939 int this_len, retval;
92940@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
92941 {
92942 int copied = 0;
92943
92944+ pax_track_stack();
92945+
92946 while (len > 0) {
92947 char buf[128];
92948 int this_len, retval;
92949@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
92950 int ret = -EIO;
92951 siginfo_t siginfo;
92952
92953+ pax_track_stack();
92954+
92955 switch (request) {
92956 case PTRACE_PEEKTEXT:
92957 case PTRACE_PEEKDATA:
92958@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
92959 ret = ptrace_setoptions(child, data);
92960 break;
92961 case PTRACE_GETEVENTMSG:
92962- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
92963+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
92964 break;
92965
92966 case PTRACE_GETSIGINFO:
92967 ret = ptrace_getsiginfo(child, &siginfo);
92968 if (!ret)
92969- ret = copy_siginfo_to_user((siginfo_t __user *) data,
92970+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
92971 &siginfo);
92972 break;
92973
92974 case PTRACE_SETSIGINFO:
92975- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
92976+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
92977 sizeof siginfo))
92978 ret = -EFAULT;
92979 else
92980@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
92981 goto out;
92982 }
92983
92984+ if (gr_handle_ptrace(child, request)) {
92985+ ret = -EPERM;
92986+ goto out_put_task_struct;
92987+ }
92988+
92989 if (request == PTRACE_ATTACH) {
92990 ret = ptrace_attach(child);
92991 /*
92992 * Some architectures need to do book-keeping after
92993 * a ptrace attach.
92994 */
92995- if (!ret)
92996+ if (!ret) {
92997 arch_ptrace_attach(child);
92998+ gr_audit_ptrace(child);
92999+ }
93000 goto out_put_task_struct;
93001 }
93002
93003@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
93004 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
93005 if (copied != sizeof(tmp))
93006 return -EIO;
93007- return put_user(tmp, (unsigned long __user *)data);
93008+ return put_user(tmp, (__force unsigned long __user *)data);
93009 }
93010
93011 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
93012@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
93013 siginfo_t siginfo;
93014 int ret;
93015
93016+ pax_track_stack();
93017+
93018 switch (request) {
93019 case PTRACE_PEEKTEXT:
93020 case PTRACE_PEEKDATA:
93021@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
93022 goto out;
93023 }
93024
93025+ if (gr_handle_ptrace(child, request)) {
93026+ ret = -EPERM;
93027+ goto out_put_task_struct;
93028+ }
93029+
93030 if (request == PTRACE_ATTACH) {
93031 ret = ptrace_attach(child);
93032 /*
93033 * Some architectures need to do book-keeping after
93034 * a ptrace attach.
93035 */
93036- if (!ret)
93037+ if (!ret) {
93038 arch_ptrace_attach(child);
93039+ gr_audit_ptrace(child);
93040+ }
93041 goto out_put_task_struct;
93042 }
93043
93044diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
93045index 697c0a0..2402696 100644
93046--- a/kernel/rcutorture.c
93047+++ b/kernel/rcutorture.c
93048@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
93049 { 0 };
93050 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
93051 { 0 };
93052-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93053-static atomic_t n_rcu_torture_alloc;
93054-static atomic_t n_rcu_torture_alloc_fail;
93055-static atomic_t n_rcu_torture_free;
93056-static atomic_t n_rcu_torture_mberror;
93057-static atomic_t n_rcu_torture_error;
93058+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
93059+static atomic_unchecked_t n_rcu_torture_alloc;
93060+static atomic_unchecked_t n_rcu_torture_alloc_fail;
93061+static atomic_unchecked_t n_rcu_torture_free;
93062+static atomic_unchecked_t n_rcu_torture_mberror;
93063+static atomic_unchecked_t n_rcu_torture_error;
93064 static long n_rcu_torture_timers;
93065 static struct list_head rcu_torture_removed;
93066 static cpumask_var_t shuffle_tmp_mask;
93067@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
93068
93069 spin_lock_bh(&rcu_torture_lock);
93070 if (list_empty(&rcu_torture_freelist)) {
93071- atomic_inc(&n_rcu_torture_alloc_fail);
93072+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
93073 spin_unlock_bh(&rcu_torture_lock);
93074 return NULL;
93075 }
93076- atomic_inc(&n_rcu_torture_alloc);
93077+ atomic_inc_unchecked(&n_rcu_torture_alloc);
93078 p = rcu_torture_freelist.next;
93079 list_del_init(p);
93080 spin_unlock_bh(&rcu_torture_lock);
93081@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
93082 static void
93083 rcu_torture_free(struct rcu_torture *p)
93084 {
93085- atomic_inc(&n_rcu_torture_free);
93086+ atomic_inc_unchecked(&n_rcu_torture_free);
93087 spin_lock_bh(&rcu_torture_lock);
93088 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
93089 spin_unlock_bh(&rcu_torture_lock);
93090@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
93091 i = rp->rtort_pipe_count;
93092 if (i > RCU_TORTURE_PIPE_LEN)
93093 i = RCU_TORTURE_PIPE_LEN;
93094- atomic_inc(&rcu_torture_wcount[i]);
93095+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93096 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93097 rp->rtort_mbtest = 0;
93098 rcu_torture_free(rp);
93099@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
93100 i = rp->rtort_pipe_count;
93101 if (i > RCU_TORTURE_PIPE_LEN)
93102 i = RCU_TORTURE_PIPE_LEN;
93103- atomic_inc(&rcu_torture_wcount[i]);
93104+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93105 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
93106 rp->rtort_mbtest = 0;
93107 list_del(&rp->rtort_free);
93108@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
93109 i = old_rp->rtort_pipe_count;
93110 if (i > RCU_TORTURE_PIPE_LEN)
93111 i = RCU_TORTURE_PIPE_LEN;
93112- atomic_inc(&rcu_torture_wcount[i]);
93113+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
93114 old_rp->rtort_pipe_count++;
93115 cur_ops->deferred_free(old_rp);
93116 }
93117@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
93118 return;
93119 }
93120 if (p->rtort_mbtest == 0)
93121- atomic_inc(&n_rcu_torture_mberror);
93122+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93123 spin_lock(&rand_lock);
93124 cur_ops->read_delay(&rand);
93125 n_rcu_torture_timers++;
93126@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
93127 continue;
93128 }
93129 if (p->rtort_mbtest == 0)
93130- atomic_inc(&n_rcu_torture_mberror);
93131+ atomic_inc_unchecked(&n_rcu_torture_mberror);
93132 cur_ops->read_delay(&rand);
93133 preempt_disable();
93134 pipe_count = p->rtort_pipe_count;
93135@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
93136 rcu_torture_current,
93137 rcu_torture_current_version,
93138 list_empty(&rcu_torture_freelist),
93139- atomic_read(&n_rcu_torture_alloc),
93140- atomic_read(&n_rcu_torture_alloc_fail),
93141- atomic_read(&n_rcu_torture_free),
93142- atomic_read(&n_rcu_torture_mberror),
93143+ atomic_read_unchecked(&n_rcu_torture_alloc),
93144+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
93145+ atomic_read_unchecked(&n_rcu_torture_free),
93146+ atomic_read_unchecked(&n_rcu_torture_mberror),
93147 n_rcu_torture_timers);
93148- if (atomic_read(&n_rcu_torture_mberror) != 0)
93149+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
93150 cnt += sprintf(&page[cnt], " !!!");
93151 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
93152 if (i > 1) {
93153 cnt += sprintf(&page[cnt], "!!! ");
93154- atomic_inc(&n_rcu_torture_error);
93155+ atomic_inc_unchecked(&n_rcu_torture_error);
93156 WARN_ON_ONCE(1);
93157 }
93158 cnt += sprintf(&page[cnt], "Reader Pipe: ");
93159@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
93160 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
93161 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93162 cnt += sprintf(&page[cnt], " %d",
93163- atomic_read(&rcu_torture_wcount[i]));
93164+ atomic_read_unchecked(&rcu_torture_wcount[i]));
93165 }
93166 cnt += sprintf(&page[cnt], "\n");
93167 if (cur_ops->stats)
93168@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
93169
93170 if (cur_ops->cleanup)
93171 cur_ops->cleanup();
93172- if (atomic_read(&n_rcu_torture_error))
93173+ if (atomic_read_unchecked(&n_rcu_torture_error))
93174 rcu_torture_print_module_parms("End of test: FAILURE");
93175 else
93176 rcu_torture_print_module_parms("End of test: SUCCESS");
93177@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
93178
93179 rcu_torture_current = NULL;
93180 rcu_torture_current_version = 0;
93181- atomic_set(&n_rcu_torture_alloc, 0);
93182- atomic_set(&n_rcu_torture_alloc_fail, 0);
93183- atomic_set(&n_rcu_torture_free, 0);
93184- atomic_set(&n_rcu_torture_mberror, 0);
93185- atomic_set(&n_rcu_torture_error, 0);
93186+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
93187+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
93188+ atomic_set_unchecked(&n_rcu_torture_free, 0);
93189+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
93190+ atomic_set_unchecked(&n_rcu_torture_error, 0);
93191 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
93192- atomic_set(&rcu_torture_wcount[i], 0);
93193+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
93194 for_each_possible_cpu(cpu) {
93195 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
93196 per_cpu(rcu_torture_count, cpu)[i] = 0;
93197diff --git a/kernel/rcutree.c b/kernel/rcutree.c
93198index 683c4f3..97f54c6 100644
93199--- a/kernel/rcutree.c
93200+++ b/kernel/rcutree.c
93201@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
93202 /*
93203 * Do softirq processing for the current CPU.
93204 */
93205-static void rcu_process_callbacks(struct softirq_action *unused)
93206+static void rcu_process_callbacks(void)
93207 {
93208 /*
93209 * Memory references from any prior RCU read-side critical sections
93210diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
93211index c03edf7..ac1b341 100644
93212--- a/kernel/rcutree_plugin.h
93213+++ b/kernel/rcutree_plugin.h
93214@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
93215 */
93216 void __rcu_read_lock(void)
93217 {
93218- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
93219+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
93220 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
93221 }
93222 EXPORT_SYMBOL_GPL(__rcu_read_lock);
93223@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
93224 struct task_struct *t = current;
93225
93226 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
93227- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
93228+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
93229 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
93230 rcu_read_unlock_special(t);
93231 }
93232diff --git a/kernel/relay.c b/kernel/relay.c
93233index bf343f5..908e9ee 100644
93234--- a/kernel/relay.c
93235+++ b/kernel/relay.c
93236@@ -1228,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
93237 unsigned int flags,
93238 int *nonpad_ret)
93239 {
93240- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
93241+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
93242 struct rchan_buf *rbuf = in->private_data;
93243 unsigned int subbuf_size = rbuf->chan->subbuf_size;
93244 uint64_t pos = (uint64_t) *ppos;
93245@@ -1247,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
93246 .ops = &relay_pipe_buf_ops,
93247 .spd_release = relay_page_release,
93248 };
93249+ ssize_t ret;
93250+
93251+ pax_track_stack();
93252
93253 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
93254 return 0;
93255diff --git a/kernel/resource.c b/kernel/resource.c
93256index fb11a58..4e61ae1 100644
93257--- a/kernel/resource.c
93258+++ b/kernel/resource.c
93259@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
93260
93261 static int __init ioresources_init(void)
93262 {
93263+#ifdef CONFIG_GRKERNSEC_PROC_ADD
93264+#ifdef CONFIG_GRKERNSEC_PROC_USER
93265+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
93266+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
93267+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
93268+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
93269+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
93270+#endif
93271+#else
93272 proc_create("ioports", 0, NULL, &proc_ioports_operations);
93273 proc_create("iomem", 0, NULL, &proc_iomem_operations);
93274+#endif
93275 return 0;
93276 }
93277 __initcall(ioresources_init);
93278diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
93279index a56f629..1fc4989 100644
93280--- a/kernel/rtmutex-tester.c
93281+++ b/kernel/rtmutex-tester.c
93282@@ -21,7 +21,7 @@
93283 #define MAX_RT_TEST_MUTEXES 8
93284
93285 static spinlock_t rttest_lock;
93286-static atomic_t rttest_event;
93287+static atomic_unchecked_t rttest_event;
93288
93289 struct test_thread_data {
93290 int opcode;
93291@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93292
93293 case RTTEST_LOCKCONT:
93294 td->mutexes[td->opdata] = 1;
93295- td->event = atomic_add_return(1, &rttest_event);
93296+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93297 return 0;
93298
93299 case RTTEST_RESET:
93300@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93301 return 0;
93302
93303 case RTTEST_RESETEVENT:
93304- atomic_set(&rttest_event, 0);
93305+ atomic_set_unchecked(&rttest_event, 0);
93306 return 0;
93307
93308 default:
93309@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93310 return ret;
93311
93312 td->mutexes[id] = 1;
93313- td->event = atomic_add_return(1, &rttest_event);
93314+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93315 rt_mutex_lock(&mutexes[id]);
93316- td->event = atomic_add_return(1, &rttest_event);
93317+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93318 td->mutexes[id] = 4;
93319 return 0;
93320
93321@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93322 return ret;
93323
93324 td->mutexes[id] = 1;
93325- td->event = atomic_add_return(1, &rttest_event);
93326+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93327 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
93328- td->event = atomic_add_return(1, &rttest_event);
93329+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93330 td->mutexes[id] = ret ? 0 : 4;
93331 return ret ? -EINTR : 0;
93332
93333@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
93334 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
93335 return ret;
93336
93337- td->event = atomic_add_return(1, &rttest_event);
93338+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93339 rt_mutex_unlock(&mutexes[id]);
93340- td->event = atomic_add_return(1, &rttest_event);
93341+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93342 td->mutexes[id] = 0;
93343 return 0;
93344
93345@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
93346 break;
93347
93348 td->mutexes[dat] = 2;
93349- td->event = atomic_add_return(1, &rttest_event);
93350+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93351 break;
93352
93353 case RTTEST_LOCKBKL:
93354@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
93355 return;
93356
93357 td->mutexes[dat] = 3;
93358- td->event = atomic_add_return(1, &rttest_event);
93359+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93360 break;
93361
93362 case RTTEST_LOCKNOWAIT:
93363@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
93364 return;
93365
93366 td->mutexes[dat] = 1;
93367- td->event = atomic_add_return(1, &rttest_event);
93368+ td->event = atomic_add_return_unchecked(1, &rttest_event);
93369 return;
93370
93371 case RTTEST_LOCKBKL:
93372diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
93373index 29bd4ba..8c5de90 100644
93374--- a/kernel/rtmutex.c
93375+++ b/kernel/rtmutex.c
93376@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
93377 */
93378 spin_lock_irqsave(&pendowner->pi_lock, flags);
93379
93380- WARN_ON(!pendowner->pi_blocked_on);
93381+ BUG_ON(!pendowner->pi_blocked_on);
93382 WARN_ON(pendowner->pi_blocked_on != waiter);
93383 WARN_ON(pendowner->pi_blocked_on->lock != lock);
93384
93385diff --git a/kernel/sched.c b/kernel/sched.c
93386index 0591df8..e3af3a4 100644
93387--- a/kernel/sched.c
93388+++ b/kernel/sched.c
93389@@ -5043,7 +5043,7 @@ out:
93390 * In CONFIG_NO_HZ case, the idle load balance owner will do the
93391 * rebalancing for all the cpus for whom scheduler ticks are stopped.
93392 */
93393-static void run_rebalance_domains(struct softirq_action *h)
93394+static void run_rebalance_domains(void)
93395 {
93396 int this_cpu = smp_processor_id();
93397 struct rq *this_rq = cpu_rq(this_cpu);
93398@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
93399 }
93400 }
93401
93402+#ifdef CONFIG_GRKERNSEC_SETXID
93403+extern void gr_delayed_cred_worker(void);
93404+static inline void gr_cred_schedule(void)
93405+{
93406+ if (unlikely(current->delayed_cred))
93407+ gr_delayed_cred_worker();
93408+}
93409+#else
93410+static inline void gr_cred_schedule(void)
93411+{
93412+}
93413+#endif
93414+
93415 /*
93416 * schedule() is the main scheduler function.
93417 */
93418@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
93419 struct rq *rq;
93420 int cpu;
93421
93422+ pax_track_stack();
93423+
93424 need_resched:
93425 preempt_disable();
93426 cpu = smp_processor_id();
93427@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
93428
93429 schedule_debug(prev);
93430
93431+ gr_cred_schedule();
93432+
93433 if (sched_feat(HRTICK))
93434 hrtick_clear(rq);
93435
93436@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
93437 * Look out! "owner" is an entirely speculative pointer
93438 * access and not reliable.
93439 */
93440-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
93441+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
93442 {
93443 unsigned int cpu;
93444 struct rq *rq;
93445@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
93446 * DEBUG_PAGEALLOC could have unmapped it if
93447 * the mutex owner just released it and exited.
93448 */
93449- if (probe_kernel_address(&owner->cpu, cpu))
93450+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
93451 return 0;
93452 #else
93453- cpu = owner->cpu;
93454+ cpu = task_thread_info(owner)->cpu;
93455 #endif
93456
93457 /*
93458@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
93459 /*
93460 * Is that owner really running on that cpu?
93461 */
93462- if (task_thread_info(rq->curr) != owner || need_resched())
93463+ if (rq->curr != owner || need_resched())
93464 return 0;
93465
93466 cpu_relax();
93467@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
93468 /* convert nice value [19,-20] to rlimit style value [1,40] */
93469 int nice_rlim = 20 - nice;
93470
93471+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
93472+
93473 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
93474 capable(CAP_SYS_NICE));
93475 }
93476@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
93477 if (nice > 19)
93478 nice = 19;
93479
93480- if (increment < 0 && !can_nice(current, nice))
93481+ if (increment < 0 && (!can_nice(current, nice) ||
93482+ gr_handle_chroot_nice()))
93483 return -EPERM;
93484
93485 retval = security_task_setnice(current, nice);
93486@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
93487 long power;
93488 int weight;
93489
93490- WARN_ON(!sd || !sd->groups);
93491+ BUG_ON(!sd || !sd->groups);
93492
93493 if (cpu != group_first_cpu(sd->groups))
93494 return;
93495diff --git a/kernel/signal.c b/kernel/signal.c
93496index 2494827..cda80a0 100644
93497--- a/kernel/signal.c
93498+++ b/kernel/signal.c
93499@@ -41,12 +41,12 @@
93500
93501 static struct kmem_cache *sigqueue_cachep;
93502
93503-static void __user *sig_handler(struct task_struct *t, int sig)
93504+static __sighandler_t sig_handler(struct task_struct *t, int sig)
93505 {
93506 return t->sighand->action[sig - 1].sa.sa_handler;
93507 }
93508
93509-static int sig_handler_ignored(void __user *handler, int sig)
93510+static int sig_handler_ignored(__sighandler_t handler, int sig)
93511 {
93512 /* Is it explicitly or implicitly ignored? */
93513 return handler == SIG_IGN ||
93514@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
93515 static int sig_task_ignored(struct task_struct *t, int sig,
93516 int from_ancestor_ns)
93517 {
93518- void __user *handler;
93519+ __sighandler_t handler;
93520
93521 handler = sig_handler(t, sig);
93522
93523@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
93524 */
93525 user = get_uid(__task_cred(t)->user);
93526 atomic_inc(&user->sigpending);
93527+
93528+ if (!override_rlimit)
93529+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
93530 if (override_rlimit ||
93531 atomic_read(&user->sigpending) <=
93532 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
93533@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
93534
93535 int unhandled_signal(struct task_struct *tsk, int sig)
93536 {
93537- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
93538+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
93539 if (is_global_init(tsk))
93540 return 1;
93541 if (handler != SIG_IGN && handler != SIG_DFL)
93542@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
93543 }
93544 }
93545
93546+ /* allow glibc communication via tgkill to other threads in our
93547+ thread group */
93548+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
93549+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
93550+ && gr_handle_signal(t, sig))
93551+ return -EPERM;
93552+
93553 return security_task_kill(t, info, sig, 0);
93554 }
93555
93556@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93557 return send_signal(sig, info, p, 1);
93558 }
93559
93560-static int
93561+int
93562 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93563 {
93564 return send_signal(sig, info, t, 0);
93565@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93566 unsigned long int flags;
93567 int ret, blocked, ignored;
93568 struct k_sigaction *action;
93569+ int is_unhandled = 0;
93570
93571 spin_lock_irqsave(&t->sighand->siglock, flags);
93572 action = &t->sighand->action[sig-1];
93573@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
93574 }
93575 if (action->sa.sa_handler == SIG_DFL)
93576 t->signal->flags &= ~SIGNAL_UNKILLABLE;
93577+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
93578+ is_unhandled = 1;
93579 ret = specific_send_sig_info(sig, info, t);
93580 spin_unlock_irqrestore(&t->sighand->siglock, flags);
93581
93582+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
93583+ normal operation */
93584+ if (is_unhandled) {
93585+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
93586+ gr_handle_crash(t, sig);
93587+ }
93588+
93589 return ret;
93590 }
93591
93592@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
93593 {
93594 int ret = check_kill_permission(sig, info, p);
93595
93596- if (!ret && sig)
93597+ if (!ret && sig) {
93598 ret = do_send_sig_info(sig, info, p, true);
93599+ if (!ret)
93600+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
93601+ }
93602
93603 return ret;
93604 }
93605@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
93606 {
93607 siginfo_t info;
93608
93609+ pax_track_stack();
93610+
93611 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
93612
93613 memset(&info, 0, sizeof info);
93614@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
93615 int error = -ESRCH;
93616
93617 rcu_read_lock();
93618- p = find_task_by_vpid(pid);
93619+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
93620+ /* allow glibc communication via tgkill to other threads in our
93621+ thread group */
93622+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
93623+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
93624+ p = find_task_by_vpid_unrestricted(pid);
93625+ else
93626+#endif
93627+ p = find_task_by_vpid(pid);
93628 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
93629 error = check_kill_permission(sig, info, p);
93630 /*
93631diff --git a/kernel/smp.c b/kernel/smp.c
93632index aa9cff3..631a0de 100644
93633--- a/kernel/smp.c
93634+++ b/kernel/smp.c
93635@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
93636 }
93637 EXPORT_SYMBOL(smp_call_function);
93638
93639-void ipi_call_lock(void)
93640+void ipi_call_lock(void) __acquires(call_function.lock)
93641 {
93642 spin_lock(&call_function.lock);
93643 }
93644
93645-void ipi_call_unlock(void)
93646+void ipi_call_unlock(void) __releases(call_function.lock)
93647 {
93648 spin_unlock(&call_function.lock);
93649 }
93650
93651-void ipi_call_lock_irq(void)
93652+void ipi_call_lock_irq(void) __acquires(call_function.lock)
93653 {
93654 spin_lock_irq(&call_function.lock);
93655 }
93656
93657-void ipi_call_unlock_irq(void)
93658+void ipi_call_unlock_irq(void) __releases(call_function.lock)
93659 {
93660 spin_unlock_irq(&call_function.lock);
93661 }
93662diff --git a/kernel/softirq.c b/kernel/softirq.c
93663index 04a0252..580c512 100644
93664--- a/kernel/softirq.c
93665+++ b/kernel/softirq.c
93666@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
93667
93668 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
93669
93670-char *softirq_to_name[NR_SOFTIRQS] = {
93671+const char * const softirq_to_name[NR_SOFTIRQS] = {
93672 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
93673 "TASKLET", "SCHED", "HRTIMER", "RCU"
93674 };
93675@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
93676
93677 asmlinkage void __do_softirq(void)
93678 {
93679- struct softirq_action *h;
93680+ const struct softirq_action *h;
93681 __u32 pending;
93682 int max_restart = MAX_SOFTIRQ_RESTART;
93683 int cpu;
93684@@ -233,7 +233,7 @@ restart:
93685 kstat_incr_softirqs_this_cpu(h - softirq_vec);
93686
93687 trace_softirq_entry(h, softirq_vec);
93688- h->action(h);
93689+ h->action();
93690 trace_softirq_exit(h, softirq_vec);
93691 if (unlikely(prev_count != preempt_count())) {
93692 printk(KERN_ERR "huh, entered softirq %td %s %p"
93693@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
93694 local_irq_restore(flags);
93695 }
93696
93697-void open_softirq(int nr, void (*action)(struct softirq_action *))
93698+void open_softirq(int nr, void (*action)(void))
93699 {
93700- softirq_vec[nr].action = action;
93701+ pax_open_kernel();
93702+ *(void **)&softirq_vec[nr].action = action;
93703+ pax_close_kernel();
93704 }
93705
93706 /*
93707@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
93708
93709 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
93710
93711-static void tasklet_action(struct softirq_action *a)
93712+static void tasklet_action(void)
93713 {
93714 struct tasklet_struct *list;
93715
93716@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
93717 }
93718 }
93719
93720-static void tasklet_hi_action(struct softirq_action *a)
93721+static void tasklet_hi_action(void)
93722 {
93723 struct tasklet_struct *list;
93724
93725diff --git a/kernel/sys.c b/kernel/sys.c
93726index e9512b1..f07185f 100644
93727--- a/kernel/sys.c
93728+++ b/kernel/sys.c
93729@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
93730 error = -EACCES;
93731 goto out;
93732 }
93733+
93734+ if (gr_handle_chroot_setpriority(p, niceval)) {
93735+ error = -EACCES;
93736+ goto out;
93737+ }
93738+
93739 no_nice = security_task_setnice(p, niceval);
93740 if (no_nice) {
93741 error = no_nice;
93742@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
93743 !(user = find_user(who)))
93744 goto out_unlock; /* No processes for this user */
93745
93746- do_each_thread(g, p)
93747+ do_each_thread(g, p) {
93748 if (__task_cred(p)->uid == who)
93749 error = set_one_prio(p, niceval, error);
93750- while_each_thread(g, p);
93751+ } while_each_thread(g, p);
93752 if (who != cred->uid)
93753 free_uid(user); /* For find_user() */
93754 break;
93755@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
93756 !(user = find_user(who)))
93757 goto out_unlock; /* No processes for this user */
93758
93759- do_each_thread(g, p)
93760+ do_each_thread(g, p) {
93761 if (__task_cred(p)->uid == who) {
93762 niceval = 20 - task_nice(p);
93763 if (niceval > retval)
93764 retval = niceval;
93765 }
93766- while_each_thread(g, p);
93767+ } while_each_thread(g, p);
93768 if (who != cred->uid)
93769 free_uid(user); /* for find_user() */
93770 break;
93771@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
93772 goto error;
93773 }
93774
93775+ if (gr_check_group_change(new->gid, new->egid, -1))
93776+ goto error;
93777+
93778 if (rgid != (gid_t) -1 ||
93779 (egid != (gid_t) -1 && egid != old->gid))
93780 new->sgid = new->egid;
93781@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
93782 goto error;
93783
93784 retval = -EPERM;
93785+
93786+ if (gr_check_group_change(gid, gid, gid))
93787+ goto error;
93788+
93789 if (capable(CAP_SETGID))
93790 new->gid = new->egid = new->sgid = new->fsgid = gid;
93791 else if (gid == old->gid || gid == old->sgid)
93792@@ -559,7 +572,7 @@ error:
93793 /*
93794 * change the user struct in a credentials set to match the new UID
93795 */
93796-static int set_user(struct cred *new)
93797+int set_user(struct cred *new)
93798 {
93799 struct user_struct *new_user;
93800
93801@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
93802 if (!new_user)
93803 return -EAGAIN;
93804
93805+ /*
93806+ * We don't fail in case of NPROC limit excess here because too many
93807+ * poorly written programs don't check set*uid() return code, assuming
93808+ * it never fails if called by root. We may still enforce NPROC limit
93809+ * for programs doing set*uid()+execve() by harmlessly deferring the
93810+ * failure to the execve() stage.
93811+ */
93812 if (atomic_read(&new_user->processes) >=
93813 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
93814- new_user != INIT_USER) {
93815- free_uid(new_user);
93816- return -EAGAIN;
93817- }
93818+ new_user != INIT_USER)
93819+ current->flags |= PF_NPROC_EXCEEDED;
93820+ else
93821+ current->flags &= ~PF_NPROC_EXCEEDED;
93822
93823 free_uid(new->user);
93824 new->user = new_user;
93825@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
93826 goto error;
93827 }
93828
93829+ if (gr_check_user_change(new->uid, new->euid, -1))
93830+ goto error;
93831+
93832 if (new->uid != old->uid) {
93833 retval = set_user(new);
93834 if (retval < 0)
93835@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
93836 goto error;
93837
93838 retval = -EPERM;
93839+
93840+ if (gr_check_crash_uid(uid))
93841+ goto error;
93842+ if (gr_check_user_change(uid, uid, uid))
93843+ goto error;
93844+
93845 if (capable(CAP_SETUID)) {
93846 new->suid = new->uid = uid;
93847 if (uid != old->uid) {
93848@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
93849 goto error;
93850 }
93851
93852+ if (gr_check_user_change(ruid, euid, -1))
93853+ goto error;
93854+
93855 if (ruid != (uid_t) -1) {
93856 new->uid = ruid;
93857 if (ruid != old->uid) {
93858@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
93859 goto error;
93860 }
93861
93862+ if (gr_check_group_change(rgid, egid, -1))
93863+ goto error;
93864+
93865 if (rgid != (gid_t) -1)
93866 new->gid = rgid;
93867 if (egid != (gid_t) -1)
93868@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
93869 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
93870 goto error;
93871
93872+ if (gr_check_user_change(-1, -1, uid))
93873+ goto error;
93874+
93875 if (uid == old->uid || uid == old->euid ||
93876 uid == old->suid || uid == old->fsuid ||
93877 capable(CAP_SETUID)) {
93878@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
93879 if (gid == old->gid || gid == old->egid ||
93880 gid == old->sgid || gid == old->fsgid ||
93881 capable(CAP_SETGID)) {
93882+ if (gr_check_group_change(-1, -1, gid))
93883+ goto error;
93884+
93885 if (gid != old_fsgid) {
93886 new->fsgid = gid;
93887 goto change_okay;
93888@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
93889 error = get_dumpable(me->mm);
93890 break;
93891 case PR_SET_DUMPABLE:
93892- if (arg2 < 0 || arg2 > 1) {
93893+ if (arg2 > 1) {
93894 error = -EINVAL;
93895 break;
93896 }
93897diff --git a/kernel/sysctl.c b/kernel/sysctl.c
93898index b8bd058..ab6a76be 100644
93899--- a/kernel/sysctl.c
93900+++ b/kernel/sysctl.c
93901@@ -63,6 +63,13 @@
93902 static int deprecated_sysctl_warning(struct __sysctl_args *args);
93903
93904 #if defined(CONFIG_SYSCTL)
93905+#include <linux/grsecurity.h>
93906+#include <linux/grinternal.h>
93907+
93908+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
93909+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
93910+ const int op);
93911+extern int gr_handle_chroot_sysctl(const int op);
93912
93913 /* External variables not in a header file. */
93914 extern int C_A_D;
93915@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
93916 static int proc_taint(struct ctl_table *table, int write,
93917 void __user *buffer, size_t *lenp, loff_t *ppos);
93918 #endif
93919+extern ctl_table grsecurity_table[];
93920
93921 static struct ctl_table root_table[];
93922 static struct ctl_table_root sysctl_table_root;
93923@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
93924 int sysctl_legacy_va_layout;
93925 #endif
93926
93927+#ifdef CONFIG_PAX_SOFTMODE
93928+static ctl_table pax_table[] = {
93929+ {
93930+ .ctl_name = CTL_UNNUMBERED,
93931+ .procname = "softmode",
93932+ .data = &pax_softmode,
93933+ .maxlen = sizeof(unsigned int),
93934+ .mode = 0600,
93935+ .proc_handler = &proc_dointvec,
93936+ },
93937+
93938+ { .ctl_name = 0 }
93939+};
93940+#endif
93941+
93942 extern int prove_locking;
93943 extern int lock_stat;
93944
93945@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
93946 #endif
93947
93948 static struct ctl_table kern_table[] = {
93949+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
93950+ {
93951+ .ctl_name = CTL_UNNUMBERED,
93952+ .procname = "grsecurity",
93953+ .mode = 0500,
93954+ .child = grsecurity_table,
93955+ },
93956+#endif
93957+
93958+#ifdef CONFIG_PAX_SOFTMODE
93959+ {
93960+ .ctl_name = CTL_UNNUMBERED,
93961+ .procname = "pax",
93962+ .mode = 0500,
93963+ .child = pax_table,
93964+ },
93965+#endif
93966+
93967 {
93968 .ctl_name = CTL_UNNUMBERED,
93969 .procname = "sched_child_runs_first",
93970@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
93971 .data = &modprobe_path,
93972 .maxlen = KMOD_PATH_LEN,
93973 .mode = 0644,
93974- .proc_handler = &proc_dostring,
93975- .strategy = &sysctl_string,
93976+ .proc_handler = &proc_dostring_modpriv,
93977+ .strategy = &sysctl_string_modpriv,
93978 },
93979 {
93980 .ctl_name = CTL_UNNUMBERED,
93981@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
93982 .mode = 0644,
93983 .proc_handler = &proc_dointvec
93984 },
93985+ {
93986+ .procname = "heap_stack_gap",
93987+ .data = &sysctl_heap_stack_gap,
93988+ .maxlen = sizeof(sysctl_heap_stack_gap),
93989+ .mode = 0644,
93990+ .proc_handler = proc_doulongvec_minmax,
93991+ },
93992 #else
93993 {
93994 .ctl_name = CTL_UNNUMBERED,
93995@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
93996 return 0;
93997 }
93998
93999+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
94000+
94001 static int parse_table(int __user *name, int nlen,
94002 void __user *oldval, size_t __user *oldlenp,
94003 void __user *newval, size_t newlen,
94004@@ -1821,7 +1871,7 @@ repeat:
94005 if (n == table->ctl_name) {
94006 int error;
94007 if (table->child) {
94008- if (sysctl_perm(root, table, MAY_EXEC))
94009+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
94010 return -EPERM;
94011 name++;
94012 nlen--;
94013@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
94014 int error;
94015 int mode;
94016
94017+ if (table->parent != NULL && table->parent->procname != NULL &&
94018+ table->procname != NULL &&
94019+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
94020+ return -EACCES;
94021+ if (gr_handle_chroot_sysctl(op))
94022+ return -EACCES;
94023+ error = gr_handle_sysctl(table, op);
94024+ if (error)
94025+ return error;
94026+
94027+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94028+ if (error)
94029+ return error;
94030+
94031+ if (root->permissions)
94032+ mode = root->permissions(root, current->nsproxy, table);
94033+ else
94034+ mode = table->mode;
94035+
94036+ return test_perm(mode, op);
94037+}
94038+
94039+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
94040+{
94041+ int error;
94042+ int mode;
94043+
94044 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
94045 if (error)
94046 return error;
94047@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
94048 buffer, lenp, ppos);
94049 }
94050
94051+int proc_dostring_modpriv(struct ctl_table *table, int write,
94052+ void __user *buffer, size_t *lenp, loff_t *ppos)
94053+{
94054+ if (write && !capable(CAP_SYS_MODULE))
94055+ return -EPERM;
94056+
94057+ return _proc_do_string(table->data, table->maxlen, write,
94058+ buffer, lenp, ppos);
94059+}
94060+
94061
94062 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
94063 int *valp,
94064@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
94065 vleft = table->maxlen / sizeof(unsigned long);
94066 left = *lenp;
94067
94068- for (; left && vleft--; i++, min++, max++, first=0) {
94069+ for (; left && vleft--; i++, first=0) {
94070 if (write) {
94071 while (left) {
94072 char c;
94073@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
94074 return -ENOSYS;
94075 }
94076
94077+int proc_dostring_modpriv(struct ctl_table *table, int write,
94078+ void __user *buffer, size_t *lenp, loff_t *ppos)
94079+{
94080+ return -ENOSYS;
94081+}
94082+
94083 int proc_dointvec(struct ctl_table *table, int write,
94084 void __user *buffer, size_t *lenp, loff_t *ppos)
94085 {
94086@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
94087 return 1;
94088 }
94089
94090+int sysctl_string_modpriv(struct ctl_table *table,
94091+ void __user *oldval, size_t __user *oldlenp,
94092+ void __user *newval, size_t newlen)
94093+{
94094+ if (newval && newlen && !capable(CAP_SYS_MODULE))
94095+ return -EPERM;
94096+
94097+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
94098+}
94099+
94100 /*
94101 * This function makes sure that all of the integers in the vector
94102 * are between the minimum and maximum values given in the arrays
94103@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
94104 return -ENOSYS;
94105 }
94106
94107+int sysctl_string_modpriv(struct ctl_table *table,
94108+ void __user *oldval, size_t __user *oldlenp,
94109+ void __user *newval, size_t newlen)
94110+{
94111+ return -ENOSYS;
94112+}
94113+
94114 int sysctl_intvec(struct ctl_table *table,
94115 void __user *oldval, size_t __user *oldlenp,
94116 void __user *newval, size_t newlen)
94117@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
94118 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
94119 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
94120 EXPORT_SYMBOL(proc_dostring);
94121+EXPORT_SYMBOL(proc_dostring_modpriv);
94122 EXPORT_SYMBOL(proc_doulongvec_minmax);
94123 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
94124 EXPORT_SYMBOL(register_sysctl_table);
94125@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
94126 EXPORT_SYMBOL(sysctl_jiffies);
94127 EXPORT_SYMBOL(sysctl_ms_jiffies);
94128 EXPORT_SYMBOL(sysctl_string);
94129+EXPORT_SYMBOL(sysctl_string_modpriv);
94130 EXPORT_SYMBOL(sysctl_data);
94131 EXPORT_SYMBOL(unregister_sysctl_table);
94132diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
94133index 469193c..ea3ecb2 100644
94134--- a/kernel/sysctl_check.c
94135+++ b/kernel/sysctl_check.c
94136@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
94137 } else {
94138 if ((table->strategy == sysctl_data) ||
94139 (table->strategy == sysctl_string) ||
94140+ (table->strategy == sysctl_string_modpriv) ||
94141 (table->strategy == sysctl_intvec) ||
94142 (table->strategy == sysctl_jiffies) ||
94143 (table->strategy == sysctl_ms_jiffies) ||
94144 (table->proc_handler == proc_dostring) ||
94145+ (table->proc_handler == proc_dostring_modpriv) ||
94146 (table->proc_handler == proc_dointvec) ||
94147 (table->proc_handler == proc_dointvec_minmax) ||
94148 (table->proc_handler == proc_dointvec_jiffies) ||
94149diff --git a/kernel/taskstats.c b/kernel/taskstats.c
94150index a4ef542..798bcd7 100644
94151--- a/kernel/taskstats.c
94152+++ b/kernel/taskstats.c
94153@@ -26,9 +26,12 @@
94154 #include <linux/cgroup.h>
94155 #include <linux/fs.h>
94156 #include <linux/file.h>
94157+#include <linux/grsecurity.h>
94158 #include <net/genetlink.h>
94159 #include <asm/atomic.h>
94160
94161+extern int gr_is_taskstats_denied(int pid);
94162+
94163 /*
94164 * Maximum length of a cpumask that can be specified in
94165 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
94166@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
94167 size_t size;
94168 cpumask_var_t mask;
94169
94170+ if (gr_is_taskstats_denied(current->pid))
94171+ return -EACCES;
94172+
94173 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
94174 return -ENOMEM;
94175
94176diff --git a/kernel/time.c b/kernel/time.c
94177index 33df60e..ca768bd 100644
94178--- a/kernel/time.c
94179+++ b/kernel/time.c
94180@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
94181 return error;
94182
94183 if (tz) {
94184+ /* we log in do_settimeofday called below, so don't log twice
94185+ */
94186+ if (!tv)
94187+ gr_log_timechange();
94188+
94189 /* SMP safe, global irq locking makes it work. */
94190 sys_tz = *tz;
94191 update_vsyscall_tz();
94192@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
94193 * Avoid unnecessary multiplications/divisions in the
94194 * two most common HZ cases:
94195 */
94196-unsigned int inline jiffies_to_msecs(const unsigned long j)
94197+inline unsigned int jiffies_to_msecs(const unsigned long j)
94198 {
94199 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
94200 return (MSEC_PER_SEC / HZ) * j;
94201@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
94202 }
94203 EXPORT_SYMBOL(jiffies_to_msecs);
94204
94205-unsigned int inline jiffies_to_usecs(const unsigned long j)
94206+inline unsigned int jiffies_to_usecs(const unsigned long j)
94207 {
94208 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
94209 return (USEC_PER_SEC / HZ) * j;
94210diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
94211index 57b953f..06f149f 100644
94212--- a/kernel/time/tick-broadcast.c
94213+++ b/kernel/time/tick-broadcast.c
94214@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
94215 * then clear the broadcast bit.
94216 */
94217 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
94218- int cpu = smp_processor_id();
94219+ cpu = smp_processor_id();
94220
94221 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
94222 tick_broadcast_clear_oneshot(cpu);
94223diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
94224index 4a71cff..ffb5548 100644
94225--- a/kernel/time/timekeeping.c
94226+++ b/kernel/time/timekeeping.c
94227@@ -14,6 +14,7 @@
94228 #include <linux/init.h>
94229 #include <linux/mm.h>
94230 #include <linux/sched.h>
94231+#include <linux/grsecurity.h>
94232 #include <linux/sysdev.h>
94233 #include <linux/clocksource.h>
94234 #include <linux/jiffies.h>
94235@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
94236 */
94237 struct timespec ts = xtime;
94238 timespec_add_ns(&ts, nsec);
94239- ACCESS_ONCE(xtime_cache) = ts;
94240+ ACCESS_ONCE_RW(xtime_cache) = ts;
94241 }
94242
94243 /* must hold xtime_lock */
94244@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
94245 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
94246 return -EINVAL;
94247
94248+ gr_log_timechange();
94249+
94250 write_seqlock_irqsave(&xtime_lock, flags);
94251
94252 timekeeping_forward_now();
94253diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
94254index 54c0dda..e9095d9 100644
94255--- a/kernel/time/timer_list.c
94256+++ b/kernel/time/timer_list.c
94257@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
94258
94259 static void print_name_offset(struct seq_file *m, void *sym)
94260 {
94261+#ifdef CONFIG_GRKERNSEC_HIDESYM
94262+ SEQ_printf(m, "<%p>", NULL);
94263+#else
94264 char symname[KSYM_NAME_LEN];
94265
94266 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
94267 SEQ_printf(m, "<%p>", sym);
94268 else
94269 SEQ_printf(m, "%s", symname);
94270+#endif
94271 }
94272
94273 static void
94274@@ -112,7 +116,11 @@ next_one:
94275 static void
94276 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
94277 {
94278+#ifdef CONFIG_GRKERNSEC_HIDESYM
94279+ SEQ_printf(m, " .base: %p\n", NULL);
94280+#else
94281 SEQ_printf(m, " .base: %p\n", base);
94282+#endif
94283 SEQ_printf(m, " .index: %d\n",
94284 base->index);
94285 SEQ_printf(m, " .resolution: %Lu nsecs\n",
94286@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
94287 {
94288 struct proc_dir_entry *pe;
94289
94290+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94291+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
94292+#else
94293 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
94294+#endif
94295 if (!pe)
94296 return -ENOMEM;
94297 return 0;
94298diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
94299index ee5681f..634089b 100644
94300--- a/kernel/time/timer_stats.c
94301+++ b/kernel/time/timer_stats.c
94302@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
94303 static unsigned long nr_entries;
94304 static struct entry entries[MAX_ENTRIES];
94305
94306-static atomic_t overflow_count;
94307+static atomic_unchecked_t overflow_count;
94308
94309 /*
94310 * The entries are in a hash-table, for fast lookup:
94311@@ -140,7 +140,7 @@ static void reset_entries(void)
94312 nr_entries = 0;
94313 memset(entries, 0, sizeof(entries));
94314 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
94315- atomic_set(&overflow_count, 0);
94316+ atomic_set_unchecked(&overflow_count, 0);
94317 }
94318
94319 static struct entry *alloc_entry(void)
94320@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94321 if (likely(entry))
94322 entry->count++;
94323 else
94324- atomic_inc(&overflow_count);
94325+ atomic_inc_unchecked(&overflow_count);
94326
94327 out_unlock:
94328 spin_unlock_irqrestore(lock, flags);
94329@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
94330
94331 static void print_name_offset(struct seq_file *m, unsigned long addr)
94332 {
94333+#ifdef CONFIG_GRKERNSEC_HIDESYM
94334+ seq_printf(m, "<%p>", NULL);
94335+#else
94336 char symname[KSYM_NAME_LEN];
94337
94338 if (lookup_symbol_name(addr, symname) < 0)
94339 seq_printf(m, "<%p>", (void *)addr);
94340 else
94341 seq_printf(m, "%s", symname);
94342+#endif
94343 }
94344
94345 static int tstats_show(struct seq_file *m, void *v)
94346@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
94347
94348 seq_puts(m, "Timer Stats Version: v0.2\n");
94349 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
94350- if (atomic_read(&overflow_count))
94351+ if (atomic_read_unchecked(&overflow_count))
94352 seq_printf(m, "Overflow: %d entries\n",
94353- atomic_read(&overflow_count));
94354+ atomic_read_unchecked(&overflow_count));
94355
94356 for (i = 0; i < nr_entries; i++) {
94357 entry = entries + i;
94358@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
94359 {
94360 struct proc_dir_entry *pe;
94361
94362+#ifdef CONFIG_GRKERNSEC_PROC_ADD
94363+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
94364+#else
94365 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
94366+#endif
94367 if (!pe)
94368 return -ENOMEM;
94369 return 0;
94370diff --git a/kernel/timer.c b/kernel/timer.c
94371index cb3c1f1..8bf5526 100644
94372--- a/kernel/timer.c
94373+++ b/kernel/timer.c
94374@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
94375 /*
94376 * This function runs timers and the timer-tq in bottom half context.
94377 */
94378-static void run_timer_softirq(struct softirq_action *h)
94379+static void run_timer_softirq(void)
94380 {
94381 struct tvec_base *base = __get_cpu_var(tvec_bases);
94382
94383diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
94384index d9d6206..f19467e 100644
94385--- a/kernel/trace/blktrace.c
94386+++ b/kernel/trace/blktrace.c
94387@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
94388 struct blk_trace *bt = filp->private_data;
94389 char buf[16];
94390
94391- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
94392+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
94393
94394 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
94395 }
94396@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
94397 return 1;
94398
94399 bt = buf->chan->private_data;
94400- atomic_inc(&bt->dropped);
94401+ atomic_inc_unchecked(&bt->dropped);
94402 return 0;
94403 }
94404
94405@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
94406
94407 bt->dir = dir;
94408 bt->dev = dev;
94409- atomic_set(&bt->dropped, 0);
94410+ atomic_set_unchecked(&bt->dropped, 0);
94411
94412 ret = -EIO;
94413 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
94414diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
94415index 4872937..c794d40 100644
94416--- a/kernel/trace/ftrace.c
94417+++ b/kernel/trace/ftrace.c
94418@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
94419
94420 ip = rec->ip;
94421
94422+ ret = ftrace_arch_code_modify_prepare();
94423+ FTRACE_WARN_ON(ret);
94424+ if (ret)
94425+ return 0;
94426+
94427 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
94428+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
94429 if (ret) {
94430 ftrace_bug(ret, ip);
94431 rec->flags |= FTRACE_FL_FAILED;
94432- return 0;
94433 }
94434- return 1;
94435+ return ret ? 0 : 1;
94436 }
94437
94438 /*
94439diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
94440index e749a05..19c6e94 100644
94441--- a/kernel/trace/ring_buffer.c
94442+++ b/kernel/trace/ring_buffer.c
94443@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
94444 * the reader page). But if the next page is a header page,
94445 * its flags will be non zero.
94446 */
94447-static int inline
94448+static inline int
94449 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
94450 struct buffer_page *page, struct list_head *list)
94451 {
94452diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
94453index a2a2d1f..7f32b09 100644
94454--- a/kernel/trace/trace.c
94455+++ b/kernel/trace/trace.c
94456@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
94457 size_t rem;
94458 unsigned int i;
94459
94460+ pax_track_stack();
94461+
94462 /* copy the tracer to avoid using a global lock all around */
94463 mutex_lock(&trace_types_lock);
94464 if (unlikely(old_tracer != current_trace && current_trace)) {
94465@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
94466 int entries, size, i;
94467 size_t ret;
94468
94469+ pax_track_stack();
94470+
94471 if (*ppos & (PAGE_SIZE - 1)) {
94472 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
94473 return -EINVAL;
94474@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
94475 };
94476 #endif
94477
94478-static struct dentry *d_tracer;
94479-
94480 struct dentry *tracing_init_dentry(void)
94481 {
94482+ static struct dentry *d_tracer;
94483 static int once;
94484
94485 if (d_tracer)
94486@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
94487 return d_tracer;
94488 }
94489
94490-static struct dentry *d_percpu;
94491-
94492 struct dentry *tracing_dentry_percpu(void)
94493 {
94494+ static struct dentry *d_percpu;
94495 static int once;
94496 struct dentry *d_tracer;
94497
94498diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
94499index d128f65..f37b4af 100644
94500--- a/kernel/trace/trace_events.c
94501+++ b/kernel/trace/trace_events.c
94502@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
94503 * Modules must own their file_operations to keep up with
94504 * reference counting.
94505 */
94506+
94507 struct ftrace_module_file_ops {
94508 struct list_head list;
94509 struct module *mod;
94510- struct file_operations id;
94511- struct file_operations enable;
94512- struct file_operations format;
94513- struct file_operations filter;
94514 };
94515
94516 static void remove_subsystem_dir(const char *name)
94517@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
94518
94519 file_ops->mod = mod;
94520
94521- file_ops->id = ftrace_event_id_fops;
94522- file_ops->id.owner = mod;
94523-
94524- file_ops->enable = ftrace_enable_fops;
94525- file_ops->enable.owner = mod;
94526-
94527- file_ops->filter = ftrace_event_filter_fops;
94528- file_ops->filter.owner = mod;
94529-
94530- file_ops->format = ftrace_event_format_fops;
94531- file_ops->format.owner = mod;
94532+ pax_open_kernel();
94533+ *(void **)&mod->trace_id.owner = mod;
94534+ *(void **)&mod->trace_enable.owner = mod;
94535+ *(void **)&mod->trace_filter.owner = mod;
94536+ *(void **)&mod->trace_format.owner = mod;
94537+ pax_close_kernel();
94538
94539 list_add(&file_ops->list, &ftrace_module_file_list);
94540
94541@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
94542 call->mod = mod;
94543 list_add(&call->list, &ftrace_events);
94544 event_create_dir(call, d_events,
94545- &file_ops->id, &file_ops->enable,
94546- &file_ops->filter, &file_ops->format);
94547+ &mod->trace_id, &mod->trace_enable,
94548+ &mod->trace_filter, &mod->trace_format);
94549 }
94550 }
94551
94552diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
94553index 0acd834..b800b56 100644
94554--- a/kernel/trace/trace_mmiotrace.c
94555+++ b/kernel/trace/trace_mmiotrace.c
94556@@ -23,7 +23,7 @@ struct header_iter {
94557 static struct trace_array *mmio_trace_array;
94558 static bool overrun_detected;
94559 static unsigned long prev_overruns;
94560-static atomic_t dropped_count;
94561+static atomic_unchecked_t dropped_count;
94562
94563 static void mmio_reset_data(struct trace_array *tr)
94564 {
94565@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
94566
94567 static unsigned long count_overruns(struct trace_iterator *iter)
94568 {
94569- unsigned long cnt = atomic_xchg(&dropped_count, 0);
94570+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
94571 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
94572
94573 if (over > prev_overruns)
94574@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
94575 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
94576 sizeof(*entry), 0, pc);
94577 if (!event) {
94578- atomic_inc(&dropped_count);
94579+ atomic_inc_unchecked(&dropped_count);
94580 return;
94581 }
94582 entry = ring_buffer_event_data(event);
94583@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
94584 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
94585 sizeof(*entry), 0, pc);
94586 if (!event) {
94587- atomic_inc(&dropped_count);
94588+ atomic_inc_unchecked(&dropped_count);
94589 return;
94590 }
94591 entry = ring_buffer_event_data(event);
94592diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
94593index b6c12c6..41fdc53 100644
94594--- a/kernel/trace/trace_output.c
94595+++ b/kernel/trace/trace_output.c
94596@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
94597 return 0;
94598 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
94599 if (!IS_ERR(p)) {
94600- p = mangle_path(s->buffer + s->len, p, "\n");
94601+ p = mangle_path(s->buffer + s->len, p, "\n\\");
94602 if (p) {
94603 s->len = p - s->buffer;
94604 return 1;
94605diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
94606index 8504ac7..ecf0adb 100644
94607--- a/kernel/trace/trace_stack.c
94608+++ b/kernel/trace/trace_stack.c
94609@@ -50,7 +50,7 @@ static inline void check_stack(void)
94610 return;
94611
94612 /* we do not handle interrupt stacks yet */
94613- if (!object_is_on_stack(&this_size))
94614+ if (!object_starts_on_stack(&this_size))
94615 return;
94616
94617 local_irq_save(flags);
94618diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
94619index 40cafb0..d5ead43 100644
94620--- a/kernel/trace/trace_workqueue.c
94621+++ b/kernel/trace/trace_workqueue.c
94622@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
94623 int cpu;
94624 pid_t pid;
94625 /* Can be inserted from interrupt or user context, need to be atomic */
94626- atomic_t inserted;
94627+ atomic_unchecked_t inserted;
94628 /*
94629 * Don't need to be atomic, works are serialized in a single workqueue thread
94630 * on a single CPU.
94631@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
94632 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
94633 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
94634 if (node->pid == wq_thread->pid) {
94635- atomic_inc(&node->inserted);
94636+ atomic_inc_unchecked(&node->inserted);
94637 goto found;
94638 }
94639 }
94640@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
94641 tsk = get_pid_task(pid, PIDTYPE_PID);
94642 if (tsk) {
94643 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
94644- atomic_read(&cws->inserted), cws->executed,
94645+ atomic_read_unchecked(&cws->inserted), cws->executed,
94646 tsk->comm);
94647 put_task_struct(tsk);
94648 }
94649diff --git a/kernel/user.c b/kernel/user.c
94650index 1b91701..8795237 100644
94651--- a/kernel/user.c
94652+++ b/kernel/user.c
94653@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
94654 spin_lock_irq(&uidhash_lock);
94655 up = uid_hash_find(uid, hashent);
94656 if (up) {
94657+ put_user_ns(ns);
94658 key_put(new->uid_keyring);
94659 key_put(new->session_keyring);
94660 kmem_cache_free(uid_cachep, new);
94661diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
94662index 234ceb1..ad74049 100644
94663--- a/lib/Kconfig.debug
94664+++ b/lib/Kconfig.debug
94665@@ -905,7 +905,7 @@ config LATENCYTOP
94666 select STACKTRACE
94667 select SCHEDSTATS
94668 select SCHED_DEBUG
94669- depends on HAVE_LATENCYTOP_SUPPORT
94670+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
94671 help
94672 Enable this option if you want to use the LatencyTOP tool
94673 to find out which userspace is blocking on what kernel operations.
94674diff --git a/lib/bitmap.c b/lib/bitmap.c
94675index 7025658..8d14cab 100644
94676--- a/lib/bitmap.c
94677+++ b/lib/bitmap.c
94678@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
94679 {
94680 int c, old_c, totaldigits, ndigits, nchunks, nbits;
94681 u32 chunk;
94682- const char __user *ubuf = buf;
94683+ const char __user *ubuf = (const char __force_user *)buf;
94684
94685 bitmap_zero(maskp, nmaskbits);
94686
94687@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
94688 {
94689 if (!access_ok(VERIFY_READ, ubuf, ulen))
94690 return -EFAULT;
94691- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
94692+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
94693 }
94694 EXPORT_SYMBOL(bitmap_parse_user);
94695
94696diff --git a/lib/bug.c b/lib/bug.c
94697index 300e41a..2779eb0 100644
94698--- a/lib/bug.c
94699+++ b/lib/bug.c
94700@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
94701 return BUG_TRAP_TYPE_NONE;
94702
94703 bug = find_bug(bugaddr);
94704+ if (!bug)
94705+ return BUG_TRAP_TYPE_NONE;
94706
94707 printk(KERN_EMERG "------------[ cut here ]------------\n");
94708
94709diff --git a/lib/debugobjects.c b/lib/debugobjects.c
94710index 2b413db..e21d207 100644
94711--- a/lib/debugobjects.c
94712+++ b/lib/debugobjects.c
94713@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
94714 if (limit > 4)
94715 return;
94716
94717- is_on_stack = object_is_on_stack(addr);
94718+ is_on_stack = object_starts_on_stack(addr);
94719 if (is_on_stack == onstack)
94720 return;
94721
94722diff --git a/lib/devres.c b/lib/devres.c
94723index 72c8909..7543868 100644
94724--- a/lib/devres.c
94725+++ b/lib/devres.c
94726@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
94727 {
94728 iounmap(addr);
94729 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
94730- (void *)addr));
94731+ (void __force *)addr));
94732 }
94733 EXPORT_SYMBOL(devm_iounmap);
94734
94735@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
94736 {
94737 ioport_unmap(addr);
94738 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
94739- devm_ioport_map_match, (void *)addr));
94740+ devm_ioport_map_match, (void __force *)addr));
94741 }
94742 EXPORT_SYMBOL(devm_ioport_unmap);
94743
94744diff --git a/lib/dma-debug.c b/lib/dma-debug.c
94745index 084e879..0674448 100644
94746--- a/lib/dma-debug.c
94747+++ b/lib/dma-debug.c
94748@@ -861,7 +861,7 @@ out:
94749
94750 static void check_for_stack(struct device *dev, void *addr)
94751 {
94752- if (object_is_on_stack(addr))
94753+ if (object_starts_on_stack(addr))
94754 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
94755 "stack [addr=%p]\n", addr);
94756 }
94757diff --git a/lib/idr.c b/lib/idr.c
94758index eda7ba3..915dfae 100644
94759--- a/lib/idr.c
94760+++ b/lib/idr.c
94761@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
94762 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
94763
94764 /* if already at the top layer, we need to grow */
94765- if (id >= 1 << (idp->layers * IDR_BITS)) {
94766+ if (id >= (1 << (idp->layers * IDR_BITS))) {
94767 *starting_id = id;
94768 return IDR_NEED_TO_GROW;
94769 }
94770diff --git a/lib/inflate.c b/lib/inflate.c
94771index d102559..4215f31 100644
94772--- a/lib/inflate.c
94773+++ b/lib/inflate.c
94774@@ -266,7 +266,7 @@ static void free(void *where)
94775 malloc_ptr = free_mem_ptr;
94776 }
94777 #else
94778-#define malloc(a) kmalloc(a, GFP_KERNEL)
94779+#define malloc(a) kmalloc((a), GFP_KERNEL)
94780 #define free(a) kfree(a)
94781 #endif
94782
94783diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
94784index bd2bea9..6b3c95e 100644
94785--- a/lib/is_single_threaded.c
94786+++ b/lib/is_single_threaded.c
94787@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
94788 struct task_struct *p, *t;
94789 bool ret;
94790
94791+ if (!mm)
94792+ return true;
94793+
94794 if (atomic_read(&task->signal->live) != 1)
94795 return false;
94796
94797diff --git a/lib/kobject.c b/lib/kobject.c
94798index b512b74..8115eb1 100644
94799--- a/lib/kobject.c
94800+++ b/lib/kobject.c
94801@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
94802 return ret;
94803 }
94804
94805-struct sysfs_ops kobj_sysfs_ops = {
94806+const struct sysfs_ops kobj_sysfs_ops = {
94807 .show = kobj_attr_show,
94808 .store = kobj_attr_store,
94809 };
94810@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
94811 * If the kset was not able to be created, NULL will be returned.
94812 */
94813 static struct kset *kset_create(const char *name,
94814- struct kset_uevent_ops *uevent_ops,
94815+ const struct kset_uevent_ops *uevent_ops,
94816 struct kobject *parent_kobj)
94817 {
94818 struct kset *kset;
94819@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
94820 * If the kset was not able to be created, NULL will be returned.
94821 */
94822 struct kset *kset_create_and_add(const char *name,
94823- struct kset_uevent_ops *uevent_ops,
94824+ const struct kset_uevent_ops *uevent_ops,
94825 struct kobject *parent_kobj)
94826 {
94827 struct kset *kset;
94828diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
94829index 507b821..0bf8ed0 100644
94830--- a/lib/kobject_uevent.c
94831+++ b/lib/kobject_uevent.c
94832@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
94833 const char *subsystem;
94834 struct kobject *top_kobj;
94835 struct kset *kset;
94836- struct kset_uevent_ops *uevent_ops;
94837+ const struct kset_uevent_ops *uevent_ops;
94838 u64 seq;
94839 int i = 0;
94840 int retval = 0;
94841diff --git a/lib/kref.c b/lib/kref.c
94842index 9ecd6e8..12c94c1 100644
94843--- a/lib/kref.c
94844+++ b/lib/kref.c
94845@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
94846 */
94847 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
94848 {
94849- WARN_ON(release == NULL);
94850+ BUG_ON(release == NULL);
94851 WARN_ON(release == (void (*)(struct kref *))kfree);
94852
94853 if (atomic_dec_and_test(&kref->refcount)) {
94854diff --git a/lib/parser.c b/lib/parser.c
94855index b00d020..1b34325 100644
94856--- a/lib/parser.c
94857+++ b/lib/parser.c
94858@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
94859 char *buf;
94860 int ret;
94861
94862- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
94863+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
94864 if (!buf)
94865 return -ENOMEM;
94866 memcpy(buf, s->from, s->to - s->from);
94867diff --git a/lib/radix-tree.c b/lib/radix-tree.c
94868index 92cdd99..a8149d7 100644
94869--- a/lib/radix-tree.c
94870+++ b/lib/radix-tree.c
94871@@ -81,7 +81,7 @@ struct radix_tree_preload {
94872 int nr;
94873 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
94874 };
94875-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
94876+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
94877
94878 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
94879 {
94880diff --git a/lib/random32.c b/lib/random32.c
94881index 217d5c4..45aba8a 100644
94882--- a/lib/random32.c
94883+++ b/lib/random32.c
94884@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
94885 */
94886 static inline u32 __seed(u32 x, u32 m)
94887 {
94888- return (x < m) ? x + m : x;
94889+ return (x <= m) ? x + m + 1 : x;
94890 }
94891
94892 /**
94893diff --git a/lib/vsprintf.c b/lib/vsprintf.c
94894index 33bed5e..1477e46 100644
94895--- a/lib/vsprintf.c
94896+++ b/lib/vsprintf.c
94897@@ -16,6 +16,9 @@
94898 * - scnprintf and vscnprintf
94899 */
94900
94901+#ifdef CONFIG_GRKERNSEC_HIDESYM
94902+#define __INCLUDED_BY_HIDESYM 1
94903+#endif
94904 #include <stdarg.h>
94905 #include <linux/module.h>
94906 #include <linux/types.h>
94907@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
94908 return buf;
94909 }
94910
94911-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
94912+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
94913 {
94914 int len, i;
94915
94916 if ((unsigned long)s < PAGE_SIZE)
94917- s = "<NULL>";
94918+ s = "(null)";
94919
94920 len = strnlen(s, spec.precision);
94921
94922@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
94923 unsigned long value = (unsigned long) ptr;
94924 #ifdef CONFIG_KALLSYMS
94925 char sym[KSYM_SYMBOL_LEN];
94926- if (ext != 'f' && ext != 's')
94927+ if (ext != 'f' && ext != 's' && ext != 'a')
94928 sprint_symbol(sym, value);
94929 else
94930 kallsyms_lookup(value, NULL, NULL, NULL, sym);
94931@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
94932 * - 'f' For simple symbolic function names without offset
94933 * - 'S' For symbolic direct pointers with offset
94934 * - 's' For symbolic direct pointers without offset
94935+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
94936+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
94937 * - 'R' For a struct resource pointer, it prints the range of
94938 * addresses (not the name nor the flags)
94939 * - 'M' For a 6-byte MAC address, it prints the address in the
94940@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
94941 struct printf_spec spec)
94942 {
94943 if (!ptr)
94944- return string(buf, end, "(null)", spec);
94945+ return string(buf, end, "(nil)", spec);
94946
94947 switch (*fmt) {
94948 case 'F':
94949@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
94950 case 's':
94951 /* Fallthrough */
94952 case 'S':
94953+#ifdef CONFIG_GRKERNSEC_HIDESYM
94954+ break;
94955+#else
94956+ return symbol_string(buf, end, ptr, spec, *fmt);
94957+#endif
94958+ case 'a':
94959+ /* Fallthrough */
94960+ case 'A':
94961 return symbol_string(buf, end, ptr, spec, *fmt);
94962 case 'R':
94963 return resource_string(buf, end, ptr, spec);
94964@@ -1445,7 +1458,7 @@ do { \
94965 size_t len;
94966 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
94967 || (unsigned long)save_str < PAGE_SIZE)
94968- save_str = "<NULL>";
94969+ save_str = "(null)";
94970 len = strlen(save_str);
94971 if (str + len + 1 < end)
94972 memcpy(str, save_str, len + 1);
94973@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
94974 typeof(type) value; \
94975 if (sizeof(type) == 8) { \
94976 args = PTR_ALIGN(args, sizeof(u32)); \
94977- *(u32 *)&value = *(u32 *)args; \
94978- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
94979+ *(u32 *)&value = *(const u32 *)args; \
94980+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
94981 } else { \
94982 args = PTR_ALIGN(args, sizeof(type)); \
94983- value = *(typeof(type) *)args; \
94984+ value = *(const typeof(type) *)args; \
94985 } \
94986 args += sizeof(type); \
94987 value; \
94988@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
94989 const char *str_arg = args;
94990 size_t len = strlen(str_arg);
94991 args += len + 1;
94992- str = string(str, end, (char *)str_arg, spec);
94993+ str = string(str, end, str_arg, spec);
94994 break;
94995 }
94996
94997diff --git a/localversion-grsec b/localversion-grsec
94998new file mode 100644
94999index 0000000..7cd6065
95000--- /dev/null
95001+++ b/localversion-grsec
95002@@ -0,0 +1 @@
95003+-grsec
95004diff --git a/mm/Kconfig b/mm/Kconfig
95005index 2c19c0b..f3c3f83 100644
95006--- a/mm/Kconfig
95007+++ b/mm/Kconfig
95008@@ -228,7 +228,7 @@ config KSM
95009 config DEFAULT_MMAP_MIN_ADDR
95010 int "Low address space to protect from user allocation"
95011 depends on MMU
95012- default 4096
95013+ default 65536
95014 help
95015 This is the portion of low virtual memory which should be protected
95016 from userspace allocation. Keeping a user from writing to low pages
95017diff --git a/mm/backing-dev.c b/mm/backing-dev.c
95018index 67a33a5..094dcf1 100644
95019--- a/mm/backing-dev.c
95020+++ b/mm/backing-dev.c
95021@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
95022 list_add_tail_rcu(&wb->list, &bdi->wb_list);
95023 spin_unlock(&bdi->wb_lock);
95024
95025- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
95026+ tsk->flags |= PF_SWAPWRITE;
95027 set_freezable();
95028
95029 /*
95030@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
95031 * Add the default flusher task that gets created for any bdi
95032 * that has dirty data pending writeout
95033 */
95034-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95035+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
95036 {
95037 if (!bdi_cap_writeback_dirty(bdi))
95038 return;
95039diff --git a/mm/filemap.c b/mm/filemap.c
95040index a1fe378..e26702f 100644
95041--- a/mm/filemap.c
95042+++ b/mm/filemap.c
95043@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
95044 struct address_space *mapping = file->f_mapping;
95045
95046 if (!mapping->a_ops->readpage)
95047- return -ENOEXEC;
95048+ return -ENODEV;
95049 file_accessed(file);
95050 vma->vm_ops = &generic_file_vm_ops;
95051 vma->vm_flags |= VM_CAN_NONLINEAR;
95052@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
95053 *pos = i_size_read(inode);
95054
95055 if (limit != RLIM_INFINITY) {
95056+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
95057 if (*pos >= limit) {
95058 send_sig(SIGXFSZ, current, 0);
95059 return -EFBIG;
95060diff --git a/mm/fremap.c b/mm/fremap.c
95061index b6ec85a..a24ac22 100644
95062--- a/mm/fremap.c
95063+++ b/mm/fremap.c
95064@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95065 retry:
95066 vma = find_vma(mm, start);
95067
95068+#ifdef CONFIG_PAX_SEGMEXEC
95069+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
95070+ goto out;
95071+#endif
95072+
95073 /*
95074 * Make sure the vma is shared, that it supports prefaulting,
95075 * and that the remapped range is valid and fully within
95076@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
95077 /*
95078 * drop PG_Mlocked flag for over-mapped range
95079 */
95080- unsigned int saved_flags = vma->vm_flags;
95081+ unsigned long saved_flags = vma->vm_flags;
95082 munlock_vma_pages_range(vma, start, start + size);
95083 vma->vm_flags = saved_flags;
95084 }
95085diff --git a/mm/highmem.c b/mm/highmem.c
95086index 9c1e627..5ca9447 100644
95087--- a/mm/highmem.c
95088+++ b/mm/highmem.c
95089@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
95090 * So no dangers, even with speculative execution.
95091 */
95092 page = pte_page(pkmap_page_table[i]);
95093+ pax_open_kernel();
95094 pte_clear(&init_mm, (unsigned long)page_address(page),
95095 &pkmap_page_table[i]);
95096-
95097+ pax_close_kernel();
95098 set_page_address(page, NULL);
95099 need_flush = 1;
95100 }
95101@@ -177,9 +178,11 @@ start:
95102 }
95103 }
95104 vaddr = PKMAP_ADDR(last_pkmap_nr);
95105+
95106+ pax_open_kernel();
95107 set_pte_at(&init_mm, vaddr,
95108 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
95109-
95110+ pax_close_kernel();
95111 pkmap_count[last_pkmap_nr] = 1;
95112 set_page_address(page, (void *)vaddr);
95113
95114diff --git a/mm/hugetlb.c b/mm/hugetlb.c
95115index 5e1e508..ac70275 100644
95116--- a/mm/hugetlb.c
95117+++ b/mm/hugetlb.c
95118@@ -869,6 +869,7 @@ free:
95119 list_del(&page->lru);
95120 enqueue_huge_page(h, page);
95121 }
95122+ spin_unlock(&hugetlb_lock);
95123
95124 /* Free unnecessary surplus pages to the buddy allocator */
95125 if (!list_empty(&surplus_list)) {
95126@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
95127 return 1;
95128 }
95129
95130+#ifdef CONFIG_PAX_SEGMEXEC
95131+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
95132+{
95133+ struct mm_struct *mm = vma->vm_mm;
95134+ struct vm_area_struct *vma_m;
95135+ unsigned long address_m;
95136+ pte_t *ptep_m;
95137+
95138+ vma_m = pax_find_mirror_vma(vma);
95139+ if (!vma_m)
95140+ return;
95141+
95142+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95143+ address_m = address + SEGMEXEC_TASK_SIZE;
95144+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
95145+ get_page(page_m);
95146+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
95147+}
95148+#endif
95149+
95150 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
95151 unsigned long address, pte_t *ptep, pte_t pte,
95152 struct page *pagecache_page)
95153@@ -2004,6 +2025,11 @@ retry_avoidcopy:
95154 huge_ptep_clear_flush(vma, address, ptep);
95155 set_huge_pte_at(mm, address, ptep,
95156 make_huge_pte(vma, new_page, 1));
95157+
95158+#ifdef CONFIG_PAX_SEGMEXEC
95159+ pax_mirror_huge_pte(vma, address, new_page);
95160+#endif
95161+
95162 /* Make the old page be freed below */
95163 new_page = old_page;
95164 }
95165@@ -2135,6 +2161,10 @@ retry:
95166 && (vma->vm_flags & VM_SHARED)));
95167 set_huge_pte_at(mm, address, ptep, new_pte);
95168
95169+#ifdef CONFIG_PAX_SEGMEXEC
95170+ pax_mirror_huge_pte(vma, address, page);
95171+#endif
95172+
95173 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
95174 /* Optimization, do the COW without a second fault */
95175 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
95176@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95177 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
95178 struct hstate *h = hstate_vma(vma);
95179
95180+#ifdef CONFIG_PAX_SEGMEXEC
95181+ struct vm_area_struct *vma_m;
95182+
95183+ vma_m = pax_find_mirror_vma(vma);
95184+ if (vma_m) {
95185+ unsigned long address_m;
95186+
95187+ if (vma->vm_start > vma_m->vm_start) {
95188+ address_m = address;
95189+ address -= SEGMEXEC_TASK_SIZE;
95190+ vma = vma_m;
95191+ h = hstate_vma(vma);
95192+ } else
95193+ address_m = address + SEGMEXEC_TASK_SIZE;
95194+
95195+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
95196+ return VM_FAULT_OOM;
95197+ address_m &= HPAGE_MASK;
95198+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
95199+ }
95200+#endif
95201+
95202 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
95203 if (!ptep)
95204 return VM_FAULT_OOM;
95205diff --git a/mm/internal.h b/mm/internal.h
95206index f03e8e2..7354343 100644
95207--- a/mm/internal.h
95208+++ b/mm/internal.h
95209@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
95210 * in mm/page_alloc.c
95211 */
95212 extern void __free_pages_bootmem(struct page *page, unsigned int order);
95213+extern void free_compound_page(struct page *page);
95214 extern void prep_compound_page(struct page *page, unsigned long order);
95215
95216
95217diff --git a/mm/kmemleak.c b/mm/kmemleak.c
95218index c346660..b47382f 100644
95219--- a/mm/kmemleak.c
95220+++ b/mm/kmemleak.c
95221@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
95222
95223 for (i = 0; i < object->trace_len; i++) {
95224 void *ptr = (void *)object->trace[i];
95225- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
95226+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
95227 }
95228 }
95229
95230diff --git a/mm/maccess.c b/mm/maccess.c
95231index 9073695..1127f348 100644
95232--- a/mm/maccess.c
95233+++ b/mm/maccess.c
95234@@ -14,7 +14,7 @@
95235 * Safely read from address @src to the buffer at @dst. If a kernel fault
95236 * happens, handle that and return -EFAULT.
95237 */
95238-long probe_kernel_read(void *dst, void *src, size_t size)
95239+long probe_kernel_read(void *dst, const void *src, size_t size)
95240 {
95241 long ret;
95242 mm_segment_t old_fs = get_fs();
95243@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
95244 set_fs(KERNEL_DS);
95245 pagefault_disable();
95246 ret = __copy_from_user_inatomic(dst,
95247- (__force const void __user *)src, size);
95248+ (const void __force_user *)src, size);
95249 pagefault_enable();
95250 set_fs(old_fs);
95251
95252@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
95253 * Safely write to address @dst from the buffer at @src. If a kernel fault
95254 * happens, handle that and return -EFAULT.
95255 */
95256-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
95257+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
95258 {
95259 long ret;
95260 mm_segment_t old_fs = get_fs();
95261
95262 set_fs(KERNEL_DS);
95263 pagefault_disable();
95264- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
95265+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
95266 pagefault_enable();
95267 set_fs(old_fs);
95268
95269diff --git a/mm/madvise.c b/mm/madvise.c
95270index 35b1479..499f7d4 100644
95271--- a/mm/madvise.c
95272+++ b/mm/madvise.c
95273@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
95274 pgoff_t pgoff;
95275 unsigned long new_flags = vma->vm_flags;
95276
95277+#ifdef CONFIG_PAX_SEGMEXEC
95278+ struct vm_area_struct *vma_m;
95279+#endif
95280+
95281 switch (behavior) {
95282 case MADV_NORMAL:
95283 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
95284@@ -103,6 +107,13 @@ success:
95285 /*
95286 * vm_flags is protected by the mmap_sem held in write mode.
95287 */
95288+
95289+#ifdef CONFIG_PAX_SEGMEXEC
95290+ vma_m = pax_find_mirror_vma(vma);
95291+ if (vma_m)
95292+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
95293+#endif
95294+
95295 vma->vm_flags = new_flags;
95296
95297 out:
95298@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
95299 struct vm_area_struct ** prev,
95300 unsigned long start, unsigned long end)
95301 {
95302+
95303+#ifdef CONFIG_PAX_SEGMEXEC
95304+ struct vm_area_struct *vma_m;
95305+#endif
95306+
95307 *prev = vma;
95308 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
95309 return -EINVAL;
95310@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
95311 zap_page_range(vma, start, end - start, &details);
95312 } else
95313 zap_page_range(vma, start, end - start, NULL);
95314+
95315+#ifdef CONFIG_PAX_SEGMEXEC
95316+ vma_m = pax_find_mirror_vma(vma);
95317+ if (vma_m) {
95318+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
95319+ struct zap_details details = {
95320+ .nonlinear_vma = vma_m,
95321+ .last_index = ULONG_MAX,
95322+ };
95323+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
95324+ } else
95325+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
95326+ }
95327+#endif
95328+
95329 return 0;
95330 }
95331
95332@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
95333 if (end < start)
95334 goto out;
95335
95336+#ifdef CONFIG_PAX_SEGMEXEC
95337+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
95338+ if (end > SEGMEXEC_TASK_SIZE)
95339+ goto out;
95340+ } else
95341+#endif
95342+
95343+ if (end > TASK_SIZE)
95344+ goto out;
95345+
95346 error = 0;
95347 if (end == start)
95348 goto out;
95349diff --git a/mm/memory-failure.c b/mm/memory-failure.c
95350index 8aeba53..b4a4198 100644
95351--- a/mm/memory-failure.c
95352+++ b/mm/memory-failure.c
95353@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
95354
95355 int sysctl_memory_failure_recovery __read_mostly = 1;
95356
95357-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
95358+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
95359
95360 /*
95361 * Send all the processes who have the page mapped an ``action optional''
95362@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
95363 si.si_signo = SIGBUS;
95364 si.si_errno = 0;
95365 si.si_code = BUS_MCEERR_AO;
95366- si.si_addr = (void *)addr;
95367+ si.si_addr = (void __user *)addr;
95368 #ifdef __ARCH_SI_TRAPNO
95369 si.si_trapno = trapno;
95370 #endif
95371@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
95372 return 0;
95373 }
95374
95375- atomic_long_add(1, &mce_bad_pages);
95376+ atomic_long_add_unchecked(1, &mce_bad_pages);
95377
95378 /*
95379 * We need/can do nothing about count=0 pages.
95380diff --git a/mm/memory.c b/mm/memory.c
95381index 6c836d3..48f3264 100644
95382--- a/mm/memory.c
95383+++ b/mm/memory.c
95384@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
95385 return;
95386
95387 pmd = pmd_offset(pud, start);
95388+
95389+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
95390 pud_clear(pud);
95391 pmd_free_tlb(tlb, pmd, start);
95392+#endif
95393+
95394 }
95395
95396 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95397@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
95398 if (end - 1 > ceiling - 1)
95399 return;
95400
95401+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
95402 pud = pud_offset(pgd, start);
95403 pgd_clear(pgd);
95404 pud_free_tlb(tlb, pud, start);
95405+#endif
95406+
95407 }
95408
95409 /*
95410@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95411 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
95412 i = 0;
95413
95414- do {
95415+ while (nr_pages) {
95416 struct vm_area_struct *vma;
95417
95418- vma = find_extend_vma(mm, start);
95419+ vma = find_vma(mm, start);
95420 if (!vma && in_gate_area(tsk, start)) {
95421 unsigned long pg = start & PAGE_MASK;
95422 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
95423@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95424 continue;
95425 }
95426
95427- if (!vma ||
95428+ if (!vma || start < vma->vm_start ||
95429 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
95430 !(vm_flags & vma->vm_flags))
95431 return i ? : -EFAULT;
95432@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
95433 start += PAGE_SIZE;
95434 nr_pages--;
95435 } while (nr_pages && start < vma->vm_end);
95436- } while (nr_pages);
95437+ }
95438 return i;
95439 }
95440
95441@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
95442 page_add_file_rmap(page);
95443 set_pte_at(mm, addr, pte, mk_pte(page, prot));
95444
95445+#ifdef CONFIG_PAX_SEGMEXEC
95446+ pax_mirror_file_pte(vma, addr, page, ptl);
95447+#endif
95448+
95449 retval = 0;
95450 pte_unmap_unlock(pte, ptl);
95451 return retval;
95452@@ -1560,10 +1571,22 @@ out:
95453 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
95454 struct page *page)
95455 {
95456+
95457+#ifdef CONFIG_PAX_SEGMEXEC
95458+ struct vm_area_struct *vma_m;
95459+#endif
95460+
95461 if (addr < vma->vm_start || addr >= vma->vm_end)
95462 return -EFAULT;
95463 if (!page_count(page))
95464 return -EINVAL;
95465+
95466+#ifdef CONFIG_PAX_SEGMEXEC
95467+ vma_m = pax_find_mirror_vma(vma);
95468+ if (vma_m)
95469+ vma_m->vm_flags |= VM_INSERTPAGE;
95470+#endif
95471+
95472 vma->vm_flags |= VM_INSERTPAGE;
95473 return insert_page(vma, addr, page, vma->vm_page_prot);
95474 }
95475@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
95476 unsigned long pfn)
95477 {
95478 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
95479+ BUG_ON(vma->vm_mirror);
95480
95481 if (addr < vma->vm_start || addr >= vma->vm_end)
95482 return -EFAULT;
95483@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
95484 copy_user_highpage(dst, src, va, vma);
95485 }
95486
95487+#ifdef CONFIG_PAX_SEGMEXEC
95488+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
95489+{
95490+ struct mm_struct *mm = vma->vm_mm;
95491+ spinlock_t *ptl;
95492+ pte_t *pte, entry;
95493+
95494+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
95495+ entry = *pte;
95496+ if (!pte_present(entry)) {
95497+ if (!pte_none(entry)) {
95498+ BUG_ON(pte_file(entry));
95499+ free_swap_and_cache(pte_to_swp_entry(entry));
95500+ pte_clear_not_present_full(mm, address, pte, 0);
95501+ }
95502+ } else {
95503+ struct page *page;
95504+
95505+ flush_cache_page(vma, address, pte_pfn(entry));
95506+ entry = ptep_clear_flush(vma, address, pte);
95507+ BUG_ON(pte_dirty(entry));
95508+ page = vm_normal_page(vma, address, entry);
95509+ if (page) {
95510+ update_hiwater_rss(mm);
95511+ if (PageAnon(page))
95512+ dec_mm_counter(mm, anon_rss);
95513+ else
95514+ dec_mm_counter(mm, file_rss);
95515+ page_remove_rmap(page);
95516+ page_cache_release(page);
95517+ }
95518+ }
95519+ pte_unmap_unlock(pte, ptl);
95520+}
95521+
95522+/* PaX: if vma is mirrored, synchronize the mirror's PTE
95523+ *
95524+ * the ptl of the lower mapped page is held on entry and is not released on exit
95525+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
95526+ */
95527+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
95528+{
95529+ struct mm_struct *mm = vma->vm_mm;
95530+ unsigned long address_m;
95531+ spinlock_t *ptl_m;
95532+ struct vm_area_struct *vma_m;
95533+ pmd_t *pmd_m;
95534+ pte_t *pte_m, entry_m;
95535+
95536+ BUG_ON(!page_m || !PageAnon(page_m));
95537+
95538+ vma_m = pax_find_mirror_vma(vma);
95539+ if (!vma_m)
95540+ return;
95541+
95542+ BUG_ON(!PageLocked(page_m));
95543+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95544+ address_m = address + SEGMEXEC_TASK_SIZE;
95545+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
95546+ pte_m = pte_offset_map_nested(pmd_m, address_m);
95547+ ptl_m = pte_lockptr(mm, pmd_m);
95548+ if (ptl != ptl_m) {
95549+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
95550+ if (!pte_none(*pte_m))
95551+ goto out;
95552+ }
95553+
95554+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
95555+ page_cache_get(page_m);
95556+ page_add_anon_rmap(page_m, vma_m, address_m);
95557+ inc_mm_counter(mm, anon_rss);
95558+ set_pte_at(mm, address_m, pte_m, entry_m);
95559+ update_mmu_cache(vma_m, address_m, entry_m);
95560+out:
95561+ if (ptl != ptl_m)
95562+ spin_unlock(ptl_m);
95563+ pte_unmap_nested(pte_m);
95564+ unlock_page(page_m);
95565+}
95566+
95567+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
95568+{
95569+ struct mm_struct *mm = vma->vm_mm;
95570+ unsigned long address_m;
95571+ spinlock_t *ptl_m;
95572+ struct vm_area_struct *vma_m;
95573+ pmd_t *pmd_m;
95574+ pte_t *pte_m, entry_m;
95575+
95576+ BUG_ON(!page_m || PageAnon(page_m));
95577+
95578+ vma_m = pax_find_mirror_vma(vma);
95579+ if (!vma_m)
95580+ return;
95581+
95582+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95583+ address_m = address + SEGMEXEC_TASK_SIZE;
95584+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
95585+ pte_m = pte_offset_map_nested(pmd_m, address_m);
95586+ ptl_m = pte_lockptr(mm, pmd_m);
95587+ if (ptl != ptl_m) {
95588+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
95589+ if (!pte_none(*pte_m))
95590+ goto out;
95591+ }
95592+
95593+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
95594+ page_cache_get(page_m);
95595+ page_add_file_rmap(page_m);
95596+ inc_mm_counter(mm, file_rss);
95597+ set_pte_at(mm, address_m, pte_m, entry_m);
95598+ update_mmu_cache(vma_m, address_m, entry_m);
95599+out:
95600+ if (ptl != ptl_m)
95601+ spin_unlock(ptl_m);
95602+ pte_unmap_nested(pte_m);
95603+}
95604+
95605+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
95606+{
95607+ struct mm_struct *mm = vma->vm_mm;
95608+ unsigned long address_m;
95609+ spinlock_t *ptl_m;
95610+ struct vm_area_struct *vma_m;
95611+ pmd_t *pmd_m;
95612+ pte_t *pte_m, entry_m;
95613+
95614+ vma_m = pax_find_mirror_vma(vma);
95615+ if (!vma_m)
95616+ return;
95617+
95618+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
95619+ address_m = address + SEGMEXEC_TASK_SIZE;
95620+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
95621+ pte_m = pte_offset_map_nested(pmd_m, address_m);
95622+ ptl_m = pte_lockptr(mm, pmd_m);
95623+ if (ptl != ptl_m) {
95624+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
95625+ if (!pte_none(*pte_m))
95626+ goto out;
95627+ }
95628+
95629+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
95630+ set_pte_at(mm, address_m, pte_m, entry_m);
95631+out:
95632+ if (ptl != ptl_m)
95633+ spin_unlock(ptl_m);
95634+ pte_unmap_nested(pte_m);
95635+}
95636+
95637+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
95638+{
95639+ struct page *page_m;
95640+ pte_t entry;
95641+
95642+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
95643+ goto out;
95644+
95645+ entry = *pte;
95646+ page_m = vm_normal_page(vma, address, entry);
95647+ if (!page_m)
95648+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
95649+ else if (PageAnon(page_m)) {
95650+ if (pax_find_mirror_vma(vma)) {
95651+ pte_unmap_unlock(pte, ptl);
95652+ lock_page(page_m);
95653+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
95654+ if (pte_same(entry, *pte))
95655+ pax_mirror_anon_pte(vma, address, page_m, ptl);
95656+ else
95657+ unlock_page(page_m);
95658+ }
95659+ } else
95660+ pax_mirror_file_pte(vma, address, page_m, ptl);
95661+
95662+out:
95663+ pte_unmap_unlock(pte, ptl);
95664+}
95665+#endif
95666+
95667 /*
95668 * This routine handles present pages, when users try to write
95669 * to a shared page. It is done by copying the page to a new address
95670@@ -2156,6 +2360,12 @@ gotten:
95671 */
95672 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
95673 if (likely(pte_same(*page_table, orig_pte))) {
95674+
95675+#ifdef CONFIG_PAX_SEGMEXEC
95676+ if (pax_find_mirror_vma(vma))
95677+ BUG_ON(!trylock_page(new_page));
95678+#endif
95679+
95680 if (old_page) {
95681 if (!PageAnon(old_page)) {
95682 dec_mm_counter(mm, file_rss);
95683@@ -2207,6 +2417,10 @@ gotten:
95684 page_remove_rmap(old_page);
95685 }
95686
95687+#ifdef CONFIG_PAX_SEGMEXEC
95688+ pax_mirror_anon_pte(vma, address, new_page, ptl);
95689+#endif
95690+
95691 /* Free the old page.. */
95692 new_page = old_page;
95693 ret |= VM_FAULT_WRITE;
95694@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
95695 swap_free(entry);
95696 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
95697 try_to_free_swap(page);
95698+
95699+#ifdef CONFIG_PAX_SEGMEXEC
95700+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
95701+#endif
95702+
95703 unlock_page(page);
95704
95705 if (flags & FAULT_FLAG_WRITE) {
95706@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
95707
95708 /* No need to invalidate - it was non-present before */
95709 update_mmu_cache(vma, address, pte);
95710+
95711+#ifdef CONFIG_PAX_SEGMEXEC
95712+ pax_mirror_anon_pte(vma, address, page, ptl);
95713+#endif
95714+
95715 unlock:
95716 pte_unmap_unlock(page_table, ptl);
95717 out:
95718@@ -2632,40 +2856,6 @@ out_release:
95719 }
95720
95721 /*
95722- * This is like a special single-page "expand_{down|up}wards()",
95723- * except we must first make sure that 'address{-|+}PAGE_SIZE'
95724- * doesn't hit another vma.
95725- */
95726-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
95727-{
95728- address &= PAGE_MASK;
95729- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
95730- struct vm_area_struct *prev = vma->vm_prev;
95731-
95732- /*
95733- * Is there a mapping abutting this one below?
95734- *
95735- * That's only ok if it's the same stack mapping
95736- * that has gotten split..
95737- */
95738- if (prev && prev->vm_end == address)
95739- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
95740-
95741- expand_stack(vma, address - PAGE_SIZE);
95742- }
95743- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
95744- struct vm_area_struct *next = vma->vm_next;
95745-
95746- /* As VM_GROWSDOWN but s/below/above/ */
95747- if (next && next->vm_start == address + PAGE_SIZE)
95748- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
95749-
95750- expand_upwards(vma, address + PAGE_SIZE);
95751- }
95752- return 0;
95753-}
95754-
95755-/*
95756 * We enter with non-exclusive mmap_sem (to exclude vma changes,
95757 * but allow concurrent faults), and pte mapped but not yet locked.
95758 * We return with mmap_sem still held, but pte unmapped and unlocked.
95759@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
95760 unsigned long address, pte_t *page_table, pmd_t *pmd,
95761 unsigned int flags)
95762 {
95763- struct page *page;
95764+ struct page *page = NULL;
95765 spinlock_t *ptl;
95766 pte_t entry;
95767
95768- pte_unmap(page_table);
95769-
95770- /* Check if we need to add a guard page to the stack */
95771- if (check_stack_guard_page(vma, address) < 0)
95772- return VM_FAULT_SIGBUS;
95773-
95774- /* Use the zero-page for reads */
95775 if (!(flags & FAULT_FLAG_WRITE)) {
95776 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
95777 vma->vm_page_prot));
95778- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
95779+ ptl = pte_lockptr(mm, pmd);
95780+ spin_lock(ptl);
95781 if (!pte_none(*page_table))
95782 goto unlock;
95783 goto setpte;
95784 }
95785
95786 /* Allocate our own private page. */
95787+ pte_unmap(page_table);
95788+
95789 if (unlikely(anon_vma_prepare(vma)))
95790 goto oom;
95791 page = alloc_zeroed_user_highpage_movable(vma, address);
95792@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
95793 if (!pte_none(*page_table))
95794 goto release;
95795
95796+#ifdef CONFIG_PAX_SEGMEXEC
95797+ if (pax_find_mirror_vma(vma))
95798+ BUG_ON(!trylock_page(page));
95799+#endif
95800+
95801 inc_mm_counter(mm, anon_rss);
95802 page_add_new_anon_rmap(page, vma, address);
95803 setpte:
95804@@ -2720,6 +2911,12 @@ setpte:
95805
95806 /* No need to invalidate - it was non-present before */
95807 update_mmu_cache(vma, address, entry);
95808+
95809+#ifdef CONFIG_PAX_SEGMEXEC
95810+ if (page)
95811+ pax_mirror_anon_pte(vma, address, page, ptl);
95812+#endif
95813+
95814 unlock:
95815 pte_unmap_unlock(page_table, ptl);
95816 return 0;
95817@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95818 */
95819 /* Only go through if we didn't race with anybody else... */
95820 if (likely(pte_same(*page_table, orig_pte))) {
95821+
95822+#ifdef CONFIG_PAX_SEGMEXEC
95823+ if (anon && pax_find_mirror_vma(vma))
95824+ BUG_ON(!trylock_page(page));
95825+#endif
95826+
95827 flush_icache_page(vma, page);
95828 entry = mk_pte(page, vma->vm_page_prot);
95829 if (flags & FAULT_FLAG_WRITE)
95830@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95831
95832 /* no need to invalidate: a not-present page won't be cached */
95833 update_mmu_cache(vma, address, entry);
95834+
95835+#ifdef CONFIG_PAX_SEGMEXEC
95836+ if (anon)
95837+ pax_mirror_anon_pte(vma, address, page, ptl);
95838+ else
95839+ pax_mirror_file_pte(vma, address, page, ptl);
95840+#endif
95841+
95842 } else {
95843 if (charged)
95844 mem_cgroup_uncharge_page(page);
95845@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
95846 if (flags & FAULT_FLAG_WRITE)
95847 flush_tlb_page(vma, address);
95848 }
95849+
95850+#ifdef CONFIG_PAX_SEGMEXEC
95851+ pax_mirror_pte(vma, address, pte, pmd, ptl);
95852+ return 0;
95853+#endif
95854+
95855 unlock:
95856 pte_unmap_unlock(pte, ptl);
95857 return 0;
95858@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95859 pmd_t *pmd;
95860 pte_t *pte;
95861
95862+#ifdef CONFIG_PAX_SEGMEXEC
95863+ struct vm_area_struct *vma_m;
95864+#endif
95865+
95866 __set_current_state(TASK_RUNNING);
95867
95868 count_vm_event(PGFAULT);
95869@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
95870 if (unlikely(is_vm_hugetlb_page(vma)))
95871 return hugetlb_fault(mm, vma, address, flags);
95872
95873+#ifdef CONFIG_PAX_SEGMEXEC
95874+ vma_m = pax_find_mirror_vma(vma);
95875+ if (vma_m) {
95876+ unsigned long address_m;
95877+ pgd_t *pgd_m;
95878+ pud_t *pud_m;
95879+ pmd_t *pmd_m;
95880+
95881+ if (vma->vm_start > vma_m->vm_start) {
95882+ address_m = address;
95883+ address -= SEGMEXEC_TASK_SIZE;
95884+ vma = vma_m;
95885+ } else
95886+ address_m = address + SEGMEXEC_TASK_SIZE;
95887+
95888+ pgd_m = pgd_offset(mm, address_m);
95889+ pud_m = pud_alloc(mm, pgd_m, address_m);
95890+ if (!pud_m)
95891+ return VM_FAULT_OOM;
95892+ pmd_m = pmd_alloc(mm, pud_m, address_m);
95893+ if (!pmd_m)
95894+ return VM_FAULT_OOM;
95895+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
95896+ return VM_FAULT_OOM;
95897+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
95898+ }
95899+#endif
95900+
95901 pgd = pgd_offset(mm, address);
95902 pud = pud_alloc(mm, pgd, address);
95903 if (!pud)
95904@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
95905 gate_vma.vm_start = FIXADDR_USER_START;
95906 gate_vma.vm_end = FIXADDR_USER_END;
95907 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
95908- gate_vma.vm_page_prot = __P101;
95909+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
95910 /*
95911 * Make sure the vDSO gets into every core dump.
95912 * Dumping its contents makes post-mortem fully interpretable later
95913diff --git a/mm/mempolicy.c b/mm/mempolicy.c
95914index 3c6e3e2..b1ddbb8 100644
95915--- a/mm/mempolicy.c
95916+++ b/mm/mempolicy.c
95917@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
95918 struct vm_area_struct *next;
95919 int err;
95920
95921+#ifdef CONFIG_PAX_SEGMEXEC
95922+ struct vm_area_struct *vma_m;
95923+#endif
95924+
95925 err = 0;
95926 for (; vma && vma->vm_start < end; vma = next) {
95927 next = vma->vm_next;
95928@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
95929 err = policy_vma(vma, new);
95930 if (err)
95931 break;
95932+
95933+#ifdef CONFIG_PAX_SEGMEXEC
95934+ vma_m = pax_find_mirror_vma(vma);
95935+ if (vma_m) {
95936+ err = policy_vma(vma_m, new);
95937+ if (err)
95938+ break;
95939+ }
95940+#endif
95941+
95942 }
95943 return err;
95944 }
95945@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
95946
95947 if (end < start)
95948 return -EINVAL;
95949+
95950+#ifdef CONFIG_PAX_SEGMEXEC
95951+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
95952+ if (end > SEGMEXEC_TASK_SIZE)
95953+ return -EINVAL;
95954+ } else
95955+#endif
95956+
95957+ if (end > TASK_SIZE)
95958+ return -EINVAL;
95959+
95960 if (end == start)
95961 return 0;
95962
95963@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
95964 if (!mm)
95965 return -EINVAL;
95966
95967+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
95968+ if (mm != current->mm &&
95969+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
95970+ err = -EPERM;
95971+ goto out;
95972+ }
95973+#endif
95974+
95975 /*
95976 * Check if this process has the right to modify the specified
95977 * process. The right exists if the process has administrative
95978@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
95979 rcu_read_lock();
95980 tcred = __task_cred(task);
95981 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
95982- cred->uid != tcred->suid && cred->uid != tcred->uid &&
95983- !capable(CAP_SYS_NICE)) {
95984+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
95985 rcu_read_unlock();
95986 err = -EPERM;
95987 goto out;
95988@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
95989 }
95990 #endif
95991
95992+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
95993+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
95994+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
95995+ _mm->pax_flags & MF_PAX_SEGMEXEC))
95996+#endif
95997+
95998 /*
95999 * Display pages allocated per node and memory policy via /proc.
96000 */
96001@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
96002 int n;
96003 char buffer[50];
96004
96005+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96006+ if (current->exec_id != m->exec_id) {
96007+ gr_log_badprocpid("numa_maps");
96008+ return 0;
96009+ }
96010+#endif
96011+
96012 if (!mm)
96013 return 0;
96014
96015@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
96016 mpol_to_str(buffer, sizeof(buffer), pol, 0);
96017 mpol_cond_put(pol);
96018
96019+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96020+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
96021+#else
96022 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
96023+#endif
96024
96025 if (file) {
96026 seq_printf(m, " file=");
96027- seq_path(m, &file->f_path, "\n\t= ");
96028+ seq_path(m, &file->f_path, "\n\t\\= ");
96029 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
96030 seq_printf(m, " heap");
96031 } else if (vma->vm_start <= mm->start_stack &&
96032diff --git a/mm/migrate.c b/mm/migrate.c
96033index aaca868..2ebecdc 100644
96034--- a/mm/migrate.c
96035+++ b/mm/migrate.c
96036@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
96037 unsigned long chunk_start;
96038 int err;
96039
96040+ pax_track_stack();
96041+
96042 task_nodes = cpuset_mems_allowed(task);
96043
96044 err = -ENOMEM;
96045@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96046 if (!mm)
96047 return -EINVAL;
96048
96049+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
96050+ if (mm != current->mm &&
96051+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
96052+ err = -EPERM;
96053+ goto out;
96054+ }
96055+#endif
96056+
96057 /*
96058 * Check if this process has the right to modify the specified
96059 * process. The right exists if the process has administrative
96060@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
96061 rcu_read_lock();
96062 tcred = __task_cred(task);
96063 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
96064- cred->uid != tcred->suid && cred->uid != tcred->uid &&
96065- !capable(CAP_SYS_NICE)) {
96066+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
96067 rcu_read_unlock();
96068 err = -EPERM;
96069 goto out;
96070diff --git a/mm/mlock.c b/mm/mlock.c
96071index 2d846cf..98134d2 100644
96072--- a/mm/mlock.c
96073+++ b/mm/mlock.c
96074@@ -13,6 +13,7 @@
96075 #include <linux/pagemap.h>
96076 #include <linux/mempolicy.h>
96077 #include <linux/syscalls.h>
96078+#include <linux/security.h>
96079 #include <linux/sched.h>
96080 #include <linux/module.h>
96081 #include <linux/rmap.h>
96082@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
96083 }
96084 }
96085
96086-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
96087-{
96088- return (vma->vm_flags & VM_GROWSDOWN) &&
96089- (vma->vm_start == addr) &&
96090- !vma_stack_continue(vma->vm_prev, addr);
96091-}
96092-
96093 /**
96094 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
96095 * @vma: target vma
96096@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
96097 if (vma->vm_flags & VM_WRITE)
96098 gup_flags |= FOLL_WRITE;
96099
96100- /* We don't try to access the guard page of a stack vma */
96101- if (stack_guard_page(vma, start)) {
96102- addr += PAGE_SIZE;
96103- nr_pages--;
96104- }
96105-
96106 while (nr_pages > 0) {
96107 int i;
96108
96109@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
96110 {
96111 unsigned long nstart, end, tmp;
96112 struct vm_area_struct * vma, * prev;
96113- int error;
96114+ int error = -EINVAL;
96115
96116 len = PAGE_ALIGN(len);
96117 end = start + len;
96118@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
96119 return -EINVAL;
96120 if (end == start)
96121 return 0;
96122+ if (end > TASK_SIZE)
96123+ return -EINVAL;
96124+
96125 vma = find_vma_prev(current->mm, start, &prev);
96126 if (!vma || vma->vm_start > start)
96127 return -ENOMEM;
96128@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
96129 for (nstart = start ; ; ) {
96130 unsigned int newflags;
96131
96132+#ifdef CONFIG_PAX_SEGMEXEC
96133+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96134+ break;
96135+#endif
96136+
96137 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
96138
96139 newflags = vma->vm_flags | VM_LOCKED;
96140@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
96141 lock_limit >>= PAGE_SHIFT;
96142
96143 /* check against resource limits */
96144+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
96145 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
96146 error = do_mlock(start, len, 1);
96147 up_write(&current->mm->mmap_sem);
96148@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
96149 static int do_mlockall(int flags)
96150 {
96151 struct vm_area_struct * vma, * prev = NULL;
96152- unsigned int def_flags = 0;
96153
96154 if (flags & MCL_FUTURE)
96155- def_flags = VM_LOCKED;
96156- current->mm->def_flags = def_flags;
96157+ current->mm->def_flags |= VM_LOCKED;
96158+ else
96159+ current->mm->def_flags &= ~VM_LOCKED;
96160 if (flags == MCL_FUTURE)
96161 goto out;
96162
96163 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
96164- unsigned int newflags;
96165+ unsigned long newflags;
96166
96167+#ifdef CONFIG_PAX_SEGMEXEC
96168+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
96169+ break;
96170+#endif
96171+
96172+ BUG_ON(vma->vm_end > TASK_SIZE);
96173 newflags = vma->vm_flags | VM_LOCKED;
96174 if (!(flags & MCL_CURRENT))
96175 newflags &= ~VM_LOCKED;
96176@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
96177 lock_limit >>= PAGE_SHIFT;
96178
96179 ret = -ENOMEM;
96180+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
96181 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
96182 capable(CAP_IPC_LOCK))
96183 ret = do_mlockall(flags);
96184diff --git a/mm/mmap.c b/mm/mmap.c
96185index 4b80cbf..12a7861 100644
96186--- a/mm/mmap.c
96187+++ b/mm/mmap.c
96188@@ -45,6 +45,16 @@
96189 #define arch_rebalance_pgtables(addr, len) (addr)
96190 #endif
96191
96192+static inline void verify_mm_writelocked(struct mm_struct *mm)
96193+{
96194+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
96195+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
96196+ up_read(&mm->mmap_sem);
96197+ BUG();
96198+ }
96199+#endif
96200+}
96201+
96202 static void unmap_region(struct mm_struct *mm,
96203 struct vm_area_struct *vma, struct vm_area_struct *prev,
96204 unsigned long start, unsigned long end);
96205@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
96206 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
96207 *
96208 */
96209-pgprot_t protection_map[16] = {
96210+pgprot_t protection_map[16] __read_only = {
96211 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
96212 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
96213 };
96214
96215 pgprot_t vm_get_page_prot(unsigned long vm_flags)
96216 {
96217- return __pgprot(pgprot_val(protection_map[vm_flags &
96218+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
96219 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
96220 pgprot_val(arch_vm_get_page_prot(vm_flags)));
96221+
96222+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96223+ if (!nx_enabled &&
96224+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
96225+ (vm_flags & (VM_READ | VM_WRITE)))
96226+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
96227+#endif
96228+
96229+ return prot;
96230 }
96231 EXPORT_SYMBOL(vm_get_page_prot);
96232
96233 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
96234 int sysctl_overcommit_ratio = 50; /* default is 50% */
96235 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
96236+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
96237 struct percpu_counter vm_committed_as;
96238
96239 /*
96240@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
96241 struct vm_area_struct *next = vma->vm_next;
96242
96243 might_sleep();
96244+ BUG_ON(vma->vm_mirror);
96245 if (vma->vm_ops && vma->vm_ops->close)
96246 vma->vm_ops->close(vma);
96247 if (vma->vm_file) {
96248@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
96249 * not page aligned -Ram Gupta
96250 */
96251 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
96252+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
96253 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
96254 (mm->end_data - mm->start_data) > rlim)
96255 goto out;
96256@@ -704,6 +726,12 @@ static int
96257 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
96258 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96259 {
96260+
96261+#ifdef CONFIG_PAX_SEGMEXEC
96262+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
96263+ return 0;
96264+#endif
96265+
96266 if (is_mergeable_vma(vma, file, vm_flags) &&
96267 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
96268 if (vma->vm_pgoff == vm_pgoff)
96269@@ -723,6 +751,12 @@ static int
96270 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96271 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
96272 {
96273+
96274+#ifdef CONFIG_PAX_SEGMEXEC
96275+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
96276+ return 0;
96277+#endif
96278+
96279 if (is_mergeable_vma(vma, file, vm_flags) &&
96280 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
96281 pgoff_t vm_pglen;
96282@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
96283 struct vm_area_struct *vma_merge(struct mm_struct *mm,
96284 struct vm_area_struct *prev, unsigned long addr,
96285 unsigned long end, unsigned long vm_flags,
96286- struct anon_vma *anon_vma, struct file *file,
96287+ struct anon_vma *anon_vma, struct file *file,
96288 pgoff_t pgoff, struct mempolicy *policy)
96289 {
96290 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
96291 struct vm_area_struct *area, *next;
96292
96293+#ifdef CONFIG_PAX_SEGMEXEC
96294+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
96295+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
96296+
96297+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
96298+#endif
96299+
96300 /*
96301 * We later require that vma->vm_flags == vm_flags,
96302 * so this tests vma->vm_flags & VM_SPECIAL, too.
96303@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96304 if (next && next->vm_end == end) /* cases 6, 7, 8 */
96305 next = next->vm_next;
96306
96307+#ifdef CONFIG_PAX_SEGMEXEC
96308+ if (prev)
96309+ prev_m = pax_find_mirror_vma(prev);
96310+ if (area)
96311+ area_m = pax_find_mirror_vma(area);
96312+ if (next)
96313+ next_m = pax_find_mirror_vma(next);
96314+#endif
96315+
96316 /*
96317 * Can it merge with the predecessor?
96318 */
96319@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96320 /* cases 1, 6 */
96321 vma_adjust(prev, prev->vm_start,
96322 next->vm_end, prev->vm_pgoff, NULL);
96323- } else /* cases 2, 5, 7 */
96324+
96325+#ifdef CONFIG_PAX_SEGMEXEC
96326+ if (prev_m)
96327+ vma_adjust(prev_m, prev_m->vm_start,
96328+ next_m->vm_end, prev_m->vm_pgoff, NULL);
96329+#endif
96330+
96331+ } else { /* cases 2, 5, 7 */
96332 vma_adjust(prev, prev->vm_start,
96333 end, prev->vm_pgoff, NULL);
96334+
96335+#ifdef CONFIG_PAX_SEGMEXEC
96336+ if (prev_m)
96337+ vma_adjust(prev_m, prev_m->vm_start,
96338+ end_m, prev_m->vm_pgoff, NULL);
96339+#endif
96340+
96341+ }
96342 return prev;
96343 }
96344
96345@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
96346 mpol_equal(policy, vma_policy(next)) &&
96347 can_vma_merge_before(next, vm_flags,
96348 anon_vma, file, pgoff+pglen)) {
96349- if (prev && addr < prev->vm_end) /* case 4 */
96350+ if (prev && addr < prev->vm_end) { /* case 4 */
96351 vma_adjust(prev, prev->vm_start,
96352 addr, prev->vm_pgoff, NULL);
96353- else /* cases 3, 8 */
96354+
96355+#ifdef CONFIG_PAX_SEGMEXEC
96356+ if (prev_m)
96357+ vma_adjust(prev_m, prev_m->vm_start,
96358+ addr_m, prev_m->vm_pgoff, NULL);
96359+#endif
96360+
96361+ } else { /* cases 3, 8 */
96362 vma_adjust(area, addr, next->vm_end,
96363 next->vm_pgoff - pglen, NULL);
96364+
96365+#ifdef CONFIG_PAX_SEGMEXEC
96366+ if (area_m)
96367+ vma_adjust(area_m, addr_m, next_m->vm_end,
96368+ next_m->vm_pgoff - pglen, NULL);
96369+#endif
96370+
96371+ }
96372 return area;
96373 }
96374
96375@@ -898,14 +978,11 @@ none:
96376 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
96377 struct file *file, long pages)
96378 {
96379- const unsigned long stack_flags
96380- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
96381-
96382 if (file) {
96383 mm->shared_vm += pages;
96384 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
96385 mm->exec_vm += pages;
96386- } else if (flags & stack_flags)
96387+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
96388 mm->stack_vm += pages;
96389 if (flags & (VM_RESERVED|VM_IO))
96390 mm->reserved_vm += pages;
96391@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96392 * (the exception is when the underlying filesystem is noexec
96393 * mounted, in which case we dont add PROT_EXEC.)
96394 */
96395- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
96396+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
96397 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
96398 prot |= PROT_EXEC;
96399
96400@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96401 /* Obtain the address to map to. we verify (or select) it and ensure
96402 * that it represents a valid section of the address space.
96403 */
96404- addr = get_unmapped_area(file, addr, len, pgoff, flags);
96405+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
96406 if (addr & ~PAGE_MASK)
96407 return addr;
96408
96409@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96410 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
96411 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
96412
96413+#ifdef CONFIG_PAX_MPROTECT
96414+ if (mm->pax_flags & MF_PAX_MPROTECT) {
96415+#ifndef CONFIG_PAX_MPROTECT_COMPAT
96416+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
96417+ gr_log_rwxmmap(file);
96418+
96419+#ifdef CONFIG_PAX_EMUPLT
96420+ vm_flags &= ~VM_EXEC;
96421+#else
96422+ return -EPERM;
96423+#endif
96424+
96425+ }
96426+
96427+ if (!(vm_flags & VM_EXEC))
96428+ vm_flags &= ~VM_MAYEXEC;
96429+#else
96430+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
96431+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
96432+#endif
96433+ else
96434+ vm_flags &= ~VM_MAYWRITE;
96435+ }
96436+#endif
96437+
96438+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96439+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
96440+ vm_flags &= ~VM_PAGEEXEC;
96441+#endif
96442+
96443 if (flags & MAP_LOCKED)
96444 if (!can_do_mlock())
96445 return -EPERM;
96446@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96447 locked += mm->locked_vm;
96448 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
96449 lock_limit >>= PAGE_SHIFT;
96450+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
96451 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
96452 return -EAGAIN;
96453 }
96454@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
96455 if (error)
96456 return error;
96457
96458+ if (!gr_acl_handle_mmap(file, prot))
96459+ return -EACCES;
96460+
96461 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
96462 }
96463 EXPORT_SYMBOL(do_mmap_pgoff);
96464@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
96465 */
96466 int vma_wants_writenotify(struct vm_area_struct *vma)
96467 {
96468- unsigned int vm_flags = vma->vm_flags;
96469+ unsigned long vm_flags = vma->vm_flags;
96470
96471 /* If it was private or non-writable, the write bit is already clear */
96472- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
96473+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
96474 return 0;
96475
96476 /* The backer wishes to know when pages are first written to? */
96477@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
96478 unsigned long charged = 0;
96479 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
96480
96481+#ifdef CONFIG_PAX_SEGMEXEC
96482+ struct vm_area_struct *vma_m = NULL;
96483+#endif
96484+
96485+ /*
96486+ * mm->mmap_sem is required to protect against another thread
96487+ * changing the mappings in case we sleep.
96488+ */
96489+ verify_mm_writelocked(mm);
96490+
96491 /* Clear old maps */
96492 error = -ENOMEM;
96493-munmap_back:
96494 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
96495 if (vma && vma->vm_start < addr + len) {
96496 if (do_munmap(mm, addr, len))
96497 return -ENOMEM;
96498- goto munmap_back;
96499+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
96500+ BUG_ON(vma && vma->vm_start < addr + len);
96501 }
96502
96503 /* Check against address space limit. */
96504@@ -1173,6 +1294,16 @@ munmap_back:
96505 goto unacct_error;
96506 }
96507
96508+#ifdef CONFIG_PAX_SEGMEXEC
96509+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
96510+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
96511+ if (!vma_m) {
96512+ error = -ENOMEM;
96513+ goto free_vma;
96514+ }
96515+ }
96516+#endif
96517+
96518 vma->vm_mm = mm;
96519 vma->vm_start = addr;
96520 vma->vm_end = addr + len;
96521@@ -1180,8 +1311,9 @@ munmap_back:
96522 vma->vm_page_prot = vm_get_page_prot(vm_flags);
96523 vma->vm_pgoff = pgoff;
96524
96525+ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
96526+
96527 if (file) {
96528- error = -EINVAL;
96529 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
96530 goto free_vma;
96531 if (vm_flags & VM_DENYWRITE) {
96532@@ -1195,6 +1327,19 @@ munmap_back:
96533 error = file->f_op->mmap(file, vma);
96534 if (error)
96535 goto unmap_and_free_vma;
96536+
96537+#ifdef CONFIG_PAX_SEGMEXEC
96538+ if (vma_m && (vm_flags & VM_EXECUTABLE))
96539+ added_exe_file_vma(mm);
96540+#endif
96541+
96542+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
96543+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
96544+ vma->vm_flags |= VM_PAGEEXEC;
96545+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
96546+ }
96547+#endif
96548+
96549 if (vm_flags & VM_EXECUTABLE)
96550 added_exe_file_vma(mm);
96551
96552@@ -1207,6 +1352,8 @@ munmap_back:
96553 pgoff = vma->vm_pgoff;
96554 vm_flags = vma->vm_flags;
96555 } else if (vm_flags & VM_SHARED) {
96556+ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
96557+ goto free_vma;
96558 error = shmem_zero_setup(vma);
96559 if (error)
96560 goto free_vma;
96561@@ -1218,6 +1365,11 @@ munmap_back:
96562 vma_link(mm, vma, prev, rb_link, rb_parent);
96563 file = vma->vm_file;
96564
96565+#ifdef CONFIG_PAX_SEGMEXEC
96566+ if (vma_m)
96567+ pax_mirror_vma(vma_m, vma);
96568+#endif
96569+
96570 /* Once vma denies write, undo our temporary denial count */
96571 if (correct_wcount)
96572 atomic_inc(&inode->i_writecount);
96573@@ -1226,6 +1378,7 @@ out:
96574
96575 mm->total_vm += len >> PAGE_SHIFT;
96576 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
96577+ track_exec_limit(mm, addr, addr + len, vm_flags);
96578 if (vm_flags & VM_LOCKED) {
96579 /*
96580 * makes pages present; downgrades, drops, reacquires mmap_sem
96581@@ -1248,6 +1401,12 @@ unmap_and_free_vma:
96582 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
96583 charged = 0;
96584 free_vma:
96585+
96586+#ifdef CONFIG_PAX_SEGMEXEC
96587+ if (vma_m)
96588+ kmem_cache_free(vm_area_cachep, vma_m);
96589+#endif
96590+
96591 kmem_cache_free(vm_area_cachep, vma);
96592 unacct_error:
96593 if (charged)
96594@@ -1255,6 +1414,44 @@ unacct_error:
96595 return error;
96596 }
96597
96598+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
96599+{
96600+ if (!vma) {
96601+#ifdef CONFIG_STACK_GROWSUP
96602+ if (addr > sysctl_heap_stack_gap)
96603+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
96604+ else
96605+ vma = find_vma(current->mm, 0);
96606+ if (vma && (vma->vm_flags & VM_GROWSUP))
96607+ return false;
96608+#endif
96609+ return true;
96610+ }
96611+
96612+ if (addr + len > vma->vm_start)
96613+ return false;
96614+
96615+ if (vma->vm_flags & VM_GROWSDOWN)
96616+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
96617+#ifdef CONFIG_STACK_GROWSUP
96618+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
96619+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
96620+#endif
96621+
96622+ return true;
96623+}
96624+
96625+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
96626+{
96627+ if (vma->vm_start < len)
96628+ return -ENOMEM;
96629+ if (!(vma->vm_flags & VM_GROWSDOWN))
96630+ return vma->vm_start - len;
96631+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
96632+ return vma->vm_start - len - sysctl_heap_stack_gap;
96633+ return -ENOMEM;
96634+}
96635+
96636 /* Get an address range which is currently unmapped.
96637 * For shmat() with addr=0.
96638 *
96639@@ -1281,18 +1478,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
96640 if (flags & MAP_FIXED)
96641 return addr;
96642
96643+#ifdef CONFIG_PAX_RANDMMAP
96644+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
96645+#endif
96646+
96647 if (addr) {
96648 addr = PAGE_ALIGN(addr);
96649- vma = find_vma(mm, addr);
96650- if (TASK_SIZE - len >= addr &&
96651- (!vma || addr + len <= vma->vm_start))
96652- return addr;
96653+ if (TASK_SIZE - len >= addr) {
96654+ vma = find_vma(mm, addr);
96655+ if (check_heap_stack_gap(vma, addr, len))
96656+ return addr;
96657+ }
96658 }
96659 if (len > mm->cached_hole_size) {
96660- start_addr = addr = mm->free_area_cache;
96661+ start_addr = addr = mm->free_area_cache;
96662 } else {
96663- start_addr = addr = TASK_UNMAPPED_BASE;
96664- mm->cached_hole_size = 0;
96665+ start_addr = addr = mm->mmap_base;
96666+ mm->cached_hole_size = 0;
96667 }
96668
96669 full_search:
96670@@ -1303,34 +1505,40 @@ full_search:
96671 * Start a new search - just in case we missed
96672 * some holes.
96673 */
96674- if (start_addr != TASK_UNMAPPED_BASE) {
96675- addr = TASK_UNMAPPED_BASE;
96676- start_addr = addr;
96677+ if (start_addr != mm->mmap_base) {
96678+ start_addr = addr = mm->mmap_base;
96679 mm->cached_hole_size = 0;
96680 goto full_search;
96681 }
96682 return -ENOMEM;
96683 }
96684- if (!vma || addr + len <= vma->vm_start) {
96685- /*
96686- * Remember the place where we stopped the search:
96687- */
96688- mm->free_area_cache = addr + len;
96689- return addr;
96690- }
96691+ if (check_heap_stack_gap(vma, addr, len))
96692+ break;
96693 if (addr + mm->cached_hole_size < vma->vm_start)
96694 mm->cached_hole_size = vma->vm_start - addr;
96695 addr = vma->vm_end;
96696 }
96697+
96698+ /*
96699+ * Remember the place where we stopped the search:
96700+ */
96701+ mm->free_area_cache = addr + len;
96702+ return addr;
96703 }
96704 #endif
96705
96706 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
96707 {
96708+
96709+#ifdef CONFIG_PAX_SEGMEXEC
96710+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
96711+ return;
96712+#endif
96713+
96714 /*
96715 * Is this a new hole at the lowest possible address?
96716 */
96717- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
96718+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
96719 mm->free_area_cache = addr;
96720 mm->cached_hole_size = ~0UL;
96721 }
96722@@ -1348,7 +1556,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
96723 {
96724 struct vm_area_struct *vma;
96725 struct mm_struct *mm = current->mm;
96726- unsigned long addr = addr0;
96727+ unsigned long base = mm->mmap_base, addr = addr0;
96728
96729 /* requested length too big for entire address space */
96730 if (len > TASK_SIZE)
96731@@ -1357,13 +1565,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
96732 if (flags & MAP_FIXED)
96733 return addr;
96734
96735+#ifdef CONFIG_PAX_RANDMMAP
96736+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
96737+#endif
96738+
96739 /* requesting a specific address */
96740 if (addr) {
96741 addr = PAGE_ALIGN(addr);
96742- vma = find_vma(mm, addr);
96743- if (TASK_SIZE - len >= addr &&
96744- (!vma || addr + len <= vma->vm_start))
96745- return addr;
96746+ if (TASK_SIZE - len >= addr) {
96747+ vma = find_vma(mm, addr);
96748+ if (check_heap_stack_gap(vma, addr, len))
96749+ return addr;
96750+ }
96751 }
96752
96753 /* check if free_area_cache is useful for us */
96754@@ -1378,7 +1591,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
96755 /* make sure it can fit in the remaining address space */
96756 if (addr > len) {
96757 vma = find_vma(mm, addr-len);
96758- if (!vma || addr <= vma->vm_start)
96759+ if (check_heap_stack_gap(vma, addr - len, len))
96760 /* remember the address as a hint for next time */
96761 return (mm->free_area_cache = addr-len);
96762 }
96763@@ -1395,7 +1608,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
96764 * return with success:
96765 */
96766 vma = find_vma(mm, addr);
96767- if (!vma || addr+len <= vma->vm_start)
96768+ if (check_heap_stack_gap(vma, addr, len))
96769 /* remember the address as a hint for next time */
96770 return (mm->free_area_cache = addr);
96771
96772@@ -1404,8 +1617,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
96773 mm->cached_hole_size = vma->vm_start - addr;
96774
96775 /* try just below the current vma->vm_start */
96776- addr = vma->vm_start-len;
96777- } while (len < vma->vm_start);
96778+ addr = skip_heap_stack_gap(vma, len);
96779+ } while (!IS_ERR_VALUE(addr));
96780
96781 bottomup:
96782 /*
96783@@ -1414,13 +1627,21 @@ bottomup:
96784 * can happen with large stack limits and large mmap()
96785 * allocations.
96786 */
96787+ mm->mmap_base = TASK_UNMAPPED_BASE;
96788+
96789+#ifdef CONFIG_PAX_RANDMMAP
96790+ if (mm->pax_flags & MF_PAX_RANDMMAP)
96791+ mm->mmap_base += mm->delta_mmap;
96792+#endif
96793+
96794+ mm->free_area_cache = mm->mmap_base;
96795 mm->cached_hole_size = ~0UL;
96796- mm->free_area_cache = TASK_UNMAPPED_BASE;
96797 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
96798 /*
96799 * Restore the topdown base:
96800 */
96801- mm->free_area_cache = mm->mmap_base;
96802+ mm->mmap_base = base;
96803+ mm->free_area_cache = base;
96804 mm->cached_hole_size = ~0UL;
96805
96806 return addr;
96807@@ -1429,6 +1650,12 @@ bottomup:
96808
96809 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
96810 {
96811+
96812+#ifdef CONFIG_PAX_SEGMEXEC
96813+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
96814+ return;
96815+#endif
96816+
96817 /*
96818 * Is this a new hole at the highest possible address?
96819 */
96820@@ -1436,8 +1663,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
96821 mm->free_area_cache = addr;
96822
96823 /* dont allow allocations above current base */
96824- if (mm->free_area_cache > mm->mmap_base)
96825+ if (mm->free_area_cache > mm->mmap_base) {
96826 mm->free_area_cache = mm->mmap_base;
96827+ mm->cached_hole_size = ~0UL;
96828+ }
96829 }
96830
96831 unsigned long
96832@@ -1510,40 +1739,49 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
96833
96834 EXPORT_SYMBOL(find_vma);
96835
96836-/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
96837+/*
96838+ * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
96839+ */
96840 struct vm_area_struct *
96841 find_vma_prev(struct mm_struct *mm, unsigned long addr,
96842 struct vm_area_struct **pprev)
96843 {
96844- struct vm_area_struct *vma = NULL, *prev = NULL;
96845- struct rb_node *rb_node;
96846- if (!mm)
96847- goto out;
96848-
96849- /* Guard against addr being lower than the first VMA */
96850- vma = mm->mmap;
96851-
96852- /* Go through the RB tree quickly. */
96853- rb_node = mm->mm_rb.rb_node;
96854-
96855- while (rb_node) {
96856- struct vm_area_struct *vma_tmp;
96857- vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
96858-
96859- if (addr < vma_tmp->vm_end) {
96860- rb_node = rb_node->rb_left;
96861- } else {
96862- prev = vma_tmp;
96863- if (!prev->vm_next || (addr < prev->vm_next->vm_end))
96864- break;
96865+ struct vm_area_struct *vma;
96866+
96867+ vma = find_vma(mm, addr);
96868+ if (vma) {
96869+ *pprev = vma->vm_prev;
96870+ } else {
96871+ struct rb_node *rb_node = mm->mm_rb.rb_node;
96872+ *pprev = NULL;
96873+ while (rb_node) {
96874+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
96875 rb_node = rb_node->rb_right;
96876 }
96877 }
96878+ return vma;
96879+}
96880+
96881+#ifdef CONFIG_PAX_SEGMEXEC
96882+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
96883+{
96884+ struct vm_area_struct *vma_m;
96885
96886-out:
96887- *pprev = prev;
96888- return prev ? prev->vm_next : vma;
96889+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
96890+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
96891+ BUG_ON(vma->vm_mirror);
96892+ return NULL;
96893+ }
96894+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
96895+ vma_m = vma->vm_mirror;
96896+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
96897+ BUG_ON(vma->vm_file != vma_m->vm_file);
96898+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
96899+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
96900+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
96901+ return vma_m;
96902 }
96903+#endif
96904
96905 /*
96906 * Verify that the stack growth is acceptable and
96907@@ -1561,6 +1799,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
96908 return -ENOMEM;
96909
96910 /* Stack limit test */
96911+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
96912 if (size > rlim[RLIMIT_STACK].rlim_cur)
96913 return -ENOMEM;
96914
96915@@ -1570,6 +1809,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
96916 unsigned long limit;
96917 locked = mm->locked_vm + grow;
96918 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
96919+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
96920 if (locked > limit && !capable(CAP_IPC_LOCK))
96921 return -ENOMEM;
96922 }
96923@@ -1600,37 +1840,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
96924 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
96925 * vma is the last one with address > vma->vm_end. Have to extend vma.
96926 */
96927+#ifndef CONFIG_IA64
96928+static
96929+#endif
96930 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
96931 {
96932 int error;
96933+ bool locknext;
96934
96935 if (!(vma->vm_flags & VM_GROWSUP))
96936 return -EFAULT;
96937
96938+ /* Also guard against wrapping around to address 0. */
96939+ if (address < PAGE_ALIGN(address+1))
96940+ address = PAGE_ALIGN(address+1);
96941+ else
96942+ return -ENOMEM;
96943+
96944 /*
96945 * We must make sure the anon_vma is allocated
96946 * so that the anon_vma locking is not a noop.
96947 */
96948 if (unlikely(anon_vma_prepare(vma)))
96949 return -ENOMEM;
96950+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
96951+ if (locknext && anon_vma_prepare(vma->vm_next))
96952+ return -ENOMEM;
96953 anon_vma_lock(vma);
96954+ if (locknext)
96955+ anon_vma_lock(vma->vm_next);
96956
96957 /*
96958 * vma->vm_start/vm_end cannot change under us because the caller
96959 * is required to hold the mmap_sem in read mode. We need the
96960- * anon_vma lock to serialize against concurrent expand_stacks.
96961- * Also guard against wrapping around to address 0.
96962+ * anon_vma locks to serialize against concurrent expand_stacks
96963+ * and expand_upwards.
96964 */
96965- if (address < PAGE_ALIGN(address+4))
96966- address = PAGE_ALIGN(address+4);
96967- else {
96968- anon_vma_unlock(vma);
96969- return -ENOMEM;
96970- }
96971 error = 0;
96972
96973 /* Somebody else might have raced and expanded it already */
96974- if (address > vma->vm_end) {
96975+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
96976+ error = -ENOMEM;
96977+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
96978 unsigned long size, grow;
96979
96980 size = address - vma->vm_start;
96981@@ -1643,6 +1894,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
96982 vma->vm_end = address;
96983 }
96984 }
96985+ if (locknext)
96986+ anon_vma_unlock(vma->vm_next);
96987 anon_vma_unlock(vma);
96988 return error;
96989 }
96990@@ -1655,6 +1908,8 @@ static int expand_downwards(struct vm_area_struct *vma,
96991 unsigned long address)
96992 {
96993 int error;
96994+ bool lockprev = false;
96995+ struct vm_area_struct *prev;
96996
96997 /*
96998 * We must make sure the anon_vma is allocated
96999@@ -1668,6 +1923,15 @@ static int expand_downwards(struct vm_area_struct *vma,
97000 if (error)
97001 return error;
97002
97003+ prev = vma->vm_prev;
97004+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
97005+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
97006+#endif
97007+ if (lockprev && anon_vma_prepare(prev))
97008+ return -ENOMEM;
97009+ if (lockprev)
97010+ anon_vma_lock(prev);
97011+
97012 anon_vma_lock(vma);
97013
97014 /*
97015@@ -1677,9 +1941,17 @@ static int expand_downwards(struct vm_area_struct *vma,
97016 */
97017
97018 /* Somebody else might have raced and expanded it already */
97019- if (address < vma->vm_start) {
97020+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
97021+ error = -ENOMEM;
97022+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
97023 unsigned long size, grow;
97024
97025+#ifdef CONFIG_PAX_SEGMEXEC
97026+ struct vm_area_struct *vma_m;
97027+
97028+ vma_m = pax_find_mirror_vma(vma);
97029+#endif
97030+
97031 size = vma->vm_end - address;
97032 grow = (vma->vm_start - address) >> PAGE_SHIFT;
97033
97034@@ -1689,10 +1961,22 @@ static int expand_downwards(struct vm_area_struct *vma,
97035 if (!error) {
97036 vma->vm_start = address;
97037 vma->vm_pgoff -= grow;
97038+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
97039+
97040+#ifdef CONFIG_PAX_SEGMEXEC
97041+ if (vma_m) {
97042+ vma_m->vm_start -= grow << PAGE_SHIFT;
97043+ vma_m->vm_pgoff -= grow;
97044+ }
97045+#endif
97046+
97047+
97048 }
97049 }
97050 }
97051 anon_vma_unlock(vma);
97052+ if (lockprev)
97053+ anon_vma_unlock(prev);
97054 return error;
97055 }
97056
97057@@ -1768,6 +2052,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
97058 do {
97059 long nrpages = vma_pages(vma);
97060
97061+#ifdef CONFIG_PAX_SEGMEXEC
97062+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
97063+ vma = remove_vma(vma);
97064+ continue;
97065+ }
97066+#endif
97067+
97068 mm->total_vm -= nrpages;
97069 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
97070 vma = remove_vma(vma);
97071@@ -1813,6 +2104,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
97072 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
97073 vma->vm_prev = NULL;
97074 do {
97075+
97076+#ifdef CONFIG_PAX_SEGMEXEC
97077+ if (vma->vm_mirror) {
97078+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
97079+ vma->vm_mirror->vm_mirror = NULL;
97080+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
97081+ vma->vm_mirror = NULL;
97082+ }
97083+#endif
97084+
97085 rb_erase(&vma->vm_rb, &mm->mm_rb);
97086 mm->map_count--;
97087 tail_vma = vma;
97088@@ -1840,10 +2141,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97089 struct mempolicy *pol;
97090 struct vm_area_struct *new;
97091
97092+#ifdef CONFIG_PAX_SEGMEXEC
97093+ struct vm_area_struct *vma_m, *new_m = NULL;
97094+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
97095+#endif
97096+
97097 if (is_vm_hugetlb_page(vma) && (addr &
97098 ~(huge_page_mask(hstate_vma(vma)))))
97099 return -EINVAL;
97100
97101+#ifdef CONFIG_PAX_SEGMEXEC
97102+ vma_m = pax_find_mirror_vma(vma);
97103+
97104+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
97105+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
97106+ if (mm->map_count >= sysctl_max_map_count-1)
97107+ return -ENOMEM;
97108+ } else
97109+#endif
97110+
97111 if (mm->map_count >= sysctl_max_map_count)
97112 return -ENOMEM;
97113
97114@@ -1851,6 +2167,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97115 if (!new)
97116 return -ENOMEM;
97117
97118+#ifdef CONFIG_PAX_SEGMEXEC
97119+ if (vma_m) {
97120+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
97121+ if (!new_m) {
97122+ kmem_cache_free(vm_area_cachep, new);
97123+ return -ENOMEM;
97124+ }
97125+ }
97126+#endif
97127+
97128 /* most fields are the same, copy all, and then fixup */
97129 *new = *vma;
97130
97131@@ -1861,8 +2187,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97132 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
97133 }
97134
97135+#ifdef CONFIG_PAX_SEGMEXEC
97136+ if (vma_m) {
97137+ *new_m = *vma_m;
97138+ new_m->vm_mirror = new;
97139+ new->vm_mirror = new_m;
97140+
97141+ if (new_below)
97142+ new_m->vm_end = addr_m;
97143+ else {
97144+ new_m->vm_start = addr_m;
97145+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
97146+ }
97147+ }
97148+#endif
97149+
97150 pol = mpol_dup(vma_policy(vma));
97151 if (IS_ERR(pol)) {
97152+
97153+#ifdef CONFIG_PAX_SEGMEXEC
97154+ if (new_m)
97155+ kmem_cache_free(vm_area_cachep, new_m);
97156+#endif
97157+
97158 kmem_cache_free(vm_area_cachep, new);
97159 return PTR_ERR(pol);
97160 }
97161@@ -1883,6 +2230,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97162 else
97163 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
97164
97165+#ifdef CONFIG_PAX_SEGMEXEC
97166+ if (vma_m) {
97167+ mpol_get(pol);
97168+ vma_set_policy(new_m, pol);
97169+
97170+ if (new_m->vm_file) {
97171+ get_file(new_m->vm_file);
97172+ if (vma_m->vm_flags & VM_EXECUTABLE)
97173+ added_exe_file_vma(mm);
97174+ }
97175+
97176+ if (new_m->vm_ops && new_m->vm_ops->open)
97177+ new_m->vm_ops->open(new_m);
97178+
97179+ if (new_below)
97180+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
97181+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
97182+ else
97183+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
97184+ }
97185+#endif
97186+
97187 return 0;
97188 }
97189
97190@@ -1891,11 +2260,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
97191 * work. This now handles partial unmappings.
97192 * Jeremy Fitzhardinge <jeremy@goop.org>
97193 */
97194+#ifdef CONFIG_PAX_SEGMEXEC
97195 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97196 {
97197+ int ret = __do_munmap(mm, start, len);
97198+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
97199+ return ret;
97200+
97201+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
97202+}
97203+
97204+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97205+#else
97206+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97207+#endif
97208+{
97209 unsigned long end;
97210 struct vm_area_struct *vma, *prev, *last;
97211
97212+ /*
97213+ * mm->mmap_sem is required to protect against another thread
97214+ * changing the mappings in case we sleep.
97215+ */
97216+ verify_mm_writelocked(mm);
97217+
97218 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
97219 return -EINVAL;
97220
97221@@ -1959,6 +2347,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
97222 /* Fix up all other VM information */
97223 remove_vma_list(mm, vma);
97224
97225+ track_exec_limit(mm, start, end, 0UL);
97226+
97227 return 0;
97228 }
97229
97230@@ -1971,22 +2361,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
97231
97232 profile_munmap(addr);
97233
97234+#ifdef CONFIG_PAX_SEGMEXEC
97235+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
97236+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
97237+ return -EINVAL;
97238+#endif
97239+
97240 down_write(&mm->mmap_sem);
97241 ret = do_munmap(mm, addr, len);
97242 up_write(&mm->mmap_sem);
97243 return ret;
97244 }
97245
97246-static inline void verify_mm_writelocked(struct mm_struct *mm)
97247-{
97248-#ifdef CONFIG_DEBUG_VM
97249- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
97250- WARN_ON(1);
97251- up_read(&mm->mmap_sem);
97252- }
97253-#endif
97254-}
97255-
97256 /*
97257 * this is really a simplified "do_mmap". it only handles
97258 * anonymous maps. eventually we may be able to do some
97259@@ -2000,6 +2386,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97260 struct rb_node ** rb_link, * rb_parent;
97261 pgoff_t pgoff = addr >> PAGE_SHIFT;
97262 int error;
97263+ unsigned long charged;
97264
97265 len = PAGE_ALIGN(len);
97266 if (!len)
97267@@ -2011,16 +2398,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97268
97269 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
97270
97271+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
97272+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
97273+ flags &= ~VM_EXEC;
97274+
97275+#ifdef CONFIG_PAX_MPROTECT
97276+ if (mm->pax_flags & MF_PAX_MPROTECT)
97277+ flags &= ~VM_MAYEXEC;
97278+#endif
97279+
97280+ }
97281+#endif
97282+
97283 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
97284 if (error & ~PAGE_MASK)
97285 return error;
97286
97287+ charged = len >> PAGE_SHIFT;
97288+
97289 /*
97290 * mlock MCL_FUTURE?
97291 */
97292 if (mm->def_flags & VM_LOCKED) {
97293 unsigned long locked, lock_limit;
97294- locked = len >> PAGE_SHIFT;
97295+ locked = charged;
97296 locked += mm->locked_vm;
97297 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
97298 lock_limit >>= PAGE_SHIFT;
97299@@ -2037,22 +2438,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97300 /*
97301 * Clear old maps. this also does some error checking for us
97302 */
97303- munmap_back:
97304 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97305 if (vma && vma->vm_start < addr + len) {
97306 if (do_munmap(mm, addr, len))
97307 return -ENOMEM;
97308- goto munmap_back;
97309+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
97310+ BUG_ON(vma && vma->vm_start < addr + len);
97311 }
97312
97313 /* Check against address space limits *after* clearing old maps... */
97314- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
97315+ if (!may_expand_vm(mm, charged))
97316 return -ENOMEM;
97317
97318 if (mm->map_count > sysctl_max_map_count)
97319 return -ENOMEM;
97320
97321- if (security_vm_enough_memory(len >> PAGE_SHIFT))
97322+ if (security_vm_enough_memory(charged))
97323 return -ENOMEM;
97324
97325 /* Can we just expand an old private anonymous mapping? */
97326@@ -2066,7 +2467,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97327 */
97328 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97329 if (!vma) {
97330- vm_unacct_memory(len >> PAGE_SHIFT);
97331+ vm_unacct_memory(charged);
97332 return -ENOMEM;
97333 }
97334
97335@@ -2078,11 +2479,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
97336 vma->vm_page_prot = vm_get_page_prot(flags);
97337 vma_link(mm, vma, prev, rb_link, rb_parent);
97338 out:
97339- mm->total_vm += len >> PAGE_SHIFT;
97340+ mm->total_vm += charged;
97341 if (flags & VM_LOCKED) {
97342 if (!mlock_vma_pages_range(vma, addr, addr + len))
97343- mm->locked_vm += (len >> PAGE_SHIFT);
97344+ mm->locked_vm += charged;
97345 }
97346+ track_exec_limit(mm, addr, addr + len, flags);
97347 return addr;
97348 }
97349
97350@@ -2129,8 +2531,10 @@ void exit_mmap(struct mm_struct *mm)
97351 * Walk the list again, actually closing and freeing it,
97352 * with preemption enabled, without holding any MM locks.
97353 */
97354- while (vma)
97355+ while (vma) {
97356+ vma->vm_mirror = NULL;
97357 vma = remove_vma(vma);
97358+ }
97359
97360 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
97361 }
97362@@ -2144,6 +2548,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
97363 struct vm_area_struct * __vma, * prev;
97364 struct rb_node ** rb_link, * rb_parent;
97365
97366+#ifdef CONFIG_PAX_SEGMEXEC
97367+ struct vm_area_struct *vma_m = NULL;
97368+#endif
97369+
97370 /*
97371 * The vm_pgoff of a purely anonymous vma should be irrelevant
97372 * until its first write fault, when page's anon_vma and index
97373@@ -2166,7 +2574,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
97374 if ((vma->vm_flags & VM_ACCOUNT) &&
97375 security_vm_enough_memory_mm(mm, vma_pages(vma)))
97376 return -ENOMEM;
97377+
97378+#ifdef CONFIG_PAX_SEGMEXEC
97379+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
97380+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97381+ if (!vma_m)
97382+ return -ENOMEM;
97383+ }
97384+#endif
97385+
97386 vma_link(mm, vma, prev, rb_link, rb_parent);
97387+
97388+#ifdef CONFIG_PAX_SEGMEXEC
97389+ if (vma_m)
97390+ pax_mirror_vma(vma_m, vma);
97391+#endif
97392+
97393 return 0;
97394 }
97395
97396@@ -2184,6 +2607,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97397 struct rb_node **rb_link, *rb_parent;
97398 struct mempolicy *pol;
97399
97400+ BUG_ON(vma->vm_mirror);
97401+
97402 /*
97403 * If anonymous vma has not yet been faulted, update new pgoff
97404 * to match new location, to increase its chance of merging.
97405@@ -2227,6 +2652,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
97406 return new_vma;
97407 }
97408
97409+#ifdef CONFIG_PAX_SEGMEXEC
97410+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
97411+{
97412+ struct vm_area_struct *prev_m;
97413+ struct rb_node **rb_link_m, *rb_parent_m;
97414+ struct mempolicy *pol_m;
97415+
97416+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
97417+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
97418+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
97419+ *vma_m = *vma;
97420+ pol_m = vma_policy(vma_m);
97421+ mpol_get(pol_m);
97422+ vma_set_policy(vma_m, pol_m);
97423+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
97424+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
97425+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
97426+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
97427+ if (vma_m->vm_file)
97428+ get_file(vma_m->vm_file);
97429+ if (vma_m->vm_ops && vma_m->vm_ops->open)
97430+ vma_m->vm_ops->open(vma_m);
97431+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
97432+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
97433+ vma_m->vm_mirror = vma;
97434+ vma->vm_mirror = vma_m;
97435+}
97436+#endif
97437+
97438 /*
97439 * Return true if the calling process may expand its vm space by the passed
97440 * number of pages
97441@@ -2237,7 +2691,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
97442 unsigned long lim;
97443
97444 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
97445-
97446+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
97447 if (cur + npages > lim)
97448 return 0;
97449 return 1;
97450@@ -2307,6 +2761,22 @@ int install_special_mapping(struct mm_struct *mm,
97451 vma->vm_start = addr;
97452 vma->vm_end = addr + len;
97453
97454+#ifdef CONFIG_PAX_MPROTECT
97455+ if (mm->pax_flags & MF_PAX_MPROTECT) {
97456+#ifndef CONFIG_PAX_MPROTECT_COMPAT
97457+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
97458+ return -EPERM;
97459+ if (!(vm_flags & VM_EXEC))
97460+ vm_flags &= ~VM_MAYEXEC;
97461+#else
97462+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
97463+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
97464+#endif
97465+ else
97466+ vm_flags &= ~VM_MAYWRITE;
97467+ }
97468+#endif
97469+
97470 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
97471 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
97472
97473diff --git a/mm/mprotect.c b/mm/mprotect.c
97474index 1737c7e..c7faeb4 100644
97475--- a/mm/mprotect.c
97476+++ b/mm/mprotect.c
97477@@ -24,10 +24,16 @@
97478 #include <linux/mmu_notifier.h>
97479 #include <linux/migrate.h>
97480 #include <linux/perf_event.h>
97481+
97482+#ifdef CONFIG_PAX_MPROTECT
97483+#include <linux/elf.h>
97484+#endif
97485+
97486 #include <asm/uaccess.h>
97487 #include <asm/pgtable.h>
97488 #include <asm/cacheflush.h>
97489 #include <asm/tlbflush.h>
97490+#include <asm/mmu_context.h>
97491
97492 #ifndef pgprot_modify
97493 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
97494@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
97495 flush_tlb_range(vma, start, end);
97496 }
97497
97498+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
97499+/* called while holding the mmap semaphor for writing except stack expansion */
97500+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
97501+{
97502+ unsigned long oldlimit, newlimit = 0UL;
97503+
97504+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
97505+ return;
97506+
97507+ spin_lock(&mm->page_table_lock);
97508+ oldlimit = mm->context.user_cs_limit;
97509+ if ((prot & VM_EXEC) && oldlimit < end)
97510+ /* USER_CS limit moved up */
97511+ newlimit = end;
97512+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
97513+ /* USER_CS limit moved down */
97514+ newlimit = start;
97515+
97516+ if (newlimit) {
97517+ mm->context.user_cs_limit = newlimit;
97518+
97519+#ifdef CONFIG_SMP
97520+ wmb();
97521+ cpus_clear(mm->context.cpu_user_cs_mask);
97522+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
97523+#endif
97524+
97525+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
97526+ }
97527+ spin_unlock(&mm->page_table_lock);
97528+ if (newlimit == end) {
97529+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
97530+
97531+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
97532+ if (is_vm_hugetlb_page(vma))
97533+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
97534+ else
97535+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
97536+ }
97537+}
97538+#endif
97539+
97540 int
97541 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
97542 unsigned long start, unsigned long end, unsigned long newflags)
97543@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
97544 int error;
97545 int dirty_accountable = 0;
97546
97547+#ifdef CONFIG_PAX_SEGMEXEC
97548+ struct vm_area_struct *vma_m = NULL;
97549+ unsigned long start_m, end_m;
97550+
97551+ start_m = start + SEGMEXEC_TASK_SIZE;
97552+ end_m = end + SEGMEXEC_TASK_SIZE;
97553+#endif
97554+
97555 if (newflags == oldflags) {
97556 *pprev = vma;
97557 return 0;
97558 }
97559
97560+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
97561+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
97562+
97563+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
97564+ return -ENOMEM;
97565+
97566+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
97567+ return -ENOMEM;
97568+ }
97569+
97570 /*
97571 * If we make a private mapping writable we increase our commit;
97572 * but (without finer accounting) cannot reduce our commit if we
97573@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
97574 }
97575 }
97576
97577+#ifdef CONFIG_PAX_SEGMEXEC
97578+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
97579+ if (start != vma->vm_start) {
97580+ error = split_vma(mm, vma, start, 1);
97581+ if (error)
97582+ goto fail;
97583+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
97584+ *pprev = (*pprev)->vm_next;
97585+ }
97586+
97587+ if (end != vma->vm_end) {
97588+ error = split_vma(mm, vma, end, 0);
97589+ if (error)
97590+ goto fail;
97591+ }
97592+
97593+ if (pax_find_mirror_vma(vma)) {
97594+ error = __do_munmap(mm, start_m, end_m - start_m);
97595+ if (error)
97596+ goto fail;
97597+ } else {
97598+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
97599+ if (!vma_m) {
97600+ error = -ENOMEM;
97601+ goto fail;
97602+ }
97603+ vma->vm_flags = newflags;
97604+ pax_mirror_vma(vma_m, vma);
97605+ }
97606+ }
97607+#endif
97608+
97609 /*
97610 * First try to merge with previous and/or next vma.
97611 */
97612@@ -195,9 +293,21 @@ success:
97613 * vm_flags and vm_page_prot are protected by the mmap_sem
97614 * held in write mode.
97615 */
97616+
97617+#ifdef CONFIG_PAX_SEGMEXEC
97618+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
97619+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
97620+#endif
97621+
97622 vma->vm_flags = newflags;
97623+
97624+#ifdef CONFIG_PAX_MPROTECT
97625+ if (mm->binfmt && mm->binfmt->handle_mprotect)
97626+ mm->binfmt->handle_mprotect(vma, newflags);
97627+#endif
97628+
97629 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
97630- vm_get_page_prot(newflags));
97631+ vm_get_page_prot(vma->vm_flags));
97632
97633 if (vma_wants_writenotify(vma)) {
97634 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
97635@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
97636 end = start + len;
97637 if (end <= start)
97638 return -ENOMEM;
97639+
97640+#ifdef CONFIG_PAX_SEGMEXEC
97641+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
97642+ if (end > SEGMEXEC_TASK_SIZE)
97643+ return -EINVAL;
97644+ } else
97645+#endif
97646+
97647+ if (end > TASK_SIZE)
97648+ return -EINVAL;
97649+
97650 if (!arch_validate_prot(prot))
97651 return -EINVAL;
97652
97653@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
97654 /*
97655 * Does the application expect PROT_READ to imply PROT_EXEC:
97656 */
97657- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
97658+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
97659 prot |= PROT_EXEC;
97660
97661 vm_flags = calc_vm_prot_bits(prot);
97662@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
97663 if (start > vma->vm_start)
97664 prev = vma;
97665
97666+#ifdef CONFIG_PAX_MPROTECT
97667+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
97668+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
97669+#endif
97670+
97671 for (nstart = start ; ; ) {
97672 unsigned long newflags;
97673
97674@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
97675
97676 /* newflags >> 4 shift VM_MAY% in place of VM_% */
97677 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
97678+ if (prot & (PROT_WRITE | PROT_EXEC))
97679+ gr_log_rwxmprotect(vma->vm_file);
97680+
97681+ error = -EACCES;
97682+ goto out;
97683+ }
97684+
97685+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
97686 error = -EACCES;
97687 goto out;
97688 }
97689@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
97690 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
97691 if (error)
97692 goto out;
97693+
97694+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
97695+
97696 nstart = tmp;
97697
97698 if (nstart < prev->vm_end)
97699diff --git a/mm/mremap.c b/mm/mremap.c
97700index 3e98d79..1706cec 100644
97701--- a/mm/mremap.c
97702+++ b/mm/mremap.c
97703@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
97704 continue;
97705 pte = ptep_clear_flush(vma, old_addr, old_pte);
97706 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
97707+
97708+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
97709+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
97710+ pte = pte_exprotect(pte);
97711+#endif
97712+
97713 set_pte_at(mm, new_addr, new_pte, pte);
97714 }
97715
97716@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
97717 if (is_vm_hugetlb_page(vma))
97718 goto Einval;
97719
97720+#ifdef CONFIG_PAX_SEGMEXEC
97721+ if (pax_find_mirror_vma(vma))
97722+ goto Einval;
97723+#endif
97724+
97725 /* We can't remap across vm area boundaries */
97726 if (old_len > vma->vm_end - addr)
97727 goto Efault;
97728@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
97729 unsigned long ret = -EINVAL;
97730 unsigned long charged = 0;
97731 unsigned long map_flags;
97732+ unsigned long pax_task_size = TASK_SIZE;
97733
97734 if (new_addr & ~PAGE_MASK)
97735 goto out;
97736
97737- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
97738+#ifdef CONFIG_PAX_SEGMEXEC
97739+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
97740+ pax_task_size = SEGMEXEC_TASK_SIZE;
97741+#endif
97742+
97743+ pax_task_size -= PAGE_SIZE;
97744+
97745+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
97746 goto out;
97747
97748 /* Check if the location we're moving into overlaps the
97749 * old location at all, and fail if it does.
97750 */
97751- if ((new_addr <= addr) && (new_addr+new_len) > addr)
97752- goto out;
97753-
97754- if ((addr <= new_addr) && (addr+old_len) > new_addr)
97755+ if (addr + old_len > new_addr && new_addr + new_len > addr)
97756 goto out;
97757
97758 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
97759@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
97760 struct vm_area_struct *vma;
97761 unsigned long ret = -EINVAL;
97762 unsigned long charged = 0;
97763+ unsigned long pax_task_size = TASK_SIZE;
97764
97765 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
97766 goto out;
97767@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
97768 if (!new_len)
97769 goto out;
97770
97771+#ifdef CONFIG_PAX_SEGMEXEC
97772+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
97773+ pax_task_size = SEGMEXEC_TASK_SIZE;
97774+#endif
97775+
97776+ pax_task_size -= PAGE_SIZE;
97777+
97778+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
97779+ old_len > pax_task_size || addr > pax_task_size-old_len)
97780+ goto out;
97781+
97782 if (flags & MREMAP_FIXED) {
97783 if (flags & MREMAP_MAYMOVE)
97784 ret = mremap_to(addr, old_len, new_addr, new_len);
97785@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
97786 addr + new_len);
97787 }
97788 ret = addr;
97789+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
97790 goto out;
97791 }
97792 }
97793@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
97794 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
97795 if (ret)
97796 goto out;
97797+
97798+ map_flags = vma->vm_flags;
97799 ret = move_vma(vma, addr, old_len, new_len, new_addr);
97800+ if (!(ret & ~PAGE_MASK)) {
97801+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
97802+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
97803+ }
97804 }
97805 out:
97806 if (ret & ~PAGE_MASK)
97807diff --git a/mm/nommu.c b/mm/nommu.c
97808index 406e8d4..53970d3 100644
97809--- a/mm/nommu.c
97810+++ b/mm/nommu.c
97811@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
97812 int sysctl_overcommit_ratio = 50; /* default is 50% */
97813 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
97814 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
97815-int heap_stack_gap = 0;
97816
97817 atomic_long_t mmap_pages_allocated;
97818
97819@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
97820 EXPORT_SYMBOL(find_vma);
97821
97822 /*
97823- * find a VMA
97824- * - we don't extend stack VMAs under NOMMU conditions
97825- */
97826-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
97827-{
97828- return find_vma(mm, addr);
97829-}
97830-
97831-/*
97832 * expand a stack to a given address
97833 * - not supported under NOMMU conditions
97834 */
97835diff --git a/mm/page_alloc.c b/mm/page_alloc.c
97836index 3ecab7e..594a471 100644
97837--- a/mm/page_alloc.c
97838+++ b/mm/page_alloc.c
97839@@ -289,7 +289,7 @@ out:
97840 * This usage means that zero-order pages may not be compound.
97841 */
97842
97843-static void free_compound_page(struct page *page)
97844+void free_compound_page(struct page *page)
97845 {
97846 __free_pages_ok(page, compound_order(page));
97847 }
97848@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
97849 int bad = 0;
97850 int wasMlocked = __TestClearPageMlocked(page);
97851
97852+#ifdef CONFIG_PAX_MEMORY_SANITIZE
97853+ unsigned long index = 1UL << order;
97854+#endif
97855+
97856 kmemcheck_free_shadow(page, order);
97857
97858 for (i = 0 ; i < (1 << order) ; ++i)
97859@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
97860 debug_check_no_obj_freed(page_address(page),
97861 PAGE_SIZE << order);
97862 }
97863+
97864+#ifdef CONFIG_PAX_MEMORY_SANITIZE
97865+ for (; index; --index)
97866+ sanitize_highpage(page + index - 1);
97867+#endif
97868+
97869 arch_free_page(page, order);
97870 kernel_map_pages(page, 1 << order, 0);
97871
97872@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
97873 arch_alloc_page(page, order);
97874 kernel_map_pages(page, 1 << order, 1);
97875
97876+#ifndef CONFIG_PAX_MEMORY_SANITIZE
97877 if (gfp_flags & __GFP_ZERO)
97878 prep_zero_page(page, order, gfp_flags);
97879+#endif
97880
97881 if (order && (gfp_flags & __GFP_COMP))
97882 prep_compound_page(page, order);
97883@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
97884 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
97885 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
97886 }
97887+
97888+#ifdef CONFIG_PAX_MEMORY_SANITIZE
97889+ sanitize_highpage(page);
97890+#endif
97891+
97892 arch_free_page(page, 0);
97893 kernel_map_pages(page, 1, 0);
97894
97895@@ -2179,6 +2196,8 @@ void show_free_areas(void)
97896 int cpu;
97897 struct zone *zone;
97898
97899+ pax_track_stack();
97900+
97901 for_each_populated_zone(zone) {
97902 show_node(zone);
97903 printk("%s per-cpu:\n", zone->name);
97904@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
97905 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
97906 }
97907 #else
97908-static void inline setup_usemap(struct pglist_data *pgdat,
97909+static inline void setup_usemap(struct pglist_data *pgdat,
97910 struct zone *zone, unsigned long zonesize) {}
97911 #endif /* CONFIG_SPARSEMEM */
97912
97913diff --git a/mm/percpu.c b/mm/percpu.c
97914index c90614a..5f7b7b8 100644
97915--- a/mm/percpu.c
97916+++ b/mm/percpu.c
97917@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
97918 static unsigned int pcpu_high_unit_cpu __read_mostly;
97919
97920 /* the address of the first chunk which starts with the kernel static area */
97921-void *pcpu_base_addr __read_mostly;
97922+void *pcpu_base_addr __read_only;
97923 EXPORT_SYMBOL_GPL(pcpu_base_addr);
97924
97925 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
97926diff --git a/mm/rmap.c b/mm/rmap.c
97927index dd43373..d848cd7 100644
97928--- a/mm/rmap.c
97929+++ b/mm/rmap.c
97930@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
97931 /* page_table_lock to protect against threads */
97932 spin_lock(&mm->page_table_lock);
97933 if (likely(!vma->anon_vma)) {
97934+
97935+#ifdef CONFIG_PAX_SEGMEXEC
97936+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
97937+
97938+ if (vma_m) {
97939+ BUG_ON(vma_m->anon_vma);
97940+ vma_m->anon_vma = anon_vma;
97941+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
97942+ }
97943+#endif
97944+
97945 vma->anon_vma = anon_vma;
97946 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
97947 allocated = NULL;
97948diff --git a/mm/shmem.c b/mm/shmem.c
97949index 3e0005b..1d659a8 100644
97950--- a/mm/shmem.c
97951+++ b/mm/shmem.c
97952@@ -31,7 +31,7 @@
97953 #include <linux/swap.h>
97954 #include <linux/ima.h>
97955
97956-static struct vfsmount *shm_mnt;
97957+struct vfsmount *shm_mnt;
97958
97959 #ifdef CONFIG_SHMEM
97960 /*
97961@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
97962 goto unlock;
97963 }
97964 entry = shmem_swp_entry(info, index, NULL);
97965+ if (!entry)
97966+ goto unlock;
97967 if (entry->val) {
97968 /*
97969 * The more uptodate page coming down from a stacked
97970@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
97971 struct vm_area_struct pvma;
97972 struct page *page;
97973
97974+ pax_track_stack();
97975+
97976 spol = mpol_cond_copy(&mpol,
97977 mpol_shared_policy_lookup(&info->policy, idx));
97978
97979@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
97980
97981 info = SHMEM_I(inode);
97982 inode->i_size = len-1;
97983- if (len <= (char *)inode - (char *)info) {
97984+ if (len <= (char *)inode - (char *)info && len <= 64) {
97985 /* do it inline */
97986 memcpy(info, symname, len);
97987 inode->i_op = &shmem_symlink_inline_operations;
97988@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
97989 int err = -ENOMEM;
97990
97991 /* Round up to L1_CACHE_BYTES to resist false sharing */
97992- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
97993- L1_CACHE_BYTES), GFP_KERNEL);
97994+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
97995 if (!sbinfo)
97996 return -ENOMEM;
97997
97998diff --git a/mm/slab.c b/mm/slab.c
97999index c8d466a..909e01e 100644
98000--- a/mm/slab.c
98001+++ b/mm/slab.c
98002@@ -174,7 +174,7 @@
98003
98004 /* Legal flag mask for kmem_cache_create(). */
98005 #if DEBUG
98006-# define CREATE_MASK (SLAB_RED_ZONE | \
98007+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
98008 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
98009 SLAB_CACHE_DMA | \
98010 SLAB_STORE_USER | \
98011@@ -182,7 +182,7 @@
98012 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98013 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
98014 #else
98015-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
98016+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
98017 SLAB_CACHE_DMA | \
98018 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
98019 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
98020@@ -308,7 +308,7 @@ struct kmem_list3 {
98021 * Need this for bootstrapping a per node allocator.
98022 */
98023 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
98024-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
98025+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
98026 #define CACHE_CACHE 0
98027 #define SIZE_AC MAX_NUMNODES
98028 #define SIZE_L3 (2 * MAX_NUMNODES)
98029@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
98030 if ((x)->max_freeable < i) \
98031 (x)->max_freeable = i; \
98032 } while (0)
98033-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
98034-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
98035-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
98036-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
98037+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
98038+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
98039+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
98040+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
98041 #else
98042 #define STATS_INC_ACTIVE(x) do { } while (0)
98043 #define STATS_DEC_ACTIVE(x) do { } while (0)
98044@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
98045 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
98046 */
98047 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
98048- const struct slab *slab, void *obj)
98049+ const struct slab *slab, const void *obj)
98050 {
98051 u32 offset = (obj - slab->s_mem);
98052 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
98053@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
98054 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
98055 sizes[INDEX_AC].cs_size,
98056 ARCH_KMALLOC_MINALIGN,
98057- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98058+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98059 NULL);
98060
98061 if (INDEX_AC != INDEX_L3) {
98062@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
98063 kmem_cache_create(names[INDEX_L3].name,
98064 sizes[INDEX_L3].cs_size,
98065 ARCH_KMALLOC_MINALIGN,
98066- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98067+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98068 NULL);
98069 }
98070
98071@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
98072 sizes->cs_cachep = kmem_cache_create(names->name,
98073 sizes->cs_size,
98074 ARCH_KMALLOC_MINALIGN,
98075- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
98076+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
98077 NULL);
98078 }
98079 #ifdef CONFIG_ZONE_DMA
98080@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
98081 }
98082 /* cpu stats */
98083 {
98084- unsigned long allochit = atomic_read(&cachep->allochit);
98085- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
98086- unsigned long freehit = atomic_read(&cachep->freehit);
98087- unsigned long freemiss = atomic_read(&cachep->freemiss);
98088+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
98089+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
98090+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
98091+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
98092
98093 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
98094 allochit, allocmiss, freehit, freemiss);
98095@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
98096
98097 static int __init slab_proc_init(void)
98098 {
98099- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
98100+ mode_t gr_mode = S_IRUGO;
98101+
98102+#ifdef CONFIG_GRKERNSEC_PROC_ADD
98103+ gr_mode = S_IRUSR;
98104+#endif
98105+
98106+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
98107 #ifdef CONFIG_DEBUG_SLAB_LEAK
98108- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
98109+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
98110 #endif
98111 return 0;
98112 }
98113 module_init(slab_proc_init);
98114 #endif
98115
98116+void check_object_size(const void *ptr, unsigned long n, bool to)
98117+{
98118+
98119+#ifdef CONFIG_PAX_USERCOPY
98120+ struct page *page;
98121+ struct kmem_cache *cachep = NULL;
98122+ struct slab *slabp;
98123+ unsigned int objnr;
98124+ unsigned long offset;
98125+ const char *type;
98126+
98127+ if (!n)
98128+ return;
98129+
98130+ type = "<null>";
98131+ if (ZERO_OR_NULL_PTR(ptr))
98132+ goto report;
98133+
98134+ if (!virt_addr_valid(ptr))
98135+ return;
98136+
98137+ page = virt_to_head_page(ptr);
98138+
98139+ type = "<process stack>";
98140+ if (!PageSlab(page)) {
98141+ if (object_is_on_stack(ptr, n) == -1)
98142+ goto report;
98143+ return;
98144+ }
98145+
98146+ cachep = page_get_cache(page);
98147+ type = cachep->name;
98148+ if (!(cachep->flags & SLAB_USERCOPY))
98149+ goto report;
98150+
98151+ slabp = page_get_slab(page);
98152+ objnr = obj_to_index(cachep, slabp, ptr);
98153+ BUG_ON(objnr >= cachep->num);
98154+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
98155+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
98156+ return;
98157+
98158+report:
98159+ pax_report_usercopy(ptr, n, to, type);
98160+#endif
98161+
98162+}
98163+EXPORT_SYMBOL(check_object_size);
98164+
98165 /**
98166 * ksize - get the actual amount of memory allocated for a given object
98167 * @objp: Pointer to the object
98168diff --git a/mm/slob.c b/mm/slob.c
98169index 837ebd6..0bd23bc 100644
98170--- a/mm/slob.c
98171+++ b/mm/slob.c
98172@@ -29,7 +29,7 @@
98173 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
98174 * alloc_pages() directly, allocating compound pages so the page order
98175 * does not have to be separately tracked, and also stores the exact
98176- * allocation size in page->private so that it can be used to accurately
98177+ * allocation size in slob_page->size so that it can be used to accurately
98178 * provide ksize(). These objects are detected in kfree() because slob_page()
98179 * is false for them.
98180 *
98181@@ -58,6 +58,7 @@
98182 */
98183
98184 #include <linux/kernel.h>
98185+#include <linux/sched.h>
98186 #include <linux/slab.h>
98187 #include <linux/mm.h>
98188 #include <linux/swap.h> /* struct reclaim_state */
98189@@ -100,7 +101,8 @@ struct slob_page {
98190 unsigned long flags; /* mandatory */
98191 atomic_t _count; /* mandatory */
98192 slobidx_t units; /* free units left in page */
98193- unsigned long pad[2];
98194+ unsigned long pad[1];
98195+ unsigned long size; /* size when >=PAGE_SIZE */
98196 slob_t *free; /* first free slob_t in page */
98197 struct list_head list; /* linked list of free pages */
98198 };
98199@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
98200 */
98201 static inline int is_slob_page(struct slob_page *sp)
98202 {
98203- return PageSlab((struct page *)sp);
98204+ return PageSlab((struct page *)sp) && !sp->size;
98205 }
98206
98207 static inline void set_slob_page(struct slob_page *sp)
98208@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
98209
98210 static inline struct slob_page *slob_page(const void *addr)
98211 {
98212- return (struct slob_page *)virt_to_page(addr);
98213+ return (struct slob_page *)virt_to_head_page(addr);
98214 }
98215
98216 /*
98217@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
98218 /*
98219 * Return the size of a slob block.
98220 */
98221-static slobidx_t slob_units(slob_t *s)
98222+static slobidx_t slob_units(const slob_t *s)
98223 {
98224 if (s->units > 0)
98225 return s->units;
98226@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
98227 /*
98228 * Return the next free slob block pointer after this one.
98229 */
98230-static slob_t *slob_next(slob_t *s)
98231+static slob_t *slob_next(const slob_t *s)
98232 {
98233 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
98234 slobidx_t next;
98235@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
98236 /*
98237 * Returns true if s is the last free block in its page.
98238 */
98239-static int slob_last(slob_t *s)
98240+static int slob_last(const slob_t *s)
98241 {
98242 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
98243 }
98244@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
98245 if (!page)
98246 return NULL;
98247
98248+ set_slob_page(page);
98249 return page_address(page);
98250 }
98251
98252@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
98253 if (!b)
98254 return NULL;
98255 sp = slob_page(b);
98256- set_slob_page(sp);
98257
98258 spin_lock_irqsave(&slob_lock, flags);
98259 sp->units = SLOB_UNITS(PAGE_SIZE);
98260 sp->free = b;
98261+ sp->size = 0;
98262 INIT_LIST_HEAD(&sp->list);
98263 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
98264 set_slob_page_free(sp, slob_list);
98265@@ -475,10 +478,9 @@ out:
98266 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
98267 #endif
98268
98269-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98270+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
98271 {
98272- unsigned int *m;
98273- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98274+ slob_t *m;
98275 void *ret;
98276
98277 lockdep_trace_alloc(gfp);
98278@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98279
98280 if (!m)
98281 return NULL;
98282- *m = size;
98283+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
98284+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
98285+ m[0].units = size;
98286+ m[1].units = align;
98287 ret = (void *)m + align;
98288
98289 trace_kmalloc_node(_RET_IP_, ret,
98290@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98291
98292 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
98293 if (ret) {
98294- struct page *page;
98295- page = virt_to_page(ret);
98296- page->private = size;
98297+ struct slob_page *sp;
98298+ sp = slob_page(ret);
98299+ sp->size = size;
98300 }
98301
98302 trace_kmalloc_node(_RET_IP_, ret,
98303 size, PAGE_SIZE << order, gfp, node);
98304 }
98305
98306- kmemleak_alloc(ret, size, 1, gfp);
98307+ return ret;
98308+}
98309+
98310+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
98311+{
98312+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98313+ void *ret = __kmalloc_node_align(size, gfp, node, align);
98314+
98315+ if (!ZERO_OR_NULL_PTR(ret))
98316+ kmemleak_alloc(ret, size, 1, gfp);
98317 return ret;
98318 }
98319 EXPORT_SYMBOL(__kmalloc_node);
98320@@ -528,13 +542,92 @@ void kfree(const void *block)
98321 sp = slob_page(block);
98322 if (is_slob_page(sp)) {
98323 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98324- unsigned int *m = (unsigned int *)(block - align);
98325- slob_free(m, *m + align);
98326- } else
98327+ slob_t *m = (slob_t *)(block - align);
98328+ slob_free(m, m[0].units + align);
98329+ } else {
98330+ clear_slob_page(sp);
98331+ free_slob_page(sp);
98332+ sp->size = 0;
98333 put_page(&sp->page);
98334+ }
98335 }
98336 EXPORT_SYMBOL(kfree);
98337
98338+void check_object_size(const void *ptr, unsigned long n, bool to)
98339+{
98340+
98341+#ifdef CONFIG_PAX_USERCOPY
98342+ struct slob_page *sp;
98343+ const slob_t *free;
98344+ const void *base;
98345+ unsigned long flags;
98346+ const char *type;
98347+
98348+ if (!n)
98349+ return;
98350+
98351+ type = "<null>";
98352+ if (ZERO_OR_NULL_PTR(ptr))
98353+ goto report;
98354+
98355+ if (!virt_addr_valid(ptr))
98356+ return;
98357+
98358+ type = "<process stack>";
98359+ sp = slob_page(ptr);
98360+ if (!PageSlab((struct page *)sp)) {
98361+ if (object_is_on_stack(ptr, n) == -1)
98362+ goto report;
98363+ return;
98364+ }
98365+
98366+ type = "<slob>";
98367+ if (sp->size) {
98368+ base = page_address(&sp->page);
98369+ if (base <= ptr && n <= sp->size - (ptr - base))
98370+ return;
98371+ goto report;
98372+ }
98373+
98374+ /* some tricky double walking to find the chunk */
98375+ spin_lock_irqsave(&slob_lock, flags);
98376+ base = (void *)((unsigned long)ptr & PAGE_MASK);
98377+ free = sp->free;
98378+
98379+ while (!slob_last(free) && (void *)free <= ptr) {
98380+ base = free + slob_units(free);
98381+ free = slob_next(free);
98382+ }
98383+
98384+ while (base < (void *)free) {
98385+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
98386+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
98387+ int offset;
98388+
98389+ if (ptr < base + align)
98390+ break;
98391+
98392+ offset = ptr - base - align;
98393+ if (offset >= m) {
98394+ base += size;
98395+ continue;
98396+ }
98397+
98398+ if (n > m - offset)
98399+ break;
98400+
98401+ spin_unlock_irqrestore(&slob_lock, flags);
98402+ return;
98403+ }
98404+
98405+ spin_unlock_irqrestore(&slob_lock, flags);
98406+report:
98407+ pax_report_usercopy(ptr, n, to, type);
98408+#endif
98409+
98410+}
98411+EXPORT_SYMBOL(check_object_size);
98412+
98413 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
98414 size_t ksize(const void *block)
98415 {
98416@@ -547,10 +640,10 @@ size_t ksize(const void *block)
98417 sp = slob_page(block);
98418 if (is_slob_page(sp)) {
98419 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
98420- unsigned int *m = (unsigned int *)(block - align);
98421- return SLOB_UNITS(*m) * SLOB_UNIT;
98422+ slob_t *m = (slob_t *)(block - align);
98423+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
98424 } else
98425- return sp->page.private;
98426+ return sp->size;
98427 }
98428 EXPORT_SYMBOL(ksize);
98429
98430@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
98431 {
98432 struct kmem_cache *c;
98433
98434+#ifdef CONFIG_PAX_USERCOPY
98435+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
98436+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
98437+#else
98438 c = slob_alloc(sizeof(struct kmem_cache),
98439 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
98440+#endif
98441
98442 if (c) {
98443 c->name = name;
98444@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
98445 {
98446 void *b;
98447
98448+#ifdef CONFIG_PAX_USERCOPY
98449+ b = __kmalloc_node_align(c->size, flags, node, c->align);
98450+#else
98451 if (c->size < PAGE_SIZE) {
98452 b = slob_alloc(c->size, flags, c->align, node);
98453 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
98454 SLOB_UNITS(c->size) * SLOB_UNIT,
98455 flags, node);
98456 } else {
98457+ struct slob_page *sp;
98458+
98459 b = slob_new_pages(flags, get_order(c->size), node);
98460+ sp = slob_page(b);
98461+ sp->size = c->size;
98462 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
98463 PAGE_SIZE << get_order(c->size),
98464 flags, node);
98465 }
98466+#endif
98467
98468 if (c->ctor)
98469 c->ctor(b);
98470@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
98471
98472 static void __kmem_cache_free(void *b, int size)
98473 {
98474- if (size < PAGE_SIZE)
98475+ struct slob_page *sp = slob_page(b);
98476+
98477+ if (is_slob_page(sp))
98478 slob_free(b, size);
98479- else
98480+ else {
98481+ clear_slob_page(sp);
98482+ free_slob_page(sp);
98483+ sp->size = 0;
98484 slob_free_pages(b, get_order(size));
98485+ }
98486 }
98487
98488 static void kmem_rcu_free(struct rcu_head *head)
98489@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
98490
98491 void kmem_cache_free(struct kmem_cache *c, void *b)
98492 {
98493+ int size = c->size;
98494+
98495+#ifdef CONFIG_PAX_USERCOPY
98496+ if (size + c->align < PAGE_SIZE) {
98497+ size += c->align;
98498+ b -= c->align;
98499+ }
98500+#endif
98501+
98502 kmemleak_free_recursive(b, c->flags);
98503 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
98504 struct slob_rcu *slob_rcu;
98505- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
98506+ slob_rcu = b + (size - sizeof(struct slob_rcu));
98507 INIT_RCU_HEAD(&slob_rcu->head);
98508- slob_rcu->size = c->size;
98509+ slob_rcu->size = size;
98510 call_rcu(&slob_rcu->head, kmem_rcu_free);
98511 } else {
98512- __kmem_cache_free(b, c->size);
98513+ __kmem_cache_free(b, size);
98514 }
98515
98516+#ifdef CONFIG_PAX_USERCOPY
98517+ trace_kfree(_RET_IP_, b);
98518+#else
98519 trace_kmem_cache_free(_RET_IP_, b);
98520+#endif
98521+
98522 }
98523 EXPORT_SYMBOL(kmem_cache_free);
98524
98525diff --git a/mm/slub.c b/mm/slub.c
98526index 4996fc7..87e01d0 100644
98527--- a/mm/slub.c
98528+++ b/mm/slub.c
98529@@ -201,7 +201,7 @@ struct track {
98530
98531 enum track_item { TRACK_ALLOC, TRACK_FREE };
98532
98533-#ifdef CONFIG_SLUB_DEBUG
98534+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
98535 static int sysfs_slab_add(struct kmem_cache *);
98536 static int sysfs_slab_alias(struct kmem_cache *, const char *);
98537 static void sysfs_slab_remove(struct kmem_cache *);
98538@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
98539 if (!t->addr)
98540 return;
98541
98542- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
98543+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
98544 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
98545 }
98546
98547@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
98548
98549 page = virt_to_head_page(x);
98550
98551+ BUG_ON(!PageSlab(page));
98552+
98553 slab_free(s, page, x, _RET_IP_);
98554
98555 trace_kmem_cache_free(_RET_IP_, x);
98556@@ -1937,7 +1939,7 @@ static int slub_min_objects;
98557 * Merge control. If this is set then no merging of slab caches will occur.
98558 * (Could be removed. This was introduced to pacify the merge skeptics.)
98559 */
98560-static int slub_nomerge;
98561+static int slub_nomerge = 1;
98562
98563 /*
98564 * Calculate the order of allocation given an slab object size.
98565@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
98566 * list to avoid pounding the page allocator excessively.
98567 */
98568 set_min_partial(s, ilog2(s->size));
98569- s->refcount = 1;
98570+ atomic_set(&s->refcount, 1);
98571 #ifdef CONFIG_NUMA
98572 s->remote_node_defrag_ratio = 1000;
98573 #endif
98574@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
98575 void kmem_cache_destroy(struct kmem_cache *s)
98576 {
98577 down_write(&slub_lock);
98578- s->refcount--;
98579- if (!s->refcount) {
98580+ if (atomic_dec_and_test(&s->refcount)) {
98581 list_del(&s->list);
98582 up_write(&slub_lock);
98583 if (kmem_cache_close(s)) {
98584@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
98585 __setup("slub_nomerge", setup_slub_nomerge);
98586
98587 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
98588- const char *name, int size, gfp_t gfp_flags)
98589+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
98590 {
98591- unsigned int flags = 0;
98592-
98593 if (gfp_flags & SLUB_DMA)
98594- flags = SLAB_CACHE_DMA;
98595+ flags |= SLAB_CACHE_DMA;
98596
98597 /*
98598 * This function is called with IRQs disabled during early-boot on
98599@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
98600 EXPORT_SYMBOL(__kmalloc_node);
98601 #endif
98602
98603+void check_object_size(const void *ptr, unsigned long n, bool to)
98604+{
98605+
98606+#ifdef CONFIG_PAX_USERCOPY
98607+ struct page *page;
98608+ struct kmem_cache *s = NULL;
98609+ unsigned long offset;
98610+ const char *type;
98611+
98612+ if (!n)
98613+ return;
98614+
98615+ type = "<null>";
98616+ if (ZERO_OR_NULL_PTR(ptr))
98617+ goto report;
98618+
98619+ if (!virt_addr_valid(ptr))
98620+ return;
98621+
98622+ page = get_object_page(ptr);
98623+
98624+ type = "<process stack>";
98625+ if (!page) {
98626+ if (object_is_on_stack(ptr, n) == -1)
98627+ goto report;
98628+ return;
98629+ }
98630+
98631+ s = page->slab;
98632+ type = s->name;
98633+ if (!(s->flags & SLAB_USERCOPY))
98634+ goto report;
98635+
98636+ offset = (ptr - page_address(page)) % s->size;
98637+ if (offset <= s->objsize && n <= s->objsize - offset)
98638+ return;
98639+
98640+report:
98641+ pax_report_usercopy(ptr, n, to, type);
98642+#endif
98643+
98644+}
98645+EXPORT_SYMBOL(check_object_size);
98646+
98647 size_t ksize(const void *object)
98648 {
98649 struct page *page;
98650@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
98651 * kmem_cache_open for slab_state == DOWN.
98652 */
98653 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
98654- sizeof(struct kmem_cache_node), GFP_NOWAIT);
98655- kmalloc_caches[0].refcount = -1;
98656+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
98657+ atomic_set(&kmalloc_caches[0].refcount, -1);
98658 caches++;
98659
98660 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
98661@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
98662 /* Caches that are not of the two-to-the-power-of size */
98663 if (KMALLOC_MIN_SIZE <= 32) {
98664 create_kmalloc_cache(&kmalloc_caches[1],
98665- "kmalloc-96", 96, GFP_NOWAIT);
98666+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
98667 caches++;
98668 }
98669 if (KMALLOC_MIN_SIZE <= 64) {
98670 create_kmalloc_cache(&kmalloc_caches[2],
98671- "kmalloc-192", 192, GFP_NOWAIT);
98672+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
98673 caches++;
98674 }
98675
98676 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
98677 create_kmalloc_cache(&kmalloc_caches[i],
98678- "kmalloc", 1 << i, GFP_NOWAIT);
98679+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
98680 caches++;
98681 }
98682
98683@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
98684 /*
98685 * We may have set a slab to be unmergeable during bootstrap.
98686 */
98687- if (s->refcount < 0)
98688+ if (atomic_read(&s->refcount) < 0)
98689 return 1;
98690
98691 return 0;
98692@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
98693 if (s) {
98694 int cpu;
98695
98696- s->refcount++;
98697+ atomic_inc(&s->refcount);
98698 /*
98699 * Adjust the object sizes so that we clear
98700 * the complete object on kzalloc.
98701@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
98702
98703 if (sysfs_slab_alias(s, name)) {
98704 down_write(&slub_lock);
98705- s->refcount--;
98706+ atomic_dec(&s->refcount);
98707 up_write(&slub_lock);
98708 goto err;
98709 }
98710@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
98711
98712 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
98713 {
98714- return sprintf(buf, "%d\n", s->refcount - 1);
98715+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
98716 }
98717 SLAB_ATTR_RO(aliases);
98718
98719@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
98720 kfree(s);
98721 }
98722
98723-static struct sysfs_ops slab_sysfs_ops = {
98724+static const struct sysfs_ops slab_sysfs_ops = {
98725 .show = slab_attr_show,
98726 .store = slab_attr_store,
98727 };
98728@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
98729 return 0;
98730 }
98731
98732-static struct kset_uevent_ops slab_uevent_ops = {
98733+static const struct kset_uevent_ops slab_uevent_ops = {
98734 .filter = uevent_filter,
98735 };
98736
98737@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
98738 return name;
98739 }
98740
98741+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
98742 static int sysfs_slab_add(struct kmem_cache *s)
98743 {
98744 int err;
98745@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
98746 kobject_del(&s->kobj);
98747 kobject_put(&s->kobj);
98748 }
98749+#endif
98750
98751 /*
98752 * Need to buffer aliases during bootup until sysfs becomes
98753@@ -4632,6 +4677,7 @@ struct saved_alias {
98754
98755 static struct saved_alias *alias_list;
98756
98757+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
98758 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
98759 {
98760 struct saved_alias *al;
98761@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
98762 alias_list = al;
98763 return 0;
98764 }
98765+#endif
98766
98767 static int __init slab_sysfs_init(void)
98768 {
98769@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
98770
98771 static int __init slab_proc_init(void)
98772 {
98773- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
98774+ mode_t gr_mode = S_IRUGO;
98775+
98776+#ifdef CONFIG_GRKERNSEC_PROC_ADD
98777+ gr_mode = S_IRUSR;
98778+#endif
98779+
98780+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
98781 return 0;
98782 }
98783 module_init(slab_proc_init);
98784diff --git a/mm/swap.c b/mm/swap.c
98785index 308e57d..5de19c0 100644
98786--- a/mm/swap.c
98787+++ b/mm/swap.c
98788@@ -30,6 +30,7 @@
98789 #include <linux/notifier.h>
98790 #include <linux/backing-dev.h>
98791 #include <linux/memcontrol.h>
98792+#include <linux/hugetlb.h>
98793
98794 #include "internal.h"
98795
98796@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
98797 compound_page_dtor *dtor;
98798
98799 dtor = get_compound_page_dtor(page);
98800+ if (!PageHuge(page))
98801+ BUG_ON(dtor != free_compound_page);
98802 (*dtor)(page);
98803 }
98804 }
98805diff --git a/mm/util.c b/mm/util.c
98806index e48b493..24a601d 100644
98807--- a/mm/util.c
98808+++ b/mm/util.c
98809@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
98810 void arch_pick_mmap_layout(struct mm_struct *mm)
98811 {
98812 mm->mmap_base = TASK_UNMAPPED_BASE;
98813+
98814+#ifdef CONFIG_PAX_RANDMMAP
98815+ if (mm->pax_flags & MF_PAX_RANDMMAP)
98816+ mm->mmap_base += mm->delta_mmap;
98817+#endif
98818+
98819 mm->get_unmapped_area = arch_get_unmapped_area;
98820 mm->unmap_area = arch_unmap_area;
98821 }
98822diff --git a/mm/vmalloc.c b/mm/vmalloc.c
98823index f34ffd0..e60c44f 100644
98824--- a/mm/vmalloc.c
98825+++ b/mm/vmalloc.c
98826@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
98827
98828 pte = pte_offset_kernel(pmd, addr);
98829 do {
98830- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
98831- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
98832+
98833+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98834+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
98835+ BUG_ON(!pte_exec(*pte));
98836+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
98837+ continue;
98838+ }
98839+#endif
98840+
98841+ {
98842+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
98843+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
98844+ }
98845 } while (pte++, addr += PAGE_SIZE, addr != end);
98846 }
98847
98848@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
98849 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
98850 {
98851 pte_t *pte;
98852+ int ret = -ENOMEM;
98853
98854 /*
98855 * nr is a running index into the array which helps higher level
98856@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
98857 pte = pte_alloc_kernel(pmd, addr);
98858 if (!pte)
98859 return -ENOMEM;
98860+
98861+ pax_open_kernel();
98862 do {
98863 struct page *page = pages[*nr];
98864
98865- if (WARN_ON(!pte_none(*pte)))
98866- return -EBUSY;
98867- if (WARN_ON(!page))
98868- return -ENOMEM;
98869+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98870+ if (!(pgprot_val(prot) & _PAGE_NX))
98871+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
98872+ else
98873+#endif
98874+
98875+ if (WARN_ON(!pte_none(*pte))) {
98876+ ret = -EBUSY;
98877+ goto out;
98878+ }
98879+ if (WARN_ON(!page)) {
98880+ ret = -ENOMEM;
98881+ goto out;
98882+ }
98883 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
98884 (*nr)++;
98885 } while (pte++, addr += PAGE_SIZE, addr != end);
98886- return 0;
98887+ ret = 0;
98888+out:
98889+ pax_close_kernel();
98890+ return ret;
98891 }
98892
98893 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
98894@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
98895 * and fall back on vmalloc() if that fails. Others
98896 * just put it in the vmalloc space.
98897 */
98898-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
98899+#ifdef CONFIG_MODULES
98900+#ifdef MODULES_VADDR
98901 unsigned long addr = (unsigned long)x;
98902 if (addr >= MODULES_VADDR && addr < MODULES_END)
98903 return 1;
98904 #endif
98905+
98906+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
98907+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
98908+ return 1;
98909+#endif
98910+
98911+#endif
98912+
98913 return is_vmalloc_addr(x);
98914 }
98915
98916@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
98917
98918 if (!pgd_none(*pgd)) {
98919 pud_t *pud = pud_offset(pgd, addr);
98920+#ifdef CONFIG_X86
98921+ if (!pud_large(*pud))
98922+#endif
98923 if (!pud_none(*pud)) {
98924 pmd_t *pmd = pmd_offset(pud, addr);
98925+#ifdef CONFIG_X86
98926+ if (!pmd_large(*pmd))
98927+#endif
98928 if (!pmd_none(*pmd)) {
98929 pte_t *ptep, pte;
98930
98931@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
98932 struct rb_node *tmp;
98933
98934 while (*p) {
98935- struct vmap_area *tmp;
98936+ struct vmap_area *varea;
98937
98938 parent = *p;
98939- tmp = rb_entry(parent, struct vmap_area, rb_node);
98940- if (va->va_start < tmp->va_end)
98941+ varea = rb_entry(parent, struct vmap_area, rb_node);
98942+ if (va->va_start < varea->va_end)
98943 p = &(*p)->rb_left;
98944- else if (va->va_end > tmp->va_start)
98945+ else if (va->va_end > varea->va_start)
98946 p = &(*p)->rb_right;
98947 else
98948 BUG();
98949@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
98950 struct vm_struct *area;
98951
98952 BUG_ON(in_interrupt());
98953+
98954+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
98955+ if (flags & VM_KERNEXEC) {
98956+ if (start != VMALLOC_START || end != VMALLOC_END)
98957+ return NULL;
98958+ start = (unsigned long)MODULES_EXEC_VADDR;
98959+ end = (unsigned long)MODULES_EXEC_END;
98960+ }
98961+#endif
98962+
98963 if (flags & VM_IOREMAP) {
98964 int bit = fls(size);
98965
98966@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
98967 if (count > totalram_pages)
98968 return NULL;
98969
98970+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
98971+ if (!(pgprot_val(prot) & _PAGE_NX))
98972+ flags |= VM_KERNEXEC;
98973+#endif
98974+
98975 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
98976 __builtin_return_address(0));
98977 if (!area)
98978@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
98979 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
98980 return NULL;
98981
98982+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
98983+ if (!(pgprot_val(prot) & _PAGE_NX))
98984+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
98985+ VMALLOC_START, VMALLOC_END, node,
98986+ gfp_mask, caller);
98987+ else
98988+#endif
98989+
98990 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
98991 VMALLOC_START, VMALLOC_END, node,
98992 gfp_mask, caller);
98993@@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
98994 return addr;
98995 }
98996
98997+#undef __vmalloc
98998 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
98999 {
99000 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
99001@@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
99002 * For tight control over page level allocator and protection flags
99003 * use __vmalloc() instead.
99004 */
99005+#undef vmalloc
99006 void *vmalloc(unsigned long size)
99007 {
99008 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
99009@@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
99010 * The resulting memory area is zeroed so it can be mapped to userspace
99011 * without leaking data.
99012 */
99013+#undef vmalloc_user
99014 void *vmalloc_user(unsigned long size)
99015 {
99016 struct vm_struct *area;
99017@@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
99018 * For tight control over page level allocator and protection flags
99019 * use __vmalloc() instead.
99020 */
99021+#undef vmalloc_node
99022 void *vmalloc_node(unsigned long size, int node)
99023 {
99024 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
99025@@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
99026 * For tight control over page level allocator and protection flags
99027 * use __vmalloc() instead.
99028 */
99029-
99030+#undef vmalloc_exec
99031 void *vmalloc_exec(unsigned long size)
99032 {
99033- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
99034+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
99035 -1, __builtin_return_address(0));
99036 }
99037
99038@@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
99039 * Allocate enough 32bit PA addressable pages to cover @size from the
99040 * page level allocator and map them into contiguous kernel virtual space.
99041 */
99042+#undef vmalloc_32
99043 void *vmalloc_32(unsigned long size)
99044 {
99045 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
99046@@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
99047 * The resulting memory area is 32bit addressable and zeroed so it can be
99048 * mapped to userspace without leaking data.
99049 */
99050+#undef vmalloc_32_user
99051 void *vmalloc_32_user(unsigned long size)
99052 {
99053 struct vm_struct *area;
99054@@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
99055 unsigned long uaddr = vma->vm_start;
99056 unsigned long usize = vma->vm_end - vma->vm_start;
99057
99058+ BUG_ON(vma->vm_mirror);
99059+
99060 if ((PAGE_SIZE-1) & (unsigned long)addr)
99061 return -EINVAL;
99062
99063diff --git a/mm/vmstat.c b/mm/vmstat.c
99064index 42d76c6..5643dc4 100644
99065--- a/mm/vmstat.c
99066+++ b/mm/vmstat.c
99067@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
99068 *
99069 * vm_stat contains the global counters
99070 */
99071-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99072+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
99073 EXPORT_SYMBOL(vm_stat);
99074
99075 #ifdef CONFIG_SMP
99076@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
99077 v = p->vm_stat_diff[i];
99078 p->vm_stat_diff[i] = 0;
99079 local_irq_restore(flags);
99080- atomic_long_add(v, &zone->vm_stat[i]);
99081+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
99082 global_diff[i] += v;
99083 #ifdef CONFIG_NUMA
99084 /* 3 seconds idle till flush */
99085@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
99086
99087 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
99088 if (global_diff[i])
99089- atomic_long_add(global_diff[i], &vm_stat[i]);
99090+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
99091 }
99092
99093 #endif
99094@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
99095 start_cpu_timer(cpu);
99096 #endif
99097 #ifdef CONFIG_PROC_FS
99098- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
99099- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
99100- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
99101- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
99102+ {
99103+ mode_t gr_mode = S_IRUGO;
99104+#ifdef CONFIG_GRKERNSEC_PROC_ADD
99105+ gr_mode = S_IRUSR;
99106+#endif
99107+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
99108+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
99109+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
99110+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
99111+#else
99112+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
99113+#endif
99114+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
99115+ }
99116 #endif
99117 return 0;
99118 }
99119diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
99120index a29c5ab..6143f20 100644
99121--- a/net/8021q/vlan.c
99122+++ b/net/8021q/vlan.c
99123@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
99124 err = -EPERM;
99125 if (!capable(CAP_NET_ADMIN))
99126 break;
99127- if ((args.u.name_type >= 0) &&
99128- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
99129+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
99130 struct vlan_net *vn;
99131
99132 vn = net_generic(net, vlan_net_id);
99133diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
99134index a2d2984..f9eb711 100644
99135--- a/net/9p/trans_fd.c
99136+++ b/net/9p/trans_fd.c
99137@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
99138 oldfs = get_fs();
99139 set_fs(get_ds());
99140 /* The cast to a user pointer is valid due to the set_fs() */
99141- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
99142+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
99143 set_fs(oldfs);
99144
99145 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
99146diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
99147index 02cc7e7..4514f1b 100644
99148--- a/net/atm/atm_misc.c
99149+++ b/net/atm/atm_misc.c
99150@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
99151 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
99152 return 1;
99153 atm_return(vcc,truesize);
99154- atomic_inc(&vcc->stats->rx_drop);
99155+ atomic_inc_unchecked(&vcc->stats->rx_drop);
99156 return 0;
99157 }
99158
99159@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
99160 }
99161 }
99162 atm_return(vcc,guess);
99163- atomic_inc(&vcc->stats->rx_drop);
99164+ atomic_inc_unchecked(&vcc->stats->rx_drop);
99165 return NULL;
99166 }
99167
99168@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
99169
99170 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99171 {
99172-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99173+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99174 __SONET_ITEMS
99175 #undef __HANDLE_ITEM
99176 }
99177@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99178
99179 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
99180 {
99181-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
99182+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
99183 __SONET_ITEMS
99184 #undef __HANDLE_ITEM
99185 }
99186diff --git a/net/atm/lec.h b/net/atm/lec.h
99187index 9d14d19..5c145f3 100644
99188--- a/net/atm/lec.h
99189+++ b/net/atm/lec.h
99190@@ -48,7 +48,7 @@ struct lane2_ops {
99191 const u8 *tlvs, u32 sizeoftlvs);
99192 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
99193 const u8 *tlvs, u32 sizeoftlvs);
99194-};
99195+} __no_const;
99196
99197 /*
99198 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
99199diff --git a/net/atm/mpc.h b/net/atm/mpc.h
99200index 0919a88..a23d54e 100644
99201--- a/net/atm/mpc.h
99202+++ b/net/atm/mpc.h
99203@@ -33,7 +33,7 @@ struct mpoa_client {
99204 struct mpc_parameters parameters; /* parameters for this client */
99205
99206 const struct net_device_ops *old_ops;
99207- struct net_device_ops new_ops;
99208+ net_device_ops_no_const new_ops;
99209 };
99210
99211
99212diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
99213index 4504a4b..1733f1e 100644
99214--- a/net/atm/mpoa_caches.c
99215+++ b/net/atm/mpoa_caches.c
99216@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
99217 struct timeval now;
99218 struct k_message msg;
99219
99220+ pax_track_stack();
99221+
99222 do_gettimeofday(&now);
99223
99224 write_lock_irq(&client->egress_lock);
99225diff --git a/net/atm/proc.c b/net/atm/proc.c
99226index ab8419a..aa91497 100644
99227--- a/net/atm/proc.c
99228+++ b/net/atm/proc.c
99229@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
99230 const struct k_atm_aal_stats *stats)
99231 {
99232 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
99233- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
99234- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
99235- atomic_read(&stats->rx_drop));
99236+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
99237+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
99238+ atomic_read_unchecked(&stats->rx_drop));
99239 }
99240
99241 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
99242@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
99243 {
99244 struct sock *sk = sk_atm(vcc);
99245
99246+#ifdef CONFIG_GRKERNSEC_HIDESYM
99247+ seq_printf(seq, "%p ", NULL);
99248+#else
99249 seq_printf(seq, "%p ", vcc);
99250+#endif
99251+
99252 if (!vcc->dev)
99253 seq_printf(seq, "Unassigned ");
99254 else
99255@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
99256 {
99257 if (!vcc->dev)
99258 seq_printf(seq, sizeof(void *) == 4 ?
99259+#ifdef CONFIG_GRKERNSEC_HIDESYM
99260+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
99261+#else
99262 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
99263+#endif
99264 else
99265 seq_printf(seq, "%3d %3d %5d ",
99266 vcc->dev->number, vcc->vpi, vcc->vci);
99267diff --git a/net/atm/resources.c b/net/atm/resources.c
99268index 56b7322..c48b84e 100644
99269--- a/net/atm/resources.c
99270+++ b/net/atm/resources.c
99271@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
99272 static void copy_aal_stats(struct k_atm_aal_stats *from,
99273 struct atm_aal_stats *to)
99274 {
99275-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
99276+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
99277 __AAL_STAT_ITEMS
99278 #undef __HANDLE_ITEM
99279 }
99280@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
99281 static void subtract_aal_stats(struct k_atm_aal_stats *from,
99282 struct atm_aal_stats *to)
99283 {
99284-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
99285+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
99286 __AAL_STAT_ITEMS
99287 #undef __HANDLE_ITEM
99288 }
99289diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
99290index 8567d47..bba2292 100644
99291--- a/net/bridge/br_private.h
99292+++ b/net/bridge/br_private.h
99293@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
99294
99295 #ifdef CONFIG_SYSFS
99296 /* br_sysfs_if.c */
99297-extern struct sysfs_ops brport_sysfs_ops;
99298+extern const struct sysfs_ops brport_sysfs_ops;
99299 extern int br_sysfs_addif(struct net_bridge_port *p);
99300
99301 /* br_sysfs_br.c */
99302diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
99303index 9a52ac5..c97538e 100644
99304--- a/net/bridge/br_stp_if.c
99305+++ b/net/bridge/br_stp_if.c
99306@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
99307 char *envp[] = { NULL };
99308
99309 if (br->stp_enabled == BR_USER_STP) {
99310- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
99311+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
99312 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
99313 br->dev->name, r);
99314
99315diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
99316index 820643a..ce77fb3 100644
99317--- a/net/bridge/br_sysfs_if.c
99318+++ b/net/bridge/br_sysfs_if.c
99319@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
99320 return ret;
99321 }
99322
99323-struct sysfs_ops brport_sysfs_ops = {
99324+const struct sysfs_ops brport_sysfs_ops = {
99325 .show = brport_show,
99326 .store = brport_store,
99327 };
99328diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
99329index d73d47f..72df42a 100644
99330--- a/net/bridge/netfilter/ebtables.c
99331+++ b/net/bridge/netfilter/ebtables.c
99332@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
99333 unsigned int entries_size, nentries;
99334 char *entries;
99335
99336+ pax_track_stack();
99337+
99338 if (cmd == EBT_SO_GET_ENTRIES) {
99339 entries_size = t->private->entries_size;
99340 nentries = t->private->nentries;
99341diff --git a/net/can/bcm.c b/net/can/bcm.c
99342index 2ffd2e0..72a7486 100644
99343--- a/net/can/bcm.c
99344+++ b/net/can/bcm.c
99345@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
99346 struct bcm_sock *bo = bcm_sk(sk);
99347 struct bcm_op *op;
99348
99349+#ifdef CONFIG_GRKERNSEC_HIDESYM
99350+ seq_printf(m, ">>> socket %p", NULL);
99351+ seq_printf(m, " / sk %p", NULL);
99352+ seq_printf(m, " / bo %p", NULL);
99353+#else
99354 seq_printf(m, ">>> socket %p", sk->sk_socket);
99355 seq_printf(m, " / sk %p", sk);
99356 seq_printf(m, " / bo %p", bo);
99357+#endif
99358 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
99359 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
99360 seq_printf(m, " <<<\n");
99361diff --git a/net/compat.c b/net/compat.c
99362index 9559afc..ccd74e1 100644
99363--- a/net/compat.c
99364+++ b/net/compat.c
99365@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
99366 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
99367 __get_user(kmsg->msg_flags, &umsg->msg_flags))
99368 return -EFAULT;
99369- kmsg->msg_name = compat_ptr(tmp1);
99370- kmsg->msg_iov = compat_ptr(tmp2);
99371- kmsg->msg_control = compat_ptr(tmp3);
99372+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
99373+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
99374+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
99375 return 0;
99376 }
99377
99378@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
99379 kern_msg->msg_name = NULL;
99380
99381 tot_len = iov_from_user_compat_to_kern(kern_iov,
99382- (struct compat_iovec __user *)kern_msg->msg_iov,
99383+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
99384 kern_msg->msg_iovlen);
99385 if (tot_len >= 0)
99386 kern_msg->msg_iov = kern_iov;
99387@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
99388
99389 #define CMSG_COMPAT_FIRSTHDR(msg) \
99390 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
99391- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
99392+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
99393 (struct compat_cmsghdr __user *)NULL)
99394
99395 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
99396 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
99397 (ucmlen) <= (unsigned long) \
99398 ((mhdr)->msg_controllen - \
99399- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
99400+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
99401
99402 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
99403 struct compat_cmsghdr __user *cmsg, int cmsg_len)
99404 {
99405 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
99406- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
99407+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
99408 msg->msg_controllen)
99409 return NULL;
99410 return (struct compat_cmsghdr __user *)ptr;
99411@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
99412 {
99413 struct compat_timeval ctv;
99414 struct compat_timespec cts[3];
99415- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
99416+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
99417 struct compat_cmsghdr cmhdr;
99418 int cmlen;
99419
99420@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
99421
99422 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
99423 {
99424- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
99425+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
99426 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
99427 int fdnum = scm->fp->count;
99428 struct file **fp = scm->fp->fp;
99429@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
99430 len = sizeof(ktime);
99431 old_fs = get_fs();
99432 set_fs(KERNEL_DS);
99433- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
99434+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
99435 set_fs(old_fs);
99436
99437 if (!err) {
99438@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
99439 case MCAST_JOIN_GROUP:
99440 case MCAST_LEAVE_GROUP:
99441 {
99442- struct compat_group_req __user *gr32 = (void *)optval;
99443+ struct compat_group_req __user *gr32 = (void __user *)optval;
99444 struct group_req __user *kgr =
99445 compat_alloc_user_space(sizeof(struct group_req));
99446 u32 interface;
99447@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
99448 case MCAST_BLOCK_SOURCE:
99449 case MCAST_UNBLOCK_SOURCE:
99450 {
99451- struct compat_group_source_req __user *gsr32 = (void *)optval;
99452+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
99453 struct group_source_req __user *kgsr = compat_alloc_user_space(
99454 sizeof(struct group_source_req));
99455 u32 interface;
99456@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
99457 }
99458 case MCAST_MSFILTER:
99459 {
99460- struct compat_group_filter __user *gf32 = (void *)optval;
99461+ struct compat_group_filter __user *gf32 = (void __user *)optval;
99462 struct group_filter __user *kgf;
99463 u32 interface, fmode, numsrc;
99464
99465diff --git a/net/core/dev.c b/net/core/dev.c
99466index 84a0705..575db4c 100644
99467--- a/net/core/dev.c
99468+++ b/net/core/dev.c
99469@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
99470 if (no_module && capable(CAP_NET_ADMIN))
99471 no_module = request_module("netdev-%s", name);
99472 if (no_module && capable(CAP_SYS_MODULE)) {
99473+#ifdef CONFIG_GRKERNSEC_MODHARDEN
99474+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
99475+#else
99476 if (!request_module("%s", name))
99477 pr_err("Loading kernel module for a network device "
99478 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
99479 "instead\n", name);
99480+#endif
99481 }
99482 }
99483 EXPORT_SYMBOL(dev_load);
99484@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
99485
99486 struct dev_gso_cb {
99487 void (*destructor)(struct sk_buff *skb);
99488-};
99489+} __no_const;
99490
99491 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
99492
99493@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
99494 }
99495 EXPORT_SYMBOL(netif_rx_ni);
99496
99497-static void net_tx_action(struct softirq_action *h)
99498+static void net_tx_action(void)
99499 {
99500 struct softnet_data *sd = &__get_cpu_var(softnet_data);
99501
99502@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
99503 EXPORT_SYMBOL(netif_napi_del);
99504
99505
99506-static void net_rx_action(struct softirq_action *h)
99507+static void net_rx_action(void)
99508 {
99509 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
99510 unsigned long time_limit = jiffies + 2;
99511diff --git a/net/core/flow.c b/net/core/flow.c
99512index 9601587..8c4824e 100644
99513--- a/net/core/flow.c
99514+++ b/net/core/flow.c
99515@@ -35,11 +35,11 @@ struct flow_cache_entry {
99516 atomic_t *object_ref;
99517 };
99518
99519-atomic_t flow_cache_genid = ATOMIC_INIT(0);
99520+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
99521
99522 static u32 flow_hash_shift;
99523 #define flow_hash_size (1 << flow_hash_shift)
99524-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
99525+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
99526
99527 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
99528
99529@@ -52,7 +52,7 @@ struct flow_percpu_info {
99530 u32 hash_rnd;
99531 int count;
99532 };
99533-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
99534+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
99535
99536 #define flow_hash_rnd_recalc(cpu) \
99537 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
99538@@ -69,7 +69,7 @@ struct flow_flush_info {
99539 atomic_t cpuleft;
99540 struct completion completion;
99541 };
99542-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
99543+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
99544
99545 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
99546
99547@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
99548 if (fle->family == family &&
99549 fle->dir == dir &&
99550 flow_key_compare(key, &fle->key) == 0) {
99551- if (fle->genid == atomic_read(&flow_cache_genid)) {
99552+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
99553 void *ret = fle->object;
99554
99555 if (ret)
99556@@ -228,7 +228,7 @@ nocache:
99557 err = resolver(net, key, family, dir, &obj, &obj_ref);
99558
99559 if (fle && !err) {
99560- fle->genid = atomic_read(&flow_cache_genid);
99561+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
99562
99563 if (fle->object)
99564 atomic_dec(fle->object_ref);
99565@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
99566
99567 fle = flow_table(cpu)[i];
99568 for (; fle; fle = fle->next) {
99569- unsigned genid = atomic_read(&flow_cache_genid);
99570+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
99571
99572 if (!fle->object || fle->genid == genid)
99573 continue;
99574diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
99575index d4fd895..ac9b1e6 100644
99576--- a/net/core/rtnetlink.c
99577+++ b/net/core/rtnetlink.c
99578@@ -57,7 +57,7 @@ struct rtnl_link
99579 {
99580 rtnl_doit_func doit;
99581 rtnl_dumpit_func dumpit;
99582-};
99583+} __no_const;
99584
99585 static DEFINE_MUTEX(rtnl_mutex);
99586
99587diff --git a/net/core/scm.c b/net/core/scm.c
99588index d98eafc..1a190a9 100644
99589--- a/net/core/scm.c
99590+++ b/net/core/scm.c
99591@@ -191,7 +191,7 @@ error:
99592 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
99593 {
99594 struct cmsghdr __user *cm
99595- = (__force struct cmsghdr __user *)msg->msg_control;
99596+ = (struct cmsghdr __force_user *)msg->msg_control;
99597 struct cmsghdr cmhdr;
99598 int cmlen = CMSG_LEN(len);
99599 int err;
99600@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
99601 err = -EFAULT;
99602 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
99603 goto out;
99604- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
99605+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
99606 goto out;
99607 cmlen = CMSG_SPACE(len);
99608 if (msg->msg_controllen < cmlen)
99609@@ -229,7 +229,7 @@ out:
99610 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
99611 {
99612 struct cmsghdr __user *cm
99613- = (__force struct cmsghdr __user*)msg->msg_control;
99614+ = (struct cmsghdr __force_user *)msg->msg_control;
99615
99616 int fdmax = 0;
99617 int fdnum = scm->fp->count;
99618@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
99619 if (fdnum < fdmax)
99620 fdmax = fdnum;
99621
99622- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
99623+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
99624 i++, cmfptr++)
99625 {
99626 int new_fd;
99627diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
99628index 45329d7..626aaa6 100644
99629--- a/net/core/secure_seq.c
99630+++ b/net/core/secure_seq.c
99631@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
99632 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
99633
99634 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
99635- __be16 dport)
99636+ __be16 dport)
99637 {
99638 u32 secret[MD5_MESSAGE_BYTES / 4];
99639 u32 hash[MD5_DIGEST_WORDS];
99640@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
99641 secret[i] = net_secret[i];
99642
99643 md5_transform(hash, secret);
99644-
99645 return hash[0];
99646 }
99647 #endif
99648diff --git a/net/core/skbuff.c b/net/core/skbuff.c
99649index 025f924..70a71c4 100644
99650--- a/net/core/skbuff.c
99651+++ b/net/core/skbuff.c
99652@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
99653 struct sk_buff *frag_iter;
99654 struct sock *sk = skb->sk;
99655
99656+ pax_track_stack();
99657+
99658 /*
99659 * __skb_splice_bits() only fails if the output has no room left,
99660 * so no point in going over the frag_list for the error case.
99661diff --git a/net/core/sock.c b/net/core/sock.c
99662index 6605e75..3acebda 100644
99663--- a/net/core/sock.c
99664+++ b/net/core/sock.c
99665@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
99666 break;
99667
99668 case SO_PEERCRED:
99669+ {
99670+ struct ucred peercred;
99671 if (len > sizeof(sk->sk_peercred))
99672 len = sizeof(sk->sk_peercred);
99673- if (copy_to_user(optval, &sk->sk_peercred, len))
99674+ peercred = sk->sk_peercred;
99675+ if (copy_to_user(optval, &peercred, len))
99676 return -EFAULT;
99677 goto lenout;
99678+ }
99679
99680 case SO_PEERNAME:
99681 {
99682@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
99683 */
99684 smp_wmb();
99685 atomic_set(&sk->sk_refcnt, 1);
99686- atomic_set(&sk->sk_drops, 0);
99687+ atomic_set_unchecked(&sk->sk_drops, 0);
99688 }
99689 EXPORT_SYMBOL(sock_init_data);
99690
99691diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
99692index 2036568..c55883d 100644
99693--- a/net/decnet/sysctl_net_decnet.c
99694+++ b/net/decnet/sysctl_net_decnet.c
99695@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
99696
99697 if (len > *lenp) len = *lenp;
99698
99699- if (copy_to_user(buffer, addr, len))
99700+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
99701 return -EFAULT;
99702
99703 *lenp = len;
99704@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
99705
99706 if (len > *lenp) len = *lenp;
99707
99708- if (copy_to_user(buffer, devname, len))
99709+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
99710 return -EFAULT;
99711
99712 *lenp = len;
99713diff --git a/net/econet/Kconfig b/net/econet/Kconfig
99714index 39a2d29..f39c0fe 100644
99715--- a/net/econet/Kconfig
99716+++ b/net/econet/Kconfig
99717@@ -4,7 +4,7 @@
99718
99719 config ECONET
99720 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
99721- depends on EXPERIMENTAL && INET
99722+ depends on EXPERIMENTAL && INET && BROKEN
99723 ---help---
99724 Econet is a fairly old and slow networking protocol mainly used by
99725 Acorn computers to access file and print servers. It uses native
99726diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
99727index a413b1b..380849c 100644
99728--- a/net/ieee802154/dgram.c
99729+++ b/net/ieee802154/dgram.c
99730@@ -318,7 +318,7 @@ out:
99731 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
99732 {
99733 if (sock_queue_rcv_skb(sk, skb) < 0) {
99734- atomic_inc(&sk->sk_drops);
99735+ atomic_inc_unchecked(&sk->sk_drops);
99736 kfree_skb(skb);
99737 return NET_RX_DROP;
99738 }
99739diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
99740index 30e74ee..bfc6ee0 100644
99741--- a/net/ieee802154/raw.c
99742+++ b/net/ieee802154/raw.c
99743@@ -206,7 +206,7 @@ out:
99744 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
99745 {
99746 if (sock_queue_rcv_skb(sk, skb) < 0) {
99747- atomic_inc(&sk->sk_drops);
99748+ atomic_inc_unchecked(&sk->sk_drops);
99749 kfree_skb(skb);
99750 return NET_RX_DROP;
99751 }
99752diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
99753index dba56d2..acee5d6 100644
99754--- a/net/ipv4/inet_diag.c
99755+++ b/net/ipv4/inet_diag.c
99756@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
99757 r->idiag_retrans = 0;
99758
99759 r->id.idiag_if = sk->sk_bound_dev_if;
99760+#ifdef CONFIG_GRKERNSEC_HIDESYM
99761+ r->id.idiag_cookie[0] = 0;
99762+ r->id.idiag_cookie[1] = 0;
99763+#else
99764 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
99765 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
99766+#endif
99767
99768 r->id.idiag_sport = inet->sport;
99769 r->id.idiag_dport = inet->dport;
99770@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
99771 r->idiag_family = tw->tw_family;
99772 r->idiag_retrans = 0;
99773 r->id.idiag_if = tw->tw_bound_dev_if;
99774+
99775+#ifdef CONFIG_GRKERNSEC_HIDESYM
99776+ r->id.idiag_cookie[0] = 0;
99777+ r->id.idiag_cookie[1] = 0;
99778+#else
99779 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
99780 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
99781+#endif
99782+
99783 r->id.idiag_sport = tw->tw_sport;
99784 r->id.idiag_dport = tw->tw_dport;
99785 r->id.idiag_src[0] = tw->tw_rcv_saddr;
99786@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
99787 if (sk == NULL)
99788 goto unlock;
99789
99790+#ifndef CONFIG_GRKERNSEC_HIDESYM
99791 err = -ESTALE;
99792 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
99793 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
99794 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
99795 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
99796 goto out;
99797+#endif
99798
99799 err = -ENOMEM;
99800 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
99801@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
99802 r->idiag_retrans = req->retrans;
99803
99804 r->id.idiag_if = sk->sk_bound_dev_if;
99805+
99806+#ifdef CONFIG_GRKERNSEC_HIDESYM
99807+ r->id.idiag_cookie[0] = 0;
99808+ r->id.idiag_cookie[1] = 0;
99809+#else
99810 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
99811 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
99812+#endif
99813
99814 tmo = req->expires - jiffies;
99815 if (tmo < 0)
99816diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
99817index d717267..56de7e7 100644
99818--- a/net/ipv4/inet_hashtables.c
99819+++ b/net/ipv4/inet_hashtables.c
99820@@ -18,12 +18,15 @@
99821 #include <linux/sched.h>
99822 #include <linux/slab.h>
99823 #include <linux/wait.h>
99824+#include <linux/security.h>
99825
99826 #include <net/inet_connection_sock.h>
99827 #include <net/inet_hashtables.h>
99828 #include <net/secure_seq.h>
99829 #include <net/ip.h>
99830
99831+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
99832+
99833 /*
99834 * Allocate and initialize a new local port bind bucket.
99835 * The bindhash mutex for snum's hash chain must be held here.
99836@@ -491,6 +494,8 @@ ok:
99837 }
99838 spin_unlock(&head->lock);
99839
99840+ gr_update_task_in_ip_table(current, inet_sk(sk));
99841+
99842 if (tw) {
99843 inet_twsk_deschedule(tw, death_row);
99844 inet_twsk_put(tw);
99845diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
99846index 13b229f..6956484 100644
99847--- a/net/ipv4/inetpeer.c
99848+++ b/net/ipv4/inetpeer.c
99849@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
99850 struct inet_peer *p, *n;
99851 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
99852
99853+ pax_track_stack();
99854+
99855 /* Look up for the address quickly. */
99856 read_lock_bh(&peer_pool_lock);
99857 p = lookup(daddr, NULL);
99858@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
99859 return NULL;
99860 n->v4daddr = daddr;
99861 atomic_set(&n->refcnt, 1);
99862- atomic_set(&n->rid, 0);
99863+ atomic_set_unchecked(&n->rid, 0);
99864 n->ip_id_count = secure_ip_id(daddr);
99865 n->tcp_ts_stamp = 0;
99866
99867diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
99868index d3fe10b..feeafc9 100644
99869--- a/net/ipv4/ip_fragment.c
99870+++ b/net/ipv4/ip_fragment.c
99871@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
99872 return 0;
99873
99874 start = qp->rid;
99875- end = atomic_inc_return(&peer->rid);
99876+ end = atomic_inc_return_unchecked(&peer->rid);
99877 qp->rid = end;
99878
99879 rc = qp->q.fragments && (end - start) > max;
99880diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
99881index e982b5c..f079d75 100644
99882--- a/net/ipv4/ip_sockglue.c
99883+++ b/net/ipv4/ip_sockglue.c
99884@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
99885 int val;
99886 int len;
99887
99888+ pax_track_stack();
99889+
99890 if (level != SOL_IP)
99891 return -EOPNOTSUPP;
99892
99893@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
99894 if (sk->sk_type != SOCK_STREAM)
99895 return -ENOPROTOOPT;
99896
99897- msg.msg_control = optval;
99898+ msg.msg_control = (void __force_kernel *)optval;
99899 msg.msg_controllen = len;
99900 msg.msg_flags = 0;
99901
99902diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
99903index f8d04c2..c1188f2 100644
99904--- a/net/ipv4/ipconfig.c
99905+++ b/net/ipv4/ipconfig.c
99906@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
99907
99908 mm_segment_t oldfs = get_fs();
99909 set_fs(get_ds());
99910- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
99911+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
99912 set_fs(oldfs);
99913 return res;
99914 }
99915@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
99916
99917 mm_segment_t oldfs = get_fs();
99918 set_fs(get_ds());
99919- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
99920+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
99921 set_fs(oldfs);
99922 return res;
99923 }
99924@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
99925
99926 mm_segment_t oldfs = get_fs();
99927 set_fs(get_ds());
99928- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
99929+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
99930 set_fs(oldfs);
99931 return res;
99932 }
99933diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
99934index c8b0cc3..4da5ae2 100644
99935--- a/net/ipv4/netfilter/arp_tables.c
99936+++ b/net/ipv4/netfilter/arp_tables.c
99937@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
99938 private = &tmp;
99939 }
99940 #endif
99941+ memset(&info, 0, sizeof(info));
99942 info.valid_hooks = t->valid_hooks;
99943 memcpy(info.hook_entry, private->hook_entry,
99944 sizeof(info.hook_entry));
99945diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
99946index c156db2..e772975 100644
99947--- a/net/ipv4/netfilter/ip_queue.c
99948+++ b/net/ipv4/netfilter/ip_queue.c
99949@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
99950
99951 if (v->data_len < sizeof(*user_iph))
99952 return 0;
99953+ if (v->data_len > 65535)
99954+ return -EMSGSIZE;
99955+
99956 diff = v->data_len - e->skb->len;
99957 if (diff < 0) {
99958 if (pskb_trim(e->skb, v->data_len))
99959@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
99960 static inline void
99961 __ipq_rcv_skb(struct sk_buff *skb)
99962 {
99963- int status, type, pid, flags, nlmsglen, skblen;
99964+ int status, type, pid, flags;
99965+ unsigned int nlmsglen, skblen;
99966 struct nlmsghdr *nlh;
99967
99968 skblen = skb->len;
99969diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
99970index 0606db1..02e7e4c 100644
99971--- a/net/ipv4/netfilter/ip_tables.c
99972+++ b/net/ipv4/netfilter/ip_tables.c
99973@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
99974 private = &tmp;
99975 }
99976 #endif
99977+ memset(&info, 0, sizeof(info));
99978 info.valid_hooks = t->valid_hooks;
99979 memcpy(info.hook_entry, private->hook_entry,
99980 sizeof(info.hook_entry));
99981diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
99982index d9521f6..3c3eb25 100644
99983--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
99984+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
99985@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
99986
99987 *len = 0;
99988
99989- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
99990+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
99991 if (*octets == NULL) {
99992 if (net_ratelimit())
99993 printk("OOM in bsalg (%d)\n", __LINE__);
99994diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
99995index ab996f9..3da5f96 100644
99996--- a/net/ipv4/raw.c
99997+++ b/net/ipv4/raw.c
99998@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
99999 /* Charge it to the socket. */
100000
100001 if (sock_queue_rcv_skb(sk, skb) < 0) {
100002- atomic_inc(&sk->sk_drops);
100003+ atomic_inc_unchecked(&sk->sk_drops);
100004 kfree_skb(skb);
100005 return NET_RX_DROP;
100006 }
100007@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
100008 int raw_rcv(struct sock *sk, struct sk_buff *skb)
100009 {
100010 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
100011- atomic_inc(&sk->sk_drops);
100012+ atomic_inc_unchecked(&sk->sk_drops);
100013 kfree_skb(skb);
100014 return NET_RX_DROP;
100015 }
100016@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
100017
100018 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
100019 {
100020+ struct icmp_filter filter;
100021+
100022+ if (optlen < 0)
100023+ return -EINVAL;
100024 if (optlen > sizeof(struct icmp_filter))
100025 optlen = sizeof(struct icmp_filter);
100026- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
100027+ if (copy_from_user(&filter, optval, optlen))
100028 return -EFAULT;
100029+ raw_sk(sk)->filter = filter;
100030+
100031 return 0;
100032 }
100033
100034 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
100035 {
100036 int len, ret = -EFAULT;
100037+ struct icmp_filter filter;
100038
100039 if (get_user(len, optlen))
100040 goto out;
100041@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
100042 if (len > sizeof(struct icmp_filter))
100043 len = sizeof(struct icmp_filter);
100044 ret = -EFAULT;
100045- if (put_user(len, optlen) ||
100046- copy_to_user(optval, &raw_sk(sk)->filter, len))
100047+ filter = raw_sk(sk)->filter;
100048+ if (put_user(len, optlen) || len > sizeof filter ||
100049+ copy_to_user(optval, &filter, len))
100050 goto out;
100051 ret = 0;
100052 out: return ret;
100053@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
100054 sk_wmem_alloc_get(sp),
100055 sk_rmem_alloc_get(sp),
100056 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
100057- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
100058+ atomic_read(&sp->sk_refcnt),
100059+#ifdef CONFIG_GRKERNSEC_HIDESYM
100060+ NULL,
100061+#else
100062+ sp,
100063+#endif
100064+ atomic_read_unchecked(&sp->sk_drops));
100065 }
100066
100067 static int raw_seq_show(struct seq_file *seq, void *v)
100068diff --git a/net/ipv4/route.c b/net/ipv4/route.c
100069index 58f141b..b759702 100644
100070--- a/net/ipv4/route.c
100071+++ b/net/ipv4/route.c
100072@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
100073
100074 static inline int rt_genid(struct net *net)
100075 {
100076- return atomic_read(&net->ipv4.rt_genid);
100077+ return atomic_read_unchecked(&net->ipv4.rt_genid);
100078 }
100079
100080 #ifdef CONFIG_PROC_FS
100081@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
100082 unsigned char shuffle;
100083
100084 get_random_bytes(&shuffle, sizeof(shuffle));
100085- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
100086+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
100087 }
100088
100089 /*
100090@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
100091
100092 static __net_init int rt_secret_timer_init(struct net *net)
100093 {
100094- atomic_set(&net->ipv4.rt_genid,
100095+ atomic_set_unchecked(&net->ipv4.rt_genid,
100096 (int) ((num_physpages ^ (num_physpages>>8)) ^
100097 (jiffies ^ (jiffies >> 7))));
100098
100099diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
100100index f095659..adc892a 100644
100101--- a/net/ipv4/tcp.c
100102+++ b/net/ipv4/tcp.c
100103@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
100104 int val;
100105 int err = 0;
100106
100107+ pax_track_stack();
100108+
100109 /* This is a string value all the others are int's */
100110 if (optname == TCP_CONGESTION) {
100111 char name[TCP_CA_NAME_MAX];
100112@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
100113 struct tcp_sock *tp = tcp_sk(sk);
100114 int val, len;
100115
100116+ pax_track_stack();
100117+
100118 if (get_user(len, optlen))
100119 return -EFAULT;
100120
100121diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
100122index 6fc7961..33bad4a 100644
100123--- a/net/ipv4/tcp_ipv4.c
100124+++ b/net/ipv4/tcp_ipv4.c
100125@@ -85,6 +85,9 @@
100126 int sysctl_tcp_tw_reuse __read_mostly;
100127 int sysctl_tcp_low_latency __read_mostly;
100128
100129+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100130+extern int grsec_enable_blackhole;
100131+#endif
100132
100133 #ifdef CONFIG_TCP_MD5SIG
100134 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
100135@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
100136 return 0;
100137
100138 reset:
100139+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100140+ if (!grsec_enable_blackhole)
100141+#endif
100142 tcp_v4_send_reset(rsk, skb);
100143 discard:
100144 kfree_skb(skb);
100145@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
100146 TCP_SKB_CB(skb)->sacked = 0;
100147
100148 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
100149- if (!sk)
100150+ if (!sk) {
100151+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100152+ ret = 1;
100153+#endif
100154 goto no_tcp_socket;
100155+ }
100156
100157 process:
100158- if (sk->sk_state == TCP_TIME_WAIT)
100159+ if (sk->sk_state == TCP_TIME_WAIT) {
100160+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100161+ ret = 2;
100162+#endif
100163 goto do_time_wait;
100164+ }
100165
100166 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
100167 goto discard_and_relse;
100168@@ -1651,6 +1665,10 @@ no_tcp_socket:
100169 bad_packet:
100170 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
100171 } else {
100172+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100173+ if (!grsec_enable_blackhole || (ret == 1 &&
100174+ (skb->dev->flags & IFF_LOOPBACK)))
100175+#endif
100176 tcp_v4_send_reset(NULL, skb);
100177 }
100178
100179@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
100180 0, /* non standard timer */
100181 0, /* open_requests have no inode */
100182 atomic_read(&sk->sk_refcnt),
100183+#ifdef CONFIG_GRKERNSEC_HIDESYM
100184+ NULL,
100185+#else
100186 req,
100187+#endif
100188 len);
100189 }
100190
100191@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
100192 sock_i_uid(sk),
100193 icsk->icsk_probes_out,
100194 sock_i_ino(sk),
100195- atomic_read(&sk->sk_refcnt), sk,
100196+ atomic_read(&sk->sk_refcnt),
100197+#ifdef CONFIG_GRKERNSEC_HIDESYM
100198+ NULL,
100199+#else
100200+ sk,
100201+#endif
100202 jiffies_to_clock_t(icsk->icsk_rto),
100203 jiffies_to_clock_t(icsk->icsk_ack.ato),
100204 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
100205@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
100206 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
100207 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
100208 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
100209- atomic_read(&tw->tw_refcnt), tw, len);
100210+ atomic_read(&tw->tw_refcnt),
100211+#ifdef CONFIG_GRKERNSEC_HIDESYM
100212+ NULL,
100213+#else
100214+ tw,
100215+#endif
100216+ len);
100217 }
100218
100219 #define TMPSZ 150
100220diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
100221index 4c03598..e09a8e8 100644
100222--- a/net/ipv4/tcp_minisocks.c
100223+++ b/net/ipv4/tcp_minisocks.c
100224@@ -26,6 +26,10 @@
100225 #include <net/inet_common.h>
100226 #include <net/xfrm.h>
100227
100228+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100229+extern int grsec_enable_blackhole;
100230+#endif
100231+
100232 #ifdef CONFIG_SYSCTL
100233 #define SYNC_INIT 0 /* let the user enable it */
100234 #else
100235@@ -672,6 +676,10 @@ listen_overflow:
100236
100237 embryonic_reset:
100238 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
100239+
100240+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100241+ if (!grsec_enable_blackhole)
100242+#endif
100243 if (!(flg & TCP_FLAG_RST))
100244 req->rsk_ops->send_reset(sk, skb);
100245
100246diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
100247index af83bdf..ec91cb2 100644
100248--- a/net/ipv4/tcp_output.c
100249+++ b/net/ipv4/tcp_output.c
100250@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
100251 __u8 *md5_hash_location;
100252 int mss;
100253
100254+ pax_track_stack();
100255+
100256 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
100257 if (skb == NULL)
100258 return NULL;
100259diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
100260index 59f5b5e..193860f 100644
100261--- a/net/ipv4/tcp_probe.c
100262+++ b/net/ipv4/tcp_probe.c
100263@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
100264 if (cnt + width >= len)
100265 break;
100266
100267- if (copy_to_user(buf + cnt, tbuf, width))
100268+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
100269 return -EFAULT;
100270 cnt += width;
100271 }
100272diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
100273index 57d5501..a9ed13a 100644
100274--- a/net/ipv4/tcp_timer.c
100275+++ b/net/ipv4/tcp_timer.c
100276@@ -21,6 +21,10 @@
100277 #include <linux/module.h>
100278 #include <net/tcp.h>
100279
100280+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100281+extern int grsec_lastack_retries;
100282+#endif
100283+
100284 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
100285 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
100286 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
100287@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
100288 }
100289 }
100290
100291+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100292+ if ((sk->sk_state == TCP_LAST_ACK) &&
100293+ (grsec_lastack_retries > 0) &&
100294+ (grsec_lastack_retries < retry_until))
100295+ retry_until = grsec_lastack_retries;
100296+#endif
100297+
100298 if (retransmits_timed_out(sk, retry_until)) {
100299 /* Has it gone just too far? */
100300 tcp_write_err(sk);
100301diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
100302index 8e28770..72105c8 100644
100303--- a/net/ipv4/udp.c
100304+++ b/net/ipv4/udp.c
100305@@ -86,6 +86,7 @@
100306 #include <linux/types.h>
100307 #include <linux/fcntl.h>
100308 #include <linux/module.h>
100309+#include <linux/security.h>
100310 #include <linux/socket.h>
100311 #include <linux/sockios.h>
100312 #include <linux/igmp.h>
100313@@ -106,6 +107,10 @@
100314 #include <net/xfrm.h>
100315 #include "udp_impl.h"
100316
100317+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100318+extern int grsec_enable_blackhole;
100319+#endif
100320+
100321 struct udp_table udp_table;
100322 EXPORT_SYMBOL(udp_table);
100323
100324@@ -371,6 +376,9 @@ found:
100325 return s;
100326 }
100327
100328+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
100329+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
100330+
100331 /*
100332 * This routine is called by the ICMP module when it gets some
100333 * sort of error condition. If err < 0 then the socket should
100334@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
100335 dport = usin->sin_port;
100336 if (dport == 0)
100337 return -EINVAL;
100338+
100339+ err = gr_search_udp_sendmsg(sk, usin);
100340+ if (err)
100341+ return err;
100342 } else {
100343 if (sk->sk_state != TCP_ESTABLISHED)
100344 return -EDESTADDRREQ;
100345+
100346+ err = gr_search_udp_sendmsg(sk, NULL);
100347+ if (err)
100348+ return err;
100349+
100350 daddr = inet->daddr;
100351 dport = inet->dport;
100352 /* Open fast path for connected socket.
100353@@ -945,6 +962,10 @@ try_again:
100354 if (!skb)
100355 goto out;
100356
100357+ err = gr_search_udp_recvmsg(sk, skb);
100358+ if (err)
100359+ goto out_free;
100360+
100361 ulen = skb->len - sizeof(struct udphdr);
100362 copied = len;
100363 if (copied > ulen)
100364@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
100365 if (rc == -ENOMEM) {
100366 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
100367 is_udplite);
100368- atomic_inc(&sk->sk_drops);
100369+ atomic_inc_unchecked(&sk->sk_drops);
100370 }
100371 goto drop;
100372 }
100373@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
100374 goto csum_error;
100375
100376 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
100377+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100378+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
100379+#endif
100380 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
100381
100382 /*
100383@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
100384 sk_wmem_alloc_get(sp),
100385 sk_rmem_alloc_get(sp),
100386 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
100387- atomic_read(&sp->sk_refcnt), sp,
100388- atomic_read(&sp->sk_drops), len);
100389+ atomic_read(&sp->sk_refcnt),
100390+#ifdef CONFIG_GRKERNSEC_HIDESYM
100391+ NULL,
100392+#else
100393+ sp,
100394+#endif
100395+ atomic_read_unchecked(&sp->sk_drops), len);
100396 }
100397
100398 int udp4_seq_show(struct seq_file *seq, void *v)
100399diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
100400index 8ac3d09..fc58c5f 100644
100401--- a/net/ipv6/addrconf.c
100402+++ b/net/ipv6/addrconf.c
100403@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
100404 p.iph.ihl = 5;
100405 p.iph.protocol = IPPROTO_IPV6;
100406 p.iph.ttl = 64;
100407- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
100408+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
100409
100410 if (ops->ndo_do_ioctl) {
100411 mm_segment_t oldfs = get_fs();
100412diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
100413index cc4797d..7cfdfcc 100644
100414--- a/net/ipv6/inet6_connection_sock.c
100415+++ b/net/ipv6/inet6_connection_sock.c
100416@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
100417 #ifdef CONFIG_XFRM
100418 {
100419 struct rt6_info *rt = (struct rt6_info *)dst;
100420- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
100421+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
100422 }
100423 #endif
100424 }
100425@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
100426 #ifdef CONFIG_XFRM
100427 if (dst) {
100428 struct rt6_info *rt = (struct rt6_info *)dst;
100429- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
100430+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
100431 sk->sk_dst_cache = NULL;
100432 dst_release(dst);
100433 dst = NULL;
100434diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
100435index 093e9b2..f72cddb 100644
100436--- a/net/ipv6/inet6_hashtables.c
100437+++ b/net/ipv6/inet6_hashtables.c
100438@@ -119,7 +119,7 @@ out:
100439 }
100440 EXPORT_SYMBOL(__inet6_lookup_established);
100441
100442-static int inline compute_score(struct sock *sk, struct net *net,
100443+static inline int compute_score(struct sock *sk, struct net *net,
100444 const unsigned short hnum,
100445 const struct in6_addr *daddr,
100446 const int dif)
100447diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
100448index 4f7aaf6..f7acf45 100644
100449--- a/net/ipv6/ipv6_sockglue.c
100450+++ b/net/ipv6/ipv6_sockglue.c
100451@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
100452 int val, valbool;
100453 int retv = -ENOPROTOOPT;
100454
100455+ pax_track_stack();
100456+
100457 if (optval == NULL)
100458 val=0;
100459 else {
100460@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
100461 int len;
100462 int val;
100463
100464+ pax_track_stack();
100465+
100466 if (ip6_mroute_opt(optname))
100467 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
100468
100469@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
100470 if (sk->sk_type != SOCK_STREAM)
100471 return -ENOPROTOOPT;
100472
100473- msg.msg_control = optval;
100474+ msg.msg_control = (void __force_kernel *)optval;
100475 msg.msg_controllen = len;
100476 msg.msg_flags = 0;
100477
100478diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
100479index 1cf3f0c..1d4376f 100644
100480--- a/net/ipv6/netfilter/ip6_queue.c
100481+++ b/net/ipv6/netfilter/ip6_queue.c
100482@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
100483
100484 if (v->data_len < sizeof(*user_iph))
100485 return 0;
100486+ if (v->data_len > 65535)
100487+ return -EMSGSIZE;
100488+
100489 diff = v->data_len - e->skb->len;
100490 if (diff < 0) {
100491 if (pskb_trim(e->skb, v->data_len))
100492@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
100493 static inline void
100494 __ipq_rcv_skb(struct sk_buff *skb)
100495 {
100496- int status, type, pid, flags, nlmsglen, skblen;
100497+ int status, type, pid, flags;
100498+ unsigned int nlmsglen, skblen;
100499 struct nlmsghdr *nlh;
100500
100501 skblen = skb->len;
100502diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
100503index 78b5a36..7f37433 100644
100504--- a/net/ipv6/netfilter/ip6_tables.c
100505+++ b/net/ipv6/netfilter/ip6_tables.c
100506@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
100507 private = &tmp;
100508 }
100509 #endif
100510+ memset(&info, 0, sizeof(info));
100511 info.valid_hooks = t->valid_hooks;
100512 memcpy(info.hook_entry, private->hook_entry,
100513 sizeof(info.hook_entry));
100514diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
100515index 4f24570..b813b34 100644
100516--- a/net/ipv6/raw.c
100517+++ b/net/ipv6/raw.c
100518@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
100519 {
100520 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
100521 skb_checksum_complete(skb)) {
100522- atomic_inc(&sk->sk_drops);
100523+ atomic_inc_unchecked(&sk->sk_drops);
100524 kfree_skb(skb);
100525 return NET_RX_DROP;
100526 }
100527
100528 /* Charge it to the socket. */
100529 if (sock_queue_rcv_skb(sk,skb)<0) {
100530- atomic_inc(&sk->sk_drops);
100531+ atomic_inc_unchecked(&sk->sk_drops);
100532 kfree_skb(skb);
100533 return NET_RX_DROP;
100534 }
100535@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
100536 struct raw6_sock *rp = raw6_sk(sk);
100537
100538 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
100539- atomic_inc(&sk->sk_drops);
100540+ atomic_inc_unchecked(&sk->sk_drops);
100541 kfree_skb(skb);
100542 return NET_RX_DROP;
100543 }
100544@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
100545
100546 if (inet->hdrincl) {
100547 if (skb_checksum_complete(skb)) {
100548- atomic_inc(&sk->sk_drops);
100549+ atomic_inc_unchecked(&sk->sk_drops);
100550 kfree_skb(skb);
100551 return NET_RX_DROP;
100552 }
100553@@ -518,7 +518,7 @@ csum_copy_err:
100554 as some normal condition.
100555 */
100556 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
100557- atomic_inc(&sk->sk_drops);
100558+ atomic_inc_unchecked(&sk->sk_drops);
100559 goto out;
100560 }
100561
100562@@ -600,7 +600,7 @@ out:
100563 return err;
100564 }
100565
100566-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
100567+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
100568 struct flowi *fl, struct rt6_info *rt,
100569 unsigned int flags)
100570 {
100571@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
100572 u16 proto;
100573 int err;
100574
100575+ pax_track_stack();
100576+
100577 /* Rough check on arithmetic overflow,
100578 better check is made in ip6_append_data().
100579 */
100580@@ -916,12 +918,17 @@ do_confirm:
100581 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
100582 char __user *optval, int optlen)
100583 {
100584+ struct icmp6_filter filter;
100585+
100586 switch (optname) {
100587 case ICMPV6_FILTER:
100588+ if (optlen < 0)
100589+ return -EINVAL;
100590 if (optlen > sizeof(struct icmp6_filter))
100591 optlen = sizeof(struct icmp6_filter);
100592- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
100593+ if (copy_from_user(&filter, optval, optlen))
100594 return -EFAULT;
100595+ raw6_sk(sk)->filter = filter;
100596 return 0;
100597 default:
100598 return -ENOPROTOOPT;
100599@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
100600 char __user *optval, int __user *optlen)
100601 {
100602 int len;
100603+ struct icmp6_filter filter;
100604
100605 switch (optname) {
100606 case ICMPV6_FILTER:
100607@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
100608 len = sizeof(struct icmp6_filter);
100609 if (put_user(len, optlen))
100610 return -EFAULT;
100611- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
100612+ filter = raw6_sk(sk)->filter;
100613+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
100614 return -EFAULT;
100615 return 0;
100616 default:
100617@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
100618 0, 0L, 0,
100619 sock_i_uid(sp), 0,
100620 sock_i_ino(sp),
100621- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
100622+ atomic_read(&sp->sk_refcnt),
100623+#ifdef CONFIG_GRKERNSEC_HIDESYM
100624+ NULL,
100625+#else
100626+ sp,
100627+#endif
100628+ atomic_read_unchecked(&sp->sk_drops));
100629 }
100630
100631 static int raw6_seq_show(struct seq_file *seq, void *v)
100632diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
100633index faae6df..d4430c1 100644
100634--- a/net/ipv6/tcp_ipv6.c
100635+++ b/net/ipv6/tcp_ipv6.c
100636@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
100637 }
100638 #endif
100639
100640+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100641+extern int grsec_enable_blackhole;
100642+#endif
100643+
100644 static void tcp_v6_hash(struct sock *sk)
100645 {
100646 if (sk->sk_state != TCP_CLOSE) {
100647@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
100648 return 0;
100649
100650 reset:
100651+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100652+ if (!grsec_enable_blackhole)
100653+#endif
100654 tcp_v6_send_reset(sk, skb);
100655 discard:
100656 if (opt_skb)
100657@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
100658 TCP_SKB_CB(skb)->sacked = 0;
100659
100660 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
100661- if (!sk)
100662+ if (!sk) {
100663+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100664+ ret = 1;
100665+#endif
100666 goto no_tcp_socket;
100667+ }
100668
100669 process:
100670- if (sk->sk_state == TCP_TIME_WAIT)
100671+ if (sk->sk_state == TCP_TIME_WAIT) {
100672+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100673+ ret = 2;
100674+#endif
100675 goto do_time_wait;
100676+ }
100677
100678 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
100679 goto discard_and_relse;
100680@@ -1701,6 +1716,10 @@ no_tcp_socket:
100681 bad_packet:
100682 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
100683 } else {
100684+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100685+ if (!grsec_enable_blackhole || (ret == 1 &&
100686+ (skb->dev->flags & IFF_LOOPBACK)))
100687+#endif
100688 tcp_v6_send_reset(NULL, skb);
100689 }
100690
100691@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
100692 uid,
100693 0, /* non standard timer */
100694 0, /* open_requests have no inode */
100695- 0, req);
100696+ 0,
100697+#ifdef CONFIG_GRKERNSEC_HIDESYM
100698+ NULL
100699+#else
100700+ req
100701+#endif
100702+ );
100703 }
100704
100705 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
100706@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
100707 sock_i_uid(sp),
100708 icsk->icsk_probes_out,
100709 sock_i_ino(sp),
100710- atomic_read(&sp->sk_refcnt), sp,
100711+ atomic_read(&sp->sk_refcnt),
100712+#ifdef CONFIG_GRKERNSEC_HIDESYM
100713+ NULL,
100714+#else
100715+ sp,
100716+#endif
100717 jiffies_to_clock_t(icsk->icsk_rto),
100718 jiffies_to_clock_t(icsk->icsk_ack.ato),
100719 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
100720@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
100721 dest->s6_addr32[2], dest->s6_addr32[3], destp,
100722 tw->tw_substate, 0, 0,
100723 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
100724- atomic_read(&tw->tw_refcnt), tw);
100725+ atomic_read(&tw->tw_refcnt),
100726+#ifdef CONFIG_GRKERNSEC_HIDESYM
100727+ NULL
100728+#else
100729+ tw
100730+#endif
100731+ );
100732 }
100733
100734 static int tcp6_seq_show(struct seq_file *seq, void *v)
100735diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
100736index 9cc6289..052c521 100644
100737--- a/net/ipv6/udp.c
100738+++ b/net/ipv6/udp.c
100739@@ -49,6 +49,10 @@
100740 #include <linux/seq_file.h>
100741 #include "udp_impl.h"
100742
100743+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100744+extern int grsec_enable_blackhole;
100745+#endif
100746+
100747 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
100748 {
100749 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
100750@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
100751 if (rc == -ENOMEM) {
100752 UDP6_INC_STATS_BH(sock_net(sk),
100753 UDP_MIB_RCVBUFERRORS, is_udplite);
100754- atomic_inc(&sk->sk_drops);
100755+ atomic_inc_unchecked(&sk->sk_drops);
100756 }
100757 goto drop;
100758 }
100759@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
100760 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
100761 proto == IPPROTO_UDPLITE);
100762
100763+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
100764+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
100765+#endif
100766 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
100767
100768 kfree_skb(skb);
100769@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
100770 0, 0L, 0,
100771 sock_i_uid(sp), 0,
100772 sock_i_ino(sp),
100773- atomic_read(&sp->sk_refcnt), sp,
100774- atomic_read(&sp->sk_drops));
100775+ atomic_read(&sp->sk_refcnt),
100776+#ifdef CONFIG_GRKERNSEC_HIDESYM
100777+ NULL,
100778+#else
100779+ sp,
100780+#endif
100781+ atomic_read_unchecked(&sp->sk_drops));
100782 }
100783
100784 int udp6_seq_show(struct seq_file *seq, void *v)
100785diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
100786index 48bb1e3..5980e6e 100644
100787--- a/net/ipv6/xfrm6_tunnel.c
100788+++ b/net/ipv6/xfrm6_tunnel.c
100789@@ -258,7 +258,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
100790 __be32 spi;
100791
100792 spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
100793- return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
100794+ return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
100795 }
100796
100797 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
100798diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
100799index 811984d..11f59b7 100644
100800--- a/net/irda/ircomm/ircomm_tty.c
100801+++ b/net/irda/ircomm/ircomm_tty.c
100802@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
100803 add_wait_queue(&self->open_wait, &wait);
100804
100805 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
100806- __FILE__,__LINE__, tty->driver->name, self->open_count );
100807+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
100808
100809 /* As far as I can see, we protect open_count - Jean II */
100810 spin_lock_irqsave(&self->spinlock, flags);
100811 if (!tty_hung_up_p(filp)) {
100812 extra_count = 1;
100813- self->open_count--;
100814+ local_dec(&self->open_count);
100815 }
100816 spin_unlock_irqrestore(&self->spinlock, flags);
100817- self->blocked_open++;
100818+ local_inc(&self->blocked_open);
100819
100820 while (1) {
100821 if (tty->termios->c_cflag & CBAUD) {
100822@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
100823 }
100824
100825 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
100826- __FILE__,__LINE__, tty->driver->name, self->open_count );
100827+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
100828
100829 schedule();
100830 }
100831@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
100832 if (extra_count) {
100833 /* ++ is not atomic, so this should be protected - Jean II */
100834 spin_lock_irqsave(&self->spinlock, flags);
100835- self->open_count++;
100836+ local_inc(&self->open_count);
100837 spin_unlock_irqrestore(&self->spinlock, flags);
100838 }
100839- self->blocked_open--;
100840+ local_dec(&self->blocked_open);
100841
100842 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
100843- __FILE__,__LINE__, tty->driver->name, self->open_count);
100844+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
100845
100846 if (!retval)
100847 self->flags |= ASYNC_NORMAL_ACTIVE;
100848@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
100849 }
100850 /* ++ is not atomic, so this should be protected - Jean II */
100851 spin_lock_irqsave(&self->spinlock, flags);
100852- self->open_count++;
100853+ local_inc(&self->open_count);
100854
100855 tty->driver_data = self;
100856 self->tty = tty;
100857 spin_unlock_irqrestore(&self->spinlock, flags);
100858
100859 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
100860- self->line, self->open_count);
100861+ self->line, local_read(&self->open_count));
100862
100863 /* Not really used by us, but lets do it anyway */
100864 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
100865@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
100866 return;
100867 }
100868
100869- if ((tty->count == 1) && (self->open_count != 1)) {
100870+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
100871 /*
100872 * Uh, oh. tty->count is 1, which means that the tty
100873 * structure will be freed. state->count should always
100874@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
100875 */
100876 IRDA_DEBUG(0, "%s(), bad serial port count; "
100877 "tty->count is 1, state->count is %d\n", __func__ ,
100878- self->open_count);
100879- self->open_count = 1;
100880+ local_read(&self->open_count));
100881+ local_set(&self->open_count, 1);
100882 }
100883
100884- if (--self->open_count < 0) {
100885+ if (local_dec_return(&self->open_count) < 0) {
100886 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
100887- __func__, self->line, self->open_count);
100888- self->open_count = 0;
100889+ __func__, self->line, local_read(&self->open_count));
100890+ local_set(&self->open_count, 0);
100891 }
100892- if (self->open_count) {
100893+ if (local_read(&self->open_count)) {
100894 spin_unlock_irqrestore(&self->spinlock, flags);
100895
100896 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
100897@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
100898 tty->closing = 0;
100899 self->tty = NULL;
100900
100901- if (self->blocked_open) {
100902+ if (local_read(&self->blocked_open)) {
100903 if (self->close_delay)
100904 schedule_timeout_interruptible(self->close_delay);
100905 wake_up_interruptible(&self->open_wait);
100906@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
100907 spin_lock_irqsave(&self->spinlock, flags);
100908 self->flags &= ~ASYNC_NORMAL_ACTIVE;
100909 self->tty = NULL;
100910- self->open_count = 0;
100911+ local_set(&self->open_count, 0);
100912 spin_unlock_irqrestore(&self->spinlock, flags);
100913
100914 wake_up_interruptible(&self->open_wait);
100915@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
100916 seq_putc(m, '\n');
100917
100918 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
100919- seq_printf(m, "Open count: %d\n", self->open_count);
100920+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
100921 seq_printf(m, "Max data size: %d\n", self->max_data_size);
100922 seq_printf(m, "Max header size: %d\n", self->max_header_size);
100923
100924diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
100925index bada1b9..f325943 100644
100926--- a/net/iucv/af_iucv.c
100927+++ b/net/iucv/af_iucv.c
100928@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
100929
100930 write_lock_bh(&iucv_sk_list.lock);
100931
100932- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
100933+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
100934 while (__iucv_get_sock_by_name(name)) {
100935 sprintf(name, "%08x",
100936- atomic_inc_return(&iucv_sk_list.autobind_name));
100937+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
100938 }
100939
100940 write_unlock_bh(&iucv_sk_list.lock);
100941diff --git a/net/key/af_key.c b/net/key/af_key.c
100942index 4e98193..439b449 100644
100943--- a/net/key/af_key.c
100944+++ b/net/key/af_key.c
100945@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
100946 struct xfrm_migrate m[XFRM_MAX_DEPTH];
100947 struct xfrm_kmaddress k;
100948
100949+ pax_track_stack();
100950+
100951 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
100952 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
100953 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
100954@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
100955 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
100956 else
100957 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
100958+#ifdef CONFIG_GRKERNSEC_HIDESYM
100959+ NULL,
100960+#else
100961 s,
100962+#endif
100963 atomic_read(&s->sk_refcnt),
100964 sk_rmem_alloc_get(s),
100965 sk_wmem_alloc_get(s),
100966diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
100967index bda96d1..c038b72 100644
100968--- a/net/lapb/lapb_iface.c
100969+++ b/net/lapb/lapb_iface.c
100970@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
100971 goto out;
100972
100973 lapb->dev = dev;
100974- lapb->callbacks = *callbacks;
100975+ lapb->callbacks = callbacks;
100976
100977 __lapb_insert_cb(lapb);
100978
100979@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
100980
100981 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
100982 {
100983- if (lapb->callbacks.connect_confirmation)
100984- lapb->callbacks.connect_confirmation(lapb->dev, reason);
100985+ if (lapb->callbacks->connect_confirmation)
100986+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
100987 }
100988
100989 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
100990 {
100991- if (lapb->callbacks.connect_indication)
100992- lapb->callbacks.connect_indication(lapb->dev, reason);
100993+ if (lapb->callbacks->connect_indication)
100994+ lapb->callbacks->connect_indication(lapb->dev, reason);
100995 }
100996
100997 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
100998 {
100999- if (lapb->callbacks.disconnect_confirmation)
101000- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
101001+ if (lapb->callbacks->disconnect_confirmation)
101002+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
101003 }
101004
101005 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
101006 {
101007- if (lapb->callbacks.disconnect_indication)
101008- lapb->callbacks.disconnect_indication(lapb->dev, reason);
101009+ if (lapb->callbacks->disconnect_indication)
101010+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
101011 }
101012
101013 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
101014 {
101015- if (lapb->callbacks.data_indication)
101016- return lapb->callbacks.data_indication(lapb->dev, skb);
101017+ if (lapb->callbacks->data_indication)
101018+ return lapb->callbacks->data_indication(lapb->dev, skb);
101019
101020 kfree_skb(skb);
101021 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
101022@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
101023 {
101024 int used = 0;
101025
101026- if (lapb->callbacks.data_transmit) {
101027- lapb->callbacks.data_transmit(lapb->dev, skb);
101028+ if (lapb->callbacks->data_transmit) {
101029+ lapb->callbacks->data_transmit(lapb->dev, skb);
101030 used = 1;
101031 }
101032
101033diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
101034index fe2d3f8..e57f683 100644
101035--- a/net/mac80211/cfg.c
101036+++ b/net/mac80211/cfg.c
101037@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
101038 return err;
101039 }
101040
101041-struct cfg80211_ops mac80211_config_ops = {
101042+const struct cfg80211_ops mac80211_config_ops = {
101043 .add_virtual_intf = ieee80211_add_iface,
101044 .del_virtual_intf = ieee80211_del_iface,
101045 .change_virtual_intf = ieee80211_change_iface,
101046diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
101047index 7d7879f..2d51f62 100644
101048--- a/net/mac80211/cfg.h
101049+++ b/net/mac80211/cfg.h
101050@@ -4,6 +4,6 @@
101051 #ifndef __CFG_H
101052 #define __CFG_H
101053
101054-extern struct cfg80211_ops mac80211_config_ops;
101055+extern const struct cfg80211_ops mac80211_config_ops;
101056
101057 #endif /* __CFG_H */
101058diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
101059index 99c7525..9cb4937 100644
101060--- a/net/mac80211/debugfs_key.c
101061+++ b/net/mac80211/debugfs_key.c
101062@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
101063 size_t count, loff_t *ppos)
101064 {
101065 struct ieee80211_key *key = file->private_data;
101066- int i, res, bufsize = 2 * key->conf.keylen + 2;
101067+ int i, bufsize = 2 * key->conf.keylen + 2;
101068 char *buf = kmalloc(bufsize, GFP_KERNEL);
101069 char *p = buf;
101070+ ssize_t res;
101071+
101072+ if (buf == NULL)
101073+ return -ENOMEM;
101074
101075 for (i = 0; i < key->conf.keylen; i++)
101076 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
101077diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
101078index 33a2e89..08650c8 100644
101079--- a/net/mac80211/debugfs_sta.c
101080+++ b/net/mac80211/debugfs_sta.c
101081@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
101082 int i;
101083 struct sta_info *sta = file->private_data;
101084
101085+ pax_track_stack();
101086+
101087 spin_lock_bh(&sta->lock);
101088 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
101089 sta->ampdu_mlme.dialog_token_allocator + 1);
101090diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
101091index ca62bfe..6657a03 100644
101092--- a/net/mac80211/ieee80211_i.h
101093+++ b/net/mac80211/ieee80211_i.h
101094@@ -25,6 +25,7 @@
101095 #include <linux/etherdevice.h>
101096 #include <net/cfg80211.h>
101097 #include <net/mac80211.h>
101098+#include <asm/local.h>
101099 #include "key.h"
101100 #include "sta_info.h"
101101
101102@@ -635,7 +636,7 @@ struct ieee80211_local {
101103 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
101104 spinlock_t queue_stop_reason_lock;
101105
101106- int open_count;
101107+ local_t open_count;
101108 int monitors, cooked_mntrs;
101109 /* number of interfaces with corresponding FIF_ flags */
101110 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
101111diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
101112index 079c500..eb3c6d4 100644
101113--- a/net/mac80211/iface.c
101114+++ b/net/mac80211/iface.c
101115@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
101116 break;
101117 }
101118
101119- if (local->open_count == 0) {
101120+ if (local_read(&local->open_count) == 0) {
101121 res = drv_start(local);
101122 if (res)
101123 goto err_del_bss;
101124@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
101125 * Validate the MAC address for this device.
101126 */
101127 if (!is_valid_ether_addr(dev->dev_addr)) {
101128- if (!local->open_count)
101129+ if (!local_read(&local->open_count))
101130 drv_stop(local);
101131 return -EADDRNOTAVAIL;
101132 }
101133@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
101134
101135 hw_reconf_flags |= __ieee80211_recalc_idle(local);
101136
101137- local->open_count++;
101138+ local_inc(&local->open_count);
101139 if (hw_reconf_flags) {
101140 ieee80211_hw_config(local, hw_reconf_flags);
101141 /*
101142@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
101143 err_del_interface:
101144 drv_remove_interface(local, &conf);
101145 err_stop:
101146- if (!local->open_count)
101147+ if (!local_read(&local->open_count))
101148 drv_stop(local);
101149 err_del_bss:
101150 sdata->bss = NULL;
101151@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
101152 WARN_ON(!list_empty(&sdata->u.ap.vlans));
101153 }
101154
101155- local->open_count--;
101156+ local_dec(&local->open_count);
101157
101158 switch (sdata->vif.type) {
101159 case NL80211_IFTYPE_AP_VLAN:
101160@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
101161
101162 ieee80211_recalc_ps(local, -1);
101163
101164- if (local->open_count == 0) {
101165+ if (local_read(&local->open_count) == 0) {
101166 ieee80211_clear_tx_pending(local);
101167 ieee80211_stop_device(local);
101168
101169diff --git a/net/mac80211/main.c b/net/mac80211/main.c
101170index 2dfe176..74e4388 100644
101171--- a/net/mac80211/main.c
101172+++ b/net/mac80211/main.c
101173@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
101174 local->hw.conf.power_level = power;
101175 }
101176
101177- if (changed && local->open_count) {
101178+ if (changed && local_read(&local->open_count)) {
101179 ret = drv_config(local, changed);
101180 /*
101181 * Goal:
101182diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
101183index e67eea7..fcc227e 100644
101184--- a/net/mac80211/mlme.c
101185+++ b/net/mac80211/mlme.c
101186@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
101187 bool have_higher_than_11mbit = false, newsta = false;
101188 u16 ap_ht_cap_flags;
101189
101190+ pax_track_stack();
101191+
101192 /*
101193 * AssocResp and ReassocResp have identical structure, so process both
101194 * of them in this function.
101195diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
101196index e535f1c..4d733d1 100644
101197--- a/net/mac80211/pm.c
101198+++ b/net/mac80211/pm.c
101199@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
101200 }
101201
101202 /* stop hardware - this must stop RX */
101203- if (local->open_count)
101204+ if (local_read(&local->open_count))
101205 ieee80211_stop_device(local);
101206
101207 local->suspended = true;
101208diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
101209index b33efc4..0a2efb6 100644
101210--- a/net/mac80211/rate.c
101211+++ b/net/mac80211/rate.c
101212@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
101213 struct rate_control_ref *ref, *old;
101214
101215 ASSERT_RTNL();
101216- if (local->open_count)
101217+ if (local_read(&local->open_count))
101218 return -EBUSY;
101219
101220 ref = rate_control_alloc(name, local);
101221diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
101222index b1d7904..57e4da7 100644
101223--- a/net/mac80211/tx.c
101224+++ b/net/mac80211/tx.c
101225@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
101226 return cpu_to_le16(dur);
101227 }
101228
101229-static int inline is_ieee80211_device(struct ieee80211_local *local,
101230+static inline int is_ieee80211_device(struct ieee80211_local *local,
101231 struct net_device *dev)
101232 {
101233 return local == wdev_priv(dev->ieee80211_ptr);
101234diff --git a/net/mac80211/util.c b/net/mac80211/util.c
101235index 31b1085..48fb26d 100644
101236--- a/net/mac80211/util.c
101237+++ b/net/mac80211/util.c
101238@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
101239 local->resuming = true;
101240
101241 /* restart hardware */
101242- if (local->open_count) {
101243+ if (local_read(&local->open_count)) {
101244 /*
101245 * Upon resume hardware can sometimes be goofy due to
101246 * various platform / driver / bus issues, so restarting
101247diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
101248index 634d14a..b35a608 100644
101249--- a/net/netfilter/Kconfig
101250+++ b/net/netfilter/Kconfig
101251@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
101252
101253 To compile it as a module, choose M here. If unsure, say N.
101254
101255+config NETFILTER_XT_MATCH_GRADM
101256+ tristate '"gradm" match support'
101257+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
101258+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
101259+ ---help---
101260+ The gradm match allows to match on grsecurity RBAC being enabled.
101261+ It is useful when iptables rules are applied early on bootup to
101262+ prevent connections to the machine (except from a trusted host)
101263+ while the RBAC system is disabled.
101264+
101265 config NETFILTER_XT_MATCH_HASHLIMIT
101266 tristate '"hashlimit" match support'
101267 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
101268diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
101269index 49f62ee..a17b2c6 100644
101270--- a/net/netfilter/Makefile
101271+++ b/net/netfilter/Makefile
101272@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
101273 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
101274 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
101275 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
101276+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
101277 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
101278 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
101279 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
101280diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
101281index 3c7e427..724043c 100644
101282--- a/net/netfilter/ipvs/ip_vs_app.c
101283+++ b/net/netfilter/ipvs/ip_vs_app.c
101284@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
101285 .open = ip_vs_app_open,
101286 .read = seq_read,
101287 .llseek = seq_lseek,
101288- .release = seq_release,
101289+ .release = seq_release_net,
101290 };
101291 #endif
101292
101293diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
101294index 95682e5..457dbac 100644
101295--- a/net/netfilter/ipvs/ip_vs_conn.c
101296+++ b/net/netfilter/ipvs/ip_vs_conn.c
101297@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
101298 /* if the connection is not template and is created
101299 * by sync, preserve the activity flag.
101300 */
101301- cp->flags |= atomic_read(&dest->conn_flags) &
101302+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
101303 (~IP_VS_CONN_F_INACTIVE);
101304 else
101305- cp->flags |= atomic_read(&dest->conn_flags);
101306+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
101307 cp->dest = dest;
101308
101309 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
101310@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
101311 atomic_set(&cp->refcnt, 1);
101312
101313 atomic_set(&cp->n_control, 0);
101314- atomic_set(&cp->in_pkts, 0);
101315+ atomic_set_unchecked(&cp->in_pkts, 0);
101316
101317 atomic_inc(&ip_vs_conn_count);
101318 if (flags & IP_VS_CONN_F_NO_CPORT)
101319@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
101320 .open = ip_vs_conn_open,
101321 .read = seq_read,
101322 .llseek = seq_lseek,
101323- .release = seq_release,
101324+ .release = seq_release_net,
101325 };
101326
101327 static const char *ip_vs_origin_name(unsigned flags)
101328@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
101329 .open = ip_vs_conn_sync_open,
101330 .read = seq_read,
101331 .llseek = seq_lseek,
101332- .release = seq_release,
101333+ .release = seq_release_net,
101334 };
101335
101336 #endif
101337@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
101338
101339 /* Don't drop the entry if its number of incoming packets is not
101340 located in [0, 8] */
101341- i = atomic_read(&cp->in_pkts);
101342+ i = atomic_read_unchecked(&cp->in_pkts);
101343 if (i > 8 || i < 0) return 0;
101344
101345 if (!todrop_rate[i]) return 0;
101346diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
101347index b95699f..5fee919 100644
101348--- a/net/netfilter/ipvs/ip_vs_core.c
101349+++ b/net/netfilter/ipvs/ip_vs_core.c
101350@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
101351 ret = cp->packet_xmit(skb, cp, pp);
101352 /* do not touch skb anymore */
101353
101354- atomic_inc(&cp->in_pkts);
101355+ atomic_inc_unchecked(&cp->in_pkts);
101356 ip_vs_conn_put(cp);
101357 return ret;
101358 }
101359@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
101360 * Sync connection if it is about to close to
101361 * encorage the standby servers to update the connections timeout
101362 */
101363- pkts = atomic_add_return(1, &cp->in_pkts);
101364+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
101365 if (af == AF_INET &&
101366 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
101367 (((cp->protocol != IPPROTO_TCP ||
101368diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
101369index 02b2610..2d89424 100644
101370--- a/net/netfilter/ipvs/ip_vs_ctl.c
101371+++ b/net/netfilter/ipvs/ip_vs_ctl.c
101372@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
101373 ip_vs_rs_hash(dest);
101374 write_unlock_bh(&__ip_vs_rs_lock);
101375 }
101376- atomic_set(&dest->conn_flags, conn_flags);
101377+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
101378
101379 /* bind the service */
101380 if (!dest->svc) {
101381@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
101382 " %-7s %-6d %-10d %-10d\n",
101383 &dest->addr.in6,
101384 ntohs(dest->port),
101385- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
101386+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
101387 atomic_read(&dest->weight),
101388 atomic_read(&dest->activeconns),
101389 atomic_read(&dest->inactconns));
101390@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
101391 "%-7s %-6d %-10d %-10d\n",
101392 ntohl(dest->addr.ip),
101393 ntohs(dest->port),
101394- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
101395+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
101396 atomic_read(&dest->weight),
101397 atomic_read(&dest->activeconns),
101398 atomic_read(&dest->inactconns));
101399@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
101400 .open = ip_vs_info_open,
101401 .read = seq_read,
101402 .llseek = seq_lseek,
101403- .release = seq_release_private,
101404+ .release = seq_release_net,
101405 };
101406
101407 #endif
101408@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
101409 .open = ip_vs_stats_seq_open,
101410 .read = seq_read,
101411 .llseek = seq_lseek,
101412- .release = single_release,
101413+ .release = single_release_net,
101414 };
101415
101416 #endif
101417@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
101418
101419 entry.addr = dest->addr.ip;
101420 entry.port = dest->port;
101421- entry.conn_flags = atomic_read(&dest->conn_flags);
101422+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
101423 entry.weight = atomic_read(&dest->weight);
101424 entry.u_threshold = dest->u_threshold;
101425 entry.l_threshold = dest->l_threshold;
101426@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
101427 unsigned char arg[128];
101428 int ret = 0;
101429
101430+ pax_track_stack();
101431+
101432 if (!capable(CAP_NET_ADMIN))
101433 return -EPERM;
101434
101435@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
101436 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
101437
101438 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
101439- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
101440+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
101441 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
101442 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
101443 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
101444diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
101445index e177f0d..55e8581 100644
101446--- a/net/netfilter/ipvs/ip_vs_sync.c
101447+++ b/net/netfilter/ipvs/ip_vs_sync.c
101448@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
101449
101450 if (opt)
101451 memcpy(&cp->in_seq, opt, sizeof(*opt));
101452- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
101453+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
101454 cp->state = state;
101455 cp->old_state = cp->state;
101456 /*
101457diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
101458index 30b3189..e2e4b55 100644
101459--- a/net/netfilter/ipvs/ip_vs_xmit.c
101460+++ b/net/netfilter/ipvs/ip_vs_xmit.c
101461@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
101462 else
101463 rc = NF_ACCEPT;
101464 /* do not touch skb anymore */
101465- atomic_inc(&cp->in_pkts);
101466+ atomic_inc_unchecked(&cp->in_pkts);
101467 goto out;
101468 }
101469
101470@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
101471 else
101472 rc = NF_ACCEPT;
101473 /* do not touch skb anymore */
101474- atomic_inc(&cp->in_pkts);
101475+ atomic_inc_unchecked(&cp->in_pkts);
101476 goto out;
101477 }
101478
101479diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
101480index d521718..d0fd7a1 100644
101481--- a/net/netfilter/nf_conntrack_netlink.c
101482+++ b/net/netfilter/nf_conntrack_netlink.c
101483@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
101484 static int
101485 ctnetlink_parse_tuple(const struct nlattr * const cda[],
101486 struct nf_conntrack_tuple *tuple,
101487- enum ctattr_tuple type, u_int8_t l3num)
101488+ enum ctattr_type type, u_int8_t l3num)
101489 {
101490 struct nlattr *tb[CTA_TUPLE_MAX+1];
101491 int err;
101492diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
101493index f900dc3..5e45346 100644
101494--- a/net/netfilter/nfnetlink_log.c
101495+++ b/net/netfilter/nfnetlink_log.c
101496@@ -68,7 +68,7 @@ struct nfulnl_instance {
101497 };
101498
101499 static DEFINE_RWLOCK(instances_lock);
101500-static atomic_t global_seq;
101501+static atomic_unchecked_t global_seq;
101502
101503 #define INSTANCE_BUCKETS 16
101504 static struct hlist_head instance_table[INSTANCE_BUCKETS];
101505@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
101506 /* global sequence number */
101507 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
101508 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
101509- htonl(atomic_inc_return(&global_seq)));
101510+ htonl(atomic_inc_return_unchecked(&global_seq)));
101511
101512 if (data_len) {
101513 struct nlattr *nla;
101514diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
101515new file mode 100644
101516index 0000000..b1bac76
101517--- /dev/null
101518+++ b/net/netfilter/xt_gradm.c
101519@@ -0,0 +1,51 @@
101520+/*
101521+ * gradm match for netfilter
101522